/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

#define XCP_INST_MASK(num_inst, xcp_id) \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

#define AMDGPU_XCP_OPS_KFD	(1 << 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

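/* VCN instances are shared across partitions when there are more XCPs than VCN instances */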
static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}

static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
				     uint32_t inst_idx, struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
	if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		break;
	default:
		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
				ring->xcp_id);
			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
			break;
		}
	}
}

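/* Append a ring's scheduler to the selected partition's list for the ring's type and priority */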
static void aqua_vanjaram_xcp_gpu_sched_update(
		struct amdgpu_device *adev,
		struct amdgpu_ring *ring,
		unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
		  sel_xcp_id, ring->funcs->type,
		  ring->hw_prio, *num_gpu_sched);
}

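/* Rebuild the per-partition scheduler lists from all rings that are ready to be scheduled */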
static int aqua_vanjaram_xcp_sched_list_update(
		struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX MODE in certain
		 * configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    aqua_vanjaram_xcp_vcn_shared(adev))
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

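/* Assign every ring to a partition, then refresh the per-partition scheduler lists */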
static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
		else
			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
	}

	return aqua_vanjaram_xcp_sched_list_update(adev);
}

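/* Pick a partition for the fpriv (the least referenced one if none is set yet)
 * and return the scheduler list of the requested IP type and priority.
 */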
static int aqua_vanjaram_select_scheds(
		struct amdgpu_device *adev,
		u32 hw_ip,
		u32 hw_prio,
		struct amdgpu_fpriv *fpriv,
		unsigned int *num_scheds,
		struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
	} else {
		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
						enum amd_hw_ip_block_type block,
						int8_t inst)
{
	int8_t dev_inst;

	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	/* JPEG uses the same mapping as VCN since JPEG is only an alias of VCN */
	case VCN_HWIP:
		dev_inst = adev->ip_map.dev_inst[block][inst];
		break;
	default:
		/* For the rest of the IPs, no lookup is required.
		 * Assume 'logical instance == physical instance' for all configs. */
		dev_inst = inst;
		break;
	}

	return dev_inst;
}

static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
						  enum amd_hw_ip_block_type block,
						  uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
		dev_mask |= (1 << dev_inst);
		mask &= ~(1 << log_inst);
	}

	return dev_mask;
}

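/* Record the logical-to-device instance mapping of an IP block from its instance mask */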
static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
					  enum amd_hw_ip_block_type ip_block,
					  uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
	u32 ip_map[][2] = {
		{ GC_HWIP, adev->gfx.xcc_mask },
		{ SDMA0_HWIP, adev->sdma.sdma_mask },
		{ VCN_HWIP, adev->vcn.inst_mask },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for smn addressing on different AIDs:
 *   bit[34]: indicate cross AID access
 *   bit[33:32]: indicate target AID id
 * AID id range is 0 ~ 3 as maximum AID number is 4.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing and bit[34:32] will be zeros */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}

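/* Derive the partition mode from the number of XCCs assigned to each XCP */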
static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode)
			dev_warn(
				adev->dev,
				"Mismatch in compute partition mode - reported : %d derived : %d",
				mode, derv_mode);
	}

	return mode;
}

static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					   enum AMDGPU_XCP_IP_BLOCK ip_id,
					   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

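/* Report per-partition resource counts and compatible NPS modes for the given partition mode */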
static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode,
					  struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcp = 1;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcp = 2;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcp = 3;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcp = 4;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		xcp_cfg->xcp_res[i].num_inst =
			i == AMDGPU_XCP_RES_JPEG ?
			xcp_cfg->xcp_res[i].num_inst *
			adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}

static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

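/* Check whether a partition mode is compatible with the XCC count and memory partitions */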
static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 3) &&
		       ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 4) &&
		       (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		/* (num_xcc > 1) because 1 XCC is considered SPX, not CPX.
		 * (num_xcc % adev->gmc.num_mem_partitions) == 0 because
		 * num_compute_partitions can't be less than num_mem_partitions
		 */
		return ((num_xcc > 1) &&
			(num_xcc % adev->gmc.num_mem_partitions) == 0);
	default:
		return false;
	}

	return false;
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

static void
__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;

	xcp_mgr->supp_xcp_modes = 0;

	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_QPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 6:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_TPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 4:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	/* this seems to exist only in the emulation phase */
	case 2:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 1:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;

	default:
		break;
	}
}

static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

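/* Switch the compute partition mode, tearing down and reinitializing KFD around the change when needed */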
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

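/* Map an XCC to its memory partition based on the current spatial partitioning layout */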
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					    enum AMDGPU_XCP_IP_BLOCK ip_id,
					    struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list =
		&aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	__aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
	 * addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}

static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR	4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smreg_0x1A380088, 6, DW_ADDR_INCR },
};

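/* Snapshot PCIe SMN registers and config-space status into the caller's buffer */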
static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

#define XGMI_LINK_REG(smnreg, l)	((smnreg) | (l << 20))

#define NUM_XGMI_SMN_REGS	25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Buffer must be large enough to hold all XGMI instances */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l)	((smnreg) | (l << 20))

#define NUM_WAFL_SMN_REGS	5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060	0x1B311060
#define smnreg_0x1B411060	0x1B411060
#define smnreg_0x1B511060	0x1B511060
#define smnreg_0x1B611060	0x1B611060

#define smnreg_0x1C307120	0x1C307120
#define smnreg_0x1C317120	0x1C317120

#define smnreg_0x1C320830	0x1C320830
#define smnreg_0x1C380830	0x1C380830
#define smnreg_0x1C3D0830	0x1C3D0830
#define smnreg_0x1C420830	0x1C420830

#define smnreg_0x1C320100	0x1C320100
#define smnreg_0x1C380100	0x1C380100
#define smnreg_0x1C3D0100	0x1C3D0100
#define smnreg_0x1C420100	0x1C420100

#define smnreg_0x1B310500	0x1B310500
#define smnreg_0x1C300400	0x1C300400

#define USR_CAKE_INCR	0x11000
#define USR_LINK_INCR	0x100000
#define USR_CP_INCR	0x10000

#define NUM_USR_SMN_REGS	20

struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS	46
struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};

static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(
			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}