/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"
#include "amdgpu_ip.h"

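/*
 * Build the mask of instances owned by partition 'xcp_id' when each
 * partition owns 'num_inst' consecutive instances (0 when num_inst is 0).
 */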
#define XCP_INST_MASK(num_inst, xcp_id) \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

/* Fixed pattern for smn addressing on different AIDs:
 *   bit[34]: indicates a cross-AID access
 *   bit[33:32]: the target AID id
 * The AID id range is 0 ~ 3, as the maximum number of AIDs is 4.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing; bits [34:32] will be zero */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host, accesses to any non-zero AID are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}

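/*
 * Derive the compute partition mode from the number of XCCs per XCP
 * reported by the GFX IP, e.g. 8 XCCs with 4 per XCP implies DPX.
 */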
static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}
}

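/*
 * Query the current compute partition mode, warning if the mode reported
 * by NBIO disagrees with the mode derived from the GFX configuration.
 */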
static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode) {
			dev_warn(adev->dev,
				 "Mismatch in compute partition mode - reported: %d, derived: %d",
				 mode, derv_mode);
			if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
				amdgpu_device_bus_status_check(adev);
		}
	}

	return mode;
}

static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

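/*
 * Fill in the instance mask and IP functions of the given IP block for
 * partition 'xcp_id', distributing SDMA/VCN instances evenly across the
 * partitions (a VCN instance may be shared by several partitions).
 */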
static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					   enum AMDGPU_XCP_IP_BLOCK ip_id,
					   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO: Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

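/*
 * Report the number of partitions created by 'px_mode' and the set of
 * NPS (memory partition) modes compatible with it.
 */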
static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
					    int px_mode, int *num_xcp,
					    uint16_t *nps_modes)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
		return -EINVAL;

	switch (px_mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		*num_xcp = 1;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		*num_xcp = 2;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		*num_xcp = 3;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		*num_xcp = 4;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		if (gc_ver == IP_VERSION(9, 5, 0))
			*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		if (gc_ver == IP_VERSION(9, 5, 0))
			*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

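/*
 * Describe the resources (XCC/SDMA/VCN/JPEG instances) each partition
 * owns in 'mode'; when a resource has fewer instances than partitions,
 * record how many partitions share one instance.
 */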
static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode,
					  struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i, r;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
	if (r)
		return r;

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		xcp_cfg->xcp_res[i].num_inst =
			i == AMDGPU_XCP_RES_JPEG ?
			xcp_cfg->xcp_res[i].num_inst *
			adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}

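/*
 * Choose a compute partition mode that matches the current memory
 * partitioning, e.g. a single memory partition maps to SPX.
 */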
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

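/*
 * A compute partition mode is valid only if the current NPS (memory)
 * mode is compatible with it and the XCC count supports it.
 */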
static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp, r;
	int num_xcp, nps_mode;
	u16 supp_nps_modes;
	bool comp_mode;

	nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
					     &supp_nps_modes);
	if (r)
		return false;

	comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return comp_mode && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return comp_mode && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return comp_mode && ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return comp_mode && (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return comp_mode && (num_xcc > 1);
	default:
		return false;
	}
}

static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

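/*
 * Switch to the requested compute partition mode (or pick one
 * automatically), locking out KFD around the switch and re-initializing
 * the partitions afterwards.
 */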
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial partition mode validation has already been done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now;
	 * check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					    enum AMDGPU_XCP_IP_BLOCK ip_id,
					    struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 SDMA instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

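	/*
	 * An AID is considered present if all of its SDMA instances, or
	 * the lower or upper half of them, are available.
	 */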
	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEG
	 * instances will be addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	amdgpu_ip_map_init(adev);

	return 0;
}

static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

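/* A run of SMN registers: 'num_regs' registers starting at 'start_addr',
 * spaced 'incrx' bytes apart.
 */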
struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR	4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smreg_0x1A380088, 6, DW_ADDR_INCR },
};

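/*
 * Snapshot the PCIe SMN registers plus the upstream bridge's status and
 * AER registers into 'buf'; returns the number of bytes written.
 */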
static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

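/* Registers of XGMI link 'l' are offset by (l << 20), i.e. 1 MiB per link */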
#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_XGMI_SMN_REGS	25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Buffer must hold the maximum number of xgmi instances */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_WAFL_SMN_REGS	5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060	0x1B311060
#define smnreg_0x1B411060	0x1B411060
#define smnreg_0x1B511060	0x1B511060
#define smnreg_0x1B611060	0x1B611060

#define smnreg_0x1C307120	0x1C307120
#define smnreg_0x1C317120	0x1C317120

#define smnreg_0x1C320830	0x1C320830
#define smnreg_0x1C380830	0x1C380830
#define smnreg_0x1C3D0830	0x1C3D0830
#define smnreg_0x1C420830	0x1C420830

#define smnreg_0x1C320100	0x1C320100
#define smnreg_0x1C380100	0x1C380100
#define smnreg_0x1C3D0100	0x1C3D0100
#define smnreg_0x1C420100	0x1C420100

#define smnreg_0x1B310500	0x1B310500
#define smnreg_0x1C300400	0x1C300400

#define USR_CAKE_INCR	0x11000
#define USR_LINK_INCR	0x100000
#define USR_CP_INCR	0x10000

#define NUM_USR_SMN_REGS	20

static struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS	46
static struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};

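/*
 * Snapshot the USR or USR_1 register set for each AID into 'buf';
 * returns the number of bytes written.
 */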
static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = reg_state;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(
			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}