/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "gfxhub_v3_0_3.h"
#include "gfxhub_v11_5_0.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
#include "mmhub_v3_3.h"
#include "athub_v3_0.h"

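/* Nothing to program per interrupt state for ECC; this stub exists so the
 * ECC IRQ source registered below has a valid .set callback.
 */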
static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix && (adev->in_runpm || adev->in_suspend ||
				       amdgpu_in_reset(adev)))
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
	uint32_t status = 0;
	u64 addr;

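	/* Reassemble the 48-bit faulting address: src_data[0] carries the
	 * 4K page frame (bits 43:12) and the low nibble of src_data[1]
	 * carries bits 47:44.
	 */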
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB(0))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info *task_info;

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
		if (task_info) {
			amdgpu_vm_print_task_info(adev, task_info);
			amdgpu_vm_put_task_info(task_info);
		}

		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);

		/* Only print L2 fault status if the status register could be read and
		 * contains useful information
		 */
		if (status != 0)
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
	.set = gmc_v11_0_vm_fault_interrupt_state,
	.process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
	.set = gmc_v11_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
	}
}

/**
 * gmc_v11_0_use_invalidate_semaphore - decide whether to use the
 * invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

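/* Look up the PASID mapped to @vmid in the IH VMID LUT. Returns true and
 * stores the PASID in @p_pasid when a non-zero mapping exists.
 */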
static bool gmc_v11_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	/* Use register 17 for GART */
	const unsigned int eng = 17;
	unsigned char hub_ip;
	u32 sem, req, ack;
	unsigned int i;
	u32 tmp;

	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
		return;

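	/* The per-engine SEM/REQ/ACK invalidation registers are laid out at
	 * a fixed stride, so index them by the engine number.
	 */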
	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	/* flush hdp cache */
	amdgpu_device_flush_hdp(adev, NULL);

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal.
	 */
	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, GET_INST(GC, 0));
		return;
	}

	/* This path is needed before KIQ/MES/GFXOFF are set up */
	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU can lose the GPUVM invalidate acknowledge state across a
	 * power-gating off cycle. As a workaround, acquire the semaphore
	 * before invalidation and release it afterwards to avoid entering
	 * a power-gated state in between.
	 */

	/* TODO: Debugging with the semaphore still needs to be done for the GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: Debugging with the semaphore still needs to be done for the GFXHUB as well. */
	if (use_semaphore)
		WREG32_RLC_NO_KIQ(sem, 0, hub_ip);

	/* Issue an additional private vm invalidation to the MMHUB */
	if ((vmhub != AMDGPU_GFXHUB(0)) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
	    !amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue the private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i >= adev->usec_timeout)
		dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: which KIQ instance to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							      &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v11_0_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v11_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
		}
	}
}

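/**
 * gmc_v11_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: amdgpu ring pointer
 * @vmid: vm instance to flush
 * @pd_addr: new page directory base address
 *
 * Emit the register writes and waits that update the page directory base
 * for @vmid and invalidate the hub's TLB, then return @pd_addr.
 */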
static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * The GPU can lose the GPUVM invalidate acknowledge state across a
	 * power-gating off cycle. As a workaround, acquire the semaphore
	 * before invalidation and release it afterwards to avoid entering
	 * a power-gated state in between.
	 */

	/* TODO: Debugging with the semaphore still needs to be done for the GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: Debugging with the semaphore still needs to be done for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * a write of 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

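/* Emit a write of @pasid into the IH VMID LUT entry for @vmid; the GFX hub
 * and MM hub are backed by separate LUTs.
 */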
static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					 unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

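/* Translate AMDGPU_VM_MTYPE_* mapping flags into the NV10-format PTE mtype
 * field; unknown values fall back to MTYPE_NC.
 */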
static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	}
}

static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

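	/* PRT (partially resident texture) mappings are marked as snooped
	 * system pages with access logging enabled and the valid bit
	 * cleared.
	 */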
	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_EXT_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
}

static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
	.map_mtype = gmc_v11_0_map_mtype,
	.get_vm_pde = gmc_v11_0_get_vm_pde,
	.get_vm_pte = gmc_v11_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 10, 0):
		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
		if (adev->umc.node_inst_num == 4)
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
		adev->umc.ras = &umc_v8_10_ras;
		break;
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(3, 0, 1):
		adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
		break;
	case IP_VERSION(3, 0, 2):
		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
		break;
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
		adev->mmhub.funcs = &mmhub_v3_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v3_0_funcs;
		break;
	}
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 3):
		adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
		break;
	}
}

static int gmc_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v11_0_set_gfxhub_funcs(adev);
	gmc_v11_0_set_mmhub_funcs(adev);
	gmc_v11_0_set_gmc_funcs(adev);
	gmc_v11_0_set_irq_funcs(adev);
	gmc_v11_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH);
	if (!amdgpu_sriov_vf(adev) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) &&
	    (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	if (amdgpu_sriov_vf(adev))
		adev->vm_manager.vram_base_offset = 0;
	else
		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

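/**
 * gmc_v11_0_gart_init - init the driver info and hardware requirements for gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the GART table in VRAM; each GPU page needs one 64-bit PTE, and
 * the default GART PTE flags mark pages uncached (MTYPE_UC) and executable.
 */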
static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->init(adev);

	adev->gfxhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;
	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	/* The mall_size is already calculated as mall_size_per_umc * num_umc.
	 * However, for gfx1151, which features a 2-to-1 UMC mapping,
	 * the result must be multiplied by 2 to determine the actual mall size.
	 */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 5, 1):
		adev->gmc.mall_size *= 2;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48 bit), maximum size,
		 * block size 512 (9 bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

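	/* DMA addressing is limited to 44 bits here; set the DMA masks
	 * accordingly and let drm_need_swiotlb() decide whether bounce
	 * buffering is required.
	 */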
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

		WREG32(hub->vm_contexts_disable, 0);
		return;
	}
}

/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	amdgpu_device_flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

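	/* PASID TLB flushes are routed through the KIQ except in emulation
	 * mode.
	 */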
	adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;

	/* The sequence of these two function calls matters. */
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	gmc_v11_0_gart_disable(adev);

	return 0;
}

static int gmc_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
	gmc_v11_0_hw_fini(ip_block);

	return 0;
}

static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = gmc_v11_0_hw_init(ip_block);
	if (r)
		return r;

	amdgpu_vmid_reset_all(ip_block->adev);

	return 0;
}

static bool gmc_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	/* MC is always ready in GMC v11. */
	return true;
}

static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* There is no need to wait for MC idle in GMC v11. */
	return 0;
}

static int gmc_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};