/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "gfxhub_v3_0_3.h"
#include "gfxhub_v11_5_0.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
#include "mmhub_v3_3.h"
#include "athub_v3_0.h"

static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix && (adev->in_runpm || adev->in_suspend ||
				       amdgpu_in_reset(adev)))
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
	uint32_t status = 0;
	u64 addr;

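	/* Reassemble the faulting page address from the IV ring entry:
	 * src_data[0] carries bits 43:12 of the page-aligned address and
	 * the low nibble of src_data[1] carries bits 47:44.
	 */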
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB(0))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		/* Writing bit 0 clears the L2 protection fault status */
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info *task_info;

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
		if (task_info) {
			dev_err(adev->dev,
				" in process %s pid %d thread %s pid %d)\n",
				task_info->process_name, task_info->tgid,
				task_info->task_name, task_info->pid);
			amdgpu_vm_put_task_info(task_info);
		}

		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);

		/* Only print the L2 fault status if the status register could
		 * be read and contains useful information
		 */
		if (status != 0)
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
	.set = gmc_v11_0_vm_fault_interrupt_state,
	.process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
	.set = gmc_v11_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
	}
}

/**
 * gmc_v11_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Returns true when the semaphore workaround is needed, i.e. for the
 * first MMHUB on bare metal.
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v11_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	/* The low 16 bits of the IH VMID LUT entry hold the PASID
	 * currently bound to this VMID.
	 */
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

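/* The TLB flush below takes the firmware path (KIQ or MES) whenever one of
 * those rings is up, which SR-IOV requires and which keeps GFXOFF working on
 * bare metal; only before those rings are ready does it fall back to
 * programming the invalidation registers directly over MMIO.
 */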
/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	/* Use register 17 for GART */
	const unsigned int eng = 17;
	unsigned char hub_ip;
	u32 sem, req, ack;
	unsigned int i;
	u32 tmp;

	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
		return;

	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	/* flush hdp cache */
	amdgpu_device_flush_hdp(adev, NULL);

	/* This is necessary for SR-IOV as well as for GFXOFF to function
	 * properly on bare metal
	 */
	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, GET_INST(GC, 0));
		return;
	}

	/* This path is needed before KIQ/MES/GFXOFF are set up */
	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU may lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle. Acquire the semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state, as a workaround for this issue.
	 */

	/* TODO: The semaphore workaround still needs to be debugged for the GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);

	/* Poll for the ACK, waiting 1 us between reads. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: The semaphore workaround still needs to be debugged for the GFXHUB as well. */
	if (use_semaphore)
		WREG32_RLC_NO_KIQ(sem, 0, hub_ip);

	/* Issue an additional private vm invalidation to the MMHUB */
	if ((vmhub != AMDGPU_GFXHUB(0)) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
	    !amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i >= adev->usec_timeout)
		dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}

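/* There is no dedicated PASID-based flush request here; instead, walk the
 * active VMIDs, look up which PASID each one is currently mapped to via the
 * IH LUT, and flush the VMID(s) that match.
 */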
/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							      &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v11_0_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v11_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
		}
	}
}

static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * The GPU may lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle. Acquire the semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state, as a workaround for this issue.
	 */

	/* TODO: The semaphore workaround still needs to be debugged for the GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: The semaphore workaround still needs to be debugged for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

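/* The IH block keeps separate VMID-to-PASID lookup tables for the GFX and
 * MM hubs, so the register block is chosen based on the ring's hub.
 */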
static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					 unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	}
}

static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_EXT_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
}

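/* Estimate how much VRAM the VBIOS is scanning out, so that memory can stay
 * reserved until the driver takes over the display: either the fixed VGA
 * allocation, or viewport height * surface pitch * 4 bytes per pixel.
 */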
static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
	.map_mtype = gmc_v11_0_map_mtype,
	.get_vm_pde = gmc_v11_0_get_vm_pde,
	.get_vm_pte = gmc_v11_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 10, 0):
		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
		if (adev->umc.node_inst_num == 4)
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
		adev->umc.ras = &umc_v8_10_ras;
		break;
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(3, 0, 1):
		adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
		break;
	case IP_VERSION(3, 0, 2):
		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
		break;
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
		adev->mmhub.funcs = &mmhub_v3_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v3_0_funcs;
		break;
	}
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 3):
		adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
		break;
	}
}

static int gmc_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v11_0_set_gfxhub_funcs(adev);
	gmc_v11_0_set_mmhub_funcs(adev);
	gmc_v11_0_set_gmc_funcs(adev);
	gmc_v11_0_set_irq_funcs(adev);
	gmc_v11_0_set_umc_funcs(adev);

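	/* Two fixed 4 GB windows high in the 64-bit VA space, used as the
	 * shared and private memory apertures (e.g. by KFD).
	 */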
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH);
	if (!amdgpu_sriov_vf(adev) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) &&
	    (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	if (amdgpu_sriov_vf(adev))
		adev->vm_manager.vram_base_offset = 0;
	else
		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* the NBIO memsize register reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

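/* The GART page table itself is allocated in VRAM; its entries are written
 * with an uncached MTYPE and marked executable.
 */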
static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->init(adev);

	adev->gfxhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type,
					      &vram_vendor);
	adev->gmc.vram_width = vram_width;

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	/* The mall_size is already calculated as mall_size_per_umc * num_umc.
	 * However, for gfx1151, which features a 2-to-1 UMC mapping,
	 * the result must be multiplied by 2 to determine the actual mall size.
	 */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 5, 1):
		adev->gmc.mall_size *= 2;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is for the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15 (or 1-15 when kernel queues are disabled)
	 */
	adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

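/* Tear down in the reverse order of sw_init: the VM manager first, then the
 * GART table, any leftover GEM objects, and finally the buffer manager.
 */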
static int gmc_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

		/* writing 0 leaves no MMHUB VM contexts disabled */
		WREG32(hub->vm_contexts_disable, 0);
		return;
	}
}

/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	amdgpu_device_flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;

	/* The sequence of these two function calls matters. */
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	gmc_v11_0_gart_disable(adev);

	return 0;
}

static int gmc_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
	gmc_v11_0_hw_fini(ip_block);

	return 0;
}

static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = gmc_v11_0_hw_init(ip_block);
	if (r)
		return r;

	amdgpu_vmid_reset_all(ip_block->adev);

	return 0;
}

static bool gmc_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	/* MC is always ready in GMC v11. */
	return true;
}

static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* There is no need to wait for MC idle in GMC v11. */
	return 0;
}

static int gmc_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};