Lines Matching +full:gfx +full:- +full:mem

37 #include <linux/pci-p2pdma.h>
38 #include <linux/apple-gmux.h>
87 #include <asm/intel-family.h>
100 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
151 #define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
183 return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0; in amdgpu_ip_member_of_hwini()
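The mask at source line 151 and the test at line 183 form a per-IP-block membership check. A minimal sketch of the pair, assuming block is an enum amd_ip_block_type value (the enum and AMD_IP_BLOCK_TYPE_NUM come from amd_shared.h, not from the fragments above):

    #include <linux/bits.h>

    /* every IP block selected for hardware init (source line 151) */
    #define IP_BLK_MASK_ALL    GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)

    /* membership test mirroring amdgpu_ip_member_of_hwini() (line 183) */
    static bool ip_in_hwini_mask(u32 hwini_ip_block_mask, int block)
    {
        return (hwini_ip_block_mask & (1U << block)) != 0;
    }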
191 adev->init_lvl = &amdgpu_init_minimal_xgmi; in amdgpu_set_init_level()
194 adev->init_lvl = &amdgpu_init_recovery; in amdgpu_set_init_level()
199 adev->init_lvl = &amdgpu_init_default; in amdgpu_set_init_level()
235 ret = sysfs_create_file(&adev->dev->kobj, in amdgpu_device_attr_sysfs_init()
244 sysfs_remove_file(&adev->dev->kobj, in amdgpu_device_attr_sysfs_fini()
279 return -EINVAL; in amdgpu_sysfs_reg_state_get()
295 ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state); in amdgpu_reg_state_sysfs_init()
304 sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state); in amdgpu_reg_state_sysfs_fini()
311 if (ip_block->version->funcs->suspend) { in amdgpu_ip_block_suspend()
312 r = ip_block->version->funcs->suspend(ip_block); in amdgpu_ip_block_suspend()
314 dev_err(ip_block->adev->dev, in amdgpu_ip_block_suspend()
316 ip_block->version->funcs->name, r); in amdgpu_ip_block_suspend()
321 ip_block->status.hw = false; in amdgpu_ip_block_suspend()
329 if (ip_block->version->funcs->resume) { in amdgpu_ip_block_resume()
330 r = ip_block->version->funcs->resume(ip_block); in amdgpu_ip_block_resume()
332 dev_err(ip_block->adev->dev, in amdgpu_ip_block_resume()
334 ip_block->version->funcs->name, r); in amdgpu_ip_block_resume()
339 ip_block->status.hw = true; in amdgpu_ip_block_resume()
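The two wrappers at source lines 311-339 share one shape: the per-IP callback is optional, a failure is logged with the block's name, and status.hw tracks the outcome. A sketch of the resume side, reassembled from only the fields the fragments show (the struct definitions live in amdgpu.h):

    static int ip_block_resume_sketch(struct amdgpu_ip_block *ip_block)
    {
        int r;

        if (ip_block->version->funcs->resume) {
            r = ip_block->version->funcs->resume(ip_block);
            if (r) {
                dev_err(ip_block->adev->dev,
                        "resume of IP block <%s> failed %d\n",
                        ip_block->version->funcs->name, r);
                return r;
            }
        }

        /* mark the block powered up; the suspend wrapper clears it */
        ip_block->status.hw = true;
        return 0;
    }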
353 * - "cem" - PCIE CEM card
354 * - "oam" - Open Compute Accelerator Module
355 * - "unknown" - Not known
368 if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type) in amdgpu_device_get_board_info()
369 pkg_type = adev->smuio.funcs->get_pkg_type(adev); in amdgpu_device_get_board_info()
400 if (adev->flags & AMD_IS_APU) in amdgpu_board_attrs_is_visible()
403 return attr->mode; in amdgpu_board_attrs_is_visible()
415 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
426 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid()) in amdgpu_device_supports_px()
432 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
446 if (adev->has_pr3 || in amdgpu_device_supports_boco()
447 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid())) in amdgpu_device_supports_boco()
453 * amdgpu_device_supports_baco - Does the device support BACO
476 adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; in amdgpu_device_detect_runtime_pm_mode()
482 adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; in amdgpu_device_detect_runtime_pm_mode()
483 dev_info(adev->dev, "Forcing BAMACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
485 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
486 dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n"); in amdgpu_device_detect_runtime_pm_mode()

491 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
492 dev_info(adev->dev, "Forcing BACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
495 case -1: in amdgpu_device_detect_runtime_pm_mode()
496 case -2: in amdgpu_device_detect_runtime_pm_mode()
498 adev->pm.rpm_mode = AMDGPU_RUNPM_PX; in amdgpu_device_detect_runtime_pm_mode()
499 dev_info(adev->dev, "Using ATPX for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
501 adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; in amdgpu_device_detect_runtime_pm_mode()
502 dev_info(adev->dev, "Using BOCO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
507 switch (adev->asic_type) { in amdgpu_device_detect_runtime_pm_mode()
514 if (!adev->gmc.noretry) in amdgpu_device_detect_runtime_pm_mode()
515 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
519 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
523 if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { in amdgpu_device_detect_runtime_pm_mode()
525 adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; in amdgpu_device_detect_runtime_pm_mode()
526 dev_info(adev->dev, "Using BAMACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
528 dev_info(adev->dev, "Using BACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
534 dev_info(adev->dev, "runtime pm is manually disabled\n"); in amdgpu_device_detect_runtime_pm_mode()
541 if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) in amdgpu_device_detect_runtime_pm_mode()
542 dev_info(adev->dev, "Runtime PM not available\n"); in amdgpu_device_detect_runtime_pm_mode()
545 * amdgpu_device_supports_smart_shift - Is the device dGPU with
564 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
570 * @write: true - write to vram, otherwise - read from vram
586 spin_lock_irqsave(&adev->mmio_idx_lock, flags); in amdgpu_device_mm_access()
601 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); in amdgpu_device_mm_access()
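Source line 564 names the mechanism: VRAM is reached one dword at a time through the MM_INDEX/MM_DATA register pair, serialized by mmio_idx_lock (lines 586/601). A condensed sketch of that loop; the register names and the WREG32_NO_KIQ/RREG32_NO_KIQ accessors are assumptions drawn from elsewhere in the driver, not shown in these fragments, and pos/size must be dword-aligned:

    static void mm_access_sketch(struct amdgpu_device *adev, loff_t pos,
                                 uint32_t *data, size_t size, bool write)
    {
        unsigned long flags;
        uint64_t last;

        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
            /* select the VRAM dword; bit 31 enables the aperture */
            WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
            if (write)
                WREG32_NO_KIQ(mmMM_DATA, *data++);
            else
                *data++ = RREG32_NO_KIQ(mmMM_DATA);
        }
        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
    }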
606 * amdgpu_device_aper_access - access vram by vram aperture
612 * @write: true - write to vram, otherwise - read from vram
624 if (!adev->mman.aper_base_kaddr) in amdgpu_device_aper_access()
627 last = min(pos + size, adev->gmc.visible_vram_size); in amdgpu_device_aper_access()
629 addr = adev->mman.aper_base_kaddr + pos; in amdgpu_device_aper_access()
630 count = last - pos; in amdgpu_device_aper_access()
657 * amdgpu_device_vram_access - read/write a buffer in vram
663 * @write: true - write to vram, otherwise - read from vram
672 size -= count; in amdgpu_device_vram_access()
688 if (adev->no_hw_access) in amdgpu_device_skip_hw_access()
704 if (down_read_trylock(&adev->reset_domain->sem)) in amdgpu_device_skip_hw_access()
705 up_read(&adev->reset_domain->sem); in amdgpu_device_skip_hw_access()
707 lockdep_assert_held(&adev->reset_domain->sem); in amdgpu_device_skip_hw_access()
714 * amdgpu_device_rreg - read a memory mapped IO or indirect register
730 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_rreg()
733 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_rreg()
735 up_read(&adev->reset_domain->sem); in amdgpu_device_rreg()
737 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_rreg()
740 ret = adev->pcie_rreg(adev, reg * 4); in amdgpu_device_rreg()
743 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret); in amdgpu_device_rreg()
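Lines 730-743 show the register-read dispatch: offsets inside the MMIO BAR go through readl(), anything past rmmio_size falls back to the chip's indirect pcie_rreg callback. Stripped of the SR-IOV/KIQ branch (lines 733-735), the logic reduces to:

    static uint32_t device_rreg_sketch(struct amdgpu_device *adev, uint32_t reg)
    {
        /* reg is a dword index, the BAR size is in bytes */
        if ((reg * 4) < adev->rmmio_size)
            return readl(((void __iomem *)adev->rmmio) + (reg * 4));

        return adev->pcie_rreg(adev, reg * 4);
    }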
754 * amdgpu_mm_rreg8 - read a memory mapped IO register
766 if (offset < adev->rmmio_size) in amdgpu_mm_rreg8()
767 return (readb(adev->rmmio + offset)); in amdgpu_mm_rreg8()
773 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
791 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_xcc_rreg()
794 adev->gfx.rlc.rlcg_reg_access_supported && in amdgpu_device_xcc_rreg()
801 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_xcc_rreg()
803 up_read(&adev->reset_domain->sem); in amdgpu_device_xcc_rreg()
805 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_xcc_rreg()
808 ret = adev->pcie_rreg(adev, reg * 4); in amdgpu_device_xcc_rreg()
821 * amdgpu_mm_wreg8 - write to a memory mapped IO register
834 if (offset < adev->rmmio_size) in amdgpu_mm_wreg8()
835 writeb(value, adev->rmmio + offset); in amdgpu_mm_wreg8()
841 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
857 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_wreg()
860 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_wreg()
862 up_read(&adev->reset_domain->sem); in amdgpu_device_wreg()
864 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_wreg()
867 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_device_wreg()
870 trace_amdgpu_device_wreg(adev->pdev->device, reg, v); in amdgpu_device_wreg()
874 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
891 adev->gfx.rlc.funcs && in amdgpu_mm_wreg_mmio_rlc()
892 adev->gfx.rlc.funcs->is_rlcg_access_range) { in amdgpu_mm_wreg_mmio_rlc()
893 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) in amdgpu_mm_wreg_mmio_rlc()
895 } else if ((reg * 4) >= adev->rmmio_size) { in amdgpu_mm_wreg_mmio_rlc()
896 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_mm_wreg_mmio_rlc()
898 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_mm_wreg_mmio_rlc()
903 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
922 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_xcc_wreg()
925 adev->gfx.rlc.rlcg_reg_access_supported && in amdgpu_device_xcc_wreg()
932 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_xcc_wreg()
934 up_read(&adev->reset_domain->sem); in amdgpu_device_xcc_wreg()
936 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_xcc_wreg()
939 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_device_xcc_wreg()
944 * amdgpu_device_indirect_rreg - read an indirect register
959 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg()
960 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg()
962 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg()
963 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg()
964 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg()
969 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg()
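Lines 959-969 give the indirect-register recipe: fetch the index/data window offsets from the nbio callbacks, then, under pcie_idx_lock, write the target address to the index register and read the value from the data register. A sketch of the whole sequence; the flush readback of the index register is the driver's usual idiom and is assumed here rather than shown in the fragments:

    static u32 indirect_rreg_sketch(struct amdgpu_device *adev, u32 reg_addr)
    {
        void __iomem *pcie_index_offset, *pcie_data_offset;
        u32 pcie_index, pcie_data, r;
        unsigned long flags;

        pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);    /* flush the posted index write */
        r = readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
    }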
983 if (unlikely(!adev->nbio.funcs)) { in amdgpu_device_indirect_rreg_ext()
987 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg_ext()
988 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg_ext()
992 if (unlikely(!adev->nbio.funcs)) in amdgpu_device_indirect_rreg_ext()
995 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_rreg_ext()
1000 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg_ext()
1001 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg_ext()
1002 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg_ext()
1004 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_rreg_ext()
1021 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg_ext()
1027 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
1042 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg64()
1043 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg64()
1045 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64()
1046 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg64()
1047 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg64()
1057 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64()
1072 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg64_ext()
1073 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg64_ext()
1074 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) in amdgpu_device_indirect_rreg64_ext()
1075 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_rreg64_ext()
1077 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64_ext()
1078 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg64_ext()
1079 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg64_ext()
1081 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_rreg64_ext()
1107 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64_ext()
1113 * amdgpu_device_indirect_wreg - write an indirect register address
1127 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg()
1128 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg()
1130 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg()
1131 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg()
1132 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg()
1138 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg()
1149 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg_ext()
1150 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg_ext()
1151 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) in amdgpu_device_indirect_wreg_ext()
1152 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_wreg_ext()
1156 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg_ext()
1157 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg_ext()
1158 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg_ext()
1160 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_wreg_ext()
1178 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg_ext()
1182 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
1196 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg64()
1197 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg64()
1199 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64()
1200 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg64()
1201 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg64()
1213 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64()
1225 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg64_ext()
1226 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg64_ext()
1227 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) in amdgpu_device_indirect_wreg64_ext()
1228 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_wreg64_ext()
1230 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64_ext()
1231 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg64_ext()
1232 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg64_ext()
1234 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_wreg64_ext()
1262 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64_ext()
1266 * amdgpu_device_get_rev_id - query device rev_id
1274 return adev->nbio.funcs->get_rev_id(adev); in amdgpu_device_get_rev_id()
1278 * amdgpu_invalid_rreg - dummy reg read function
1302 * amdgpu_invalid_wreg - dummy reg write function
1326 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
1350 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
1374 * amdgpu_block_invalid_rreg - dummy reg read function
1394 * amdgpu_block_invalid_wreg - dummy reg write function
1415 if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) in amdgpu_device_get_vbios_flags()
1418 if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev)) in amdgpu_device_get_vbios_flags()
1425 * amdgpu_device_asic_init - Wrapper for atom asic_init
1446 if (optional && !adev->bios) in amdgpu_device_asic_init()
1452 if (optional && !adev->bios) in amdgpu_device_asic_init()
1455 return amdgpu_atom_asic_init(adev->mode_info.atom_context); in amdgpu_device_asic_init()
1462 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1474 &adev->mem_scratch.robj, in amdgpu_device_mem_scratch_init()
1475 &adev->mem_scratch.gpu_addr, in amdgpu_device_mem_scratch_init()
1476 (void **)&adev->mem_scratch.ptr); in amdgpu_device_mem_scratch_init()
1480 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1488 amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL); in amdgpu_device_mem_scratch_fini()
1492 * amdgpu_device_program_register_sequence - program an array of registers.
1521 if (adev->family >= AMDGPU_FAMILY_AI) in amdgpu_device_program_register_sequence()
1531 * amdgpu_device_pci_config_reset - reset the GPU
1540 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); in amdgpu_device_pci_config_reset()
1544 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1552 return pci_reset_function(adev->pdev); in amdgpu_device_pci_reset()
1562 * amdgpu_device_wb_fini - Disable Writeback and free memory
1571 if (adev->wb.wb_obj) { in amdgpu_device_wb_fini()
1572 amdgpu_bo_free_kernel(&adev->wb.wb_obj, in amdgpu_device_wb_fini()
1573 &adev->wb.gpu_addr, in amdgpu_device_wb_fini()
1574 (void **)&adev->wb.wb); in amdgpu_device_wb_fini()
1575 adev->wb.wb_obj = NULL; in amdgpu_device_wb_fini()
1580 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1586 * Returns 0 on success or a negative error code on failure.
1592 if (adev->wb.wb_obj == NULL) { in amdgpu_device_wb_init()
1596 &adev->wb.wb_obj, &adev->wb.gpu_addr, in amdgpu_device_wb_init()
1597 (void **)&adev->wb.wb); in amdgpu_device_wb_init()
1599 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); in amdgpu_device_wb_init()
1603 adev->wb.num_wb = AMDGPU_MAX_WB; in amdgpu_device_wb_init()
1604 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); in amdgpu_device_wb_init()
1607 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8); in amdgpu_device_wb_init()
1614 * amdgpu_device_wb_get - Allocate a wb entry
1620 * Returns 0 on success or -EINVAL on failure.
1626 spin_lock_irqsave(&adev->wb.lock, flags); in amdgpu_device_wb_get()
1627 offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); in amdgpu_device_wb_get()
1628 if (offset < adev->wb.num_wb) { in amdgpu_device_wb_get()
1629 __set_bit(offset, adev->wb.used); in amdgpu_device_wb_get()
1630 spin_unlock_irqrestore(&adev->wb.lock, flags); in amdgpu_device_wb_get()
1634 spin_unlock_irqrestore(&adev->wb.lock, flags); in amdgpu_device_wb_get()
1635 return -EINVAL; in amdgpu_device_wb_get()
1640 * amdgpu_device_wb_free - Free a wb entry
1652 spin_lock_irqsave(&adev->wb.lock, flags); in amdgpu_device_wb_free()
1653 if (wb < adev->wb.num_wb) in amdgpu_device_wb_free()
1654 __clear_bit(wb, adev->wb.used); in amdgpu_device_wb_free()
1655 spin_unlock_irqrestore(&adev->wb.lock, flags); in amdgpu_device_wb_free()
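Lines 1626-1655 show the writeback-slot allocator: a fixed-size bitmap guarded by wb.lock, find-first-zero on allocate, bounds-checked clear on free. A sketch of the get side; the "* 8" conversion matches the eight-dwords-per-slot sizing visible in wb_init (line 1607):

    static int wb_get_sketch(struct amdgpu_device *adev, u32 *wb)
    {
        unsigned long flags, offset;

        spin_lock_irqsave(&adev->wb.lock, flags);
        offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
        if (offset < adev->wb.num_wb) {
            __set_bit(offset, adev->wb.used);
            spin_unlock_irqrestore(&adev->wb.lock, flags);
            *wb = offset * 8;    /* dword offset of the slot */
            return 0;
        }
        spin_unlock_irqrestore(&adev->wb.lock, flags);
        return -EINVAL;
    }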
1659 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1665 * driver loading by returning -ENODEV.
1669 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); in amdgpu_device_resize_fb_bar()
1685 adev->pdev->vendor == PCI_VENDOR_ID_ATI && in amdgpu_device_resize_fb_bar()
1686 adev->pdev->device == 0x731f && in amdgpu_device_resize_fb_bar()
1687 adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) in amdgpu_device_resize_fb_bar()
1691 if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) in amdgpu_device_resize_fb_bar()
1695 if (adev->gmc.real_vram_size && in amdgpu_device_resize_fb_bar()
1696 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size)) in amdgpu_device_resize_fb_bar()
1700 root = adev->pdev->bus; in amdgpu_device_resize_fb_bar()
1701 while (root->parent) in amdgpu_device_resize_fb_bar()
1702 root = root->parent; in amdgpu_device_resize_fb_bar()
1705 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) && in amdgpu_device_resize_fb_bar()
1706 res->start > 0x100000000ull) in amdgpu_device_resize_fb_bar()
1715 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1, in amdgpu_device_resize_fb_bar()
1719 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); in amdgpu_device_resize_fb_bar()
1720 pci_write_config_word(adev->pdev, PCI_COMMAND, in amdgpu_device_resize_fb_bar()
1725 if (adev->asic_type >= CHIP_BONAIRE) in amdgpu_device_resize_fb_bar()
1726 pci_release_resource(adev->pdev, 2); in amdgpu_device_resize_fb_bar()
1728 pci_release_resource(adev->pdev, 0); in amdgpu_device_resize_fb_bar()
1730 r = pci_resize_resource(adev->pdev, 0, rbar_size); in amdgpu_device_resize_fb_bar()
1731 if (r == -ENOSPC) in amdgpu_device_resize_fb_bar()
1733 else if (r && r != -ENOTSUPP) in amdgpu_device_resize_fb_bar()
1736 pci_assign_unassigned_bus_resources(adev->pdev->bus); in amdgpu_device_resize_fb_bar()
1742 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) in amdgpu_device_resize_fb_bar()
1743 return -ENODEV; in amdgpu_device_resize_fb_bar()
1745 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd); in amdgpu_device_resize_fb_bar()
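Lines 1719-1745 sketch out as: save PCI_COMMAND and disable memory decode, release BAR0 (plus the doorbell BAR on CHIP_BONAIRE and newer, line 1726), ask the PCI core for the new size, reassign bus resources, and restore the command word only if the BAR came back. A condensed version, with the doorbell handling and the early-exit checks of lines 1685-1706 omitted:

    static int resize_fb_bar_sketch(struct amdgpu_device *adev, int rbar_size)
    {
        u16 cmd;
        int r;

        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        pci_release_resource(adev->pdev, 0);
        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
            DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
            DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* if the BAR is still unset the device is unusable */
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
            return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
        return 0;
    }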
1754 * amdgpu_device_need_post - check if the hw need post or not
1772 if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios) in amdgpu_device_need_post()
1776 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot in amdgpu_device_need_post()
1781 if (adev->asic_type == CHIP_FIJI) { in amdgpu_device_need_post()
1785 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); in amdgpu_device_need_post()
1790 fw_ver = *((uint32_t *)adev->pm.fw->data + 69); in amdgpu_device_need_post()
1791 release_firmware(adev->pm.fw); in amdgpu_device_need_post()
1798 if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) in amdgpu_device_need_post()
1801 if (adev->has_hw_reset) { in amdgpu_device_need_post()
1802 adev->has_hw_reset = false; in amdgpu_device_need_post()
1807 if (adev->asic_type >= CHIP_BONAIRE) in amdgpu_device_need_post()
1829 case -1: in amdgpu_device_seamless_boot_supported()
1841 if (!(adev->flags & AMD_IS_APU)) in amdgpu_device_seamless_boot_supported()
1844 if (adev->mman.keep_stolen_vga_memory) in amdgpu_device_seamless_boot_supported()
1855 …gn/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-
1856 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1864 if (dev_is_removable(adev->dev)) in amdgpu_device_pcie_dynamic_switching_supported()
1867 if (c->x86_vendor == X86_VENDOR_INTEL) in amdgpu_device_pcie_dynamic_switching_supported()
1874 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1886 case -1: in amdgpu_device_should_use_aspm()
1895 if (adev->flags & AMD_IS_APU) in amdgpu_device_should_use_aspm()
1897 if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) in amdgpu_device_should_use_aspm()
1899 return pcie_aspm_enabled(adev->pdev); in amdgpu_device_should_use_aspm()
1904 * amdgpu_device_vga_set_decode - enable/disable vga decode
1926 * amdgpu_device_check_block_size - validate the vm block size
1941 if (amdgpu_vm_block_size == -1) in amdgpu_device_check_block_size()
1945 dev_warn(adev->dev, "VM page table size (%d) too small\n", in amdgpu_device_check_block_size()
1947 amdgpu_vm_block_size = -1; in amdgpu_device_check_block_size()
1952 * amdgpu_device_check_vm_size - validate the vm size
1962 if (amdgpu_vm_size == -1) in amdgpu_device_check_vm_size()
1966 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", in amdgpu_device_check_vm_size()
1968 amdgpu_vm_size = -1; in amdgpu_device_check_vm_size()
1984 DRM_WARN("Not 64-bit OS, feature not supported\n"); in amdgpu_device_check_smu_prv_buffer_size()
2002 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28; in amdgpu_device_check_smu_prv_buffer_size()
2009 adev->pm.smu_prv_buffer_size = 0; in amdgpu_device_check_smu_prv_buffer_size()
2014 if (!(adev->flags & AMD_IS_APU) || in amdgpu_device_init_apu_flags()
2015 adev->asic_type < CHIP_RAVEN) in amdgpu_device_init_apu_flags()
2018 switch (adev->asic_type) { in amdgpu_device_init_apu_flags()
2020 if (adev->pdev->device == 0x15dd) in amdgpu_device_init_apu_flags()
2021 adev->apu_flags |= AMD_APU_IS_RAVEN; in amdgpu_device_init_apu_flags()
2022 if (adev->pdev->device == 0x15d8) in amdgpu_device_init_apu_flags()
2023 adev->apu_flags |= AMD_APU_IS_PICASSO; in amdgpu_device_init_apu_flags()
2026 if ((adev->pdev->device == 0x1636) || in amdgpu_device_init_apu_flags()
2027 (adev->pdev->device == 0x164c)) in amdgpu_device_init_apu_flags()
2028 adev->apu_flags |= AMD_APU_IS_RENOIR; in amdgpu_device_init_apu_flags()
2030 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE; in amdgpu_device_init_apu_flags()
2033 adev->apu_flags |= AMD_APU_IS_VANGOGH; in amdgpu_device_init_apu_flags()
2038 if ((adev->pdev->device == 0x13FE) || in amdgpu_device_init_apu_flags()
2039 (adev->pdev->device == 0x143F)) in amdgpu_device_init_apu_flags()
2040 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2; in amdgpu_device_init_apu_flags()
2050 * amdgpu_device_check_arguments - validate module params
2062 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", in amdgpu_device_check_arguments()
2066 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", in amdgpu_device_check_arguments()
2071 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { in amdgpu_device_check_arguments()
2073 dev_warn(adev->dev, "gart size (%d) too small\n", in amdgpu_device_check_arguments()
2075 amdgpu_gart_size = -1; in amdgpu_device_check_arguments()
2078 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { in amdgpu_device_check_arguments()
2080 dev_warn(adev->dev, "gtt size (%d) too small\n", in amdgpu_device_check_arguments()
2082 amdgpu_gtt_size = -1; in amdgpu_device_check_arguments()
2086 if (amdgpu_vm_fragment_size != -1 && in amdgpu_device_check_arguments()
2088 dev_warn(adev->dev, "valid range is between 4 and 9\n"); in amdgpu_device_check_arguments()
2089 amdgpu_vm_fragment_size = -1; in amdgpu_device_check_arguments()
2093 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n", in amdgpu_device_check_arguments()
2097 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n", in amdgpu_device_check_arguments()
2102 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) { in amdgpu_device_check_arguments()
2103 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n"); in amdgpu_device_check_arguments()
2104 amdgpu_reset_method = -1; in amdgpu_device_check_arguments()
2113 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); in amdgpu_device_check_arguments()
2116 adev->enforce_isolation[i] = !!enforce_isolation; in amdgpu_device_check_arguments()
2122 * amdgpu_switcheroo_set_state - set switcheroo state
2142 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; in amdgpu_switcheroo_set_state()
2151 dev->switch_power_state = DRM_SWITCH_POWER_ON; in amdgpu_switcheroo_set_state()
2154 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; in amdgpu_switcheroo_set_state()
2161 dev->switch_power_state = DRM_SWITCH_POWER_OFF; in amdgpu_switcheroo_set_state()
2166 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
2183 return atomic_read(&dev->open_count) == 0; in amdgpu_switcheroo_can_switch()
2193 * amdgpu_device_ip_set_clockgating_state - set the CG state
2196 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2210 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_set_clockgating_state()
2211 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_set_clockgating_state()
2213 if (adev->ip_blocks[i].version->type != block_type) in amdgpu_device_ip_set_clockgating_state()
2215 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state) in amdgpu_device_ip_set_clockgating_state()
2217 r = adev->ip_blocks[i].version->funcs->set_clockgating_state( in amdgpu_device_ip_set_clockgating_state()
2218 &adev->ip_blocks[i], state); in amdgpu_device_ip_set_clockgating_state()
2221 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_set_clockgating_state()
2227 * amdgpu_device_ip_set_powergating_state - set the PG state
2230 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2244 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_set_powergating_state()
2245 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_set_powergating_state()
2247 if (adev->ip_blocks[i].version->type != block_type) in amdgpu_device_ip_set_powergating_state()
2249 if (!adev->ip_blocks[i].version->funcs->set_powergating_state) in amdgpu_device_ip_set_powergating_state()
2251 r = adev->ip_blocks[i].version->funcs->set_powergating_state( in amdgpu_device_ip_set_powergating_state()
2252 &adev->ip_blocks[i], state); in amdgpu_device_ip_set_powergating_state()
2255 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_set_powergating_state()
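The clockgating and powergating setters (lines 2210-2255) use the same dispatch loop: skip invalid blocks, match the requested block_type, call the optional handler, and log failures by IP name. The powergating side, reassembled from the fragments:

    static int ip_set_pg_sketch(struct amdgpu_device *adev,
                                enum amd_ip_block_type block_type,
                                enum amd_powergating_state state)
    {
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
            if (!adev->ip_blocks[i].status.valid)
                continue;
            if (adev->ip_blocks[i].version->type != block_type)
                continue;
            if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
                continue;
            r = adev->ip_blocks[i].version->funcs->set_powergating_state(
                    &adev->ip_blocks[i], state);
            if (r)
                dev_err(adev->dev,
                        "set_powergating_state of IP block <%s> failed %d\n",
                        adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
    }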
2261 * amdgpu_device_ip_get_clockgating_state - get the CG state
2276 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_get_clockgating_state()
2277 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_get_clockgating_state()
2279 if (adev->ip_blocks[i].version->funcs->get_clockgating_state) in amdgpu_device_ip_get_clockgating_state()
2280 adev->ip_blocks[i].version->funcs->get_clockgating_state( in amdgpu_device_ip_get_clockgating_state()
2281 &adev->ip_blocks[i], flags); in amdgpu_device_ip_get_clockgating_state()
2286 * amdgpu_device_ip_wait_for_idle - wait for idle
2289 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2299 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_wait_for_idle()
2300 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_wait_for_idle()
2302 if (adev->ip_blocks[i].version->type == block_type) { in amdgpu_device_ip_wait_for_idle()
2303 if (adev->ip_blocks[i].version->funcs->wait_for_idle) { in amdgpu_device_ip_wait_for_idle()
2304 r = adev->ip_blocks[i].version->funcs->wait_for_idle( in amdgpu_device_ip_wait_for_idle()
2305 &adev->ip_blocks[i]); in amdgpu_device_ip_wait_for_idle()
2317 * amdgpu_device_ip_is_valid - is the hardware IP enabled
2320 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2330 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_is_valid()
2331 if (adev->ip_blocks[i].version->type == block_type) in amdgpu_device_ip_is_valid()
2332 return adev->ip_blocks[i].status.valid; in amdgpu_device_ip_is_valid()
2339 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2342 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
2353 for (i = 0; i < adev->num_ip_blocks; i++) in amdgpu_device_ip_get_ip_block()
2354 if (adev->ip_blocks[i].version->type == type) in amdgpu_device_ip_get_ip_block()
2355 return &adev->ip_blocks[i]; in amdgpu_device_ip_get_ip_block()
2377 if (ip_block && ((ip_block->version->major > major) || in amdgpu_device_ip_block_version_cmp()
2378 ((ip_block->version->major == major) && in amdgpu_device_ip_block_version_cmp()
2379 (ip_block->version->minor >= minor)))) in amdgpu_device_ip_block_version_cmp()
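The comparison at lines 2377-2379, written out as a predicate: a block satisfies a (major, minor) request when its major version is strictly newer, or equal with a minor version at least as new.

    static bool ip_version_at_least(struct amdgpu_ip_block *ip_block,
                                    u32 major, u32 minor)
    {
        return ip_block && ((ip_block->version->major > major) ||
                            ((ip_block->version->major == major) &&
                             (ip_block->version->minor >= minor)));
    }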
2398 return -EINVAL; in amdgpu_device_ip_block_add()
2400 switch (ip_block_version->type) { in amdgpu_device_ip_block_add()
2402 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK) in amdgpu_device_ip_block_add()
2406 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK) in amdgpu_device_ip_block_add()
2413 dev_info(adev->dev, "detected ip block number %d <%s>\n", in amdgpu_device_ip_block_add()
2414 adev->num_ip_blocks, ip_block_version->funcs->name); in amdgpu_device_ip_block_add()
2416 adev->ip_blocks[adev->num_ip_blocks].adev = adev; in amdgpu_device_ip_block_add()
2418 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; in amdgpu_device_ip_block_add()
2424 * amdgpu_device_enable_virtual_display - enable virtual display feature
2437 adev->enable_virtual_display = false; in amdgpu_device_enable_virtual_display()
2440 const char *pci_address_name = pci_name(adev->pdev); in amdgpu_device_enable_virtual_display()
2450 int res = -1; in amdgpu_device_enable_virtual_display()
2452 adev->enable_virtual_display = true; in amdgpu_device_enable_virtual_display()
2463 adev->mode_info.num_crtc = num_crtc; in amdgpu_device_enable_virtual_display()
2465 adev->mode_info.num_crtc = 1; in amdgpu_device_enable_virtual_display()
2473 adev->enable_virtual_display, adev->mode_info.num_crtc); in amdgpu_device_enable_virtual_display()
2481 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) { in amdgpu_device_set_sriov_virtual_display()
2482 adev->mode_info.num_crtc = 1; in amdgpu_device_set_sriov_virtual_display()
2483 adev->enable_virtual_display = true; in amdgpu_device_set_sriov_virtual_display()
2485 adev->enable_virtual_display, adev->mode_info.num_crtc); in amdgpu_device_set_sriov_virtual_display()
2490 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2497 * Returns 0 on success, -EINVAL on failure.
2505 adev->firmware.gpu_info_fw = NULL; in amdgpu_device_parse_gpu_info_fw()
2507 if (adev->mman.discovery_bin) in amdgpu_device_parse_gpu_info_fw()
2510 switch (adev->asic_type) { in amdgpu_device_parse_gpu_info_fw()
2520 if (adev->apu_flags & AMD_APU_IS_RAVEN2) in amdgpu_device_parse_gpu_info_fw()
2522 else if (adev->apu_flags & AMD_APU_IS_PICASSO) in amdgpu_device_parse_gpu_info_fw()
2535 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, in amdgpu_device_parse_gpu_info_fw()
2539 dev_err(adev->dev, in amdgpu_device_parse_gpu_info_fw()
2545 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; in amdgpu_device_parse_gpu_info_fw()
2546 amdgpu_ucode_print_gpu_info_hdr(&hdr->header); in amdgpu_device_parse_gpu_info_fw()
2548 switch (hdr->version_major) { in amdgpu_device_parse_gpu_info_fw()
2552 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2553 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in amdgpu_device_parse_gpu_info_fw()
2558 if (adev->asic_type == CHIP_NAVI12) in amdgpu_device_parse_gpu_info_fw()
2561 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); in amdgpu_device_parse_gpu_info_fw()
2562 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); in amdgpu_device_parse_gpu_info_fw()
2563 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); in amdgpu_device_parse_gpu_info_fw()
2564 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); in amdgpu_device_parse_gpu_info_fw()
2565 adev->gfx.config.max_texture_channel_caches = in amdgpu_device_parse_gpu_info_fw()
2566 le32_to_cpu(gpu_info_fw->gc_num_tccs); in amdgpu_device_parse_gpu_info_fw()
2567 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); in amdgpu_device_parse_gpu_info_fw()
2568 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); in amdgpu_device_parse_gpu_info_fw()
2569 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); in amdgpu_device_parse_gpu_info_fw()
2570 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); in amdgpu_device_parse_gpu_info_fw()
2571 adev->gfx.config.double_offchip_lds_buf = in amdgpu_device_parse_gpu_info_fw()
2572 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); in amdgpu_device_parse_gpu_info_fw()
2573 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); in amdgpu_device_parse_gpu_info_fw()
2574 adev->gfx.cu_info.max_waves_per_simd = in amdgpu_device_parse_gpu_info_fw()
2575 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); in amdgpu_device_parse_gpu_info_fw()
2576 adev->gfx.cu_info.max_scratch_slots_per_cu = in amdgpu_device_parse_gpu_info_fw()
2577 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); in amdgpu_device_parse_gpu_info_fw()
2578 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); in amdgpu_device_parse_gpu_info_fw()
2579 if (hdr->version_minor >= 1) { in amdgpu_device_parse_gpu_info_fw()
2581 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2582 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in amdgpu_device_parse_gpu_info_fw()
2583 adev->gfx.config.num_sc_per_sh = in amdgpu_device_parse_gpu_info_fw()
2584 le32_to_cpu(gpu_info_fw->num_sc_per_sh); in amdgpu_device_parse_gpu_info_fw()
2585 adev->gfx.config.num_packer_per_sc = in amdgpu_device_parse_gpu_info_fw()
2586 le32_to_cpu(gpu_info_fw->num_packer_per_sc); in amdgpu_device_parse_gpu_info_fw()
2594 if (hdr->version_minor == 2) { in amdgpu_device_parse_gpu_info_fw()
2596 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2597 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in amdgpu_device_parse_gpu_info_fw()
2598 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; in amdgpu_device_parse_gpu_info_fw()
2603 dev_err(adev->dev, in amdgpu_device_parse_gpu_info_fw()
2604 "Unsupported gpu_info table %d\n", hdr->header.ucode_version); in amdgpu_device_parse_gpu_info_fw()
2605 err = -EINVAL; in amdgpu_device_parse_gpu_info_fw()
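Lines 2545-2584 show the gpu_info parse: the v1_0 header carries the table version, the payload sits at ucode_array_offset_bytes, and every field goes through le32_to_cpu because the blob is little-endian on disk. A trimmed sketch covering only two of the copied fields, with the v1.1/v1.2 extensions left out:

    static int parse_gpu_info_sketch(struct amdgpu_device *adev)
    {
        const struct gpu_info_firmware_header_v1_0 *hdr =
            (const void *)adev->firmware.gpu_info_fw->data;
        const struct gpu_info_firmware_v1_0 *gpu_info_fw;

        if (hdr->version_major != 1)
            return -EINVAL;    /* "Unsupported gpu_info table", line 2604 */

        gpu_info_fw = (const void *)(adev->firmware.gpu_info_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes));

        adev->gfx.config.max_shader_engines =
            le32_to_cpu(gpu_info_fw->gc_num_se);
        adev->gfx.config.max_cu_per_sh =
            le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
        return 0;
    }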
2613 * amdgpu_device_ip_early_init - run early init for hardware IPs
2638 switch (adev->asic_type) { in amdgpu_device_ip_early_init()
2645 adev->family = AMDGPU_FAMILY_SI; in amdgpu_device_ip_early_init()
2657 if (adev->flags & AMD_IS_APU) in amdgpu_device_ip_early_init()
2658 adev->family = AMDGPU_FAMILY_KV; in amdgpu_device_ip_early_init()
2660 adev->family = AMDGPU_FAMILY_CI; in amdgpu_device_ip_early_init()
2676 if (adev->flags & AMD_IS_APU) in amdgpu_device_ip_early_init()
2677 adev->family = AMDGPU_FAMILY_CZ; in amdgpu_device_ip_early_init()
2679 adev->family = AMDGPU_FAMILY_VI; in amdgpu_device_ip_early_init()
2695 ((adev->flags & AMD_IS_APU) == 0) && in amdgpu_device_ip_early_init()
2696 !dev_is_removable(&adev->pdev->dev)) in amdgpu_device_ip_early_init()
2697 adev->flags |= AMD_IS_PX; in amdgpu_device_ip_early_init()
2699 if (!(adev->flags & AMD_IS_APU)) { in amdgpu_device_ip_early_init()
2700 parent = pcie_find_root_port(adev->pdev); in amdgpu_device_ip_early_init()
2701 adev->has_pr3 = parent ? pci_pr3_present(parent) : false; in amdgpu_device_ip_early_init()
2705 adev->pm.pp_feature = amdgpu_pp_feature_mask; in amdgpu_device_ip_early_init()
2707 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; in amdgpu_device_ip_early_init()
2708 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) in amdgpu_device_ip_early_init()
2709 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; in amdgpu_device_ip_early_init()
2711 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK; in amdgpu_device_ip_early_init()
2714 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_early_init()
2715 ip_block = &adev->ip_blocks[i]; in amdgpu_device_ip_early_init()
2719 i, adev->ip_blocks[i].version->funcs->name); in amdgpu_device_ip_early_init()
2720 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_early_init()
2721 } else if (ip_block->version->funcs->early_init) { in amdgpu_device_ip_early_init()
2722 r = ip_block->version->funcs->early_init(ip_block); in amdgpu_device_ip_early_init()
2723 if (r == -ENOENT) { in amdgpu_device_ip_early_init()
2724 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_early_init()
2727 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_early_init()
2730 adev->ip_blocks[i].status.valid = true; in amdgpu_device_ip_early_init()
2733 adev->ip_blocks[i].status.valid = true; in amdgpu_device_ip_early_init()
2736 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { in amdgpu_device_ip_early_init()
2748 return -EINVAL; in amdgpu_device_ip_early_init()
2750 if (optional && !adev->bios) in amdgpu_device_ip_early_init()
2752 adev->dev, in amdgpu_device_ip_early_init()
2755 if (adev->bios) { in amdgpu_device_ip_early_init()
2758 dev_err(adev->dev, in amdgpu_device_ip_early_init()
2776 return -ENODEV; in amdgpu_device_ip_early_init()
2778 if (adev->gmc.xgmi.supported) in amdgpu_device_ip_early_init()
2782 if (ip_block->status.valid != false) in amdgpu_device_ip_early_init()
2785 adev->cg_flags &= amdgpu_cg_mask; in amdgpu_device_ip_early_init()
2786 adev->pg_flags &= amdgpu_pg_mask; in amdgpu_device_ip_early_init()
2795 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_hw_init_phase1()
2796 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_hw_init_phase1()
2798 if (adev->ip_blocks[i].status.hw) in amdgpu_device_ip_hw_init_phase1()
2801 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_hw_init_phase1()
2803 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_hw_init_phase1()
2804 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || in amdgpu_device_ip_hw_init_phase1()
2805 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { in amdgpu_device_ip_hw_init_phase1()
2806 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_hw_init_phase1()
2809 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_hw_init_phase1()
2812 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_hw_init_phase1()
2823 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_hw_init_phase2()
2824 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_hw_init_phase2()
2826 if (adev->ip_blocks[i].status.hw) in amdgpu_device_ip_hw_init_phase2()
2829 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_hw_init_phase2()
2831 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_hw_init_phase2()
2834 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_hw_init_phase2()
2837 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_hw_init_phase2()
2849 if (adev->asic_type >= CHIP_VEGA10) { in amdgpu_device_fw_loading()
2850 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_fw_loading()
2851 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_fw_loading()
2858 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_fw_loading()
2862 if (adev->ip_blocks[i].status.hw == true) in amdgpu_device_fw_loading()
2865 if (amdgpu_in_reset(adev) || adev->in_suspend) { in amdgpu_device_fw_loading()
2866 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_fw_loading()
2870 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_fw_loading()
2873 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_fw_loading()
2876 adev->ip_blocks[i].status.hw = true; in amdgpu_device_fw_loading()
2882 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) in amdgpu_device_fw_loading()
2893 .timeout_wq = adev->reset_domain->wq, in amdgpu_device_init_schedulers()
2894 .dev = adev->dev, in amdgpu_device_init_schedulers()
2900 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_init_schedulers()
2903 if (!ring || ring->no_scheduler) in amdgpu_device_init_schedulers()
2906 switch (ring->funcs->type) { in amdgpu_device_init_schedulers()
2908 timeout = adev->gfx_timeout; in amdgpu_device_init_schedulers()
2911 timeout = adev->compute_timeout; in amdgpu_device_init_schedulers()
2914 timeout = adev->sdma_timeout; in amdgpu_device_init_schedulers()
2917 timeout = adev->video_timeout; in amdgpu_device_init_schedulers()
2922 args.credit_limit = ring->num_hw_submission; in amdgpu_device_init_schedulers()
2923 args.score = ring->sched_score; in amdgpu_device_init_schedulers()
2924 args.name = ring->name; in amdgpu_device_init_schedulers()
2926 r = drm_sched_init(&ring->sched, &args); in amdgpu_device_init_schedulers()
2929 ring->name); in amdgpu_device_init_schedulers()
2935 ring->name); in amdgpu_device_init_schedulers()
2941 ring->name); in amdgpu_device_init_schedulers()
2953 * amdgpu_device_ip_init - run init for hardware IPs
2972 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_init()
2973 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_init()
2975 if (adev->ip_blocks[i].version->funcs->sw_init) { in amdgpu_device_ip_init()
2976 r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_init()
2979 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_init()
2983 adev->ip_blocks[i].status.sw = true; in amdgpu_device_ip_init()
2986 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_init()
2989 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { in amdgpu_device_ip_init()
2991 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_init()
2996 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_init()
2997 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { in amdgpu_device_ip_init()
2998 /* need to do gmc hw init early so we can allocate gpu mem */ in amdgpu_device_ip_init()
3008 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_init()
3018 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_init()
3021 if (adev->gfx.mcbp) { in amdgpu_device_ip_init()
3022 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, in amdgpu_device_ip_init()
3045 dev_err(adev->dev, "IB initialization failed (%d).\n", r); in amdgpu_device_ip_init()
3081 init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI); in amdgpu_device_ip_init()
3089 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_device_ip_init()
3095 r = -ENOENT; in amdgpu_device_ip_init()
3099 if (!hive->reset_domain || in amdgpu_device_ip_init()
3100 !amdgpu_reset_get_reset_domain(hive->reset_domain)) { in amdgpu_device_ip_init()
3101 r = -ENOENT; in amdgpu_device_ip_init()
3107 amdgpu_reset_put_reset_domain(adev->reset_domain); in amdgpu_device_ip_init()
3108 adev->reset_domain = hive->reset_domain; in amdgpu_device_ip_init()
3118 if (adev->mman.buffer_funcs_ring->sched.ready) in amdgpu_device_ip_init()
3122 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { in amdgpu_device_ip_init()
3138 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
3148 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); in amdgpu_device_fill_reset_magic()
3152 * amdgpu_device_check_vram_lost - check if vram is valid
3163 if (memcmp(adev->gart.ptr, adev->reset_magic, in amdgpu_device_check_vram_lost()
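Lines 3148 and 3163 are the two halves of the VRAM-lost check: at init a marker is copied out of the GART-mapped page into reset_magic, and after a reset a memcmp against that saved copy tells whether VRAM contents survived. As a predicate:

    static bool vram_lost_sketch(struct amdgpu_device *adev)
    {
        /* any difference means the contents did not survive reset */
        return !!memcmp(adev->gart.ptr, adev->reset_magic,
                        AMDGPU_RESET_MAGIC_NUM);
    }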
3184 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
3204 for (j = 0; j < adev->num_ip_blocks; j++) { in amdgpu_device_set_cg_state()
3205 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; in amdgpu_device_set_cg_state()
3206 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_set_cg_state()
3208 /* skip CG for GFX, SDMA on S0ix */ in amdgpu_device_set_cg_state()
3209 if (adev->in_s0ix && in amdgpu_device_set_cg_state()
3210 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || in amdgpu_device_set_cg_state()
3211 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) in amdgpu_device_set_cg_state()
3214 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && in amdgpu_device_set_cg_state()
3215 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && in amdgpu_device_set_cg_state()
3216 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && in amdgpu_device_set_cg_state()
3217 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && in amdgpu_device_set_cg_state()
3218 adev->ip_blocks[i].version->funcs->set_clockgating_state) { in amdgpu_device_set_cg_state()
3220 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i], in amdgpu_device_set_cg_state()
3224 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_set_cg_state()
3241 for (j = 0; j < adev->num_ip_blocks; j++) { in amdgpu_device_set_pg_state()
3242 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; in amdgpu_device_set_pg_state()
3243 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_set_pg_state()
3245 /* skip PG for GFX, SDMA on S0ix */ in amdgpu_device_set_pg_state()
3246 if (adev->in_s0ix && in amdgpu_device_set_pg_state()
3247 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || in amdgpu_device_set_pg_state()
3248 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) in amdgpu_device_set_pg_state()
3251 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && in amdgpu_device_set_pg_state()
3252 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && in amdgpu_device_set_pg_state()
3253 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && in amdgpu_device_set_pg_state()
3254 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && in amdgpu_device_set_pg_state()
3255 adev->ip_blocks[i].version->funcs->set_powergating_state) { in amdgpu_device_set_pg_state()
3257 r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i], in amdgpu_device_set_pg_state()
3261 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_set_pg_state()
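Both state walks above hinge on the index mapping at lines 3205 and 3242: gating runs the IP list front to back, ungating back to front, so a block is always gated after and ungated before the blocks that depend on it.

    /* j is the loop counter; returns the ip_blocks[] index to visit */
    static int gate_order_sketch(int j, int num_ip_blocks, bool gate)
    {
        return gate ? j : num_ip_blocks - j - 1;
    }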
3287 adev = gpu_ins->adev; in amdgpu_device_enable_mgpu_fan_boost()
3288 if (!(adev->flags & AMD_IS_APU) && in amdgpu_device_enable_mgpu_fan_boost()
3289 !gpu_ins->mgpu_fan_enabled) { in amdgpu_device_enable_mgpu_fan_boost()
3294 gpu_ins->mgpu_fan_enabled = 1; in amdgpu_device_enable_mgpu_fan_boost()
3305 * amdgpu_device_ip_late_init - run late init for hardware IPs
3321 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_late_init()
3322 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_ip_late_init()
3324 if (adev->ip_blocks[i].version->funcs->late_init) { in amdgpu_device_ip_late_init()
3325 r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]); in amdgpu_device_ip_late_init()
3328 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_late_init()
3332 adev->ip_blocks[i].status.late_initialized = true; in amdgpu_device_ip_late_init()
3355 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) || in amdgpu_device_ip_late_init()
3356 adev->asic_type == CHIP_ALDEBARAN)) in amdgpu_device_ip_late_init()
3359 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_device_ip_late_init()
3363 * Reset device p-state to low as this was booted with high. in amdgpu_device_ip_late_init()
3375 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { in amdgpu_device_ip_late_init()
3378 if (gpu_instance->adev->flags & AMD_IS_APU) in amdgpu_device_ip_late_init()
3381 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, in amdgpu_device_ip_late_init()
3400 if (!ip_block->version->funcs->hw_fini) { in amdgpu_ip_block_hw_fini()
3402 ip_block->version->funcs->name); in amdgpu_ip_block_hw_fini()
3404 r = ip_block->version->funcs->hw_fini(ip_block); in amdgpu_ip_block_hw_fini()
3408 ip_block->version->funcs->name, r); in amdgpu_ip_block_hw_fini()
3412 ip_block->status.hw = false; in amdgpu_ip_block_hw_fini()
3416 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3429 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_smu_fini_early()
3430 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_smu_fini_early()
3432 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_smu_fini_early()
3433 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); in amdgpu_device_smu_fini_early()
3443 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_fini_early()
3444 if (!adev->ip_blocks[i].version->funcs->early_fini) in amdgpu_device_ip_fini_early()
3447 r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini_early()
3450 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_fini_early()
3462 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini_early()
3463 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_ip_fini_early()
3466 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini_early()
3478 * amdgpu_device_ip_fini - run fini for hardware IPs
3494 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) in amdgpu_device_ip_fini()
3497 if (adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_ip_fini()
3502 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini()
3503 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_fini()
3506 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { in amdgpu_device_ip_fini()
3508 amdgpu_free_static_csa(&adev->virt.csa_obj); in amdgpu_device_ip_fini()
3515 if (adev->ip_blocks[i].version->funcs->sw_fini) { in amdgpu_device_ip_fini()
3516 r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini()
3520 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_fini()
3523 adev->ip_blocks[i].status.sw = false; in amdgpu_device_ip_fini()
3524 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_fini()
3527 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini()
3528 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_ip_fini()
3530 if (adev->ip_blocks[i].version->funcs->late_fini) in amdgpu_device_ip_fini()
3531 adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini()
3532 adev->ip_blocks[i].status.late_initialized = false; in amdgpu_device_ip_fini()
3541 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3559 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); in amdgpu_device_delay_enable_gfx_off()
3561 WARN_ON_ONCE(adev->gfx.gfx_off_state); in amdgpu_device_delay_enable_gfx_off()
3562 WARN_ON_ONCE(adev->gfx.gfx_off_req_count); in amdgpu_device_delay_enable_gfx_off()
3565 adev->gfx.gfx_off_state = true; in amdgpu_device_delay_enable_gfx_off()
3569 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3592 dev_warn(adev->dev, "Failed to disallow df cstate"); in amdgpu_device_ip_suspend_phase1()
3594 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_suspend_phase1()
3595 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_suspend_phase1()
3599 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) in amdgpu_device_ip_suspend_phase1()
3603 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); in amdgpu_device_ip_suspend_phase1()
3612 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3626 if (adev->in_s0ix) in amdgpu_device_ip_suspend_phase2()
3629 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_suspend_phase2()
3630 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_suspend_phase2()
3633 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) in amdgpu_device_ip_suspend_phase2()
3637 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { in amdgpu_device_ip_suspend_phase2()
3638 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_suspend_phase2()
3644 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_suspend_phase2()
3650 if (adev->in_s0ix && in amdgpu_device_ip_suspend_phase2()
3651 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX && in amdgpu_device_ip_suspend_phase2()
3653 cancel_delayed_work_sync(&adev->gfx.idle_work); in amdgpu_device_ip_suspend_phase2()
3654 /* skip suspend of gfx/mes and psp for S0ix in amdgpu_device_ip_suspend_phase2()
3655 * gfx is in gfxoff state, so on resume it will exit gfxoff just in amdgpu_device_ip_suspend_phase2()
3659 if (adev->in_s0ix && in amdgpu_device_ip_suspend_phase2()
3660 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || in amdgpu_device_ip_suspend_phase2()
3661 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || in amdgpu_device_ip_suspend_phase2()
3662 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) in amdgpu_device_ip_suspend_phase2()
3665 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ in amdgpu_device_ip_suspend_phase2()
3666 if (adev->in_s0ix && in amdgpu_device_ip_suspend_phase2()
3669 (adev->ip_blocks[i].version->type == in amdgpu_device_ip_suspend_phase2()
3673 /* Once swPSP provides the IMU, RLC FW binaries to TOS during cold-boot. in amdgpu_device_ip_suspend_phase2()
3674 * These are in TMR, hence are expected to be reused by PSP-TOS to reload in amdgpu_device_ip_suspend_phase2()
3676 * from here based on PMFW -> PSP message during re-init sequence. in amdgpu_device_ip_suspend_phase2()
3681 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs && in amdgpu_device_ip_suspend_phase2()
3682 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_ip_suspend_phase2()
3686 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); in amdgpu_device_ip_suspend_phase2()
3687 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_suspend_phase2()
3691 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_ip_suspend_phase2()
3692 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); in amdgpu_device_ip_suspend_phase2()
3695 adev->mp1_state, r); in amdgpu_device_ip_suspend_phase2()
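
Read together, the in_s0ix branches above collapse into one predicate: which IP block types keep their hardware state across S0ix (GFX sits in GFXOFF, PSP and MES stay powered, SDMA 5.x+ lives in the GFX power domain) and therefore must not be suspended. A condensed sketch of that rule using the driver's amd_ip_block_type enum; demo_skip_for_s0ix() is an illustrative helper, not driver code, and it glosses over the SDMA version check the real code performs.

static bool demo_skip_for_s0ix(enum amd_ip_block_type type)
{
	switch (type) {
	case AMD_IP_BLOCK_TYPE_PSP:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_MES:
	case AMD_IP_BLOCK_TYPE_SDMA:	/* SDMA 5.x+ only, see above */
		return true;		/* state survives S0ix */
	default:
		return false;		/* suspend normally */
	}
}
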
3706 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3749 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_reinit_early_sriov()
3753 block = &adev->ip_blocks[i]; in amdgpu_device_ip_reinit_early_sriov()
3754 block->status.hw = false; in amdgpu_device_ip_reinit_early_sriov()
3758 if (block->version->type != ip_order[j] || in amdgpu_device_ip_reinit_early_sriov()
3759 !block->status.valid) in amdgpu_device_ip_reinit_early_sriov()
3762 r = block->version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_reinit_early_sriov()
3764 dev_err(adev->dev, "RE-INIT-early: %s failed\n", in amdgpu_device_ip_reinit_early_sriov()
3765 block->version->funcs->name); in amdgpu_device_ip_reinit_early_sriov()
3768 block->status.hw = true; in amdgpu_device_ip_reinit_early_sriov()
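
After a host-initiated FLR the VF cannot rely on discovery order, so hw_init is replayed against an explicit ip_order[] table, as above. A reduced sketch of that double loop; demo_reinit_in_order() is illustrative and omits the SMC special-casing the late phase performs.

static int demo_reinit_in_order(struct amdgpu_device *adev,
				const enum amd_ip_block_type *ip_order,
				int order_len)
{
	int i, j, r;

	for (j = 0; j < order_len; j++) {
		for (i = 0; i < adev->num_ip_blocks; i++) {
			struct amdgpu_ip_block *block = &adev->ip_blocks[i];

			/* only re-init valid blocks of the requested type */
			if (block->version->type != ip_order[j] ||
			    !block->status.valid || block->status.hw)
				continue;

			r = block->version->funcs->hw_init(block);
			if (r)
				return r;
			block->status.hw = true;
		}
	}
	return 0;
}
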
3798 if (block->status.valid && !block->status.hw) { in amdgpu_device_ip_reinit_late_sriov()
3799 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_ip_reinit_late_sriov()
3802 r = block->version->funcs->hw_init(block); in amdgpu_device_ip_reinit_late_sriov()
3806 dev_err(adev->dev, "RE-INIT-late: %s failed\n", in amdgpu_device_ip_reinit_late_sriov()
3807 block->version->funcs->name); in amdgpu_device_ip_reinit_late_sriov()
3810 block->status.hw = true; in amdgpu_device_ip_reinit_late_sriov()
3818 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3833 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase1()
3834 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase1()
3836 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_resume_phase1()
3837 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || in amdgpu_device_ip_resume_phase1()
3838 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || in amdgpu_device_ip_resume_phase1()
3839 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { in amdgpu_device_ip_resume_phase1()
3841 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_ip_resume_phase1()
3851 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3867 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase2()
3868 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase2()
3870 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_resume_phase2()
3871 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || in amdgpu_device_ip_resume_phase2()
3872 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || in amdgpu_device_ip_resume_phase2()
3873 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE || in amdgpu_device_ip_resume_phase2()
3874 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_ip_resume_phase2()
3876 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_ip_resume_phase2()
3885 * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
3901 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase3()
3902 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase3()
3904 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { in amdgpu_device_ip_resume_phase3()
3905 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_ip_resume_phase3()
3915 * amdgpu_device_ip_resume - run resume for hardware IPs
3940 if (adev->mman.buffer_funcs_ring->sched.ready) in amdgpu_device_ip_resume()
3954 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3958 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3963 if (adev->is_atom_fw) { in amdgpu_device_detect_sriov_bios()
3965 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; in amdgpu_device_detect_sriov_bios()
3968 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; in amdgpu_device_detect_sriov_bios()
3971 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) in amdgpu_device_detect_sriov_bios()
3977 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
4002 * Fallback to the non-DC driver here by default so as not to in amdgpu_device_asic_has_dc_support()
4018 * Fallback to the non-DC driver here by default so as not to in amdgpu_device_asic_has_dc_support()
4034 * amdgpu_device_has_dc_support - check if dc is supported
4042 if (adev->enable_virtual_display || in amdgpu_device_has_dc_support()
4043 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) in amdgpu_device_has_dc_support()
4046 return amdgpu_device_asic_has_dc_support(adev->asic_type); in amdgpu_device_has_dc_support()
4067 task_barrier_enter(&hive->tb); in amdgpu_device_xgmi_reset_func()
4068 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); in amdgpu_device_xgmi_reset_func()
4070 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
4073 task_barrier_exit(&hive->tb); in amdgpu_device_xgmi_reset_func()
4074 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); in amdgpu_device_xgmi_reset_func()
4076 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
4082 task_barrier_full(&hive->tb); in amdgpu_device_xgmi_reset_func()
4083 adev->asic_reset_res = amdgpu_asic_reset(adev); in amdgpu_device_xgmi_reset_func()
4087 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
4089 adev->asic_reset_res, adev_to_drm(adev)->unique); in amdgpu_device_xgmi_reset_func()
4104 * In SR-IOV or passthrough mode, timeout for compute in amdgpu_device_get_job_timeout_settings()
4107 adev->gfx_timeout = msecs_to_jiffies(10000); in amdgpu_device_get_job_timeout_settings()
4108 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
4110 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? in amdgpu_device_get_job_timeout_settings()
4113 adev->compute_timeout = msecs_to_jiffies(60000); in amdgpu_device_get_job_timeout_settings()
4127 dev_warn(adev->dev, "lockup timeout disabled"); in amdgpu_device_get_job_timeout_settings()
4135 adev->gfx_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4138 adev->compute_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4141 adev->sdma_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4144 adev->video_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4152 * it should apply to all non-compute jobs. in amdgpu_device_get_job_timeout_settings()
4155 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
4157 adev->compute_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
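
amdgpu.lockup_timeout arrives as a comma-separated list of millisecond values applied positionally (gfx, compute, sdma, video), and a single value fans out to the other engines as the code above shows. A simplified, hypothetical parsing sketch; the real parser also treats 0 and negative values specially, mapping them to the default and to "no timeout".

#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static int demo_parse_lockup_timeout(char *input, long timeout_ms[4])
{
	char *tok;
	int index = 0;
	long ms;

	while ((tok = strsep(&input, ",")) != NULL && index < 4) {
		if (kstrtol(tok, 0, &ms))
			return -EINVAL;
		timeout_ms[index++] = ms;
	}

	/* One value given: fan it out to the remaining engines
	 * (simplified; the driver keeps compute's own default in
	 * some configurations).
	 */
	while (index > 0 && index < 4)
		timeout_ms[index++] = timeout_ms[0];

	return index ? 0 : -EINVAL;
}
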
4165 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
4175 domain = iommu_get_domain_for_dev(adev->dev); in amdgpu_device_check_iommu_direct_map()
4176 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY) in amdgpu_device_check_iommu_direct_map()
4177 adev->ram_is_direct_mapped = true; in amdgpu_device_check_iommu_direct_map()
4182 * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
4192 domain = iommu_get_domain_for_dev(adev->dev); in amdgpu_device_check_iommu_remap()
4193 if (domain && (domain->type == IOMMU_DOMAIN_DMA || in amdgpu_device_check_iommu_remap()
4194 domain->type == IOMMU_DOMAIN_DMA_FQ)) in amdgpu_device_check_iommu_remap()
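
Both IOMMU helpers reduce to inspecting the domain the device is attached to: an identity (or absent) domain means RAM is direct-mapped, while a DMA or DMA_FQ domain means addresses are remapped. A minimal sketch of the direct-map side; demo_ram_direct_mapped() is a hypothetical wrapper around the real API.

#include <linux/iommu.h>
#include <linux/device.h>

/* True when DMA addresses are identity-mapped, i.e. the GPU sees
 * system RAM directly and no IOVA translation is in effect.
 */
static bool demo_ram_direct_mapped(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	return !domain || domain->type == IOMMU_DOMAIN_IDENTITY;
}
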
4204 adev->gfx.mcbp = true; in amdgpu_device_set_mcbp()
4206 adev->gfx.mcbp = false; in amdgpu_device_set_mcbp()
4209 adev->gfx.mcbp = true; in amdgpu_device_set_mcbp()
4211 if (adev->gfx.mcbp) in amdgpu_device_set_mcbp()
4216 * amdgpu_device_init - initialize the driver
4229 struct pci_dev *pdev = adev->pdev; in amdgpu_device_init()
4235 adev->shutdown = false; in amdgpu_device_init()
4236 adev->flags = flags; in amdgpu_device_init()
4239 adev->asic_type = amdgpu_force_asic_type; in amdgpu_device_init()
4241 adev->asic_type = flags & AMD_ASIC_MASK; in amdgpu_device_init()
4243 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; in amdgpu_device_init()
4245 adev->usec_timeout *= 10; in amdgpu_device_init()
4246 adev->gmc.gart_size = 512 * 1024 * 1024; in amdgpu_device_init()
4247 adev->accel_working = false; in amdgpu_device_init()
4248 adev->num_rings = 0; in amdgpu_device_init()
4249 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub()); in amdgpu_device_init()
4250 adev->mman.buffer_funcs = NULL; in amdgpu_device_init()
4251 adev->mman.buffer_funcs_ring = NULL; in amdgpu_device_init()
4252 adev->vm_manager.vm_pte_funcs = NULL; in amdgpu_device_init()
4253 adev->vm_manager.vm_pte_num_scheds = 0; in amdgpu_device_init()
4254 adev->gmc.gmc_funcs = NULL; in amdgpu_device_init()
4255 adev->harvest_ip_mask = 0x0; in amdgpu_device_init()
4256 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); in amdgpu_device_init()
4257 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); in amdgpu_device_init()
4259 adev->smc_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4260 adev->smc_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4261 adev->pcie_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4262 adev->pcie_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4263 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext; in amdgpu_device_init()
4264 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext; in amdgpu_device_init()
4265 adev->pciep_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4266 adev->pciep_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4267 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; in amdgpu_device_init()
4268 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; in amdgpu_device_init()
4269 adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext; in amdgpu_device_init()
4270 adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext; in amdgpu_device_init()
4271 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4272 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4273 adev->didt_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4274 adev->didt_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4275 adev->gc_cac_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4276 adev->gc_cac_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4277 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; in amdgpu_device_init()
4278 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; in amdgpu_device_init()
4281 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, in amdgpu_device_init()
4282 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); in amdgpu_device_init()
4287 mutex_init(&adev->firmware.mutex); in amdgpu_device_init()
4288 mutex_init(&adev->pm.mutex); in amdgpu_device_init()
4289 mutex_init(&adev->gfx.gpu_clock_mutex); in amdgpu_device_init()
4290 mutex_init(&adev->srbm_mutex); in amdgpu_device_init()
4291 mutex_init(&adev->gfx.pipe_reserve_mutex); in amdgpu_device_init()
4292 mutex_init(&adev->gfx.gfx_off_mutex); in amdgpu_device_init()
4293 mutex_init(&adev->gfx.partition_mutex); in amdgpu_device_init()
4294 mutex_init(&adev->grbm_idx_mutex); in amdgpu_device_init()
4295 mutex_init(&adev->mn_lock); in amdgpu_device_init()
4296 mutex_init(&adev->virt.vf_errors.lock); in amdgpu_device_init()
4297 hash_init(adev->mn_hash); in amdgpu_device_init()
4298 mutex_init(&adev->psp.mutex); in amdgpu_device_init()
4299 mutex_init(&adev->notifier_lock); in amdgpu_device_init()
4300 mutex_init(&adev->pm.stable_pstate_ctx_lock); in amdgpu_device_init()
4301 mutex_init(&adev->benchmark_mutex); in amdgpu_device_init()
4302 mutex_init(&adev->gfx.reset_sem_mutex); in amdgpu_device_init()
4303 /* Initialize the mutex for cleaner shader isolation between GFX and compute processes */ in amdgpu_device_init()
4304 mutex_init(&adev->enforce_isolation_mutex); in amdgpu_device_init()
4306 adev->isolation[i].spearhead = dma_fence_get_stub(); in amdgpu_device_init()
4307 amdgpu_sync_create(&adev->isolation[i].active); in amdgpu_device_init()
4308 amdgpu_sync_create(&adev->isolation[i].prev); in amdgpu_device_init()
4310 mutex_init(&adev->gfx.kfd_sch_mutex); in amdgpu_device_init()
4311 mutex_init(&adev->gfx.workload_profile_mutex); in amdgpu_device_init()
4312 mutex_init(&adev->vcn.workload_profile_mutex); in amdgpu_device_init()
4320 spin_lock_init(&adev->mmio_idx_lock); in amdgpu_device_init()
4321 spin_lock_init(&adev->smc_idx_lock); in amdgpu_device_init()
4322 spin_lock_init(&adev->pcie_idx_lock); in amdgpu_device_init()
4323 spin_lock_init(&adev->uvd_ctx_idx_lock); in amdgpu_device_init()
4324 spin_lock_init(&adev->didt_idx_lock); in amdgpu_device_init()
4325 spin_lock_init(&adev->gc_cac_idx_lock); in amdgpu_device_init()
4326 spin_lock_init(&adev->se_cac_idx_lock); in amdgpu_device_init()
4327 spin_lock_init(&adev->audio_endpt_idx_lock); in amdgpu_device_init()
4328 spin_lock_init(&adev->mm_stats.lock); in amdgpu_device_init()
4329 spin_lock_init(&adev->virt.rlcg_reg_lock); in amdgpu_device_init()
4330 spin_lock_init(&adev->wb.lock); in amdgpu_device_init()
4332 INIT_LIST_HEAD(&adev->reset_list); in amdgpu_device_init()
4334 INIT_LIST_HEAD(&adev->ras_list); in amdgpu_device_init()
4336 INIT_LIST_HEAD(&adev->pm.od_kobj_list); in amdgpu_device_init()
4338 INIT_DELAYED_WORK(&adev->delayed_init_work, in amdgpu_device_init()
4340 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, in amdgpu_device_init()
4346 * each GFX and compute ring. If there are any fences, it schedules in amdgpu_device_init()
4352 INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work, in amdgpu_device_init()
4354 adev->gfx.enforce_isolation[i].adev = adev; in amdgpu_device_init()
4355 adev->gfx.enforce_isolation[i].xcp_id = i; in amdgpu_device_init()
4358 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); in amdgpu_device_init()
4360 adev->gfx.gfx_off_req_count = 1; in amdgpu_device_init()
4361 adev->gfx.gfx_off_residency = 0; in amdgpu_device_init()
4362 adev->gfx.gfx_off_entrycount = 0; in amdgpu_device_init()
4363 adev->pm.ac_power = power_supply_is_system_supplied() > 0; in amdgpu_device_init()
4365 atomic_set(&adev->throttling_logging_enabled, 1); in amdgpu_device_init()
4368 * to avoid log flooding. "-1" is subtracted since the thermal in amdgpu_device_init()
4373 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); in amdgpu_device_init()
4375 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); in amdgpu_device_init()
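
The (60 - 1) * HZ interval with a burst of 1 above means at most one throttling message per 59 seconds, and RATELIMIT_MSG_ON_RELEASE makes the suppressed-message summary come out when the window closes rather than mid-storm. A small sketch of the same pattern; the demo_* names are illustrative.

#include <linux/ratelimit.h>
#include <linux/device.h>

static struct ratelimit_state demo_rs;

static void demo_throttle_log_init(void)
{
	/* allow 1 message per 59 seconds */
	ratelimit_state_init(&demo_rs, (60 - 1) * HZ, 1);
	/* summarize suppressed events when the interval releases */
	ratelimit_set_flags(&demo_rs, RATELIMIT_MSG_ON_RELEASE);
}

static void demo_throttle_log(struct device *dev)
{
	if (__ratelimit(&demo_rs))	/* nonzero: OK to print now */
		dev_warn(dev, "thermal throttling event\n");
}
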
4379 if (adev->asic_type >= CHIP_BONAIRE) { in amdgpu_device_init()
4380 adev->rmmio_base = pci_resource_start(adev->pdev, 5); in amdgpu_device_init()
4381 adev->rmmio_size = pci_resource_len(adev->pdev, 5); in amdgpu_device_init()
4383 adev->rmmio_base = pci_resource_start(adev->pdev, 2); in amdgpu_device_init()
4384 adev->rmmio_size = pci_resource_len(adev->pdev, 2); in amdgpu_device_init()
4388 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); in amdgpu_device_init()
4390 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); in amdgpu_device_init()
4391 if (!adev->rmmio) in amdgpu_device_init()
4392 return -ENOMEM; in amdgpu_device_init()
4394 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); in amdgpu_device_init()
4395 DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size); in amdgpu_device_init()
4402 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); in amdgpu_device_init()
4403 if (!adev->reset_domain) in amdgpu_device_init()
4404 return -ENOMEM; in amdgpu_device_init()
4413 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); in amdgpu_device_init()
4431 * No need to remove conflicting FBs for non-display class devices. in amdgpu_device_init()
4434 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA || in amdgpu_device_init()
4435 (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) { in amdgpu_device_init()
4437 r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name); in amdgpu_device_init()
4450 adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; in amdgpu_device_init()
4454 if (adev->gmc.xgmi.supported) { in amdgpu_device_init()
4455 r = adev->gfxhub.funcs->get_xgmi_info(adev); in amdgpu_device_init()
4462 if (adev->virt.fw_reserve.p_pf2vf) in amdgpu_device_init()
4463 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) in amdgpu_device_init()
4464 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == in amdgpu_device_init()
4469 } else if ((adev->flags & AMD_IS_APU) && in amdgpu_device_init()
4472 adev->have_atomics_support = true; in amdgpu_device_init()
4474 adev->have_atomics_support = in amdgpu_device_init()
4475 !pci_enable_atomic_ops_to_root(adev->pdev, in amdgpu_device_init()
4480 if (!adev->have_atomics_support) in amdgpu_device_init()
4481 dev_info(adev->dev, "PCIe atomic ops are not supported\n"); in amdgpu_device_init()
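
On bare metal, have_atomics_support simply reflects whether 32- and 64-bit PCIe AtomicOp completion is usable on the path to the root port; under SR-IOV the flag is taken from the PF-to-VF info page instead, as the lines above show. A sketch of the bare-metal probe using the real PCI core helper; demo_pcie_atomics_supported() is a hypothetical wrapper.

#include <linux/pci.h>

static bool demo_pcie_atomics_supported(struct pci_dev *pdev)
{
	/* returns 0 when every bridge up to the root port forwards
	 * AtomicOps and the root port can complete them */
	return !pci_enable_atomic_ops_to_root(pdev,
			PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
			PCI_EXP_DEVCAP2_ATOMIC_COMP64);
}
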
4495 if (adev->bios) in amdgpu_device_init()
4502 if (adev->gmc.xgmi.num_physical_nodes) { in amdgpu_device_init()
4503 dev_info(adev->dev, "Pending hive reset.\n"); in amdgpu_device_init()
4520 dev_err(adev->dev, "asic reset on init failed\n"); in amdgpu_device_init()
4527 if (!adev->bios) { in amdgpu_device_init()
4528 dev_err(adev->dev, "no vBIOS found\n"); in amdgpu_device_init()
4529 r = -EINVAL; in amdgpu_device_init()
4535 dev_err(adev->dev, "gpu post error!\n"); in amdgpu_device_init()
4540 if (adev->bios) { in amdgpu_device_init()
4541 if (adev->is_atom_fw) { in amdgpu_device_init()
4545 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); in amdgpu_device_init()
4553 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); in amdgpu_device_init()
4566 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n"); in amdgpu_device_init()
4576 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); in amdgpu_device_init()
4583 dev_info(adev->dev, in amdgpu_device_init()
4585 adev->gfx.config.max_shader_engines, in amdgpu_device_init()
4586 adev->gfx.config.max_sh_per_se, in amdgpu_device_init()
4587 adev->gfx.config.max_cu_per_sh, in amdgpu_device_init()
4588 adev->gfx.cu_info.number); in amdgpu_device_init()
4590 adev->accel_working = true; in amdgpu_device_init()
4600 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); in amdgpu_device_init()
4612 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { in amdgpu_device_init()
4615 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); in amdgpu_device_init()
4621 queue_delayed_work(system_wq, &adev->delayed_init_work, in amdgpu_device_init()
4627 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_init()
4637 drm_err(&adev->ddev, in amdgpu_device_init()
4646 adev->ucode_sysfs_en = false; in amdgpu_device_init()
4649 adev->ucode_sysfs_en = true; in amdgpu_device_init()
4653 dev_err(adev->dev, "Could not create amdgpu device attr\n"); in amdgpu_device_init()
4655 r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); in amdgpu_device_init()
4657 dev_err(adev->dev, in amdgpu_device_init()
4667 dev_err(adev->dev, "amdgpu_pmu_init failed\n"); in amdgpu_device_init()
4670 if (amdgpu_device_cache_pci_state(adev->pdev)) in amdgpu_device_init()
4677 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) in amdgpu_device_init()
4678 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); in amdgpu_device_init()
4682 if (px || (!dev_is_removable(&adev->pdev->dev) && in amdgpu_device_init()
4684 vga_switcheroo_register_client(adev->pdev, in amdgpu_device_init()
4688 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); in amdgpu_device_init()
4690 if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) in amdgpu_device_init()
4695 adev->pm_nb.notifier_call = amdgpu_device_pm_notifier; in amdgpu_device_init()
4696 r = register_pm_notifier(&adev->pm_nb); in amdgpu_device_init()
4711 dev_err(adev->dev, "VF exclusive mode timeout\n"); in amdgpu_device_init()
4713 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; in amdgpu_device_init()
4714 adev->virt.ops = NULL; in amdgpu_device_init()
4715 r = -EAGAIN; in amdgpu_device_init()
4729 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); in amdgpu_device_unmap_mmio()
4731 /* Unmap all mapped bars - Doorbell, registers and VRAM */ in amdgpu_device_unmap_mmio()
4734 iounmap(adev->rmmio); in amdgpu_device_unmap_mmio()
4735 adev->rmmio = NULL; in amdgpu_device_unmap_mmio()
4736 if (adev->mman.aper_base_kaddr) in amdgpu_device_unmap_mmio()
4737 iounmap(adev->mman.aper_base_kaddr); in amdgpu_device_unmap_mmio()
4738 adev->mman.aper_base_kaddr = NULL; in amdgpu_device_unmap_mmio()
4741 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { in amdgpu_device_unmap_mmio()
4742 arch_phys_wc_del(adev->gmc.vram_mtrr); in amdgpu_device_unmap_mmio()
4743 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); in amdgpu_device_unmap_mmio()
4748 * amdgpu_device_fini_hw - tear down the driver
4757 dev_info(adev->dev, "amdgpu: finishing device.\n"); in amdgpu_device_fini_hw()
4758 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_fini_hw()
4760 if (adev->mman.initialized) in amdgpu_device_fini_hw()
4761 drain_workqueue(adev->mman.bdev.wq); in amdgpu_device_fini_hw()
4762 adev->shutdown = true; in amdgpu_device_fini_hw()
4764 unregister_pm_notifier(&adev->pm_nb); in amdgpu_device_fini_hw()
4776 if (adev->mode_info.mode_config_initialized) { in amdgpu_device_fini_hw()
4784 if (adev->pm.sysfs_initialized) in amdgpu_device_fini_hw()
4786 if (adev->ucode_sysfs_en) in amdgpu_device_fini_hw()
4803 if (adev->mman.initialized) in amdgpu_device_fini_hw()
4804 ttm_device_clear_dma_mappings(&adev->mman.bdev); in amdgpu_device_fini_hw()
4820 amdgpu_ucode_release(&adev->firmware.gpu_info_fw); in amdgpu_device_fini_sw()
4821 adev->accel_working = false; in amdgpu_device_fini_sw()
4822 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true)); in amdgpu_device_fini_sw()
4824 dma_fence_put(adev->isolation[i].spearhead); in amdgpu_device_fini_sw()
4825 amdgpu_sync_free(&adev->isolation[i].active); in amdgpu_device_fini_sw()
4826 amdgpu_sync_free(&adev->isolation[i].prev); in amdgpu_device_fini_sw()
4834 if (adev->bios) { in amdgpu_device_fini_sw()
4840 kfree(adev->fru_info); in amdgpu_device_fini_sw()
4841 adev->fru_info = NULL; in amdgpu_device_fini_sw()
4843 kfree(adev->xcp_mgr); in amdgpu_device_fini_sw()
4844 adev->xcp_mgr = NULL; in amdgpu_device_fini_sw()
4848 if (px || (!dev_is_removable(&adev->pdev->dev) && in amdgpu_device_fini_sw()
4850 vga_switcheroo_unregister_client(adev->pdev); in amdgpu_device_fini_sw()
4853 vga_switcheroo_fini_domain_pm_ops(adev->dev); in amdgpu_device_fini_sw()
4855 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) in amdgpu_device_fini_sw()
4856 vga_client_unregister(adev->pdev); in amdgpu_device_fini_sw()
4860 iounmap(adev->rmmio); in amdgpu_device_fini_sw()
4861 adev->rmmio = NULL; in amdgpu_device_fini_sw()
4867 if (adev->mman.discovery_bin) in amdgpu_device_fini_sw()
4870 amdgpu_reset_put_reset_domain(adev->reset_domain); in amdgpu_device_fini_sw()
4871 adev->reset_domain = NULL; in amdgpu_device_fini_sw()
4873 kfree(adev->pci_state); in amdgpu_device_fini_sw()
4878 * amdgpu_device_evict_resources - evict device resources
4891 if (!adev->in_s4 && (adev->flags & AMD_IS_APU)) in amdgpu_device_evict_resources()
4904 * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
4920 adev->in_s4 = true; in amdgpu_device_pm_notifier()
4923 adev->in_s4 = false; in amdgpu_device_pm_notifier()
4931 * amdgpu_device_prepare - prepare for device suspend
4944 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) in amdgpu_device_prepare()
4952 flush_delayed_work(&adev->gfx.gfx_off_delay_work); in amdgpu_device_prepare()
4954 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_prepare()
4955 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_prepare()
4957 if (!adev->ip_blocks[i].version->funcs->prepare_suspend) in amdgpu_device_prepare()
4959 r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]); in amdgpu_device_prepare()
4968 * amdgpu_device_suspend - initiate device suspend
4971 * @notify_clients: notify in-kernel DRM clients
4982 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) in amdgpu_device_suspend()
4985 adev->in_suspend = true; in amdgpu_device_suspend()
5000 cancel_delayed_work_sync(&adev->delayed_init_work); in amdgpu_device_suspend()
5006 if (!adev->in_s0ix) in amdgpu_device_suspend()
5007 amdgpu_amdkfd_suspend(adev, adev->in_runpm); in amdgpu_device_suspend()
5030 * amdgpu_device_resume - initiate device resume
5033 * @notify_clients: notify in-kernel DRM clients
5050 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) in amdgpu_device_resume()
5053 if (adev->in_s0ix) in amdgpu_device_resume()
5060 dev_err(adev->dev, "amdgpu asic init failed\n"); in amdgpu_device_resume()
5066 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); in amdgpu_device_resume()
5070 if (!adev->in_s0ix) { in amdgpu_device_resume()
5071 r = amdgpu_amdkfd_resume(adev, adev->in_runpm); in amdgpu_device_resume()
5080 queue_delayed_work(system_wq, &adev->delayed_init_work, in amdgpu_device_resume()
5092 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_resume()
5099 if (adev->mode_info.num_crtc) { in amdgpu_device_resume()
5110 dev->dev->power.disable_depth++; in amdgpu_device_resume()
5112 if (!adev->dc_enabled) in amdgpu_device_resume()
5117 dev->dev->power.disable_depth--; in amdgpu_device_resume()
5120 adev->in_suspend = false; in amdgpu_device_resume()
5122 if (adev->enable_mes) in amdgpu_device_resume()
5132 * amdgpu_device_ip_check_soft_reset - check for hung IP blocks in amdgpu_device_ip_check_soft_reset()
5152 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_check_soft_reset()
5153 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_check_soft_reset()
5155 if (adev->ip_blocks[i].version->funcs->check_soft_reset) in amdgpu_device_ip_check_soft_reset()
5156 adev->ip_blocks[i].status.hang = in amdgpu_device_ip_check_soft_reset()
5157 adev->ip_blocks[i].version->funcs->check_soft_reset( in amdgpu_device_ip_check_soft_reset()
5158 &adev->ip_blocks[i]); in amdgpu_device_ip_check_soft_reset()
5159 if (adev->ip_blocks[i].status.hang) { in amdgpu_device_ip_check_soft_reset()
5160 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); in amdgpu_device_ip_check_soft_reset()
5168 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
5182 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_pre_soft_reset()
5183 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_pre_soft_reset()
5185 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_pre_soft_reset()
5186 adev->ip_blocks[i].version->funcs->pre_soft_reset) { in amdgpu_device_ip_pre_soft_reset()
5187 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]); in amdgpu_device_ip_pre_soft_reset()
5197 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
5212 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_need_full_reset()
5213 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_need_full_reset()
5215 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || in amdgpu_device_ip_need_full_reset()
5216 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || in amdgpu_device_ip_need_full_reset()
5217 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || in amdgpu_device_ip_need_full_reset()
5218 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || in amdgpu_device_ip_need_full_reset()
5219 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { in amdgpu_device_ip_need_full_reset()
5220 if (adev->ip_blocks[i].status.hang) { in amdgpu_device_ip_need_full_reset()
5221 dev_info(adev->dev, "Some IP block needs a full reset!\n"); in amdgpu_device_ip_need_full_reset()
5230 * amdgpu_device_ip_soft_reset - do a soft reset
5244 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_soft_reset()
5245 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_soft_reset()
5247 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_soft_reset()
5248 adev->ip_blocks[i].version->funcs->soft_reset) { in amdgpu_device_ip_soft_reset()
5249 r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]); in amdgpu_device_ip_soft_reset()
5259 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
5273 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_post_soft_reset()
5274 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_post_soft_reset()
5276 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_post_soft_reset()
5277 adev->ip_blocks[i].version->funcs->post_soft_reset) in amdgpu_device_ip_post_soft_reset()
5278 r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]); in amdgpu_device_ip_post_soft_reset()
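
The four helpers above form a check/prepare/reset/cleanup pipeline that recovery code can try before escalating to a full ASIC reset. A hedged sketch of how a caller might string them together; the driver's actual recovery path (amdgpu_device_pre_asic_reset and friends) adds job handling and the full-reset fallback.

static int demo_try_soft_reset(struct amdgpu_device *adev)
{
	int r;

	/* populate per-block status.hang; false means nothing is stuck */
	if (!amdgpu_device_ip_check_soft_reset(adev))
		return 0;

	/* GMC, SMC, ACP, DCE and PSP hangs need a full ASIC reset */
	if (amdgpu_device_ip_need_full_reset(adev))
		return -EAGAIN;		/* caller escalates */

	r = amdgpu_device_ip_pre_soft_reset(adev);
	if (!r)
		r = amdgpu_device_ip_soft_reset(adev);
	if (!r)
		r = amdgpu_device_ip_post_soft_reset(adev);
	return r;
}
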
5287 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5301 if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) { in amdgpu_device_reset_sriov()
5305 clear_bit(AMDGPU_HOST_FLR, &reset_context->flags); in amdgpu_device_reset_sriov()
5337 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reset_sriov()
5348 if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) in amdgpu_device_reset_sriov()
5352 * bare-metal does. in amdgpu_device_reset_sriov()
5371 * amdgpu_device_has_job_running - check if there is any unfinished job
5385 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_has_job_running()
5397 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5417 if (amdgpu_gpu_recovery == -1) { in amdgpu_device_should_recover_gpu()
5418 switch (adev->asic_type) { in amdgpu_device_should_recover_gpu()
5443 dev_info(adev->dev, "GPU recovery disabled.\n"); in amdgpu_device_should_recover_gpu()
5452 if (adev->bios) in amdgpu_device_mode1_reset()
5455 dev_info(adev->dev, "GPU mode1 reset\n"); in amdgpu_device_mode1_reset()
5458 * values are used in other cases like restore after mode-2 reset. in amdgpu_device_mode1_reset()
5460 amdgpu_device_cache_pci_state(adev->pdev); in amdgpu_device_mode1_reset()
5463 pci_clear_master(adev->pdev); in amdgpu_device_mode1_reset()
5466 dev_info(adev->dev, "GPU smu mode1 reset\n"); in amdgpu_device_mode1_reset()
5469 dev_info(adev->dev, "GPU psp mode1 reset\n"); in amdgpu_device_mode1_reset()
5476 amdgpu_device_load_pci_state(adev->pdev); in amdgpu_device_mode1_reset()
5482 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_device_mode1_reset()
5483 u32 memsize = adev->nbio.funcs->get_memsize(adev); in amdgpu_device_mode1_reset()
5490 if (i >= adev->usec_timeout) { in amdgpu_device_mode1_reset()
5491 ret = -ETIMEDOUT; in amdgpu_device_mode1_reset()
5495 if (adev->bios) in amdgpu_device_mode1_reset()
5501 dev_err(adev->dev, "GPU mode1 reset failed\n"); in amdgpu_device_mode1_reset()
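
Once config space is restored, the driver polls an NBIO memory-size register until the ASIC responds again, bounded by usec_timeout, as the loop above shows. A minimal sketch of that wait, reusing the callback visible above; demo_wait_for_memsize() is an illustrative name.

#include <linux/delay.h>

static int demo_wait_for_memsize(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* all-ones reads back while the ASIC is still in reset */
		if (adev->nbio.funcs->get_memsize(adev) != 0xffffffff)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
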
5510 struct amdgpu_device *tmp_adev = reset_context->reset_req_dev; in amdgpu_device_pre_asic_reset()
5512 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_device_pre_asic_reset()
5514 if (reset_context->reset_req_dev == adev) in amdgpu_device_pre_asic_reset()
5515 job = reset_context->job; in amdgpu_device_pre_asic_reset()
5524 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_pre_asic_reset()
5540 if (job && job->vm) in amdgpu_device_pre_asic_reset()
5541 drm_sched_increase_karma(&job->base); in amdgpu_device_pre_asic_reset()
5545 if (r == -EOPNOTSUPP) in amdgpu_device_pre_asic_reset()
5562 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); in amdgpu_device_pre_asic_reset()
5567 if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) { in amdgpu_device_pre_asic_reset()
5568 dev_info(tmp_adev->dev, "Dumping IP State\n"); in amdgpu_device_pre_asic_reset()
5570 for (i = 0; i < tmp_adev->num_ip_blocks; i++) in amdgpu_device_pre_asic_reset()
5571 if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) in amdgpu_device_pre_asic_reset()
5572 tmp_adev->ip_blocks[i].version->funcs in amdgpu_device_pre_asic_reset()
5573 ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]); in amdgpu_device_pre_asic_reset()
5574 dev_info(tmp_adev->dev, "Dumping IP State Completed\n"); in amdgpu_device_pre_asic_reset()
5580 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_device_pre_asic_reset()
5583 &reset_context->flags); in amdgpu_device_pre_asic_reset()
5596 device_list_handle = reset_context->reset_device_list; in amdgpu_device_reinit_after_reset()
5599 return -EINVAL; in amdgpu_device_reinit_after_reset()
5601 full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_device_reinit_after_reset()
5607 if (reset_context->method == AMD_RESET_METHOD_ON_INIT) in amdgpu_device_reinit_after_reset()
5620 dev_warn(tmp_adev->dev, "asic atom init failed!"); in amdgpu_device_reinit_after_reset()
5622 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); in amdgpu_device_reinit_after_reset()
5630 if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) in amdgpu_device_reinit_after_reset()
5631 amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job); in amdgpu_device_reinit_after_reset()
5643 tmp_adev->xcp_mgr); in amdgpu_device_reinit_after_reset()
5651 if (tmp_adev->mman.buffer_funcs_ring->sched.ready) in amdgpu_device_reinit_after_reset()
5667 if (!reset_context->hive && in amdgpu_device_reinit_after_reset()
5668 tmp_adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reinit_after_reset()
5691 r = -EINVAL; in amdgpu_device_reinit_after_reset()
5696 if (reset_context->hive && in amdgpu_device_reinit_after_reset()
5697 tmp_adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reinit_after_reset()
5699 reset_context->hive, tmp_adev); in amdgpu_device_reinit_after_reset()
5711 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); in amdgpu_device_reinit_after_reset()
5712 r = -EAGAIN; in amdgpu_device_reinit_after_reset()
5718 tmp_adev->asic_reset_res = r; in amdgpu_device_reinit_after_reset()
5736 reset_context->reset_device_list = device_list_handle; in amdgpu_do_asic_reset()
5739 if (r == -EOPNOTSUPP) in amdgpu_do_asic_reset()
5746 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
5747 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
5756 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_do_asic_reset()
5758 &tmp_adev->xgmi_reset_work)) in amdgpu_do_asic_reset()
5759 r = -EALREADY; in amdgpu_do_asic_reset()
5764 dev_err(tmp_adev->dev, in amdgpu_do_asic_reset()
5766 r, adev_to_drm(tmp_adev)->unique); in amdgpu_do_asic_reset()
5775 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_do_asic_reset()
5776 flush_work(&tmp_adev->xgmi_reset_work); in amdgpu_do_asic_reset()
5777 r = tmp_adev->asic_reset_res; in amdgpu_do_asic_reset()
5795 if (r == -EAGAIN) in amdgpu_do_asic_reset()
5796 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
5798 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
5809 adev->mp1_state = PP_MP1_STATE_SHUTDOWN; in amdgpu_device_set_mp1_state()
5812 adev->mp1_state = PP_MP1_STATE_RESET; in amdgpu_device_set_mp1_state()
5815 adev->mp1_state = PP_MP1_STATE_NONE; in amdgpu_device_set_mp1_state()
5823 adev->mp1_state = PP_MP1_STATE_NONE; in amdgpu_device_unset_mp1_state()
5830 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), in amdgpu_device_resume_display_audio()
5831 adev->pdev->bus->number, 1); in amdgpu_device_resume_display_audio()
5833 pm_runtime_enable(&(p->dev)); in amdgpu_device_resume_display_audio()
5834 pm_runtime_resume(&(p->dev)); in amdgpu_device_resume_display_audio()
5853 return -EINVAL; in amdgpu_device_suspend_display_audio()
5855 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), in amdgpu_device_suspend_display_audio()
5856 adev->pdev->bus->number, 1); in amdgpu_device_suspend_display_audio()
5858 return -ENODEV; in amdgpu_device_suspend_display_audio()
5860 expires = pm_runtime_autosuspend_expiration(&(p->dev)); in amdgpu_device_suspend_display_audio()
5870 while (!pm_runtime_status_suspended(&(p->dev))) { in amdgpu_device_suspend_display_audio()
5871 if (!pm_runtime_suspend(&(p->dev))) in amdgpu_device_suspend_display_audio()
5875 dev_warn(adev->dev, "failed to suspend display audio\n"); in amdgpu_device_suspend_display_audio()
5878 return -ETIMEDOUT; in amdgpu_device_suspend_display_audio()
5882 pm_runtime_disable(&(p->dev)); in amdgpu_device_suspend_display_audio()
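
Both audio helpers operate on the GPU's HDA controller, which sits at PCI function 1 of the same domain/bus/slot, which is why the lookups above pass devfn 1. A sketch of locating it and waking its runtime PM, using the real PCI and runtime-PM APIs; demo_resume_gpu_audio() is a hypothetical wrapper.

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static void demo_resume_gpu_audio(struct pci_dev *gpu)
{
	struct pci_dev *hda;

	/* HDA controller: function 1 on the same bus/slot as the GPU */
	hda = pci_get_domain_bus_and_slot(pci_domain_nr(gpu->bus),
					  gpu->bus->number, 1);
	if (!hda)
		return;

	pm_runtime_enable(&hda->dev);
	pm_runtime_resume(&hda->dev);
	pci_dev_put(hda);	/* drop the reference from the lookup */
}
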
5894 cancel_work(&adev->reset_work); in amdgpu_device_stop_pending_resets()
5897 if (adev->kfd.dev) in amdgpu_device_stop_pending_resets()
5898 cancel_work(&adev->kfd.reset_work); in amdgpu_device_stop_pending_resets()
5901 cancel_work(&adev->virt.flr_work); in amdgpu_device_stop_pending_resets()
5903 if (con && adev->ras_enabled) in amdgpu_device_stop_pending_resets()
5904 cancel_work(&con->recovery_work); in amdgpu_device_stop_pending_resets()
5915 pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status); in amdgpu_device_health_check()
5917 dev_err(tmp_adev->dev, "device lost from bus!"); in amdgpu_device_health_check()
5918 ret = -ENODEV; in amdgpu_device_health_check()
5926 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5933 * Attempt to do soft-reset or full-reset and reinitialize the ASIC in amdgpu_device_gpu_recover()
5956 reset_context->src != AMDGPU_RESET_SRC_RAS) { in amdgpu_device_gpu_recover()
5957 dev_dbg(adev->dev, in amdgpu_device_gpu_recover()
5959 reset_context->src); in amdgpu_device_gpu_recover()
5972 amdgpu_ras_get_context(adev)->reboot) { in amdgpu_device_gpu_recover()
5979 dev_info(adev->dev, "GPU %s begin!\n", in amdgpu_device_gpu_recover()
5985 mutex_lock(&hive->hive_lock); in amdgpu_device_gpu_recover()
5987 reset_context->job = job; in amdgpu_device_gpu_recover()
5988 reset_context->hive = hive; in amdgpu_device_gpu_recover()
5995 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) { in amdgpu_device_gpu_recover()
5996 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_device_gpu_recover()
5997 list_add_tail(&tmp_adev->reset_list, &device_list); in amdgpu_device_gpu_recover()
5998 if (adev->shutdown) in amdgpu_device_gpu_recover()
5999 tmp_adev->shutdown = true; in amdgpu_device_gpu_recover()
6001 if (!list_is_first(&adev->reset_list, &device_list)) in amdgpu_device_gpu_recover()
6002 list_rotate_to_front(&adev->reset_list, &device_list); in amdgpu_device_gpu_recover()
6005 list_add_tail(&adev->reset_list, &device_list); in amdgpu_device_gpu_recover()
6018 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); in amdgpu_device_gpu_recover()
6040 cancel_delayed_work_sync(&tmp_adev->delayed_init_work); in amdgpu_device_gpu_recover()
6058 struct amdgpu_ring *ring = tmp_adev->rings[i]; in amdgpu_device_gpu_recover()
6063 drm_sched_stop(&ring->sched, job ? &job->base : NULL); in amdgpu_device_gpu_recover()
6066 amdgpu_job_stop_all_jobs_on_sched(&ring->sched); in amdgpu_device_gpu_recover()
6068 atomic_inc(&tmp_adev->gpu_reset_counter); in amdgpu_device_gpu_recover()
6078 * job->base holds a reference to parent fence in amdgpu_device_gpu_recover()
6080 if (job && dma_fence_is_signaled(&job->hw_fence)) { in amdgpu_device_gpu_recover()
6082 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); in amdgpu_device_gpu_recover()
6091 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", in amdgpu_device_gpu_recover()
6092 r, adev_to_drm(tmp_adev)->unique); in amdgpu_device_gpu_recover()
6093 tmp_adev->asic_reset_res = r; in amdgpu_device_gpu_recover()
6101 dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n"); in amdgpu_device_gpu_recover()
6103 set_bit(AMDGPU_HOST_FLR, &reset_context->flags); in amdgpu_device_gpu_recover()
6107 if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) { in amdgpu_device_gpu_recover()
6112 adev->asic_reset_res = r; in amdgpu_device_gpu_recover()
6115 if (r && r == -EAGAIN) in amdgpu_device_gpu_recover()
6135 struct amdgpu_ring *ring = tmp_adev->rings[i]; in amdgpu_device_gpu_recover()
6140 drm_sched_start(&ring->sched, 0); in amdgpu_device_gpu_recover()
6146 if (tmp_adev->asic_reset_res) in amdgpu_device_gpu_recover()
6147 r = tmp_adev->asic_reset_res; in amdgpu_device_gpu_recover()
6149 tmp_adev->asic_reset_res = 0; in amdgpu_device_gpu_recover()
6156 if (reset_context->src != AMDGPU_RESET_SRC_RAS || in amdgpu_device_gpu_recover()
6158 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", in amdgpu_device_gpu_recover()
6159 atomic_read(&tmp_adev->gpu_reset_counter)); in amdgpu_device_gpu_recover()
6162 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); in amdgpu_device_gpu_recover()
6177 if (!adev->kfd.init_complete) in amdgpu_device_gpu_recover()
6190 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); in amdgpu_device_gpu_recover()
6194 mutex_unlock(&hive->hive_lock); in amdgpu_device_gpu_recover()
6199 dev_info(adev->dev, "GPU reset end with ret = %d\n", r); in amdgpu_device_gpu_recover()
6201 atomic_set(&adev->reset_domain->reset_res, r); in amdgpu_device_gpu_recover()
6210 * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
6224 struct pci_dev *parent = adev->pdev; in amdgpu_device_partner_bandwidth()
6235 if (parent->vendor == PCI_VENDOR_ID_ATI) in amdgpu_device_partner_bandwidth()
6243 pcie_bandwidth_available(adev->pdev, NULL, speed, width); in amdgpu_device_partner_bandwidth()
6248 * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
6261 struct pci_dev *parent = adev->pdev; in amdgpu_device_gpu_bandwidth()
6267 if (parent && parent->vendor == PCI_VENDOR_ID_ATI) { in amdgpu_device_gpu_bandwidth()
6272 if (parent->vendor == PCI_VENDOR_ID_ATI) { in amdgpu_device_gpu_bandwidth()
6280 *speed = pcie_get_speed_cap(adev->pdev); in amdgpu_device_gpu_bandwidth()
6281 *width = pcie_get_width_cap(adev->pdev); in amdgpu_device_gpu_bandwidth()
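
The speed/width plumbing bottoms out in three PCI core helpers: what the endpoint itself is capable of, versus the bandwidth actually deliverable through the weakest link of the hierarchy. A minimal sketch using those real helpers; demo_query_pcie_link() is illustrative.

#include <linux/pci.h>

static void demo_query_pcie_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed, avail_speed;
	enum pcie_link_width width, avail_width;

	/* what the device itself could do... */
	speed = pcie_get_speed_cap(pdev);
	width = pcie_get_width_cap(pdev);

	/* ...vs. what the weakest link up to the root port allows */
	pcie_bandwidth_available(pdev, NULL, &avail_speed, &avail_width);
}
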
6286 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot in amdgpu_device_get_pcie_info()
6300 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; in amdgpu_device_get_pcie_info()
6303 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; in amdgpu_device_get_pcie_info()
6306 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) { in amdgpu_device_get_pcie_info()
6307 if (adev->pm.pcie_gen_mask == 0) in amdgpu_device_get_pcie_info()
6308 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; in amdgpu_device_get_pcie_info()
6309 if (adev->pm.pcie_mlw_mask == 0) in amdgpu_device_get_pcie_info()
6310 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
6314 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask) in amdgpu_device_get_pcie_info()
6321 if (adev->pm.pcie_gen_mask == 0) { in amdgpu_device_get_pcie_info()
6324 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6329 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6335 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6340 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6344 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6347 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1; in amdgpu_device_get_pcie_info()
6351 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6355 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6361 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6366 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6370 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6373 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; in amdgpu_device_get_pcie_info()
6377 if (adev->pm.pcie_mlw_mask == 0) { in amdgpu_device_get_pcie_info()
6380 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
6384 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 | in amdgpu_device_get_pcie_info()
6393 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 | in amdgpu_device_get_pcie_info()
6401 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 | in amdgpu_device_get_pcie_info()
6408 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 | in amdgpu_device_get_pcie_info()
6414 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 | in amdgpu_device_get_pcie_info()
6419 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 | in amdgpu_device_get_pcie_info()
6423 adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1; in amdgpu_device_get_pcie_info()
6431 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
6435 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | in amdgpu_device_get_pcie_info()
6444 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | in amdgpu_device_get_pcie_info()
6452 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | in amdgpu_device_get_pcie_info()
6459 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | in amdgpu_device_get_pcie_info()
6465 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | in amdgpu_device_get_pcie_info()
6470 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | in amdgpu_device_get_pcie_info()
6474 adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; in amdgpu_device_get_pcie_info()
6484 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
6498 !adev->gmc.xgmi.connected_to_cpu && in amdgpu_device_is_peer_accessible()
6499 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0); in amdgpu_device_is_peer_accessible()
6501 dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n", in amdgpu_device_is_peer_accessible()
6502 pci_name(peer_adev->pdev)); in amdgpu_device_is_peer_accessible()
6504 bool is_large_bar = adev->gmc.visible_vram_size && in amdgpu_device_is_peer_accessible()
6505 adev->gmc.real_vram_size == adev->gmc.visible_vram_size; in amdgpu_device_is_peer_accessible()
6509 uint64_t address_mask = peer_adev->dev->dma_mask ? in amdgpu_device_is_peer_accessible()
6510 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1); in amdgpu_device_is_peer_accessible()
6512 adev->gmc.aper_base + adev->gmc.aper_size - 1; in amdgpu_device_is_peer_accessible()
6514 p2p_addressable = !(adev->gmc.aper_base & address_mask || in amdgpu_device_is_peer_accessible()
6529 return -ENOTSUPP; in amdgpu_device_baco_enter()
6531 if (ras && adev->ras_enabled && in amdgpu_device_baco_enter()
6532 adev->nbio.funcs->enable_doorbell_interrupt) in amdgpu_device_baco_enter()
6533 adev->nbio.funcs->enable_doorbell_interrupt(adev, false); in amdgpu_device_baco_enter()
6545 return -ENOTSUPP; in amdgpu_device_baco_exit()
6551 if (ras && adev->ras_enabled && in amdgpu_device_baco_exit()
6552 adev->nbio.funcs->enable_doorbell_interrupt) in amdgpu_device_baco_exit()
6553 adev->nbio.funcs->enable_doorbell_interrupt(adev, true); in amdgpu_device_baco_exit()
6555 if (amdgpu_passthrough(adev) && adev->nbio.funcs && in amdgpu_device_baco_exit()
6556 adev->nbio.funcs->clear_doorbell_interrupt) in amdgpu_device_baco_exit()
6557 adev->nbio.funcs->clear_doorbell_interrupt(adev); in amdgpu_device_baco_exit()
6563 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6579 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_pci_error_detected()
6584 adev->pci_channel_state = state; in amdgpu_pci_error_detected()
6592 * Locking adev->reset_domain->sem will prevent any external access in amdgpu_pci_error_detected()
6595 amdgpu_device_lock_reset_domain(adev->reset_domain); in amdgpu_pci_error_detected()
6603 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_pci_error_detected()
6608 drm_sched_stop(&ring->sched, NULL); in amdgpu_pci_error_detected()
6610 atomic_inc(&adev->gpu_reset_counter); in amdgpu_pci_error_detected()
6621 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6629 /* TODO - dump whatever for debugging purposes */ in amdgpu_pci_mmio_enabled()
6640 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6667 list_add_tail(&adev->reset_list, &device_list); in amdgpu_pci_slot_reset()
6676 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_pci_slot_reset()
6684 r = -ETIME; in amdgpu_pci_slot_reset()
6693 adev->no_hw_access = true; in amdgpu_pci_slot_reset()
6695 adev->no_hw_access = false; in amdgpu_pci_slot_reset()
6703 if (amdgpu_device_cache_pci_state(adev->pdev)) in amdgpu_pci_slot_reset()
6704 pci_restore_state(adev->pdev); in amdgpu_pci_slot_reset()
6710 amdgpu_device_unlock_reset_domain(adev->reset_domain); in amdgpu_pci_slot_reset()
6717 * amdgpu_pci_resume() - resume normal ops after PCI reset
6733 if (adev->pci_channel_state != pci_channel_io_frozen) in amdgpu_pci_resume()
6737 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_pci_resume()
6742 drm_sched_start(&ring->sched, 0); in amdgpu_pci_resume()
6746 amdgpu_device_unlock_reset_domain(adev->reset_domain); in amdgpu_pci_resume()
6760 kfree(adev->pci_state); in amdgpu_device_cache_pci_state()
6762 adev->pci_state = pci_store_saved_state(pdev); in amdgpu_device_cache_pci_state()
6764 if (!adev->pci_state) { in amdgpu_device_cache_pci_state()
6782 if (!adev->pci_state) in amdgpu_device_load_pci_state()
6785 r = pci_load_saved_state(pdev, adev->pci_state); in amdgpu_device_load_pci_state()
6801 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) in amdgpu_device_flush_hdp()
6804 if (adev->gmc.xgmi.connected_to_cpu) in amdgpu_device_flush_hdp()
6807 if (ring && ring->funcs->emit_hdp_flush) in amdgpu_device_flush_hdp()
6817 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) in amdgpu_device_invalidate_hdp()
6820 if (adev->gmc.xgmi.connected_to_cpu) in amdgpu_device_invalidate_hdp()
6828 return atomic_read(&adev->reset_domain->in_gpu_reset); in amdgpu_in_reset()
6832 * amdgpu_device_halt() - bring hardware to some kind of halt state
6846 * 4. set adev->no_hw_access to avoid potential crashes after step 5 in amdgpu_device_halt()
6853 struct pci_dev *pdev = adev->pdev; in amdgpu_device_halt()
6863 adev->no_hw_access = true; in amdgpu_device_halt()
6877 address = adev->nbio.funcs->get_pcie_port_index_offset(adev); in amdgpu_device_pcie_port_rreg()
6878 data = adev->nbio.funcs->get_pcie_port_data_offset(adev); in amdgpu_device_pcie_port_rreg()
6880 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_rreg()
6884 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_rreg()
6893 address = adev->nbio.funcs->get_pcie_port_index_offset(adev); in amdgpu_device_pcie_port_wreg()
6894 data = adev->nbio.funcs->get_pcie_port_data_offset(adev); in amdgpu_device_pcie_port_wreg()
6896 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_wreg()
6901 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_wreg()
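
PCIe port registers are reached through an index/data pair: write the register offset to the index location, then access the data location, all under one spinlock so the pair stays atomic. A sketch of the read side, assuming byte offsets into the mapped register BAR (amdgpu itself keeps register offsets in dwords and derives the two offsets from the NBIO callbacks above).

#include <linux/io.h>
#include <linux/spinlock.h>

static u32 demo_pcie_port_rreg(void __iomem *rmmio, spinlock_t *lock,
			       u32 index_off, u32 data_off, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lock, flags);
	writel(reg, rmmio + index_off);	/* select the port register */
	(void)readl(rmmio + index_off);	/* read back to post the write */
	val = readl(rmmio + data_off);	/* fetch the selected register */
	spin_unlock_irqrestore(lock, flags);

	return val;
}
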
6905 * amdgpu_device_get_gang - return a reference to the current gang
6915 fence = dma_fence_get_rcu_safe(&adev->gang_submit); in amdgpu_device_get_gang()
6921 * amdgpu_device_switch_gang - switch to a new gang
6946 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit, in amdgpu_device_switch_gang()
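
The gang-submit fence is swapped locklessly: pin the current fence with an RCU-safe get, then cmpxchg the new one in, retrying if another thread won the race, as the loop above shows. A reduced sketch of that pattern with simplified reference accounting; the real amdgpu_device_switch_gang additionally refuses the swap while the old gang has not signaled and hands the old fence back to the caller.

#include <linux/dma-fence.h>

/* Invariant assumed here: *slot owns one reference to whatever it
 * points at, and @new arrives with one reference that the slot takes
 * over on success.
 */
static void demo_switch_fence(struct dma_fence __rcu **slot,
			      struct dma_fence *new)
{
	struct dma_fence *old = NULL;

	do {
		dma_fence_put(old);	/* pin from a lost race (NULL-safe) */
		old = dma_fence_get_rcu_safe(slot);	/* pin current fence */
	} while (cmpxchg((struct dma_fence __force **)slot, old, new) != old);

	dma_fence_put(old);	/* our pin from dma_fence_get_rcu_safe() */
	dma_fence_put(old);	/* the reference the slot itself held */
}
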
6959 * amdgpu_device_enforce_isolation - enforce HW isolation
6964 * Makes sure that only one client at a time can use the GFX block.
6972 struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id]; in amdgpu_device_enforce_isolation()
6973 struct drm_sched_fence *f = job->base.s_fence; in amdgpu_device_enforce_isolation()
6979 * For now enforce isolation only for the GFX block since we only need in amdgpu_device_enforce_isolation()
6982 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX && in amdgpu_device_enforce_isolation()
6983 ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) in amdgpu_device_enforce_isolation()
6991 owner = job->enforce_isolation ? f->owner : (void *)~0l; in amdgpu_device_enforce_isolation()
6993 mutex_lock(&adev->enforce_isolation_mutex); in amdgpu_device_enforce_isolation()
7000 if (&f->scheduled != isolation->spearhead && in amdgpu_device_enforce_isolation()
7001 !dma_fence_is_signaled(isolation->spearhead)) { in amdgpu_device_enforce_isolation()
7002 dep = isolation->spearhead; in amdgpu_device_enforce_isolation()
7006 if (isolation->owner != owner) { in amdgpu_device_enforce_isolation()
7013 if (!job->gang_submit) { in amdgpu_device_enforce_isolation()
7020 dma_fence_put(isolation->spearhead); in amdgpu_device_enforce_isolation()
7021 isolation->spearhead = dma_fence_get(&f->scheduled); in amdgpu_device_enforce_isolation()
7022 amdgpu_sync_move(&isolation->active, &isolation->prev); in amdgpu_device_enforce_isolation()
7023 trace_amdgpu_isolation(isolation->owner, owner); in amdgpu_device_enforce_isolation()
7024 isolation->owner = owner; in amdgpu_device_enforce_isolation()
7033 dep = amdgpu_sync_peek_fence(&isolation->prev, ring); in amdgpu_device_enforce_isolation()
7034 r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT); in amdgpu_device_enforce_isolation()
7041 mutex_unlock(&adev->enforce_isolation_mutex); in amdgpu_device_enforce_isolation()
7047 switch (adev->asic_type) { in amdgpu_device_has_display_hardware()
7080 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) in amdgpu_device_has_display_hardware()
7093 uint32_t loop = adev->usec_timeout; in amdgpu_device_wait_on_rreg()
7097 loop = adev->usec_timeout; in amdgpu_device_wait_on_rreg()
7102 loop--; in amdgpu_device_wait_on_rreg()
7107 ret = -ETIMEDOUT; in amdgpu_device_wait_on_rreg()
7118 if (!ring || !ring->adev) in amdgpu_get_soft_full_reset_mask()
7121 if (amdgpu_device_should_recover_gpu(ring->adev)) in amdgpu_get_soft_full_reset_mask()
7124 if (unlikely(!ring->adev->debug_disable_soft_recovery) && in amdgpu_get_soft_full_reset_mask()
7125 !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery) in amdgpu_get_soft_full_reset_mask()