Lines matching full:sdma in drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c (the AMDGPU SDMA v4.4.2 engine driver; each hit keeps its source line number and enclosing function)
34 #include "sdma/sdma_4_4_2_offset.h"
35 #include "sdma/sdma_4_4_2_sh_mask.h"
105 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_2_inst_init_golden_registers()
134 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_2_init_microcode()
289 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v4_4_2_ring_insert_nop() local
293 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v4_4_2_ring_insert_nop()
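The two insert_nop hits above are the burst-NOP path: instead of having the engine fetch each padding NOP separately, the first NOP packet carries a COUNT so the whole run is skipped in one fetch. A minimal userspace sketch of that shape; the opcode value and field layout are illustrative assumptions, not the real SDMA 4.4.2 packet encoding:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the SDMA NOP opcode/count macros. */
#define SDMA_OP_NOP 0x0u
#define SDMA_PKT_NOP_HEADER_OP(op)     ((op) & 0xffu)
#define SDMA_PKT_NOP_HEADER_COUNT(cnt) (((uint32_t)(cnt) & 0x3fffu) << 16)

/* Emit `count` NOP dwords; a burst-capable engine gets one counted NOP
 * (covering the remaining count - 1 dwords) followed by plain NOPs. */
static void emit_nops(uint32_t *ring, int count, int burst_nop)
{
	for (int i = 0; i < count; i++) {
		if (burst_nop && i == 0)
			ring[i] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP) |
				  SDMA_PKT_NOP_HEADER_COUNT(count - 1);
		else
			ring[i] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	}
}

int main(void)
{
	uint32_t buf[8];

	emit_nops(buf, 4, 1);
	for (int i = 0; i < 4; i++)
		printf("dw%d: 0x%08x\n", i, buf[i]);
	return 0;
}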
429 struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; in sdma_v4_4_2_inst_gfx_stop() local
435 sdma[i] = &adev->sdma.instance[i].ring; in sdma_v4_4_2_inst_gfx_stop()
437 if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) { in sdma_v4_4_2_inst_gfx_stop()
449 if (sdma[i]->use_doorbell) { in sdma_v4_4_2_inst_gfx_stop()
488 struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; in sdma_v4_4_2_inst_page_stop() local
494 sdma[i] = &adev->sdma.instance[i].page; in sdma_v4_4_2_inst_page_stop()
496 if ((adev->mman.buffer_funcs_ring == sdma[i]) && in sdma_v4_4_2_inst_page_stop()
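Both stop routines above share a pattern: if one of the queues being torn down currently backs TTM buffer moves (adev->mman.buffer_funcs_ring), it is detached exactly once before the engines are disabled, which is what the `unset != 1` latch guards. A self-contained sketch of that latch with mocked-up types; the names mirror the driver's fields but nothing here is the real API:

#include <stdbool.h>
#include <stdio.h>

#define MAX_INST 4

struct ring { int id; };

static struct ring rings[MAX_INST];
static struct ring *buffer_funcs_ring = &rings[2]; /* mock TTM hook */

static void set_buffer_funcs_status(bool enable)
{
	printf("buffer funcs %s\n", enable ? "attached" : "detached");
}

/* Stop all queues; detach the buffer-move hook exactly once if a ring
 * being stopped currently backs it (the `unset` latch from the hits). */
static void gfx_stop(int num_instances)
{
	bool unset = false;

	for (int i = 0; i < num_instances; i++) {
		if (buffer_funcs_ring == &rings[i] && !unset) {
			set_buffer_funcs_status(false);
			unset = true;
		}
		printf("instance %d: RB_ENABLE=0, IB_ENABLE=0\n", i);
	}
}

int main(void)
{
	gfx_stop(MAX_INST);
	return 0;
}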
586 if (adev->sdma.has_page_queue) in sdma_v4_4_2_inst_enable()
589 /* SDMA FW needs to respond to FREEZE requests during reset. in sdma_v4_4_2_inst_enable()
634 struct amdgpu_ring *ring = &adev->sdma.instance[i].ring; in sdma_v4_4_2_gfx_resume()
722 struct amdgpu_ring *ring = &adev->sdma.instance[i].page; in sdma_v4_4_2_page_resume()
823 * sdma_v4_4_2_inst_load_microcode - load the sDMA ME ucode
843 if (!adev->sdma.instance[i].fw) in sdma_v4_4_2_inst_load_microcode()
846 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; in sdma_v4_4_2_inst_load_microcode()
851 (adev->sdma.instance[i].fw->data + in sdma_v4_4_2_inst_load_microcode()
861 adev->sdma.instance[i].fw_version); in sdma_v4_4_2_inst_load_microcode()
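The load_microcode hits outline direct firmware loading: skip instances without a blob, read the versioned header, locate the ucode payload inside the blob, and stream it dword by dword while publishing the firmware version. A sketch under a deliberately simplified header; the real struct sdma_firmware_header_v1_0 lives in amdgpu_ucode.h and carries more fields:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the versioned firmware header. */
struct fw_header {
	uint32_t ucode_size_bytes;
	uint32_t ucode_array_offset_bytes;
	uint32_t fw_version;
};

/* Copy the ucode payload dword by dword into a register-backed window
 * (modeled as an array), then publish the firmware version. */
static void load_microcode(const uint8_t *blob, uint32_t *ucode_ram,
			   uint32_t *version_reg)
{
	struct fw_header hdr;

	memcpy(&hdr, blob, sizeof(hdr));
	for (uint32_t i = 0; i < hdr.ucode_size_bytes / 4; i++)
		memcpy(&ucode_ram[i],
		       blob + hdr.ucode_array_offset_bytes + 4 * i, 4);
	*version_reg = hdr.fw_version;
}

int main(void)
{
	uint8_t blob[32] = {0};
	struct fw_header hdr = {
		.ucode_size_bytes = 8,
		.ucode_array_offset_bytes = 16,
		.fw_version = 42,
	};
	uint32_t ram[2], ver;

	memcpy(blob, &hdr, sizeof(hdr));
	load_microcode(blob, ram, &ver);
	printf("loaded %u dwords, fw_version %u\n",
	       hdr.ucode_size_bytes / 4, ver);
	return 0;
}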
887 /* bypass sdma microcode loading on Gopher */ in sdma_v4_4_2_inst_start()
889 adev->sdma.instance[0].fw) { in sdma_v4_4_2_inst_start()
897 /* enable sdma ring preemption */ in sdma_v4_4_2_inst_start()
908 if (adev->sdma.has_page_queue) in sdma_v4_4_2_inst_start()
939 ring = &adev->sdma.instance[i].ring; in sdma_v4_4_2_inst_start()
945 if (adev->sdma.has_page_queue) { in sdma_v4_4_2_inst_start()
946 struct amdgpu_ring *page = &adev->sdma.instance[i].page; in sdma_v4_4_2_inst_start()
1093 * Update PTEs by copying them from the GART using sDMA.
1121 * Update PTEs by writing them manually using sDMA.
1142 * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA
1151 * Update the page tables using sDMA.
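The three doc comments above cover the PTE helpers: copy_pte DMAs entries out of the GART, write_pte emits CPU-composed values, and set_pte_pde has the engine generate a run of entries itself from a base address, a per-entry increment, and shared flags. A model of that last expansion; the flag bits here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bits, not the real AMDGPU PTE layout. */
#define PTE_VALID  (1ull << 0)
#define PTE_SYSTEM (1ull << 1)

/* What a set_pte_pde packet asks the engine to do: starting at `addr`,
 * emit `count` entries, adding `incr` each step and OR-ing in `flags`. */
static void expand_pte_pde(uint64_t *ptes, unsigned count,
			   uint64_t addr, uint64_t incr, uint64_t flags)
{
	for (unsigned i = 0; i < count; i++) {
		ptes[i] = addr | flags;
		addr += incr;
	}
}

int main(void)
{
	uint64_t ptes[4];

	expand_pte_pde(ptes, 4, 0x100000, 0x1000, PTE_VALID | PTE_SYSTEM);
	for (int i = 0; i < 4; i++)
		printf("pte[%d] = 0x%llx\n", i,
		       (unsigned long long)ptes[i]);
	return 0;
}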
1179 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v4_4_2_ring_pad_ib() local
1185 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v4_4_2_ring_pad_ib()
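pad_ib rounds an indirect buffer up to the engine's alignment, 8 dwords here, reusing the burst-NOP trick from insert_nop above. A sketch of the arithmetic; the counted-NOP field position is again an assumption:

#include <stdint.h>
#include <stdio.h>

/* Pad an IB so its length becomes a multiple of 8 dwords; with a
 * burst-capable engine the first padding dword is one counted NOP. */
static unsigned pad_ib(uint32_t *ib, unsigned len_dw, int burst_nop)
{
	const unsigned align_mask = 7;
	unsigned pad = (align_mask + 1 - len_dw) & align_mask;

	for (unsigned i = 0; i < pad; i++)
		ib[len_dw + i] = (burst_nop && i == 0)
			? (pad - 1) << 16 /* counted NOP, assumed field */
			: 0;              /* plain NOP, opcode 0 */
	return len_dw + pad;
}

int main(void)
{
	uint32_t ib[16] = {0};

	printf("padded length: %u dwords\n", pad_ib(ib, 5, 1));
	return 0;
}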
1216 * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
1223 * using sDMA.
1267 adev->sdma.has_page_queue = true; in sdma_v4_4_2_early_init()
1305 /* SDMA trap event */ in sdma_v4_4_2_sw_init()
1306 for (i = 0; i < adev->sdma.num_inst_per_aid; i++) { in sdma_v4_4_2_sw_init()
1309 &adev->sdma.trap_irq); in sdma_v4_4_2_sw_init()
1314 /* SDMA SRAM ECC event */ in sdma_v4_4_2_sw_init()
1315 for (i = 0; i < adev->sdma.num_inst_per_aid; i++) { in sdma_v4_4_2_sw_init()
1318 &adev->sdma.ecc_irq); in sdma_v4_4_2_sw_init()
1323 /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event*/ in sdma_v4_4_2_sw_init()
1324 for (i = 0; i < adev->sdma.num_inst_per_aid; i++) { in sdma_v4_4_2_sw_init()
1327 &adev->sdma.vm_hole_irq); in sdma_v4_4_2_sw_init()
1333 &adev->sdma.doorbell_invalid_irq); in sdma_v4_4_2_sw_init()
1339 &adev->sdma.pool_timeout_irq); in sdma_v4_4_2_sw_init()
1345 &adev->sdma.srbm_write_irq); in sdma_v4_4_2_sw_init()
1350 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_2_sw_init()
1351 ring = &adev->sdma.instance[i].ring; in sdma_v4_4_2_sw_init()
1354 aid_id = adev->sdma.instance[i].aid_id; in sdma_v4_4_2_sw_init()
1356 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, in sdma_v4_4_2_sw_init()
1363 sprintf(ring->name, "sdma%d.%d", aid_id, in sdma_v4_4_2_sw_init()
1364 i % adev->sdma.num_inst_per_aid); in sdma_v4_4_2_sw_init()
1365 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, in sdma_v4_4_2_sw_init()
1371 if (adev->sdma.has_page_queue) { in sdma_v4_4_2_sw_init()
1372 ring = &adev->sdma.instance[i].page; in sdma_v4_4_2_sw_init()
1384 i % adev->sdma.num_inst_per_aid); in sdma_v4_4_2_sw_init()
1386 &adev->sdma.trap_irq, in sdma_v4_4_2_sw_init()
1395 dev_err(adev->dev, "fail to initialize sdma ras block\n"); in sdma_v4_4_2_sw_init()
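sw_init registers the trap, SRAM ECC, VM_HOLE, doorbell-invalid, poll-timeout and SRBM-write sources once per SDMA slot within an AID, then initializes every ring and names it "sdma%d.%d" from its AID and its slot inside that AID. A sketch of the naming scheme with made-up instance counts, assuming the logical index maps straight to an AID:

#include <stdio.h>

int main(void)
{
	const int num_instances = 8, num_inst_per_aid = 2;
	char name[16];

	for (int i = 0; i < num_instances; i++) {
		/* aid_id as derived in set_ring_funcs (see below). */
		int aid_id = i / num_inst_per_aid;

		snprintf(name, sizeof(name), "sdma%d.%d",
			 aid_id, i % num_inst_per_aid);
		printf("instance %d -> %s\n", i, name);
	}
	return 0;
}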
1407 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_2_sw_fini()
1408 amdgpu_ring_fini(&adev->sdma.instance[i].ring); in sdma_v4_4_2_sw_fini()
1409 if (adev->sdma.has_page_queue) in sdma_v4_4_2_sw_fini()
1410 amdgpu_ring_fini(&adev->sdma.instance[i].page); in sdma_v4_4_2_sw_fini()
1427 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); in sdma_v4_4_2_hw_init()
1445 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); in sdma_v4_4_2_hw_fini()
1447 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_2_hw_fini()
1448 amdgpu_irq_put(adev, &adev->sdma.ecc_irq, in sdma_v4_4_2_hw_fini()
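hw_init and hw_fini build inst_mask with GENMASK so the per-instance helpers can address every SDMA engine at once. A userspace re-creation of the kernel macro and of how such a mask is typically walked:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
/* Kernel-style GENMASK(h, l): set bits h..l inclusive. */
#define GENMASK(h, l) \
	((~0UL << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	int num_instances = 4;
	unsigned long inst_mask = GENMASK(num_instances - 1, 0);

	printf("inst_mask = 0x%lx\n", inst_mask); /* 0xf */

	/* Per-instance helpers then walk the mask bit by bit. */
	for (int i = 0; i < num_instances; i++)
		if (inst_mask & (1UL << i))
			printf("programming instance %d\n", i);
	return 0;
}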
1484 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_2_is_idle()
1497 u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; in sdma_v4_4_2_wait_for_idle() local
1501 for (j = 0; j < adev->sdma.num_instances; j++) { in sdma_v4_4_2_wait_for_idle()
1502 sdma[j] = RREG32_SDMA(j, regSDMA_STATUS_REG); in sdma_v4_4_2_wait_for_idle()
1503 if (!(sdma[j] & SDMA_STATUS_REG__IDLE_MASK)) in sdma_v4_4_2_wait_for_idle()
1506 if (j == adev->sdma.num_instances) in sdma_v4_4_2_wait_for_idle()
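wait_for_idle polls every instance's status register and relies on the for-loop postcondition shown in the hits: only when the inner loop runs to completion (j == num_instances) are all IDLE bits set. A runnable model with a mocked register read standing in for RREG32_SDMA:

#include <stdio.h>

#define MAX_INST     4
#define IDLE_MASK    0x1u
#define USEC_TIMEOUT 100000

static unsigned int read_status(int inst)
{
	(void)inst;
	return IDLE_MASK; /* pretend every engine is already idle */
}

/* Poll all instances; break out of the inner loop on the first busy
 * engine, succeed only if no instance broke out early. */
static int wait_for_idle(int num)
{
	unsigned int status[MAX_INST];
	int i, j;

	for (i = 0; i < USEC_TIMEOUT; i++) {
		for (j = 0; j < num; j++) {
			status[j] = read_status(j);
			if (!(status[j] & IDLE_MASK))
				break;
		}
		if (j == num)
			return 0;
		/* udelay(1) in the real driver */
	}
	return -1; /* -ETIMEDOUT */
}

int main(void)
{
	printf("wait_for_idle: %d\n", wait_for_idle(MAX_INST));
	return 0;
}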
1541 DRM_DEBUG("IH: SDMA trap\n"); in sdma_v4_4_2_process_trap_irq()
1544 /* Client id gives the SDMA instance in AID. To know the exact SDMA in sdma_v4_4_2_process_trap_irq()
1546 * Match node id with the AID id associated with the SDMA instance. */ in sdma_v4_4_2_process_trap_irq()
1547 for (i = instance; i < adev->sdma.num_instances; in sdma_v4_4_2_process_trap_irq()
1548 i += adev->sdma.num_inst_per_aid) { in sdma_v4_4_2_process_trap_irq()
1549 if (adev->sdma.instance[i].aid_id == in sdma_v4_4_2_process_trap_irq()
1554 if (i >= adev->sdma.num_instances) { in sdma_v4_4_2_process_trap_irq()
1557 "Couldn't find the right sdma instance in trap handler"); in sdma_v4_4_2_process_trap_irq()
1563 amdgpu_fence_process(&adev->sdma.instance[i].ring); in sdma_v4_4_2_process_trap_irq()
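The trap handler's comment spells out the lookup: the IH client id only identifies the SDMA slot within an AID, so the handler starts at that slot and steps by num_inst_per_aid until it finds the instance whose AID matches the interrupt's node id. A sketch with a made-up node-to-AID mapping:

#include <stdio.h>

#define NUM_INSTANCES    8
#define NUM_INST_PER_AID 2

/* Per-instance AID ids, mirroring i / NUM_INST_PER_AID. */
static const int aid_id[NUM_INSTANCES] = { 0, 0, 1, 1, 2, 2, 3, 3 };

/* Stand-in for the driver's node-id lookup; an assumption here. */
static int node_to_aid(int node_id) { return node_id; }

static int resolve_instance(int slot_in_aid, int node_id)
{
	int i;

	/* Walk the same slot across AIDs until the node id matches. */
	for (i = slot_in_aid; i < NUM_INSTANCES; i += NUM_INST_PER_AID)
		if (aid_id[i] == node_to_aid(node_id))
			return i;
	return -1; /* "Couldn't find the right sdma instance" path */
}

int main(void)
{
	/* SDMA slot 1 on AID 2 resolves to logical instance 5. */
	printf("resolved instance: %d\n", resolve_instance(1, 2));
	return 0;
}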
1602 DRM_ERROR("Illegal instruction in SDMA command stream\n"); in sdma_v4_4_2_process_illegal_inst_irq()
1610 drm_sched_fault(&adev->sdma.instance[instance].ring.sched); in sdma_v4_4_2_process_illegal_inst_irq()
1630 /* sdma ecc interrupt is enabled by default in sdma_v4_4_2_set_ecc_irq_state()
1649 if (instance < 0 || instance >= adev->sdma.num_instances) { in sdma_v4_4_2_print_iv_entry()
1650 dev_err(adev->dev, "sdma instance invalid %d\n", instance); in sdma_v4_4_2_print_iv_entry()
1661 "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u " in sdma_v4_4_2_print_iv_entry()
1683 dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n"); in sdma_v4_4_2_process_doorbell_invalid_irq()
1703 "SDMA gets an Register Write SRBM_WRITE command in non-privilege command buffer\n"); in sdma_v4_4_2_process_srbm_write_irq()
1720 /* 1-not override: enable sdma mem light sleep */ in sdma_v4_4_2_inst_update_medium_grain_light_sleep()
1728 /* 0-override:disable sdma mem light sleep */ in sdma_v4_4_2_inst_update_medium_grain_light_sleep()
1783 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); in sdma_v4_4_2_set_clockgating_state()
1901 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_2_set_ring_funcs()
1902 adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs; in sdma_v4_4_2_set_ring_funcs()
1903 adev->sdma.instance[i].ring.me = i; in sdma_v4_4_2_set_ring_funcs()
1904 if (adev->sdma.has_page_queue) { in sdma_v4_4_2_set_ring_funcs()
1905 adev->sdma.instance[i].page.funcs = in sdma_v4_4_2_set_ring_funcs()
1907 adev->sdma.instance[i].page.me = i; in sdma_v4_4_2_set_ring_funcs()
1911 /* AID to which SDMA belongs depends on physical instance */ in sdma_v4_4_2_set_ring_funcs()
1912 adev->sdma.instance[i].aid_id = in sdma_v4_4_2_set_ring_funcs()
1913 dev_inst / adev->sdma.num_inst_per_aid; in sdma_v4_4_2_set_ring_funcs()
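Per the comment in the hits, set_ring_funcs derives aid_id from the physical device instance rather than the logical index, since partitioning can map logical instance i onto a different hardware engine. A sketch with an assumed logical-to-physical table standing in for the driver's mapping:

#include <stdio.h>

#define NUM_INSTANCES    4
#define NUM_INST_PER_AID 2

/* Assumed logical-to-physical mapping; under partitioning the physical
 * instance can differ from the logical index, which is why the AID is
 * computed from dev_inst and not from i. */
static const int phys_inst[NUM_INSTANCES] = { 4, 5, 6, 7 };

int main(void)
{
	for (int i = 0; i < NUM_INSTANCES; i++) {
		int dev_inst = phys_inst[i];
		int aid = dev_inst / NUM_INST_PER_AID;

		printf("logical %d -> physical %d -> AID %d\n",
		       i, dev_inst, aid);
	}
	return 0;
}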
1949 adev->sdma.trap_irq.num_types = adev->sdma.num_instances; in sdma_v4_4_2_set_irq_funcs()
1950 adev->sdma.ecc_irq.num_types = adev->sdma.num_instances; in sdma_v4_4_2_set_irq_funcs()
1951 adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances; in sdma_v4_4_2_set_irq_funcs()
1952 adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances; in sdma_v4_4_2_set_irq_funcs()
1953 adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances; in sdma_v4_4_2_set_irq_funcs()
1954 adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances; in sdma_v4_4_2_set_irq_funcs()
1956 adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs; in sdma_v4_4_2_set_irq_funcs()
1957 adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs; in sdma_v4_4_2_set_irq_funcs()
1958 adev->sdma.ecc_irq.funcs = &sdma_v4_4_2_ecc_irq_funcs; in sdma_v4_4_2_set_irq_funcs()
1959 adev->sdma.vm_hole_irq.funcs = &sdma_v4_4_2_vm_hole_irq_funcs; in sdma_v4_4_2_set_irq_funcs()
1960 adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs; in sdma_v4_4_2_set_irq_funcs()
1961 adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs; in sdma_v4_4_2_set_irq_funcs()
1962 adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs; in sdma_v4_4_2_set_irq_funcs()
1966 * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine
1996 * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine
2030 if (adev->sdma.has_page_queue) in sdma_v4_4_2_set_buffer_funcs()
2031 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page; in sdma_v4_4_2_set_buffer_funcs()
2033 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; in sdma_v4_4_2_set_buffer_funcs()
2050 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_2_set_vm_pte_funcs()
2051 if (adev->sdma.has_page_queue) in sdma_v4_4_2_set_vm_pte_funcs()
2052 sched = &adev->sdma.instance[i].page.sched; in sdma_v4_4_2_set_vm_pte_funcs()
2054 sched = &adev->sdma.instance[i].ring.sched; in sdma_v4_4_2_set_vm_pte_funcs()
2057 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; in sdma_v4_4_2_set_vm_pte_funcs()
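set_buffer_funcs and set_vm_pte_funcs above make the same routing decision: when the hardware has a page queue, TTM buffer moves and VM page-table updates go there instead of the gfx ring, presumably to keep housekeeping traffic off the queue that regular jobs use. A mocked-up sketch of the selection:

#include <stdbool.h>
#include <stdio.h>

struct ring { const char *name; };

/* Mock of an SDMA instance with its gfx queue and page queue. */
struct sdma_instance {
	struct ring ring; /* gfx queue */
	struct ring page; /* page queue */
};

int main(void)
{
	bool has_page_queue = true;
	struct sdma_instance inst = {
		.ring = { "sdma0.0 gfx"  },
		.page = { "sdma0.0 page" },
	};

	/* Prefer the page queue when the hardware provides one. */
	struct ring *buffer_funcs_ring =
		has_page_queue ? &inst.page : &inst.ring;

	printf("TTM buffer moves on: %s\n", buffer_funcs_ring->name);
	return 0;
}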
2089 amdgpu_irq_put(adev, &adev->sdma.ecc_irq, in sdma_v4_4_2_xcp_suspend()
2107 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SDMA"},
2146 .die_id = adev->sdma.instance[sdma_inst].aid_id, in sdma_v4_4_2_inst_query_ras_error_count()
2149 /* sdma v4_4_2 doesn't support query ce counts */ in sdma_v4_4_2_inst_query_ras_error_count()
2168 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); in sdma_v4_4_2_query_ras_error_count()
2173 dev_warn(adev->dev, "SDMA RAS is not supported\n"); in sdma_v4_4_2_query_ras_error_count()
2193 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); in sdma_v4_4_2_reset_ras_error_count()
2198 dev_warn(adev->dev, "SDMA RAS is not supported\n"); in sdma_v4_4_2_reset_ras_error_count()
2215 adev->sdma.ras = &sdma_v4_4_2_ras; in sdma_v4_4_2_set_ras_funcs()