
Searched refs:kiq (Results 1 – 25 of 25) sorted by relevance

/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_gfx.c
   305 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_kiq_init_ring() local
   306 struct amdgpu_irq_src *irq = &kiq->irq; in amdgpu_gfx_kiq_init_ring()
   307 struct amdgpu_ring *ring = &kiq->ring; in amdgpu_gfx_kiq_init_ring()
   310 spin_lock_init(&kiq->ring_lock); in amdgpu_gfx_kiq_init_ring()
   318 (adev->doorbell_index.kiq + in amdgpu_gfx_kiq_init_ring()
   326 ring->eop_gpu_addr = kiq->eop_gpu_addr; in amdgpu_gfx_kiq_init_ring()
   334 dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r); in amdgpu_gfx_kiq_init_ring()
   346 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_kiq_fini() local
   356 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_kiq_init() local
   382 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_mqd_sw_init() local
   473 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_mqd_sw_fini() local
   503 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_disable_kcq() local
   553 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_disable_kgq() local
   620 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_mes_enable_kcq() local
   658 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_enable_kcq() local
   722 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_gfx_enable_kgq() local
  1062 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_kiq_rreg() local
  1133 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; in amdgpu_kiq_wreg() local
[all...]
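
The amdgpu_kiq_rreg()/amdgpu_kiq_wreg() hits above capture the core KIQ usage pattern: each GC instance (XCC) owns one KIQ, and its ring is shared driver-wide, so every submission is serialized on ring_lock. A condensed sketch of the read path, assuming the amdgpu driver headers; the fence emission and wait that the upstream function performs are elided:

    /* Illustrative sketch, not the upstream body of amdgpu_kiq_rreg(). */
    static u32 kiq_rreg_sketch(struct amdgpu_device *adev, u32 reg, int xcc_id)
    {
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; /* per-XCC KIQ */
        struct amdgpu_ring *ring = &kiq->ring;
        unsigned long flags;

        spin_lock_irqsave(&kiq->ring_lock, flags); /* ring is shared */
        amdgpu_ring_alloc(ring, 32);               /* reserve dwords */
        amdgpu_ring_emit_rreg(ring, reg, kiq->reg_val_offs);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        /* upstream waits on a fence before trusting this writeback */
        return adev->wb.wb[kiq->reg_val_offs];
    }
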
amdgpu_gmc.c
   718 struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; in amdgpu_gmc_flush_gpu_tlb_pasid()
   719 struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; in amdgpu_gmc_flush_gpu_tlb_pasid() local
   748 ndw = kiq->pmf->invalidate_tlbs_size + 8; in amdgpu_gmc_flush_gpu_tlb_pasid()
   751 ndw += kiq->pmf->invalidate_tlbs_size; in amdgpu_gmc_flush_gpu_tlb_pasid()
   754 ndw += kiq->pmf->invalidate_tlbs_size; in amdgpu_gmc_flush_gpu_tlb_pasid()
   756 spin_lock(&adev->gfx.kiq[inst].ring_lock); in amdgpu_gmc_flush_gpu_tlb_pasid()
   759 spin_unlock(&adev->gfx.kiq[inst].ring_lock); in amdgpu_gmc_flush_gpu_tlb_pasid()
   763 kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); in amdgpu_gmc_flush_gpu_tlb_pasid()
   766 kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub); in amdgpu_gmc_flush_gpu_tlb_pasid()
   805 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst]; in amdgpu_gmc_fw_reg_write_reg_wait() local
[all...]
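
The amdgpu_gmc_flush_gpu_tlb_pasid() hits show how a PASID TLB flush is routed through the KIQ: the submission is sized from the ASIC's pm4-funcs table, then the invalidate packet is emitted under ring_lock. A trimmed sketch, assuming amdgpu driver headers; fence handling and the extra flush-type branches behind lines 751/754 are elided:

    static void tlb_flush_pasid_sketch(struct amdgpu_device *adev, int inst,
                                       u16 pasid, u32 flush_type, bool all_hub)
    {
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
        struct amdgpu_ring *ring = &kiq->ring;
        /* packet budget comes from the ASIC's kiq pm4 callback table */
        u32 ndw = kiq->pmf->invalidate_tlbs_size + 8; /* +8 for the fence */

        spin_lock(&kiq->ring_lock);
        amdgpu_ring_alloc(ring, ndw);
        kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
        amdgpu_ring_commit(ring);
        spin_unlock(&kiq->ring_lock);
    }
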
mes_v11_0.c
  1240 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; in mes_v11_0_kiq_enable_queue() local
  1241 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; in mes_v11_0_kiq_enable_queue()
  1244 if (!kiq->pmf || !kiq->pmf->kiq_map_queues) in mes_v11_0_kiq_enable_queue()
  1247 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); in mes_v11_0_kiq_enable_queue()
  1253 kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]); in mes_v11_0_kiq_enable_queue()
  1265 ring = &adev->gfx.kiq[0].ring; in mes_v11_0_queue_init()
  1320 spin_lock_init(&adev->gfx.kiq[0].ring_lock); in mes_v11_0_kiq_ring_init()
  1322 ring = &adev->gfx.kiq[0].ring; in mes_v11_0_kiq_ring_init()
[all...]
mes_v12_0.c
  1329 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; in mes_v12_0_kiq_enable_queue() local
  1330 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; in mes_v12_0_kiq_enable_queue()
  1333 if (!kiq->pmf || !kiq->pmf->kiq_map_queues) in mes_v12_0_kiq_enable_queue()
  1336 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); in mes_v12_0_kiq_enable_queue()
  1342 kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]); in mes_v12_0_kiq_enable_queue()
  1359 ring = &adev->gfx.kiq[0].ring; in mes_v12_0_queue_init()
  1434 spin_lock_init(&adev->gfx.kiq[0].ring_lock); in mes_v12_0_kiq_ring_init()
  1436 ring = &adev->gfx.kiq[0].ring; in mes_v12_0_kiq_ring_init()
[all...]
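
mes_v11_0.c and mes_v12_0.c share this enable-queue shape: verify that the ASIC registered KIQ pm4 callbacks, reserve ring space sized by the callback table, and let the callback encode a MAP_QUEUES packet for the MES ring. A trimmed sketch, assuming amdgpu driver headers; upstream finishes with a KIQ ring test rather than the plain commit shown here:

    static int mes_kiq_enable_queue_sketch(struct amdgpu_device *adev)
    {
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        int r;

        if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
            return -EINVAL; /* ASIC never registered KIQ pm4 callbacks */

        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
        if (r)
            return r;

        /* encode MAP_QUEUES for the MES scheduler ring, then submit */
        kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);
        amdgpu_ring_commit(kiq_ring);
        return 0;
    }
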
gfx_v9_0.c
  1093 adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs; in gfx_v9_0_set_kiq_pm4_funcs()
  2469 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); in gfx_v9_0_sw_fini()
  3470 adev->gfx.kiq[0].ring.sched.ready = false; in gfx_v9_0_cp_compute_enable()
  3667 * so only kiq need set this field. in gfx_v9_0_mqd_init()
  3750 (adev->doorbell_index.kiq * 2) << 2); in gfx_v9_0_kiq_init_register()
  3841 tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup; in gfx_v9_0_kiq_init_queue()
  3844 if (adev->gfx.kiq[0].mqd_backup) in gfx_v9_0_kiq_init_queue()
  3845 memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation)); in gfx_v9_0_kiq_init_queue()
  3869 if (adev->gfx.kiq[0].mqd_backup) in gfx_v9_0_kiq_init_queue()
  3870 memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation)); in gfx_v9_0_kiq_init_queue()
  4201 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; in gfx_v9_0_kiq_read_clock() local
  5707 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; in gfx_v9_0_ring_preempt_ib() local
  7181 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; in gfx_v9_0_reset_kcq() local
[all...]
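
The gfx_v9_0_kiq_init_queue() hits (3841-3870) show the MQD backup convention that gfx_v9_4_3.c and gfx_v8_0.c below repeat, per XCC and with struct vi_mqd_allocation respectively: the freshly built MQD is stashed in mqd_backup on first init, and restored from there on GPU reset instead of being rebuilt. A sketch; the helper name, signature, and the elided MQD build are assumptions:

    static void kiq_mqd_backup_sketch(struct amdgpu_device *adev,
                                      struct v9_mqd_allocation *mqd)
    {
        if (amdgpu_in_reset(adev)) {
            /* reset path: restore the pristine copy saved at first init */
            if (adev->gfx.kiq[0].mqd_backup)
                memcpy(mqd, adev->gfx.kiq[0].mqd_backup,
                       sizeof(struct v9_mqd_allocation));
        } else {
            /* first init: build the MQD (elided), then stash a backup */
            if (adev->gfx.kiq[0].mqd_backup)
                memcpy(adev->gfx.kiq[0].mqd_backup, mqd,
                       sizeof(struct v9_mqd_allocation));
        }
    }
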
gfx_v9_4_3.c
   344 adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs; in gfx_v9_4_3_set_kiq_pm4_funcs()
  1195 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring); in gfx_v9_4_3_sw_fini()
  1744 adev->gfx.kiq[xcc_id].ring.sched.ready = false; in gfx_v9_4_3_xcc_cp_compute_enable()
  1946 * so only kiq need set this field. in gfx_v9_4_3_xcc_mqd_init()
  2032 ((adev->doorbell_index.kiq + in gfx_v9_4_3_xcc_kiq_init_register()
  2121 tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup; in gfx_v9_4_3_xcc_kiq_init_queue()
  2124 if (adev->gfx.kiq[xcc_id].mqd_backup) in gfx_v9_4_3_xcc_kiq_init_queue()
  2125 memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation)); in gfx_v9_4_3_xcc_kiq_init_queue()
  2148 if (adev->gfx.kiq[xcc_id].mqd_backup) in gfx_v9_4_3_xcc_kiq_init_queue()
  2149 memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation)); in gfx_v9_4_3_xcc_kiq_init_queue()
  3563 struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id]; in gfx_v9_4_3_reset_kcq() local
[all...]
amdgpu_amdkfd_gfx_v10_3.c
   280 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; in hiq_mqd_load_v10_3()
   295 spin_lock(&adev->gfx.kiq[0].ring_lock); in hiq_mqd_load_v10_3()
   322 spin_unlock(&adev->gfx.kiq[0].ring_lock); in hiq_mqd_load_v10_3()
vega10_reg_init.c
    60 adev->doorbell_index.kiq = AMDGPU_DOORBELL64_KIQ; in vega10_doorbell_index_init()
amdgpu_amdkfd_gfx_v11.c
   265 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; in hiq_mqd_load_v11()
   280 spin_lock(&adev->gfx.kiq[0].ring_lock); in hiq_mqd_load_v11()
   307 spin_unlock(&adev->gfx.kiq[0].ring_lock); in hiq_mqd_load_v11()
vega20_reg_init.c
    60 adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ; in vega20_doorbell_index_init()
amdgpu_doorbell.h
    52 uint32_t kiq; member
   174 /* kiq/kcq from second XCD. Max 8 XCDs */
   244 * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
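
The kiq member above feeds the (adev->doorbell_index.kiq * 2) << 2 expression in the gfx v9/v12 hits: on those ASICs the index counts 64-bit doorbell slots, so doubling yields a 32-bit dword index and the shift converts dwords to bytes. gfx_v8_0.c (line 4652 below) indexes 32-bit slots directly, leaving only the << 2. A hypothetical helper making the arithmetic explicit:

    /* hypothetical helper: byte offset of the KIQ doorbell (gfx9-style) */
    static u32 kiq_doorbell_byte_offset(struct amdgpu_device *adev)
    {
        return (adev->doorbell_index.kiq * 2) << 2; /* slots -> dwords -> bytes */
    }
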
gfx_v12_0.c
   358 if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) { in gfx_v12_0_kiq_unmap_queues()
   426 adev->gfx.kiq[0].pmf = &gfx_v12_0_kiq_pm4_funcs; in gfx_v12_0_set_kiq_pm4_funcs()
  1639 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); in gfx_v12_0_sw_fini()
  2800 adev->gfx.kiq[0].ring.sched.ready = enable; in gfx_v12_0_cp_compute_enable()
  2959 (adev->doorbell_index.kiq * 2) << 2); in gfx_v12_0_cp_set_doorbell_range()
  3320 (adev->doorbell_index.kiq * 2) << 2); in gfx_v12_0_kiq_init_register()
  3421 gfx_v12_0_kiq_init_queue(&adev->gfx.kiq[0].ring); in gfx_v12_0_kiq_resume()
  3422 adev->gfx.kiq[0].ring.sched.ready = true; in gfx_v12_0_kiq_resume()
  4590 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; in gfx_v12_0_ring_preempt_ib() local
[all...]
amdgpu_amdkfd_gfx_v10.c
   294 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; in kgd_hiq_mqd_load()
   309 spin_lock(&adev->gfx.kiq[0].ring_lock); in kgd_hiq_mqd_load()
   336 spin_unlock(&adev->gfx.kiq[0].ring_lock); in kgd_hiq_mqd_load()
amdgpu_amdkfd_gfx_v9.c
   305 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[inst].ring; in kgd_gfx_v9_hiq_mqd_load()
   320 spin_lock(&adev->gfx.kiq[inst].ring_lock); in kgd_gfx_v9_hiq_mqd_load()
   347 spin_unlock(&adev->gfx.kiq[inst].ring_lock); in kgd_gfx_v9_hiq_mqd_load()
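
All four amdkfd hiq_mqd_load hits (gfx v9, v10, v10_3, v11) follow one shape: KFD maps its HIQ by submitting a MAP_QUEUES packet on the graphics driver's KIQ ring, taking the same ring_lock the gfx code uses. A sketch with the packet emission reduced to a comment; the 7-dword allocation is an assumption sized for one PACKET3_MAP_QUEUES:

    static int hiq_mqd_load_sketch(struct amdgpu_device *adev, int inst)
    {
        struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[inst].ring;
        int r;

        spin_lock(&adev->gfx.kiq[inst].ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, 7); /* room for one MAP_QUEUES */
        if (!r) {
            /* emit PACKET3_MAP_QUEUES describing the HIQ MQD here */
            amdgpu_ring_commit(kiq_ring);
        }
        spin_unlock(&adev->gfx.kiq[inst].ring_lock);
        return r;
    }
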
gfx_v8_0.c
  2041 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); in gfx_v8_0_sw_fini()
  4282 adev->gfx.kiq[0].ring.sched.ready = false; in gfx_v8_0_cp_compute_enable()
  4302 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; in gfx_v8_0_kiq_kcq_enable()
  4534 * so only kiq need set this field. in gfx_v8_0_mqd_init()
  4588 if (adev->gfx.kiq[0].mqd_backup) in gfx_v8_0_kiq_init_queue()
  4589 memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation)); in gfx_v8_0_kiq_init_queue()
  4612 if (adev->gfx.kiq[0].mqd_backup) in gfx_v8_0_kiq_init_queue()
  4613 memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation)); in gfx_v8_0_kiq_init_queue()
  4652 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2); in gfx_v8_0_set_mec_doorbell_range()
  4661 gfx_v8_0_kiq_init_queue(&adev->gfx.kiq[0].ring); in gfx_v8_0_kiq_resume()
[all...]
soc24.c
   297 adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ; in soc24_init_doorbell_index()
amdgpu_gfx.h
   129 /* Support ASIC-specific kiq pm4 packets*/
   415 struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES]; member
gmc_v12_0.c
   302 if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) && in gmc_v12_0_flush_gpu_tlb()
gmc_v11_0.c
   234 if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) && in gmc_v11_0_flush_gpu_tlb()
gmc_v10_0.c
   273 if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes && in gmc_v10_0_flush_gpu_tlb()
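
The three gmc hits above share a gate in flush_gpu_tlb(): the packet-based path is taken only when the KIQ ring (or, on gmc v11/v12, the MES ring) is actually scheduling; otherwise the driver invalidates via direct MMIO writes. Condensed into a predicate; the upstream SR-IOV and reset-domain checks are elided:

    static bool can_flush_tlb_via_ring(struct amdgpu_device *adev)
    {
        /* gmc_v11/v12 accept either ring; gmc_v10 also requires !enable_mes */
        return adev->gfx.kiq[0].ring.sched.ready ||
               adev->mes.ring[0].sched.ready;
    }
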
soc21.c
   482 adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ; in soc21_init_doorbell_index()
nv.c
   561 adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ; in nv_init_doorbell_index()
aqua_vanjaram.c
    41 adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START; in aqua_vanjaram_doorbell_index_init()
gmc_v9_0.c
   857 if (adev->gfx.kiq[inst].ring.sched.ready && in gmc_v9_0_flush_gpu_tlb()
vi.c
  2184 adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ; in legacy_doorbell_index_init()