/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @size: requested IB size
 * @pool_type: IB pool type (delayed, immediate, direct)
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                  unsigned int size, enum amdgpu_ib_pool_type pool_type,
                  struct amdgpu_ib *ib)
{
        int r;

        if (size) {
                r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
                                     &ib->sa_bo, size);
                if (r) {
                        dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
                        return r;
                }

                ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
                /* flush the cache before committing the IB */
                ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;

                if (!vm)
                        ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
        }

        return 0;
}
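/*
 * Example (illustrative sketch, not part of the driver): a caller that
 * needs a small kernel-owned IB might request 256 bytes from the delayed
 * pool and fill it with packets.  "my_emit_packets" is a hypothetical
 * helper that returns the number of dwords written.
 *
 *      struct amdgpu_ib ib;
 *      int r;
 *
 *      memset(&ib, 0, sizeof(ib));
 *      r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DELAYED, &ib);
 *      if (r)
 *              return r;
 *      ib.length_dw = my_emit_packets(ib.ptr);
 */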
/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence the suballocation must wait on before the memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                    struct dma_fence *f)
{
        amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are associated with
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job to schedule
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring:
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
                       struct amdgpu_ib *ibs, struct amdgpu_job *job,
                       struct dma_fence **f)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib *ib = &ibs[0];
        struct dma_fence *tmp = NULL;
        bool need_ctx_switch;
        unsigned int patch_offset = ~0;
        struct amdgpu_vm *vm;
        uint64_t fence_ctx;
        uint32_t status = 0, alloc_size;
        unsigned int fence_flags = 0;
        bool secure, init_shadow;
        u64 shadow_va, csa_va, gds_va;
        int vmid = AMDGPU_JOB_GET_VMID(job);
        bool need_pipe_sync = false;
        unsigned int i;
        int r = 0;

        if (num_ibs == 0)
                return -EINVAL;
        /* ring tests don't use a job */
        if (job) {
                vm = job->vm;
                fence_ctx = job->base.s_fence ?
                        job->base.s_fence->scheduled.context : 0;
                shadow_va = job->shadow_va;
                csa_va = job->csa_va;
                gds_va = job->gds_va;
                init_shadow = job->init_shadow;
        } else {
                vm = NULL;
                fence_ctx = 0;
                shadow_va = 0;
                csa_va = 0;
                gds_va = 0;
                init_shadow = false;
        }
        if (!ring->sched.ready && !ring->is_mes_queue) {
                dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
                return -EINVAL;
        }

        if (vm && !job->vmid && !ring->is_mes_queue) {
                dev_err(adev->dev, "VM IB without ID\n");
                return -EINVAL;
        }

        if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
            !ring->funcs->secure_submission_supported) {
                dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
                return -EINVAL;
        }
        alloc_size = ring->funcs->emit_frame_size + num_ibs *
                ring->funcs->emit_ib_size;

        r = amdgpu_ring_alloc(ring, alloc_size);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }
        need_ctx_switch = ring->current_ctx != fence_ctx;
        if (ring->funcs->emit_pipeline_sync && job &&
            ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
             (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
             amdgpu_vm_need_pipeline_sync(ring, job))) {
                need_pipe_sync = true;

                if (tmp)
                        trace_amdgpu_ib_pipe_sync(job, tmp);

                dma_fence_put(tmp);
        }
        if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
                ring->funcs->emit_mem_sync(ring);

        if (ring->funcs->emit_wave_limit &&
            ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
                ring->funcs->emit_wave_limit(ring, true);

        if (ring->funcs->insert_start)
                ring->funcs->insert_start(ring);
        if (job) {
                r = amdgpu_vm_flush(ring, job, need_pipe_sync);
                if (r) {
                        amdgpu_ring_undo(ring);
                        return r;
                }
        }

        amdgpu_ring_ib_begin(ring);
        if (ring->funcs->emit_gfx_shadow)
                amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
                                            init_shadow, vmid);

        if (ring->funcs->init_cond_exec)
                patch_offset = amdgpu_ring_init_cond_exec(ring);

        amdgpu_device_flush_hdp(adev, ring);
        if (need_ctx_switch)
                status |= AMDGPU_HAVE_CTX_SWITCH;

        if (job && ring->funcs->emit_cntxcntl) {
                status |= job->preamble_status;
                status |= job->preemption_status;
                amdgpu_ring_emit_cntxcntl(ring, status);
        }
        /* Setup initial TMZiness and send it off. */
        secure = false;
        if (job && ring->funcs->emit_frame_cntl) {
                secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
                amdgpu_ring_emit_frame_cntl(ring, true, secure);
        }
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];

                if (job && ring->funcs->emit_frame_cntl) {
                        if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
                                amdgpu_ring_emit_frame_cntl(ring, false, secure);
                                secure = !secure;
                                amdgpu_ring_emit_frame_cntl(ring, true, secure);
                        }
                }

                amdgpu_ring_emit_ib(ring, job, ib, status);
                status &= ~AMDGPU_HAVE_CTX_SWITCH;
        }
        if (job && ring->funcs->emit_frame_cntl)
                amdgpu_ring_emit_frame_cntl(ring, false, secure);
        amdgpu_device_invalidate_hdp(adev, ring);

        if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
                fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

        /* wrap the last IB with fence */
        if (job && job->uf_addr) {
                amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
                                       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
        }
        if (ring->funcs->emit_gfx_shadow) {
                amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);

                if (ring->funcs->init_cond_exec) {
                        unsigned int ce_offset = ~0;

                        ce_offset = amdgpu_ring_init_cond_exec(ring);
                        if (ce_offset != ~0 && ring->funcs->patch_cond_exec)
                                amdgpu_ring_patch_cond_exec(ring, ce_offset);
                }
        }
        r = amdgpu_fence_emit(ring, f, job, fence_flags);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vmid)
                        amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
                amdgpu_ring_undo(ring);
                return r;
        }
        if (ring->funcs->insert_end)
                ring->funcs->insert_end(ring);

        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);

        ring->current_ctx = fence_ctx;
        if (vm && ring->funcs->emit_switch_buffer)
                amdgpu_ring_emit_switch_buffer(ring);
        if (ring->funcs->emit_wave_limit &&
            ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
                ring->funcs->emit_wave_limit(ring, false);
        amdgpu_ring_ib_end(ring);
        amdgpu_ring_commit(ring);
        return 0;
}
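/*
 * Example (illustrative sketch, not part of the driver): a ring-test
 * style submission without a job, similar to what the per-IP test_ib
 * callbacks do.  The IB is padded with the ring's nop packet, scheduled
 * directly, and freed once the fence signals.  Error handling is
 * abbreviated; "timeout" is a jiffies value chosen by the caller.
 *
 *      struct amdgpu_ib ib;
 *      struct dma_fence *f = NULL;
 *      int i, r;
 *
 *      memset(&ib, 0, sizeof(ib));
 *      r = amdgpu_ib_get(adev, NULL, 64, AMDGPU_IB_POOL_DIRECT, &ib);
 *      if (r)
 *              return r;
 *
 *      for (i = 0; i < 16; ++i)
 *              ib.ptr[i] = ring->funcs->nop;
 *      ib.length_dw = 16;
 *
 *      r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *      if (!r)
 *              r = dma_fence_wait_timeout(f, false, timeout) > 0 ?
 *                      0 : -ETIMEDOUT;
 *
 *      amdgpu_ib_free(adev, &ib, f);
 *      dma_fence_put(f);
 *      return r;
 */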
/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
        int r, i;

        if (adev->ib_pool_ready)
                return 0;

        for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
                r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
                                              AMDGPU_IB_POOL_SIZE, 256,
                                              AMDGPU_GEM_DOMAIN_GTT);
                if (r)
                        goto error;
        }
        adev->ib_pool_ready = true;

        return 0;
error:
        while (i--)
                amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
        return r;
}
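/*
 * Example (illustrative sketch): the pools are created once during
 * device bring-up, before any amdgpu_ib_get() call, and torn down again
 * at unload with amdgpu_ib_pool_fini() below.
 *
 *      r = amdgpu_ib_pool_init(adev);
 *      if (r)
 *              return r;
 *      ...
 *      amdgpu_ib_pool_fini(adev);
 */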
/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
        int i;

        if (!adev->ib_pool_ready)
                return;

        for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
                amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
        adev->ib_pool_ready = false;
}
/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
        long tmo_gfx, tmo_mm;
        int r, ret = 0;
        unsigned int i;

        tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
        if (amdgpu_sriov_vf(adev)) {
                /* On the hypervisor side the MM engines are not scheduled
                 * together with the CP and SDMA engines, so even in exclusive
                 * mode an MM engine may still be serving another VF.  The IB
                 * test timeout for MM engines under SR-IOV should therefore
                 * be set to a long time; 8 sec should be enough for the MM
                 * engine to come back to this VF.
                 */
                tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
        }

        if (amdgpu_sriov_runtime(adev)) {
                /* The CP and SDMA engines are scheduled together, so the
                 * timeout needs to be wide enough to also cover the process
                 * synchronization state.
                 */
                tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
        } else if (adev->gmc.xgmi.hive_id) {
                tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
        }
        for (i = 0; i < adev->num_rings; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                long tmo;

                /* KIQ rings don't have an IB test because we never submit
                 * IBs to them and they have no interrupt support.
                 */
                if (!ring->sched.ready || !ring->funcs->test_ib)
                        continue;

                if (adev->enable_mes &&
                    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
                        continue;

                /* MM engines need more time */
                if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
                    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
                        tmo = tmo_mm;
                else
                        tmo = tmo_gfx;

                r = amdgpu_ring_test_ib(ring, tmo);
                if (!r) {
                        DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
                                      ring->name);
                        continue;
                }
                ring->sched.ready = false;
                DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
                              ring->name, r);

                if (ring == &adev->gfx.gfx_ring[0]) {
                        /* oh, oh, that's really bad */
                        adev->accel_working = false;
                        return r;

                /* still not good, but we can live with it */
                } else if (!ret) {
                        ret = r;
                }
        }
        return ret;
}
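/*
 * Example (illustrative sketch): callers such as the device init and
 * reset paths run the IB tests once the rings are brought up and treat
 * a non-zero return as a fatal bring-up failure.
 *
 *      r = amdgpu_ib_ring_tests(adev);
 *      if (r)
 *              dev_err(adev->dev, "IB ring tests failed (%d)\n", r);
 */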
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
        struct amdgpu_device *adev = m->private;

        seq_puts(m, "--------------------- DELAYED ---------------------\n");
        amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
                                     m);
        seq_puts(m, "-------------------- IMMEDIATE --------------------\n");
        amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
                                     m);
        seq_puts(m, "--------------------- DIRECT ----------------------\n");
        amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_sa_info);

#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;

        debugfs_create_file("amdgpu_sa_info", 0444, root, adev,
                            &amdgpu_debugfs_sa_info_fops);
#endif
}
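/*
 * With debugfs enabled, the suballocator state exported above can be
 * inspected from userspace under the DRM debugfs root (the minor number
 * varies per device), assuming the node name registered above:
 *
 *      # cat /sys/kernel/debug/dri/0/amdgpu_sa_info
 */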