Lines Matching +full:0 +full:- +full:job +full:- +full:ring

58 #define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
62 #define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
92 /* Direct submission to the ring buffer during init and reset. */
117 /* sync_seq is protected by ring emission lock */
131 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
132 void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
133 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
135 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
136 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
137 struct amdgpu_irq_src *irq_src,
138 unsigned irq_type);
143 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
144 unsigned flags);
145 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
146 uint32_t timeout);
147 bool amdgpu_fence_process(struct amdgpu_ring *ring);
148 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
149 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
150 uint32_t wait_seq,
151 signed long timeout);
152 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
156 u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring);
157 void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
158 ktime_t timestamp);
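
The fence entry points above follow an emit-then-wait pattern. A minimal sketch of the polling variant, assuming an initialized ring; the helper name and both timeout values are illustrative, not from the header:

static int example_fence_roundtrip(struct amdgpu_ring *ring)
{
	uint32_t seq;
	int r;

	/* Reserve ring space, then emit a fence that is polled, not interrupt driven. */
	r = amdgpu_ring_alloc(ring, 32);
	if (r)
		return r;

	r = amdgpu_fence_emit_polling(ring, &seq, 10);
	if (r) {
		amdgpu_ring_undo(ring);
		return r;
	}
	amdgpu_ring_commit(ring);

	/* amdgpu_fence_wait_polling() returns the remaining timeout, or 0 if it expired. */
	if (amdgpu_fence_wait_polling(ring, seq, 100000) < 1)
		return -ETIME;
	return 0;
}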
164 /* provided by hw blocks that expose a ring buffer for commands */
174 /* ring read/write ptr handling */
175 u64 (*get_rptr)(struct amdgpu_ring *ring);
176 u64 (*get_wptr)(struct amdgpu_ring *ring);
177 void (*set_wptr)(struct amdgpu_ring *ring);
179 int (*parse_cs)(struct amdgpu_cs_parser *p,
180 struct amdgpu_job *job,
181 struct amdgpu_ib *ib);
182 int (*patch_cs_in_place)(struct amdgpu_cs_parser *p,
183 struct amdgpu_job *job,
184 struct amdgpu_ib *ib);
189 void (*emit_ib)(struct amdgpu_ring *ring,
190 struct amdgpu_job *job,
191 struct amdgpu_ib *ib,
192 uint32_t flags);
193 void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
194 uint64_t seq, unsigned flags);
195 void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
196 void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
197 uint64_t pd_addr);
198 void (*emit_hdp_flush)(struct amdgpu_ring *ring);
199 void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
200 uint32_t gds_base, uint32_t gds_size,
201 uint32_t gws_base, uint32_t gws_size,
202 uint32_t oa_base, uint32_t oa_size);
204 int (*test_ring)(struct amdgpu_ring *ring);
205 int (*test_ib)(struct amdgpu_ring *ring, long timeout);
207 void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
208 void (*insert_start)(struct amdgpu_ring *ring);
209 void (*insert_end)(struct amdgpu_ring *ring);
211 void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
212 unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
213 void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
215 void (*begin_use)(struct amdgpu_ring *ring);
216 void (*end_use)(struct amdgpu_ring *ring);
217 void (*emit_switch_buffer) (struct amdgpu_ring *ring);
218 void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
219 void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
220 u64 gds_va, bool init_shadow, int vmid);
221 void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
222 uint32_t reg_val_offs);
223 void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
224 void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
225 uint32_t val, uint32_t mask);
226 void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
227 uint32_t reg0, uint32_t reg1,
228 uint32_t val, uint32_t mask);
229 void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
230 bool secure);
231 /* Try to soft recover the ring to make the fence signal */
232 void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
233 int (*preempt_ib)(struct amdgpu_ring *ring);
234 void (*emit_mem_sync)(struct amdgpu_ring *ring);
235 void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
236 void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
237 void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
238 void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
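
Each IP block fills this table with its own callbacks, and the core dispatches only through the pointers; unset hooks stay NULL and callers must check for them. A sketch of such a table with placeholder callback names (only amdgpu_ring_insert_nop and amdgpu_ring_generic_pad_ib, declared further below, are real generic helpers):

static const struct amdgpu_ring_funcs example_ring_funcs = {
	.type = AMDGPU_RING_TYPE_GFX,		/* illustrative ring type */
	.get_rptr = example_get_rptr,		/* placeholder callbacks */
	.get_wptr = example_get_wptr,
	.set_wptr = example_set_wptr,
	.emit_ib = example_emit_ib,
	.emit_fence = example_emit_fence,
	.insert_nop = amdgpu_ring_insert_nop,	/* generic helper */
	.pad_ib = amdgpu_ring_generic_pad_ib,	/* generic helper */
};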
248 volatile uint32_t *ring;
308 #define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
309 #define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
310 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
311 #define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
312 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
313 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
314 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
315 #define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
316 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
317 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
318 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
319 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
320 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
321 #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
322 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
323 #define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v)))
324 #define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
325 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
326 #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
327 #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
328 #define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
329 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
330 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
331 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
332 #define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
333 #define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
334 #define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
335 #define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
338 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
339 void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
340 void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
341 void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
342 void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
343 void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);
345 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
346 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
347 void amdgpu_ring_commit(struct amdgpu_ring *ring);
348 void amdgpu_ring_undo(struct amdgpu_ring *ring);
349 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
350 unsigned int max_dw, struct amdgpu_irq_src *irq_src,
351 unsigned int irq_type, unsigned int hw_prio,
352 atomic_t *sched_score);
353 void amdgpu_ring_fini(struct amdgpu_ring *ring);
354 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
355 uint32_t reg0, uint32_t val0,
356 uint32_t reg1, uint32_t val1);
357 bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
358 struct dma_fence *fence);
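
The wrappers and helpers above compose into the usual emission sequence: reserve space, emit packets through the per-IP hooks, then publish the new write pointer. A condensed sketch; the 256-dword estimate is assumed here, and real callers size it from the frame-size constants in the funcs table:

static int example_submit(struct amdgpu_ring *ring, struct amdgpu_job *job,
			  struct amdgpu_ib *ib)
{
	int r;

	r = amdgpu_ring_alloc(ring, 256);	/* dword estimate, assumed */
	if (r)
		return r;

	if (ring->funcs->emit_hdp_flush)	/* per-IP hook, may be NULL */
		amdgpu_ring_emit_hdp_flush(ring);
	amdgpu_ring_emit_pipeline_sync(ring);
	amdgpu_ring_emit_ib(ring, job, ib, 0);
	amdgpu_ring_commit(ring);		/* pads the ring and bumps the hw wptr */
	return 0;
}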
360 static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
361 bool cond_exec)
362 {
363 *ring->cond_exe_cpu_addr = cond_exec;
364 }
366 static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
367 {
368 int i = 0;
369 while (i <= ring->buf_mask)
370 ring->ring[i++] = ring->funcs->nop;
371 }
374 static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
375 {
376 if (ring->count_dw <= 0)
377 DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
378 ring->ring[ring->wptr++ & ring->buf_mask] = v;
379 ring->wptr &= ring->ptr_mask;
380 ring->count_dw--;
381 }
383 static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
384 void *src, int count_dw)
385 {
386 unsigned occupied, chunk1, chunk2;
387 void *dst;
389 if (unlikely(ring->count_dw < count_dw))
390 DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
392 occupied = ring->wptr & ring->buf_mask;
393 dst = (void *)&ring->ring[occupied];
394 chunk1 = ring->buf_mask + 1 - occupied;
395 chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
396 chunk2 = count_dw - chunk1;
397 chunk1 <<= 2;
398 chunk2 <<= 2;
400 if (chunk1)
401 memcpy(dst, src, chunk1);
403 if (chunk2) {
404 src += chunk1;
405 dst = (void *)ring->ring;
406 memcpy(dst, src, chunk2);
407 }
409 ring->wptr += count_dw;
410 ring->wptr &= ring->ptr_mask;
411 ring->count_dw -= count_dw;
412 }
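
The split copy above handles write-pointer wraparound: with a 1024-dword ring (buf_mask 0x3ff), wptr at 1000 and count_dw of 100, chunk1 covers the 24 dwords up to the end of the buffer and chunk2 the remaining 76 dwords at its base; the shifts by 2 convert dword counts into the byte sizes memcpy expects.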
414 #define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \
415 (ring->is_mes_queue && ring->mes_ctx ? \
416 (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)
418 #define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \
419 (ring->is_mes_queue && ring->mes_ctx ? \
420 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
421 NULL)
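
Both helpers degrade to 0/NULL when the ring is not a MES queue, so callers can use them unconditionally. A hypothetical use, where fence_offs is an illustrative offset into the context metadata, not a real field:

	u64 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, fence_offs);
	u32 *cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, fence_offs);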
423 int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
425 void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
426 struct amdgpu_ring *ring);
428 int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);
430 static inline uint32_t amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
431 {
432 return ib->ptr[idx];
433 }
435 static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
436 uint32_t value)
437 {
438 ib->ptr[idx] = value;
439 }
447 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
448 struct amdgpu_ib *ibs, struct amdgpu_job *job,
449 struct dma_fence **f);
453 bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);