Lines matching the whole word "ring" in drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h (excerpt)
/* Direct submission to the ring buffer during init and reset. */

/* sync_seq is protected by ring emission lock */

void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);

int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
		      struct amdgpu_job *job, unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring);
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
					 ktime_t timestamp);
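/*
 * Illustrative sketch, not part of amdgpu_ring.h: how the polling fence API
 * is typically paired when interrupts are unavailable (early init, KIQ-style
 * register access). The dword count and timeout values are placeholders.
 */
static int example_emit_and_poll(struct amdgpu_ring *ring)
{
	uint32_t seq;
	int r;

	r = amdgpu_ring_alloc(ring, 32);	/* reserve space, placeholder size */
	if (r)
		return r;

	r = amdgpu_fence_emit_polling(ring, &seq, 256 /* placeholder timeout */);
	if (r) {
		amdgpu_ring_undo(ring);
		return r;
	}
	amdgpu_ring_commit(ring);

	/* Busy-wait until the seq value written by the GPU catches up. */
	if (amdgpu_fence_wait_polling(ring, seq, 1024 /* placeholder */) < 1)
		return -ETIMEDOUT;

	return 0;
}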
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ... */

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);

	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);

	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);

	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);

	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring, uint64_t addr);

	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
				u64 gds_va, bool init_shadow, int vmid);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);

	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
	void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
	void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
	int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
	void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
	bool (*is_guilty)(struct amdgpu_ring *ring);
};
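/*
 * Illustrative sketch, not part of amdgpu_ring.h: an IP block fills in only
 * the hooks its engine supports and leaves the rest NULL. Every example_*
 * name below is a hypothetical stand-in for a real implementation.
 */
static const struct amdgpu_ring_funcs example_ring_funcs = {
	.get_rptr	= example_ring_get_rptr,
	.get_wptr	= example_ring_get_wptr,
	.set_wptr	= example_ring_set_wptr,
	.emit_ib	= example_ring_emit_ib,
	.emit_fence	= example_ring_emit_fence,
	.test_ring	= example_ring_test_ring,
	.test_ib	= example_ring_test_ib,
	.insert_nop	= amdgpu_ring_insert_nop,	/* generic helper */
	.pad_ib		= amdgpu_ring_generic_pad_ib,	/* generic helper */
};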
/* struct amdgpu_ring member: CPU-side mapping of the ring buffer contents */
	uint32_t		*ring;
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);

void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);
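/*
 * Illustrative sketch, not part of amdgpu_ring.h: the reserve/fill/commit
 * pattern around direct ring writes. On a mid-fill failure, amdgpu_ring_undo()
 * would restore the saved wptr instead of committing the partial packet.
 */
static int example_submit_nops(struct amdgpu_ring *ring)
{
	unsigned int i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);	/* reserve 16 dwords */
	if (r)
		return r;

	for (i = 0; i < 16; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);

	amdgpu_ring_commit(ring);		/* publish wptr to the engine */
	return 0;
}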
static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
						     bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}

static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;

	/* Fill the whole ring buffer with the engine's NOP packet. */
	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	/*
	 * Store one dword at wptr, wrap with the buffer mask, and track how
	 * many of the dwords reserved by amdgpu_ring_alloc() remain.
	 */
	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}
static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;

	/*
	 * Split the copy in two when it would run past the end of the ring
	 * buffer: chunk1 fills up to the end, chunk2 wraps to the start.
	 */
	occupied = ring->wptr & ring->buf_mask;
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	chunk1 <<= 2;	/* dword counts to byte counts */
	chunk2 <<= 2;

	if (chunk1)
		memcpy(&ring->ring[occupied], src, chunk1);

	if (chunk2) {
		src += chunk1;
		memcpy(ring->ring, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}
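/*
 * Worked example (illustrative, not from the header): with buf_mask = 0xff
 * (a 256-dword ring) and wptr = 250, writing count_dw = 10 gives
 * occupied = 250, chunk1 = 6 dwords (slots 250..255) and chunk2 = 4 dwords
 * (slots 0..3); wptr then advances to 260, which indexes slot 4 once masked
 * with buf_mask.
 */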
/**
 * amdgpu_ring_patch_cond_exec - patch dw count of conditional execute
 * @ring: amdgpu_ring structure
 * @offset: offset returned by amdgpu_ring_init_cond_exec
 *
 * Calculate the dw count and patch it into a cond_exec command.
 */
static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
					       unsigned int offset)
{
	unsigned cur;

	if (!ring->funcs->init_cond_exec)
		return;

	WARN_ON(offset > ring->buf_mask);
	WARN_ON(ring->ring[offset] != 0);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur < offset)
		cur += ring->ring_size >> 2;
	ring->ring[offset] = cur - offset;
}
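/*
 * Illustrative sketch, not part of amdgpu_ring.h: init_cond_exec and
 * amdgpu_ring_patch_cond_exec bracket a section of commands that the CP may
 * skip, e.g. around preemption. flag_gpu_addr is a hypothetical GPU address
 * the COND_EXEC packet polls.
 */
static void example_cond_exec_section(struct amdgpu_ring *ring, u64 flag_gpu_addr)
{
	unsigned int offset;

	/* Emit a COND_EXEC packet whose dw count is still zero. */
	offset = ring->funcs->init_cond_exec(ring, flag_gpu_addr);

	/* ... emit the skippable commands here ... */

	/* Patch the section's real dw count into the packet. */
	amdgpu_ring_patch_cond_exec(ring, offset);
}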
#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)

#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
	 NULL)
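/*
 * Illustrative sketch, not part of amdgpu_ring.h: resolving the same offset
 * inside the MES context meta data from the CPU and GPU sides. offs is a
 * hypothetical byte offset into the meta data.
 */
static void example_mes_ctx_lookup(struct amdgpu_ring *ring, unsigned int offs)
{
	uint64_t gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offs);
	uint32_t *cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offs);

	/* Both macros yield 0 / NULL when the ring is not a MES queue. */
	if (cpu_ptr)
		*cpu_ptr = lower_32_bits(gpu_addr);	/* example write */
}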
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring);

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);

int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
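/*
 * Illustrative sketch, not part of amdgpu_ring.h: direct IB submission that
 * bypasses the GPU scheduler, as IB tests do. IB setup/teardown is elided
 * and the timeout is a placeholder.
 */
static int example_submit_ib_direct(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct dma_fence *f = NULL;
	int r;

	if (!amdgpu_ring_sched_ready(ring))
		return -ENODEV;

	/* job == NULL selects the direct-to-ring submission path. */
	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	if (r)
		return r;

	if (dma_fence_wait_timeout(f, false, msecs_to_jiffies(100)) <= 0)
		r = -ETIMEDOUT;
	dma_fence_put(f);
	return r;
}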