Selected lines from drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c (code-search excerpt; fragments grouped by the function they belong to, elisions marked with "...").
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL

 * are no longer in use by the associated ring on the GPU and

/* struct amdgpu_fence */
	struct amdgpu_ring *ring;

/* amdgpu_fence_slab_init() */
	...
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
	...
		return -ENOMEM;
	return 0;

/* to_amdgpu_fence() */
	if (__f->base.ops == &amdgpu_fence_ops ||
	    __f->base.ops == &amdgpu_job_fence_ops)
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}
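A standalone sketch of the slot access above (not driver code; every name below is invented for illustration): the kernel's cpu_to_le32()/le32_to_cpu() keep the CPU-visible fence slot in little-endian byte order regardless of host endianness, and the user-space equivalent byte-packs manually.

#include <stdint.h>
#include <stdio.h>

/* store a 32-bit sequence number in little-endian byte order */
static void fence_slot_write(uint8_t slot[4], uint32_t seq)
{
	slot[0] = seq & 0xff;
	slot[1] = (seq >> 8) & 0xff;
	slot[2] = (seq >> 16) & 0xff;
	slot[3] = (seq >> 24) & 0xff;
}

/* read it back, independent of host endianness */
static uint32_t fence_slot_read(const uint8_t slot[4])
{
	return (uint32_t)slot[0] | ((uint32_t)slot[1] << 8) |
	       ((uint32_t)slot[2] << 16) | ((uint32_t)slot[3] << 24);
}

int main(void)
{
	uint8_t slot[4];

	fence_slot_write(slot, 0x12345678u);
	printf("read back 0x%08x\n", fence_slot_read(slot));	/* 0x12345678 */
	return 0;
}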
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @job: job the fence is embedded in
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
	...
	struct amdgpu_device *adev = ring->adev;
	...
	if (job == NULL) {
		...
			return -ENOMEM;
		fence = &am_fence->base;
		am_fence->ring = ring;
	} else {
		/* use the fence embedded in the job */
		fence = &job->hw_fence;
	}

	seq = ++ring->fence_drv.sync_seq;
	if (job && job->job_run_counter) {
		...
		fence->seqno = seq;
		...
	}

	if (job) {
		...
			       &ring->fence_drv.lock,
			       adev->fence_context + ring->idx, seq);
		...
	} else {
		...
			       &ring->fence_drv.lock,
			       adev->fence_context + ring->idx, seq);
	}

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
	...
	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
	...
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	...
	to_amdgpu_fence(fence)->start_timestamp = ktime_get();
	...
	 * emitting the fence would mess up the hardware ring buffer.
	...
	return 0;
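The fences array indexed above acts as a ring of 2 * num_hw_submission slots; because the slot count is a power of two, "seq & num_fences_mask" replaces a modulo and stays correct across 32-bit wraparound. A minimal user-space sketch of that indexing (not driver code; the constants and names are invented):

#include <stdint.h>
#include <stdio.h>

#define NUM_HW_SUBMISSION 128u			/* must be a power of two */
#define NUM_FENCES	  (2u * NUM_HW_SUBMISSION)
#define NUM_FENCES_MASK	  (NUM_FENCES - 1u)	/* 0xff */

int main(void)
{
	/* slot index cycles through the array and survives 32-bit wraparound */
	uint32_t seqs[] = { 1u, 255u, 256u, 257u, 0xffffffffu, 0u /* wrapped */ };

	for (unsigned int i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++)
		printf("seq %10u -> slot %3u (same as %% %u: %u)\n",
		       seqs[i], seqs[i] & NUM_FENCES_MASK,
		       NUM_FENCES, seqs[i] % NUM_FENCES);
	return 0;
}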
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
	...
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
	...
		return -ETIMEDOUT;

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);
	...
	return 0;
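Waiting on "seq - num_fences_mask" before emitting bounds the number of in-flight fences, so a new sequence number can never lap a still-pending one. A user-space sketch of that invariant under a fake, instantly-signaling hardware counter (all names invented; not the driver's logic, just the same throttle shape):

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

#define NUM_FENCES_MASK 255u	/* 2 * num_hw_submission - 1 */

static uint32_t hw_signaled;	/* stands in for the GPU's fence slot */

/* pretend the GPU signals everything on demand for this demo */
static void fake_wait_polling(uint32_t wait_seq)
{
	while ((int32_t)(wait_seq - hw_signaled) > 0)
		hw_signaled++;	/* a real driver would poll the slot */
}

int main(void)
{
	uint32_t sync_seq = 0;

	for (int i = 0; i < 1000; i++) {
		uint32_t seq = ++sync_seq;

		fake_wait_polling(seq - NUM_FENCES_MASK);
		/* at most NUM_FENCES_MASK fences are now outstanding */
		assert(seq - hw_signaled <= NUM_FENCES_MASK);
	}
	printf("emitted %u fences, invariant held\n", sync_seq);
	return 0;
}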
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
	...
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	...
	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);
	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);
	...
	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;
	...
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];
	...
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	trace_amdgpu_runpm_reference_dumps(0, __func__);
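The read/cmpxchg loop above advances last_seq without a lock: if another CPU updated it between the read and the compare-exchange, the loop retries with fresher values. A user-space sketch of the same pattern using C11 atomics (the kernel's atomic_cmpxchg() returns the old value, while atomic_compare_exchange_strong() reports success instead; all names below are invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint last_seq;	/* highest sequence number processed so far */

/* stand-in for amdgpu_fence_read(): whatever the hardware last wrote */
static unsigned int hw_read_seq(void)
{
	return 42;
}

static void process_fences(void)
{
	unsigned int old, seq;

	do {
		old = atomic_load(&last_seq);
		seq = hw_read_seq();
		/* retry if another CPU advanced last_seq between the loads */
	} while (!atomic_compare_exchange_strong(&last_seq, &old, seq));

	printf("advanced last_seq from %u to %u\n", old, seq);
}

int main(void)
{
	process_fences();
	return 0;
}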
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to the ring structure
 */
	struct amdgpu_ring *ring = from_timer(ring, t,
	...
	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	...
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	...
		return 0;
	...
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 *
 * Waits until the given sequence number has signaled or the timeout expires.
 * Returns the remaining timeout on success, 0 if the wait timed out.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
	...
	while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
		...
		timeout -= 2;
	}
	return timeout > 0 ? timeout : 0;
}
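The cast to int32_t makes the "has wait_seq passed?" test wraparound-safe: the unsigned difference is reinterpreted as a signed distance, so the comparison stays correct as long as the two sequence numbers are less than 2^31 apart. A small demonstration (invented helper name, not driver code):

#include <stdint.h>
#include <stdio.h>

/* true if 'wait_seq' has not yet been reached by 'hw_seq' */
static int seq_still_ahead(uint32_t wait_seq, uint32_t hw_seq)
{
	return (int32_t)(wait_seq - hw_seq) > 0;
}

int main(void)
{
	/* ordinary case: waiting for 100, hardware at 90 */
	printf("%d\n", seq_still_ahead(100, 90));		/* 1 */
	/* already signaled */
	printf("%d\n", seq_still_ahead(100, 100));		/* 0 */
	/* across the 32-bit wrap: 0x00000005 comes AFTER 0xfffffffb */
	printf("%d\n", seq_still_ahead(5, 0xfffffffbu));	/* 1 */
	/* the naive unsigned compare would get this wrong */
	printf("%d\n", 5u > 0xfffffffbu);			/* 0 */
	return 0;
}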
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fences are associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	...
	/* We are not protected by ring lock when reading the last sequence
	...
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	...
}
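The 0x100000000ull (2^32) bias keeps the 64-bit subtraction from underflowing, and truncating back to 32 bits yields the correct emitted-but-unsignaled count even when sync_seq has wrapped past last_seq. A minimal sketch of the arithmetic (invented helper name):

#include <stdint.h>
#include <stdio.h>

/* wraparound-safe "sync_seq - last_seq", done in 64-bit as above */
static uint32_t count_emitted(uint32_t last_seq, uint32_t sync_seq)
{
	uint64_t emitted = 0x100000000ull;	/* 2^32 biases away underflow */

	emitted -= last_seq;
	emitted += sync_seq;
	return (uint32_t)emitted;		/* keep the low 32 bits */
}

int main(void)
{
	printf("%u\n", count_emitted(10, 15));			/* 5 */
	/* sync_seq wrapped past zero while last_seq has not */
	printf("%u\n", count_emitted(0xfffffffeu, 3));		/* 5 */
	return 0;
}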
/**
 * amdgpu_fence_last_unsignaled_time_us - microseconds elapsed since the
 * oldest unsignaled fence was emitted
 *
 * @ring: ring the fence is associated with
 */
u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	...
	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
	...
		return 0;
	...
	last_seq &= drv->num_fences_mask;
	fence = drv->fences[last_seq];
	...
		return 0;
	...
			      to_amdgpu_fence(fence)->start_timestamp);
}

/**
 * amdgpu_fence_update_start_timestamp - update the timestamp of the fence
 *
 * @ring: ring the fence is associated with
 */
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	...
	seq &= drv->num_fences_mask;
	fence = drv->fences[seq];
	...
	to_amdgpu_fence(fence)->start_timestamp = timestamp;
}
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
	...
	struct amdgpu_device *adev = ring->adev;
	...
	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
	} else {
		...
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	...
	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
	return 0;
}
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	...
		return -EINVAL;

	if (!is_power_of_2(ring->num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
	...
	if (!ring->fence_drv.fences)
		return -ENOMEM;
	...
	return 0;
}
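num_fences_mask only works as a modulo substitute when the slot count is a power of two, hence the is_power_of_2() guard above. The kernel helper boils down to the classic bit trick sketched below in user space (invented function name):

#include <stdio.h>

/* equivalent of the kernel's is_power_of_2(): exactly one bit set */
static int power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long vals[] = { 1, 2, 3, 128, 255, 256 };

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("%lu -> %s\n", vals[i],
		       power_of_2(vals[i]) ? "power of two" : "not");
	return 0;
}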
/**
 * amdgpu_fence_driver_sw_init - init the fence driver
 *
 * Returns 0 for success.
 */
	return 0;

/**
 * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
 *
 * @ring: ring to be checked
 */
static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	...
	switch (ring->funcs->type) {
	...
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
		    IP_VERSION(5, 0, 0))
	...
	}

	return !(adev->in_s0ix && is_gfx_power_domain);
}
/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		...
			r = amdgpu_fence_wait_empty(ring);
		...
			r = -ENODEV;
		...
			amdgpu_fence_driver_force_completion(ring);
		...
		    ring->fence_drv.irq_src &&
		    amdgpu_fence_need_ring_interrupt_restore(ring))
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);

		del_timer_sync(&ring->fence_drv.fallback_timer);
	}

/* amdgpu_fence_driver_isr_toggle() */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
			continue;
		...
			disable_irq(adev->irq.irq);
		...
			enable_irq(adev->irq.irq);
	}

/* amdgpu_fence_driver_sw_fini() */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		...
		if (ring->sched.ops)
			drm_sched_fini(&ring->sched);

		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 *
 * Returns 0 for success.
 */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		...
		if (ring->fence_drv.irq_src &&
		    amdgpu_fence_need_ring_interrupt_restore(ring))
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}

/**
 * amdgpu_fence_driver_clear_job_fences - clear the job-embedded fences of a ring
 *
 * @ring: ring whose job-embedded fences are to be cleared
 */
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
	...
	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
		ptr = &ring->fence_drv.fences[i];
		...
		if (old && old->ops == &amdgpu_job_fence_ops) {
			struct amdgpu_job *job;

			/* For a non-scheduler bad job, i.e. a failed ib test, we need to signal
			...
			job = container_of(old, struct amdgpu_job, hw_fence);
			if (!job->base.s_fence && !dma_fence_is_signaled(old))
	...
/**
 * amdgpu_fence_driver_set_error - set error code on fences
 *
 * @ring: the ring which contains the fences
 *
 * Set an error code on all the fences pending on the ring.
 */
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	...
	spin_lock_irqsave(&drv->lock, flags);
	for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
		...
		fence = rcu_dereference_protected(drv->fences[i],
						  lockdep_is_held(&drv->lock));
		...
	}
	spin_unlock_irqrestore(&drv->lock, flags);
}

/**
 * amdgpu_fence_driver_force_completion - force signal the latest fence of a ring
 *
 * @ring: ring whose latest fence is to be signaled
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_driver_set_error(ring, -ECANCELED);
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}
/* amdgpu_fence_get_timeline_name() */
	return (const char *)to_amdgpu_fence(f)->ring->name;

/* amdgpu_job_fence_get_timeline_name() */
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	return (const char *)to_amdgpu_ring(job->base.sched)->name;

/**
 * amdgpu_fence_enable_signaling - enable signalling on a fence
 */
	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);

/**
 * amdgpu_job_fence_enable_signaling - enable signalling on a job fence
 *
 * This function only handles the job-embedded fence.
 */
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));

/**
 * amdgpu_fence_free - free up the fence memory
 */

/**
 * amdgpu_job_fence_free - free up the job with an embedded fence
 *
 * Free up the job with an embedded fence after the RCU grace period.
 */
	/* free the job if the fence has a parent job */

/**
 * amdgpu_fence_release - callback marking that the fence can be freed
 */
	call_rcu(&f->rcu, amdgpu_fence_free);

/**
 * amdgpu_job_fence_release - callback marking that the job-embedded fence can be freed
 *
 * This function only handles the job-embedded fence.
 */
	call_rcu(&f->rcu, amdgpu_job_fence_free);
/* amdgpu_debugfs_fence_info_show() */
	struct amdgpu_device *adev = m->private;
	...
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;
		...
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		...
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		...
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
/**
 * amdgpu_debugfs_gpu_recover - manually trigger a GPU reset & recovery
 */
	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
		flush_work(&adev->reset_work);

	*val = atomic_read(&adev->reset_domain->reset_res);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;

/* amdgpu_debugfs_reset_work() */
	memset(&reset_context, 0, sizeof(reset_context));

/* amdgpu_debugfs_fence_init() */
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	...
	INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);