Selected lines from drivers/gpu/drm/amd/amdgpu/amdgpu_job.c (AMD GPU job handling), grouped by function.

In amdgpu_job_timedout():

	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_device *adev = ring->adev;
	...
		DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
	...
	memset(&ti, 0, sizeof(struct amdgpu_task_info));
	adev->job_hang = true;

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
	...
	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	...
	dma_fence_set_error(&s_job->s_fence->finished, -ETIME);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		...
		memset(&reset_context, 0, sizeof(reset_context));
		...
		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		...
		adev->virt.tdr_debug = true;
	}
	...
	adev->job_hang = false;

In amdgpu_job_alloc():

	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;

	amdgpu_sync_create(&(*job)->explicit_sync);
	(*job)->generation = amdgpu_vm_generation(adev, vm);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	if (!entity)
		return 0;

	return drm_sched_job_init(&(*job)->base, entity, 1, owner);

In amdgpu_job_alloc_with_ib():

	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
	...
	(*job)->num_ibs = 1;
	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	...
		drm_sched_job_cleanup(&(*job)->base);
		kfree(*job);

void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
	...
		job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	...
		job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	...
		job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;

void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	...
	if (job->base.s_fence && job->base.s_fence->finished.ops)
		f = &job->base.s_fence->finished;
	else if (job->hw_fence.ops)
		f = &job->hw_fence;
	...
	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);

In amdgpu_job_free_cb():

	struct amdgpu_job *job = to_amdgpu_job(s_job);
	...
	amdgpu_sync_free(&job->explicit_sync);
	...
	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);

void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader)
{
	struct dma_fence *fence = &leader->base.s_fence->scheduled;

	WARN_ON(job->gang_submit);
	...
	if (job != leader)
		dma_fence_get(fence);
	job->gang_submit = fence;

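The gang_submit fence set here ties several jobs together so they reach the hardware as one unit. Below is a minimal, hypothetical sketch of how a caller could wire up a two-job gang; the names job_a/job_b, the fence variables, and the submit order are illustrative assumptions, not taken from this file (the real user of this interface is the amdgpu command-submission path):

	/* Hypothetical example: job_a is the gang leader, job_b a member.
	 * Every job in the gang, the leader included, points at the leader's
	 * scheduled fence; only members take an extra reference (see the
	 * job != leader check above).
	 */
	amdgpu_job_set_gang_leader(job_a, job_a);
	amdgpu_job_set_gang_leader(job_b, job_a);

	fence_b = amdgpu_job_submit(job_b);
	fence_a = amdgpu_job_submit(job_a);
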
void amdgpu_job_free(struct amdgpu_job *job)
{
	if (job->base.entity)
		drm_sched_job_cleanup(&job->base);

	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->explicit_sync);
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);

	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);

struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
	...
	drm_sched_job_arm(&job->base);
	f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

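Taken together with amdgpu_job_alloc_with_ib() above, this is the usual in-kernel pattern for a one-off submission through the DRM scheduler. A rough sketch under assumptions: the entity pointer, the owner, and the IB size in dwords are placeholders, and the actual packet contents are elided.

	struct amdgpu_job *job;
	struct dma_fence *fence;
	unsigned int ndw = 64;		/* placeholder IB size, in dwords */
	int r;

	r = amdgpu_job_alloc_with_ib(adev, &entity, AMDGPU_FENCE_OWNER_UNDEFINED,
				     ndw * 4, AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	/* ... write packets into job->ibs[0].ptr and set ibs[0].length_dw ... */

	fence = amdgpu_job_submit(job);	/* arms the job and pushes it to the scheduler */
	dma_fence_wait(fence, false);	/* optionally wait for completion */
	dma_fence_put(fence);
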
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	...
	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
	...
	amdgpu_job_free(job);
	return 0;

In amdgpu_job_prepare_job():

	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	...
	if (r && r != -ENODATA)
		goto error;

	if (!fence && job->gang_submit)
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);

	while (!fence && job->vm && !job->vmid) {
		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
	...
error:
	dma_fence_set_error(&job->base.s_fence->finished, r);

In amdgpu_job_run():

	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	trace_amdgpu_sched_run_job(job);

	/* Skip job if VRAM is lost and never resubmit gangs */
	if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
	    (job->job_run_counter && job->gang_submit))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		...
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		...
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

In amdgpu_job_stop_all_jobs_on_sched():

	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];
		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}
	...
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}