Lines Matching full:job
65 static void job_free(struct xe_sched_job *job) in job_free() argument
67 struct xe_exec_queue *q = job->q; in job_free()
70 kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ? in job_free()
71 xe_sched_job_parallel_slab : xe_sched_job_slab, job); in job_free()
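The is_migration value tested above is computed from the queue, but its assignment did not match the search. A plausible reconstruction of the whole helper, assuming the flag comes from xe_sched_job_is_migration():

    static void job_free(struct xe_sched_job *job)
    {
            struct xe_exec_queue *q = job->q;
            /* assumed: migration status is derived from the queue */
            bool is_migration = xe_sched_job_is_migration(q);

            /* parallel and migration jobs are allocated from the larger
             * slab, so they must be freed back to the same one */
            kmem_cache_free(xe_exec_queue_is_parallel(q) || is_migration ?
                            xe_sched_job_parallel_slab : xe_sched_job_slab,
                            job);
    }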
74 static struct xe_device *job_to_xe(struct xe_sched_job *job) in job_to_xe() argument
76 return gt_to_xe(job->q->gt); in job_to_xe()
82 struct xe_sched_job *job; in xe_sched_job_create() local
89 /* only a kernel context can submit a vm-less job */ in xe_sched_job_create()
99 job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration); in xe_sched_job_create()
100 if (!job) in xe_sched_job_create()
103 job->q = q; in xe_sched_job_create()
104 kref_init(&job->refcount); in xe_sched_job_create()
105 xe_exec_queue_get(job->q); in xe_sched_job_create()
107 err = drm_sched_job_init(&job->drm, q->entity, 1, NULL); in xe_sched_job_create()
112 job->fence = xe_lrc_create_seqno_fence(q->lrc); in xe_sched_job_create()
113 if (IS_ERR(job->fence)) { in xe_sched_job_create()
114 err = PTR_ERR(job->fence); in xe_sched_job_create()
146 xe_assert(job_to_xe(job), cf->base.seqno == fences[j]->seqno); in xe_sched_job_create()
148 job->fence = &cf->base; in xe_sched_job_create()
156 job->batch_addr[i] = batch_addr[i]; in xe_sched_job_create()
160 xe_device_mem_access_get(job_to_xe(job)); in xe_sched_job_create()
161 xe_device_assert_mem_access(job_to_xe(job)); in xe_sched_job_create()
163 trace_xe_sched_job_create(job); in xe_sched_job_create()
164 return job; in xe_sched_job_create()
173 drm_sched_job_cleanup(&job->drm); in xe_sched_job_create()
176 job_free(job); in xe_sched_job_create()
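Only the lines matching "job" survive from xe_sched_job_create(), so the parallel-fence branch and the error unwinding are fragmentary. Below is a condensed sketch of the visible flow with the gaps filled in as assumptions; the single-LRC path is shown, while lines 146-148 suggest the parallel path builds one seqno fence per LRC and wraps them in a dma_fence_array whose base becomes job->fence:

    struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
                                             u64 *batch_addr)
    {
            struct xe_sched_job *job;
            int err, i;

            /* only a kernel context can submit a vm-less job (line 89) */

            /* parallel/migration jobs come from the larger slab (line 99) */
            job = job_alloc(xe_exec_queue_is_parallel(q) ||
                            xe_sched_job_is_migration(q));
            if (!job)
                    return ERR_PTR(-ENOMEM);

            job->q = q;
            kref_init(&job->refcount);
            xe_exec_queue_get(job->q);      /* job holds a queue reference */

            err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
            if (err)
                    goto err_free;

            /* single-LRC case: the seqno fence is the job fence (112-114) */
            job->fence = xe_lrc_create_seqno_fence(q->lrc);
            if (IS_ERR(job->fence)) {
                    err = PTR_ERR(job->fence);
                    goto err_sched_job;
            }

            /* line 156; migration may use a fixed width instead (assumption) */
            for (i = 0; i < q->width; ++i)
                    job->batch_addr[i] = batch_addr[i];

            /* assumed: mirroring the put in xe_sched_job_destroy(), the get
             * is taken only for kernel queues; other jobs rely on an open VM */
            if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
                    xe_device_mem_access_get(job_to_xe(job));
            xe_device_assert_mem_access(job_to_xe(job));

            trace_xe_sched_job_create(job);
            return job;

    err_sched_job:
            drm_sched_job_cleanup(&job->drm);       /* line 173 */
    err_free:
            xe_exec_queue_put(q);
            job_free(job);                          /* line 176 */
            return ERR_PTR(err);
    }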
181 * xe_sched_job_destroy - Destroy XE schedule job
182 * @ref: reference to XE schedule job
184 * Called when ref == 0: drop the job's references to its xe_exec_queue and fence, clean up the
185 * base DRM schedule job, and free the memory for the XE schedule job.
189 struct xe_sched_job *job = in xe_sched_job_destroy() local
192 if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL)) in xe_sched_job_destroy()
193 xe_device_mem_access_put(job_to_xe(job)); in xe_sched_job_destroy()
194 xe_exec_queue_put(job->q); in xe_sched_job_destroy()
195 dma_fence_put(job->fence); in xe_sched_job_destroy()
196 drm_sched_job_cleanup(&job->drm); in xe_sched_job_destroy()
197 job_free(job); in xe_sched_job_destroy()
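Lines 189-197 cover nearly the entire destructor; only the container_of() that recovers the job from its embedded kref is missing from the match. A sketch assuming the conventional kref release pattern:

    void xe_sched_job_destroy(struct kref *ref)
    {
            /* assumed: the job embeds its refcount, standard kref layout */
            struct xe_sched_job *job =
                    container_of(ref, struct xe_sched_job, refcount);

            /* kernel-queue jobs took a mem-access reference at create time */
            if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
                    xe_device_mem_access_put(job_to_xe(job));
            xe_exec_queue_put(job->q);      /* drop the create-time queue ref */
            dma_fence_put(job->fence);
            drm_sched_job_cleanup(&job->drm);
            job_free(job);
    }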
200 void xe_sched_job_set_error(struct xe_sched_job *job, int error) in xe_sched_job_set_error() argument
202 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) in xe_sched_job_set_error()
205 dma_fence_set_error(job->fence, error); in xe_sched_job_set_error()
207 if (dma_fence_is_array(job->fence)) { in xe_sched_job_set_error()
209 to_dma_fence_array(job->fence); in xe_sched_job_set_error()
223 trace_xe_sched_job_set_error(job); in xe_sched_job_set_error()
225 dma_fence_enable_sw_signaling(job->fence); in xe_sched_job_set_error()
226 xe_hw_fence_irq_run(job->q->fence_irq); in xe_sched_job_set_error()
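Lines 207-209 show the composite-fence case being detected, but the body of that branch did not match. For a parallel job the error presumably has to reach every child fence, not just the array fence itself; a sketch of the full function under that assumption:

    void xe_sched_job_set_error(struct xe_sched_job *job, int error)
    {
            /* already signaled: too late to attach an error */
            if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
                    return;

            dma_fence_set_error(job->fence, error);

            if (dma_fence_is_array(job->fence)) {
                    struct dma_fence_array *array =
                            to_dma_fence_array(job->fence);
                    unsigned int i;

                    /* assumed: propagate the error to each unsignaled child */
                    for (i = 0; i < array->num_fences; ++i) {
                            struct dma_fence *child = array->fences[i];

                            if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                          &child->flags))
                                    dma_fence_set_error(child, error);
                    }
            }

            trace_xe_sched_job_set_error(job);

            /* force signaling so waiters observe the error promptly */
            dma_fence_enable_sw_signaling(job->fence);
            xe_hw_fence_irq_run(job->q->fence_irq);
    }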
229 bool xe_sched_job_started(struct xe_sched_job *job) in xe_sched_job_started() argument
231 struct xe_lrc *lrc = job->q->lrc; in xe_sched_job_started()
233 return !__dma_fence_is_later(xe_sched_job_seqno(job), in xe_sched_job_started()
235 job->fence->ops); in xe_sched_job_started()
238 bool xe_sched_job_completed(struct xe_sched_job *job) in xe_sched_job_completed() argument
240 struct xe_lrc *lrc = job->q->lrc; in xe_sched_job_completed()
247 return !__dma_fence_is_later(xe_sched_job_seqno(job), xe_lrc_seqno(lrc), in xe_sched_job_completed()
248 job->fence->ops); in xe_sched_job_completed()
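Both predicates invert __dma_fence_is_later(): the seqnos the hardware writes back to the LRC are monotonic, so "the job's seqno is not later than the LRC's" means the engine has reached (started) or passed (completed) this job. The started check compares against a start-of-job seqno read at line 234, which did not match the search; presumably something like xe_lrc_start_seqno():

    bool xe_sched_job_started(struct xe_sched_job *job)
    {
            struct xe_lrc *lrc = job->q->lrc;

            /* assumed: xe_lrc_start_seqno() reads the seqno the hardware
             * wrote when it began executing its current job */
            return !__dma_fence_is_later(xe_sched_job_seqno(job),
                                         xe_lrc_start_seqno(lrc),
                                         job->fence->ops);
    }

    bool xe_sched_job_completed(struct xe_sched_job *job)
    {
            struct xe_lrc *lrc = job->q->lrc;

            /* LRC seqno >= job seqno means this job has finished */
            return !__dma_fence_is_later(xe_sched_job_seqno(job),
                                         xe_lrc_seqno(lrc),
                                         job->fence->ops);
    }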
251 void xe_sched_job_arm(struct xe_sched_job *job) in xe_sched_job_arm() argument
253 drm_sched_job_arm(&job->drm); in xe_sched_job_arm()
256 void xe_sched_job_push(struct xe_sched_job *job) in xe_sched_job_push() argument
258 xe_sched_job_get(job); in xe_sched_job_push()
259 trace_xe_sched_job_exec(job); in xe_sched_job_push()
260 drm_sched_entity_push_job(&job->drm); in xe_sched_job_push()
261 xe_sched_job_put(job); in xe_sched_job_push()
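xe_sched_job_push() brackets the hand-off in a get/put pair: once drm_sched_entity_push_job() runs, the scheduler may execute and release the job at any moment, so the temporary reference keeps the job valid across the push and the trace point. A hypothetical submission tail (the finished-fence handling is illustrative, not from the matched file):

    xe_sched_job_arm(job);          /* arm the scheduler fence */
    fence = dma_fence_get(&job->drm.s_fence->finished);
    xe_sched_job_push(job);         /* job may complete immediately */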
265 * xe_sched_job_last_fence_add_dep - Add last fence dependency to job
266 * @job: job to add the last fence dependency to
267 * @vm: the VM the job belongs to
272 int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm) in xe_sched_job_last_fence_add_dep() argument
276 fence = xe_exec_queue_last_fence_get(job->q, vm); in xe_sched_job_last_fence_add_dep()
278 return drm_sched_job_add_dependency(&job->drm, fence); in xe_sched_job_last_fence_add_dep()
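The two matched lines are effectively the whole function. A sketch, assuming xe_exec_queue_last_fence_get() returns an already-referenced fence and that drm_sched_job_add_dependency() consumes that reference:

    int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job,
                                        struct xe_vm *vm)
    {
            struct dma_fence *fence;

            fence = xe_exec_queue_last_fence_get(job->q, vm);

            /* drm_sched_job_add_dependency() takes ownership of the ref */
            return drm_sched_job_add_dependency(&job->drm, fence);
    }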