// SPDX-License-Identifier: MIT
/**
 * DOC: Execbuf (User GPU command submission)
 *
 * Execs have historically been rather complicated in DRM drivers (at least in
 * the i915) because of a few things:
 *
 * - Passing in a list of BOs which are read / written to, creating implicit syncs
 * - Binding at exec time
 * - Flow controlling the ring at exec time
 *
 * In XE we avoid all of this complication by not allowing a BO list to be
 * passed into an exec, using the dma-buf implicit sync uAPI, having binds as
 * separate operations, and using the DRM scheduler to flow control the ring.
 *
 * Implicit dependencies for external BOs are handled by using the dma-buf
 * implicit dependency uAPI. To make this work, each exec must install the
 * job's fence into the DMA_RESV_USAGE_WRITE slot of every external BO mapped
 * in the VM.
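 *
 * A minimal sketch of that rule (illustrative only, not the exact code in this
 * file; here resv stands for an external BO's reservation object and the fence
 * slot is assumed to have already been reserved while locking the BO):
 *
 * .. code-block:: c
 *
 *	dma_resv_add_fence(resv, &job->drm.s_fence->finished,
 *			   DMA_RESV_USAGE_WRITE);
 *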
 * Rebinds / dma-resv usage applies to non-compute mode VMs only; for compute
 * mode VMs we use preempt fences and a rebind worker instead.
 *
 * There is no need to flow control the ring in the exec as we write the ring at
 * submission time and set the DRM scheduler max job limit to SIZE_OF_RING /
 * MAX_SIZE_PER_JOB. The DRM scheduler will then hold all jobs until space in
 * the ring is available.
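 *
 * A back-of-the-envelope sketch of that limit (SIZE_OF_RING and
 * MAX_SIZE_PER_JOB are the conceptual quantities named above, not literal
 * driver symbols):
 *
 * .. code-block:: c
 *
 *	u32 max_inflight_jobs = SIZE_OF_RING / MAX_SIZE_PER_JOB;
 *
 * The exec queue's scheduler is created with this submission limit, so any job
 * beyond it simply waits in the scheduler until ring space frees up.
 *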
 * Flow
 * ~~~~
 *
 * .. code-block::
 *
 *	Wait for any async VM bind passed as in-fences to start
 *	<----------------------------------------------------------------------|
 *	Lock exec (VM dma-resv lock, external BOs dma-resv locks)              |
 *	Create job                                                             |
 *	Rebind invalidated userptrs + evicted BOs (non-compute-mode)           |
 *	Add rebind fence dependency to job                                     |
 *	Add job to VM dma-resv bookkeeping slot (non-compute mode)             |
 *	Add job to external BOs dma-resv write slots (non-compute mode)        |
 *	Check if any userptrs invalidated since pin ------ Drop locks ---------|
 *	Install in / out fences for job
 *	Submit job
 *	Unlock all
 */
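
/*
 * Validation callback used while locking the VM and external BOs: revalidate
 * (move back into place) any BOs that the kernel evicted before this job runs.
 */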
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}

int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec *args = data;
	struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
	u64 __user *addresses_user = u64_to_user_ptr(args->address);
	u32 i, num_syncs = 0, num_ufence = 0;
	struct xe_sched_job *job;
	ktime_t end = 0;
	int err = 0;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
		return -EINVAL;
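
	/*
	 * A parallel exec queue submits one batch buffer per engine instance,
	 * so when batch buffers are supplied their count must match the queue
	 * width.
	 */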
	if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
			 q->width != args->num_batch_buffer))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
		err = -ECANCELED;

	if (args->num_syncs) {
		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
		if (!syncs) {
			err = -ENOMEM;

	vm = q->vm;

	for (i = 0; i < args->num_syncs; i++) {
					   SYNC_PARSE_FLAG_LR_MODE : 0));

			err = -EINVAL;

				       q->width);

			err = -EFAULT;
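
	/*
	 * Userptrs that may need repinning require the VM lock in write mode;
	 * once they are pinned the lock is downgraded so the rest of the exec
	 * only needs read access.
	 */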
		err = down_write_killable(&vm->lock);

		err = down_read_interruptible(&vm->lock);

		downgrade_write(&vm->lock);
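
	/*
	 * Lock the VM dma-resv and all external BOs through drm_gpuvm_exec,
	 * reserving one fence slot for the job plus one per tile.
	 */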
	vm_exec.vm = &vm->gpuvm;
	vm_exec.num_fences = 1 + vm->xe->info.tile_count;
	drm_exec_init(exec, vm_exec.flags, 0);

			err = -EAGAIN;

	if (xe_vm_is_closed_or_banned(q->vm)) {
		drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
		err = -ECANCELED;
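
	/*
	 * An exec with no batch buffers is a synchronization-only submission;
	 * its out-syncs are signalled without emitting anything to the ring.
	 */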
	if (!args->num_batch_buffer) {

		for (i = 0; i < num_syncs; i++)

		err = -EWOULDBLOCK;	/* Aliased to -EAGAIN */

	job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
				  addresses : &args->address);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);

	/*
	 * Rebind any invalidated userptr or evicted BOs in the VM, non-compute
	 * VM mode only.
	 */
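	/*
	 * The resulting rebind fence is cached in the VM and, while it is still
	 * unsignalled, added as a dependency of the job so the batch cannot run
	 * before the rebinds have completed.
	 */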
		dma_fence_put(vm->rebind_fence);
		vm->rebind_fence = rebind_fence;
	}

	if (vm->rebind_fence) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			     &vm->rebind_fence->flags)) {
			dma_fence_put(vm->rebind_fence);
			vm->rebind_fence = NULL;
		} else {
			dma_fence_get(vm->rebind_fence);
			err = drm_sched_job_add_dependency(&job->drm,
							   vm->rebind_fence);

		err = drm_sched_job_add_resv_dependencies(&job->drm,

	for (i = 0; i < num_syncs && !err; i++)
		err = xe_sync_entry_add_deps(&syncs[i], job);

		err = xe_sched_job_last_fence_add_dep(job, vm);

	err = down_read_interruptible(&vm->userptr.notifier_lock);

	/*
	 * Point of no return: if we error after this point, just set an error
	 * on the job and let the DRM scheduler / backend clean up the job.
	 */
	xe_sched_job_arm(job);

		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,

	for (i = 0; i < num_syncs; i++)
		xe_sync_entry_signal(&syncs[i], job,
				     &job->drm.s_fence->finished);

		q->ring_ops->emit_job(job);

	xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
	xe_sched_job_push(job);
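
	/*
	 * Move the VM's BOs to the tail of the TTM LRU in one bulk operation so
	 * the buffers backing this workload become the last eviction candidates.
	 */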
	spin_lock(&xe->ttm.lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&xe->ttm.lru_lock);

	up_read(&vm->userptr.notifier_lock);

	xe_sched_job_put(job);

	if (write_locked)
		up_write(&vm->lock);
	else
		up_read(&vm->lock);
	if (err == -EAGAIN && !skip_retry)
		goto retry;

	for (i = 0; i < num_syncs; i++)