Lines matching "vm" in drivers/gpu/drm/xe/xe_exec.c (the Xe exec IOCTL).
From the DOC comment at the top of the file, on implicit synchronization and
VM binds:

 * Implicit dependencies for external BOs are handled by using the dma-buf
 * implicit dependency uAPI. To make this work, each exec must install the
 * job's fence into the DMA_RESV_USAGE_WRITE slot of every external
 * BO mapped in the VM.
 *
 * We do not allow a user to trigger a bind at exec time; rather, we have a VM
 * bind IOCTL which uses the same in / out fence interface as exec. In that
 * sense, a VM bind is basically the same operation as an exec from the user's
 * perspective, e.g. if an exec depends on a VM bind, use the in / out fence
 * interface (struct drm_xe_sync) to synchronize, just like syncing between two
 * dependent execs.
 *
 * Although a user cannot trigger a bind, we still have to rebind userptrs in
 * the VM that have been invalidated since the last exec; likewise, we also
 * have to rebind BOs that have been evicted by the kernel. We schedule these
 * rebinds behind any pending kernel operations on any external BOs in the VM
 * or any BOs private to the VM. This is accomplished by the rebinds waiting
 * on the BOs' DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting
 * on all BO slots (in-flight execs are in the DMA_RESV_USAGE_BOOKKEEP slot
 * for private BOs and in DMA_RESV_USAGE_WRITE for external BOs).
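Since binds and execs share the in / out fence interface, userspace chains
them with a syncobj. Below is a minimal sketch of that chaining against the
upstream Xe uAPI; the helper name bind_then_exec, the omitted bind op, and
the omitted error handling are illustrative, and the struct fields reflect
the uAPI as merged (v6.8), so double-check them against your xe_drm.h.

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/xe_drm.h>

	/*
	 * Chain a VM bind to a subsequent exec via one syncobj (sketch).
	 * bind_done is a syncobj handle created earlier with
	 * DRM_IOCTL_SYNCOBJ_CREATE.
	 */
	static void bind_then_exec(int fd, uint32_t vm_id, uint32_t queue_id,
				   uint32_t bind_done, uint64_t batch_addr)
	{
		struct drm_xe_sync bind_out = {
			.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
			.flags = DRM_XE_SYNC_FLAG_SIGNAL,  /* bind's out-fence */
			.handle = bind_done,
		};
		struct drm_xe_sync exec_in = {
			.type = DRM_XE_SYNC_TYPE_SYNCOBJ,  /* exec's in-fence */
			.handle = bind_done,
		};
		struct drm_xe_vm_bind bind = {
			.vm_id = vm_id,
			.num_binds = 1,
			/* ... the single bind op, omitted ... */
			.num_syncs = 1,
			.syncs = (uintptr_t)&bind_out,
		};
		struct drm_xe_exec exec = {
			.exec_queue_id = queue_id,
			.num_batch_buffer = 1,
			.address = batch_addr,
			.num_syncs = 1,
			.syncs = (uintptr_t)&exec_in,
		};

		ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
		/* Will not run on the GPU until the bind's out-fence signals. */
		ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
	}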
The same comment documents the locking / retry flow of an exec:

 *	Parse input arguments
 *	Wait for any async VM bind passed as in-fences to start
 *	<----------------------------------------------------------------------|
 *	Lock global VM lock in read mode                                       |
 *	Pin userptrs (also finds userptrs invalidated since last exec)         |
 *	Lock exec (VM dma-resv lock, external BOs dma-resv locks)              |
 *	Validate BOs that have been evicted                                    |
 *	Create job                                                             |
 *	Rebind invalidated userptrs + evicted BOs (non-compute mode)           |
 *	Add rebind fence dependency to job                                     |
 *	Add job VM dma-resv bookkeeping slot (non-compute mode)                |
 *	Add job to external BOs dma-resv write slots (non-compute mode)        |
 *	Check if any userptrs invalidated since pin ------ Drop locks ---------|
 *	Install in / out fences for job
 *	Submit job
 *	Unlock all
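Condensed into code, the loop the diagram describes looks roughly like the
sketch below; every helper here (lock_vm_read, pin_userptrs, and so on) is a
hypothetical stand-in for the Xe functions shown later, not driver API.

	#include <stdbool.h>

	/* Hypothetical stand-ins; each returns 0 / true on success. */
	static int  lock_vm_read(void)          { return 0; }
	static int  pin_userptrs(void)          { return 0; }
	static int  lock_and_validate_bos(void) { return 0; }
	static int  build_job(void)             { return 0; }
	static bool userptrs_still_valid(void)  { return true; }
	static void drop_all_locks(void)        { }

	static int exec_flow(void)
	{
		int err;

	retry:
		err = lock_vm_read();
		if (!err)
			err = pin_userptrs();           /* may spot stale userptrs */
		if (!err)
			err = lock_and_validate_bos();  /* VM + external BO resvs */
		if (!err)
			err = build_job();
		if (!err && !userptrs_still_valid()) {
			drop_all_locks();               /* raced with invalidation */
			goto retry;                     /* the long arrow above */
		}
		drop_all_locks();
		return err;
	}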
On to the matched code. The callback handed to drm_gpuvm as the exec lock's
extra step validates the VM, i.e. revalidates any evicted BOs:

	static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
	{
		return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
	}
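xe_exec_fn() is not called directly; it is plugged into drm_gpuvm's lock
helper, which invokes it once every dma-resv lock is held. A minimal sketch
of that wiring using the drm_gpuvm / drm_exec APIs; the surrounding values
(vm, num_fences) are illustrative:

	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm->gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = num_fences,
		.extra.fn = xe_exec_fn,	/* runs with all resv locks held */
	};
	int err;

	/*
	 * Locks the VM's dma-resv plus every external BO's dma-resv,
	 * reserves num_fences slots on each, then calls extra.fn so the
	 * driver can validate evicted BOs under those locks.
	 */
	err = drm_gpuvm_exec_lock(&vm_exec);
	if (!err) {
		/* ... submit work ... */
		drm_gpuvm_exec_unlock(&vm_exec);
	}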
In xe_exec_ioctl() itself, the VM comes from the exec queue, and the sync
entries are parsed with the long-running (LR) flag when the VM runs in LR
mode:

	struct xe_vm *vm;
	...
	vm = q->vm;
	...
	err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
				  &syncs_user[i], SYNC_PARSE_FLAG_EXEC |
				  (xe_vm_in_lr_mode(vm) ?
				   SYNC_PARSE_FLAG_LR_MODE : 0));
When invalidated userptrs need repinning, the VM lock is taken in write mode
for the pin and then downgraded to read mode:

	if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
		err = down_write_killable(&vm->lock);
		write_locked = true;
	} else {
		/* We don't allow execs while the VM is in error state */
		err = down_read_interruptible(&vm->lock);
		write_locked = false;
	}
	if (err)
		goto err_syncs;

	if (write_locked) {
		err = xe_vm_userptr_pin(vm);
		downgrade_write(&vm->lock);
		write_locked = false;
		if (err)
			goto err_unlock_list;
	}
The drm_gpuvm exec helper then locks the VM's and the external BOs' dma-resvs,
reserving one fence slot per tile plus one:

	vm_exec.vm = &vm->gpuvm;
	vm_exec.num_fences = 1 + vm->xe->info.tile_count;
	vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
	if (xe_vm_in_lr_mode(vm)) {
		drm_exec_init(exec, vm_exec.flags);
	} else {
		err = drm_gpuvm_exec_lock(&vm_exec);
		if (err) {
			...
			goto err_unlock_list;
		}
	}
Submission against a closed or banned VM is refused:

	if (xe_vm_is_closed_or_banned(q->vm)) {
		drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
		err = -ECANCELED;
		goto err_exec;
	}
An exec with no batch buffers only ties the in-fences together and records the
resulting fence as the queue's last fence:

	if (!xe_vm_in_lr_mode(vm)) {
		struct dma_fence *fence;

		fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			goto err_exec;
		}
		for (i = 0; i < num_syncs; i++)
			xe_sync_entry_signal(&syncs[i], NULL, fence);
		xe_exec_queue_last_fence_set(q, vm, fence);
		dma_fence_put(fence);
	}
Invalidated userptrs and evicted BOs are rebound, and the rebind fence is
cached in the VM so later execs queue behind it:

	/*
	 * Rebind any invalidated userptr or evicted BOs in the VM, non-compute
	 * VM mode only.
	 */
	rebind_fence = xe_vm_rebind(vm, false);
	if (IS_ERR(rebind_fence)) {
		err = PTR_ERR(rebind_fence);
		goto err_put_job;
	}

	/*
	 * We store the rebind_fence in the VM so subsequent execs don't get
	 * scheduled before the rebinds of userptrs / evicted BOs are complete.
	 */
	if (rebind_fence) {
		dma_fence_put(vm->rebind_fence);
		vm->rebind_fence = rebind_fence;
	}
	if (vm->rebind_fence) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			     &vm->rebind_fence->flags)) {
			dma_fence_put(vm->rebind_fence);
			vm->rebind_fence = NULL;
		} else {
			dma_fence_get(vm->rebind_fence);
			err = drm_sched_job_add_dependency(&job->drm,
							   vm->rebind_fence);
			if (err)
				goto err_put_job;
		}
	}
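Isolated for clarity, the fence-caching pattern above can be written as a
generic helper. This is a sketch using stock dma-fence and DRM scheduler
calls, not a helper that exists in Xe; note the driver tests
DMA_FENCE_FLAG_SIGNALED_BIT directly, whereas dma_fence_is_signaled() used
here may additionally poll the fence's ->signaled() hook.

	/*
	 * Make @job depend on a cached fence, dropping the cache once the
	 * fence has signaled. drm_sched_job_add_dependency() consumes the
	 * reference taken here. Sketch only.
	 */
	static int depend_on_cached_fence(struct drm_sched_job *job,
					  struct dma_fence **cached)
	{
		if (!*cached)
			return 0;

		if (dma_fence_is_signaled(*cached)) {
			dma_fence_put(*cached);
			*cached = NULL;
			return 0;
		}

		dma_fence_get(*cached);
		return drm_sched_job_add_dependency(job, *cached);
	}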
Non-LR jobs also wait behind pending kernel operations on the VM's dma-resv:

	if (!xe_vm_in_lr_mode(vm)) {
		err = drm_sched_job_add_resv_dependencies(&job->drm,
							  xe_vm_resv(vm),
							  DMA_RESV_USAGE_KERNEL);
		if (err)
			goto err_put_job;
	}
The job is ordered behind the queue's last fence, and the userptr pins are
re-checked under the notifier lock:

	if (!xe_vm_in_lr_mode(vm)) {
		err = xe_sched_job_last_fence_add_dep(job, vm);
		if (err)
			goto err_put_job;

		err = down_read_interruptible(&vm->userptr.notifier_lock);
		if (err)
			goto err_put_job;

		err = __xe_vm_userptr_needs_repin(vm);
		if (err)
			goto err_repin;
	}
Past the point of no return, the job is armed and its finished fence is
installed into the dma-resv slots, BOOKKEEP for the VM's private BOs and
WRITE for external BOs:

	xe_sched_job_arm(job);
	if (!xe_vm_in_lr_mode(vm))
		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
					 DMA_RESV_USAGE_BOOKKEEP,
					 DMA_RESV_USAGE_WRITE);
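drm_gpuvm_resv_add_fence() is what installs one fence with two different
usages. Roughly, and this is a simplified sketch of the helper's behavior
rather than its exact upstream body, it walks every object locked in the
drm_exec context:

	struct drm_gem_object *obj;
	unsigned long index;

	/*
	 * Private BOs share the VM's resv and get private_usage (BOOKKEEP
	 * above); external BOs get extobj_usage (WRITE above, which is
	 * what implicit sync observes).
	 */
	drm_exec_for_each_locked_object(exec, index, obj)
		dma_resv_add_fence(obj->resv, fence,
				   drm_gpuvm_is_extobj(gpuvm, obj) ?
				   extobj_usage : private_usage);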
The finished fence becomes the queue's last fence, the job is pushed, and any
paused rebind worker is reactivated:

	if (!xe_vm_in_lr_mode(vm))
		xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
	xe_sched_job_push(job);
	xe_vm_reactivate_rebind(vm);
On success, the VM's BOs are bulk-moved to the LRU tail:

	if (!err && !xe_vm_in_lr_mode(vm)) {
		spin_lock(&xe->ttm.lru_lock);
		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
		spin_unlock(&xe->ttm.lru_lock);
	}
The unwind paths drop the notifier lock and whichever VM lock mode was taken:

	err_repin:
		if (!xe_vm_in_lr_mode(vm))
			up_read(&vm->userptr.notifier_lock);
	...
	err_unlock_list:
		if (write_locked)
			up_write(&vm->lock);
		else
			up_read(&vm->lock);