Lines Matching full:vm
45 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) in xe_vm_obj() argument
47 return vm->gpuvm.r_obj; in xe_vm_obj()
56 * without the vm->userptr.notifier_lock held. There is no guarantee that the
72 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_userptr_pin_pages() local
73 struct xe_device *xe = vm->xe; in xe_vma_userptr_pin_pages()
75 lockdep_assert_held(&vm->lock); in xe_vma_userptr_pin_pages()
81 static bool preempt_fences_waiting(struct xe_vm *vm) in preempt_fences_waiting() argument
85 lockdep_assert_held(&vm->lock); in preempt_fences_waiting()
86 xe_vm_assert_held(vm); in preempt_fences_waiting()
88 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in preempt_fences_waiting()
107 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, in alloc_preempt_fences() argument
110 lockdep_assert_held(&vm->lock); in alloc_preempt_fences()
111 xe_vm_assert_held(vm); in alloc_preempt_fences()
113 if (*count >= vm->preempt.num_exec_queues) in alloc_preempt_fences()
116 for (; *count < vm->preempt.num_exec_queues; ++(*count)) { in alloc_preempt_fences()
128 static int wait_for_existing_preempt_fences(struct xe_vm *vm) in wait_for_existing_preempt_fences() argument
132 xe_vm_assert_held(vm); in wait_for_existing_preempt_fences()
134 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in wait_for_existing_preempt_fences()
138 /* Only -ETIME on fence indicates VM needs to be killed */ in wait_for_existing_preempt_fences()
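The comment's rule is worth making concrete: only a timed-out preempt fence is treated as fatal for the VM. A minimal sketch of that wait-and-check step, reusing the q->lr.pfence field that appears elsewhere in this listing; the surrounding loop and the exact error handling are assumptions.

	/* Sketch only: wait for an existing preempt fence on a queue and
	 * treat -ETIME, and nothing else, as "the VM must be killed". */
	long timeout = dma_fence_wait(q->lr.pfence, false);

	if (timeout < 0 || q->lr.pfence->error == -ETIME)
		return -ETIME;	/* caller eventually bans the VM via xe_vm_kill() */
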
150 static bool xe_vm_is_idle(struct xe_vm *vm) in xe_vm_is_idle() argument
154 xe_vm_assert_held(vm); in xe_vm_is_idle()
155 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in xe_vm_is_idle()
163 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) in arm_preempt_fences() argument
168 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in arm_preempt_fences()
172 xe_assert(vm->xe, link != list); in arm_preempt_fences()
182 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo) in add_preempt_fences() argument
189 if (!vm->preempt.num_exec_queues) in add_preempt_fences()
192 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues); in add_preempt_fences()
196 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) in add_preempt_fences()
206 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm, in resume_and_reinstall_preempt_fences() argument
211 lockdep_assert_held(&vm->lock); in resume_and_reinstall_preempt_fences()
212 xe_vm_assert_held(vm); in resume_and_reinstall_preempt_fences()
214 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in resume_and_reinstall_preempt_fences()
217 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence, in resume_and_reinstall_preempt_fences()
222 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_add_compute_exec_queue() argument
225 .vm = &vm->gpuvm, in xe_vm_add_compute_exec_queue()
234 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in xe_vm_add_compute_exec_queue()
236 down_write(&vm->lock); in xe_vm_add_compute_exec_queue()
248 list_add(&q->lr.link, &vm->preempt.exec_queues); in xe_vm_add_compute_exec_queue()
249 ++vm->preempt.num_exec_queues; in xe_vm_add_compute_exec_queue()
252 down_read(&vm->userptr.notifier_lock); in xe_vm_add_compute_exec_queue()
254 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence, in xe_vm_add_compute_exec_queue()
258 * Check to see if a preemption on VM is in flight or userptr in xe_vm_add_compute_exec_queue()
260 * other preempt fences on the VM. in xe_vm_add_compute_exec_queue()
262 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm); in xe_vm_add_compute_exec_queue()
266 up_read(&vm->userptr.notifier_lock); in xe_vm_add_compute_exec_queue()
271 up_write(&vm->lock); in xe_vm_add_compute_exec_queue()
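Pieced together from the matched lines above, the lock ordering when adding a compute exec queue is vm->lock (write) first, then vm->userptr.notifier_lock (read) while the preempt fence is installed and the repin/wait state is sampled. A condensed sketch with allocation and error handling elided:

	down_write(&vm->lock);
	/* ... allocate the preempt fence for the new queue ... */
	list_add(&q->lr.link, &vm->preempt.exec_queues);
	++vm->preempt.num_exec_queues;

	down_read(&vm->userptr.notifier_lock);
	/* install the fence on the VM resv via drm_gpuvm_resv_add_fence(), as above */
	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
	up_read(&vm->userptr.notifier_lock);

	up_write(&vm->lock);
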
278 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
279 * @vm: The VM.
284 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_remove_compute_exec_queue() argument
286 if (!xe_vm_in_preempt_fence_mode(vm)) in xe_vm_remove_compute_exec_queue()
289 down_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
292 --vm->preempt.num_exec_queues; in xe_vm_remove_compute_exec_queue()
299 up_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
303 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
305 * @vm: The VM.
307 * This function checks for whether the VM has userptrs that need repinning,
313 int __xe_vm_userptr_needs_repin(struct xe_vm *vm) in __xe_vm_userptr_needs_repin() argument
315 lockdep_assert_held_read(&vm->userptr.notifier_lock); in __xe_vm_userptr_needs_repin()
317 return (list_empty(&vm->userptr.repin_list) && in __xe_vm_userptr_needs_repin()
318 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN; in __xe_vm_userptr_needs_repin()
324 * xe_vm_kill() - VM Kill
325 * @vm: The VM.
326 * @unlocked: Flag indicating the VM's dma-resv is not held
328 * Kill the VM by setting the banned flag, indicating the VM is no longer available for
329 * use. If in preempt fence mode, also kill all exec queues attached to the VM.
331 void xe_vm_kill(struct xe_vm *vm, bool unlocked) in xe_vm_kill() argument
335 lockdep_assert_held(&vm->lock); in xe_vm_kill()
338 xe_vm_lock(vm, false); in xe_vm_kill()
340 vm->flags |= XE_VM_FLAG_BANNED; in xe_vm_kill()
341 trace_xe_vm_kill(vm); in xe_vm_kill()
343 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) in xe_vm_kill()
347 xe_vm_unlock(vm); in xe_vm_kill()
349 /* TODO: Inform user the VM is banned */ in xe_vm_kill()
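For reference, the caller side of xe_vm_kill() as it appears later in this listing (the rebind worker): vm->lock is held for write, and unlocked=true is passed because the VM's dma-resv is not held at that point. A minimal sketch:

	if (err) {
		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
		xe_vm_kill(vm, true);	/* true: dma-resv not held here */
	}
	up_write(&vm->lock);
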
387 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_gpuvm_validate() local
391 lockdep_assert_held(&vm->lock); in xe_gpuvm_validate()
394 &vm->rebind_list); in xe_gpuvm_validate()
396 ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false); in xe_gpuvm_validate()
406 * @vm: The vm for which we are rebinding.
419 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec, in xe_vm_validate_rebind() argument
427 ret = drm_gpuvm_validate(&vm->gpuvm, exec); in xe_vm_validate_rebind()
431 ret = xe_vm_rebind(vm, false); in xe_vm_validate_rebind()
434 } while (!list_empty(&vm->gpuvm.evict.list)); in xe_vm_validate_rebind()
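The matched lines above describe a validate-then-rebind loop that runs until the gpuvm evict list drains. Reassembled as a sketch (fence reservation and any extra bookkeeping elided):

	do {
		ret = drm_gpuvm_validate(&vm->gpuvm, exec);
		if (ret)
			return ret;

		ret = xe_vm_rebind(vm, false);
		if (ret)
			return ret;
	} while (!list_empty(&vm->gpuvm.evict.list));
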
445 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm, in xe_preempt_work_begin() argument
450 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0); in xe_preempt_work_begin()
454 if (xe_vm_is_idle(vm)) { in xe_preempt_work_begin()
455 vm->preempt.rebind_deactivated = true; in xe_preempt_work_begin()
460 if (!preempt_fences_waiting(vm)) { in xe_preempt_work_begin()
465 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0); in xe_preempt_work_begin()
469 err = wait_for_existing_preempt_fences(vm); in xe_preempt_work_begin()
479 return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues); in xe_preempt_work_begin()
484 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work); in preempt_rebind_work_func() local
493 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in preempt_rebind_work_func()
494 trace_xe_vm_rebind_worker_enter(vm); in preempt_rebind_work_func()
496 down_write(&vm->lock); in preempt_rebind_work_func()
498 if (xe_vm_is_closed_or_banned(vm)) { in preempt_rebind_work_func()
499 up_write(&vm->lock); in preempt_rebind_work_func()
500 trace_xe_vm_rebind_worker_exit(vm); in preempt_rebind_work_func()
505 if (xe_vm_userptr_check_repin(vm)) { in preempt_rebind_work_func()
506 err = xe_vm_userptr_pin(vm); in preempt_rebind_work_func()
516 err = xe_preempt_work_begin(&exec, vm, &done); in preempt_rebind_work_func()
527 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count); in preempt_rebind_work_func()
531 err = xe_vm_rebind(vm, true); in preempt_rebind_work_func()
535 /* Wait on rebinds and munmap style VM unbinds */ in preempt_rebind_work_func()
536 wait = dma_resv_wait_timeout(xe_vm_resv(vm), in preempt_rebind_work_func()
549 down_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
550 if (retry_required(tries, vm)) { in preempt_rebind_work_func()
551 up_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
558 spin_lock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
559 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in preempt_rebind_work_func()
560 spin_unlock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
563 arm_preempt_fences(vm, &preempt_fences); in preempt_rebind_work_func()
564 resume_and_reinstall_preempt_fences(vm, &exec); in preempt_rebind_work_func()
565 up_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
571 trace_xe_vm_rebind_worker_retry(vm); in preempt_rebind_work_func()
576 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err); in preempt_rebind_work_func()
577 xe_vm_kill(vm, true); in preempt_rebind_work_func()
579 up_write(&vm->lock); in preempt_rebind_work_func()
583 trace_xe_vm_rebind_worker_exit(vm); in preempt_rebind_work_func()
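The worker's retry decision is taken under the userptr notifier lock: if userptrs were invalidated again while rebinding, the pass is abandoned and retried instead of arming the fences. A sketch of that step; the -EAGAIN/goto shape and the label name are assumptions, since they are not fully visible in the listing:

	down_read(&vm->userptr.notifier_lock);
	if (retry_required(tries, vm)) {
		up_read(&vm->userptr.notifier_lock);
		err = -EAGAIN;		/* assumed retry signal */
		goto out_unlock;	/* hypothetical label */
	}

	/* point of no return: arm and reinstall the preempt fences */
	arm_preempt_fences(vm, &preempt_fences);
	resume_and_reinstall_preempt_fences(vm, &exec);
	up_read(&vm->userptr.notifier_lock);
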
586 static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma) in __vma_userptr_invalidate() argument
598 if (!xe_vm_in_fault_mode(vm) && in __vma_userptr_invalidate()
600 spin_lock(&vm->userptr.invalidated_lock); in __vma_userptr_invalidate()
602 &vm->userptr.invalidated); in __vma_userptr_invalidate()
603 spin_unlock(&vm->userptr.invalidated_lock); in __vma_userptr_invalidate()
610 * to the vm. in __vma_userptr_invalidate()
612 dma_resv_iter_begin(&cursor, xe_vm_resv(vm), in __vma_userptr_invalidate()
618 err = dma_resv_wait_timeout(xe_vm_resv(vm), in __vma_userptr_invalidate()
623 if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) { in __vma_userptr_invalidate()
637 struct xe_vm *vm = xe_vma_vm(vma); in vma_userptr_invalidate() local
639 xe_assert(vm->xe, xe_vma_is_userptr(vma)); in vma_userptr_invalidate()
649 down_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
652 __vma_userptr_invalidate(vm, uvma); in vma_userptr_invalidate()
653 up_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
672 struct xe_vm *vm = xe_vma_vm(&uvma->vma); in xe_vma_userptr_force_invalidate() local
675 lockdep_assert_held(&vm->lock); in xe_vma_userptr_force_invalidate()
677 lockdep_assert_held(&vm->userptr.notifier_lock); in xe_vma_userptr_force_invalidate()
682 xe_vm_assert_held(vm); in xe_vma_userptr_force_invalidate()
687 __vma_userptr_invalidate(vm, uvma); in xe_vma_userptr_force_invalidate()
691 int xe_vm_userptr_pin(struct xe_vm *vm) in xe_vm_userptr_pin() argument
696 xe_assert(vm->xe, !xe_vm_in_fault_mode(vm)); in xe_vm_userptr_pin()
697 lockdep_assert_held_write(&vm->lock); in xe_vm_userptr_pin()
700 spin_lock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
701 xe_assert(vm->xe, list_empty(&vm->userptr.repin_list)); in xe_vm_userptr_pin()
702 list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated, in xe_vm_userptr_pin()
706 &vm->userptr.repin_list); in xe_vm_userptr_pin()
708 spin_unlock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
711 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, in xe_vm_userptr_pin()
730 xe_vm_lock(vm, false); in xe_vm_userptr_pin()
731 dma_resv_wait_timeout(xe_vm_resv(vm), in xe_vm_userptr_pin()
736 xe_vm_unlock(vm); in xe_vm_userptr_pin()
745 &vm->rebind_list); in xe_vm_userptr_pin()
750 down_write(&vm->userptr.notifier_lock); in xe_vm_userptr_pin()
751 spin_lock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
752 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, in xe_vm_userptr_pin()
756 &vm->userptr.invalidated); in xe_vm_userptr_pin()
758 spin_unlock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
759 up_write(&vm->userptr.notifier_lock); in xe_vm_userptr_pin()
765 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
767 * @vm: The VM.
769 * This function does an advisory check for whether the VM has userptrs that
775 int xe_vm_userptr_check_repin(struct xe_vm *vm) in xe_vm_userptr_check_repin() argument
777 return (list_empty_careful(&vm->userptr.repin_list) && in xe_vm_userptr_check_repin()
778 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN; in xe_vm_userptr_check_repin()
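Because the check uses list_empty_careful() without taking any lock, it is only advisory; callers use it as a cheap gate before the real, locked repin, exactly as the rebind worker does earlier in this listing:

	/* Sketch: advisory gate, then the real pin with vm->lock held for write. */
	if (xe_vm_userptr_check_repin(vm)) {
		err = xe_vm_userptr_pin(vm);
		if (err)
			goto out_unlock;	/* hypothetical label */
	}
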
850 static struct dma_fence *ops_execute(struct xe_vm *vm,
852 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
856 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) in xe_vm_rebind() argument
864 lockdep_assert_held(&vm->lock); in xe_vm_rebind()
865 if ((xe_vm_in_lr_mode(vm) && !rebind_worker) || in xe_vm_rebind()
866 list_empty(&vm->rebind_list)) in xe_vm_rebind()
869 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_rebind()
873 xe_vm_assert_held(vm); in xe_vm_rebind()
874 list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) { in xe_vm_rebind()
875 xe_assert(vm->xe, vma->tile_present); in xe_vm_rebind()
892 fence = ops_execute(vm, &vops); in xe_vm_rebind()
897 list_for_each_entry_safe(vma, next, &vm->rebind_list, in xe_vm_rebind()
911 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask) in xe_vma_rebind() argument
920 lockdep_assert_held(&vm->lock); in xe_vma_rebind()
921 xe_vm_assert_held(vm); in xe_vma_rebind()
922 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vma_rebind()
924 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vma_rebind()
925 for_each_tile(tile, vm->xe, id) { in xe_vma_rebind()
941 fence = ops_execute(vm, &vops); in xe_vma_rebind()
986 * xe_vm_range_rebind() - VM range (re)bind
987 * @vm: The VM which the range belongs to.
997 struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm, in xe_vm_range_rebind() argument
1009 lockdep_assert_held(&vm->lock); in xe_vm_range_rebind()
1010 xe_vm_assert_held(vm); in xe_vm_range_rebind()
1011 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vm_range_rebind()
1012 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma)); in xe_vm_range_rebind()
1014 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_range_rebind()
1015 for_each_tile(tile, vm->xe, id) { in xe_vm_range_rebind()
1031 fence = ops_execute(vm, &vops); in xe_vm_range_rebind()
1071 * xe_vm_range_unbind() - VM range unbind
1072 * @vm: The VM which the range belongs to.
1080 struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm, in xe_vm_range_unbind() argument
1090 lockdep_assert_held(&vm->lock); in xe_vm_range_unbind()
1091 xe_vm_assert_held(vm); in xe_vm_range_unbind()
1092 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vm_range_unbind()
1097 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_range_unbind()
1098 for_each_tile(tile, vm->xe, id) { in xe_vm_range_unbind()
1114 fence = ops_execute(vm, &vops); in xe_vm_range_unbind()
1139 static struct xe_vma *xe_vma_create(struct xe_vm *vm, in xe_vma_create() argument
1154 xe_assert(vm->xe, start < end); in xe_vma_create()
1155 xe_assert(vm->xe, end < vm->size); in xe_vma_create()
1184 vma->gpuva.vm = &vm->gpuvm; in xe_vma_create()
1192 for_each_tile(tile, vm->xe, id) in xe_vma_create()
1195 if (vm->xe->info.has_atomic_enable_pte_bit) in xe_vma_create()
1205 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base); in xe_vma_create()
1239 xe_vm_get(vm); in xe_vma_create()
1247 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy_late() local
1268 xe_vm_put(vm); in xe_vma_destroy_late()
1270 xe_vm_put(vm); in xe_vma_destroy_late()
1297 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy() local
1299 lockdep_assert_held_write(&vm->lock); in xe_vma_destroy()
1300 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy)); in xe_vma_destroy()
1303 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); in xe_vma_destroy()
1305 spin_lock(&vm->userptr.invalidated_lock); in xe_vma_destroy()
1306 xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link)); in xe_vma_destroy()
1308 spin_unlock(&vm->userptr.invalidated_lock); in xe_vma_destroy()
1315 xe_vm_assert_held(vm); in xe_vma_destroy()
1332 * @vma: The vma for which we want to lock the vm resv and any attached
1341 struct xe_vm *vm = xe_vma_vm(vma); in xe_vm_lock_vma() local
1345 XE_WARN_ON(!vm); in xe_vm_lock_vma()
1347 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in xe_vm_lock_vma()
1348 if (!err && bo && !bo->vm) in xe_vm_lock_vma()
1373 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) in xe_vm_find_overlapping_vma() argument
1377 lockdep_assert_held(&vm->lock); in xe_vm_find_overlapping_vma()
1379 if (xe_vm_is_closed_or_banned(vm)) in xe_vm_find_overlapping_vma()
1382 xe_assert(vm->xe, start + range <= vm->size); in xe_vm_find_overlapping_vma()
1384 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range); in xe_vm_find_overlapping_vma()
1389 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_insert_vma() argument
1393 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_insert_vma()
1394 lockdep_assert_held(&vm->lock); in xe_vm_insert_vma()
1396 mutex_lock(&vm->snap_mutex); in xe_vm_insert_vma()
1397 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); in xe_vm_insert_vma()
1398 mutex_unlock(&vm->snap_mutex); in xe_vm_insert_vma()
1404 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_remove_vma() argument
1406 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_remove_vma()
1407 lockdep_assert_held(&vm->lock); in xe_vm_remove_vma()
1409 mutex_lock(&vm->snap_mutex); in xe_vm_remove_vma()
1411 mutex_unlock(&vm->snap_mutex); in xe_vm_remove_vma()
1412 if (vm->usm.last_fault_vma == vma) in xe_vm_remove_vma()
1413 vm->usm.last_fault_vma = NULL; in xe_vm_remove_vma()
1565 * given tile and vm.
1568 * @vm: vm to set up for.
1578 struct xe_vm *vm) in xe_vm_create_scratch() argument
1583 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) { in xe_vm_create_scratch()
1584 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i); in xe_vm_create_scratch()
1585 if (IS_ERR(vm->scratch_pt[id][i])) in xe_vm_create_scratch()
1586 return PTR_ERR(vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1588 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1595 static void xe_vm_free_scratch(struct xe_vm *vm) in xe_vm_free_scratch() argument
1600 if (!xe_vm_has_scratch(vm)) in xe_vm_free_scratch()
1603 for_each_tile(tile, vm->xe, id) { in xe_vm_free_scratch()
1606 if (!vm->pt_root[id]) in xe_vm_free_scratch()
1609 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i) in xe_vm_free_scratch()
1610 if (vm->scratch_pt[id][i]) in xe_vm_free_scratch()
1611 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL); in xe_vm_free_scratch()
1618 struct xe_vm *vm; in xe_vm_create() local
1624 * Since the GSCCS is not user-accessible, we don't expect a GSC VM to in xe_vm_create()
1629 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in xe_vm_create()
1630 if (!vm) in xe_vm_create()
1633 vm->xe = xe; in xe_vm_create()
1635 vm->size = 1ull << xe->info.va_bits; in xe_vm_create()
1637 vm->flags = flags; in xe_vm_create()
1642 * under a user-VM lock when the PXP session is started at exec_queue in xe_vm_create()
1650 __init_rwsem(&vm->lock, "gsc_vm", &gsc_vm_key); in xe_vm_create()
1652 init_rwsem(&vm->lock); in xe_vm_create()
1654 mutex_init(&vm->snap_mutex); in xe_vm_create()
1656 INIT_LIST_HEAD(&vm->rebind_list); in xe_vm_create()
1658 INIT_LIST_HEAD(&vm->userptr.repin_list); in xe_vm_create()
1659 INIT_LIST_HEAD(&vm->userptr.invalidated); in xe_vm_create()
1660 init_rwsem(&vm->userptr.notifier_lock); in xe_vm_create()
1661 spin_lock_init(&vm->userptr.invalidated_lock); in xe_vm_create()
1663 ttm_lru_bulk_move_init(&vm->lru_bulk_move); in xe_vm_create()
1665 INIT_WORK(&vm->destroy_work, vm_destroy_work_func); in xe_vm_create()
1667 INIT_LIST_HEAD(&vm->preempt.exec_queues); in xe_vm_create()
1668 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */ in xe_vm_create()
1671 xe_range_fence_tree_init(&vm->rftree[id]); in xe_vm_create()
1673 vm->pt_ops = &xelp_pt_ops; in xe_vm_create()
1678 * scheduler drops all the references to it, hence protecting the VM in xe_vm_create()
1690 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm, in xe_vm_create()
1691 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops); in xe_vm_create()
1695 err = xe_vm_lock(vm, true); in xe_vm_create()
1700 vm->flags |= XE_VM_FLAG_64K; in xe_vm_create()
1707 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level); in xe_vm_create()
1708 if (IS_ERR(vm->pt_root[id])) { in xe_vm_create()
1709 err = PTR_ERR(vm->pt_root[id]); in xe_vm_create()
1710 vm->pt_root[id] = NULL; in xe_vm_create()
1715 if (xe_vm_has_scratch(vm)) { in xe_vm_create()
1717 if (!vm->pt_root[id]) in xe_vm_create()
1720 err = xe_vm_create_scratch(xe, tile, vm); in xe_vm_create()
1724 vm->batch_invalidate_tlb = true; in xe_vm_create()
1727 if (vm->flags & XE_VM_FLAG_LR_MODE) { in xe_vm_create()
1728 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); in xe_vm_create()
1729 vm->batch_invalidate_tlb = false; in xe_vm_create()
1734 if (!vm->pt_root[id]) in xe_vm_create()
1737 xe_pt_populate_empty(tile, vm, vm->pt_root[id]); in xe_vm_create()
1739 xe_vm_unlock(vm); in xe_vm_create()
1741 /* Kernel migration VM shouldn't have a circular loop.. */ in xe_vm_create()
1747 if (!vm->pt_root[id]) in xe_vm_create()
1755 vm->q[id] = q; in xe_vm_create()
1761 err = xe_svm_init(vm); in xe_vm_create()
1767 vm->composite_fence_ctx = dma_fence_context_alloc(1); in xe_vm_create()
1769 trace_xe_vm_create(vm); in xe_vm_create()
1771 return vm; in xe_vm_create()
1774 xe_vm_unlock(vm); in xe_vm_create()
1776 xe_vm_close_and_put(vm); in xe_vm_create()
1780 mutex_destroy(&vm->snap_mutex); in xe_vm_create()
1782 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_create()
1783 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); in xe_vm_create()
1784 kfree(vm); in xe_vm_create()
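A usage sketch of the create/teardown pairing visible in the ioctl paths later in this listing; the flags value is whatever the caller chose (e.g. LR mode) rather than anything prescribed here:

	vm = xe_vm_create(xe, flags);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	/* ... use the VM ... */

	xe_vm_close_and_put(vm);	/* closes the VM and drops this reference */
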
1790 static void xe_vm_close(struct xe_vm *vm) in xe_vm_close() argument
1792 struct xe_device *xe = vm->xe; in xe_vm_close()
1798 down_write(&vm->lock); in xe_vm_close()
1799 if (xe_vm_in_fault_mode(vm)) in xe_vm_close()
1800 xe_svm_notifier_lock(vm); in xe_vm_close()
1802 vm->size = 0; in xe_vm_close()
1804 if (!((vm->flags & XE_VM_FLAG_MIGRATION))) { in xe_vm_close()
1810 dma_resv_wait_timeout(xe_vm_resv(vm), in xe_vm_close()
1816 if (vm->pt_root[id]) in xe_vm_close()
1817 xe_pt_clear(xe, vm->pt_root[id]); in xe_vm_close()
1820 xe_gt_tlb_invalidation_vm(gt, vm); in xe_vm_close()
1824 if (xe_vm_in_fault_mode(vm)) in xe_vm_close()
1825 xe_svm_notifier_unlock(vm); in xe_vm_close()
1826 up_write(&vm->lock); in xe_vm_close()
1832 void xe_vm_close_and_put(struct xe_vm *vm) in xe_vm_close_and_put() argument
1835 struct xe_device *xe = vm->xe; in xe_vm_close_and_put()
1841 xe_assert(xe, !vm->preempt.num_exec_queues); in xe_vm_close_and_put()
1843 xe_vm_close(vm); in xe_vm_close_and_put()
1844 if (xe_vm_in_preempt_fence_mode(vm)) in xe_vm_close_and_put()
1845 flush_work(&vm->preempt.rebind_work); in xe_vm_close_and_put()
1846 if (xe_vm_in_fault_mode(vm)) in xe_vm_close_and_put()
1847 xe_svm_close(vm); in xe_vm_close_and_put()
1849 down_write(&vm->lock); in xe_vm_close_and_put()
1851 if (vm->q[id]) in xe_vm_close_and_put()
1852 xe_exec_queue_last_fence_put(vm->q[id], vm); in xe_vm_close_and_put()
1854 up_write(&vm->lock); in xe_vm_close_and_put()
1857 if (vm->q[id]) { in xe_vm_close_and_put()
1858 xe_exec_queue_kill(vm->q[id]); in xe_vm_close_and_put()
1859 xe_exec_queue_put(vm->q[id]); in xe_vm_close_and_put()
1860 vm->q[id] = NULL; in xe_vm_close_and_put()
1864 down_write(&vm->lock); in xe_vm_close_and_put()
1865 xe_vm_lock(vm, false); in xe_vm_close_and_put()
1866 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) { in xe_vm_close_and_put()
1870 down_read(&vm->userptr.notifier_lock); in xe_vm_close_and_put()
1872 up_read(&vm->userptr.notifier_lock); in xe_vm_close_and_put()
1875 xe_vm_remove_vma(vm, vma); in xe_vm_close_and_put()
1878 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) { in xe_vm_close_and_put()
1889 * All vm operations will add shared fences to resv. in xe_vm_close_and_put()
1895 xe_vm_free_scratch(vm); in xe_vm_close_and_put()
1898 if (vm->pt_root[id]) { in xe_vm_close_and_put()
1899 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL); in xe_vm_close_and_put()
1900 vm->pt_root[id] = NULL; in xe_vm_close_and_put()
1903 xe_vm_unlock(vm); in xe_vm_close_and_put()
1906 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL in xe_vm_close_and_put()
1916 if (xe_vm_in_fault_mode(vm)) in xe_vm_close_and_put()
1917 xe_svm_fini(vm); in xe_vm_close_and_put()
1919 up_write(&vm->lock); in xe_vm_close_and_put()
1922 if (vm->usm.asid) { in xe_vm_close_and_put()
1926 xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION)); in xe_vm_close_and_put()
1928 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); in xe_vm_close_and_put()
1929 xe_assert(xe, lookup == vm); in xe_vm_close_and_put()
1934 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_close_and_put()
1936 xe_vm_put(vm); in xe_vm_close_and_put()
1941 struct xe_vm *vm = in vm_destroy_work_func() local
1943 struct xe_device *xe = vm->xe; in vm_destroy_work_func()
1948 xe_assert(xe, !vm->size); in vm_destroy_work_func()
1950 if (xe_vm_in_preempt_fence_mode(vm)) in vm_destroy_work_func()
1951 flush_work(&vm->preempt.rebind_work); in vm_destroy_work_func()
1953 mutex_destroy(&vm->snap_mutex); in vm_destroy_work_func()
1955 if (vm->flags & XE_VM_FLAG_LR_MODE) in vm_destroy_work_func()
1959 XE_WARN_ON(vm->pt_root[id]); in vm_destroy_work_func()
1961 trace_xe_vm_free(vm); in vm_destroy_work_func()
1963 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); in vm_destroy_work_func()
1965 if (vm->xef) in vm_destroy_work_func()
1966 xe_file_put(vm->xef); in vm_destroy_work_func()
1968 kfree(vm); in vm_destroy_work_func()
1973 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm); in xe_vm_free() local
1975 /* To destroy the VM we need to be able to sleep */ in xe_vm_free()
1976 queue_work(system_unbound_wq, &vm->destroy_work); in xe_vm_free()
1981 struct xe_vm *vm; in xe_vm_lookup() local
1983 mutex_lock(&xef->vm.lock); in xe_vm_lookup()
1984 vm = xa_load(&xef->vm.xa, id); in xe_vm_lookup()
1985 if (vm) in xe_vm_lookup()
1986 xe_vm_get(vm); in xe_vm_lookup()
1987 mutex_unlock(&xef->vm.lock); in xe_vm_lookup()
1989 return vm; in xe_vm_lookup()
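xe_vm_lookup() takes an extra reference under xef->vm.lock before returning, so every successful lookup must be balanced with xe_vm_put(), as the bind ioctl below does. A minimal sketch; the exact errno is not shown in the listing and is assumed:

	vm = xe_vm_lookup(xef, args->vm_id);
	if (XE_IOCTL_DBG(xe, !vm))
		return -ENOENT;		/* errno assumed */

	/* ... validate the arguments, take vm->lock, run the bind ... */

	xe_vm_put(vm);
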
1992 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile) in xe_vm_pdp4_descriptor() argument
1994 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0, in xe_vm_pdp4_descriptor()
1999 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in to_wait_exec_queue() argument
2001 return q ? q : vm->q[0]; in to_wait_exec_queue()
2030 struct xe_vm *vm; in xe_vm_create_ioctl() local
2066 vm = xe_vm_create(xe, flags); in xe_vm_create_ioctl()
2067 if (IS_ERR(vm)) in xe_vm_create_ioctl()
2068 return PTR_ERR(vm); in xe_vm_create_ioctl()
2072 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, in xe_vm_create_ioctl()
2079 vm->usm.asid = asid; in xe_vm_create_ioctl()
2082 vm->xef = xe_file_get(xef); in xe_vm_create_ioctl()
2084 /* Record BO memory for VM pagetable created against client */ in xe_vm_create_ioctl()
2086 if (vm->pt_root[id]) in xe_vm_create_ioctl()
2087 xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo); in xe_vm_create_ioctl()
2091 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); in xe_vm_create_ioctl()
2095 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); in xe_vm_create_ioctl()
2104 xe_vm_close_and_put(vm); in xe_vm_create_ioctl()
2115 struct xe_vm *vm; in xe_vm_destroy_ioctl() local
2122 mutex_lock(&xef->vm.lock); in xe_vm_destroy_ioctl()
2123 vm = xa_load(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
2124 if (XE_IOCTL_DBG(xe, !vm)) in xe_vm_destroy_ioctl()
2126 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues)) in xe_vm_destroy_ioctl()
2129 xa_erase(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
2130 mutex_unlock(&xef->vm.lock); in xe_vm_destroy_ioctl()
2133 xe_vm_close_and_put(vm); in xe_vm_destroy_ioctl()
2144 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, in prep_vma_destroy() argument
2147 down_read(&vm->userptr.notifier_lock); in prep_vma_destroy()
2149 up_read(&vm->userptr.notifier_lock); in prep_vma_destroy()
2151 xe_vm_remove_vma(vm, vma); in prep_vma_destroy()
2209 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, in vm_bind_ioctl_ops_create() argument
2220 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_create()
2222 vm_dbg(&vm->xe->drm, in vm_bind_ioctl_ops_create()
2230 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range, in vm_bind_ioctl_ops_create()
2234 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
2237 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
2240 xe_assert(vm->xe, bo); in vm_bind_ioctl_ops_create()
2246 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj); in vm_bind_ioctl_ops_create()
2257 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in vm_bind_ioctl_ops_create()
2280 print_op(vm->xe, __op); in vm_bind_ioctl_ops_create()
2287 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, in new_vma() argument
2295 lockdep_assert_held_write(&vm->lock); in new_vma()
2301 if (!bo->vm) { in new_vma()
2302 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm)); in new_vma()
2315 vma = xe_vma_create(vm, bo, op->gem.offset, in new_vma()
2323 else if (!xe_vma_has_no_bo(vma) && !bo->vm) in new_vma()
2324 err = add_preempt_fences(vm, bo); in new_vma()
2331 prep_vma_destroy(vm, vma, false); in new_vma()
2371 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) in xe_vma_op_commit() argument
2375 lockdep_assert_held_write(&vm->lock); in xe_vma_op_commit()
2379 err |= xe_vm_insert_vma(vm, op->map.vma); in xe_vma_op_commit()
2388 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va), in xe_vma_op_commit()
2393 err |= xe_vm_insert_vma(vm, op->remap.prev); in xe_vma_op_commit()
2403 err |= xe_vm_insert_vma(vm, op->remap.next); in xe_vma_op_commit()
2413 /* Adjust for partial unbind after removing VMA from VM */ in xe_vma_op_commit()
2421 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true); in xe_vma_op_commit()
2428 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_commit()
2434 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, in vm_bind_ioctl_ops_parse() argument
2437 struct xe_device *xe = vm->xe; in vm_bind_ioctl_ops_parse()
2443 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_parse()
2445 for_each_tile(tile, vm->xe, id) in vm_bind_ioctl_ops_parse()
2469 vma = new_vma(vm, &op->base.map, op->map.pat_index, in vm_bind_ioctl_ops_parse()
2475 if ((op->map.immediate || !xe_vm_in_fault_mode(vm)) && in vm_bind_ioctl_ops_parse()
2495 xe_svm_has_mapping(vm, start, end)) in vm_bind_ioctl_ops_parse()
2514 vma = new_vma(vm, op->base.remap.prev, in vm_bind_ioctl_ops_parse()
2544 vma = new_vma(vm, op->base.remap.next, in vm_bind_ioctl_ops_parse()
2579 xe_svm_has_mapping(vm, xe_vma_start(vma), in vm_bind_ioctl_ops_parse()
2599 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in vm_bind_ioctl_ops_parse()
2602 err = xe_vma_op_commit(vm, op); in vm_bind_ioctl_ops_parse()
2610 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, in xe_vma_op_unwind() argument
2614 lockdep_assert_held_write(&vm->lock); in xe_vma_op_unwind()
2619 prep_vma_destroy(vm, op->map.vma, post_commit); in xe_vma_op_unwind()
2628 down_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2630 up_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2632 xe_vm_insert_vma(vm, vma); in xe_vma_op_unwind()
2641 prep_vma_destroy(vm, op->remap.prev, prev_post_commit); in xe_vma_op_unwind()
2645 prep_vma_destroy(vm, op->remap.next, next_post_commit); in xe_vma_op_unwind()
2649 down_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2651 up_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2653 xe_vm_insert_vma(vm, vma); in xe_vma_op_unwind()
2661 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_unwind()
2665 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, in vm_bind_ioctl_ops_unwind() argument
2681 xe_vma_op_unwind(vm, op, in vm_bind_ioctl_ops_unwind()
2693 struct xe_vm *vm = xe_vma_vm(vma); in vma_lock_and_validate() local
2697 if (!bo->vm) in vma_lock_and_validate()
2700 err = xe_bo_validate(bo, vm, in vma_lock_and_validate()
2701 !xe_vm_in_preempt_fence_mode(vm)); in vma_lock_and_validate()
2722 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, in op_lock_and_prep() argument
2730 !xe_vm_in_fault_mode(vm) || in op_lock_and_prep()
2760 xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type)); in op_lock_and_prep()
2771 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in op_lock_and_prep()
2778 struct xe_vm *vm, in vm_bind_ioctl_ops_lock_and_prep() argument
2784 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in vm_bind_ioctl_ops_lock_and_prep()
2789 err = op_lock_and_prep(exec, vm, op); in vm_bind_ioctl_ops_lock_and_prep()
2796 vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK) in vm_bind_ioctl_ops_lock_and_prep()
2837 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_ops_setup_tile_args() argument
2844 for_each_tile(tile, vm->xe, id) { in vm_ops_setup_tile_args()
2853 if (vm->pt_root[id] && !list_empty(&q->multi_gt_list)) in vm_ops_setup_tile_args()
2856 vops->pt_update_ops[id].q = vm->q[id]; in vm_ops_setup_tile_args()
2863 static struct dma_fence *ops_execute(struct xe_vm *vm, in ops_execute() argument
2873 number_tiles = vm_ops_setup_tile_args(vm, vops); in ops_execute()
2886 for_each_tile(tile, vm->xe, id) { in ops_execute()
2899 for_each_tile(tile, vm->xe, id) { in ops_execute()
2913 vm->composite_fence_ctx, in ops_execute()
2914 vm->composite_fence_seqno++, in ops_execute()
2917 --vm->composite_fence_seqno; in ops_execute()
2924 for_each_tile(tile, vm->xe, id) { in ops_execute()
2934 for_each_tile(tile, vm->xe, id) { in ops_execute()
2946 trace_xe_vm_ops_fail(vm); in ops_execute()
2957 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op, in op_add_ufence() argument
2976 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in op_add_ufence()
2980 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_fini() argument
2983 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q); in vm_bind_ioctl_ops_fini()
2991 op_add_ufence(vm, op, ufence); in vm_bind_ioctl_ops_fini()
3004 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence); in vm_bind_ioctl_ops_fini()
3008 static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm, in vm_bind_ioctl_ops_execute() argument
3015 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_execute()
3020 err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops); in vm_bind_ioctl_ops_execute()
3027 fence = ops_execute(vm, vops); in vm_bind_ioctl_ops_execute()
3030 vm_bind_ioctl_ops_fini(vm, vops, NULL); in vm_bind_ioctl_ops_execute()
3034 vm_bind_ioctl_ops_fini(vm, vops, fence); in vm_bind_ioctl_ops_execute()
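The lock-and-prep step above is driven by a drm_exec loop (drm_exec_lock_obj() shows up throughout this listing). A sketch of the standard drm_exec pattern assumed here; the init flags, the third argument, and the cleanup label are assumptions and vary by kernel version:

	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
		drm_exec_retry_on_contention(&exec);
		if (err)
			goto unlock;	/* hypothetical label */
	}

	fence = ops_execute(vm, vops);	/* then fini the ops and drm_exec_fini(&exec) */
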
3060 static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, in vm_bind_ioctl_check_args() argument
3111 (!xe_vm_in_fault_mode(vm) || in vm_bind_ioctl_check_args()
3185 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm, in vm_bind_ioctl_signal_fences() argument
3194 to_wait_exec_queue(vm, q), vm); in vm_bind_ioctl_signal_fences()
3201 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm, in vm_bind_ioctl_signal_fences()
3208 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm, in xe_vma_ops_init() argument
3214 vops->vm = vm; in xe_vma_ops_init()
3283 struct xe_vm *vm; in xe_vm_bind_ioctl() local
3293 vm = xe_vm_lookup(xef, args->vm_id); in xe_vm_bind_ioctl()
3294 if (XE_IOCTL_DBG(xe, !vm)) in xe_vm_bind_ioctl()
3297 err = vm_bind_ioctl_check_args(xe, vm, args, &bind_ops); in xe_vm_bind_ioctl()
3315 xe_svm_flush(vm); in xe_vm_bind_ioctl()
3317 err = down_write_killable(&vm->lock); in xe_vm_bind_ioctl()
3321 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { in xe_vm_bind_ioctl()
3330 if (XE_IOCTL_DBG(xe, range > vm->size) || in xe_vm_bind_ioctl()
3331 XE_IOCTL_DBG(xe, addr > vm->size - range)) { in xe_vm_bind_ioctl()
3394 (xe_vm_in_lr_mode(vm) ? in xe_vm_bind_ioctl()
3415 xe_vma_ops_init(&vops, vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
3425 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset, in xe_vm_bind_ioctl()
3434 err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops); in xe_vm_bind_ioctl()
3441 vm->xe->vm_inject_error_position = in xe_vm_bind_ioctl()
3442 (vm->xe->vm_inject_error_position + 1) % in xe_vm_bind_ioctl()
3458 fence = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_ioctl()
3466 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds); in xe_vm_bind_ioctl()
3470 drm_gpuva_ops_free(&vm->gpuvm, ops[i]); in xe_vm_bind_ioctl()
3473 err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
3482 up_write(&vm->lock); in xe_vm_bind_ioctl()
3487 xe_vm_put(vm); in xe_vm_bind_ioctl()
3496 * xe_vm_bind_kernel_bo - bind a kernel BO to a VM
3497 * @vm: VM to bind the BO to
3503 * Execute a VM bind map operation on a kernel-owned BO to bind it into a
3504 * kernel-owned VM.
3509 struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo, in xe_vm_bind_kernel_bo() argument
3519 xe_vm_get(vm); in xe_vm_bind_kernel_bo()
3523 down_write(&vm->lock); in xe_vm_bind_kernel_bo()
3525 xe_vma_ops_init(&vops, vm, q, NULL, 0); in xe_vm_bind_kernel_bo()
3527 ops = vm_bind_ioctl_ops_create(vm, bo, 0, addr, bo->size, in xe_vm_bind_kernel_bo()
3529 vm->xe->pat.idx[cache_lvl]); in xe_vm_bind_kernel_bo()
3535 err = vm_bind_ioctl_ops_parse(vm, ops, &vops); in xe_vm_bind_kernel_bo()
3539 xe_assert(vm->xe, !list_empty(&vops.list)); in xe_vm_bind_kernel_bo()
3545 fence = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_kernel_bo()
3551 vm_bind_ioctl_ops_unwind(vm, &ops, 1); in xe_vm_bind_kernel_bo()
3554 drm_gpuva_ops_free(&vm->gpuvm, ops); in xe_vm_bind_kernel_bo()
3557 up_write(&vm->lock); in xe_vm_bind_kernel_bo()
3561 xe_vm_put(vm); in xe_vm_bind_kernel_bo()
3571 * xe_vm_lock() - Lock the vm's dma_resv object
3572 * @vm: The struct xe_vm whose lock is to be locked
3579 int xe_vm_lock(struct xe_vm *vm, bool intr) in xe_vm_lock() argument
3582 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL); in xe_vm_lock()
3584 return dma_resv_lock(xe_vm_resv(vm), NULL); in xe_vm_lock()
3588 * xe_vm_unlock() - Unlock the vm's dma_resv object
3589 * @vm: The struct xe_vm whose lock is to be released.
3593 void xe_vm_unlock(struct xe_vm *vm) in xe_vm_unlock() argument
3595 dma_resv_unlock(xe_vm_resv(vm)); in xe_vm_unlock()
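Taken together, xe_vm_lock()/xe_vm_unlock() are a thin bracket around the VM's dma-resv, used elsewhere in this listing around page-table setup and teardown. A minimal usage sketch:

	err = xe_vm_lock(vm, true);	/* true: interruptible wait */
	if (err)
		return err;

	/* ... touch state protected by xe_vm_resv(vm) ... */

	xe_vm_unlock(vm);
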
3677 int xe_vm_validate_protected(struct xe_vm *vm) in xe_vm_validate_protected() argument
3682 if (!vm) in xe_vm_validate_protected()
3685 mutex_lock(&vm->snap_mutex); in xe_vm_validate_protected()
3687 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_validate_protected()
3696 err = xe_pxp_bo_key_check(vm->xe->pxp, bo); in xe_vm_validate_protected()
3702 mutex_unlock(&vm->snap_mutex); in xe_vm_validate_protected()
3717 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm) in xe_vm_snapshot_capture() argument
3723 if (!vm) in xe_vm_snapshot_capture()
3726 mutex_lock(&vm->snap_mutex); in xe_vm_snapshot_capture()
3727 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
3741 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
3771 mutex_unlock(&vm->snap_mutex); in xe_vm_snapshot_capture()