Lines Matching full:vm
40 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) in xe_vm_obj() argument
42 return vm->gpuvm.r_obj; in xe_vm_obj()
51 * without the vm->userptr.notifier_lock held. There is no guarantee that the
68 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_userptr_pin_pages() local
69 struct xe_device *xe = vm->xe; in xe_vma_userptr_pin_pages()
77 lockdep_assert_held(&vm->lock); in xe_vma_userptr_pin_pages()
175 static bool preempt_fences_waiting(struct xe_vm *vm) in preempt_fences_waiting() argument
179 lockdep_assert_held(&vm->lock); in preempt_fences_waiting()
180 xe_vm_assert_held(vm); in preempt_fences_waiting()
182 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) { in preempt_fences_waiting()
201 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, in alloc_preempt_fences() argument
204 lockdep_assert_held(&vm->lock); in alloc_preempt_fences()
205 xe_vm_assert_held(vm); in alloc_preempt_fences()
207 if (*count >= vm->preempt.num_exec_queues) in alloc_preempt_fences()
210 for (; *count < vm->preempt.num_exec_queues; ++(*count)) { in alloc_preempt_fences()
222 static int wait_for_existing_preempt_fences(struct xe_vm *vm) in wait_for_existing_preempt_fences() argument
226 xe_vm_assert_held(vm); in wait_for_existing_preempt_fences()
228 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) { in wait_for_existing_preempt_fences()
242 static bool xe_vm_is_idle(struct xe_vm *vm) in xe_vm_is_idle() argument
246 xe_vm_assert_held(vm); in xe_vm_is_idle()
247 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) { in xe_vm_is_idle()
255 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) in arm_preempt_fences() argument
260 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) { in arm_preempt_fences()
264 xe_assert(vm->xe, link != list); in arm_preempt_fences()
274 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo) in add_preempt_fences() argument
279 if (!vm->preempt.num_exec_queues) in add_preempt_fences()
286 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues); in add_preempt_fences()
290 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) in add_preempt_fences()
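add_preempt_fences() above follows the usual dma_resv reserve-then-add pattern: fence slots are reserved for every preempt-mode exec queue before any fence is attached to the BO's reservation object. A minimal sketch of that pattern, assuming an already-locked resv; the variable names and the usage class are illustrative, not taken from the driver:

        /* Reserve one slot per fence first; adding a fence must not allocate. */
        err = dma_resv_reserve_fences(resv, num_fences);
        if (err)
                return err;

        /* With slots reserved, attaching the fences cannot fail. */
        for (i = 0; i < num_fences; i++)
                dma_resv_add_fence(resv, fences[i], DMA_RESV_USAGE_BOOKKEEP);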
302 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm, in resume_and_reinstall_preempt_fences() argument
307 lockdep_assert_held(&vm->lock); in resume_and_reinstall_preempt_fences()
308 xe_vm_assert_held(vm); in resume_and_reinstall_preempt_fences()
310 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) { in resume_and_reinstall_preempt_fences()
313 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence, in resume_and_reinstall_preempt_fences()
318 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_add_compute_exec_queue() argument
321 .vm = &vm->gpuvm, in xe_vm_add_compute_exec_queue()
330 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in xe_vm_add_compute_exec_queue()
332 down_write(&vm->lock); in xe_vm_add_compute_exec_queue()
344 list_add(&q->compute.link, &vm->preempt.exec_queues); in xe_vm_add_compute_exec_queue()
345 ++vm->preempt.num_exec_queues; in xe_vm_add_compute_exec_queue()
348 down_read(&vm->userptr.notifier_lock); in xe_vm_add_compute_exec_queue()
350 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence, in xe_vm_add_compute_exec_queue()
354 * Check to see if a preemption on VM is in flight or userptr in xe_vm_add_compute_exec_queue()
356 * other preempt fences on the VM. in xe_vm_add_compute_exec_queue()
358 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm); in xe_vm_add_compute_exec_queue()
362 up_read(&vm->userptr.notifier_lock); in xe_vm_add_compute_exec_queue()
367 up_write(&vm->lock); in xe_vm_add_compute_exec_queue()
373 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
374 * @vm: The VM.
377 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_remove_compute_exec_queue() argument
379 if (!xe_vm_in_preempt_fence_mode(vm)) in xe_vm_remove_compute_exec_queue()
382 down_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
384 --vm->preempt.num_exec_queues; in xe_vm_remove_compute_exec_queue()
390 up_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
394 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
396 * @vm: The VM.
398 * This function checks for whether the VM has userptrs that need repinning,
404 int __xe_vm_userptr_needs_repin(struct xe_vm *vm) in __xe_vm_userptr_needs_repin() argument
406 lockdep_assert_held_read(&vm->userptr.notifier_lock); in __xe_vm_userptr_needs_repin()
408 return (list_empty(&vm->userptr.repin_list) && in __xe_vm_userptr_needs_repin()
409 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN; in __xe_vm_userptr_needs_repin()
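As the lockdep assertion above indicates, __xe_vm_userptr_needs_repin() may only be called with vm->userptr.notifier_lock held, and it returns -EAGAIN when either userptr list is non-empty. A minimal caller sketch built only from functions visible in this listing; the wrapper name is hypothetical:

        /* Hypothetical helper: sample the repin state under the notifier lock. */
        static bool vm_userptr_needs_repin(struct xe_vm *vm)
        {
                int err;

                down_read(&vm->userptr.notifier_lock);
                err = __xe_vm_userptr_needs_repin(vm);
                up_read(&vm->userptr.notifier_lock);

                return err == -EAGAIN;
        }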
414 static void xe_vm_kill(struct xe_vm *vm) in xe_vm_kill() argument
418 lockdep_assert_held(&vm->lock); in xe_vm_kill()
420 xe_vm_lock(vm, false); in xe_vm_kill()
421 vm->flags |= XE_VM_FLAG_BANNED; in xe_vm_kill()
422 trace_xe_vm_kill(vm); in xe_vm_kill()
424 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) in xe_vm_kill()
426 xe_vm_unlock(vm); in xe_vm_kill()
428 /* TODO: Inform user the VM is banned */ in xe_vm_kill()
466 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_gpuvm_validate() local
470 lockdep_assert_held(&vm->lock); in xe_gpuvm_validate()
473 &vm->rebind_list); in xe_gpuvm_validate()
475 ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false); in xe_gpuvm_validate()
483 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm, in xe_preempt_work_begin() argument
492 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues + in xe_preempt_work_begin()
493 vm->xe->info.tile_count); in xe_preempt_work_begin()
497 if (xe_vm_is_idle(vm)) { in xe_preempt_work_begin()
498 vm->preempt.rebind_deactivated = true; in xe_preempt_work_begin()
503 if (!preempt_fences_waiting(vm)) { in xe_preempt_work_begin()
508 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues); in xe_preempt_work_begin()
512 err = wait_for_existing_preempt_fences(vm); in xe_preempt_work_begin()
516 return drm_gpuvm_validate(&vm->gpuvm, exec); in xe_preempt_work_begin()
521 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work); in preempt_rebind_work_func() local
531 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in preempt_rebind_work_func()
532 trace_xe_vm_rebind_worker_enter(vm); in preempt_rebind_work_func()
534 down_write(&vm->lock); in preempt_rebind_work_func()
536 if (xe_vm_is_closed_or_banned(vm)) { in preempt_rebind_work_func()
537 up_write(&vm->lock); in preempt_rebind_work_func()
538 trace_xe_vm_rebind_worker_exit(vm); in preempt_rebind_work_func()
543 if (xe_vm_userptr_check_repin(vm)) { in preempt_rebind_work_func()
544 err = xe_vm_userptr_pin(vm); in preempt_rebind_work_func()
554 err = xe_preempt_work_begin(&exec, vm, &done); in preempt_rebind_work_func()
565 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count); in preempt_rebind_work_func()
569 rebind_fence = xe_vm_rebind(vm, true); in preempt_rebind_work_func()
580 /* Wait on munmap style VM unbinds */ in preempt_rebind_work_func()
581 wait = dma_resv_wait_timeout(xe_vm_resv(vm), in preempt_rebind_work_func()
594 down_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
595 if (retry_required(tries, vm)) { in preempt_rebind_work_func()
596 up_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
603 spin_lock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
604 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in preempt_rebind_work_func()
605 spin_unlock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
608 arm_preempt_fences(vm, &preempt_fences); in preempt_rebind_work_func()
609 resume_and_reinstall_preempt_fences(vm, &exec); in preempt_rebind_work_func()
610 up_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
616 trace_xe_vm_rebind_worker_retry(vm); in preempt_rebind_work_func()
621 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err); in preempt_rebind_work_func()
622 xe_vm_kill(vm); in preempt_rebind_work_func()
624 up_write(&vm->lock); in preempt_rebind_work_func()
628 trace_xe_vm_rebind_worker_exit(vm); in preempt_rebind_work_func()
638 struct xe_vm *vm = xe_vma_vm(vma); in vma_userptr_invalidate() local
643 xe_assert(vm->xe, xe_vma_is_userptr(vma)); in vma_userptr_invalidate()
649 down_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
654 up_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
662 if (!xe_vm_in_fault_mode(vm) && in vma_userptr_invalidate()
664 spin_lock(&vm->userptr.invalidated_lock); in vma_userptr_invalidate()
666 &vm->userptr.invalidated); in vma_userptr_invalidate()
667 spin_unlock(&vm->userptr.invalidated_lock); in vma_userptr_invalidate()
670 up_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
676 * to the vm. in vma_userptr_invalidate()
678 dma_resv_iter_begin(&cursor, xe_vm_resv(vm), in vma_userptr_invalidate()
684 err = dma_resv_wait_timeout(xe_vm_resv(vm), in vma_userptr_invalidate()
689 if (xe_vm_in_fault_mode(vm)) { in vma_userptr_invalidate()
703 int xe_vm_userptr_pin(struct xe_vm *vm) in xe_vm_userptr_pin() argument
709 lockdep_assert_held_write(&vm->lock); in xe_vm_userptr_pin()
712 spin_lock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
713 list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated, in xe_vm_userptr_pin()
717 &vm->userptr.repin_list); in xe_vm_userptr_pin()
719 spin_unlock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
722 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, in xe_vm_userptr_pin()
729 list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list); in xe_vm_userptr_pin()
736 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
738 * @vm: The VM.
740 * This function does an advisory check for whether the VM has userptrs that
746 int xe_vm_userptr_check_repin(struct xe_vm *vm) in xe_vm_userptr_check_repin() argument
748 return (list_empty_careful(&vm->userptr.repin_list) && in xe_vm_userptr_check_repin()
749 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN; in xe_vm_userptr_check_repin()
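In contrast to the strict check above, xe_vm_userptr_check_repin() is advisory and lockless (list_empty_careful() without the notifier lock), so callers such as the rebind worker use it only as a hint before taking the heavier pin path. A minimal sketch of that pattern; the helper name is hypothetical and the caller is assumed to hold vm->lock for write, as xe_vm_userptr_pin() requires:

        /* Hypothetical helper: repin userptrs only when the advisory check hints at it. */
        static int vm_maybe_repin_userptrs(struct xe_vm *vm)
        {
                if (!xe_vm_userptr_check_repin(vm))
                        return 0;

                return xe_vm_userptr_pin(vm);
        }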
757 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) in xe_vm_rebind() argument
762 lockdep_assert_held(&vm->lock); in xe_vm_rebind()
763 if (xe_vm_in_lr_mode(vm) && !rebind_worker) in xe_vm_rebind()
766 xe_vm_assert_held(vm); in xe_vm_rebind()
767 list_for_each_entry_safe(vma, next, &vm->rebind_list, in xe_vm_rebind()
769 xe_assert(vm->xe, vma->tile_present); in xe_vm_rebind()
796 static struct xe_vma *xe_vma_create(struct xe_vm *vm, in xe_vma_create() argument
808 xe_assert(vm->xe, start < end); in xe_vma_create()
809 xe_assert(vm->xe, end < vm->size); in xe_vma_create()
836 vma->gpuva.vm = &vm->gpuvm; in xe_vma_create()
842 for_each_tile(tile, vm->xe, id) in xe_vma_create()
845 if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC) in xe_vma_create()
855 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base); in xe_vma_create()
888 xe_vm_get(vm); in xe_vma_create()
896 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy_late() local
897 struct xe_device *xe = vm->xe; in xe_vma_destroy_late()
923 xe_vm_put(vm); in xe_vma_destroy_late()
925 xe_vm_put(vm); in xe_vma_destroy_late()
952 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy() local
954 lockdep_assert_held_write(&vm->lock); in xe_vma_destroy()
955 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy)); in xe_vma_destroy()
958 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); in xe_vma_destroy()
960 spin_lock(&vm->userptr.invalidated_lock); in xe_vma_destroy()
962 spin_unlock(&vm->userptr.invalidated_lock); in xe_vma_destroy()
969 xe_vm_assert_held(vm); in xe_vma_destroy()
986 * @vma: The vma for which we want to lock the vm resv and any attached
998 struct xe_vm *vm = xe_vma_vm(vma); in xe_vm_prepare_vma() local
1002 XE_WARN_ON(!vm); in xe_vm_prepare_vma()
1004 err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared); in xe_vm_prepare_vma()
1006 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in xe_vm_prepare_vma()
1007 if (!err && bo && !bo->vm) { in xe_vm_prepare_vma()
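xe_vm_prepare_vma() locks (or reserves fence slots on) the VM's common resv and, for external BOs (bo && !bo->vm), the VMA's own BO, and is meant to run inside a drm_exec transaction so contention can be retried. A hedged sketch of such a caller, assuming a drm_exec that was initialized elsewhere and that xe_vm_prepare_vma() takes (exec, vma, num_shared) as the lines above suggest; the value 1 for num_shared is illustrative:

        drm_exec_until_all_locked(exec) {
                err = xe_vm_prepare_vma(exec, vma, 1);
                drm_exec_retry_on_contention(exec);
                if (err)
                        break;
        }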
1036 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) in xe_vm_find_overlapping_vma() argument
1040 lockdep_assert_held(&vm->lock); in xe_vm_find_overlapping_vma()
1042 if (xe_vm_is_closed_or_banned(vm)) in xe_vm_find_overlapping_vma()
1045 xe_assert(vm->xe, start + range <= vm->size); in xe_vm_find_overlapping_vma()
1047 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range); in xe_vm_find_overlapping_vma()
1052 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_insert_vma() argument
1056 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_insert_vma()
1057 lockdep_assert_held(&vm->lock); in xe_vm_insert_vma()
1059 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); in xe_vm_insert_vma()
1065 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_remove_vma() argument
1067 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_remove_vma()
1068 lockdep_assert_held(&vm->lock); in xe_vm_remove_vma()
1071 if (vm->usm.last_fault_vma == vma) in xe_vm_remove_vma()
1072 vm->usm.last_fault_vma = NULL; in xe_vm_remove_vma()
1229 * given tile and vm.
1232 * @vm: vm to set up for.
1242 struct xe_vm *vm) in xe_vm_create_scratch() argument
1247 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) { in xe_vm_create_scratch()
1248 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i); in xe_vm_create_scratch()
1249 if (IS_ERR(vm->scratch_pt[id][i])) in xe_vm_create_scratch()
1250 return PTR_ERR(vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1252 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1258 static void xe_vm_free_scratch(struct xe_vm *vm) in xe_vm_free_scratch() argument
1263 if (!xe_vm_has_scratch(vm)) in xe_vm_free_scratch()
1266 for_each_tile(tile, vm->xe, id) { in xe_vm_free_scratch()
1269 if (!vm->pt_root[id]) in xe_vm_free_scratch()
1272 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i) in xe_vm_free_scratch()
1273 if (vm->scratch_pt[id][i]) in xe_vm_free_scratch()
1274 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL); in xe_vm_free_scratch()
1281 struct xe_vm *vm; in xe_vm_create() local
1286 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in xe_vm_create()
1287 if (!vm) in xe_vm_create()
1290 vm->xe = xe; in xe_vm_create()
1292 vm->size = 1ull << xe->info.va_bits; in xe_vm_create()
1294 vm->flags = flags; in xe_vm_create()
1296 init_rwsem(&vm->lock); in xe_vm_create()
1298 INIT_LIST_HEAD(&vm->rebind_list); in xe_vm_create()
1300 INIT_LIST_HEAD(&vm->userptr.repin_list); in xe_vm_create()
1301 INIT_LIST_HEAD(&vm->userptr.invalidated); in xe_vm_create()
1302 init_rwsem(&vm->userptr.notifier_lock); in xe_vm_create()
1303 spin_lock_init(&vm->userptr.invalidated_lock); in xe_vm_create()
1305 INIT_WORK(&vm->destroy_work, vm_destroy_work_func); in xe_vm_create()
1307 INIT_LIST_HEAD(&vm->preempt.exec_queues); in xe_vm_create()
1308 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */ in xe_vm_create()
1311 xe_range_fence_tree_init(&vm->rftree[id]); in xe_vm_create()
1313 vm->pt_ops = &xelp_pt_ops; in xe_vm_create()
1324 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm, in xe_vm_create()
1325 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops); in xe_vm_create()
1329 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL); in xe_vm_create()
1334 vm->flags |= XE_VM_FLAG_64K; in xe_vm_create()
1341 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level); in xe_vm_create()
1342 if (IS_ERR(vm->pt_root[id])) { in xe_vm_create()
1343 err = PTR_ERR(vm->pt_root[id]); in xe_vm_create()
1344 vm->pt_root[id] = NULL; in xe_vm_create()
1349 if (xe_vm_has_scratch(vm)) { in xe_vm_create()
1351 if (!vm->pt_root[id]) in xe_vm_create()
1354 err = xe_vm_create_scratch(xe, tile, vm); in xe_vm_create()
1358 vm->batch_invalidate_tlb = true; in xe_vm_create()
1362 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); in xe_vm_create()
1363 vm->flags |= XE_VM_FLAG_LR_MODE; in xe_vm_create()
1364 vm->batch_invalidate_tlb = false; in xe_vm_create()
1369 if (!vm->pt_root[id]) in xe_vm_create()
1372 xe_pt_populate_empty(tile, vm, vm->pt_root[id]); in xe_vm_create()
1374 dma_resv_unlock(xe_vm_resv(vm)); in xe_vm_create()
1376 /* Kernel migration VM shouldn't have a circular loop.. */ in xe_vm_create()
1384 if (!vm->pt_root[id]) in xe_vm_create()
1396 vm->q[id] = q; in xe_vm_create()
1402 vm->composite_fence_ctx = dma_fence_context_alloc(1); in xe_vm_create()
1411 trace_xe_vm_create(vm); in xe_vm_create()
1413 return vm; in xe_vm_create()
1416 dma_resv_unlock(xe_vm_resv(vm)); in xe_vm_create()
1418 xe_vm_close_and_put(vm); in xe_vm_create()
1423 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_create()
1424 kfree(vm); in xe_vm_create()
1430 static void xe_vm_close(struct xe_vm *vm) in xe_vm_close() argument
1432 down_write(&vm->lock); in xe_vm_close()
1433 vm->size = 0; in xe_vm_close()
1434 up_write(&vm->lock); in xe_vm_close()
1437 void xe_vm_close_and_put(struct xe_vm *vm) in xe_vm_close_and_put() argument
1440 struct xe_device *xe = vm->xe; in xe_vm_close_and_put()
1446 xe_assert(xe, !vm->preempt.num_exec_queues); in xe_vm_close_and_put()
1448 xe_vm_close(vm); in xe_vm_close_and_put()
1449 if (xe_vm_in_preempt_fence_mode(vm)) in xe_vm_close_and_put()
1450 flush_work(&vm->preempt.rebind_work); in xe_vm_close_and_put()
1452 down_write(&vm->lock); in xe_vm_close_and_put()
1454 if (vm->q[id]) in xe_vm_close_and_put()
1455 xe_exec_queue_last_fence_put(vm->q[id], vm); in xe_vm_close_and_put()
1457 up_write(&vm->lock); in xe_vm_close_and_put()
1460 if (vm->q[id]) { in xe_vm_close_and_put()
1461 xe_exec_queue_kill(vm->q[id]); in xe_vm_close_and_put()
1462 xe_exec_queue_put(vm->q[id]); in xe_vm_close_and_put()
1463 vm->q[id] = NULL; in xe_vm_close_and_put()
1467 down_write(&vm->lock); in xe_vm_close_and_put()
1468 xe_vm_lock(vm, false); in xe_vm_close_and_put()
1469 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) { in xe_vm_close_and_put()
1473 down_read(&vm->userptr.notifier_lock); in xe_vm_close_and_put()
1475 up_read(&vm->userptr.notifier_lock); in xe_vm_close_and_put()
1478 xe_vm_remove_vma(vm, vma); in xe_vm_close_and_put()
1481 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) { in xe_vm_close_and_put()
1492 * All vm operations will add shared fences to resv. in xe_vm_close_and_put()
1498 xe_vm_free_scratch(vm); in xe_vm_close_and_put()
1501 if (vm->pt_root[id]) { in xe_vm_close_and_put()
1502 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL); in xe_vm_close_and_put()
1503 vm->pt_root[id] = NULL; in xe_vm_close_and_put()
1506 xe_vm_unlock(vm); in xe_vm_close_and_put()
1509 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL in xe_vm_close_and_put()
1519 up_write(&vm->lock); in xe_vm_close_and_put()
1522 if (vm->flags & XE_VM_FLAG_FAULT_MODE) in xe_vm_close_and_put()
1524 else if (!(vm->flags & XE_VM_FLAG_MIGRATION)) in xe_vm_close_and_put()
1529 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_close_and_put()
1531 xe_vm_put(vm); in xe_vm_close_and_put()
1536 struct xe_vm *vm = in vm_destroy_work_func() local
1538 struct xe_device *xe = vm->xe; in vm_destroy_work_func()
1544 xe_assert(xe, !vm->size); in vm_destroy_work_func()
1546 if (!(vm->flags & XE_VM_FLAG_MIGRATION)) { in vm_destroy_work_func()
1549 if (xe->info.has_asid && vm->usm.asid) { in vm_destroy_work_func()
1551 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); in vm_destroy_work_func()
1552 xe_assert(xe, lookup == vm); in vm_destroy_work_func()
1558 XE_WARN_ON(vm->pt_root[id]); in vm_destroy_work_func()
1560 trace_xe_vm_free(vm); in vm_destroy_work_func()
1561 dma_fence_put(vm->rebind_fence); in vm_destroy_work_func()
1562 kfree(vm); in vm_destroy_work_func()
1567 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm); in xe_vm_free() local
1569 /* To destroy the VM we need to be able to sleep */ in xe_vm_free()
1570 queue_work(system_unbound_wq, &vm->destroy_work); in xe_vm_free()
1575 struct xe_vm *vm; in xe_vm_lookup() local
1577 mutex_lock(&xef->vm.lock); in xe_vm_lookup()
1578 vm = xa_load(&xef->vm.xa, id); in xe_vm_lookup()
1579 if (vm) in xe_vm_lookup()
1580 xe_vm_get(vm); in xe_vm_lookup()
1581 mutex_unlock(&xef->vm.lock); in xe_vm_lookup()
1583 return vm; in xe_vm_lookup()
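xe_vm_lookup() resolves a VM id under xef->vm.lock and, on success, returns the VM with a reference taken via xe_vm_get(), so every successful lookup must be balanced by xe_vm_put(). A usage fragment in the spirit of the bind ioctl later in this listing; the error value is an assumption for the sketch:

        vm = xe_vm_lookup(xef, args->vm_id);
        if (!vm)
                return -ENOENT;      /* assumed error code for "no such VM" */

        /* ... operate on vm ... */

        xe_vm_put(vm);               /* drop the reference taken by xe_vm_lookup() */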
1586 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile) in xe_vm_pdp4_descriptor() argument
1588 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0, in xe_vm_pdp4_descriptor()
1593 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in to_wait_exec_queue() argument
1595 return q ? q : vm->q[0]; in to_wait_exec_queue()
1603 struct xe_vm *vm = xe_vma_vm(vma); in xe_vm_unbind_vma() local
1604 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); in xe_vm_unbind_vma()
1633 for_each_tile(tile, vm->xe, id) { in xe_vm_unbind_vma()
1637 fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id], in xe_vm_unbind_vma()
1649 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) in xe_vm_unbind_vma()
1655 vm->composite_fence_ctx, in xe_vm_unbind_vma()
1656 vm->composite_fence_seqno++, in xe_vm_unbind_vma()
1659 --vm->composite_fence_seqno; in xe_vm_unbind_vma()
1666 xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence; in xe_vm_unbind_vma()
1693 struct xe_vm *vm = xe_vma_vm(vma); in xe_vm_bind_vma() local
1708 for_each_tile(tile, vm->xe, id) { in xe_vm_bind_vma()
1712 fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id], in xe_vm_bind_vma()
1725 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) in xe_vm_bind_vma()
1731 vm->composite_fence_ctx, in xe_vm_bind_vma()
1732 vm->composite_fence_seqno++, in xe_vm_bind_vma()
1735 --vm->composite_fence_seqno; in xe_vm_bind_vma()
1774 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, in __xe_vm_bind() argument
1780 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); in __xe_vm_bind()
1783 xe_vm_assert_held(vm); in __xe_vm_bind()
1799 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in __xe_vm_bind()
1801 fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm); in __xe_vm_bind()
1809 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence); in __xe_vm_bind()
1815 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, in xe_vm_bind() argument
1822 xe_vm_assert_held(vm); in xe_vm_bind()
1826 err = xe_bo_validate(bo, vm, true); in xe_vm_bind()
1831 return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op, in xe_vm_bind()
1835 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, in xe_vm_unbind() argument
1840 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); in xe_vm_unbind()
1842 xe_vm_assert_held(vm); in xe_vm_unbind()
1851 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence); in xe_vm_unbind()
1868 struct xe_vm *vm; in xe_vm_create_ioctl() local
1915 vm = xe_vm_create(xe, flags); in xe_vm_create_ioctl()
1916 if (IS_ERR(vm)) in xe_vm_create_ioctl()
1917 return PTR_ERR(vm); in xe_vm_create_ioctl()
1919 mutex_lock(&xef->vm.lock); in xe_vm_create_ioctl()
1920 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); in xe_vm_create_ioctl()
1921 mutex_unlock(&xef->vm.lock); in xe_vm_create_ioctl()
1927 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, in xe_vm_create_ioctl()
1934 vm->usm.asid = asid; in xe_vm_create_ioctl()
1938 vm->xef = xef; in xe_vm_create_ioctl()
1940 /* Record BO memory for VM pagetable created against client */ in xe_vm_create_ioctl()
1942 if (vm->pt_root[id]) in xe_vm_create_ioctl()
1943 xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo); in xe_vm_create_ioctl()
1947 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); in xe_vm_create_ioctl()
1953 mutex_lock(&xef->vm.lock); in xe_vm_create_ioctl()
1954 xa_erase(&xef->vm.xa, id); in xe_vm_create_ioctl()
1955 mutex_unlock(&xef->vm.lock); in xe_vm_create_ioctl()
1957 xe_vm_close_and_put(vm); in xe_vm_create_ioctl()
1968 struct xe_vm *vm; in xe_vm_destroy_ioctl() local
1975 mutex_lock(&xef->vm.lock); in xe_vm_destroy_ioctl()
1976 vm = xa_load(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
1977 if (XE_IOCTL_DBG(xe, !vm)) in xe_vm_destroy_ioctl()
1979 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues)) in xe_vm_destroy_ioctl()
1982 xa_erase(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
1983 mutex_unlock(&xef->vm.lock); in xe_vm_destroy_ioctl()
1986 xe_vm_close_and_put(vm); in xe_vm_destroy_ioctl()
1997 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, in xe_vm_prefetch() argument
2002 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); in xe_vm_prefetch()
2005 xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type)); in xe_vm_prefetch()
2014 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs, in xe_vm_prefetch()
2023 xe_exec_queue_last_fence_get(wait_exec_queue, vm); in xe_vm_prefetch()
2034 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, in prep_vma_destroy() argument
2037 down_read(&vm->userptr.notifier_lock); in prep_vma_destroy()
2039 up_read(&vm->userptr.notifier_lock); in prep_vma_destroy()
2041 xe_vm_remove_vma(vm, vma); in prep_vma_destroy()
2099 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, in vm_bind_ioctl_ops_create() argument
2110 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_create()
2112 vm_dbg(&vm->xe->drm, in vm_bind_ioctl_ops_create()
2120 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range, in vm_bind_ioctl_ops_create()
2124 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
2127 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
2130 xe_assert(vm->xe, bo); in vm_bind_ioctl_ops_create()
2136 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj); in vm_bind_ioctl_ops_create()
2147 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in vm_bind_ioctl_ops_create()
2163 print_op(vm->xe, __op); in vm_bind_ioctl_ops_create()
2169 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, in new_vma() argument
2177 lockdep_assert_held_write(&vm->lock); in new_vma()
2183 if (!bo->vm) { in new_vma()
2184 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm)); in new_vma()
2197 vma = xe_vma_create(vm, bo, op->gem.offset, in new_vma()
2206 prep_vma_destroy(vm, vma, false); in new_vma()
2210 } else if (!xe_vma_has_no_bo(vma) && !bo->vm) { in new_vma()
2211 err = add_preempt_fences(vm, bo); in new_vma()
2213 prep_vma_destroy(vm, vma, false); in new_vma()
2254 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) in xe_vma_op_commit() argument
2258 lockdep_assert_held_write(&vm->lock); in xe_vma_op_commit()
2262 err |= xe_vm_insert_vma(vm, op->map.vma); in xe_vma_op_commit()
2271 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va), in xe_vma_op_commit()
2276 err |= xe_vm_insert_vma(vm, op->remap.prev); in xe_vma_op_commit()
2286 err |= xe_vm_insert_vma(vm, op->remap.next); in xe_vma_op_commit()
2296 /* Adjust for partial unbind after removin VMA from VM */ in xe_vma_op_commit()
2304 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true); in xe_vma_op_commit()
2311 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_commit()
2318 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, in vm_bind_ioctl_ops_parse() argument
2327 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_parse()
2352 vma = new_vma(vm, &op->base.map, op->map.pat_index, in vm_bind_ioctl_ops_parse()
2376 vma = new_vma(vm, op->base.remap.prev, in vm_bind_ioctl_ops_parse()
2407 vma = new_vma(vm, op->base.remap.next, in vm_bind_ioctl_ops_parse()
2435 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in vm_bind_ioctl_ops_parse()
2440 err = xe_vma_op_commit(vm, op); in vm_bind_ioctl_ops_parse()
2461 static int op_execute(struct drm_exec *exec, struct xe_vm *vm, in op_execute() argument
2466 lockdep_assert_held_write(&vm->lock); in op_execute()
2472 xe_vm_assert_held(vm); in op_execute()
2477 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma), in op_execute()
2479 !xe_vm_in_fault_mode(vm), in op_execute()
2491 err = xe_vm_unbind(vm, vma, op->q, op->syncs, in op_execute()
2503 err = xe_vm_bind(vm, op->remap.prev, op->q, in op_execute()
2515 err = xe_vm_bind(vm, op->remap.next, op->q, in op_execute()
2529 err = xe_vm_unbind(vm, vma, op->q, op->syncs, in op_execute()
2534 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region, in op_execute()
2540 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in op_execute()
2549 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma, in __xe_vma_op_execute() argument
2558 err = op_execute(&exec, vm, vma, op); in __xe_vma_op_execute()
2566 lockdep_assert_held_write(&vm->lock); in __xe_vma_op_execute()
2589 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) in xe_vma_op_execute() argument
2593 lockdep_assert_held_write(&vm->lock); in xe_vma_op_execute()
2597 ret = __xe_vma_op_execute(vm, op->map.vma, op); in xe_vma_op_execute()
2610 ret = __xe_vma_op_execute(vm, vma, op); in xe_vma_op_execute()
2614 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va), in xe_vma_op_execute()
2618 ret = __xe_vma_op_execute(vm, in xe_vma_op_execute()
2623 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_execute()
2629 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op) in xe_vma_op_cleanup() argument
2643 drm_gpuva_ops_free(&vm->gpuvm, op->ops); in xe_vma_op_cleanup()
2645 xe_vm_put(vm); in xe_vma_op_cleanup()
2648 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, in xe_vma_op_unwind() argument
2652 lockdep_assert_held_write(&vm->lock); in xe_vma_op_unwind()
2657 prep_vma_destroy(vm, op->map.vma, post_commit); in xe_vma_op_unwind()
2666 down_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2668 up_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2670 xe_vm_insert_vma(vm, vma); in xe_vma_op_unwind()
2679 prep_vma_destroy(vm, op->remap.prev, prev_post_commit); in xe_vma_op_unwind()
2683 prep_vma_destroy(vm, op->remap.next, next_post_commit); in xe_vma_op_unwind()
2687 down_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2689 up_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2691 xe_vm_insert_vma(vm, vma); in xe_vma_op_unwind()
2699 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_unwind()
2703 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, in vm_bind_ioctl_ops_unwind() argument
2719 xe_vma_op_unwind(vm, op, in vm_bind_ioctl_ops_unwind()
2725 drm_gpuva_ops_free(&vm->gpuvm, __ops); in vm_bind_ioctl_ops_unwind()
2729 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, in vm_bind_ioctl_ops_execute() argument
2735 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_execute()
2738 err = xe_vma_op_execute(vm, op); in vm_bind_ioctl_ops_execute()
2740 drm_warn(&vm->xe->drm, "VM op(%d) failed with %d", in vm_bind_ioctl_ops_execute()
2743 * FIXME: Killing VM rather than proper error handling in vm_bind_ioctl_ops_execute()
2745 xe_vm_kill(vm); in vm_bind_ioctl_ops_execute()
2748 xe_vma_op_cleanup(vm, op); in vm_bind_ioctl_ops_execute()
2873 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm, in vm_bind_ioctl_signal_fences() argument
2882 to_wait_exec_queue(vm, q), vm); in vm_bind_ioctl_signal_fences()
2889 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm, in vm_bind_ioctl_signal_fences()
2904 struct xe_vm *vm; in xe_vm_bind_ioctl() local
2930 vm = xe_vm_lookup(xef, args->vm_id); in xe_vm_bind_ioctl()
2931 if (XE_IOCTL_DBG(xe, !vm)) { in xe_vm_bind_ioctl()
2936 err = down_write_killable(&vm->lock); in xe_vm_bind_ioctl()
2940 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { in xe_vm_bind_ioctl()
2949 if (XE_IOCTL_DBG(xe, range > vm->size) || in xe_vm_bind_ioctl()
2950 XE_IOCTL_DBG(xe, addr > vm->size - range)) { in xe_vm_bind_ioctl()
3039 (xe_vm_in_lr_mode(vm) ? in xe_vm_bind_ioctl()
3069 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset, in xe_vm_bind_ioctl()
3078 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs, in xe_vm_bind_ioctl()
3091 xe_vm_get(vm); in xe_vm_bind_ioctl()
3095 err = vm_bind_ioctl_ops_execute(vm, &ops_list); in xe_vm_bind_ioctl()
3097 up_write(&vm->lock); in xe_vm_bind_ioctl()
3101 xe_vm_put(vm); in xe_vm_bind_ioctl()
3114 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds); in xe_vm_bind_ioctl()
3117 err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
3126 up_write(&vm->lock); in xe_vm_bind_ioctl()
3128 xe_vm_put(vm); in xe_vm_bind_ioctl()
3141 * xe_vm_lock() - Lock the vm's dma_resv object
3142 * @vm: The struct xe_vm whose lock is to be locked
3149 int xe_vm_lock(struct xe_vm *vm, bool intr) in xe_vm_lock() argument
3152 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL); in xe_vm_lock()
3154 return dma_resv_lock(xe_vm_resv(vm), NULL); in xe_vm_lock()
3158 * xe_vm_unlock() - Unlock the vm's dma_resv object
3159 * @vm: The struct xe_vm whose lock is to be released.
3163 void xe_vm_unlock(struct xe_vm *vm) in xe_vm_unlock() argument
3165 dma_resv_unlock(xe_vm_resv(vm)); in xe_vm_unlock()
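xe_vm_lock() wraps dma_resv_lock() on the VM's reservation object, optionally as the interruptible variant, and xe_vm_unlock() drops it again. A minimal sketch of the lock/modify/unlock pattern seen elsewhere in this listing (for example in xe_vm_close_and_put()); the helper and the work done under the lock are hypothetical:

        static int vm_touch_resv_protected_state(struct xe_vm *vm)
        {
                int err;

                /* Interruptible, so a fatal signal can abort the wait. */
                err = xe_vm_lock(vm, true);
                if (err)
                        return err;

                /* ... modify state protected by the VM's dma_resv ... */

                xe_vm_unlock(vm);
                return 0;
        }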
3232 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id) in xe_analyze_vm() argument
3238 if (!down_read_trylock(&vm->lock)) { in xe_analyze_vm()
3239 drm_printf(p, " Failed to acquire VM lock to dump capture"); in xe_analyze_vm()
3242 if (vm->pt_root[gt_id]) { in xe_analyze_vm()
3243 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE); in xe_analyze_vm()
3244 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo); in xe_analyze_vm()
3245 drm_printf(p, " VM root: A:0x%llx %s\n", addr, in xe_analyze_vm()
3249 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_analyze_vm()
3276 up_read(&vm->lock); in xe_analyze_vm()