Lines Matching refs:svm

57 return force_smem ? NULL : vm->svm.peer.owner;
83 return container_of(gpusvm, struct xe_vm, svm.gpusvm);
139 spin_lock(&vm->svm.garbage_collector.lock);
142 &vm->svm.garbage_collector.range_list);
143 spin_unlock(&vm->svm.garbage_collector.lock);
145 queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work);
211 drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
313 drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
392 spin_lock(&vm->svm.garbage_collector.lock);
393 range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
403 spin_unlock(&vm->svm.garbage_collector.lock);
422 spin_unlock(&vm->svm.garbage_collector.lock);
430 svm.garbage_collector.work);
844 struct xe_pagemap *xpagemap = vm->svm.pagemaps[id];
848 vm->svm.pagemaps[id] = NULL;
857 return container_of(peer, struct xe_vm, svm.peer)->xe->drm.dev;
887 spin_lock_init(&vm->svm.garbage_collector.lock);
888 INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
889 INIT_WORK(&vm->svm.garbage_collector.work,
892 vm->svm.peer.private = XE_PEER_VM;
893 err = drm_pagemap_acquire_owner(&vm->svm.peer, &xe_owner_list,
900 drm_pagemap_release_owner(&vm->svm.peer);
904 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
909 drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
913 drm_pagemap_release_owner(&vm->svm.peer);
917 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
934 disable_work_sync(&vm->svm.garbage_collector.work);
936 drm_pagemap_release_owner(&vm->svm.peer);
949 drm_gpusvm_fini(&vm->svm.gpusvm);
992 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1044 return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
1315 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1321 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1424 return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1433 * This function UNMAPS SVM ranges if the start or end address falls inside them.
1441 drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1448 drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1452 spin_lock(&vm->svm.garbage_collector.lock);
1454 spin_unlock(&vm->svm.garbage_collector.lock);
1493 r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1517 err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1520 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1527 * xe_svm_ranges_zap_ptes_in_range - clear PTEs of SVM ranges in an input range
1533 * with the SVM ranges within the given input start and end
1546 lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1549 drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1896 vm->svm.pagemaps[id] = xpagemap;
2055 flush_work(&vm->svm.garbage_collector.work);
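
The garbage-collector matches above (source lines 139-145, 392-430, 887-889, 934, and 2055) trace a common kernel deferred-destroy pattern: producers add ranges to a spinlock-protected list and kick a work item, and the worker recovers its container with container_of, then pops entries under the lock and destroys them with the lock dropped. The following is a minimal, self-contained sketch of that pattern only; the gc_example/gc_entry types and gc_* helpers are hypothetical stand-ins, not the Xe driver's own code.

    /*
     * Minimal sketch of the deferred-destroy ("garbage collector")
     * pattern the matches above follow. All names here are
     * hypothetical illustrations.
     */
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct gc_example {
    	spinlock_t lock;		/* protects range_list */
    	struct list_head range_list;	/* entries awaiting destruction */
    	struct work_struct work;	/* reaper, runs on a workqueue */
    };

    struct gc_entry {
    	struct list_head link;
    };

    /* Producer side: queue an entry and kick the worker. */
    static void gc_queue(struct gc_example *gc, struct gc_entry *e,
    			 struct workqueue_struct *wq)
    {
    	spin_lock(&gc->lock);
    	list_add_tail(&e->link, &gc->range_list);
    	spin_unlock(&gc->lock);
    	queue_work(wq, &gc->work);
    }

    /* Consumer side: pop entries one at a time, freeing outside the lock. */
    static void gc_work(struct work_struct *w)
    {
    	struct gc_example *gc = container_of(w, struct gc_example, work);
    	struct gc_entry *e;

    	spin_lock(&gc->lock);
    	while ((e = list_first_entry_or_null(&gc->range_list,
    					     struct gc_entry, link))) {
    		list_del(&e->link);
    		spin_unlock(&gc->lock);
    		kfree(e);		/* destruction done unlocked */
    		spin_lock(&gc->lock);
    	}
    	spin_unlock(&gc->lock);
    }

    static void gc_init(struct gc_example *gc)
    {
    	spin_lock_init(&gc->lock);
    	INIT_LIST_HEAD(&gc->range_list);
    	INIT_WORK(&gc->work, gc_work);
    }

Dropping the spinlock around the actual free mirrors what the matches at source lines 393-422 suggest: range destruction may sleep or take other locks, so only list manipulation happens under the spinlock, and flush_work/disable_work_sync (source lines 934, 2055) provide the teardown barrier.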