Lines matching the identifier "range" (full-word matches) in the Xe SVM code, xe_svm.c:
16 static bool xe_svm_range_in_vram(struct xe_svm_range *range) in xe_svm_range_in_vram() argument
19 * Advisory only check whether the range is currently backed by VRAM in xe_svm_range_in_vram()
25 .__flags = READ_ONCE(range->base.flags.__flags), in xe_svm_range_in_vram()
31 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range) in xe_svm_range_has_vram_binding() argument
34 return xe_svm_range_in_vram(range) && range->tile_present; in xe_svm_range_has_vram_binding()
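These two helpers only give an advisory answer: they snapshot the range flags with READ_ONCE() and combine the result with tile_present, so the caller must still tolerate the state changing immediately afterwards. A minimal sketch of how they likely fit together, based on the matched lines; the drm_gpusvm_range_flags type and the has_devmem_pages field are assumptions, not confirmed by this listing:

    static bool xe_svm_range_in_vram(struct xe_svm_range *range)
    {
            /*
             * Advisory only check whether the range is currently backed by
             * VRAM; the flags may change as soon as they are read.
             */
            struct drm_gpusvm_range_flags flags = {    /* assumed type name */
                    .__flags = READ_ONCE(range->base.flags.__flags),
            };

            return flags.has_devmem_pages;             /* assumed flag name */
    }

    static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
    {
            /* Advisory only: in VRAM and bound on at least one tile. */
            return xe_svm_range_in_vram(range) && range->tile_present;
    }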
47 static unsigned long xe_svm_range_start(struct xe_svm_range *range) in xe_svm_range_start() argument
49 return drm_gpusvm_range_start(&range->base); in xe_svm_range_start()
52 static unsigned long xe_svm_range_end(struct xe_svm_range *range) in xe_svm_range_end() argument
54 return drm_gpusvm_range_end(&range->base); in xe_svm_range_end()
57 static unsigned long xe_svm_range_size(struct xe_svm_range *range) in xe_svm_range_size() argument
59 return drm_gpusvm_range_size(&range->base); in xe_svm_range_size()
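The three accessors are thin wrappers whose full bodies already appear in the matches; reassembled here with braces added, and with the usual assumption that size is simply end minus start:

    static unsigned long xe_svm_range_start(struct xe_svm_range *range)
    {
            return drm_gpusvm_range_start(&range->base);
    }

    static unsigned long xe_svm_range_end(struct xe_svm_range *range)
    {
            return drm_gpusvm_range_end(&range->base);
    }

    /* Presumably end - start, i.e. the byte length of the range. */
    static unsigned long xe_svm_range_size(struct xe_svm_range *range)
    {
            return drm_gpusvm_range_size(&range->base);
    }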
74 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation) in xe_svm_range_debug() argument
76 range_debug(range, operation); in xe_svm_range_debug()
87 struct xe_svm_range *range; in xe_svm_range_alloc() local
89 range = kzalloc(sizeof(*range), GFP_KERNEL); in xe_svm_range_alloc()
90 if (!range) in xe_svm_range_alloc()
93 INIT_LIST_HEAD(&range->garbage_collector_link); in xe_svm_range_alloc()
96 return &range->base; in xe_svm_range_alloc()
99 static void xe_svm_range_free(struct drm_gpusvm_range *range) in xe_svm_range_free() argument
101 xe_vm_put(range_to_vm(range)); in xe_svm_range_free()
102 kfree(range); in xe_svm_range_free()
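Allocation embeds the drm_gpusvm_range as the first member of xe_svm_range and returns a pointer to that base, while free drops a VM reference and releases the memory. A sketch under the assumptions that allocation follows the ERR_PTR convention and takes the VM reference that xe_vm_put() later releases; the gpusvm_to_vm()/xe_vm_get() calls do not appear in the matches:

    static struct drm_gpusvm_range *
    xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
    {
            struct xe_svm_range *range;

            range = kzalloc(sizeof(*range), GFP_KERNEL);
            if (!range)
                    return ERR_PTR(-ENOMEM);          /* assumed error convention */

            /*
             * Start with an empty garbage-collector link so list_empty()
             * can later mean "not yet queued".
             */
            INIT_LIST_HEAD(&range->garbage_collector_link);
            xe_vm_get(gpusvm_to_vm(gpusvm));          /* assumed; balanced below */

            return &range->base;
    }

    static void xe_svm_range_free(struct drm_gpusvm_range *range)
    {
            xe_vm_put(range_to_vm(range));
            kfree(range);
    }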
111 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range, in xe_svm_garbage_collector_add_range() argument
116 range_debug(range, "GARBAGE COLLECTOR ADD"); in xe_svm_garbage_collector_add_range()
118 drm_gpusvm_range_set_unmapped(&range->base, mmu_range); in xe_svm_garbage_collector_add_range()
121 if (list_empty(&range->garbage_collector_link)) in xe_svm_garbage_collector_add_range()
122 list_add_tail(&range->garbage_collector_link, in xe_svm_garbage_collector_add_range()
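The unmapped range is marked and queued exactly once: list_empty() on its link acts as the "not already queued" test before list_add_tail(). A sketch of the pattern; the lock name and the worker kick at the end are assumptions, only the list head name is confirmed by the later matches:

    static void
    xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
                                       const struct mmu_notifier_range *mmu_range)
    {
            range_debug(range, "GARBAGE COLLECTOR ADD");

            /* Mark the GPU SVM range as no longer backed by a CPU mapping. */
            drm_gpusvm_range_set_unmapped(&range->base, mmu_range);

            spin_lock(&vm->svm.garbage_collector.lock);         /* assumed lock */
            if (list_empty(&range->garbage_collector_link))     /* queue only once */
                    list_add_tail(&range->garbage_collector_link,
                                  &vm->svm.garbage_collector.range_list);
            spin_unlock(&vm->svm.garbage_collector.lock);

            /* A worker is presumably scheduled here to run the collector. */
    }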
135 struct xe_svm_range *range = to_xe_range(r); in xe_svm_range_notifier_event_begin() local
143 range_debug(range, "NOTIFIER"); in xe_svm_range_notifier_event_begin()
146 if (range->base.flags.unmapped || !range->tile_present) in xe_svm_range_notifier_event_begin()
149 range_debug(range, "NOTIFIER - EXECUTE"); in xe_svm_range_notifier_event_begin()
151 /* Adjust invalidation to range boundaries */ in xe_svm_range_notifier_event_begin()
152 *adj_start = min(xe_svm_range_start(range), mmu_range->start); in xe_svm_range_notifier_event_begin()
153 *adj_end = max(xe_svm_range_end(range), mmu_range->end); in xe_svm_range_notifier_event_begin()
161 if (xe_pt_zap_ptes_range(tile, vm, range)) { in xe_svm_range_notifier_event_begin()
163 range->tile_invalidated |= BIT(id); in xe_svm_range_notifier_event_begin()
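The notifier path first widens the invalidation window to cover the whole GPU range (min of starts, max of ends), then zaps the PTEs on each tile and records which tiles were touched in tile_invalidated. A fragment-level sketch of that loop; the for_each_tile() iterator, the tile_mask return value and its u8 type are assumptions beyond what the matches show:

    u8 tile_mask = 0;                           /* assumed accumulator/return */

    /* Adjust invalidation to range boundaries. */
    *adj_start = min(xe_svm_range_start(range), mmu_range->start);
    *adj_end = max(xe_svm_range_end(range), mmu_range->end);

    for_each_tile(tile, xe, id)                 /* assumed iterator */
            if (xe_pt_zap_ptes_range(tile, vm, range)) {
                    tile_mask |= BIT(id);       /* this tile needs a TLB invalidation */
                    range->tile_invalidated |= BIT(id);
            }

    return tile_mask;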
224 * invalidation is not required. Could walk range list twice to figure in xe_svm_invalidate()
286 struct xe_svm_range *range) in __xe_svm_garbage_collector() argument
290 range_debug(range, "GARBAGE COLLECTOR"); in __xe_svm_garbage_collector()
293 fence = xe_vm_range_unbind(vm, range); in __xe_svm_garbage_collector()
299 drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base); in __xe_svm_garbage_collector()
306 struct xe_svm_range *range; in xe_svm_garbage_collector() local
316 range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list, in xe_svm_garbage_collector()
317 typeof(*range), in xe_svm_garbage_collector()
319 if (!range) in xe_svm_garbage_collector()
322 list_del(&range->garbage_collector_link); in xe_svm_garbage_collector()
325 err = __xe_svm_garbage_collector(vm, range); in xe_svm_garbage_collector()
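The collector drains the queue one range at a time: peek with list_first_entry_or_null(), unlink, then unbind and remove the range via __xe_svm_garbage_collector(), looping until the list is empty. A sketch of that loop with the locking assumed rather than shown, and error handling simplified:

    for (;;) {
            spin_lock(&vm->svm.garbage_collector.lock);         /* assumed lock */
            range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
                                             typeof(*range),
                                             garbage_collector_link);
            if (!range) {
                    spin_unlock(&vm->svm.garbage_collector.lock);
                    break;                       /* queue drained */
            }

            list_del(&range->garbage_collector_link);
            spin_unlock(&vm->svm.garbage_collector.lock);

            /* Unbind from the GPU page tables, then drop the range. */
            err = __xe_svm_garbage_collector(vm, range);
            if (err)
                    break;                       /* error handling simplified */
    }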
656 static bool xe_svm_range_is_valid(struct xe_svm_range *range, in xe_svm_range_is_valid() argument
661 * Advisory only check whether the range currently has a valid mapping, in xe_svm_range_is_valid()
664 return ((READ_ONCE(range->tile_present) & in xe_svm_range_is_valid()
665 ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) && in xe_svm_range_is_valid()
666 (!devmem_only || xe_svm_range_in_vram(range)); in xe_svm_range_is_valid()
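Validity for a fault on a given tile means "present on this tile and not since invalidated", and, when the fault must be served from device memory, "already in VRAM" as well. Reassembled from the matched lines; only the braces and the parameter types of tile and devmem_only are inferred:

    static bool xe_svm_range_is_valid(struct xe_svm_range *range,
                                      struct xe_tile *tile,
                                      bool devmem_only)
    {
            /*
             * Advisory only: tile_present / tile_invalidated can change as
             * soon as they are read.
             */
            return ((READ_ONCE(range->tile_present) &
                     ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
                    (!devmem_only || xe_svm_range_in_vram(range));
    }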
675 struct xe_svm_range *range, in xe_svm_alloc_vram() argument
686 range_debug(range, "ALLOCATE VRAM"); in xe_svm_alloc_vram()
694 xe_svm_range_size(range), in xe_svm_alloc_vram()
709 xe_svm_range_size(range)); in xe_svm_alloc_vram()
716 err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base, in xe_svm_alloc_vram()
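Only fragments of the VRAM allocation path show up in the matches, but the shape is visible: size a device-memory allocation to the whole range, register it with GPU SVM, and hand it to drm_gpusvm_migrate_to_devmem() to move the backing pages over. A rough outline under those assumptions; every helper name other than the calls shown above is a placeholder, not the driver's real code, and the trailing arguments of the migrate call are assumed:

    /* Hypothetical outline of xe_svm_alloc_vram(); see caveats above. */
    range_debug(range, "ALLOCATE VRAM");

    /*
     * 1. Reserve VRAM sized to the range, e.g.
     *        ... = reserve_vram(tile, xe_svm_range_size(range));   (placeholder)
     * 2. Describe that allocation to GPU SVM, again using
     *    xe_svm_range_size(range) as the length.
     * 3. Migrate the CPU pages backing the range into it:
     */
    err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
                                       devmem_allocation, &ctx);    /* trailing args assumed */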
739 static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, in xe_svm_range_needs_migrate_to_vram() argument
742 struct xe_vm *vm = range_to_vm(&range->base); in xe_svm_range_needs_migrate_to_vram()
743 u64 range_size = xe_svm_range_size(range); in xe_svm_range_needs_migrate_to_vram()
745 if (!range->base.flags.migrate_devmem) in xe_svm_range_needs_migrate_to_vram()
748 if (xe_svm_range_in_vram(range)) { in xe_svm_range_needs_migrate_to_vram()
749 drm_dbg(&vm->xe->drm, "Range is already in VRAM\n"); in xe_svm_range_needs_migrate_to_vram()
754 drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n"); in xe_svm_range_needs_migrate_to_vram()
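Migration to VRAM is skipped when the range cannot live in device memory at all, when it already does, or when the platform cannot migrate a 4 KiB range. A sketch of those early-outs as implied by the matches; the condition guarding the SZ_4K message and the supports_4K_migration() helper name are assumptions:

    static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
                                                   struct xe_vma *vma)
    {
            struct xe_vm *vm = range_to_vm(&range->base);
            u64 range_size = xe_svm_range_size(range);

            /* The range was created without device-memory backing allowed. */
            if (!range->base.flags.migrate_devmem)
                    return false;

            /* Nothing to do if the pages already sit in VRAM. */
            if (xe_svm_range_in_vram(range)) {
                    drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
                    return false;
            }

            /* Assumed check: small ranges only migrate on capable platforms. */
            if (range_size <= SZ_4K && !supports_4K_migration(vm->xe)) {
                    drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
                    return false;
            }

            return true;
    }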
789 struct xe_svm_range *range; in xe_svm_handle_pagefault() local
815 range = to_xe_range(r); in xe_svm_handle_pagefault()
816 if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) in xe_svm_handle_pagefault()
819 range_debug(range, "PAGE FAULT"); in xe_svm_handle_pagefault()
822 xe_svm_range_needs_migrate_to_vram(range, vma)) { in xe_svm_handle_pagefault()
823 err = xe_svm_alloc_vram(vm, tile, range, &ctx); in xe_svm_handle_pagefault()
840 range_debug(range, "GET PAGES"); in xe_svm_handle_pagefault()
847 range_debug(range, "PAGE FAULT - EVICT PAGES"); in xe_svm_handle_pagefault()
849 &range->base); in xe_svm_handle_pagefault()
854 range_debug(range, "PAGE FAULT - RETRY PAGES"); in xe_svm_handle_pagefault()
863 range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT"); in xe_svm_handle_pagefault()
867 range_debug(range, "PAGE FAULT - BIND"); in xe_svm_handle_pagefault()
879 fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id)); in xe_svm_handle_pagefault()
885 range_debug(range, "PAGE FAULT - RETRY BIND"); in xe_svm_handle_pagefault()
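The fault handler is a retry loop: find or insert the range covering the faulting address, bail out early if it is already valid for this tile, optionally migrate it to VRAM, collect the backing pages, and bind them for the faulting tile; a failed page collection evicts the range back to system memory and retries, and a failed bind retries as well. A condensed, commented sketch of that flow; the drm_gpusvm_* call names, argument lists, and the exact error codes handled are assumptions beyond what the matches show:

    retry:
            /* Find or insert the SVM range covering the faulting address. */
            r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
                                                xe_vma_start(vma), xe_vma_end(vma),
                                                &ctx);               /* args assumed */
            range = to_xe_range(r);
            if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
                    return 0;                    /* already mapped for this tile */

            /* Optionally move the backing store into VRAM first. */
            if (xe_svm_range_needs_migrate_to_vram(range, vma)) {    /* real code has extra conditions */
                    err = xe_svm_alloc_vram(vm, tile, range, &ctx);
                    /* failure here may fall back to system memory */
            }

            /* Collect the pages currently backing the range. */
            err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);  /* name assumed */
            if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
                    /* Evict back to system memory and retry the whole fault. */
                    drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);  /* name assumed */
                    goto retry;
            }

            /* Bind the collected pages into the GPU page tables for this tile. */
            fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
            if (IS_ERR(fence))
                    goto retry;                  /* "RETRY BIND" path, simplified */

            dma_fence_wait(fence, false);
            dma_fence_put(fence);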
909 * Check if an address range has SVM mappings.
911 * Return: True if address range has an SVM mapping, False otherwise
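Given the kernel-doc above, this helper most likely just forwards the interval query to the GPU SVM core. A minimal sketch under that assumption; the function name itself does not appear in the matches, so both xe_svm_has_mapping and drm_gpusvm_has_mapping are assumptions here:

    bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
    {
            /* Assumed to delegate the lookup to the drm_gpusvm core. */
            return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
    }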
985 vr->pagemap.range.start = res->start; in xe_devm_add()
986 vr->pagemap.range.end = res->end; in xe_devm_add()
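Finally, device-memory (VRAM) registration fills a dev_pagemap whose range covers the resource that was requested for this region. A sketch of the surrounding setup, assuming MEMORY_DEVICE_PRIVATE pages and devm_memremap_pages(); only the two range assignments are confirmed by the matches, and the ops/owner helpers are assumed names:

    /* res was presumably obtained via devm_request_free_mem_region(). */
    vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
    vr->pagemap.range.start = res->start;
    vr->pagemap.range.end = res->end;
    vr->pagemap.nr_range = 1;
    vr->pagemap.ops = drm_gpusvm_pagemap_ops_get();      /* assumed helper */
    vr->pagemap.owner = xe_svm_devm_owner(xe);            /* assumed helper */

    /* Map the VRAM region as device-private struct pages. */
    addr = devm_memremap_pages(dev, &vr->pagemap);
    if (IS_ERR(addr))
            return PTR_ERR(addr);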