Lines Matching +full:ctx +full:- +full:asid

1 // SPDX-License-Identifier: MIT
25 .__flags = READ_ONCE(range->base.flags.__flags), in xe_svm_range_in_vram()
34 return xe_svm_range_in_vram(range) && range->tile_present; in xe_svm_range_has_vram_binding()
44 return gpusvm_to_vm(r->gpusvm); in range_to_vm()
49 return drm_gpusvm_range_start(&range->base); in xe_svm_range_start()
54 return drm_gpusvm_range_end(&range->base); in xe_svm_range_end()
59 return drm_gpusvm_range_size(&range->base); in xe_svm_range_size()
63 vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
64 "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
66 (operaton__), range_to_vm(&(r__)->base)->usm.asid, \
67 (r__)->base.gpusvm, \
70 (r__)->base.notifier_seq, \
93 INIT_LIST_HEAD(&range->garbage_collector_link); in xe_svm_range_alloc()
96 return &range->base; in xe_svm_range_alloc()
114 struct xe_device *xe = vm->xe; in xe_svm_garbage_collector_add_range()
118 drm_gpusvm_range_set_unmapped(&range->base, mmu_range); in xe_svm_garbage_collector_add_range()
120 spin_lock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector_add_range()
121 if (list_empty(&range->garbage_collector_link)) in xe_svm_garbage_collector_add_range()
122 list_add_tail(&range->garbage_collector_link, in xe_svm_garbage_collector_add_range()
123 &vm->svm.garbage_collector.range_list); in xe_svm_garbage_collector_add_range()
124 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector_add_range()
126 queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq, in xe_svm_garbage_collector_add_range()
127 &vm->svm.garbage_collector.work); in xe_svm_garbage_collector_add_range()
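The xe_svm_garbage_collector_add_range() snippets above show a deferred-teardown idiom: the range is flagged unmapped in notifier context, queued on a garbage-collector list under a spinlock (list_empty() guards against double-queuing, which works because the link is initialised in xe_svm_range_alloc()), and the page-fault workqueue is kicked to do the real unbind later, outside the notifier. A minimal sketch of that idiom with invented names (my_gc/my_range are not xe types):

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	/* Hypothetical stand-ins for the driver's garbage-collector state. */
	struct my_gc {
		spinlock_t lock;
		struct list_head range_list;
		struct work_struct work;
		struct workqueue_struct *wq;
	};

	struct my_range {
		struct list_head gc_link;	/* INIT_LIST_HEAD()ed at allocation */
	};

	static void my_gc_add_range(struct my_gc *gc, struct my_range *range)
	{
		spin_lock(&gc->lock);
		/* an empty link means "not queued yet"; avoids double-adding */
		if (list_empty(&range->gc_link))
			list_add_tail(&range->gc_link, &gc->range_list);
		spin_unlock(&gc->lock);

		/* harmless if the work item is already pending */
		queue_work(gc->wq, &gc->work);
	}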
136 struct xe_device *xe = vm->xe; in xe_svm_range_notifier_event_begin()
146 if (range->base.flags.unmapped || !range->tile_present) in xe_svm_range_notifier_event_begin()
149 range_debug(range, "NOTIFIER - EXECUTE"); in xe_svm_range_notifier_event_begin()
152 *adj_start = min(xe_svm_range_start(range), mmu_range->start); in xe_svm_range_notifier_event_begin()
153 *adj_end = max(xe_svm_range_end(range), mmu_range->end); in xe_svm_range_notifier_event_begin()
163 range->tile_invalidated |= BIT(id); in xe_svm_range_notifier_event_begin()
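In xe_svm_range_notifier_event_begin() above, the span reported by the MMU notifier is widened with min()/max() to cover each overlapping SVM range, so one TLB invalidation later covers everything, and the tile's bit is recorded in tile_invalidated. A small standalone example of the widening arithmetic (addresses invented for illustration):

	#include <stdio.h>

	static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
	static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

	int main(void)
	{
		unsigned long adj_start = 0x2000, adj_end = 0x3000;	/* span from the MMU notifier */
		unsigned long range_start = 0x1000, range_end = 0x4000;/* an overlapping SVM range */

		adj_start = min_ul(range_start, adj_start);
		adj_end = max_ul(range_end, adj_end);

		/* prints: invalidate [0x1000, 0x4000) */
		printf("invalidate [0x%lx, 0x%lx)\n", adj_start, adj_end);
		return 0;
	}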
173 struct drm_gpusvm_ctx ctx = { .in_notifier = true, }; in xe_svm_range_notifier_event_end() local
177 drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx); in xe_svm_range_notifier_event_end()
178 if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP) in xe_svm_range_notifier_event_end()
188 struct xe_device *xe = vm->xe; in xe_svm_invalidate()
193 u64 adj_start = mmu_range->start, adj_end = mmu_range->end; in xe_svm_invalidate()
201 vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm, in xe_svm_invalidate()
202 "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d", in xe_svm_invalidate()
203 vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq, in xe_svm_invalidate()
204 mmu_range->start, mmu_range->end, mmu_range->event); in xe_svm_invalidate()
246 xe_gt_tlb_invalidation_fence_init(tile->primary_gt, in xe_svm_invalidate()
249 err = xe_gt_tlb_invalidation_range(tile->primary_gt, in xe_svm_invalidate()
253 vm->usm.asid); in xe_svm_invalidate()
258 if (!tile->media_gt) in xe_svm_invalidate()
261 xe_gt_tlb_invalidation_fence_init(tile->media_gt, in xe_svm_invalidate()
264 err = xe_gt_tlb_invalidation_range(tile->media_gt, in xe_svm_invalidate()
268 vm->usm.asid); in xe_svm_invalidate()
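The xe_svm_invalidate() snippets above issue one fence-tracked, ranged TLB invalidation per GT on the tile, tagged with the VM's address-space ID (asid), and skip the media GT on tiles that lack one. A kernel-style sketch of that shape, with a hypothetical issue callback standing in for the fence-init + xe_gt_tlb_invalidation_range() pair rather than the real signatures:

	/* Hypothetical sketch; gt pointers are opaque, issue_range_inval() is invented. */
	struct tile_gts {
		void *primary_gt;
		void *media_gt;		/* NULL on tiles without a media GT */
	};

	static int invalidate_tile_gts(struct tile_gts *t, u64 start, u64 end, u32 asid,
				       int (*issue_range_inval)(void *gt, u64 start,
								u64 end, u32 asid))
	{
		int err;

		err = issue_range_inval(t->primary_gt, start, end, asid);
		if (err)
			return err;

		if (!t->media_gt)
			return 0;	/* nothing else to flush on this tile */

		return issue_range_inval(t->media_gt, start, end, asid);
	}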
299 drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base); in __xe_svm_garbage_collector()
309 lockdep_assert_held_write(&vm->lock); in xe_svm_garbage_collector()
312 return -ENOENT; in xe_svm_garbage_collector()
314 spin_lock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
316 range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list, in xe_svm_garbage_collector()
322 list_del(&range->garbage_collector_link); in xe_svm_garbage_collector()
323 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
327 drm_warn(&vm->xe->drm, in xe_svm_garbage_collector()
334 spin_lock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
336 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
346 down_write(&vm->lock); in xe_svm_garbage_collector_work_func()
348 up_write(&vm->lock); in xe_svm_garbage_collector_work_func()
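The consumer side, xe_svm_garbage_collector(), drains that list with the usual pop-under-lock / process-outside-lock pattern: take the spinlock, detach the first entry, drop the lock for the (sleeping) unbind and removal, then re-take the lock and look again until the list is empty; the work function merely wraps this in the VM's write lock. A sketch of the drain loop, reusing the hypothetical my_gc/my_range types from the earlier sketch:

	static void my_gc_drain(struct my_gc *gc,
				void (*process)(struct my_range *range))
	{
		struct my_range *range;

		spin_lock(&gc->lock);
		for (;;) {
			range = list_first_entry_or_null(&gc->range_list,
							 struct my_range, gc_link);
			if (!range)
				break;

			list_del(&range->gc_link);
			spin_unlock(&gc->lock);

			process(range);		/* unbind + free; may sleep */

			spin_lock(&gc->lock);
		}
		spin_unlock(&gc->lock);
	}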
370 xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base); in xe_vram_region_page_to_dpa()
372 offset = (pfn << PAGE_SHIFT) - vr->hpa_base; in xe_vram_region_page_to_dpa()
373 dpa = vr->dpa_base + offset; in xe_vram_region_page_to_dpa()
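xe_vram_region_page_to_dpa() above rebases a device page's PFN from the host-physical window backing the ZONE_DEVICE pages (vr->hpa_base) onto the device's own address space (vr->dpa_base). A standalone worked example of the arithmetic, with invented base addresses:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		uint64_t hpa_base = 0x200000000ull;	/* host-physical base of the VRAM pagemap */
		uint64_t dpa_base = 0x000000000ull;	/* device-physical base of the same region */
		uint64_t pfn      = 0x200042ull;	/* PFN of one device page */

		uint64_t offset = (pfn << PAGE_SHIFT) - hpa_base;	/* byte offset into the region */
		uint64_t dpa    = dpa_base + offset;

		/* prints: offset=0x42000 dpa=0x42000 */
		printf("offset=0x%llx dpa=0x%llx\n",
		       (unsigned long long)offset, (unsigned long long)dpa);
		return 0;
	}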
411 chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE); in xe_svm_copy()
436 match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr; in xe_svm_copy()
440 * Mismatched physical address, 8M copy chunk, or last page - in xe_svm_copy()
452 vm_dbg(&tile->xe->drm, in xe_svm_copy()
453 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld", in xe_svm_copy()
454 vram_addr, (u64)dma_addr[pos], i - pos + incr); in xe_svm_copy()
455 __fence = xe_migrate_from_vram(tile->migrate, in xe_svm_copy()
456 i - pos + incr, in xe_svm_copy()
460 vm_dbg(&tile->xe->drm, in xe_svm_copy()
461 "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld", in xe_svm_copy()
462 (u64)dma_addr[pos], vram_addr, i - pos + incr); in xe_svm_copy()
463 __fence = xe_migrate_to_vram(tile->migrate, in xe_svm_copy()
464 i - pos + incr, in xe_svm_copy()
488 vm_dbg(&tile->xe->drm, in xe_svm_copy()
489 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d", in xe_svm_copy()
491 __fence = xe_migrate_from_vram(tile->migrate, 1, in xe_svm_copy()
495 vm_dbg(&tile->xe->drm, in xe_svm_copy()
496 "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d", in xe_svm_copy()
498 __fence = xe_migrate_to_vram(tile->migrate, 1, in xe_svm_copy()
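The xe_svm_copy() snippets above batch the migration: starting from a run marker pos, pages whose VRAM addresses stay physically contiguous are accumulated, and one xe_migrate_from_vram()/xe_migrate_to_vram() copy is issued when contiguity breaks, the run reaches the migrate chunk size, or the last page is hit; stray single pages take the 1-page calls at the end. A self-contained sketch of that run-accumulation logic (issue_copy() is a stand-in for the migrate calls, 4 KiB pages assumed):

	#include <stdbool.h>
	#include <stdint.h>

	static void copy_in_runs(const uint64_t *vram_addr, unsigned long npages,
				 unsigned long chunk_pages,
				 void (*issue_copy)(uint64_t start, unsigned long n))
	{
		unsigned long i, pos = 0;

		for (i = 1; i <= npages; i++) {
			bool last = (i == npages);
			/* contiguous iff page i continues the run that began at pos */
			bool match = !last &&
				vram_addr[pos] + (uint64_t)(i - pos) * 4096 == vram_addr[i];
			bool chunk = (i - pos) == chunk_pages;

			/* flush on a contiguity break, a full chunk, or the last page */
			if (!match || chunk || last) {
				issue_copy(vram_addr[pos], i - pos);
				pos = i;
			}
		}
	}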
551 return PHYS_PFN(offset + vr->hpa_base); in block_offset_to_pfn()
556 return &tile->mem.vram.ttm.mm; in tile_to_buddy()
563 struct ttm_resource *res = bo->ttm.resource; in xe_svm_populate_devmem_pfn()
564 struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks; in xe_svm_populate_devmem_pfn()
569 struct xe_vram_region *vr = block->private; in xe_svm_populate_devmem_pfn()
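block_offset_to_pfn() and xe_svm_populate_devmem_pfn() above do the inverse of the PFN-to-DPA conversion: each drm_buddy block backing the BO is looked up through the region cookie stashed in block->private and turned back into host PFNs, one per page. A plausible shape for that walk (a hedged sketch, not the driver's verbatim code), assuming the standard drm_buddy block helpers:

	static void populate_pfn_sketch(struct drm_buddy *mm, struct list_head *blocks,
					unsigned long *pfn)
	{
		struct drm_buddy_block *block;
		unsigned long j = 0;

		list_for_each_entry(block, blocks, link) {
			/* region cookie set when the blocks were allocated */
			struct xe_vram_region *vr = block->private;
			unsigned long block_pfn =
				PHYS_PFN(drm_buddy_block_offset(block) + vr->hpa_base);
			u64 i;

			/* one PFN per PAGE_SIZE chunk of the block */
			for (i = 0; i < drm_buddy_block_size(mm, block) >> PAGE_SHIFT; ++i)
				pfn[j++] = block_pfn + i;
		}
	}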
602 * xe_svm_init() - SVM initialize
613 spin_lock_init(&vm->svm.garbage_collector.lock); in xe_svm_init()
614 INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list); in xe_svm_init()
615 INIT_WORK(&vm->svm.garbage_collector.work, in xe_svm_init()
618 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm, in xe_svm_init()
619 current->mm, xe_svm_devm_owner(vm->xe), 0, in xe_svm_init()
620 vm->size, xe_modparam.svm_notifier_size * SZ_1M, in xe_svm_init()
626 drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock); in xe_svm_init()
632 * xe_svm_close() - SVM close
639 xe_assert(vm->xe, xe_vm_is_closed(vm)); in xe_svm_close()
640 flush_work(&vm->svm.garbage_collector.work); in xe_svm_close()
644 * xe_svm_fini() - SVM finalize
651 xe_assert(vm->xe, xe_vm_is_closed(vm)); in xe_svm_fini()
653 drm_gpusvm_fini(&vm->svm.gpusvm); in xe_svm_fini()
664 return ((READ_ONCE(range->tile_present) & in xe_svm_range_is_valid()
665 ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) && in xe_svm_range_is_valid()
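xe_svm_range_is_valid() above treats a range as valid for a given tile only when that tile's bit is set in tile_present and not set in tile_invalidated. A standalone worked example of the mask arithmetic (bit values invented):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n)	(1u << (n))

	int main(void)
	{
		uint8_t tile_present     = BIT(0) | BIT(1);	/* bound on tiles 0 and 1 */
		uint8_t tile_invalidated = BIT(1);		/* tile 1 invalidated since binding */

		bool valid_tile0 = (tile_present & ~tile_invalidated) & BIT(0);	/* true */
		bool valid_tile1 = (tile_present & ~tile_invalidated) & BIT(1);	/* false */

		printf("tile0=%d tile1=%d\n", valid_tile0, valid_tile1);
		return 0;
	}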
671 return &tile->mem.vram; in tile_to_vr()
676 const struct drm_gpusvm_ctx *ctx) in xe_svm_alloc_vram() argument
678 struct mm_struct *mm = vm->svm.gpusvm.mm; in xe_svm_alloc_vram()
689 return -EFAULT; in xe_svm_alloc_vram()
705 drm_gpusvm_devmem_init(&bo->devmem_allocation, in xe_svm_alloc_vram()
706 vm->xe->drm.dev, mm, in xe_svm_alloc_vram()
708 &tile->mem.vram.dpagemap, in xe_svm_alloc_vram()
711 blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks; in xe_svm_alloc_vram()
713 block->private = vr; in xe_svm_alloc_vram()
716 err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base, in xe_svm_alloc_vram()
717 &bo->devmem_allocation, ctx); in xe_svm_alloc_vram()
719 xe_svm_devmem_release(&bo->devmem_allocation); in xe_svm_alloc_vram()
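xe_svm_alloc_vram() above allocates a backing BO, registers it with GPU SVM via drm_gpusvm_devmem_init(), tags every buddy block with its VRAM region (block->private = vr, the cookie read back in the PFN walk earlier), and only then migrates the CPU range into it; if the migration fails the freshly created device-memory allocation is released again so no VRAM is leaked. A small hedged wrapper showing just that last ordering (the devmem type name is assumed from the drm_gpusvm_devmem_init() call above):

	static int migrate_or_release_sketch(struct drm_gpusvm *gpusvm,
					     struct drm_gpusvm_range *range,
					     struct drm_gpusvm_devmem *devmem,
					     void (*release)(struct drm_gpusvm_devmem *),
					     const struct drm_gpusvm_ctx *ctx)
	{
		int err;

		err = drm_gpusvm_migrate_to_devmem(gpusvm, range, devmem, ctx);
		if (err)
			release(devmem);	/* undo the allocation on failure */

		return err;
	}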
733 if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) in supports_4K_migration()
742 struct xe_vm *vm = range_to_vm(&range->base); in xe_svm_range_needs_migrate_to_vram()
745 if (!range->base.flags.migrate_devmem) in xe_svm_range_needs_migrate_to_vram()
749 drm_dbg(&vm->xe->drm, "Range is already in VRAM\n"); in xe_svm_range_needs_migrate_to_vram()
753 if (range_size <= SZ_64K && !supports_4K_migration(vm->xe)) { in xe_svm_range_needs_migrate_to_vram()
754 drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n"); in xe_svm_range_needs_migrate_to_vram()
762 * xe_svm_handle_pagefault() - SVM handle page fault
778 struct drm_gpusvm_ctx ctx = { in xe_svm_handle_pagefault() local
780 .devmem_possible = IS_DGFX(vm->xe) && in xe_svm_handle_pagefault()
782 .check_pages_threshold = IS_DGFX(vm->xe) && in xe_svm_handle_pagefault()
784 .devmem_only = atomic && IS_DGFX(vm->xe) && in xe_svm_handle_pagefault()
786 .timeslice_ms = atomic && IS_DGFX(vm->xe) && in xe_svm_handle_pagefault()
793 int migrate_try_count = ctx.devmem_only ? 3 : 1; in xe_svm_handle_pagefault()
797 lockdep_assert_held_write(&vm->lock); in xe_svm_handle_pagefault()
798 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma)); in xe_svm_handle_pagefault()
806 r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr, in xe_svm_handle_pagefault()
808 &ctx); in xe_svm_handle_pagefault()
812 if (ctx.devmem_only && !r->flags.migrate_devmem) in xe_svm_handle_pagefault()
813 return -EACCES; in xe_svm_handle_pagefault()
816 if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) in xe_svm_handle_pagefault()
821 if (--migrate_try_count >= 0 && in xe_svm_handle_pagefault()
823 err = xe_svm_alloc_vram(vm, tile, range, &ctx); in xe_svm_handle_pagefault()
824 ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ in xe_svm_handle_pagefault()
826 if (migrate_try_count || !ctx.devmem_only) { in xe_svm_handle_pagefault()
827 drm_dbg(&vm->xe->drm, in xe_svm_handle_pagefault()
828 "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n", in xe_svm_handle_pagefault()
829 vm->usm.asid, ERR_PTR(err)); in xe_svm_handle_pagefault()
832 drm_err(&vm->xe->drm, in xe_svm_handle_pagefault()
833 "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n", in xe_svm_handle_pagefault()
834 vm->usm.asid, ERR_PTR(err)); in xe_svm_handle_pagefault()
841 err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx); in xe_svm_handle_pagefault()
843 if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) { in xe_svm_handle_pagefault()
844 ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ in xe_svm_handle_pagefault()
845 if (migrate_try_count > 0 || !ctx.devmem_only) { in xe_svm_handle_pagefault()
846 if (err == -EOPNOTSUPP) { in xe_svm_handle_pagefault()
847 range_debug(range, "PAGE FAULT - EVICT PAGES"); in xe_svm_handle_pagefault()
848 drm_gpusvm_range_evict(&vm->svm.gpusvm, in xe_svm_handle_pagefault()
849 &range->base); in xe_svm_handle_pagefault()
851 drm_dbg(&vm->xe->drm, in xe_svm_handle_pagefault()
852 "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n", in xe_svm_handle_pagefault()
853 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); in xe_svm_handle_pagefault()
854 range_debug(range, "PAGE FAULT - RETRY PAGES"); in xe_svm_handle_pagefault()
857 drm_err(&vm->xe->drm, in xe_svm_handle_pagefault()
858 "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n", in xe_svm_handle_pagefault()
859 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); in xe_svm_handle_pagefault()
863 range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT"); in xe_svm_handle_pagefault()
867 range_debug(range, "PAGE FAULT - BIND"); in xe_svm_handle_pagefault()
872 err = drm_exec_lock_obj(&exec, vm->gpuvm.r_obj); in xe_svm_handle_pagefault()
879 fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id)); in xe_svm_handle_pagefault()
883 if (err == -EAGAIN) { in xe_svm_handle_pagefault()
884 ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ in xe_svm_handle_pagefault()
885 range_debug(range, "PAGE FAULT - RETRY BIND"); in xe_svm_handle_pagefault()
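The xe_svm_handle_pagefault() snippets above form a retry loop: devmem-only faults get up to three VRAM migration attempts while others get one, ctx.timeslice_ms is doubled on every retry so a range ping-ponging between CPU and GPU eventually stays resident long enough to bind, and -EAGAIN from the bind (a racing invalidation) simply restarts the loop. A self-contained toy model of that bounded-retry pattern; every helper here is a stub invented for the example:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static int attempts;

	static int migrate_to_vram(void) { return attempts < 2 ? -EBUSY : 0; }
	static int get_pages(void)       { return attempts++ < 2 ? -EAGAIN : 0; }

	int main(void)
	{
		bool devmem_only = true;
		int migrate_try_count = devmem_only ? 3 : 1;	/* bounded migration attempts */
		unsigned int timeslice_ms = 5;
		int err;

	retry:
		if (--migrate_try_count >= 0 && migrate_to_vram())
			printf("VRAM migration failed, falling back to retrying fault\n");

		err = get_pages();
		if (err == -EAGAIN) {
			timeslice_ms <<= 1;	/* double the timeslice before retrying */
			printf("raced with invalidation, retrying (timeslice=%u ms)\n",
			       timeslice_ms);
			goto retry;
		}

		printf("fault serviced, err=%d, final timeslice=%u ms\n", err, timeslice_ms);
		return 0;
	}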
904 * xe_svm_has_mapping() - SVM has mappings
915 return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end); in xe_svm_has_mapping()
919 * xe_svm_bo_evict() - SVM evict BO to system memory
929 return drm_gpusvm_evict_to_ram(&bo->devmem_allocation); in xe_svm_bo_evict()
940 struct device *pgmap_dev = dpagemap->dev; in xe_drm_pagemap_device_map()
972 struct device *dev = &to_pci_dev(xe->drm.dev)->dev; in xe_devm_add()
978 vr->usable_size); in xe_devm_add()
984 vr->pagemap.type = MEMORY_DEVICE_PRIVATE; in xe_devm_add()
985 vr->pagemap.range.start = res->start; in xe_devm_add()
986 vr->pagemap.range.end = res->end; in xe_devm_add()
987 vr->pagemap.nr_range = 1; in xe_devm_add()
988 vr->pagemap.ops = drm_gpusvm_pagemap_ops_get(); in xe_devm_add()
989 vr->pagemap.owner = xe_svm_devm_owner(xe); in xe_devm_add()
990 addr = devm_memremap_pages(dev, &vr->pagemap); in xe_devm_add()
992 vr->dpagemap.dev = dev; in xe_devm_add()
993 vr->dpagemap.ops = &xe_drm_pagemap_ops; in xe_devm_add()
996 devm_release_mem_region(dev, res->start, resource_size(res)); in xe_devm_add()
998 drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n", in xe_devm_add()
999 tile->id, ERR_PTR(ret)); in xe_devm_add()
1002 vr->hpa_base = res->start; in xe_devm_add()
1004 drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n", in xe_devm_add()
1005 tile->id, vr->io_start, vr->io_start + vr->usable_size, res); in xe_devm_add()
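xe_devm_add() above is the generic ZONE_DEVICE device-private registration: carve a free host-physical window out of iomem_resource, describe it in a struct dev_pagemap (MEMORY_DEVICE_PRIVATE, one range, the shared SVM owner tag and the gpusvm migrate ops), create struct pages with devm_memremap_pages(), and remember res->start as hpa_base for the PFN/DPA conversions earlier. A hedged sketch of that sequence using the core mm API (error handling abbreviated; parameter names are mine):

	#include <linux/err.h>
	#include <linux/ioport.h>
	#include <linux/memremap.h>

	static int register_device_private_sketch(struct device *dev,
						  struct dev_pagemap *pgmap,
						  resource_size_t vram_size,
						  const struct dev_pagemap_ops *ops,
						  void *owner, u64 *hpa_base)
	{
		struct resource *res;
		void *addr;

		/* carve out a host-physical window to back the device pages */
		res = devm_request_free_mem_region(dev, &iomem_resource, vram_size);
		if (IS_ERR(res))
			return PTR_ERR(res);

		pgmap->type = MEMORY_DEVICE_PRIVATE;	/* pages are not CPU-addressable */
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->nr_range = 1;
		pgmap->ops = ops;			/* provides migrate_to_ram() etc. */
		pgmap->owner = owner;			/* shared owner tag for SVM lookups */

		addr = devm_memremap_pages(dev, pgmap);	/* creates the struct pages */
		if (IS_ERR(addr)) {
			devm_release_mem_region(dev, res->start, resource_size(res));
			return PTR_ERR(addr);
		}

		*hpa_base = res->start;			/* base for PFN <-> DPA conversions */
		return 0;
	}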
1016 * xe_svm_flush() - SVM flush
1024 flush_work(&vm->svm.garbage_collector.work); in xe_svm_flush()