Lines Matching +full:single +full:- +full:tt

1 // SPDX-License-Identifier: MIT
8 #include <linux/dma-buf.h>
85 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
90 return mem_type_is_vram(res->mem_type); in resource_is_vram()
95 return resource_is_vram(bo->ttm.resource) || in xe_bo_is_vram()
96 resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource); in xe_bo_is_vram()
101 return bo->ttm.resource->mem_type == XE_PL_STOLEN; in xe_bo_is_stolen()
105 * xe_bo_has_single_placement - check if BO has a single possible memory placement
110 * Returns: true if the BO can only be placed in a single memory location, false otherwise.
115 return bo->placement.num_placement == 1; in xe_bo_has_single_placement()
119 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
134 * xe_bo_is_vm_bound - check if BO has any mappings through VM_BIND
146 return !list_empty(&bo->ttm.base.gpuva.list); in xe_bo_is_vm_bound()
151 return bo->flags & XE_BO_FLAG_USER; in xe_bo_is_user()
160 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; in mem_type_to_migrate()
161 return tile->migrate; in mem_type_to_migrate()
166 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in res_to_mem_region()
171 mgr = ttm_manager_type(&xe->ttm, res->mem_type); in res_to_mem_region()
181 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_system()
183 bo->placements[*c] = (struct ttm_place) { in try_add_system()
203 struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type); in add_vram()
209 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in add_vram()
212 xe_assert(xe, vram && vram->usable_size); in add_vram()
213 io_size = vram->io_size; in add_vram()
218 if (io_size < vram->usable_size) { in add_vram()
234 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); in try_add_vram()
236 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); in try_add_vram()
243 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_stolen()
245 bo->placements[*c] = (struct ttm_place) { in try_add_stolen()
264 return -EINVAL; in __xe_bo_placement_for_flags()
266 bo->placement = (struct ttm_placement) { in __xe_bo_placement_for_flags()
268 .placement = bo->placements, in __xe_bo_placement_for_flags()
288 if (tbo->type == ttm_bo_type_sg) { in xe_evict_flags()
289 placement->num_placement = 0; in xe_evict_flags()
298 if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) { in xe_evict_flags()
307 switch (tbo->resource->mem_type) { in xe_evict_flags()
320 /* struct xe_ttm_tt - Subclassed ttm_tt for xe */
323 /** @xe: The xe device */
331 static int xe_tt_map_sg(struct ttm_tt *tt) in xe_tt_map_sg() argument
333 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_tt_map_sg()
334 unsigned long num_pages = tt->num_pages; in xe_tt_map_sg()
337 XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && in xe_tt_map_sg()
338 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)); in xe_tt_map_sg()
340 if (xe_tt->sg) in xe_tt_map_sg()
343 ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages, in xe_tt_map_sg()
346 xe_sg_segment_size(xe_tt->xe->drm.dev), in xe_tt_map_sg()
351 xe_tt->sg = &xe_tt->sgt; in xe_tt_map_sg()
352 ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, in xe_tt_map_sg()
355 sg_free_table(xe_tt->sg); in xe_tt_map_sg()
356 xe_tt->sg = NULL; in xe_tt_map_sg()
363 static void xe_tt_unmap_sg(struct ttm_tt *tt) in xe_tt_unmap_sg() argument
365 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_tt_unmap_sg()
367 if (xe_tt->sg) { in xe_tt_unmap_sg()
368 dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, in xe_tt_unmap_sg()
370 sg_free_table(xe_tt->sg); in xe_tt_unmap_sg()
371 xe_tt->sg = NULL; in xe_tt_unmap_sg()
377 struct ttm_tt *tt = bo->ttm.ttm; in xe_bo_sg() local
378 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_bo_sg()
380 return xe_tt->sg; in xe_bo_sg()
387 static void xe_ttm_tt_account_add(struct ttm_tt *tt) in xe_ttm_tt_account_add() argument
389 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_ttm_tt_account_add()
391 if (xe_tt->purgeable) in xe_ttm_tt_account_add()
392 xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages); in xe_ttm_tt_account_add()
394 xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0); in xe_ttm_tt_account_add()
397 static void xe_ttm_tt_account_subtract(struct ttm_tt *tt) in xe_ttm_tt_account_subtract() argument
399 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_ttm_tt_account_subtract()
401 if (xe_tt->purgeable) in xe_ttm_tt_account_subtract()
402 xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages); in xe_ttm_tt_account_subtract()
404 xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0); in xe_ttm_tt_account_subtract()
413 struct ttm_tt *tt; in xe_ttm_tt_create() local
422 tt = &xe_tt->ttm; in xe_ttm_tt_create()
423 xe_tt->xe = xe; in xe_ttm_tt_create()
427 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), in xe_ttm_tt_create()
437 switch (bo->cpu_caching) { in xe_ttm_tt_create()
446 WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); in xe_ttm_tt_create()
449 * Display scanout is always non-coherent with the CPU cache. in xe_ttm_tt_create()
452 * non-coherent and require a CPU:WC mapping. in xe_ttm_tt_create()
454 if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || in xe_ttm_tt_create()
455 (xe->info.graphics_verx100 >= 1270 && in xe_ttm_tt_create()
456 bo->flags & XE_BO_FLAG_PAGETABLE)) in xe_ttm_tt_create()
460 if (bo->flags & XE_BO_FLAG_NEEDS_UC) { in xe_ttm_tt_create()
462 * Valid for internally-created buffers only, for in xe_ttm_tt_create()
465 xe_assert(xe, bo->cpu_caching == 0); in xe_ttm_tt_create()
469 if (ttm_bo->type != ttm_bo_type_sg) in xe_ttm_tt_create()
472 err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages); in xe_ttm_tt_create()
478 if (ttm_bo->type != ttm_bo_type_sg) { in xe_ttm_tt_create()
479 err = ttm_tt_setup_backup(tt); in xe_ttm_tt_create()
481 ttm_tt_fini(tt); in xe_ttm_tt_create()
487 return tt; in xe_ttm_tt_create()
490 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt, in xe_ttm_tt_populate() argument
493 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_ttm_tt_populate()
497 * dma-bufs are not populated with pages, and the dma- in xe_ttm_tt_populate()
500 if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && in xe_ttm_tt_populate()
501 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) in xe_ttm_tt_populate()
504 if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) { in xe_ttm_tt_populate()
505 err = ttm_tt_restore(ttm_dev, tt, ctx); in xe_ttm_tt_populate()
507 ttm_tt_clear_backed_up(tt); in xe_ttm_tt_populate()
508 err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); in xe_ttm_tt_populate()
513 xe_tt->purgeable = false; in xe_ttm_tt_populate()
514 xe_ttm_tt_account_add(tt); in xe_ttm_tt_populate()
519 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt) in xe_ttm_tt_unpopulate() argument
521 if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && in xe_ttm_tt_unpopulate()
522 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) in xe_ttm_tt_unpopulate()
525 xe_tt_unmap_sg(tt); in xe_ttm_tt_unpopulate()
527 ttm_pool_free(&ttm_dev->pool, tt); in xe_ttm_tt_unpopulate()
528 xe_ttm_tt_account_subtract(tt); in xe_ttm_tt_unpopulate()
531 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt) in xe_ttm_tt_destroy() argument
533 ttm_tt_fini(tt); in xe_ttm_tt_destroy()
534 kfree(tt); in xe_ttm_tt_destroy()
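
The matches above span the whole ttm_tt backend lifecycle for xe: xe_ttm_tt_create() picks a caching mode and initializes the tt (plus optional backup storage), xe_ttm_tt_populate() allocates or restores pages and adds them to the shrinker accounting, xe_ttm_tt_unpopulate() unmaps the sg table, frees the pages and subtracts them again, and xe_ttm_tt_destroy() finalizes the tt. As orientation only (the table itself is not among the matched lines), these are the callbacks a TTM driver wires into its struct ttm_device_funcs; a minimal sketch, with the table name invented here for illustration:

/* Illustrative wiring only; the real xe table also sets evict_flags, move,
 * io_mem_reserve, io_mem_pfn, access_memory, release_notify, etc., which
 * appear further down in this listing.
 */
static const struct ttm_device_funcs example_xe_ttm_funcs = {
	.ttm_tt_create		= xe_ttm_tt_create,
	.ttm_tt_populate	= xe_ttm_tt_populate,
	.ttm_tt_unpopulate	= xe_ttm_tt_unpopulate,
	.ttm_tt_destroy		= xe_ttm_tt_destroy,
};
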
542 return vres->used_visible_size == mem->size; in xe_ttm_resource_visible()
550 switch (mem->mem_type) { in xe_ttm_io_mem_reserve()
559 return -EINVAL; in xe_ttm_io_mem_reserve()
561 mem->bus.offset = mem->start << PAGE_SHIFT; in xe_ttm_io_mem_reserve()
563 if (vram->mapping && in xe_ttm_io_mem_reserve()
564 mem->placement & TTM_PL_FLAG_CONTIGUOUS) in xe_ttm_io_mem_reserve()
565 mem->bus.addr = (u8 __force *)vram->mapping + in xe_ttm_io_mem_reserve()
566 mem->bus.offset; in xe_ttm_io_mem_reserve()
568 mem->bus.offset += vram->io_start; in xe_ttm_io_mem_reserve()
569 mem->bus.is_iomem = true; in xe_ttm_io_mem_reserve()
572 mem->bus.caching = ttm_write_combined; in xe_ttm_io_mem_reserve()
578 return -EINVAL; in xe_ttm_io_mem_reserve()
587 struct drm_gem_object *obj = &bo->ttm.base; in xe_bo_trigger_rebind()
592 dma_resv_assert_held(bo->ttm.base.resv); in xe_bo_trigger_rebind()
594 if (!list_empty(&bo->ttm.base.gpuva.list)) { in xe_bo_trigger_rebind()
595 dma_resv_iter_begin(&cursor, bo->ttm.base.resv, in xe_bo_trigger_rebind()
603 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_bo_trigger_rebind()
614 if (ctx->no_wait_gpu && in xe_bo_trigger_rebind()
615 !dma_resv_test_signaled(bo->ttm.base.resv, in xe_bo_trigger_rebind()
617 return -EBUSY; in xe_bo_trigger_rebind()
619 timeout = dma_resv_wait_timeout(bo->ttm.base.resv, in xe_bo_trigger_rebind()
621 ctx->interruptible, in xe_bo_trigger_rebind()
624 return -ETIME; in xe_bo_trigger_rebind()
645 * The dma-buf map_attachment() / unmap_attachment() calls are hooked up here.
656 struct dma_buf_attachment *attach = ttm_bo->base.import_attach; in xe_bo_move_dmabuf()
657 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt, in xe_bo_move_dmabuf()
659 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_dmabuf()
663 xe_assert(xe, ttm_bo->ttm); in xe_bo_move_dmabuf()
665 if (new_res->mem_type == XE_PL_SYSTEM) in xe_bo_move_dmabuf()
668 if (ttm_bo->sg) { in xe_bo_move_dmabuf()
669 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL); in xe_bo_move_dmabuf()
670 ttm_bo->sg = NULL; in xe_bo_move_dmabuf()
677 ttm_bo->sg = sg; in xe_bo_move_dmabuf()
678 xe_tt->sg = sg; in xe_bo_move_dmabuf()
687 * xe_bo_move_notify - Notify subsystems of a pending move
701 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
707 struct ttm_buffer_object *ttm_bo = &bo->ttm; in xe_bo_move_notify()
708 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_notify()
709 struct ttm_resource *old_mem = ttm_bo->resource; in xe_bo_move_notify()
710 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; in xe_bo_move_notify()
719 return -EINVAL; in xe_bo_move_notify()
726 /* Don't call move_notify() for imported dma-bufs. */ in xe_bo_move_notify()
727 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach) in xe_bo_move_notify()
728 dma_buf_move_notify(ttm_bo->base.dma_buf); in xe_bo_move_notify()
736 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
737 if (!list_empty(&bo->vram_userfault_link)) in xe_bo_move_notify()
738 list_del_init(&bo->vram_userfault_link); in xe_bo_move_notify()
739 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
750 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move()
752 struct ttm_resource *old_mem = ttm_bo->resource; in xe_bo_move()
753 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; in xe_bo_move()
754 struct ttm_tt *ttm = ttm_bo->ttm; in xe_bo_move()
764 /* Bo creation path, moving to system or TT. */ in xe_bo_move()
766 if (new_mem->mem_type == XE_PL_TT) in xe_bo_move()
773 if (ttm_bo->type == ttm_bo_type_sg) { in xe_bo_move()
781 (ttm->page_flags & TTM_TT_FLAG_SWAPPED)); in xe_bo_move()
783 move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) : in xe_bo_move()
786 needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) || in xe_bo_move()
787 (!ttm && ttm_bo->type == ttm_bo_type_device); in xe_bo_move()
789 if (new_mem->mem_type == XE_PL_TT) { in xe_bo_move()
800 if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) && in xe_bo_move()
801 new_mem->mem_type == XE_PL_SYSTEM) { in xe_bo_move()
804 drm_dbg(&xe->drm, "Evict system allocator BO success\n"); in xe_bo_move()
807 drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n", in xe_bo_move()
814 if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) { in xe_bo_move()
820 if (evict && ctx->resv) { in xe_bo_move()
823 drm_gem_for_each_gpuvm_bo(vm_bo, &bo->ttm.base) { in xe_bo_move()
824 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_bo_move()
826 if (xe_vm_resv(vm) == ctx->resv && in xe_bo_move()
828 ret = -EBUSY; in xe_bo_move()
835 * Failed multi-hop where the old_mem is still marked as in xe_bo_move()
839 new_mem->mem_type == XE_PL_TT) { in xe_bo_move()
851 new_mem->mem_type == XE_PL_SYSTEM) { in xe_bo_move()
852 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, in xe_bo_move()
870 new_mem->mem_type == XE_PL_SYSTEM))) { in xe_bo_move()
871 hop->fpfn = 0; in xe_bo_move()
872 hop->lpfn = 0; in xe_bo_move()
873 hop->mem_type = XE_PL_TT; in xe_bo_move()
874 hop->flags = TTM_PL_FLAG_TEMPORARY; in xe_bo_move()
875 ret = -EMULTIHOP; in xe_bo_move()
879 if (bo->tile) in xe_bo_move()
880 migrate = bo->tile->migrate; in xe_bo_move()
882 migrate = mem_type_to_migrate(xe, new_mem->mem_type); in xe_bo_move()
886 migrate = xe->tiles[0].migrate; in xe_bo_move()
889 trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source); in xe_bo_move()
897 drm_WARN_ON(&xe->drm, handle_system_ccs); in xe_bo_move()
918 void __iomem *new_addr = vram->mapping + in xe_bo_move()
919 (new_mem->start << PAGE_SHIFT); in xe_bo_move()
921 if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) { in xe_bo_move()
922 ret = -EINVAL; in xe_bo_move()
927 xe_assert(xe, new_mem->start == in xe_bo_move()
928 bo->placements->fpfn); in xe_bo_move()
930 iosys_map_set_vaddr_iomem(&bo->vmap, new_addr); in xe_bo_move()
937 if (mem_type_is_vram(new_mem->mem_type)) in xe_bo_move()
963 * bo->resource == NULL, so just attach the in xe_bo_move()
966 dma_resv_add_fence(ttm_bo->base.resv, fence, in xe_bo_move()
977 if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) && in xe_bo_move()
978 ttm_bo->ttm) { in xe_bo_move()
979 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, in xe_bo_move()
986 xe_tt_unmap_sg(ttm_bo->ttm); in xe_bo_move()
999 if (bo->resource->mem_type != XE_PL_SYSTEM) { in xe_bo_shrink_purge()
1010 xe_tt_unmap_sg(bo->ttm); in xe_bo_shrink_purge()
1014 *scanned += bo->ttm->num_pages; in xe_bo_shrink_purge()
1021 xe_ttm_tt_account_subtract(bo->ttm); in xe_bo_shrink_purge()
1027 * xe_bo_shrink() - Try to shrink an xe bo.
1034 * Try to shrink or purge a bo, and if it succeeds, unmap its DMA mappings.
1048 struct ttm_tt *tt = bo->ttm; in xe_bo_shrink() local
1049 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_bo_shrink()
1050 struct ttm_place place = {.mem_type = bo->resource->mem_type}; in xe_bo_shrink()
1052 struct xe_device *xe = xe_tt->xe; in xe_bo_shrink()
1056 if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) || in xe_bo_shrink()
1057 (flags.purge && !xe_tt->purgeable)) in xe_bo_shrink()
1058 return -EBUSY; in xe_bo_shrink()
1061 return -EBUSY; in xe_bo_shrink()
1066 if (xe_tt->purgeable) { in xe_bo_shrink()
1067 if (bo->resource->mem_type != XE_PL_SYSTEM) in xe_bo_shrink()
1074 /* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */ in xe_bo_shrink()
1075 needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM && in xe_bo_shrink()
1080 *scanned += tt->num_pages; in xe_bo_shrink()
1089 xe_ttm_tt_account_subtract(tt); in xe_bo_shrink()
1098 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
1104 * suspend-resume.
1126 if (WARN_ON(!bo->ttm.resource)) in xe_bo_evict_pinned()
1127 return -EINVAL; in xe_bo_evict_pinned()
1130 return -EINVAL; in xe_bo_evict_pinned()
1135 ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx); in xe_bo_evict_pinned()
1139 if (!bo->ttm.ttm) { in xe_bo_evict_pinned()
1140 bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0); in xe_bo_evict_pinned()
1141 if (!bo->ttm.ttm) { in xe_bo_evict_pinned()
1142 ret = -ENOMEM; in xe_bo_evict_pinned()
1147 ret = ttm_bo_populate(&bo->ttm, &ctx); in xe_bo_evict_pinned()
1151 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); in xe_bo_evict_pinned()
1155 ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL); in xe_bo_evict_pinned()
1162 ttm_resource_free(&bo->ttm, &new_mem); in xe_bo_evict_pinned()
1167 * xe_bo_restore_pinned() - Restore a pinned VRAM object
1173 * suspend-resume.
1184 struct ttm_place *place = &bo->placements[0]; in xe_bo_restore_pinned()
1189 if (WARN_ON(!bo->ttm.resource)) in xe_bo_restore_pinned()
1190 return -EINVAL; in xe_bo_restore_pinned()
1193 return -EINVAL; in xe_bo_restore_pinned()
1196 return -EINVAL; in xe_bo_restore_pinned()
1198 if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo))) in xe_bo_restore_pinned()
1199 return -EINVAL; in xe_bo_restore_pinned()
1201 if (!mem_type_is_vram(place->mem_type)) in xe_bo_restore_pinned()
1204 ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx); in xe_bo_restore_pinned()
1208 ret = ttm_bo_populate(&bo->ttm, &ctx); in xe_bo_restore_pinned()
1212 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); in xe_bo_restore_pinned()
1216 ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL); in xe_bo_restore_pinned()
1223 ttm_resource_free(&bo->ttm, &new_mem); in xe_bo_restore_pinned()
1234 if (ttm_bo->resource->mem_type == XE_PL_STOLEN) in xe_ttm_io_mem_pfn()
1237 vram = res_to_mem_region(ttm_bo->resource); in xe_ttm_io_mem_pfn()
1238 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor); in xe_ttm_io_mem_pfn()
1239 return (vram->io_start + cursor.start) >> PAGE_SHIFT; in xe_ttm_io_mem_pfn()
1250 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_lock_in_destructor()
1253 xe_assert(xe, !kref_read(&ttm_bo->kref)); in xe_ttm_bo_lock_in_destructor()
1261 spin_lock(&ttm_bo->bdev->lru_lock); in xe_ttm_bo_lock_in_destructor()
1262 locked = dma_resv_trylock(ttm_bo->base.resv); in xe_ttm_bo_lock_in_destructor()
1263 spin_unlock(&ttm_bo->bdev->lru_lock); in xe_ttm_bo_lock_in_destructor()
1280 xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount))); in xe_ttm_bo_release_notify()
1286 if (ttm_bo->base.resv != &ttm_bo->base._resv) in xe_ttm_bo_release_notify()
1298 dma_resv_for_each_fence(&cursor, ttm_bo->base.resv, in xe_ttm_bo_release_notify()
1305 dma_resv_replace_fences(ttm_bo->base.resv, in xe_ttm_bo_release_notify()
1306 fence->context, in xe_ttm_bo_release_notify()
1313 dma_resv_unlock(ttm_bo->base.resv); in xe_ttm_bo_release_notify()
1323 * dma-buf attachment. in xe_ttm_bo_delete_mem_notify()
1325 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) { in xe_ttm_bo_delete_mem_notify()
1326 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, in xe_ttm_bo_delete_mem_notify()
1329 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg, in xe_ttm_bo_delete_mem_notify()
1331 ttm_bo->sg = NULL; in xe_ttm_bo_delete_mem_notify()
1332 xe_tt->sg = NULL; in xe_ttm_bo_delete_mem_notify()
1338 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_purge()
1340 if (ttm_bo->ttm) { in xe_ttm_bo_purge()
1344 drm_WARN_ON(&xe->drm, ret); in xe_ttm_bo_purge()
1355 if (ttm_bo->ttm) { in xe_ttm_bo_swap_notify()
1357 container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm); in xe_ttm_bo_swap_notify()
1359 if (xe_tt->purgeable) in xe_ttm_bo_swap_notify()
1369 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_access_memory()
1378 if (!mem_type_is_vram(ttm_bo->resource->mem_type)) in xe_ttm_access_memory()
1379 return -EIO; in xe_ttm_access_memory()
1381 /* FIXME: Use GPU for non-visible VRAM */ in xe_ttm_access_memory()
1382 if (!xe_ttm_resource_visible(ttm_bo->resource)) in xe_ttm_access_memory()
1383 return -EIO; in xe_ttm_access_memory()
1385 vram = res_to_mem_region(ttm_bo->resource); in xe_ttm_access_memory()
1386 xe_res_first(ttm_bo->resource, offset & PAGE_MASK, in xe_ttm_access_memory()
1387 bo->size - (offset & PAGE_MASK), &cursor); in xe_ttm_access_memory()
1391 int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left); in xe_ttm_access_memory()
1393 iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping + in xe_ttm_access_memory()
1402 bytes_left -= byte_count; in xe_ttm_access_memory()
1429 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_destroy()
1433 if (bo->ttm.base.import_attach) in xe_ttm_bo_destroy()
1434 drm_prime_gem_destroy(&bo->ttm.base, NULL); in xe_ttm_bo_destroy()
1435 drm_gem_object_release(&bo->ttm.base); in xe_ttm_bo_destroy()
1437 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); in xe_ttm_bo_destroy()
1440 if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size) in xe_ttm_bo_destroy()
1441 xe_ggtt_remove_bo(tile->mem.ggtt, bo); in xe_ttm_bo_destroy()
1444 if (bo->client) in xe_ttm_bo_destroy()
1448 if (bo->vm && xe_bo_is_user(bo)) in xe_ttm_bo_destroy()
1449 xe_vm_put(bo->vm); in xe_ttm_bo_destroy()
1451 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1452 if (!list_empty(&bo->vram_userfault_link)) in xe_ttm_bo_destroy()
1453 list_del(&bo->vram_userfault_link); in xe_ttm_bo_destroy()
1454 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1483 if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) { in xe_gem_object_close()
1487 ttm_bo_set_bulk_move(&bo->ttm, NULL); in xe_gem_object_close()
1494 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data; in xe_gem_fault()
1495 struct drm_device *ddev = tbo->base.dev; in xe_gem_fault()
1498 bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK; in xe_gem_fault()
1512 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, in xe_gem_fault()
1516 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); in xe_gem_fault()
1519 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) in xe_gem_fault()
1524 if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) { in xe_gem_fault()
1525 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1526 if (list_empty(&bo->vram_userfault_link)) in xe_gem_fault()
1527 list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list); in xe_gem_fault()
1528 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1531 dma_resv_unlock(tbo->base.resv); in xe_gem_fault()
1542 struct ttm_buffer_object *ttm_bo = vma->vm_private_data; in xe_bo_vm_access()
1555 * xe_bo_read() - Read from an xe_bo
1569 ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0); in xe_bo_read()
1571 ret = -EIO; in xe_bo_read()
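
A hedged usage sketch for xe_bo_read(): the (bo, offset, dst, size) ordering is inferred from the ttm_bo_access() call above, and a zero return is assumed to mean the full size was copied.

/* Sketch: peek at the first dword of a BO from the CPU side. */
static int example_peek_first_dword(struct xe_bo *bo, u32 *out)
{
	return xe_bo_read(bo, 0, out, sizeof(*out));
}
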
1594 * xe_bo_alloc - Allocate storage for a struct xe_bo
1604 * ERR_PTR(-ENOMEM) on error.
1611 return ERR_PTR(-ENOMEM); in xe_bo_alloc()
1617 * xe_bo_free - Free storage allocated using xe_bo_alloc()
1620 * Refer to xe_bo_alloc() documentation for valid use-cases.
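
Per the kernel-doc above, xe_bo_alloc() only reserves the struct xe_bo storage for a later init/create step, and xe_bo_free() is the matching release for the path where that step never runs. A minimal sketch of the intended pairing; example_init_bo() is a hypothetical stand-in for the real locked create/init path, which is not part of these matches.

/* Sketch; example_init_bo() is a placeholder for the real init step. */
static struct xe_bo *example_create_bo(struct xe_device *xe)
{
	struct xe_bo *bo = xe_bo_alloc();
	int err;

	if (IS_ERR(bo))
		return bo;			/* ERR_PTR(-ENOMEM) per the doc above */

	err = example_init_bo(xe, bo);		/* hypothetical init step */
	if (err) {
		xe_bo_free(bo);			/* undo the bare allocation */
		return ERR_PTR(err);
	}

	return bo;
}
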
1648 return ERR_PTR(-EINVAL); in ___xe_bo_create_locked()
1653 return ERR_PTR(-EINVAL); in ___xe_bo_create_locked()
1657 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || in ___xe_bo_create_locked()
1673 return ERR_PTR(-EINVAL); in ___xe_bo_create_locked()
1681 bo->ccs_cleared = false; in ___xe_bo_create_locked()
1682 bo->tile = tile; in ___xe_bo_create_locked()
1683 bo->size = size; in ___xe_bo_create_locked()
1684 bo->flags = flags; in ___xe_bo_create_locked()
1685 bo->cpu_caching = cpu_caching; in ___xe_bo_create_locked()
1686 bo->ttm.base.funcs = &xe_gem_object_funcs; in ___xe_bo_create_locked()
1687 bo->ttm.priority = XE_BO_PRIORITY_NORMAL; in ___xe_bo_create_locked()
1688 INIT_LIST_HEAD(&bo->pinned_link); in ___xe_bo_create_locked()
1690 INIT_LIST_HEAD(&bo->client_link); in ___xe_bo_create_locked()
1692 INIT_LIST_HEAD(&bo->vram_userfault_link); in ___xe_bo_create_locked()
1694 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); in ___xe_bo_create_locked()
1702 err = __xe_bo_placement_for_flags(xe, bo, bo->flags); in ___xe_bo_create_locked()
1704 xe_ttm_bo_destroy(&bo->ttm); in ___xe_bo_create_locked()
1711 bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement : in ___xe_bo_create_locked()
1712 &bo->placement; in ___xe_bo_create_locked()
1713 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, in ___xe_bo_create_locked()
1723 * dma-resv using the DMA_RESV_USAGE_KERNEL slot. in ___xe_bo_create_locked()
1737 long timeout = dma_resv_wait_timeout(bo->ttm.base.resv, in ___xe_bo_create_locked()
1744 dma_resv_unlock(bo->ttm.base.resv); in ___xe_bo_create_locked()
1750 bo->created = true; in ___xe_bo_create_locked()
1752 ttm_bo_set_bulk_move(&bo->ttm, bulk); in ___xe_bo_create_locked()
1754 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in ___xe_bo_create_locked()
1764 struct ttm_place *place = bo->placements; in __xe_bo_fixed_placement()
1767 return -EINVAL; in __xe_bo_fixed_placement()
1769 place->flags = TTM_PL_FLAG_CONTIGUOUS; in __xe_bo_fixed_placement()
1770 place->fpfn = start >> PAGE_SHIFT; in __xe_bo_fixed_placement()
1771 place->lpfn = end >> PAGE_SHIFT; in __xe_bo_fixed_placement()
1775 place->mem_type = XE_PL_VRAM0; in __xe_bo_fixed_placement()
1778 place->mem_type = XE_PL_VRAM1; in __xe_bo_fixed_placement()
1781 place->mem_type = XE_PL_STOLEN; in __xe_bo_fixed_placement()
1786 return -EINVAL; in __xe_bo_fixed_placement()
1789 bo->placement = (struct ttm_placement) { in __xe_bo_fixed_placement()
1826 &vm->lru_bulk_move : NULL, size, in __xe_bo_create_locked()
1831 bo->min_align = alignment; in __xe_bo_create_locked()
1842 bo->vm = vm; in __xe_bo_create_locked()
1844 if (bo->flags & XE_BO_FLAG_GGTT) { in __xe_bo_create_locked()
1848 if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) { in __xe_bo_create_locked()
1856 if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t))) in __xe_bo_create_locked()
1860 err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo, in __xe_bo_create_locked()
1861 start + bo->size, U64_MAX); in __xe_bo_create_locked()
1863 err = xe_ggtt_insert_bo(t->mem.ggtt, bo); in __xe_bo_create_locked()
1992 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_bo_create_from_data()
2014 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); in xe_managed_bo_create_pin_map()
2029 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_managed_bo_create_from_data()
2052 dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE; in xe_managed_bo_reinit_in_vram()
2055 xe_assert(xe, !(*src)->vmap.is_iomem); in xe_managed_bo_reinit_in_vram()
2057 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, in xe_managed_bo_reinit_in_vram()
2058 (*src)->size, dst_flags); in xe_managed_bo_reinit_in_vram()
2062 devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src); in xe_managed_bo_reinit_in_vram()
2074 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in vram_region_gpu_offset()
2076 if (res->mem_type == XE_PL_STOLEN) in vram_region_gpu_offset()
2079 return res_to_mem_region(res)->dpa_base; in vram_region_gpu_offset()
2083 * xe_bo_pin_external - pin an external BO
2086 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
2097 xe_assert(xe, !bo->vm); in xe_bo_pin_external()
2106 spin_lock(&xe->pinned.lock); in xe_bo_pin_external()
2107 list_add_tail(&bo->pinned_link, in xe_bo_pin_external()
2108 &xe->pinned.external_vram); in xe_bo_pin_external()
2109 spin_unlock(&xe->pinned.lock); in xe_bo_pin_external()
2113 ttm_bo_pin(&bo->ttm); in xe_bo_pin_external()
2114 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) in xe_bo_pin_external()
2115 xe_ttm_tt_account_subtract(bo->ttm.ttm); in xe_bo_pin_external()
2121 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_pin_external()
2128 struct ttm_place *place = &bo->placements[0]; in xe_bo_pin()
2136 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED | in xe_bo_pin()
2140 * No reason we can't support pinning imported dma-bufs; we just don't in xe_bo_pin()
2141 * expect to pin an imported dma-buf. in xe_bo_pin()
2143 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_pin()
2158 bo->flags & XE_BO_FLAG_INTERNAL_TEST)) { in xe_bo_pin()
2159 if (mem_type_is_vram(place->mem_type)) { in xe_bo_pin()
2160 xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS); in xe_bo_pin()
2162 place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) - in xe_bo_pin()
2163 vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT; in xe_bo_pin()
2164 place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT); in xe_bo_pin()
2168 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { in xe_bo_pin()
2169 spin_lock(&xe->pinned.lock); in xe_bo_pin()
2170 list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); in xe_bo_pin()
2171 spin_unlock(&xe->pinned.lock); in xe_bo_pin()
2174 ttm_bo_pin(&bo->ttm); in xe_bo_pin()
2175 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) in xe_bo_pin()
2176 xe_ttm_tt_account_subtract(bo->ttm.ttm); in xe_bo_pin()
2182 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_pin()
2188 * xe_bo_unpin_external - unpin an external BO
2191 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
2201 xe_assert(xe, !bo->vm); in xe_bo_unpin_external()
2205 spin_lock(&xe->pinned.lock); in xe_bo_unpin_external()
2206 if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) in xe_bo_unpin_external()
2207 list_del_init(&bo->pinned_link); in xe_bo_unpin_external()
2208 spin_unlock(&xe->pinned.lock); in xe_bo_unpin_external()
2210 ttm_bo_unpin(&bo->ttm); in xe_bo_unpin_external()
2211 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) in xe_bo_unpin_external()
2212 xe_ttm_tt_account_add(bo->ttm.ttm); in xe_bo_unpin_external()
2218 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_unpin_external()
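
xe_bo_pin_external() and xe_bo_unpin_external() above form the pin interface for BOs that are not tied to a VM and may be exported via dma-buf; besides the TTM pin count they maintain the xe->pinned bookkeeping and the shrinker accounting. A rough sketch of the pairing, assuming single-argument forms and that the caller holds the object's reservation, as the dma-buf attach paths do:

/* Sketch only; locking and error propagation follow the exporter's rules. */
static int example_exporter_pin(struct xe_bo *bo)
{
	return xe_bo_pin_external(bo);	/* tracked in xe->pinned.external_vram when in VRAM */
}

static void example_exporter_unpin(struct xe_bo *bo)
{
	xe_bo_unpin_external(bo);	/* drops the pin and the pinned-list entry */
}
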
2223 struct ttm_place *place = &bo->placements[0]; in xe_bo_unpin()
2226 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_unpin()
2229 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { in xe_bo_unpin()
2230 spin_lock(&xe->pinned.lock); in xe_bo_unpin()
2231 xe_assert(xe, !list_empty(&bo->pinned_link)); in xe_bo_unpin()
2232 list_del_init(&bo->pinned_link); in xe_bo_unpin()
2233 spin_unlock(&xe->pinned.lock); in xe_bo_unpin()
2235 ttm_bo_unpin(&bo->ttm); in xe_bo_unpin()
2236 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) in xe_bo_unpin()
2237 xe_ttm_tt_account_add(bo->ttm.ttm); in xe_bo_unpin()
2241 * xe_bo_validate() - Make sure the bo is in an allowed placement
2254 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
2265 lockdep_assert_held(&vm->lock); in xe_bo_validate()
2273 return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx); in xe_bo_validate()
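
As the lockdep assert above indicates, a caller that passes a @vm must already hold vm->lock, and the bo's reservation has to be held in any case since this ends in ttm_bo_validate(). A minimal caller sketch, assuming the (bo, vm, allow_res_evict) parameter order implied by this version of the function:

/* Sketch: make a VM-private BO resident; vm->lock is held by the caller. */
static int example_make_resident(struct xe_vm *vm, struct xe_bo *bo)
{
	int err;

	err = xe_bo_lock(bo, true);
	if (err)
		return err;			/* -EINTR per the xe_bo_lock() doc below */
	err = xe_bo_validate(bo, vm, false);	/* don't evict resources shared with the vm */
	xe_bo_unlock(bo);

	return err;
}
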
2278 if (bo->destroy == &xe_ttm_bo_destroy) in xe_bo_is_xe_bo()
2298 offset &= (PAGE_SIZE - 1); in __xe_bo_addr()
2301 xe_assert(xe, bo->ttm.ttm); in __xe_bo_addr()
2309 xe_res_first(bo->ttm.resource, page << PAGE_SHIFT, in __xe_bo_addr()
2311 return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource); in __xe_bo_addr()
2317 if (!READ_ONCE(bo->ttm.pin_count)) in xe_bo_addr()
2324 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_vmap()
2331 if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) || in xe_bo_vmap()
2332 !force_contiguous(bo->flags))) in xe_bo_vmap()
2333 return -EINVAL; in xe_bo_vmap()
2335 if (!iosys_map_is_null(&bo->vmap)) in xe_bo_vmap()
2341 * single page bos, which is done here. in xe_bo_vmap()
2345 ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap); in xe_bo_vmap()
2349 virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); in xe_bo_vmap()
2351 iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual); in xe_bo_vmap()
2353 iosys_map_set_vaddr(&bo->vmap, virtual); in xe_bo_vmap()
2360 if (!iosys_map_is_null(&bo->vmap)) { in __xe_bo_vunmap()
2361 iosys_map_clear(&bo->vmap); in __xe_bo_vunmap()
2362 ttm_bo_kunmap(&bo->kmap); in __xe_bo_vunmap()
2379 return -EINVAL; in gem_create_set_pxp_type()
2381 return xe_pxp_key_assign(xe->pxp, bo); in gem_create_set_pxp_type()
2403 return -EFAULT; in gem_create_user_ext_set_property()
2409 return -EINVAL; in gem_create_user_ext_set_property()
2413 return -EINVAL; in gem_create_user_ext_set_property()
2436 return -E2BIG; in gem_create_user_extensions()
2440 return -EFAULT; in gem_create_user_extensions()
2444 return -EINVAL; in gem_create_user_extensions()
2472 if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || in xe_gem_create_ioctl()
2473 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_create_ioctl()
2474 return -EINVAL; in xe_gem_create_ioctl()
2477 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || in xe_gem_create_ioctl()
2478 !args->placement)) in xe_gem_create_ioctl()
2479 return -EINVAL; in xe_gem_create_ioctl()
2481 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_create_ioctl()
2485 return -EINVAL; in xe_gem_create_ioctl()
2487 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_create_ioctl()
2488 return -EINVAL; in xe_gem_create_ioctl()
2490 if (XE_IOCTL_DBG(xe, !args->size)) in xe_gem_create_ioctl()
2491 return -EINVAL; in xe_gem_create_ioctl()
2493 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) in xe_gem_create_ioctl()
2494 return -EINVAL; in xe_gem_create_ioctl()
2496 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) in xe_gem_create_ioctl()
2497 return -EINVAL; in xe_gem_create_ioctl()
2500 if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING) in xe_gem_create_ioctl()
2503 if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT) in xe_gem_create_ioctl()
2506 bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1); in xe_gem_create_ioctl()
2511 !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) && in xe_gem_create_ioctl()
2512 IS_ALIGNED(args->size, SZ_64K)) in xe_gem_create_ioctl()
2515 if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) { in xe_gem_create_ioctl()
2517 return -EINVAL; in xe_gem_create_ioctl()
2522 if (XE_IOCTL_DBG(xe, !args->cpu_caching || in xe_gem_create_ioctl()
2523 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC)) in xe_gem_create_ioctl()
2524 return -EINVAL; in xe_gem_create_ioctl()
2527 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC)) in xe_gem_create_ioctl()
2528 return -EINVAL; in xe_gem_create_ioctl()
2531 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) in xe_gem_create_ioctl()
2532 return -EINVAL; in xe_gem_create_ioctl()
2534 if (args->vm_id) { in xe_gem_create_ioctl()
2535 vm = xe_vm_lookup(xef, args->vm_id); in xe_gem_create_ioctl()
2537 return -ENOENT; in xe_gem_create_ioctl()
2547 bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching, in xe_gem_create_ioctl()
2560 if (args->extensions) { in xe_gem_create_ioctl()
2561 err = gem_create_user_extensions(xe, bo, args->extensions, 0); in xe_gem_create_ioctl()
2566 err = drm_gem_handle_create(file, &bo->ttm.base, &handle); in xe_gem_create_ioctl()
2570 args->handle = handle; in xe_gem_create_ioctl()
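
To show what the validation above is checking, here is a hedged userspace-side sketch of the matching DRM_IOCTL_XE_GEM_CREATE call. The field names mirror the args-> accesses in the matches above; the assumption that placement bit 0 is system memory follows from the XE_BO_FLAG_SYSTEM shift seen earlier, and the include path for the uapi header may need adjusting.

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Create a 64 KiB, write-back cached BO in system memory. */
static int example_gem_create(int fd, unsigned int *handle)
{
	struct drm_xe_gem_create create;

	memset(&create, 0, sizeof(create));	/* pad[]/reserved[] must be zero */
	create.size = 64 * 1024;		/* must be page aligned */
	create.placement = 1 << 0;		/* assumed sysmem region bit */
	create.cpu_caching = DRM_XE_GEM_CPU_CACHING_WB;

	if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
		return -errno;

	*handle = create.handle;
	return 0;
}
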
2595 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_mmap_offset_ioctl()
2596 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_mmap_offset_ioctl()
2597 return -EINVAL; in xe_gem_mmap_offset_ioctl()
2599 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_mmap_offset_ioctl()
2601 return -EINVAL; in xe_gem_mmap_offset_ioctl()
2603 if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) { in xe_gem_mmap_offset_ioctl()
2605 return -EINVAL; in xe_gem_mmap_offset_ioctl()
2607 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_mmap_offset_ioctl()
2608 return -EINVAL; in xe_gem_mmap_offset_ioctl()
2611 return -EINVAL; in xe_gem_mmap_offset_ioctl()
2615 args->offset = XE_PCI_BARRIER_MMAP_OFFSET; in xe_gem_mmap_offset_ioctl()
2619 gem_obj = drm_gem_object_lookup(file, args->handle); in xe_gem_mmap_offset_ioctl()
2621 return -ENOENT; in xe_gem_mmap_offset_ioctl()
2624 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); in xe_gem_mmap_offset_ioctl()
2631 * xe_bo_lock() - Lock the buffer object's dma_resv object
2638 * Return: 0 on success, -EINTR if @intr is true and the wait for a
2645 return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL); in xe_bo_lock()
2647 dma_resv_lock(bo->ttm.base.resv, NULL); in xe_bo_lock()
2653 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
2660 dma_resv_unlock(bo->ttm.base.resv); in xe_bo_unlock()
2664 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
2682 if (bo->ttm.type == ttm_bo_type_kernel) in xe_bo_can_migrate()
2685 if (bo->ttm.type == ttm_bo_type_sg) in xe_bo_can_migrate()
2688 for (cur_place = 0; cur_place < bo->placement.num_placement; in xe_bo_can_migrate()
2690 if (bo->placements[cur_place].mem_type == mem_type) in xe_bo_can_migrate()
2700 place->mem_type = mem_type; in xe_place_from_ttm_type()
2704 * xe_bo_migrate - Migrate an object to the desired region id
2716 * return -EINTR or -ERESTARTSYS if signal pending.
2720 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_migrate()
2731 if (bo->ttm.resource->mem_type == mem_type) in xe_bo_migrate()
2735 return -EBUSY; in xe_bo_migrate()
2738 return -EINVAL; in xe_bo_migrate()
2748 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN); in xe_bo_migrate()
2753 add_vram(xe, bo, &requested, bo->flags, mem_type, &c); in xe_bo_migrate()
2756 return ttm_bo_validate(&bo->ttm, &placement, &ctx); in xe_bo_migrate()
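
A hedged caller sketch for xe_bo_migrate(), assuming the two-argument (bo, mem_type) form that matches the body above; the bo's reservation must already be held, and -EINTR/-ERESTARTSYS mean the caller should retry, per the kernel-doc.

/* Sketch: caller holds the bo's reservation (e.g. via xe_bo_lock()). */
static int example_prepare_cpu_access(struct xe_bo *bo)
{
	/* Pull the object into the TT (system) placement before CPU access. */
	return xe_bo_migrate(bo, XE_PL_TT);
}
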
2760 * xe_bo_evict - Evict an object to its eviction placement
2780 xe_evict_flags(&bo->ttm, &placement); in xe_bo_evict()
2781 ret = ttm_bo_validate(&bo->ttm, &placement, &ctx); in xe_bo_evict()
2785 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, in xe_bo_evict()
2792 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
2805 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device) in xe_bo_needs_ccs_pages()
2811 * non-VRAM addresses. in xe_bo_needs_ccs_pages()
2813 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM)) in xe_bo_needs_ccs_pages()
2820 * __xe_bo_release_dummy() - Dummy kref release function
2830 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
2850 drm_gem_object_free(&bo->ttm.base.refcount); in xe_bo_put_commit()
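
The deferred-put pair is meant for contexts that hold locks the final release path might also need: xe_bo_put_deferred() (declared in xe_bo.h, not among these matches) only drops the refcount onto a caller-provided llist, and xe_bo_put_commit() frees everything later, once the locks are gone. A sketch of that pattern, with the lock parameter standing in for whatever lock the caller cannot hold across the final free:

/* Sketch: drop the reference under @lock, defer the actual free until after. */
static void example_deferred_release(struct xe_bo *bo, spinlock_t *lock)
{
	LLIST_HEAD(deferred);

	spin_lock(lock);
	xe_bo_put_deferred(bo, &deferred);	/* only drops the refcount */
	spin_unlock(lock);

	xe_bo_put_commit(&deferred);		/* the drm_gem_object_free() above runs here */
}
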
2857 xe_bo_put_commit(&bo_dev->async_list); in xe_bo_dev_work_func()
2861 * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
2866 INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func); in xe_bo_dev_init()
2870 * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
2875 flush_work(&bo_dev->async_free); in xe_bo_dev_fini()
2886 if (bo->client) in xe_bo_put()
2887 might_lock(&bo->client->bos_lock); in xe_bo_put()
2890 if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt) in xe_bo_put()
2891 might_lock(&bo->ggtt_node[id]->ggtt->lock); in xe_bo_put()
2892 drm_gem_object_put(&bo->ttm.base); in xe_bo_put()
2897 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
2913 int cpp = DIV_ROUND_UP(args->bpp, 8); in xe_bo_dumb_create()
2916 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); in xe_bo_dumb_create()
2918 args->pitch = ALIGN(args->width * cpp, 64); in xe_bo_dumb_create()
2919 args->size = ALIGN(mul_u32_u32(args->pitch, args->height), in xe_bo_dumb_create()
2922 bo = xe_bo_create_user(xe, NULL, NULL, args->size, in xe_bo_dumb_create()
2930 err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle); in xe_bo_dumb_create()
2931 /* drop reference from allocate - handle holds it now */ in xe_bo_dumb_create()
2932 drm_gem_object_put(&bo->ttm.base); in xe_bo_dumb_create()
2934 args->handle = handle; in xe_bo_dumb_create()
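
The pitch/size math above is easy to sanity-check with concrete numbers; a worked example for a common mode (illustrative values, not from the matched lines):

/* 1920x1080 dumb buffer at 32 bpp:
 *   cpp        = DIV_ROUND_UP(32, 8)        = 4
 *   pitch      = ALIGN(1920 * 4, 64)        = 7680
 *   size (4K)  = ALIGN(7680 * 1080, SZ_4K)  = 8294400 (already 4 KiB aligned)
 *   size (64K) = ALIGN(7680 * 1080, SZ_64K) = 8323072 (XE_VRAM_FLAGS_NEED64K case)
 */
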
2940 struct ttm_buffer_object *tbo = &bo->ttm; in xe_bo_runtime_pm_release_mmap_offset()
2941 struct ttm_device *bdev = tbo->bdev; in xe_bo_runtime_pm_release_mmap_offset()
2943 drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping); in xe_bo_runtime_pm_release_mmap_offset()
2945 list_del_init(&bo->vram_userfault_link); in xe_bo_runtime_pm_release_mmap_offset()