Lines Matching +full:multi +full:- +full:tt
1 // SPDX-License-Identifier: MIT
8 #include <linux/dma-buf.h>
74 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
79 return mem_type_is_vram(res->mem_type); in resource_is_vram()
84 return resource_is_vram(bo->ttm.resource) || in xe_bo_is_vram()
85 resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource); in xe_bo_is_vram()
90 return bo->ttm.resource->mem_type == XE_PL_STOLEN; in xe_bo_is_stolen()
94 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
110 return bo->flags & XE_BO_CREATE_USER_BIT; in xe_bo_is_user()
119 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; in mem_type_to_migrate()
120 return tile->migrate; in mem_type_to_migrate()
125 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in res_to_mem_region()
129 mgr = ttm_manager_type(&xe->ttm, res->mem_type); in res_to_mem_region()
130 return to_xe_ttm_vram_mgr(mgr)->vram; in res_to_mem_region()
137 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_system()
139 bo->placements[*c] = (struct ttm_place) { in try_add_system()
144 if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID) in try_add_system()
145 bo->props.preferred_mem_type = XE_PL_TT; in try_add_system()
156 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in add_vram()
158 vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram; in add_vram()
159 xe_assert(xe, vram && vram->usable_size); in add_vram()
160 io_size = vram->io_size; in add_vram()
170 if (io_size < vram->usable_size) { in add_vram()
181 if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID) in add_vram()
182 bo->props.preferred_mem_type = mem_type; in add_vram()
188 if (bo->props.preferred_gt == XE_GT1) { in try_add_vram()
190 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); in try_add_vram()
192 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); in try_add_vram()
195 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); in try_add_vram()
197 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); in try_add_vram()
205 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_stolen()
207 bo->placements[*c] = (struct ttm_place) { in try_add_stolen()
222 bo->props.preferred_mem_type = XE_BO_PROPS_INVALID; in __xe_bo_placement_for_flags()
226 if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) { in __xe_bo_placement_for_flags()
236 return -EINVAL; in __xe_bo_placement_for_flags()
238 bo->placement = (struct ttm_placement) { in __xe_bo_placement_for_flags()
240 .placement = bo->placements, in __xe_bo_placement_for_flags()
242 .busy_placement = bo->placements, in __xe_bo_placement_for_flags()
260 if (tbo->type == ttm_bo_type_sg) { in xe_evict_flags()
261 placement->num_placement = 0; in xe_evict_flags()
262 placement->num_busy_placement = 0; in xe_evict_flags()
274 switch (tbo->resource->mem_type) { in xe_evict_flags()
294 static int xe_tt_map_sg(struct ttm_tt *tt) in xe_tt_map_sg() argument
296 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_tt_map_sg()
297 unsigned long num_pages = tt->num_pages; in xe_tt_map_sg()
300 XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL); in xe_tt_map_sg()
302 if (xe_tt->sg) in xe_tt_map_sg()
305 ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages, in xe_tt_map_sg()
308 xe_sg_segment_size(xe_tt->dev), in xe_tt_map_sg()
313 xe_tt->sg = &xe_tt->sgt; in xe_tt_map_sg()
314 ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL, in xe_tt_map_sg()
317 sg_free_table(xe_tt->sg); in xe_tt_map_sg()
318 xe_tt->sg = NULL; in xe_tt_map_sg()
327 struct ttm_tt *tt = bo->ttm.ttm; in xe_bo_sg() local
328 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_bo_sg()
330 return xe_tt->sg; in xe_bo_sg()
338 struct xe_ttm_tt *tt; in xe_ttm_tt_create() local
343 tt = kzalloc(sizeof(*tt), GFP_KERNEL); in xe_ttm_tt_create()
344 if (!tt) in xe_ttm_tt_create()
347 tt->dev = xe->drm.dev; in xe_ttm_tt_create()
351 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), in xe_ttm_tt_create()
354 switch (bo->cpu_caching) { in xe_ttm_tt_create()
363 WARN_ON((bo->flags & XE_BO_CREATE_USER_BIT) && !bo->cpu_caching); in xe_ttm_tt_create()
366 * Display scanout is always non-coherent with the CPU cache. in xe_ttm_tt_create()
368 * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and in xe_ttm_tt_create()
371 if ((!bo->cpu_caching && bo->flags & XE_BO_SCANOUT_BIT) || in xe_ttm_tt_create()
372 (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE)) in xe_ttm_tt_create()
375 err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages); in xe_ttm_tt_create()
377 kfree(tt); in xe_ttm_tt_create()
381 return &tt->ttm; in xe_ttm_tt_create()
384 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt, in xe_ttm_tt_populate() argument
390 * dma-bufs are not populated with pages, and the dma- in xe_ttm_tt_populate()
393 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) in xe_ttm_tt_populate()
396 err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); in xe_ttm_tt_populate()
401 err = xe_tt_map_sg(tt); in xe_ttm_tt_populate()
403 ttm_pool_free(&ttm_dev->pool, tt); in xe_ttm_tt_populate()
408 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt) in xe_ttm_tt_unpopulate() argument
410 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_ttm_tt_unpopulate()
412 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) in xe_ttm_tt_unpopulate()
415 if (xe_tt->sg) { in xe_ttm_tt_unpopulate()
416 dma_unmap_sgtable(xe_tt->dev, xe_tt->sg, in xe_ttm_tt_unpopulate()
418 sg_free_table(xe_tt->sg); in xe_ttm_tt_unpopulate()
419 xe_tt->sg = NULL; in xe_ttm_tt_unpopulate()
422 return ttm_pool_free(&ttm_dev->pool, tt); in xe_ttm_tt_unpopulate()
425 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt) in xe_ttm_tt_destroy() argument
427 ttm_tt_fini(tt); in xe_ttm_tt_destroy()
428 kfree(tt); in xe_ttm_tt_destroy()
436 switch (mem->mem_type) { in xe_ttm_io_mem_reserve()
446 if (vres->used_visible_size < mem->size) in xe_ttm_io_mem_reserve()
447 return -EINVAL; in xe_ttm_io_mem_reserve()
449 mem->bus.offset = mem->start << PAGE_SHIFT; in xe_ttm_io_mem_reserve()
451 if (vram->mapping && in xe_ttm_io_mem_reserve()
452 mem->placement & TTM_PL_FLAG_CONTIGUOUS) in xe_ttm_io_mem_reserve()
453 mem->bus.addr = (u8 __force *)vram->mapping + in xe_ttm_io_mem_reserve()
454 mem->bus.offset; in xe_ttm_io_mem_reserve()
456 mem->bus.offset += vram->io_start; in xe_ttm_io_mem_reserve()
457 mem->bus.is_iomem = true; in xe_ttm_io_mem_reserve()
460 mem->bus.caching = ttm_write_combined; in xe_ttm_io_mem_reserve()
466 return -EINVAL; in xe_ttm_io_mem_reserve()
475 struct drm_gem_object *obj = &bo->ttm.base; in xe_bo_trigger_rebind()
480 dma_resv_assert_held(bo->ttm.base.resv); in xe_bo_trigger_rebind()
482 if (!list_empty(&bo->ttm.base.gpuva.list)) { in xe_bo_trigger_rebind()
483 dma_resv_iter_begin(&cursor, bo->ttm.base.resv, in xe_bo_trigger_rebind()
491 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_bo_trigger_rebind()
502 if (ctx->no_wait_gpu && in xe_bo_trigger_rebind()
503 !dma_resv_test_signaled(bo->ttm.base.resv, in xe_bo_trigger_rebind()
505 return -EBUSY; in xe_bo_trigger_rebind()
507 timeout = dma_resv_wait_timeout(bo->ttm.base.resv, in xe_bo_trigger_rebind()
509 ctx->interruptible, in xe_bo_trigger_rebind()
512 return -ETIME; in xe_bo_trigger_rebind()
533 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
544 struct dma_buf_attachment *attach = ttm_bo->base.import_attach; in xe_bo_move_dmabuf()
545 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt, in xe_bo_move_dmabuf()
547 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_dmabuf()
551 xe_assert(xe, ttm_bo->ttm); in xe_bo_move_dmabuf()
553 if (new_res->mem_type == XE_PL_SYSTEM) in xe_bo_move_dmabuf()
556 if (ttm_bo->sg) { in xe_bo_move_dmabuf()
557 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL); in xe_bo_move_dmabuf()
558 ttm_bo->sg = NULL; in xe_bo_move_dmabuf()
565 ttm_bo->sg = sg; in xe_bo_move_dmabuf()
566 xe_tt->sg = sg; in xe_bo_move_dmabuf()
575 * xe_bo_move_notify - Notify subsystems of a pending move
589 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
595 struct ttm_buffer_object *ttm_bo = &bo->ttm; in xe_bo_move_notify()
596 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_notify()
605 return -EINVAL; in xe_bo_move_notify()
612 /* Don't call move_notify() for imported dma-bufs. */ in xe_bo_move_notify()
613 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach) in xe_bo_move_notify()
614 dma_buf_move_notify(ttm_bo->base.dma_buf); in xe_bo_move_notify()
624 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move()
626 struct ttm_resource *old_mem = ttm_bo->resource; in xe_bo_move()
627 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; in xe_bo_move()
628 struct ttm_tt *ttm = ttm_bo->ttm; in xe_bo_move()
637 /* Bo creation path, moving to system or TT. */ in xe_bo_move()
643 if (ttm_bo->type == ttm_bo_type_sg) { in xe_bo_move()
651 (ttm->page_flags & TTM_TT_FLAG_SWAPPED)); in xe_bo_move()
653 move_lacks_source = handle_system_ccs ? (!bo->ccs_cleared) : in xe_bo_move()
656 needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) || in xe_bo_move()
657 (!ttm && ttm_bo->type == ttm_bo_type_device); in xe_bo_move()
664 if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) { in xe_bo_move()
670 * Failed multi-hop where the old_mem is still marked as in xe_bo_move()
674 new_mem->mem_type == XE_PL_TT) { in xe_bo_move()
686 new_mem->mem_type == XE_PL_SYSTEM) { in xe_bo_move()
687 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, in xe_bo_move()
705 new_mem->mem_type == XE_PL_SYSTEM))) { in xe_bo_move()
706 hop->fpfn = 0; in xe_bo_move()
707 hop->lpfn = 0; in xe_bo_move()
708 hop->mem_type = XE_PL_TT; in xe_bo_move()
709 hop->flags = TTM_PL_FLAG_TEMPORARY; in xe_bo_move()
710 ret = -EMULTIHOP; in xe_bo_move()
714 if (bo->tile) in xe_bo_move()
715 migrate = bo->tile->migrate; in xe_bo_move()
717 migrate = mem_type_to_migrate(xe, new_mem->mem_type); in xe_bo_move()
721 migrate = xe->tiles[0].migrate; in xe_bo_move()
724 trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source); in xe_bo_move()
744 void __iomem *new_addr = vram->mapping + in xe_bo_move()
745 (new_mem->start << PAGE_SHIFT); in xe_bo_move()
747 if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) { in xe_bo_move()
748 ret = -EINVAL; in xe_bo_move()
753 xe_assert(xe, new_mem->start == in xe_bo_move()
754 bo->placements->fpfn); in xe_bo_move()
756 iosys_map_set_vaddr_iomem(&bo->vmap, new_addr); in xe_bo_move()
781 * bo->resource == NULL, so just attach the in xe_bo_move()
784 dma_resv_add_fence(ttm_bo->base.resv, fence, in xe_bo_move()
800 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
807 * suspend-resume.
828 if (WARN_ON(!bo->ttm.resource)) in xe_bo_evict_pinned()
829 return -EINVAL; in xe_bo_evict_pinned()
832 return -EINVAL; in xe_bo_evict_pinned()
835 return -EINVAL; in xe_bo_evict_pinned()
837 ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx); in xe_bo_evict_pinned()
841 if (!bo->ttm.ttm) { in xe_bo_evict_pinned()
842 bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0); in xe_bo_evict_pinned()
843 if (!bo->ttm.ttm) { in xe_bo_evict_pinned()
844 ret = -ENOMEM; in xe_bo_evict_pinned()
849 ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx); in xe_bo_evict_pinned()
853 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); in xe_bo_evict_pinned()
857 ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL); in xe_bo_evict_pinned()
861 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, in xe_bo_evict_pinned()
867 ttm_resource_free(&bo->ttm, &new_mem); in xe_bo_evict_pinned()
872 * xe_bo_restore_pinned() - Restore a pinned VRAM object
879 * suspend-resume.
893 if (WARN_ON(!bo->ttm.resource)) in xe_bo_restore_pinned()
894 return -EINVAL; in xe_bo_restore_pinned()
897 return -EINVAL; in xe_bo_restore_pinned()
899 if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm)) in xe_bo_restore_pinned()
900 return -EINVAL; in xe_bo_restore_pinned()
902 ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx); in xe_bo_restore_pinned()
906 ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx); in xe_bo_restore_pinned()
910 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); in xe_bo_restore_pinned()
914 ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL); in xe_bo_restore_pinned()
918 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, in xe_bo_restore_pinned()
924 ttm_resource_free(&bo->ttm, &new_mem); in xe_bo_restore_pinned()
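The two pinned-object helpers above are paired across suspend/resume. A hedged caller sketch follows (hypothetical function names; both helpers are assumed to take only the bo and to require its dma_resv to be held, consistent with the dma_resv_reserve_fences() calls in their bodies):

/* Illustrative sketch only -- not taken from xe_bo.c. */
static int example_suspend_pinned(struct xe_bo *bo)
{
	int ret;

	xe_bo_lock(bo, false);
	ret = xe_bo_evict_pinned(bo);	/* back the VRAM contents up to system pages */
	xe_bo_unlock(bo);

	return ret;
}

static int example_resume_pinned(struct xe_bo *bo)
{
	int ret;

	xe_bo_lock(bo, false);
	ret = xe_bo_restore_pinned(bo);	/* copy the backup back into VRAM */
	xe_bo_unlock(bo);

	return ret;
}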
935 if (ttm_bo->resource->mem_type == XE_PL_STOLEN) in xe_ttm_io_mem_pfn()
938 vram = res_to_mem_region(ttm_bo->resource); in xe_ttm_io_mem_pfn()
939 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor); in xe_ttm_io_mem_pfn()
940 return (vram->io_start + cursor.start) >> PAGE_SHIFT; in xe_ttm_io_mem_pfn()
951 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_lock_in_destructor()
954 xe_assert(xe, !kref_read(&ttm_bo->kref)); in xe_ttm_bo_lock_in_destructor()
962 spin_lock(&ttm_bo->bdev->lru_lock); in xe_ttm_bo_lock_in_destructor()
963 locked = dma_resv_trylock(ttm_bo->base.resv); in xe_ttm_bo_lock_in_destructor()
964 spin_unlock(&ttm_bo->bdev->lru_lock); in xe_ttm_bo_lock_in_destructor()
981 xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount))); in xe_ttm_bo_release_notify()
987 if (ttm_bo->base.resv != &ttm_bo->base._resv) in xe_ttm_bo_release_notify()
999 dma_resv_for_each_fence(&cursor, ttm_bo->base.resv, in xe_ttm_bo_release_notify()
1006 dma_resv_replace_fences(ttm_bo->base.resv, in xe_ttm_bo_release_notify()
1007 fence->context, in xe_ttm_bo_release_notify()
1014 dma_resv_unlock(ttm_bo->base.resv); in xe_ttm_bo_release_notify()
1024 * dma-buf attachment. in xe_ttm_bo_delete_mem_notify()
1026 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) { in xe_ttm_bo_delete_mem_notify()
1027 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, in xe_ttm_bo_delete_mem_notify()
1030 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg, in xe_ttm_bo_delete_mem_notify()
1032 ttm_bo->sg = NULL; in xe_ttm_bo_delete_mem_notify()
1033 xe_tt->sg = NULL; in xe_ttm_bo_delete_mem_notify()
1054 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_destroy()
1056 if (bo->ttm.base.import_attach) in xe_ttm_bo_destroy()
1057 drm_prime_gem_destroy(&bo->ttm.base, NULL); in xe_ttm_bo_destroy()
1058 drm_gem_object_release(&bo->ttm.base); in xe_ttm_bo_destroy()
1060 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); in xe_ttm_bo_destroy()
1062 if (bo->ggtt_node.size) in xe_ttm_bo_destroy()
1063 xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo); in xe_ttm_bo_destroy()
1066 if (bo->client) in xe_ttm_bo_destroy()
1070 if (bo->vm && xe_bo_is_user(bo)) in xe_ttm_bo_destroy()
1071 xe_vm_put(bo->vm); in xe_ttm_bo_destroy()
1100 if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) { in xe_gem_object_close()
1104 ttm_bo_set_bulk_move(&bo->ttm, NULL); in xe_gem_object_close()
1113 return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic; in should_migrate_to_system()
1118 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data; in xe_gem_fault()
1119 struct drm_device *ddev = tbo->base.dev; in xe_gem_fault()
1134 if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR) in xe_gem_fault()
1141 vmf->vma->vm_page_prot, in xe_gem_fault()
1145 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); in xe_gem_fault()
1147 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) in xe_gem_fault()
1150 dma_resv_unlock(tbo->base.resv); in xe_gem_fault()
1170 * xe_bo_alloc - Allocate storage for a struct xe_bo
1180 * ERR_PTR(-ENOMEM) on error.
1187 return ERR_PTR(-ENOMEM); in xe_bo_alloc()
1193 * xe_bo_free - Free storage allocated using xe_bo_alloc()
1196 * Refer to xe_bo_alloc() documentation for valid use-cases.
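A short, hypothetical use of the two helpers above; xe_bo_alloc() is assumed to take no arguments, matching the bare ERR_PTR(-ENOMEM) return shown:

/* Illustrative sketch only. */
struct xe_bo *bo = xe_bo_alloc();

if (IS_ERR(bo))
	return PTR_ERR(bo);

/* ... hand the pre-allocated storage to the creation path, or ... */
xe_bo_free(bo);	/* ... drop it again if it ends up unused */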
1223 return ERR_PTR(-EINVAL); in ___xe_bo_create_locked()
1228 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) { in ___xe_bo_create_locked()
1242 return ERR_PTR(-EINVAL); in ___xe_bo_create_locked()
1250 bo->ccs_cleared = false; in ___xe_bo_create_locked()
1251 bo->tile = tile; in ___xe_bo_create_locked()
1252 bo->size = size; in ___xe_bo_create_locked()
1253 bo->flags = flags; in ___xe_bo_create_locked()
1254 bo->cpu_caching = cpu_caching; in ___xe_bo_create_locked()
1255 bo->ttm.base.funcs = &xe_gem_object_funcs; in ___xe_bo_create_locked()
1256 bo->props.preferred_mem_class = XE_BO_PROPS_INVALID; in ___xe_bo_create_locked()
1257 bo->props.preferred_gt = XE_BO_PROPS_INVALID; in ___xe_bo_create_locked()
1258 bo->props.preferred_mem_type = XE_BO_PROPS_INVALID; in ___xe_bo_create_locked()
1259 bo->ttm.priority = XE_BO_PRIORITY_NORMAL; in ___xe_bo_create_locked()
1260 INIT_LIST_HEAD(&bo->pinned_link); in ___xe_bo_create_locked()
1262 INIT_LIST_HEAD(&bo->client_link); in ___xe_bo_create_locked()
1265 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); in ___xe_bo_create_locked()
1273 err = __xe_bo_placement_for_flags(xe, bo, bo->flags); in ___xe_bo_create_locked()
1275 xe_ttm_bo_destroy(&bo->ttm); in ___xe_bo_create_locked()
1282 bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement : in ___xe_bo_create_locked()
1283 &bo->placement; in ___xe_bo_create_locked()
1284 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, in ___xe_bo_create_locked()
1294 * dma-resv using the DMA_RESV_USAGE_KERNEL slot. in ___xe_bo_create_locked()
1308 long timeout = dma_resv_wait_timeout(bo->ttm.base.resv, in ___xe_bo_create_locked()
1315 dma_resv_unlock(bo->ttm.base.resv); in ___xe_bo_create_locked()
1321 bo->created = true; in ___xe_bo_create_locked()
1323 ttm_bo_set_bulk_move(&bo->ttm, bulk); in ___xe_bo_create_locked()
1325 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in ___xe_bo_create_locked()
1335 struct ttm_place *place = bo->placements; in __xe_bo_fixed_placement()
1338 return -EINVAL; in __xe_bo_fixed_placement()
1340 place->flags = TTM_PL_FLAG_CONTIGUOUS; in __xe_bo_fixed_placement()
1341 place->fpfn = start >> PAGE_SHIFT; in __xe_bo_fixed_placement()
1342 place->lpfn = end >> PAGE_SHIFT; in __xe_bo_fixed_placement()
1346 place->mem_type = XE_PL_VRAM0; in __xe_bo_fixed_placement()
1349 place->mem_type = XE_PL_VRAM1; in __xe_bo_fixed_placement()
1352 place->mem_type = XE_PL_STOLEN; in __xe_bo_fixed_placement()
1357 return -EINVAL; in __xe_bo_fixed_placement()
1360 bo->placement = (struct ttm_placement) { in __xe_bo_fixed_placement()
1398 &vm->lru_bulk_move : NULL, size, in __xe_bo_create_locked()
1412 bo->vm = vm; in __xe_bo_create_locked()
1414 if (bo->flags & XE_BO_CREATE_GGTT_BIT) { in __xe_bo_create_locked()
1421 err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, in __xe_bo_create_locked()
1422 start + bo->size, U64_MAX); in __xe_bo_create_locked()
1424 err = xe_ggtt_insert_bo(tile->mem.ggtt, bo); in __xe_bo_create_locked()
1538 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_bo_create_from_data()
1558 ret = drmm_add_action_or_reset(&xe->drm, __xe_bo_unpin_map_no_vm, bo); in xe_managed_bo_create_pin_map()
1573 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_managed_bo_create_from_data()
1584 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in vram_region_gpu_offset()
1586 if (res->mem_type == XE_PL_STOLEN) in vram_region_gpu_offset()
1589 return res_to_mem_region(res)->dpa_base; in vram_region_gpu_offset()
1593 * xe_bo_pin_external - pin an external BO
1596 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1607 xe_assert(xe, !bo->vm); in xe_bo_pin_external()
1616 spin_lock(&xe->pinned.lock); in xe_bo_pin_external()
1617 list_add_tail(&bo->pinned_link, in xe_bo_pin_external()
1618 &xe->pinned.external_vram); in xe_bo_pin_external()
1619 spin_unlock(&xe->pinned.lock); in xe_bo_pin_external()
1623 ttm_bo_pin(&bo->ttm); in xe_bo_pin_external()
1629 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_pin_external()
1643 xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT | in xe_bo_pin()
1647	 * No reason we can't support pinning imported dma-bufs; we just don't	  in xe_bo_pin()
1648 * expect to pin an imported dma-buf. in xe_bo_pin()
1650 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_pin()
1665 bo->flags & XE_BO_INTERNAL_TEST)) { in xe_bo_pin()
1666 struct ttm_place *place = &(bo->placements[0]); in xe_bo_pin()
1668 if (mem_type_is_vram(place->mem_type)) { in xe_bo_pin()
1669 xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS); in xe_bo_pin()
1671 place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) - in xe_bo_pin()
1672 vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT; in xe_bo_pin()
1673 place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT); in xe_bo_pin()
1675 spin_lock(&xe->pinned.lock); in xe_bo_pin()
1676 list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); in xe_bo_pin()
1677 spin_unlock(&xe->pinned.lock); in xe_bo_pin()
1681 ttm_bo_pin(&bo->ttm); in xe_bo_pin()
1687 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_pin()
1693 * xe_bo_unpin_external - unpin an external BO
1696 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1706 xe_assert(xe, !bo->vm); in xe_bo_unpin_external()
1710 if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) { in xe_bo_unpin_external()
1711 spin_lock(&xe->pinned.lock); in xe_bo_unpin_external()
1712 list_del_init(&bo->pinned_link); in xe_bo_unpin_external()
1713 spin_unlock(&xe->pinned.lock); in xe_bo_unpin_external()
1716 ttm_bo_unpin(&bo->ttm); in xe_bo_unpin_external()
1722 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_unpin_external()
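A hedged sketch of where the external pin/unpin helpers above are typically driven from: dma-buf pin/unpin hooks on the exporter side. The attach->dmabuf->priv layout and the gem_to_xe_bo() conversion are assumptions not shown in this listing:

/* Illustrative sketch only. */
static int example_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);	/* assumed container_of() helper */

	/* The dma-buf core holds the object's dma_resv around .pin/.unpin. */
	return xe_bo_pin_external(bo);
}

static void example_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	xe_bo_unpin_external(gem_to_xe_bo(obj));
}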
1729 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_unpin()
1733 bo->flags & XE_BO_INTERNAL_TEST)) { in xe_bo_unpin()
1734 struct ttm_place *place = &(bo->placements[0]); in xe_bo_unpin()
1736 if (mem_type_is_vram(place->mem_type)) { in xe_bo_unpin()
1737 xe_assert(xe, !list_empty(&bo->pinned_link)); in xe_bo_unpin()
1739 spin_lock(&xe->pinned.lock); in xe_bo_unpin()
1740 list_del_init(&bo->pinned_link); in xe_bo_unpin()
1741 spin_unlock(&xe->pinned.lock); in xe_bo_unpin()
1745 ttm_bo_unpin(&bo->ttm); in xe_bo_unpin()
1749 * xe_bo_validate() - Make sure the bo is in an allowed placement
1762 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
1772 lockdep_assert_held(&vm->lock); in xe_bo_validate()
1779 return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx); in xe_bo_validate()
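A hedged caller-side sketch for xe_bo_validate(); the @vm and @allow_res_evict parameters are inferred from the body and kernel-doc above, so treat the exact signature as an assumption:

/* Illustrative sketch only. */
lockdep_assert_held(&vm->lock);			/* mirrors the assert shown above */
dma_resv_assert_held(bo->ttm.base.resv);

err = xe_bo_validate(bo, vm, false);
if (err)
	return err;	/* -EINTR/-ERESTARTSYS: back off and retry the operation */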
1784 if (bo->destroy == &xe_ttm_bo_destroy) in xe_bo_is_xe_bo()
1804 offset &= (PAGE_SIZE - 1); in __xe_bo_addr()
1807 xe_assert(xe, bo->ttm.ttm); in __xe_bo_addr()
1815 xe_res_first(bo->ttm.resource, page << PAGE_SHIFT, in __xe_bo_addr()
1817 return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource); in __xe_bo_addr()
1823 if (!READ_ONCE(bo->ttm.pin_count)) in xe_bo_addr()
1836 if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) in xe_bo_vmap()
1837 return -EINVAL; in xe_bo_vmap()
1839 if (!iosys_map_is_null(&bo->vmap)) in xe_bo_vmap()
1849 ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap); in xe_bo_vmap()
1853 virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); in xe_bo_vmap()
1855 iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual); in xe_bo_vmap()
1857 iosys_map_set_vaddr(&bo->vmap, virtual); in xe_bo_vmap()
1864 if (!iosys_map_is_null(&bo->vmap)) { in __xe_bo_vunmap()
1865 iosys_map_clear(&bo->vmap); in __xe_bo_vunmap()
1866 ttm_bo_kunmap(&bo->kmap); in __xe_bo_vunmap()
1888 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_create_ioctl()
1889 XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || in xe_gem_create_ioctl()
1890 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_create_ioctl()
1891 return -EINVAL; in xe_gem_create_ioctl()
1894 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || in xe_gem_create_ioctl()
1895 !args->placement)) in xe_gem_create_ioctl()
1896 return -EINVAL; in xe_gem_create_ioctl()
1898 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_create_ioctl()
1902 return -EINVAL; in xe_gem_create_ioctl()
1904 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_create_ioctl()
1905 return -EINVAL; in xe_gem_create_ioctl()
1907 if (XE_IOCTL_DBG(xe, !args->size)) in xe_gem_create_ioctl()
1908 return -EINVAL; in xe_gem_create_ioctl()
1910 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) in xe_gem_create_ioctl()
1911 return -EINVAL; in xe_gem_create_ioctl()
1913 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) in xe_gem_create_ioctl()
1914 return -EINVAL; in xe_gem_create_ioctl()
1917 if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING) in xe_gem_create_ioctl()
1920 if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT) in xe_gem_create_ioctl()
1923 bo_flags |= args->placement << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1); in xe_gem_create_ioctl()
1925 if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) { in xe_gem_create_ioctl()
1927 return -EINVAL; in xe_gem_create_ioctl()
1932 if (XE_IOCTL_DBG(xe, !args->cpu_caching || in xe_gem_create_ioctl()
1933 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC)) in xe_gem_create_ioctl()
1934 return -EINVAL; in xe_gem_create_ioctl()
1937 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC)) in xe_gem_create_ioctl()
1938 return -EINVAL; in xe_gem_create_ioctl()
1941 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) in xe_gem_create_ioctl()
1942 return -EINVAL; in xe_gem_create_ioctl()
1944 if (args->vm_id) { in xe_gem_create_ioctl()
1945 vm = xe_vm_lookup(xef, args->vm_id); in xe_gem_create_ioctl()
1947 return -ENOENT; in xe_gem_create_ioctl()
1953 bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching, in xe_gem_create_ioctl()
1964 err = drm_gem_handle_create(file, &bo->ttm.base, &handle); in xe_gem_create_ioctl()
1968 args->handle = handle; in xe_gem_create_ioctl()
1993 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_mmap_offset_ioctl()
1994 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_mmap_offset_ioctl()
1995 return -EINVAL; in xe_gem_mmap_offset_ioctl()
1997 if (XE_IOCTL_DBG(xe, args->flags)) in xe_gem_mmap_offset_ioctl()
1998 return -EINVAL; in xe_gem_mmap_offset_ioctl()
2000 gem_obj = drm_gem_object_lookup(file, args->handle); in xe_gem_mmap_offset_ioctl()
2002 return -ENOENT; in xe_gem_mmap_offset_ioctl()
2005 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); in xe_gem_mmap_offset_ioctl()
2012 * xe_bo_lock() - Lock the buffer object's dma_resv object
2019 * Return: 0 on success, -EINTR if @intr is true and the wait for a
2026 return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL); in xe_bo_lock()
2028 dma_resv_lock(bo->ttm.base.resv, NULL); in xe_bo_lock()
2034 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
2041 dma_resv_unlock(bo->ttm.base.resv); in xe_bo_unlock()
2045 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
2063 if (bo->ttm.type == ttm_bo_type_kernel) in xe_bo_can_migrate()
2066 if (bo->ttm.type == ttm_bo_type_sg) in xe_bo_can_migrate()
2069 for (cur_place = 0; cur_place < bo->placement.num_placement; in xe_bo_can_migrate()
2071 if (bo->placements[cur_place].mem_type == mem_type) in xe_bo_can_migrate()
2081 place->mem_type = mem_type; in xe_place_from_ttm_type()
2085 * xe_bo_migrate - Migrate an object to the desired region id
2097 * return -EINTR or -ERESTARTSYS if signal pending.
2101 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_migrate()
2111 if (bo->ttm.resource->mem_type == mem_type) in xe_bo_migrate()
2115 return -EBUSY; in xe_bo_migrate()
2118 return -EINVAL; in xe_bo_migrate()
2130 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN); in xe_bo_migrate()
2135 add_vram(xe, bo, &requested, bo->flags, mem_type, &c); in xe_bo_migrate()
2138 return ttm_bo_validate(&bo->ttm, &placement, &ctx); in xe_bo_migrate()
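A minimal, hypothetical caller for xe_bo_migrate(), using the xe_bo_lock()/xe_bo_unlock() pair documented above and one of the placement constants used throughout this file:

/* Illustrative sketch only. */
err = xe_bo_lock(bo, true);		/* interruptible: may return -EINTR */
if (err)
	return err;

err = xe_bo_migrate(bo, XE_PL_TT);	/* request the GPU-mappable system placement */
xe_bo_unlock(bo);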
2142 * xe_bo_evict - Evict an object to evict placement
2161 xe_evict_flags(&bo->ttm, &placement); in xe_bo_evict()
2162 ret = ttm_bo_validate(&bo->ttm, &placement, &ctx); in xe_bo_evict()
2166 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, in xe_bo_evict()
2173 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
2183 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device) in xe_bo_needs_ccs_pages()
2189 * non-VRAM addresses. in xe_bo_needs_ccs_pages()
2191 if (IS_DGFX(xe) && (bo->flags & XE_BO_CREATE_SYSTEM_BIT)) in xe_bo_needs_ccs_pages()
2198 * __xe_bo_release_dummy() - Dummy kref release function
2208 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
2228 drm_gem_object_free(&bo->ttm.base.refcount); in xe_bo_put_commit()
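A hedged sketch of the deferred-put pattern that xe_bo_put_commit() completes; xe_bo_put_deferred() is assumed to collect objects on a caller-provided llist, as the kernel-doc above suggests:

/* Illustrative sketch only. */
LLIST_HEAD(deferred);

xe_bo_put_deferred(bo, &deferred);	/* drop the reference now, defer the free */
/* ... release any locks that the actual destructor would need ... */
xe_bo_put_commit(&deferred);		/* deferred objects are finally freed here */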
2232 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
2248 int cpp = DIV_ROUND_UP(args->bpp, 8); in xe_bo_dumb_create()
2251 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); in xe_bo_dumb_create()
2253 args->pitch = ALIGN(args->width * cpp, 64); in xe_bo_dumb_create()
2254 args->size = ALIGN(mul_u32_u32(args->pitch, args->height), in xe_bo_dumb_create()
2257 bo = xe_bo_create_user(xe, NULL, NULL, args->size, in xe_bo_dumb_create()
2266 err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle); in xe_bo_dumb_create()
2267 /* drop reference from allocate - handle holds it now */ in xe_bo_dumb_create()
2268 drm_gem_object_put(&bo->ttm.base); in xe_bo_dumb_create()
2270 args->handle = handle; in xe_bo_dumb_create()
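As a concrete illustration of the arithmetic above (numbers chosen for the example, not from the source): a 1920x1080 dumb buffer at 32 bpp gives cpp = 4, so args->pitch = ALIGN(1920 * 4, 64) = 7680 bytes and args->size = ALIGN(7680 * 1080, page_align) = 8294400 bytes with 4 KiB alignment; on VRAM that needs 64 KiB pages the same size rounds up to 8323072 bytes.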