Lines Matching +full:0 +full:xe
47 .fpfn = 0,
48 .lpfn = 0,
50 .flags = 0,
60 .fpfn = 0,
61 .lpfn = 0,
66 .fpfn = 0,
67 .lpfn = 0,
83 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res) in resource_is_stolen_vram() argument
85 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
155 mem_type_to_migrate(struct xe_device *xe, u32 mem_type) in mem_type_to_migrate() argument
159 xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type)); in mem_type_to_migrate()
160 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; in mem_type_to_migrate()
166 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in res_to_mem_region() local
170 xe_assert(xe, resource_is_vram(res)); in res_to_mem_region()
171 mgr = ttm_manager_type(&xe->ttm, res->mem_type); in res_to_mem_region()
177 static void try_add_system(struct xe_device *xe, struct xe_bo *bo, in try_add_system() argument
181 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_system()
199 static void add_vram(struct xe_device *xe, struct xe_bo *bo, in add_vram() argument
203 struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type); in add_vram()
209 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in add_vram()
212 xe_assert(xe, vram && vram->usable_size); in add_vram()
220 place.fpfn = 0; in add_vram()
230 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo, in try_add_vram() argument
234 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); in try_add_vram()
236 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); in try_add_vram()
239 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo, in try_add_stolen() argument
243 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_stolen()
248 TTM_PL_FLAG_CONTIGUOUS : 0, in try_add_stolen()
254 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, in __xe_bo_placement_for_flags() argument
257 u32 c = 0; in __xe_bo_placement_for_flags()
259 try_add_vram(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
260 try_add_system(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
261 try_add_stolen(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
271 return 0; in __xe_bo_placement_for_flags()
274 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, in xe_bo_placement_for_flags() argument
278 return __xe_bo_placement_for_flags(xe, bo, bo_flags); in xe_bo_placement_for_flags()
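The try_add_vram(), try_add_system() and try_add_stolen() hits above all share one pattern: each helper appends a struct ttm_place to bo->placements and bumps the shared counter, so __xe_bo_placement_for_flags() ends up with the placements ordered VRAM, then system, then stolen. A minimal sketch of that pattern, assuming the driver's internal xe_bo.h definitions (XE_PL_TT, XE_BO_FLAG_SYSTEM, bo->placements); the helper name is hypothetical:

#include <drm/ttm/ttm_placement.h>

/* Hypothetical helper: append a system-memory placement when requested. */
static void sketch_try_add_system(struct xe_bo *bo, u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_FLAG_SYSTEM) {
		bo->placements[(*c)++] = (struct ttm_place) {
			.fpfn = 0,		/* no lower page-frame bound */
			.lpfn = 0,		/* no upper page-frame bound */
			.mem_type = XE_PL_TT,	/* system memory via TTM */
			.flags = 0,
		};
	}
}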
289 placement->num_placement = 0; in xe_evict_flags()
304 * For xe, sg bos that are evicted to system just trigger a in xe_evict_flags()
320 /* struct xe_ttm_tt - Subclassed ttm_tt for xe */
323 /** @xe: The xe device */
324 struct xe_device *xe; member
341 return 0; in xe_tt_map_sg()
344 num_pages, 0, in xe_tt_map_sg()
346 xe_sg_segment_size(xe_tt->xe->drm.dev), in xe_tt_map_sg()
352 ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, in xe_tt_map_sg()
360 return 0; in xe_tt_map_sg()
368 dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, in xe_tt_unmap_sg()
369 DMA_BIDIRECTIONAL, 0); in xe_tt_unmap_sg()
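xe_tt_map_sg() and xe_tt_unmap_sg() above follow the stock kernel recipe: build a scatter-gather table from the ttm_tt page array with sg_alloc_table_from_pages_segment(), DMA-map it with dma_map_sgtable(), and undo both on unmap. A self-contained sketch of that recipe; the segment limit here is a placeholder where the driver uses xe_sg_segment_size():

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: make an array of pages visible to the device for DMA. */
static int sketch_map_pages(struct device *dev, struct page **pages,
			    unsigned int num_pages, struct sg_table *sgt)
{
	int ret;

	ret = sg_alloc_table_from_pages_segment(sgt, pages, num_pages, 0,
						(unsigned long)num_pages << PAGE_SHIFT,
						UINT_MAX, GFP_KERNEL);
	if (ret)
		return ret;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		sg_free_table(sgt);
	return ret;
}

/* Sketch: the matching teardown, as xe_tt_unmap_sg() does. */
static void sketch_unmap_pages(struct device *dev, struct sg_table *sgt)
{
	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(sgt);
}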
392 xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages); in xe_ttm_tt_account_add()
394 xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0); in xe_ttm_tt_account_add()
402 xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages); in xe_ttm_tt_account_subtract()
404 xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0); in xe_ttm_tt_account_subtract()
411 struct xe_device *xe = xe_bo_device(bo); in xe_ttm_tt_create() local
423 xe_tt->xe = xe; in xe_ttm_tt_create()
425 extra_pages = 0; in xe_ttm_tt_create()
427 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), in xe_ttm_tt_create()
436 if (!IS_DGFX(xe)) { in xe_ttm_tt_create()
455 (xe->info.graphics_verx100 >= 1270 && in xe_ttm_tt_create()
465 xe_assert(xe, bo->cpu_caching == 0); in xe_ttm_tt_create()
502 return 0; in xe_ttm_tt_populate()
516 return 0; in xe_ttm_tt_populate()
548 struct xe_device *xe = ttm_to_xe_device(bdev); in xe_ttm_io_mem_reserve() local
553 return 0; in xe_ttm_io_mem_reserve()
574 return 0; in xe_ttm_io_mem_reserve()
576 return xe_ttm_stolen_io_mem_reserve(xe, mem); in xe_ttm_io_mem_reserve()
582 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo, in xe_bo_trigger_rebind() argument
590 int ret = 0; in xe_bo_trigger_rebind()
625 if (timeout < 0) in xe_bo_trigger_rebind()
659 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_dmabuf() local
662 xe_assert(xe, attach); in xe_bo_move_dmabuf()
663 xe_assert(xe, ttm_bo->ttm); in xe_bo_move_dmabuf()
683 return 0; in xe_bo_move_dmabuf()
701 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
708 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_notify() local
722 ret = xe_bo_trigger_rebind(xe, bo, ctx); in xe_bo_move_notify()
736 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
739 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
742 return 0; in xe_bo_move_notify()
750 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move() local
760 bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) && in xe_bo_move()
762 int ret = 0; in xe_bo_move()
804 drm_dbg(&xe->drm, "Evict system allocator BO success\n"); in xe_bo_move()
807 drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n", in xe_bo_move()
856 if (timeout < 0) { in xe_bo_move()
871 hop->fpfn = 0; in xe_bo_move()
872 hop->lpfn = 0; in xe_bo_move()
882 migrate = mem_type_to_migrate(xe, new_mem->mem_type); in xe_bo_move()
884 migrate = mem_type_to_migrate(xe, old_mem_type); in xe_bo_move()
886 migrate = xe->tiles[0].migrate; in xe_bo_move()
888 xe_assert(xe, migrate); in xe_bo_move()
890 if (xe_rpm_reclaim_safe(xe)) { in xe_bo_move()
895 xe_pm_runtime_get(xe); in xe_bo_move()
897 drm_WARN_ON(&xe->drm, handle_system_ccs); in xe_bo_move()
898 xe_pm_runtime_get_noresume(xe); in xe_bo_move()
923 xe_pm_runtime_put(xe); in xe_bo_move()
927 xe_assert(xe, new_mem->start == in xe_bo_move()
935 u32 flags = 0; in xe_bo_move()
949 xe_pm_runtime_put(xe); in xe_bo_move()
958 ret = 0; in xe_bo_move()
974 xe_pm_runtime_put(xe); in xe_bo_move()
983 if (timeout < 0) in xe_bo_move()
1020 if (lret > 0) in xe_bo_shrink_purge()
1027 * xe_bo_shrink() - Try to shrink an xe bo.
1035 * Note that we also need to be able to handle non-xe bos
1052 struct xe_device *xe = xe_tt->xe; in xe_bo_shrink() local
1054 long lret = 0L; in xe_bo_shrink()
1075 needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM && in xe_bo_shrink()
1077 if (needs_rpm && !xe_pm_runtime_get_if_active(xe)) in xe_bo_shrink()
1086 xe_pm_runtime_put(xe); in xe_bo_shrink()
1088 if (lret > 0) in xe_bo_shrink()
1106 * Return: 0 on success. Negative error code on failure.
1133 return 0; in xe_bo_evict_pinned()
1140 bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0); in xe_bo_evict_pinned()
1159 return 0; in xe_bo_evict_pinned()
1175 * Return: 0 on success. Negative error code on failure.
1184 struct ttm_place *place = &bo->placements[0]; in xe_bo_restore_pinned()
1202 return 0; in xe_bo_restore_pinned()
1220 return 0; in xe_bo_restore_pinned()
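xe_bo_evict_pinned() and xe_bo_restore_pinned() are the suspend/resume pair for pinned kernel BOs: the first backs the contents out of VRAM into a freshly created ttm_tt, the second moves them back. A sketch of the intended pairing, assuming the single-argument prototypes from xe_bo.h:

/* Called before the device suspends: back up a pinned BO so VRAM can go away. */
static int sketch_suspend_pinned(struct xe_bo *bo)
{
	return xe_bo_evict_pinned(bo);	/* 0 on success, negative errno on failure */
}

/* Called after resume: re-populate VRAM and copy the saved contents back. */
static int sketch_resume_pinned(struct xe_bo *bo)
{
	return xe_bo_restore_pinned(bo);
}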
1238 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor); in xe_ttm_io_mem_pfn()
1250 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_lock_in_destructor() local
1253 xe_assert(xe, !kref_read(&ttm_bo->kref)); in xe_ttm_bo_lock_in_destructor()
1264 xe_assert(xe, locked); in xe_ttm_bo_lock_in_destructor()
1338 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_purge() local
1344 drm_WARN_ON(&xe->drm, ret); in xe_ttm_bo_purge()
1369 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_access_memory() local
1376 xe_device_assert_mem_access(xe); in xe_ttm_access_memory()
1396 xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count); in xe_ttm_access_memory()
1398 xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count); in xe_ttm_access_memory()
1429 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_destroy() local
1437 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); in xe_ttm_bo_destroy()
1439 for_each_tile(tile, xe, id) in xe_ttm_bo_destroy()
1451 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1454 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1496 struct xe_device *xe = to_xe_device(ddev); in xe_gem_fault() local
1503 xe_pm_runtime_get(xe); in xe_gem_fault()
1525 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1527 list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list); in xe_gem_fault()
1528 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1534 xe_pm_runtime_put(xe); in xe_gem_fault()
1544 struct xe_device *xe = xe_bo_device(bo); in xe_bo_vm_access() local
1547 xe_pm_runtime_get(xe); in xe_bo_vm_access()
1549 xe_pm_runtime_put(xe); in xe_bo_vm_access()
1569 ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0); in xe_bo_read()
1570 if (ret >= 0 && ret != size) in xe_bo_read()
1573 ret = 0; in xe_bo_read()
1627 struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, in ___xe_bo_create_locked() argument
1644 xe_assert(xe, !tile || type == ttm_bo_type_kernel); in ___xe_bo_create_locked()
1657 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || in ___xe_bo_create_locked()
1694 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); in ___xe_bo_create_locked()
1702 err = __xe_bo_placement_for_flags(xe, bo, bo->flags); in ___xe_bo_create_locked()
1713 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, in ___xe_bo_create_locked()
1742 if (timeout < 0) { in ___xe_bo_create_locked()
1759 static int __xe_bo_fixed_placement(struct xe_device *xe, in __xe_bo_fixed_placement() argument
1785 /* 0 or multiple of the above set */ in __xe_bo_fixed_placement()
1794 return 0; in __xe_bo_fixed_placement()
1798 __xe_bo_create_locked(struct xe_device *xe, in __xe_bo_create_locked() argument
1810 if (start || end != ~0ULL) { in __xe_bo_create_locked()
1816 err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size); in __xe_bo_create_locked()
1823 bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL, in __xe_bo_create_locked()
1850 tile = xe_device_get_root_tile(xe); in __xe_bo_create_locked()
1852 xe_assert(xe, tile); in __xe_bo_create_locked()
1855 for_each_tile(t, xe, id) { in __xe_bo_create_locked()
1881 xe_bo_create_locked_range(struct xe_device *xe, in xe_bo_create_locked_range() argument
1886 return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, in xe_bo_create_locked_range()
1890 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_locked() argument
1894 return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, in xe_bo_create_locked()
1895 flags, 0); in xe_bo_create_locked()
1898 struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_user() argument
1903 struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, in xe_bo_create_user()
1905 flags | XE_BO_FLAG_USER, 0); in xe_bo_create_user()
1912 struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create() argument
1916 struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags); in xe_bo_create()
1924 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_pin_map_at() argument
1929 return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset, in xe_bo_create_pin_map_at()
1930 type, flags, 0); in xe_bo_create_pin_map_at()
1933 struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, in xe_bo_create_pin_map_at_aligned() argument
1942 u64 start = offset == ~0ull ? 0 : offset; in xe_bo_create_pin_map_at_aligned()
1943 u64 end = offset == ~0ull ? offset : start + size; in xe_bo_create_pin_map_at_aligned()
1946 xe_ttm_stolen_cpu_access_needs_ggtt(xe)) in xe_bo_create_pin_map_at_aligned()
1949 bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, in xe_bo_create_pin_map_at_aligned()
1975 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_pin_map() argument
1979 return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags); in xe_bo_create_pin_map()
1982 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_from_data() argument
1986 struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL, in xe_bo_create_from_data()
1992 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_bo_create_from_data()
2002 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, in xe_managed_bo_create_pin_map() argument
2008 KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags); in xe_managed_bo_create_pin_map()
2010 bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags); in xe_managed_bo_create_pin_map()
2014 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); in xe_managed_bo_create_pin_map()
2021 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, in xe_managed_bo_create_from_data() argument
2024 struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags); in xe_managed_bo_create_from_data()
2029 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_managed_bo_create_from_data()
2036 * @xe: xe device
2045 * Returns 0 for success, negative error code otherwise.
2047 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src) in xe_managed_bo_reinit_in_vram() argument
2054 xe_assert(xe, IS_DGFX(xe)); in xe_managed_bo_reinit_in_vram()
2055 xe_assert(xe, !(*src)->vmap.is_iomem); in xe_managed_bo_reinit_in_vram()
2057 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, in xe_managed_bo_reinit_in_vram()
2062 devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src); in xe_managed_bo_reinit_in_vram()
2065 return 0; in xe_managed_bo_reinit_in_vram()
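xe_managed_bo_reinit_in_vram() (prototype visible in the hit at line 2047) takes a double pointer so it can replace a BO that was initially created in system memory with a VRAM copy once migration is possible: it creates a new managed BO from the old one's CPU mapping, releases the old object's devm action, and stores the new pointer through @src. A usage sketch built on that signature; the firmware-data use case is illustrative:

/* Sketch: promote a boot-time system-memory BO (e.g. firmware data) to VRAM. */
static int sketch_promote_to_vram(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_bo **bo)
{
	int err;

	err = xe_managed_bo_reinit_in_vram(xe, tile, bo);
	if (err)
		return err;

	/* *bo now points at the VRAM copy; the old object has been released. */
	return 0;
}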
2074 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in vram_region_gpu_offset() local
2077 return xe_ttm_stolen_gpu_offset(xe); in vram_region_gpu_offset()
2090 * Returns 0 for success, negative error code otherwise.
2094 struct xe_device *xe = xe_bo_device(bo); in xe_bo_pin_external() local
2097 xe_assert(xe, !bo->vm); in xe_bo_pin_external()
2098 xe_assert(xe, xe_bo_is_user(bo)); in xe_bo_pin_external()
2106 spin_lock(&xe->pinned.lock); in xe_bo_pin_external()
2108 &xe->pinned.external_vram); in xe_bo_pin_external()
2109 spin_unlock(&xe->pinned.lock); in xe_bo_pin_external()
2123 return 0; in xe_bo_pin_external()
2128 struct ttm_place *place = &bo->placements[0]; in xe_bo_pin()
2129 struct xe_device *xe = xe_bo_device(bo); in xe_bo_pin() local
2133 xe_assert(xe, !xe_bo_is_user(bo)); in xe_bo_pin()
2136 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED | in xe_bo_pin()
2143 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_pin()
2146 xe_assert(xe, !xe_bo_is_pinned(bo)); in xe_bo_pin()
2157 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) && in xe_bo_pin()
2160 xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS); in xe_bo_pin()
2162 place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) - in xe_bo_pin()
2169 spin_lock(&xe->pinned.lock); in xe_bo_pin()
2170 list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); in xe_bo_pin()
2171 spin_unlock(&xe->pinned.lock); in xe_bo_pin()
2184 return 0; in xe_bo_pin()
2195 * Returns 0 for success, negative error code otherwise.
2199 struct xe_device *xe = xe_bo_device(bo); in xe_bo_unpin_external() local
2201 xe_assert(xe, !bo->vm); in xe_bo_unpin_external()
2202 xe_assert(xe, xe_bo_is_pinned(bo)); in xe_bo_unpin_external()
2203 xe_assert(xe, xe_bo_is_user(bo)); in xe_bo_unpin_external()
2205 spin_lock(&xe->pinned.lock); in xe_bo_unpin_external()
2208 spin_unlock(&xe->pinned.lock); in xe_bo_unpin_external()
2223 struct ttm_place *place = &bo->placements[0]; in xe_bo_unpin()
2224 struct xe_device *xe = xe_bo_device(bo); in xe_bo_unpin() local
2226 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_unpin()
2227 xe_assert(xe, xe_bo_is_pinned(bo)); in xe_bo_unpin()
2230 spin_lock(&xe->pinned.lock); in xe_bo_unpin()
2231 xe_assert(xe, !list_empty(&bo->pinned_link)); in xe_bo_unpin()
2233 spin_unlock(&xe->pinned.lock); in xe_bo_unpin()
2253 * Return: 0 on success, negative error code on failure. May return
2292 struct xe_device *xe = xe_bo_device(bo); in __xe_bo_addr() local
2296 xe_assert(xe, page_size <= PAGE_SIZE); in __xe_bo_addr()
2301 xe_assert(xe, bo->ttm.ttm); in __xe_bo_addr()
2324 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_vmap() local
2331 if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) || in xe_bo_vmap()
2336 return 0; in xe_bo_vmap()
2345 ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap); in xe_bo_vmap()
2355 return 0; in xe_bo_vmap()
2372 static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value) in gem_create_set_pxp_type() argument
2375 return 0; in gem_create_set_pxp_type()
2378 if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM)) in gem_create_set_pxp_type()
2381 return xe_pxp_key_assign(xe->pxp, bo); in gem_create_set_pxp_type()
2384 typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
2392 static int gem_create_user_ext_set_property(struct xe_device *xe, in gem_create_user_ext_set_property() argument
2402 if (XE_IOCTL_DBG(xe, err)) in gem_create_user_ext_set_property()
2405 if (XE_IOCTL_DBG(xe, ext.property >= in gem_create_user_ext_set_property()
2407 XE_IOCTL_DBG(xe, ext.pad) || in gem_create_user_ext_set_property()
2408 XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY)) in gem_create_user_ext_set_property()
2415 return gem_create_set_property_funcs[idx](xe, bo, ext.value); in gem_create_user_ext_set_property()
2418 typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe,
2427 static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo, in gem_create_user_extensions() argument
2435 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS)) in gem_create_user_extensions()
2439 if (XE_IOCTL_DBG(xe, err)) in gem_create_user_extensions()
2442 if (XE_IOCTL_DBG(xe, ext.pad) || in gem_create_user_extensions()
2443 XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs))) in gem_create_user_extensions()
2448 err = gem_create_user_extension_funcs[idx](xe, bo, extensions); in gem_create_user_extensions()
2449 if (XE_IOCTL_DBG(xe, err)) in gem_create_user_extensions()
2453 return gem_create_user_extensions(xe, bo, ext.next_extension, in gem_create_user_extensions()
2456 return 0; in gem_create_user_extensions()
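gem_create_user_extensions() above walks a user-space extension chain: each node is copied in from user memory, validated, dispatched by name through a function table, and the walk recurses on next_extension with a bounded depth. A self-contained sketch of that walk; the depth limit and the dispatch step are placeholders for the driver's MAX_USER_EXTENSIONS bound and funcs array:

#include <linux/uaccess.h>
#include <uapi/drm/xe_drm.h>

static int sketch_walk_extensions(u64 extensions, unsigned int depth)
{
	struct drm_xe_user_extension ext;

	if (!extensions)
		return 0;
	if (depth >= 16)	/* assumed bound, mirroring MAX_USER_EXTENSIONS */
		return -E2BIG;

	if (copy_from_user(&ext, u64_to_user_ptr(extensions), sizeof(ext)))
		return -EFAULT;
	if (ext.pad)
		return -EINVAL;

	/* ... dispatch on ext.name here ... */

	return sketch_walk_extensions(ext.next_extension, depth + 1);
}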
2462 struct xe_device *xe = to_xe_device(dev); in xe_gem_create_ioctl() local
2466 ktime_t end = 0; in xe_gem_create_ioctl()
2472 if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || in xe_gem_create_ioctl()
2473 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_create_ioctl()
2477 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || in xe_gem_create_ioctl()
2481 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_create_ioctl()
2487 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_create_ioctl()
2490 if (XE_IOCTL_DBG(xe, !args->size)) in xe_gem_create_ioctl()
2493 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) in xe_gem_create_ioctl()
2496 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) in xe_gem_create_ioctl()
2499 bo_flags = 0; in xe_gem_create_ioctl()
2511 !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) && in xe_gem_create_ioctl()
2516 if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK))) in xe_gem_create_ioctl()
2522 if (XE_IOCTL_DBG(xe, !args->cpu_caching || in xe_gem_create_ioctl()
2526 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK && in xe_gem_create_ioctl()
2530 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT && in xe_gem_create_ioctl()
2536 if (XE_IOCTL_DBG(xe, !vm)) in xe_gem_create_ioctl()
2547 bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching, in xe_gem_create_ioctl()
2561 err = gem_create_user_extensions(xe, bo, args->extensions, 0); in xe_gem_create_ioctl()
2591 struct xe_device *xe = to_xe_device(dev); in xe_gem_mmap_offset_ioctl() local
2595 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_mmap_offset_ioctl()
2596 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_mmap_offset_ioctl()
2599 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_mmap_offset_ioctl()
2604 if (XE_IOCTL_DBG(xe, !IS_DGFX(xe))) in xe_gem_mmap_offset_ioctl()
2607 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_mmap_offset_ioctl()
2610 if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K)) in xe_gem_mmap_offset_ioctl()
2616 return 0; in xe_gem_mmap_offset_ioctl()
2620 if (XE_IOCTL_DBG(xe, !gem_obj)) in xe_gem_mmap_offset_ioctl()
2627 return 0; in xe_gem_mmap_offset_ioctl()
2638 * Return: 0 on success, -EINTR if @intr is true and the wait for a
2640 * function always returns 0.
2649 return 0; in xe_bo_lock()
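The xe_bo_lock() kerneldoc above spells out the contract: with @intr true the dma-resv lock is taken interruptibly and may fail with -EINTR, otherwise it always succeeds. A caller-side sketch, assuming the xe_bo_lock(bo, intr)/xe_bo_unlock(bo) prototypes from xe_bo.h:

static int sketch_with_bo_locked(struct xe_bo *bo)
{
	int err;

	err = xe_bo_lock(bo, true);	/* interruptible: may return -EINTR */
	if (err)
		return err;

	/* ... inspect or migrate the BO under its reservation lock ... */

	xe_bo_unlock(bo);
	return 0;
}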
2688 for (cur_place = 0; cur_place < bo->placement.num_placement; in xe_bo_can_migrate()
2699 memset(place, 0, sizeof(*place)); in xe_place_from_ttm_type()
2715 * Return: 0 on success. Negative error code on failure. In particular may
2720 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_migrate() local
2732 return 0; in xe_bo_migrate()
2748 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN); in xe_bo_migrate()
2751 u32 c = 0; in xe_bo_migrate()
2753 add_vram(xe, bo, &requested, bo->flags, mem_type, &c); in xe_bo_migrate()
2767 * Return: 0 on success. Negative error code on failure.
2788 return 0; in xe_bo_evict()
2800 struct xe_device *xe = xe_bo_device(bo); in xe_bo_needs_ccs_pages() local
2802 if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) in xe_bo_needs_ccs_pages()
2805 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device) in xe_bo_needs_ccs_pages()
2813 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM)) in xe_bo_needs_ccs_pages()
2910 struct xe_device *xe = to_xe_device(dev); in xe_bo_dumb_create() local
2916 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); in xe_bo_dumb_create()
2922 bo = xe_bo_create_user(xe, NULL, NULL, args->size, in xe_bo_dumb_create()
2924 XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | in xe_bo_dumb_create()