// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/shmem_fs.h>

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_buddy.h>

#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
#include "gt/intel_gpu_commands.h"

#define I915_TTM_PRIO_PURGE     0
#define I915_TTM_PRIO_NO_PAGES  1
#define I915_TTM_PRIO_HAS_PAGES 2
#define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3

/*
 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 */
#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_rsgt: The cached scatter-gather table.
 * @is_shmem: Set if using shmem.
 * @filp: The shmem file, if using shmem backend.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct i915_refct_sgt cached_rsgt;

	bool is_shmem;
	struct file *filp;
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = I915_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement i915_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
};

/**
 * i915_ttm_sys_placement - Return the struct ttm_placement to be
 * used for an object in system memory.
 *
 * Rather than making the struct extern, use this
 * function.
 *
 * Return: A pointer to a static variable for sys placement.
 */
struct ttm_placement *i915_ttm_sys_placement(void)
{
	return &i915_sys_placement;
}

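/*
 * Usage sketch (illustrative, not an additional API): callers wanting to
 * move a buffer object to system memory validate against this placement,
 * as vm_fault_ttm() does further below for pipeline-gutted objects:
 *
 *	err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
 */
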
static int i915_ttm_err_to_gem(int err)
{
	/* Fastpath */
	if (likely(!err))
		return 0;

	switch (err) {
	case -EBUSY:
		/*
		 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
		 * restart the operation, since we don't record the contending
		 * lock. We use -EAGAIN to restart.
		 */
		return -EAGAIN;
	case -ENOSPC:
		/*
		 * Memory type / region is full, and we can't evict.
		 * Except possibly system, which returns -ENOMEM.
		 */
		return -ENXIO;
	default:
		break;
	}

	return err;
}

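/*
 * Usage sketch (illustrative): the translation is applied at the GEM
 * boundary after TTM calls, as __i915_ttm_get_pages() does below:
 *
 *	ret = ttm_bo_validate(bo, placement, &ctx);
 *	if (ret)
 *		return i915_ttm_err_to_gem(ret);
 */
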
static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{
	/*
	 * Objects only allowed in system memory get cached cpu-mappings, as
	 * do lmem-only buffers evicted to system for swapping. All other
	 * objects get WC mapping for now, even when placed in system.
	 */
	if (obj->mm.n_placements <= 1)
		return ttm_cached;

	return ttm_write_combined;
}

static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
			   struct ttm_place *place,
			   resource_size_t offset,
			   resource_size_t size,
			   unsigned int flags)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = intel_region_to_ttm_type(mr);

	if (mr->type == INTEL_MEMORY_SYSTEM)
		return;

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place->flags |= TTM_PL_FLAG_CONTIGUOUS;
	if (offset != I915_BO_INVALID_OFFSET) {
		WARN_ON(overflows_type(offset >> PAGE_SHIFT, place->fpfn));
		place->fpfn = offset >> PAGE_SHIFT;
		WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
		place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
	} else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
		if (flags & I915_BO_ALLOC_GPU_ONLY) {
			place->flags |= TTM_PL_FLAG_TOPDOWN;
		} else {
			place->fpfn = 0;
			WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
			place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
		}
	}
}

static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
			    struct ttm_place *places,
			    struct ttm_placement *placement)
{
	unsigned int num_allowed = obj->mm.n_placements;
	unsigned int flags = obj->flags;
	unsigned int i;

	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
				   obj->mm.region, &places[0], obj->bo_offset,
				   obj->base.size, flags);

	/* Cache this on object? */
	for (i = 0; i < num_allowed; ++i) {
		i915_ttm_place_from_region(obj->mm.placements[i],
					   &places[i + 1], obj->bo_offset,
					   obj->base.size, flags);
		places[i + 1].flags |= TTM_PL_FLAG_FALLBACK;
	}

	placement->num_placement = num_allowed + 1;
	placement->placement = places;
}

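/*
 * Resulting layout (illustrative): for an object with
 * obj->mm.n_placements == 2, places[0] is the preferred region
 * (placements[0]) without the fallback flag, while places[1] and places[2]
 * repeat both allowed regions with TTM_PL_FLAG_FALLBACK set, giving
 * placement->num_placement == 3.
 */
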
static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
				      struct ttm_tt *ttm,
				      struct ttm_operation_ctx *ctx)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
	struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	const unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
	const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;
	struct file *filp = i915_tt->filp;
	struct sgt_iter sgt_iter;
	struct sg_table *st;
	struct page *page;
	unsigned long i;
	int err;

	if (!filp) {
		struct address_space *mapping;
		gfp_t mask;

		filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE);
		if (IS_ERR(filp))
			return PTR_ERR(filp);

		mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;

		mapping = filp->f_mapping;
		mapping_set_gfp_mask(mapping, mask);
		GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

		i915_tt->filp = filp;
	}

	st = &i915_tt->cached_rsgt.table;
	err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping,
				   max_segment);
	if (err)
		return err;

	err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (err)
		goto err_free_st;

	i = 0;
	for_each_sgt_page(page, sgt_iter, st)
		ttm->pages[i++] = page;

	if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
		ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

err_free_st:
	shmem_sg_free_table(st, filp->f_mapping, false, false);

	return err;
}

static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;
	struct sg_table *st = &i915_tt->cached_rsgt.table;

	shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping,
			    backup, backup);
}

static void i915_ttm_tt_release(struct kref *ref)
{
	struct i915_ttm_tt *i915_tt =
		container_of(ref, typeof(*i915_tt), cached_rsgt.kref);
	struct sg_table *st = &i915_tt->cached_rsgt.table;

	GEM_WARN_ON(st->sgl);

	kfree(i915_tt);
}

static const struct i915_refct_sgt_ops tt_rsgt_ops = {
	.release = i915_ttm_tt_release
};

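/*
 * Lifetime sketch of the cached rsgt (illustrative): i915_ttm_tt_create()
 * below initializes cached_rsgt with one reference via
 * __i915_refct_sgt_init(); i915_ttm_tt_get_st() hands out further
 * references with i915_refct_sgt_get(), and i915_ttm_tt_destroy() drops
 * the creation reference. Whichever i915_refct_sgt_put() runs last ends up
 * in i915_ttm_tt_release() above, freeing the whole struct i915_ttm_tt.
 */
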
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
						     bdev);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	unsigned long ccs_pages = 0;
	enum ttm_caching caching;
	struct i915_ttm_tt *i915_tt;
	int ret;

	if (i915_ttm_is_ghost_object(bo))
		return NULL;

	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
	if (!i915_tt)
		return NULL;

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
	    ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
		page_flags |= TTM_TT_FLAG_ZERO_ALLOC;

	caching = i915_ttm_select_tt_caching(obj);
	if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
		page_flags |= TTM_TT_FLAG_EXTERNAL |
			      TTM_TT_FLAG_EXTERNAL_MAPPABLE;
		i915_tt->is_shmem = true;
	}

	if (i915_gem_object_needs_ccs_pages(obj))
		ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
						      NUM_BYTES_PER_CCS_BYTE),
					 PAGE_SIZE);

	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, ccs_pages);
	if (ret)
		goto err_free;

	__i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size,
			      &tt_rsgt_ops);

	i915_tt->dev = obj->base.dev->dev;

	return &i915_tt->ttm;

err_free:
	kfree(i915_tt);
	return NULL;
}

static int i915_ttm_tt_populate(struct ttm_device *bdev,
				struct ttm_tt *ttm,
				struct ttm_operation_ctx *ctx)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->is_shmem)
		return i915_ttm_tt_shmem_populate(bdev, ttm, ctx);

	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct sg_table *st = &i915_tt->cached_rsgt.table;

	if (st->sgl)
		dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);

	if (i915_tt->is_shmem) {
		i915_ttm_tt_shmem_unpopulate(ttm);
	} else {
		sg_free_table(st);
		ttm_pool_free(&bdev->pool, ttm);
	}
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->filp)
		fput(i915_tt->filp);

	ttm_tt_fini(ttm);
	i915_refct_sgt_put(&i915_tt->cached_rsgt);
}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	if (i915_ttm_is_ghost_object(bo))
		return false;

	/*
	 * EXTERNAL objects should never be swapped out by TTM, instead we need
	 * to handle that ourselves. TTM will already skip such objects for us,
	 * but we would like to avoid grabbing locks for no good reason.
	 */
	if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return false;

	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
	if (!i915_gem_object_evictable(obj))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{
	*placement = i915_sys_placement;
}

/**
 * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information
 * @obj: The GEM object
 * This function frees any LMEM-related information that is cached on
 * the object. For example the radix tree for fast page lookup and the
 * cached refcounted sg-table.
 */
void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	if (!obj->ttm.cached_io_rsgt)
		return;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
		radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
	rcu_read_unlock();

	i915_refct_sgt_put(obj->ttm.cached_io_rsgt);
	obj->ttm.cached_io_rsgt = NULL;
}

/**
 * i915_ttm_purge - Clear an object of its memory
 * @obj: The object
 *
 * This function is called to clear an object of its memory when it is
 * marked as not needed anymore.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_purge(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_ttm_tt *i915_tt =
		container_of(bo->ttm, typeof(*i915_tt), ttm);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (obj->mm.madv == __I915_MADV_PURGED)
		return 0;

	ret = ttm_bo_validate(bo, &place, &ctx);
	if (ret)
		return ret;

	if (bo->ttm && i915_tt->filp) {
		/*
		 * The below fput() (which eventually calls shmem_truncate())
		 * might be delayed by a worker, so when we are called directly
		 * to purge the pages (like by the shrinker) we should try to
		 * be more aggressive and release the pages immediately.
		 */
		shmem_truncate_range(file_inode(i915_tt->filp),
				     0, (loff_t)-1);
		fput(fetch_and_zero(&i915_tt->filp));
	}

	obj->write_domain = 0;
	obj->read_domains = 0;
	i915_ttm_adjust_gem_after_move(obj);
	i915_ttm_free_cached_io_rsgt(obj);
	obj->mm.madv = __I915_MADV_PURGED;

	return 0;
}

static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_ttm_tt *i915_tt =
		container_of(bo->ttm, typeof(*i915_tt), ttm);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT,
	};
	struct ttm_placement place = {};
	int ret;

	if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
		return 0;

	GEM_BUG_ON(!i915_tt->is_shmem);

	if (!i915_tt->filp)
		return 0;

	ret = ttm_bo_wait_ctx(bo, &ctx);
	if (ret)
		return ret;

	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_ttm_purge(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)
		return 0;

	bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
	ret = ttm_bo_validate(bo, &place, &ctx);
	if (ret) {
		bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
		return ret;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		__shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);

	return 0;
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/*
	 * This gets called twice by ttm, so long as we have a ttm resource or
	 * ttm_tt then we can still safely call this. Due to pipeline-gutting,
	 * we may have a NULL bo->resource, but in that case we should always
	 * have a ttm alive (like if the pages are swapped out).
	 */
	if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
		__i915_gem_object_pages_fini(obj);
		i915_ttm_free_cached_io_rsgt(obj);
	}
}

static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct sg_table *st;
	int ret;

	if (i915_tt->cached_rsgt.table.sgl)
		return i915_refct_sgt_get(&i915_tt->cached_rsgt);

	st = &i915_tt->cached_rsgt.table;
	ret = sg_alloc_table_from_pages_segment(st,
			ttm->pages, ttm->num_pages,
			0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
			i915_sg_segment_size(i915_tt->dev), GFP_KERNEL);
	if (ret) {
		st->sgl = NULL;
		return ERR_PTR(ret);
	}

	ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(st);
		return ERR_PTR(ret);
	}

	return i915_refct_sgt_get(&i915_tt->cached_rsgt);
}

/**
 * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the
 * resource memory
 * @obj: The GEM object used for sg-table caching
 * @res: The struct ttm_resource for which an sg-table is requested.
 *
 * This function returns a refcounted sg-table representing the memory
 * pointed to by @res. If @res is the object's current resource it may also
 * cache the sg_table on the object or attempt to access an already cached
 * sg-table. The refcounted sg-table needs to be put when no longer in use.
 *
 * Return: A valid pointer to a struct i915_refct_sgt or error pointer on
 * failure.
 */
struct i915_refct_sgt *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	u32 page_alignment;

	if (!i915_ttm_gtt_binds_lmem(res))
		return i915_ttm_tt_get_st(bo->ttm);

	page_alignment = bo->page_alignment << PAGE_SHIFT;
	if (!page_alignment)
		page_alignment = obj->mm.region->min_page_size;

	/*
	 * If CPU mapping differs, we need to add the ttm_tt pages to
	 * the resulting st. Might make sense for GGTT.
	 */
	GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res));
	if (bo->resource == res) {
		if (!obj->ttm.cached_io_rsgt) {
			struct i915_refct_sgt *rsgt;

			rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
								 res,
								 page_alignment);
			if (IS_ERR(rsgt))
				return rsgt;

			obj->ttm.cached_io_rsgt = rsgt;
		}
		return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
	}

	return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
						 page_alignment);
}

static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	long err;

	WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);

	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    true, 15 * HZ);
	if (err < 0)
		return err;
	if (err == 0)
		return -EBUSY;

	err = i915_ttm_move_notify(bo);
	if (err)
		return err;

	return i915_ttm_purge(obj);
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	if (i915_ttm_is_ghost_object(bo))
		return;

	ret = i915_ttm_move_notify(bo);
	GEM_WARN_ON(ret);
	GEM_WARN_ON(obj->ttm.cached_io_rsgt);
	if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
		i915_ttm_purge(obj);
}

/**
 * i915_ttm_resource_mappable - Return true if the ttm resource is CPU
 * accessible.
 * @res: The TTM resource to check.
 *
 * This is interesting on small-BAR systems where we may encounter lmem objects
 * that can't be accessed via the CPU.
 */
bool i915_ttm_resource_mappable(struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);

	if (!i915_ttm_cpu_maps_iomem(res))
		return true;

	return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
}

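/*
 * Illustrative example: on a small-BAR system where only part of lmem is
 * CPU-visible, a buddy resource allocated entirely within the visible
 * window satisfies used_visible_size == PFN_UP(bman_res->base.size) and is
 * considered mappable; a resource with any pages outside the window is
 * not, and e.g. i915_ttm_io_mem_reserve() below then fails with -EINVAL.
 */
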
static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
	bool unknown_state;

	if (i915_ttm_is_ghost_object(mem->bo))
		return -EINVAL;

	if (!kref_get_unless_zero(&obj->base.refcount))
		return -EINVAL;

	assert_object_held(obj);

	unknown_state = i915_gem_object_has_unknown_state(obj);
	i915_gem_object_put(obj);
	if (unknown_state)
		return -EINVAL;

	if (!i915_ttm_cpu_maps_iomem(mem))
		return 0;

	if (!i915_ttm_resource_mappable(mem))
		return -EINVAL;

	mem->bus.caching = ttm_write_combined;
	mem->bus.is_iomem = true;

	return 0;
}

static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					 unsigned long page_offset)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct scatterlist *sg;
	unsigned long base;
	unsigned int ofs;

	GEM_BUG_ON(i915_ttm_is_ghost_object(bo));
	GEM_WARN_ON(bo->ttm);

	base = obj->mm.region->iomap.base - obj->mm.region->region.start;
	sg = i915_gem_object_page_iter_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs);

	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

static int i915_ttm_access_memory(struct ttm_buffer_object *bo,
				  unsigned long offset, void *buf,
				  int len, int write)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;

	/*
	 * TODO: For now just let it fail if the resource is non-mappable,
	 * otherwise we need to perform the memcpy from the gpu here, without
	 * interfering with the object (like moving the entire thing).
	 */
	if (!i915_ttm_resource_mappable(bo->resource))
		return -EIO;

	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		void __iomem *ptr;
		dma_addr_t daddr;

		daddr = i915_gem_object_get_dma_address(obj, page);
		ptr = ioremap_wc(iomap + daddr + offset, bytes);
		if (!ptr)
			return -EIO;

		if (write)
			memcpy_toio(ptr, buf, bytes);
		else
			memcpy_fromio(buf, ptr, bytes);
		iounmap(ptr);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

/*
 * All callbacks need to take care not to downcast a struct ttm_buffer_object
 * without checking its subclass, since it might be a TTM ghost object.
 */
static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_populate = i915_ttm_tt_populate,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
	.io_mem_reserve = i915_ttm_io_mem_reserve,
	.io_mem_pfn = i915_ttm_io_mem_pfn,
	.access_memory = i915_ttm_access_memory,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}

static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
				struct ttm_placement *placement)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement initial_placement;
	struct ttm_place initial_place;
	int ret;

	/* First try only the requested placement. No eviction. */
	initial_placement.num_placement = 1;
	memcpy(&initial_place, placement->placement, sizeof(struct ttm_place));
	initial_place.flags |= TTM_PL_FLAG_DESIRED;
	initial_placement.placement = &initial_place;
	ret = ttm_bo_validate(bo, &initial_placement, &ctx);
	if (ret) {
		ret = i915_ttm_err_to_gem(ret);
		/*
		 * Anything that wants to restart the operation gets to
		 * do that.
		 */
		if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
		    ret == -EAGAIN)
			return ret;

		/*
		 * If the initial attempt fails, allow all accepted placements,
		 * evicting if necessary.
		 */
		ret = ttm_bo_validate(bo, placement, &ctx);
		if (ret)
			return i915_ttm_err_to_gem(ret);
	}

	if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
		ret = ttm_bo_populate(bo, &ctx);
		if (ret)
			return ret;

		i915_ttm_adjust_domains_after_move(obj);
		i915_ttm_adjust_gem_after_move(obj);
	}

	if (!i915_gem_object_has_pages(obj)) {
		struct i915_refct_sgt *rsgt =
			i915_ttm_resource_get_st(obj, bo->resource);

		if (IS_ERR(rsgt))
			return PTR_ERR(rsgt);

		GEM_BUG_ON(obj->mm.rsgt);
		obj->mm.rsgt = rsgt;
		__i915_gem_object_set_pages(obj, &rsgt->table);
	}

	GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));
	i915_ttm_adjust_lru(obj);
	return ret;
}

static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
	struct ttm_place places[I915_TTM_MAX_PLACEMENTS + 1];
	struct ttm_placement placement;

	/* restricted by sg_alloc_table */
	if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
		return -E2BIG;

	GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

	/* Move to the requested placement. */
	i915_ttm_placement_from_obj(obj, places, &placement);

	return __i915_ttm_get_pages(obj, &placement);
}

/**
 * DOC: Migration vs eviction
 *
 * GEM migration may not be the same as TTM migration / eviction. If
 * the TTM core decides to evict an object it may be evicted to a
 * TTM memory type that is not in the object's allowable GEM regions, or
 * in fact theoretically to a TTM memory type that doesn't correspond to
 * a GEM memory region. In that case the object's GEM region is not
 * updated, and the data is migrated back to the GEM region at
 * get_pages time. TTM may however set up CPU ptes to the object even
 * when it is evicted.
 * GEM forced migration, using the i915_ttm_migrate() op, is allowed even
 * to regions that are not in the object's list of allowable placements.
 */
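/*
 * Illustrative consequence of the above: an lmem-only object evicted by
 * TTM may temporarily sit in a shmem-backed ttm_tt while obj->mm.region
 * still points at lmem; only an explicit migration through
 * __i915_ttm_migrate() below rebinds the GEM region via
 * i915_gem_object_init_memory_region().
 */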
static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
			      struct intel_memory_region *mr,
			      unsigned int flags)
{
	struct ttm_place requested;
	struct ttm_placement placement;
	int ret;

	i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
				   obj->base.size, flags);
	placement.num_placement = 1;
	placement.placement = &requested;

	ret = __i915_ttm_get_pages(obj, &placement);
	if (ret)
		return ret;

	/*
	 * Reinitialize the region bindings. This is primarily
	 * required for objects where the new region is not in
	 * its allowable placements.
	 */
	if (obj->mm.region != mr) {
		i915_gem_object_release_memory_region(obj);
		i915_gem_object_init_memory_region(obj, mr);
	}

	return 0;
}

static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
			    struct intel_memory_region *mr,
			    unsigned int flags)
{
	return __i915_ttm_migrate(obj, mr, flags);
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *st)
{
	/*
	 * We're currently not called from a shrinker, so put_pages()
	 * typically means the object is about to be destroyed, or called
	 * from move_notify(). So just avoid doing much for now.
	 * If the object is not destroyed next, the TTM eviction logic
	 * and shrinkers will move it out if needed.
	 */

	if (obj->mm.rsgt)
		i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
}

/**
 * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists.
 * @obj: The object
 */
void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_ttm_tt *i915_tt =
		container_of(bo->ttm, typeof(*i915_tt), ttm);
	bool shrinkable =
		bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm);

	/*
	 * Don't manipulate the TTM LRUs while in TTM bo destruction.
	 * We're called through i915_ttm_delete_mem_notify().
	 */
	if (!kref_read(&bo->kref))
		return;

	/*
	 * We skip managing the shrinker LRU in set_pages() and just manage
	 * everything here. This does at least solve the issue with having
	 * temporary shmem mappings (like with evicted lmem) not being visible
	 * to the shrinker. Only our shmem objects are shrinkable, everything
	 * else we keep as unshrinkable.
	 *
	 * To make sure everything plays nice we keep an extra shrink pin in
	 * TTM if the underlying pages are not currently shrinkable. Once we
	 * release our pin, like when the pages are moved to shmem, the pages
	 * will then be added to the shrinker LRU, assuming the caller isn't
	 * also holding a pin.
	 *
	 * TODO: consider maybe also bumping the shrinker list here when we
	 * have already unpinned it, which should give us something more like
	 * an LRU.
	 *
	 * TODO: There is a small window of opportunity for this function to
	 * get called from eviction after we've dropped the last GEM refcount,
	 * but before the TTM deleted flag is set on the object. Avoid
	 * adjusting the shrinker list in such cases, since the object is
	 * not available to the shrinker anyway due to its zero refcount.
	 * To fix this properly we should move to a TTM shrinker LRU list for
	 * these objects.
	 */
	if (kref_get_unless_zero(&obj->base.refcount)) {
		if (shrinkable != obj->mm.ttm_shrinkable) {
			if (shrinkable) {
				if (obj->mm.madv == I915_MADV_WILLNEED)
					__i915_gem_object_make_shrinkable(obj);
				else
					__i915_gem_object_make_purgeable(obj);
			} else {
				i915_gem_object_make_unshrinkable(obj);
			}

			obj->mm.ttm_shrinkable = shrinkable;
		}
		i915_gem_object_put(obj);
	}

	/*
	 * Put on the correct LRU list depending on the MADV status
	 */
	spin_lock(&bo->bdev->lru_lock);
	if (shrinkable) {
		/* Try to keep shmem_tt from being considered for shrinking. */
		bo->priority = TTM_MAX_BO_PRIORITY - 1;
	} else if (obj->mm.madv != I915_MADV_WILLNEED) {
		bo->priority = I915_TTM_PRIO_PURGE;
	} else if (!i915_gem_object_has_pages(obj)) {
		bo->priority = I915_TTM_PRIO_NO_PAGES;
	} else {
		struct ttm_resource_manager *man =
			ttm_manager_type(bo->bdev, bo->resource->mem_type);

		/*
		 * If we need to place an LMEM resource which doesn't need CPU
		 * access then we should try not to victimize mappable objects
		 * first, since we likely end up stealing more of the mappable
		 * portion. And likewise when we try to find space for a
		 * mappable object, we know not to ever victimize objects that
		 * don't occupy any mappable pages.
		 */
		if (i915_ttm_cpu_maps_iomem(bo->resource) &&
		    i915_ttm_buddy_man_visible_size(man) < man->size &&
		    !(obj->flags & I915_BO_ALLOC_GPU_ONLY))
			bo->priority = I915_TTM_PRIO_NEEDS_CPU_ACCESS;
		else
			bo->priority = I915_TTM_PRIO_HAS_PAGES;
	}

	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
}

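/*
 * Priority ordering sketch (illustrative): TTM scans lower-numbered
 * priority lists first when evicting, so the defines at the top of this
 * file order eviction as PURGE (0), then NO_PAGES (1), then HAS_PAGES (2),
 * with NEEDS_CPU_ACCESS (3) victimized last among non-shmem objects.
 * Shmem-backed page vectors are parked at TTM_MAX_BO_PRIORITY - 1, since
 * they are handled by the GEM shrinker rather than by TTM swapping.
 */
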
1013213d5092SThomas Hellström /*
1014213d5092SThomas Hellström * TTM-backed gem object destruction requires some clarification.
1015213d5092SThomas Hellström * Basically we have two possibilities here. We can either rely on the
1016213d5092SThomas Hellström * i915 delayed destruction and put the TTM object when the object
1017213d5092SThomas Hellström * is idle. This would be detected by TTM which would bypass the
1018213d5092SThomas Hellström * TTM delayed destroy handling. The other approach is to put the TTM
1019213d5092SThomas Hellström * object early and rely on the TTM destroyed handling, and then free
1020213d5092SThomas Hellström * the leftover parts of the GEM object once TTM's destroyed list handling is
1021213d5092SThomas Hellström * complete. For now, we rely on the latter for two reasons:
1022213d5092SThomas Hellström * a) TTM can evict an object even when it's on the delayed destroy list,
1023213d5092SThomas Hellström * which in theory allows for complete eviction.
1024213d5092SThomas Hellström * b) There is work going on in TTM to allow freeing an object even when
1025213d5092SThomas Hellström * it's not idle, and using the TTM destroyed list handling could help us
1026213d5092SThomas Hellström * benefit from that.
1027213d5092SThomas Hellström */
1028213d5092SThomas Hellström static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
1029213d5092SThomas Hellström {
1030068396bbSThomas Hellström GEM_BUG_ON(!obj->ttm.created);
1031068396bbSThomas Hellström
1032213d5092SThomas Hellström ttm_bo_put(i915_gem_to_ttm(obj));
1033213d5092SThomas Hellström }
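/*
 * Note that ->delayed_free is expected to be invoked once the last GEM
 * reference has been dropped; the final ttm_bo_put() above then hands the
 * object over to TTM's destroyed-list handling, as described in the
 * comment block preceding this function.
 */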
1034213d5092SThomas Hellström
1035cf3e3e86SMaarten Lankhorst static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
1036cf3e3e86SMaarten Lankhorst {
1037cf3e3e86SMaarten Lankhorst struct vm_area_struct *area = vmf->vma;
10386385eb7aSThomas Hellström struct ttm_buffer_object *bo = area->vm_private_data;
1039ebd4a8ecSMatthew Auld struct drm_device *dev = bo->base.dev;
10406667d78aSNirmoy Das struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
10418231ac7eSJani Nikula intel_wakeref_t wakeref = NULL;
1042ebd4a8ecSMatthew Auld vm_fault_t ret;
1043ebd4a8ecSMatthew Auld int idx;
1044cf3e3e86SMaarten Lankhorst
1045cf3e3e86SMaarten Lankhorst /* Sanity check that we allow writing into this object */
1046cf3e3e86SMaarten Lankhorst if (unlikely(i915_gem_object_is_readonly(obj) &&
1047cf3e3e86SMaarten Lankhorst area->vm_flags & VM_WRITE))
1048cf3e3e86SMaarten Lankhorst return VM_FAULT_SIGBUS;
1049cf3e3e86SMaarten Lankhorst
1050ebd4a8ecSMatthew Auld ret = ttm_bo_vm_reserve(bo, vmf);
1051ebd4a8ecSMatthew Auld if (ret)
1052ebd4a8ecSMatthew Auld return ret;
1053ebd4a8ecSMatthew Auld
105403ee5956SMatthew Auld if (obj->mm.madv != I915_MADV_WILLNEED) {
105503ee5956SMatthew Auld dma_resv_unlock(bo->base.resv);
105603ee5956SMatthew Auld return VM_FAULT_SIGBUS;
105703ee5956SMatthew Auld }
105803ee5956SMatthew Auld
1059516198d3SChristian König /*
1060516198d3SChristian König * This must be swapped out with shmem ttm_tt (pipeline-gutting).
1061516198d3SChristian König * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
1062516198d3SChristian König * far as doing a ttm_bo_move_null(), which should skip all the
1063516198d3SChristian König * other junk.
1064516198d3SChristian König */
1065516198d3SChristian König if (!bo->resource) {
1066516198d3SChristian König struct ttm_operation_ctx ctx = {
1067516198d3SChristian König .interruptible = true,
1068516198d3SChristian König .no_wait_gpu = true, /* should be idle already */
1069516198d3SChristian König };
1070fde789e8SMatthew Auld int err;
1071516198d3SChristian König
1072516198d3SChristian König GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
1073516198d3SChristian König
1074fde789e8SMatthew Auld err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
1075fde789e8SMatthew Auld if (err) {
1076516198d3SChristian König dma_resv_unlock(bo->base.resv);
1077516198d3SChristian König return VM_FAULT_SIGBUS;
1078516198d3SChristian König }
1079516198d3SChristian König } else if (!i915_ttm_resource_mappable(bo->resource)) {
1080503725c2SMatthew Auld int err = -ENODEV;
1081503725c2SMatthew Auld int i;
1082503725c2SMatthew Auld
1083503725c2SMatthew Auld for (i = 0; i < obj->mm.n_placements; i++) {
1084503725c2SMatthew Auld struct intel_memory_region *mr = obj->mm.placements[i];
1085503725c2SMatthew Auld unsigned int flags;
1086503725c2SMatthew Auld
10873c0fa9f4SVille Syrjälä if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
1088503725c2SMatthew Auld continue;
1089503725c2SMatthew Auld
1090503725c2SMatthew Auld flags = obj->flags;
1091503725c2SMatthew Auld flags &= ~I915_BO_ALLOC_GPU_ONLY;
1092503725c2SMatthew Auld err = __i915_ttm_migrate(obj, mr, flags);
1093503725c2SMatthew Auld if (!err)
1094503725c2SMatthew Auld break;
1095503725c2SMatthew Auld }
1096503725c2SMatthew Auld
1097503725c2SMatthew Auld if (err) {
109847cdb66aSNirmoy Das drm_dbg_ratelimited(dev,
109947cdb66aSNirmoy Das "Unable to make resource CPU accessible (err = %pe)\n",
1100e5cedf98SNirmoy Das ERR_PTR(err));
1101503725c2SMatthew Auld dma_resv_unlock(bo->base.resv);
1102ad74457aSAnshuman Gupta ret = VM_FAULT_SIGBUS;
1103ad74457aSAnshuman Gupta goto out_rpm;
1104503725c2SMatthew Auld }
1105503725c2SMatthew Auld }
1106503725c2SMatthew Auld
1107625b7446SMatthew Auld if (i915_ttm_cpu_maps_iomem(bo->resource))
1108625b7446SMatthew Auld wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
1109625b7446SMatthew Auld
1110ebd4a8ecSMatthew Auld if (drm_dev_enter(dev, &idx)) {
1111ebd4a8ecSMatthew Auld ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
1112be373fadSMatthew Auld TTM_BO_VM_NUM_PREFAULT);
1113ebd4a8ecSMatthew Auld drm_dev_exit(idx);
1114ebd4a8ecSMatthew Auld } else {
1115ebd4a8ecSMatthew Auld ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
1116ebd4a8ecSMatthew Auld }
1117ad74457aSAnshuman Gupta
1118ebd4a8ecSMatthew Auld if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
1119ad74457aSAnshuman Gupta goto out_rpm;
1120ad74457aSAnshuman Gupta
11211cacd689SAnshuman Gupta /*
11221cacd689SAnshuman Gupta * ttm_bo_vm_reserve() already holds the dma_resv lock here.
11231cacd689SAnshuman Gupta * userfault_count is protected by the dma_resv lock and the rpm wakeref.
11241cacd689SAnshuman Gupta */
1125ad74457aSAnshuman Gupta if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
1126ad74457aSAnshuman Gupta obj->userfault_count = 1;
11271cacd689SAnshuman Gupta spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
1128e66c8dcfSAnshuman Gupta list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
11291cacd689SAnshuman Gupta spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
1130ccb0e027SMatthew Auld
1131ccb0e027SMatthew Auld GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
1132ad74457aSAnshuman Gupta }
1133ad74457aSAnshuman Gupta
11344c1bfe25SJani Nikula if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0)
1135e66c8dcfSAnshuman Gupta intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
1136ad74457aSAnshuman Gupta msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
1137ebd4a8ecSMatthew Auld
1138ebd4a8ecSMatthew Auld i915_ttm_adjust_lru(obj);
1139ebd4a8ecSMatthew Auld
1140ebd4a8ecSMatthew Auld dma_resv_unlock(bo->base.resv);
1141ad74457aSAnshuman Gupta
1142ad74457aSAnshuman Gupta out_rpm:
1143ad74457aSAnshuman Gupta if (wakeref)
1144ad74457aSAnshuman Gupta intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
1145ad74457aSAnshuman Gupta
1146ebd4a8ecSMatthew Auld return ret;
1147cf3e3e86SMaarten Lankhorst }
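/*
 * Fault-path summary, as implemented above: reserve the bo, reject purged
 * objects, swap the ttm_tt back in or migrate to a CPU-accessible placement
 * if necessary, take a runtime-PM wakeref when faulting iomem, insert the
 * PTEs, and finally track lmem userfaults so that runtime suspend can
 * revoke the mappings again.
 */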
1148cf3e3e86SMaarten Lankhorst
1149cf3e3e86SMaarten Lankhorst static int
1150cf3e3e86SMaarten Lankhorst vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
1151cf3e3e86SMaarten Lankhorst void *buf, int len, int write)
1152cf3e3e86SMaarten Lankhorst {
1153cf3e3e86SMaarten Lankhorst struct drm_i915_gem_object *obj =
1154cf3e3e86SMaarten Lankhorst i915_ttm_to_gem(area->vm_private_data);
1155cf3e3e86SMaarten Lankhorst
1156cf3e3e86SMaarten Lankhorst if (i915_gem_object_is_readonly(obj) && write)
1157cf3e3e86SMaarten Lankhorst return -EACCES;
1158cf3e3e86SMaarten Lankhorst
1159cf3e3e86SMaarten Lankhorst return ttm_bo_vm_access(area, addr, buf, len, write);
1160cf3e3e86SMaarten Lankhorst }
1161cf3e3e86SMaarten Lankhorst
1162cf3e3e86SMaarten Lankhorst static void ttm_vm_open(struct vm_area_struct *vma)
1163cf3e3e86SMaarten Lankhorst {
1164cf3e3e86SMaarten Lankhorst struct drm_i915_gem_object *obj =
1165cf3e3e86SMaarten Lankhorst i915_ttm_to_gem(vma->vm_private_data);
1166cf3e3e86SMaarten Lankhorst
11676667d78aSNirmoy Das GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data));
1168cf3e3e86SMaarten Lankhorst i915_gem_object_get(obj);
1169cf3e3e86SMaarten Lankhorst }
1170cf3e3e86SMaarten Lankhorst
1171cf3e3e86SMaarten Lankhorst static void ttm_vm_close(struct vm_area_struct *vma)
1172cf3e3e86SMaarten Lankhorst {
1173cf3e3e86SMaarten Lankhorst struct drm_i915_gem_object *obj =
1174cf3e3e86SMaarten Lankhorst i915_ttm_to_gem(vma->vm_private_data);
1175cf3e3e86SMaarten Lankhorst
11766667d78aSNirmoy Das GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data));
1177cf3e3e86SMaarten Lankhorst i915_gem_object_put(obj);
1178cf3e3e86SMaarten Lankhorst }
1179cf3e3e86SMaarten Lankhorst
1180cf3e3e86SMaarten Lankhorst static const struct vm_operations_struct vm_ops_ttm = {
1181cf3e3e86SMaarten Lankhorst .fault = vm_fault_ttm,
1182cf3e3e86SMaarten Lankhorst .access = vm_access_ttm,
1183cf3e3e86SMaarten Lankhorst .open = ttm_vm_open,
1184cf3e3e86SMaarten Lankhorst .close = ttm_vm_close,
1185cf3e3e86SMaarten Lankhorst };
1186cf3e3e86SMaarten Lankhorst
1187cf3e3e86SMaarten Lankhorst static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
1188cf3e3e86SMaarten Lankhorst {
1189cf3e3e86SMaarten Lankhorst /* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
1190cf3e3e86SMaarten Lankhorst GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));
1191cf3e3e86SMaarten Lankhorst
1192cf3e3e86SMaarten Lankhorst return drm_vma_node_offset_addr(&obj->base.vma_node);
1193cf3e3e86SMaarten Lankhorst }
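/*
 * A userspace sketch (assuming a valid DRM fd, GEM handle and object size)
 * of how the offset above is consumed; a fault on the returned pointer is
 * what ends up in vm_fault_ttm():
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_FIXED,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */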
1194cf3e3e86SMaarten Lankhorst
11958ee262baSMatthew Auld static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
11968ee262baSMatthew Auld {
11971cacd689SAnshuman Gupta struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
11988231ac7eSJani Nikula intel_wakeref_t wakeref = NULL;
11991cacd689SAnshuman Gupta
12001cacd689SAnshuman Gupta assert_object_held_shared(obj);
12011cacd689SAnshuman Gupta
12021cacd689SAnshuman Gupta if (i915_ttm_cpu_maps_iomem(bo->resource)) {
12031cacd689SAnshuman Gupta wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
12041cacd689SAnshuman Gupta
12051cacd689SAnshuman Gupta /* userfault_count is protected by obj lock and rpm wakeref. */
12061cacd689SAnshuman Gupta if (obj->userfault_count) {
12071cacd689SAnshuman Gupta spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
12081cacd689SAnshuman Gupta list_del(&obj->userfault_link);
12091cacd689SAnshuman Gupta spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
12101cacd689SAnshuman Gupta obj->userfault_count = 0;
12111cacd689SAnshuman Gupta }
12121cacd689SAnshuman Gupta }
12131cacd689SAnshuman Gupta
1214ccb0e027SMatthew Auld GEM_WARN_ON(obj->userfault_count);
1215ccb0e027SMatthew Auld
12168ee262baSMatthew Auld ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
12171cacd689SAnshuman Gupta
12181cacd689SAnshuman Gupta if (wakeref)
12191cacd689SAnshuman Gupta intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
12208ee262baSMatthew Auld }
12218ee262baSMatthew Auld
12224bc2d574SMatthew Auld static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
1223213d5092SThomas Hellström .name = "i915_gem_object_ttm",
12245d12ffe6SMatthew Auld .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
12255d12ffe6SMatthew Auld I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
1226213d5092SThomas Hellström
1227213d5092SThomas Hellström .get_pages = i915_ttm_get_pages,
1228213d5092SThomas Hellström .put_pages = i915_ttm_put_pages,
12296ef295e3SMatthew Auld .truncate = i915_ttm_truncate,
1230ffa3fe08SMatthew Auld .shrink = i915_ttm_shrink,
12317ae03459SMatthew Auld
1232213d5092SThomas Hellström .adjust_lru = i915_ttm_adjust_lru,
1233213d5092SThomas Hellström .delayed_free = i915_ttm_delayed_free,
1234b6e913e1SThomas Hellström .migrate = i915_ttm_migrate,
12357ae03459SMatthew Auld
1236cf3e3e86SMaarten Lankhorst .mmap_offset = i915_ttm_mmap_offset,
12378ee262baSMatthew Auld .unmap_virtual = i915_ttm_unmap_virtual,
1238cf3e3e86SMaarten Lankhorst .mmap_ops = &vm_ops_ttm,
1239213d5092SThomas Hellström };
1240213d5092SThomas Hellström
1241213d5092SThomas Hellström void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
1242213d5092SThomas Hellström {
1243213d5092SThomas Hellström struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
1244213d5092SThomas Hellström
1245213d5092SThomas Hellström i915_gem_object_release_memory_region(obj);
1246cf3e3e86SMaarten Lankhorst mutex_destroy(&obj->ttm.get_io_page.lock);
1247068396bbSThomas Hellström
1248068396bbSThomas Hellström if (obj->ttm.created) {
1249ebd4a8ecSMatthew Auld /*
1250ebd4a8ecSMatthew Auld * We freely manage the shrinker LRU outside of the mm.pages life
1251ebd4a8ecSMatthew Auld * cycle. As a result when destroying the object we should be
1252ebd4a8ecSMatthew Auld * extra paranoid and ensure we remove it from the LRU, before
1253ebd4a8ecSMatthew Auld * we free the object.
1254ebd4a8ecSMatthew Auld *
1255ebd4a8ecSMatthew Auld * Touching the ttm_shrinkable outside of the object lock here
1256ebd4a8ecSMatthew Auld * should be safe now that the last GEM object ref was dropped.
1257ebd4a8ecSMatthew Auld */
1258ebd4a8ecSMatthew Auld if (obj->mm.ttm_shrinkable)
1259ebd4a8ecSMatthew Auld i915_gem_object_make_unshrinkable(obj);
1260ebd4a8ecSMatthew Auld
1261c56ce956SThomas Hellström i915_ttm_backup_free(obj);
1262c56ce956SThomas Hellström
126348b09612SMaarten Lankhorst /* This releases all gem object bindings to the backend. */
126448b09612SMaarten Lankhorst __i915_gem_free_object(obj);
126548b09612SMaarten Lankhorst
1266213d5092SThomas Hellström call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
1267068396bbSThomas Hellström } else {
1268068396bbSThomas Hellström __i915_gem_object_fini(obj);
1269068396bbSThomas Hellström }
1270213d5092SThomas Hellström }
1271213d5092SThomas Hellström
127298a1daccSLee Jones /*
1273213d5092SThomas Hellström * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
1274213d5092SThomas Hellström * @mem: The initial memory region for the object.
1275213d5092SThomas Hellström * @obj: The gem object.
 * @offset: Fixed offset within @mem at which to place the object, or
 * I915_BO_INVALID_OFFSET if the backend may place it anywhere.
1276213d5092SThomas Hellström * @size: Object size in bytes.
 * @page_size: Page size to force for the backing store, in bytes, or 0
 * for the region default. Forcing the page size is kernel internal only.
1277213d5092SThomas Hellström * @flags: gem object flags.
1278213d5092SThomas Hellström *
1279213d5092SThomas Hellström * Return: 0 on success, negative error code on failure.
1280213d5092SThomas Hellström */
1281213d5092SThomas Hellström int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
1282213d5092SThomas Hellström struct drm_i915_gem_object *obj,
12839b78b5daSMatthew Auld resource_size_t offset,
1284213d5092SThomas Hellström resource_size_t size,
1285d22632c8SMatthew Auld resource_size_t page_size,
1286213d5092SThomas Hellström unsigned int flags)
1287213d5092SThomas Hellström {
1288213d5092SThomas Hellström static struct lock_class_key lock_class;
1289213d5092SThomas Hellström struct drm_i915_private *i915 = mem->i915;
12903c2b8f32SThomas Hellström struct ttm_operation_ctx ctx = {
12913c2b8f32SThomas Hellström .interruptible = true,
12923c2b8f32SThomas Hellström .no_wait_gpu = false,
12933c2b8f32SThomas Hellström };
1294213d5092SThomas Hellström enum ttm_bo_type bo_type;
1295213d5092SThomas Hellström int ret;
1296213d5092SThomas Hellström
1297213d5092SThomas Hellström drm_gem_private_object_init(&i915->drm, &obj->base, size);
1298213d5092SThomas Hellström i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
1299068396bbSThomas Hellström
1300ecbf2060SMatthew Auld obj->bo_offset = offset;
1301ecbf2060SMatthew Auld
1302068396bbSThomas Hellström /* Don't put on a region list until we're either locked or fully initialized. */
13038b1f7f92SThomas Hellström obj->mm.region = mem;
1304068396bbSThomas Hellström INIT_LIST_HEAD(&obj->mm.region_link);
1305068396bbSThomas Hellström
1306cf3e3e86SMaarten Lankhorst INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
1307cf3e3e86SMaarten Lankhorst mutex_init(&obj->ttm.get_io_page.lock);
1308213d5092SThomas Hellström bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
1309213d5092SThomas Hellström ttm_bo_type_kernel;
1310213d5092SThomas Hellström
13113c2b8f32SThomas Hellström obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
13123c2b8f32SThomas Hellström
1313d22632c8SMatthew Auld /* Forcing the page size is kernel internal only */
1314d22632c8SMatthew Auld GEM_BUG_ON(page_size && obj->mm.n_placements);
1315d22632c8SMatthew Auld
1316213d5092SThomas Hellström /*
1317ebd4a8ecSMatthew Auld * Keep an extra shrink pin to prevent the object from being made
1318ebd4a8ecSMatthew Auld * shrinkable too early. If the ttm_tt is ever allocated in shmem, we
1319ebd4a8ecSMatthew Auld * drop the pin. The TTM backend manages the shrinker LRU itself,
1320ebd4a8ecSMatthew Auld * outside of the normal mm.pages life cycle.
1321ebd4a8ecSMatthew Auld */
1322ebd4a8ecSMatthew Auld i915_gem_object_make_unshrinkable(obj);
1323ebd4a8ecSMatthew Auld
1324ebd4a8ecSMatthew Auld /*
1325213d5092SThomas Hellström * If this function fails, it will call the destructor, but
1326213d5092SThomas Hellström * our caller still owns the object. So no freeing in the
1327213d5092SThomas Hellström * destructor until obj->ttm.created is true.
1328213d5092SThomas Hellström * Similarly, in delayed_destroy, we can't call ttm_bo_put()
1329213d5092SThomas Hellström * until successful initialization.
1330213d5092SThomas Hellström */
1331347987a2SChristian König ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
1332347987a2SChristian König &i915_sys_placement, page_size >> PAGE_SHIFT,
13333c2b8f32SThomas Hellström &ctx, NULL, NULL, i915_ttm_bo_destroy);
133418f968cbSGwan-gyeong Mun
133518f968cbSGwan-gyeong Mun /*
133618f968cbSGwan-gyeong Mun * XXX: ttm_bo_init_reserved() returns -ENOSPC if the size is too big
133718f968cbSGwan-gyeong Mun * to add a vma; the function that actually fails with -ENOSPC is
133818f968cbSGwan-gyeong Mun * drm_mm_insert_node_in_range(). Convert -ENOSPC to -E2BIG here so we
133918f968cbSGwan-gyeong Mun * report the same error as other code paths do when the size is too
134018f968cbSGwan-gyeong Mun * large.
134118f968cbSGwan-gyeong Mun */
134218f968cbSGwan-gyeong Mun if (size >> PAGE_SHIFT > INT_MAX && ret == -ENOSPC)
134318f968cbSGwan-gyeong Mun ret = -E2BIG;
134418f968cbSGwan-gyeong Mun
13453c2b8f32SThomas Hellström if (ret)
1346b07a6483SThomas Hellström return i915_ttm_err_to_gem(ret);
1347213d5092SThomas Hellström
1348213d5092SThomas Hellström obj->ttm.created = true;
1349068396bbSThomas Hellström i915_gem_object_release_memory_region(obj);
1350068396bbSThomas Hellström i915_gem_object_init_memory_region(obj, mem);
13513c2b8f32SThomas Hellström i915_ttm_adjust_domains_after_move(obj);
13523c2b8f32SThomas Hellström i915_ttm_adjust_gem_after_move(obj);
13533c2b8f32SThomas Hellström i915_gem_object_unlock(obj);
1354213d5092SThomas Hellström
13553c2b8f32SThomas Hellström return 0;
1356213d5092SThomas Hellström }
135732b7cf51SThomas Hellström
135832b7cf51SThomas Hellström static const struct intel_memory_region_ops ttm_system_region_ops = {
135932b7cf51SThomas Hellström .init_object = __i915_gem_ttm_object_init,
13608b1f7f92SThomas Hellström .release = intel_region_ttm_fini,
136132b7cf51SThomas Hellström };
136232b7cf51SThomas Hellström
136332b7cf51SThomas Hellström struct intel_memory_region *
136432b7cf51SThomas Hellström i915_gem_ttm_system_setup(struct drm_i915_private *i915,
136532b7cf51SThomas Hellström u16 type, u16 instance)
136632b7cf51SThomas Hellström {
136732b7cf51SThomas Hellström struct intel_memory_region *mr;
136832b7cf51SThomas Hellström
136932b7cf51SThomas Hellström mr = intel_memory_region_create(i915, 0,
137032b7cf51SThomas Hellström totalram_pages() << PAGE_SHIFT,
1371235582caSMatthew Auld PAGE_SIZE, 0, 0,
137232b7cf51SThomas Hellström type, instance,
137332b7cf51SThomas Hellström &ttm_system_region_ops);
137432b7cf51SThomas Hellström if (IS_ERR(mr))
137532b7cf51SThomas Hellström return mr;
137632b7cf51SThomas Hellström
137732b7cf51SThomas Hellström intel_memory_region_set_name(mr, "system-ttm");
137832b7cf51SThomas Hellström return mr;
1379213d5092SThomas Hellström }
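/*
 * A sketch of the expected use: on discrete parts the memory-region probe
 * code is expected to instantiate this region along the lines of
 *
 *	mr = i915_gem_ttm_system_setup(i915, INTEL_MEMORY_SYSTEM, 0);
 *
 * after which system memory objects for that region are created through
 * ttm_system_region_ops.init_object() above.
 */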
1380