// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_internal.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

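/*
 * Allocation strategy for the chunks below: high-order attempts use
 * QUIET (fail fast, no warning) since we can always fall back to a
 * smaller order, while the final order-0 attempt uses MAYFAIL (retry
 * harder, but still return NULL rather than invoke the OOM killer).
 */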
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

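/*
 * Release every chunk recorded in the table; an entry with a NULL page
 * is one that was never filled (see the err: unwind in get_pages).
 */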
static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int npages; /* restricted by sg_alloc_table */
	int max_order = MAX_PAGE_ORDER;
	unsigned int max_segment;
	gfp_t gfp;

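	/*
	 * obj->base.size is a size_t; the page count must fit in the
	 * unsigned int that sg_alloc_table() accepts.
	 */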
	if (overflows_type(obj->base.size >> PAGE_SHIFT, npages))
		return -E2BIG;

	npages = obj->base.size >> PAGE_SHIFT;
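
	/*
	 * Each sg entry must fit within the device's max DMA segment
	 * size, so clamp the largest allocation order accordingly.
	 */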
	max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT;
	max_order = min(max_order, get_order(max_segment));

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

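	/*
	 * Starting point for (re)building the sg_table; we restart from
	 * here with max_order = 0 if the GTT cannot dma-map the large
	 * segments.
	 */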
create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;

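	/*
	 * Fill the table with the largest physically contiguous chunks
	 * the allocator will give us, lowering the order after a failure.
	 */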
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map; try again with single-page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	__i915_gem_object_set_pages(obj, st);

	return 0;

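	/*
	 * Terminate the table at the entry we failed to fill so that
	 * internal_free_pages() only touches pages we actually allocated.
	 */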
err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
}

static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
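	/* Unmap from the GTT before handing the pages back to the system. */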
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);

	obj->mm.dirty = false;

	__start_cpu_write(obj);
}

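/*
 * Internal objects are shrinkable; together with the volatile flag set
 * at creation time, this lets the shrinker reap their pages as soon as
 * they are unpinned.
 */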
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.name = "i915_gem_object_internal",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

struct drm_i915_gem_object *
__i915_gem_object_create_internal(struct drm_i915_private *i915,
				  const struct drm_i915_gem_object_ops *ops,
				  phys_addr_t size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

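	/* size is a phys_addr_t, which may be wider than obj->base.size. */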
	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;

	/*
	 * Mark the object as volatile, such that the pages are marked as
	 * dontneed whilst they are still pinned. As soon as they are unpinned
	 * they are allowed to be reaped by the shrinker, and the caller is
	 * expected to repopulate - the contents of this object are only valid
	 * whilst active and pinned.
	 */
	i915_gem_object_set_volatile(obj);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
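 *
 * A minimal illustrative sketch (the caller context and size here are
 * hypothetical, and error handling is abbreviated):
 *
 *	obj = i915_gem_object_create_internal(i915, SZ_4K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * Returns: the new object on success, or an ERR_PTR() encoded error
 * (-E2BIG or -ENOMEM) on failure.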
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size);
}