1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
5
6 #include <drm/drm_fourcc.h>
7 #include <drm/drm_print.h>
8
9 #include "display/intel_display.h"
10 #include "gem/i915_gem_ioctls.h"
11 #include "gem/i915_gem_lmem.h"
12 #include "gem/i915_gem_region.h"
13 #include "pxp/intel_pxp.h"
14
15 #include "i915_drv.h"
16 #include "i915_gem_create.h"
17 #include "i915_trace.h"
18 #include "i915_user_extensions.h"
19
object_max_page_size(struct intel_memory_region ** placements,unsigned int n_placements)20 static u32 object_max_page_size(struct intel_memory_region **placements,
21 unsigned int n_placements)
22 {
23 u32 max_page_size = 0;
24 int i;
25
26 for (i = 0; i < n_placements; i++) {
27 struct intel_memory_region *mr = placements[i];
28
29 GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
30 max_page_size = max_t(u32, max_page_size, mr->min_page_size);
31 }
32
33 GEM_BUG_ON(!max_page_size);
34 return max_page_size;
35 }
36
object_set_placements(struct drm_i915_gem_object * obj,struct intel_memory_region ** placements,unsigned int n_placements)37 static int object_set_placements(struct drm_i915_gem_object *obj,
38 struct intel_memory_region **placements,
39 unsigned int n_placements)
40 {
41 struct intel_memory_region **arr;
42 unsigned int i;
43
44 GEM_BUG_ON(!n_placements);
45
46 /*
47 * For the common case of one memory region, skip storing an
48 * allocated array and just point at the region directly.
49 */
50 if (n_placements == 1) {
51 struct intel_memory_region *mr = placements[0];
52 struct drm_i915_private *i915 = mr->i915;
53
54 obj->mm.placements = &i915->mm.regions[mr->id];
55 obj->mm.n_placements = 1;
56 } else {
57 arr = kmalloc_objs(struct intel_memory_region *, n_placements);
58 if (!arr)
59 return -ENOMEM;
60
61 for (i = 0; i < n_placements; i++)
62 arr[i] = placements[i];
63
64 obj->mm.placements = arr;
65 obj->mm.n_placements = n_placements;
66 }
67
68 return 0;
69 }
70
/*
 * Create a userspace handle for @obj and report the final object size.
 * The allocation reference is always dropped; on success the handle
 * keeps the object alive.
 */
static int i915_gem_publish(struct drm_i915_gem_object *obj,
			    struct drm_file *file,
			    u64 *size_p,
			    u32 *handle_p)
{
	u64 final_size = obj->base.size;
	int err;

	err = drm_gem_handle_create(file, &obj->base, handle_p);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (err)
		return err;

	*size_p = final_size;
	return 0;
}
88
/*
 * Common worker for user-visible object creation: round the requested
 * size up to the largest minimum page size among the placements,
 * allocate the object, record its placement list, and initialise the
 * backing store via the first (highest priority) region's ops.
 *
 * Returns the new object (holding a single reference) or an ERR_PTR.
 */
static struct drm_i915_gem_object *
__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
				  struct intel_memory_region **placements,
				  unsigned int n_placements,
				  unsigned int ext_flags)
{
	struct intel_memory_region *mr = placements[0];
	struct drm_i915_gem_object *obj;
	unsigned int flags;
	int ret;

	/* Reap any objects pending free so their memory can be reused. */
	i915_gem_flush_free_objects(i915);

	/* A u64 size may wrap to 0 here; that is rejected as -EINVAL. */
	size = round_up(size, object_max_page_size(placements, n_placements));
	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* For most of the ABI (e.g. mmap) we think in system pages */
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = object_set_placements(obj, placements, n_placements);
	if (ret)
		goto object_free;

	/*
	 * I915_BO_ALLOC_USER will make sure the object is cleared before
	 * any user access.
	 */
	flags = I915_BO_ALLOC_USER;

	ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
	if (ret)
		goto object_free;

	GEM_BUG_ON(size != obj->base.size);

	/* Add any flag set by create_ext options */
	obj->flags |= ext_flags;

	trace_i915_gem_object_create(obj);
	return obj;

object_free:
	/*
	 * A single placement points into i915->mm.regions and is not
	 * owned by the object; only a multi-entry array was allocated.
	 */
	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}
144
145 /**
146 * __i915_gem_object_create_user - Creates a new object using the same path as
147 * DRM_I915_GEM_CREATE_EXT
148 * @i915: i915 private
149 * @size: size of the buffer, in bytes
150 * @placements: possible placement regions, in priority order
151 * @n_placements: number of possible placement regions
152 *
153 * This function is exposed primarily for selftests and does very little
154 * error checking. It is assumed that the set of placement regions has
155 * already been verified to be valid.
156 */
157 struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private * i915,u64 size,struct intel_memory_region ** placements,unsigned int n_placements)158 __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
159 struct intel_memory_region **placements,
160 unsigned int n_placements)
161 {
162 return __i915_gem_object_create_user_ext(i915, size, placements,
163 n_placements, 0);
164 }
165
/*
 * Implements the DRM "dumb buffer" hook: allocates a linear buffer
 * suitable for scanout, computing pitch and total size from the
 * requested width/height/bpp. Placed in local memory when the device
 * has LMEM, otherwise in system memory.
 */
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;
	enum intel_memory_type mem_type;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	/* Only 8/16/32 bpp map onto a supported framebuffer format. */
	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_dumb_fb_max_stride(dev, format,
						   DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	/* Catch u32 overflow of width * cpp in the ALIGN above. */
	if (args->pitch < args->width)
		return -EINVAL;

	args->size = mul_u32_u32(args->pitch, args->height);

	mem_type = INTEL_MEMORY_SYSTEM;
	if (HAS_LMEM(to_i915(dev)))
		mem_type = INTEL_MEMORY_LOCAL;

	mr = intel_memory_region_by_type(to_i915(dev), mem_type);

	obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Transfers the object reference to the returned handle. */
	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
216
217 /**
218 * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
219 * @dev: drm device pointer
220 * @data: ioctl data blob
221 * @file: drm file pointer
222 */
223 int
i915_gem_create_ioctl(struct drm_device * dev,void * data,struct drm_file * file)224 i915_gem_create_ioctl(struct drm_device *dev, void *data,
225 struct drm_file *file)
226 {
227 struct drm_i915_private *i915 = to_i915(dev);
228 struct drm_i915_gem_create *args = data;
229 struct drm_i915_gem_object *obj;
230 struct intel_memory_region *mr;
231
232 mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
233
234 obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
235 if (IS_ERR(obj))
236 return PTR_ERR(obj);
237
238 return i915_gem_publish(obj, file, &args->size, &args->handle);
239 }
240
/* Accumulated state from the create_ext ioctl's user extension chain. */
struct create_ext {
	struct drm_i915_private *i915;
	/* Requested placements, in priority order (index 0 preferred). */
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	unsigned int n_placements;
	/* Bitmask of region ids present in @placements. */
	unsigned int placement_mask;
	/* I915_BO_* flags to apply at object creation. */
	unsigned long flags;
	/* PAT index requested via SET_PAT, or PAT_INDEX_NOT_SET. */
	unsigned int pat_index;
};
249
/*
 * Format a human-readable list of placement regions into @buf for
 * debug logging, stopping silently once @size is exhausted.
 */
static void repr_placements(char *buf, size_t size,
			    struct intel_memory_region **placements,
			    int n_placements)
{
	int idx;

	buf[0] = '\0';

	for (idx = 0; idx < n_placements; idx++) {
		struct intel_memory_region *mr = placements[idx];
		int written;

		written = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }",
				   mr->name, mr->type, mr->instance);
		if (written >= size)
			return;

		size -= written;
		buf += written;
	}
}
271
set_placements(struct drm_i915_gem_create_ext_memory_regions * args,struct create_ext * ext_data)272 static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
273 struct create_ext *ext_data)
274 {
275 struct drm_i915_private *i915 = ext_data->i915;
276 struct drm_i915_gem_memory_class_instance __user *uregions =
277 u64_to_user_ptr(args->regions);
278 struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
279 u32 mask;
280 int i, ret = 0;
281
282 if (args->pad) {
283 drm_dbg(&i915->drm, "pad should be zero\n");
284 ret = -EINVAL;
285 }
286
287 if (!args->num_regions) {
288 drm_dbg(&i915->drm, "num_regions is zero\n");
289 ret = -EINVAL;
290 }
291
292 BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
293 BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
294 if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
295 drm_dbg(&i915->drm, "num_regions is too large\n");
296 ret = -EINVAL;
297 }
298
299 if (ret)
300 return ret;
301
302 mask = 0;
303 for (i = 0; i < args->num_regions; i++) {
304 struct drm_i915_gem_memory_class_instance region;
305 struct intel_memory_region *mr;
306
307 if (copy_from_user(®ion, uregions, sizeof(region)))
308 return -EFAULT;
309
310 mr = intel_memory_region_lookup(i915,
311 region.memory_class,
312 region.memory_instance);
313 if (!mr || mr->private) {
314 drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
315 region.memory_class, region.memory_instance, i);
316 ret = -EINVAL;
317 goto out_dump;
318 }
319
320 if (mask & BIT(mr->id)) {
321 drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
322 mr->name, region.memory_class,
323 region.memory_instance, i);
324 ret = -EINVAL;
325 goto out_dump;
326 }
327
328 placements[i] = mr;
329 mask |= BIT(mr->id);
330
331 ++uregions;
332 }
333
334 if (ext_data->n_placements) {
335 ret = -EINVAL;
336 goto out_dump;
337 }
338
339 ext_data->n_placements = args->num_regions;
340 for (i = 0; i < args->num_regions; i++)
341 ext_data->placements[i] = placements[i];
342
343 ext_data->placement_mask = mask;
344 return 0;
345
346 out_dump:
347 if (1) {
348 char buf[256];
349
350 if (ext_data->n_placements) {
351 repr_placements(buf,
352 sizeof(buf),
353 ext_data->placements,
354 ext_data->n_placements);
355 drm_dbg(&i915->drm,
356 "Placements were already set in previous EXT. Existing placements: %s\n",
357 buf);
358 }
359
360 repr_placements(buf, sizeof(buf), placements, i);
361 drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
362 }
363
364 return ret;
365 }
366
/* Copy the MEMORY_REGIONS extension header in and hand it to set_placements(). */
static int ext_set_placements(struct i915_user_extension __user *base,
			      void *data)
{
	struct drm_i915_gem_create_ext_memory_regions args;

	if (copy_from_user(&args, base, sizeof(args)))
		return -EFAULT;

	return set_placements(&args, data);
}
377
/*
 * Handle I915_GEM_CREATE_EXT_PROTECTED_CONTENT: request a PXP-protected
 * object. Fails with -ENODEV when PXP is unavailable.
 */
static int ext_set_protected(struct i915_user_extension __user *base, void *data)
{
	struct create_ext *ext_data = data;
	struct drm_i915_gem_create_ext_protected_content args;

	if (copy_from_user(&args, base, sizeof(args)))
		return -EFAULT;

	/* No flags are currently defined for this extension. */
	if (args.flags)
		return -EINVAL;

	if (!intel_pxp_is_enabled(ext_data->i915->pxp))
		return -ENODEV;

	ext_data->flags |= I915_BO_PROTECTED;

	return 0;
}
396
/*
 * Handle I915_GEM_CREATE_EXT_SET_PAT: record a userspace-chosen PAT
 * index. Only supported on Xe_LPG (graphics IP 12.70) and newer.
 */
static int ext_set_pat(struct i915_user_extension __user *base, void *data)
{
	struct create_ext *ext_data = data;
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_create_ext_set_pat args;

	/* The uAPI struct must end at rsvd - no unchecked trailing bytes. */
	BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) !=
		     offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd));

	/* Limiting the extension only to Xe_LPG and beyond */
	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))
		return -ENODEV;

	if (copy_from_user(&args, base, sizeof(args)))
		return -EFAULT;

	if (args.pat_index > INTEL_INFO(i915)->max_pat_index) {
		drm_dbg(&i915->drm, "PAT index is invalid: %u\n",
			args.pat_index);
		return -EINVAL;
	}

	ext_data->pat_index = args.pat_index;

	return 0;
}
426
/* Dispatch table for DRM_IOCTL_I915_GEM_CREATE_EXT user extensions. */
static const i915_user_extension_fn create_extensions[] = {
	[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
	[I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
	[I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat,
};

/* Sentinel meaning "no SET_PAT extension supplied"; real indices are small. */
#define PAT_INDEX_NOT_SET 0xffff
434 /**
435 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
436 * @dev: drm device pointer
437 * @data: ioctl data blob
438 * @file: drm file pointer
439 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create_ext *args = data;
	struct create_ext ext_data = { .i915 = i915 };
	struct drm_i915_gem_object *obj;
	int ret;

	/* NEEDS_CPU_ACCESS is currently the only accepted creation flag. */
	if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
		return -EINVAL;

	/* Must be set before walking extensions: SET_PAT overwrites it. */
	ext_data.pat_index = PAT_INDEX_NOT_SET;
	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   create_extensions,
				   ARRAY_SIZE(create_extensions),
				   &ext_data);
	if (ret)
		return ret;

	/* With no MEMORY_REGIONS extension, default to system memory. */
	if (!ext_data.n_placements) {
		ext_data.placements[0] =
			intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
		ext_data.n_placements = 1;
	}

	if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) {
		/*
		 * A single placement is rejected here: the flag only makes
		 * sense when LMEM and SMEM are both candidates.
		 */
		if (ext_data.n_placements == 1)
			return -EINVAL;

		/*
		 * We always need to be able to spill to system memory, if we
		 * can't place in the mappable part of LMEM.
		 */
		if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM)))
			return -EINVAL;
	} else {
		/* Anything not purely system memory may be GPU-only. */
		if (ext_data.n_placements > 1 ||
		    ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
			ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
	}

	obj = __i915_gem_object_create_user_ext(i915, args->size,
						ext_data.placements,
						ext_data.n_placements,
						ext_data.flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (ext_data.pat_index != PAT_INDEX_NOT_SET) {
		i915_gem_object_set_pat_index(obj, ext_data.pat_index);
		/* Mark pat_index is set by UMD */
		obj->pat_set_by_user = true;
	}

	/* Publish transfers the object reference to the returned handle. */
	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
498