// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

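/*
 * A minimal wiring sketch, assuming a hypothetical driver that provides its
 * own struct drm_gem_object_funcs built from the _object_ helpers named
 * above ("mydrv" is illustrative only):
 *
 *	static const struct drm_gem_object_funcs mydrv_gem_funcs = {
 *		.free = drm_gem_shmem_object_free,
 *		.print_info = drm_gem_shmem_object_print_info,
 *		.pin = drm_gem_shmem_object_pin,
 *		.unpin = drm_gem_shmem_object_unpin,
 *		.get_sg_table = drm_gem_shmem_object_get_sg_table,
 *		.vmap = drm_gem_shmem_object_vmap,
 *		.vunmap = drm_gem_shmem_object_vunmap,
 *		.mmap = drm_gem_shmem_object_mmap,
 *		.vm_ops = &drm_gem_shmem_vm_ops,
 *	};
 *
 * Drivers that don't set &drm_gem_object.funcs get exactly this table by
 * default (see drm_gem_shmem_funcs below).
 */
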
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
		       struct vfsmount *gemfs)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

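/*
 * A minimal usage sketch, assuming a hypothetical driver ioctl that
 * allocates a buffer and returns a handle to userspace (the "args" fields
 * are illustrative only):
 *
 *	struct drm_gem_shmem_object *shmem;
 *	int ret;
 *
 *	shmem = drm_gem_shmem_create(dev, args->size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	ret = drm_gem_handle_create(file, &shmem->base, &args->handle);
 *	drm_gem_object_put(&shmem->base);	(the handle now holds the ref)
 *	return ret;
 */
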
/**
 * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
 * given mountpoint
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @gemfs: tmpfs mount where the GEM object will be created
 *
 * This function creates a shmem GEM object in a given tmpfs mountpoint.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
							   size_t size,
							   struct vfsmount *gemfs)
{
	return __drm_gem_shmem_create(dev, size, false, gemfs);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_gem_is_imported(obj)) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages_locked(shmem);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_inc_not_zero(&shmem->pages_use_count))
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	refcount_set(&shmem->pages_use_count, 1);

	return 0;
}

/*
 * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
		if (shmem->map_wc)
			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

		drm_gem_put_pages(obj, shmem->pages,
				  shmem->pages_mark_dirty_on_put,
				  shmem->pages_mark_accessed_on_put);
		shmem->pages = NULL;
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (!ret)
		refcount_set(&shmem->pages_pin_count, 1);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_pin_count))
		drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_dec_not_one(&shmem->pages_pin_count))
		return;

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
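
/*
 * A minimal pin/unpin sketch, assuming a hypothetical driver that needs the
 * pages to stay resident across a device operation ("mydrv_run_job" is
 * illustrative only):
 *
 *	int ret = drm_gem_shmem_pin(shmem);
 *
 *	if (ret)
 *		return ret;
 *	mydrv_run_job(shmem);		(pages cannot move while pinned)
 *	drm_gem_shmem_unpin(shmem);
 *
 * Every successful drm_gem_shmem_pin() must be balanced by exactly one
 * drm_gem_shmem_unpin().
 */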

/*
 * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
			      struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_pin_locked(shmem);
		if (ret)
			return ret;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr) {
			ret = -ENOMEM;
		} else {
			iosys_map_set_vaddr(map, shmem->vaddr);
			refcount_set(&shmem->vmap_use_count, 1);
		}
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!drm_gem_is_imported(obj))
		drm_gem_shmem_unpin_locked(shmem);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);

/*
 * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
 * drops to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
				 struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(obj->resv);

	if (drm_gem_is_imported(obj)) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
			vunmap(shmem->vaddr);
			shmem->vaddr = NULL;

			drm_gem_shmem_unpin_locked(shmem);
		}
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
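
/*
 * A minimal vmap/vunmap sketch. Both _locked helpers expect the caller to
 * hold the object's reservation lock; the copy here stands in for whatever
 * CPU access a driver actually needs:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap_locked(shmem, &map);
 *	if (!ret) {
 *		iosys_map_memcpy_to(&map, 0, src, len);
 *		drm_gem_shmem_vunmap_locked(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */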

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
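
/*
 * A minimal eviction sketch, assuming a hypothetical driver shrinker that
 * walks its own list of objects previously marked with
 * drm_gem_shmem_madvise_locked(shmem, 1) (i.e. "don't need"):
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge_locked(shmem);
 *	dma_resv_unlock(shmem->base.resv);
 */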

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

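/*
 * A sketch of the "additional restrictions" case mentioned above, assuming
 * hypothetical hardware that requires the pitch to be a multiple of 64
 * bytes ("mydrv" is illustrative only):
 *
 *	static int mydrv_dumb_create(struct drm_file *file,
 *				     struct drm_device *dev,
 *				     struct drm_mode_create_dumb *args)
 *	{
 *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		return drm_gem_shmem_dumb_create(file, dev, args);
 *	}
 */
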
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd; vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (i.e. on fork()).
	 */
	drm_WARN_ON_ONCE(obj->dev,
			 !refcount_inc_not_zero(&shmem->pages_use_count));

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (drm_gem_is_imported(obj)) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (drm_gem_is_imported(&shmem->base))
		return;

	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
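
/*
 * A minimal usage sketch: a driver that programs device DMA from the
 * object's backing pages might iterate the returned table like this
 * ("mydrv_program_dma" is hypothetical):
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		mydrv_program_dma(sg_dma_address(sg), sg_dma_len(sg));
 *
 * The table stays cached in shmem->sgt and is released when the object is
 * freed or purged, so the caller does not free it.
 */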

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

/**
 * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
 * @dev: Device to import into
 * @dma_buf: dma-buf object to import
 *
 * Drivers that use the shmem helpers but also want to import a dmabuf without
 * mapping its sg_table can use this as their &drm_driver.gem_prime_import
 * implementation.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
							 struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	size_t size;
	int ret;

	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		/*
		 * Importing dmabuf exported from our own gem increases
		 * refcount on gem itself instead of f_count of dmabuf.
		 */
		obj = dma_buf->priv;
		drm_gem_object_get(obj);
		return obj;
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	size = PAGE_ALIGN(attach->dmabuf->size);

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem)) {
		ret = PTR_ERR(shmem);
		goto fail_detach;
	}

	drm_dbg_prime(dev, "size = %zu\n", size);

	shmem->base.import_attach = attach;
	shmem->base.resv = dma_buf->resv;

	return &shmem->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");