// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object_funcs, see the likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
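
/*
 * A minimal wiring sketch (illustrative only, hypothetical "foo" names): a
 * driver built on these helpers typically plugs the shmem callbacks into its
 * &drm_driver through the DRM_GEM_SHMEM_DRIVER_OPS convenience macro from
 * <drm/drm_gem_shmem_helper.h>, which fills in &drm_driver.dumb_create and
 * &drm_driver.gem_prime_import_sg_table, and then allocates buffers with
 * drm_gem_shmem_create():
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 *
 *	struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);
 *
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */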

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
		       struct vfsmount *gemfs)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

/**
 * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
 * given mountpoint
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @gemfs: tmpfs mount where the GEM object will be created
 *
 * This function creates a shmem GEM object in a given tmpfs mountpoint.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
							   size_t size,
							   struct vfsmount *gemfs)
{
	return __drm_gem_shmem_create(dev, size, false, gemfs);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_gem_is_imported(obj)) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages_locked(shmem);

		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_inc_not_zero(&shmem->pages_use_count))
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	refcount_set(&shmem->pages_use_count, 1);

	return 0;
}

/*
 * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
		if (shmem->map_wc)
			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

		drm_gem_put_pages(obj, shmem->pages,
				  shmem->pages_mark_dirty_on_put,
				  shmem->pages_mark_accessed_on_put);
		shmem->pages = NULL;
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (!ret)
		refcount_set(&shmem->pages_pin_count, 1);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	if (refcount_dec_and_test(&shmem->pages_pin_count))
		drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_inc_not_zero(&shmem->pages_pin_count))
		return 0;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	if (refcount_dec_not_one(&shmem->pages_pin_count))
		return;

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
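
/*
 * A usage sketch (hypothetical foo_hw_dma_to()): a driver that needs the
 * backing pages to stay resident across a device operation brackets it with
 * the unlocked pin/unpin pair, which take the reservation lock internally:
 *
 *	int ret = drm_gem_shmem_pin(shmem);
 *
 *	if (ret)
 *		return ret;
 *
 *	foo_hw_dma_to(shmem);	// pages cannot be released while pinned
 *
 *	drm_gem_shmem_unpin(shmem);
 */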

/*
 * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
			      struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (drm_gem_is_imported(obj)) {
		ret = dma_buf_vmap(obj->dma_buf, map);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_pin_locked(shmem);
		if (ret)
			return ret;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr) {
			ret = -ENOMEM;
		} else {
			iosys_map_set_vaddr(map, shmem->vaddr);
			refcount_set(&shmem->vmap_use_count, 1);
		}
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!drm_gem_is_imported(obj))
		drm_gem_shmem_unpin_locked(shmem);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);

/*
 * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
 * drops to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
				 struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_gem_is_imported(obj)) {
		dma_buf_vunmap(obj->dma_buf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
			vunmap(shmem->vaddr);
			shmem->vaddr = NULL;

			drm_gem_shmem_unpin_locked(shmem);
		}
	}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
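
/*
 * A minimal CPU-access sketch; "data" and "len" are placeholders. The locked
 * vmap/vunmap pair requires the caller to hold the object's reservation lock:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap_locked(shmem, &map);
 *	if (!ret) {
 *		iosys_map_memcpy_to(&map, 0, data, len);
 *		drm_gem_shmem_vunmap_locked(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */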

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the returned handle holds that ID, which userspace uses to refer
	 * to the object.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/*
 * Update madvise status; returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);
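
/*
 * A hedged sketch of the madvise/purge flow as a driver shrinker might
 * implement it: userspace first marks the object as disposable (madv > 0,
 * e.g. from a driver ioctl), and a later memory-pressure pass purges it if
 * drm_gem_shmem_is_purgeable() still says it is safe to do so:
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	drm_gem_shmem_madvise_locked(shmem, 1);
 *	dma_resv_unlock(shmem->base.resv);
 *
 *	// later, under memory pressure:
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge_locked(shmem);
 *	dma_resv_unlock(shmem->base.resv);
 */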

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much memory as possible back to the
	 * system, as we are called from OOM. To do this we must instruct the
	 * shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer, rounding the row size
 * up to a whole number of bytes. Drivers for hardware that doesn't have any
 * additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
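
/*
 * Worked example of the pitch/size computation above for a hypothetical
 * 1024x768 dumb buffer at 32 bpp with 4 KiB pages:
 *
 *	min_pitch = DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes
 *	size      = PAGE_ALIGN(4096 * 768)     = 3145728 bytes (768 pages)
 */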

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	drm_WARN_ON_ONCE(obj->dev,
			 !refcount_inc_not_zero(&shmem->pages_use_count));

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (drm_gem_is_imported(obj)) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
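
/*
 * A wiring sketch (hypothetical "foo" names): drivers normally don't call
 * drm_gem_shmem_mmap() themselves. DEFINE_DRM_GEM_FOPS() routes userspace
 * mmap() through drm_gem_mmap(), which reaches this helper via the .mmap
 * callback in drm_gem_shmem_funcs above:
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		.fops = &foo_fops,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */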

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (drm_gem_is_imported(&shmem->base))
		return;

	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
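
/*
 * A consumption sketch (hypothetical foo_hw_program_page()): drivers usually
 * walk the DMA-mapped entries of the returned table to program their
 * hardware:
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		foo_hw_program_page(sg_dma_address(sg), sg_dma_len(sg));
 */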

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");