/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __DRM_GEM_SHMEM_HELPER_H__
#define __DRM_GEM_SHMEM_HELPER_H__

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>

#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

struct dma_buf_attachment;
struct drm_mode_create_dumb;
struct drm_printer;
struct sg_table;

/**
 * struct drm_gem_shmem_object - GEM object backed by shmem
 */
struct drm_gem_shmem_object {
	/**
	 * @base: Base GEM object
	 */
	struct drm_gem_object base;

	/**
	 * @pages: Page table
	 */
	struct page **pages;

	/**
	 * @pages_use_count:
	 *
	 * Reference count on the pages table.
	 * The pages are put when the count reaches zero.
	 */
	refcount_t pages_use_count;

	/**
	 * @pages_pin_count:
	 *
	 * Reference count on the pinned pages table.
	 *
	 * Pages are hard-pinned and reside in memory if the count is
	 * greater than zero. Otherwise, when the count is zero, the pages are
	 * allowed to be evicted and purged by the memory shrinker.
	 */
	refcount_t pages_pin_count;

	/**
	 * @madv: State for madvise
	 *
	 * 0 is active/inuse.
	 * A negative value means the object is purged.
	 * Positive values are driver specific and not used by the helpers.
	 */
	int madv;

	/**
	 * @madv_list: List entry for madvise tracking
	 *
	 * Typically used by drivers to track purgeable objects
	 */
	struct list_head madv_list;

	/**
	 * @sgt: Scatter/gather table for imported PRIME buffers
	 */
	struct sg_table *sgt;

	/**
	 * @vaddr: Kernel virtual address of the backing memory
	 */
	void *vaddr;

	/**
	 * @vmap_use_count:
	 *
	 * Reference count on the virtual address.
	 * The address is unmapped when the count reaches zero.
	 */
	refcount_t vmap_use_count;

	/**
	 * @pages_mark_dirty_on_put:
	 *
	 * Mark pages as dirty when they are put.
	 */
	bool pages_mark_dirty_on_put : 1;

	/**
	 * @pages_mark_accessed_on_put:
	 *
	 * Mark pages as accessed when they are put.
	 */
	bool pages_mark_accessed_on_put : 1;

	/**
	 * @map_wc: map object write-combined (instead of using shmem defaults).
	 */
	bool map_wc : 1;
};

#define to_drm_gem_shmem_obj(obj) \
	container_of(obj, struct drm_gem_shmem_object, base)
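
/*
 * Usage sketch (not part of the upstream API): drivers typically embed
 * &struct drm_gem_shmem_object in their own buffer-object type and use
 * a container_of()-style helper to convert back from the GEM object.
 * The names "foo_bo" and "to_foo_bo" below are hypothetical:
 *
 *	struct foo_bo {
 *		struct drm_gem_shmem_object base;
 *		bool mapped_to_gpu;
 *	};
 *
 *	static inline struct foo_bo *to_foo_bo(struct drm_gem_object *obj)
 *	{
 *		return container_of(to_drm_gem_shmem_obj(obj), struct foo_bo, base);
 *	}
 */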

struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
							   size_t size,
							   struct vfsmount *gemfs);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
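
/*
 * Creation sketch (hypothetical driver code, e.g. in a GEM-creation ioctl
 * where @file, @dev and @size come from the caller): objects are allocated
 * with drm_gem_shmem_create() and released through the normal GEM reference
 * count, which reaches drm_gem_shmem_free() via the object's
 * &drm_gem_object_funcs.free callback rather than by calling it directly:
 *
 *	struct drm_gem_shmem_object *shmem;
 *	u32 handle;
 *	int ret;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	ret = drm_gem_handle_create(file, &shmem->base, &handle);
 *	drm_gem_object_put(&shmem->base);
 *	if (ret)
 *		return ret;
 */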

void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
			      struct iosys_map *map);
void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
				 struct iosys_map *map);
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
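
/*
 * CPU-access sketch (hypothetical driver code; @data and @len are
 * placeholders): drm_gem_shmem_vmap_locked() and drm_gem_shmem_vunmap_locked()
 * expect the caller to hold the GEM object's reservation lock, while
 * drm_gem_shmem_pin() and drm_gem_shmem_unpin() take it internally:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap_locked(shmem, &map);
 *	dma_resv_unlock(shmem->base.resv);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, data, len);
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	drm_gem_shmem_vunmap_locked(shmem, &map);
 *	dma_resv_unlock(shmem->base.resv);
 */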

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem);

int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv);

static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
	return (shmem->madv > 0) &&
		!refcount_read(&shmem->pages_pin_count) && shmem->sgt &&
		!shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base);
}

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
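
/*
 * Reclaim sketch (hypothetical driver shrinker code): userspace hints are
 * recorded with drm_gem_shmem_madvise_locked(), and the backing pages of
 * unpinned objects with a positive @madv value can then be dropped under
 * the reservation lock:
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge_locked(shmem);
 *	dma_resv_unlock(shmem->base.resv);
 */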

struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
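
/*
 * Device-mapping sketch (hypothetical driver code; foo_mmu_map(), @mmu and
 * @iova are placeholders): drm_gem_shmem_get_pages_sgt() acquires the
 * backing pages and returns a scatter/gather table that is already mapped
 * for DMA on the DRM device, caching it in &drm_gem_shmem_object.sgt:
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i) {
 *		foo_mmu_map(mmu, iova, sg_dma_address(sg), sg_dma_len(sg));
 *		iova += sg_dma_len(sg);
 *	}
 */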

void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent);

extern const struct vm_operations_struct drm_gem_shmem_vm_ops;

/*
 * GEM object functions
 */

/**
 * drm_gem_shmem_object_free - GEM object function for drm_gem_shmem_free()
 * @obj: GEM object to free
 *
 * This function wraps drm_gem_shmem_free(). Drivers that employ the shmem helpers
 * should use it as their &drm_gem_object_funcs.free handler.
 */
static inline void drm_gem_shmem_object_free(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_free(shmem);
}

/**
 * drm_gem_shmem_object_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_print_info(). Drivers that employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.print_info handler.
 */
static inline void drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent,
						   const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_print_info(shmem, p, indent);
}

/**
 * drm_gem_shmem_object_pin - GEM object function for drm_gem_shmem_pin()
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_pin(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.pin handler.
 */
static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_pin_locked(shmem);
}

/**
 * drm_gem_shmem_object_unpin - GEM object function for drm_gem_shmem_unpin()
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_unpin(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.unpin handler.
 */
static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_unpin_locked(shmem);
}

/**
 * drm_gem_shmem_object_get_sg_table - GEM object function for drm_gem_shmem_get_sg_table()
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_get_sg_table(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.get_sg_table handler.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
 */
static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_get_sg_table(shmem);
}

/*
 * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked()
 * @obj: GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
 *
 * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem
 * helpers should use it as their &drm_gem_object_funcs.vmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
					    struct iosys_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_vmap_locked(shmem, map);
}

/*
 * drm_gem_shmem_object_vunmap - GEM object function for drm_gem_shmem_vunmap_locked()
 * @obj: GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem
 * helpers should use it as their &drm_gem_object_funcs.vunmap handler.
 */
static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
					       struct iosys_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_vunmap_locked(shmem, map);
}

/**
 * drm_gem_shmem_object_mmap - GEM object function for drm_gem_shmem_mmap()
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function wraps drm_gem_shmem_mmap(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_mmap(shmem, vma);
}
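
/*
 * Wiring sketch (hypothetical driver code): the drm_gem_shmem_object_*()
 * wrappers above are intended to be plugged into a driver's
 * &drm_gem_object_funcs, for example:
 *
 *	static const struct drm_gem_object_funcs foo_gem_funcs = {
 *		.free		= drm_gem_shmem_object_free,
 *		.print_info	= drm_gem_shmem_object_print_info,
 *		.pin		= drm_gem_shmem_object_pin,
 *		.unpin		= drm_gem_shmem_object_unpin,
 *		.get_sg_table	= drm_gem_shmem_object_get_sg_table,
 *		.vmap		= drm_gem_shmem_object_vmap,
 *		.vunmap		= drm_gem_shmem_object_vunmap,
 *		.mmap		= drm_gem_shmem_object_mmap,
 *		.vm_ops		= &drm_gem_shmem_vm_ops,
 *	};
 */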

/*
 * Driver ops
 */

struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args);
struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
							 struct dma_buf *buf);

/**
 * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
 *
 * This macro provides a shortcut for setting the shmem GEM operations
 * in the &drm_driver structure. Drivers that do not require an s/g table
 * for imported buffers should use this.
 */
#define DRM_GEM_SHMEM_DRIVER_OPS \
	.gem_prime_import       = drm_gem_shmem_prime_import_no_map, \
	.dumb_create            = drm_gem_shmem_dumb_create
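
/*
 * Driver-structure sketch (hypothetical driver code): the macro expands to
 * the two &drm_driver callbacks above, so a shmem-based driver typically
 * only adds it to its &drm_driver definition:
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		.name			= "foo",
 *		...
 *	};
 */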

#endif /* __DRM_GEM_SHMEM_HELPER_H__ */