// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_gem.h"
#include "pvr_vm.h"

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include <linux/compiler.h>
#include <linux/compiler_attributes.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iosys-map.h>
#include <linux/log2.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/property.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>

static void pvr_gem_object_free(struct drm_gem_object *obj)
{
	drm_gem_shmem_object_free(obj);
}

static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
{
	struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);

	if (!(pvr_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS))
		return -EINVAL;

	return drm_gem_shmem_mmap(shmem_obj, vma);
}

static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
	.free = pvr_gem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = pvr_gem_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * pvr_gem_object_flags_validate() - Verify that a collection of PowerVR GEM
 * mapping and/or creation flags form a valid combination.
 * @flags: PowerVR GEM mapping/creation flags to validate.
 *
 * This function explicitly allows kernel-only flags. All ioctl entrypoints
 * should do their own validation as well as relying on this function.
 *
 * Return:
 * * %true if @flags contains valid mapping and/or creation flags, or
 * * %false otherwise.
 */
static bool
pvr_gem_object_flags_validate(u64 flags)
{
	static const u64 invalid_combinations[] = {
		/*
		 * Memory flagged as PM/FW-protected cannot be mapped to
		 * userspace. To make this explicit, we require that the two
		 * flags allowing each of these respective features are never
		 * specified together.
		 */
		(DRM_PVR_BO_PM_FW_PROTECT |
		 DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS),
	};

	/*
	 * Check for bits set in undefined regions. Reserved regions refer to
	 * options that can only be set by the kernel. These are explicitly
	 * allowed in most cases, and must be checked specifically in IOCTL
	 * callback code.
	 */
	if ((flags & PVR_BO_UNDEFINED_MASK) != 0)
		return false;

	/*
	 * Check for all combinations of flags marked as invalid in the array
	 * above.
	 */
	for (int i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
		u64 combo = invalid_combinations[i];

		if ((flags & combo) == combo)
			return false;
	}

	return true;
}
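
/*
 * Illustrative example (not part of the driver): the combination below is
 * rejected because PM/FW-protected memory may never be exposed to userspace,
 * so pvr_gem_object_flags_validate() returns false for it.
 *
 *	pvr_gem_object_flags_validate(DRM_PVR_BO_PM_FW_PROTECT |
 *				      DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS);
 */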

/**
 * pvr_gem_object_into_handle() - Convert a reference to an object into a
 * userspace-accessible handle.
 * @pvr_obj: [IN] Target PowerVR-specific object.
 * @pvr_file: [IN] File to associate the handle with.
 * @handle: [OUT] Pointer to store the created handle in. Remains unmodified if
 * an error is encountered.
 *
 * If an error is encountered, ownership of @pvr_obj will not have been
 * transferred. If this function succeeds, however, further use of @pvr_obj is
 * considered undefined behaviour unless another reference to it is explicitly
 * held.
 *
 * Return:
 * * 0 on success, or
 * * Any error encountered while attempting to allocate a handle on @pvr_file.
 */
int
pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
			   struct pvr_file *pvr_file, u32 *handle)
{
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct drm_file *file = from_pvr_file(pvr_file);

	u32 new_handle;
	int err;

	err = drm_gem_handle_create(file, gem_obj, &new_handle);
	if (err)
		return err;

	/*
	 * Release our reference to @pvr_obj, effectively transferring
	 * ownership to the handle.
	 */
	pvr_gem_object_put(pvr_obj);

	/*
	 * Do not store the new handle in @handle until no more errors can
	 * occur.
	 */
	*handle = new_handle;

	return 0;
}
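
/*
 * Usage sketch (illustrative only, not part of this file): a buffer-create
 * ioctl would typically pair pvr_gem_object_create() with this helper. On
 * success the handle owns the reference; on failure the caller still owns it
 * and must drop it. The surrounding ioctl arguments are assumed here.
 *
 *	pvr_obj = pvr_gem_object_create(pvr_dev, args->size, args->flags);
 *	if (IS_ERR(pvr_obj))
 *		return PTR_ERR(pvr_obj);
 *
 *	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
 *	if (err)
 *		pvr_gem_object_put(pvr_obj);
 *	return err;
 */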

/**
 * pvr_gem_object_from_handle() - Obtain a reference to an object from a
 * userspace handle.
 * @pvr_file: PowerVR-specific file to which @handle is associated.
 * @handle: Userspace handle referencing the target object.
 *
 * On return, @handle always maintains its reference to the requested object
 * (if it had one in the first place). If this function succeeds, the returned
 * object will hold an additional reference. When the caller is finished with
 * the returned object, they should call pvr_gem_object_put() on it to release
 * this reference.
 *
 * Return:
 * * A pointer to the requested PowerVR-specific object on success, or
 * * %NULL otherwise.
 */
struct pvr_gem_object *
pvr_gem_object_from_handle(struct pvr_file *pvr_file, u32 handle)
{
	struct drm_file *file = from_pvr_file(pvr_file);
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file, handle);
	if (!gem_obj)
		return NULL;

	return gem_to_pvr_gem(gem_obj);
}
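
/*
 * Usage sketch (illustrative only): callers look an object up for the
 * duration of an operation and drop the extra reference when finished. The
 * argument names are assumed.
 *
 *	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
 *	if (!pvr_obj)
 *		return -EINVAL;
 *
 *	... operate on pvr_obj ...
 *
 *	pvr_gem_object_put(pvr_obj);
 */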

/**
 * pvr_gem_object_vmap() - Map a PowerVR GEM object into CPU virtual address
 * space.
 * @pvr_obj: Target PowerVR GEM object.
 *
 * Once the caller is finished with the CPU mapping, they must call
 * pvr_gem_object_vunmap() on @pvr_obj.
 *
 * If @pvr_obj is CPU-cached, dma_sync_sgtable_for_cpu() is called to make
 * sure the CPU mapping is consistent.
 *
 * Return:
 * * A pointer to the CPU mapping on success,
 * * -%ENOMEM if the mapping fails, or
 * * Any error encountered while attempting to acquire a reference to the
 * backing pages for @pvr_obj.
 */
void *
pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
{
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
	struct iosys_map map;
	int err;

	dma_resv_lock(obj->resv, NULL);

	err = drm_gem_shmem_vmap_locked(shmem_obj, &map);
	if (err)
		goto err_unlock;

	if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
		struct device *dev = shmem_obj->base.dev->dev;

		/* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
		 * in GPU space yet.
		 */
		if (shmem_obj->sgt)
			dma_sync_sgtable_for_cpu(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
	}

	dma_resv_unlock(obj->resv);

	return map.vaddr;

err_unlock:
	dma_resv_unlock(obj->resv);

	return ERR_PTR(err);
}

/**
 * pvr_gem_object_vunmap() - Unmap a PowerVR memory object from CPU virtual
 * address space.
 * @pvr_obj: Target PowerVR GEM object.
 *
 * If @pvr_obj is CPU-cached, dma_sync_sgtable_for_device() is called to make
 * sure the GPU mapping is consistent.
 */
void
pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
{
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(shmem_obj->vaddr);
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);

	if (WARN_ON(!map.vaddr))
		return;

	dma_resv_lock(obj->resv, NULL);

	if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
		struct device *dev = shmem_obj->base.dev->dev;

		/* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
		 * in GPU space yet.
		 */
		if (shmem_obj->sgt)
			dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
	}

	drm_gem_shmem_vunmap_locked(shmem_obj, &map);

	dma_resv_unlock(obj->resv);
}
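
/*
 * Usage sketch (illustrative only): CPU access to an object's contents is
 * bracketed by the two helpers above, which take care of any cache
 * maintenance a CPU-cached object needs. The data/size variables are assumed.
 *
 *	void *cpu_ptr = pvr_gem_object_vmap(pvr_obj);
 *
 *	if (IS_ERR(cpu_ptr))
 *		return PTR_ERR(cpu_ptr);
 *
 *	memcpy(cpu_ptr, data, size);
 *	pvr_gem_object_vunmap(pvr_obj);
 */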

/**
 * pvr_gem_object_zero() - Zeroes the physical memory behind an object.
 * @pvr_obj: Target PowerVR GEM object.
 *
 * Return:
 * * 0 on success, or
 * * Any error encountered while attempting to map @pvr_obj to the CPU (see
 * pvr_gem_object_vmap()).
 */
static int
pvr_gem_object_zero(struct pvr_gem_object *pvr_obj)
{
	void *cpu_ptr;

	cpu_ptr = pvr_gem_object_vmap(pvr_obj);
	if (IS_ERR(cpu_ptr))
		return PTR_ERR(cpu_ptr);

	memset(cpu_ptr, 0, pvr_gem_object_size(pvr_obj));

	/* Make sure the zero-ing is done before vunmap-ing the object. */
	wmb();

	pvr_gem_object_vunmap(pvr_obj);

	return 0;
}

/**
 * pvr_gem_create_object() - Allocates and pre-initializes a pvr_gem_object.
 * @drm_dev: DRM device creating this object.
 * @size: Size of the object to allocate in bytes.
 *
 * Return:
 * * The new pre-initialized GEM object on success, or
 * * -%ENOMEM if the allocation failed.
 */
struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t size)
{
	struct drm_gem_object *gem_obj;
	struct pvr_gem_object *pvr_obj;

	pvr_obj = kzalloc(sizeof(*pvr_obj), GFP_KERNEL);
	if (!pvr_obj)
		return ERR_PTR(-ENOMEM);

	gem_obj = gem_from_pvr_gem(pvr_obj);
	gem_obj->funcs = &pvr_gem_object_funcs;

	return gem_obj;
}

/**
 * pvr_gem_object_create() - Creates a PowerVR-specific buffer object.
 * @pvr_dev: Target PowerVR device.
 * @size: Size of the object to allocate in bytes. Must be greater than zero.
 * Any value which is not an exact multiple of the system page size will be
 * rounded up to satisfy this condition.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 *
 * The created object may be larger than @size, but can never be smaller. To
 * get the exact size, call pvr_gem_object_size() on the returned pointer.
 *
 * Return:
 * * The newly-minted PowerVR-specific buffer object on success,
 * * -%EINVAL if @size is zero or @flags is not valid,
 * * -%ENOMEM if sufficient physical memory cannot be allocated, or
 * * Any other error returned by drm_gem_create_mmap_offset().
 */
struct pvr_gem_object *
pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct drm_gem_shmem_object *shmem_obj;
	struct pvr_gem_object *pvr_obj;
	struct sg_table *sgt;
	int err;

	/* Verify @size and @flags before continuing. */
	if (size == 0 || !pvr_gem_object_flags_validate(flags))
		return ERR_PTR(-EINVAL);

	if (device_get_dma_attr(drm_dev->dev) == DEV_DMA_COHERENT)
		flags |= PVR_BO_CPU_CACHED;

	shmem_obj = drm_gem_shmem_create(drm_dev, size);
	if (IS_ERR(shmem_obj))
		return ERR_CAST(shmem_obj);

	shmem_obj->pages_mark_dirty_on_put = true;
	shmem_obj->map_wc = !(flags & PVR_BO_CPU_CACHED);
	pvr_obj = shmem_gem_to_pvr_gem(shmem_obj);
	pvr_obj->flags = flags;

	sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
	if (IS_ERR(sgt)) {
		err = PTR_ERR(sgt);
		goto err_shmem_object_free;
	}

	dma_sync_sgtable_for_device(drm_dev->dev, sgt, DMA_BIDIRECTIONAL);

	/*
	 * Do this last because pvr_gem_object_zero() requires a fully
	 * configured instance of struct pvr_gem_object.
	 */
	pvr_gem_object_zero(pvr_obj);

	return pvr_obj;

err_shmem_object_free:
	drm_gem_shmem_free(shmem_obj);

	return ERR_PTR(err);
}
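
/*
 * Usage sketch (illustrative only): the returned object may be larger than
 * the requested size because the allocation is rounded up to whole pages, so
 * callers query the actual size with pvr_gem_object_size(). The obj_size
 * variable and flag choice below are assumptions for the example.
 *
 *	pvr_obj = pvr_gem_object_create(pvr_dev, 1, DRM_PVR_BO_PM_FW_PROTECT);
 *	if (IS_ERR(pvr_obj))
 *		return PTR_ERR(pvr_obj);
 *
 *	obj_size = pvr_gem_object_size(pvr_obj);
 */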

/**
 * pvr_gem_get_dma_addr() - Get DMA address for given offset in object
 * @pvr_obj: Pointer to object to lookup address in.
 * @offset: Offset within object to lookup address at.
 * @dma_addr_out: Pointer to location to store DMA address.
 *
 * Return:
 * * 0 on success, or
 * * -%EINVAL if object is not currently backed, or if @offset is out of valid
 * range for this object.
 */
int
pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
		     dma_addr_t *dma_addr_out)
{
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
	u32 accumulated_offset = 0;
	struct scatterlist *sgl;
	unsigned int sgt_idx;

	WARN_ON(!shmem_obj->sgt);
	for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, sgt_idx) {
		u32 new_offset = accumulated_offset + sg_dma_len(sgl);

		if (offset >= accumulated_offset && offset < new_offset) {
			*dma_addr_out = sg_dma_address(sgl) +
					(offset - accumulated_offset);
			return 0;
		}

		accumulated_offset = new_offset;
	}

	return -EINVAL;
}
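
/*
 * Usage sketch (illustrative only): translate a byte offset within a backed
 * object into a DMA address for use by the device. The offset variable is
 * assumed.
 *
 *	dma_addr_t addr;
 *	int err = pvr_gem_get_dma_addr(pvr_obj, offset, &addr);
 *
 *	if (err)
 *		return err;
 */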