// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_context.h"
#include "pvr_debugfs.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_free_list.h"
#include "pvr_gem.h"
#include "pvr_hwrt.h"
#include "pvr_job.h"
#include "pvr_mmu.h"
#include "pvr_power.h"
#include "pvr_rogue_defs.h"
#include "pvr_rogue_fwif_client.h"
#include "pvr_rogue_fwif_shared.h"
#include "pvr_vm.h"

#include <uapi/drm/pvr_drm.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

#include <linux/err.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/xarray.h>

/**
 * DOC: PowerVR (Series 6 and later) and IMG Graphics Driver
 *
 * This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
 *
 * * AXE-1-16M (found in Texas Instruments AM62)
 * * BXS-4-64 MC1 (found in Texas Instruments J721S2/AM68)
 */

/**
 * pvr_ioctl_create_bo() - IOCTL to create a GEM buffer object.
 * @drm_dev: [IN] Target DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_create_bo_args.
 * @file: [IN] DRM file-private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_BO.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if the value of &drm_pvr_ioctl_create_bo_args.size is zero
 *    or wider than &typedef size_t,
 *  * -%EINVAL if any bits in &drm_pvr_ioctl_create_bo_args.flags that are
 *    reserved or undefined are set,
 *  * -%EINVAL if any padding fields in &drm_pvr_ioctl_create_bo_args are not
 *    zero,
 *  * Any error encountered while creating the object (see
 *    pvr_gem_object_create()), or
 *  * Any error encountered while transferring ownership of the object into a
 *    userspace-accessible handle (see pvr_gem_object_into_handle()).
 */
static int
pvr_ioctl_create_bo(struct drm_device *drm_dev, void *raw_args,
		    struct drm_file *file)
{
	struct drm_pvr_ioctl_create_bo_args *args = raw_args;
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file = to_pvr_file(file);

	struct pvr_gem_object *pvr_obj;
	size_t sanitized_size;

	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* All padding fields must be zeroed. */
	if (args->_padding_c != 0) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	/*
	 * On 64-bit platforms (our primary target), size_t is a u64. However,
	 * on other architectures we have to check for overflow when casting
	 * down to size_t from u64.
	 *
	 * We also disallow zero-sized allocations, and reserved (kernel-only)
	 * flags.
	 */
	if (args->size > SIZE_MAX || args->size == 0 || args->flags &
	    ~DRM_PVR_BO_FLAGS_MASK || args->size & (PVR_DEVICE_PAGE_SIZE - 1)) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	sanitized_size = (size_t)args->size;

	/*
	 * Create a buffer object and transfer ownership to a userspace-
	 * accessible handle.
	 */
	pvr_obj = pvr_gem_object_create(pvr_dev, sanitized_size, args->flags);
	if (IS_ERR(pvr_obj)) {
		err = PTR_ERR(pvr_obj);
		goto err_drm_dev_exit;
	}

	/* This function will not modify &args->handle unless it succeeds. */
	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
	if (err)
		goto err_destroy_obj;

	drm_dev_exit(idx);

	return 0;

err_destroy_obj:
	/*
	 * GEM objects are refcounted, so there is no explicit destructor
	 * function. Instead, we release the singular reference we currently
	 * hold on the object and let GEM take care of the rest.
	 */
	pvr_gem_object_put(pvr_obj);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}
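
/*
 * Example (illustrative sketch, not part of the driver): minimal userspace
 * use of %DRM_IOCTL_PVR_CREATE_BO. Assumes `fd` is an open PowerVR render
 * node and that the device page size is 4KiB, so a 4KiB request satisfies
 * the alignment check above:
 *
 *	struct drm_pvr_ioctl_create_bo_args bo_args = {
 *		.size = 4096,
 *		.flags = 0,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_PVR_CREATE_BO, &bo_args) != 0)
 *		return -1;
 *	// On success, bo_args.handle holds the new GEM handle.
 */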

/**
 * pvr_ioctl_get_bo_mmap_offset() - IOCTL to generate a "fake" offset to be
 * used when calling mmap() from userspace to map the given GEM buffer object
 * @drm_dev: [IN] DRM device (unused).
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_get_bo_mmap_offset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET.
 *
 * This IOCTL does *not* perform an mmap. See the docs on
 * &struct drm_pvr_ioctl_get_bo_mmap_offset_args for details.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOENT if the handle does not reference a valid GEM buffer object,
 *  * -%EINVAL if any padding fields in &struct
 *    drm_pvr_ioctl_get_bo_mmap_offset_args are not zero, or
 *  * Any error returned by drm_gem_create_mmap_offset().
 */
static int
pvr_ioctl_get_bo_mmap_offset(struct drm_device *drm_dev, void *raw_args,
			     struct drm_file *file)
{
	struct drm_pvr_ioctl_get_bo_mmap_offset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_gem_object *pvr_obj;
	struct drm_gem_object *gem_obj;
	int idx;
	int ret;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* All padding fields must be zeroed. */
	if (args->_padding_4 != 0) {
		ret = -EINVAL;
		goto err_drm_dev_exit;
	}

	/*
	 * Obtain a kernel reference to the buffer object. This reference is
	 * counted and must be manually dropped before returning. If a buffer
	 * object cannot be found for the specified handle, return -%ENOENT (No
	 * such file or directory).
	 */
	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
	if (!pvr_obj) {
		ret = -ENOENT;
		goto err_drm_dev_exit;
	}

	gem_obj = gem_from_pvr_gem(pvr_obj);

	/*
	 * Allocate a fake offset which can be used in userspace calls to mmap
	 * on the DRM device file. If this fails, return the error code. This
	 * operation is idempotent.
	 */
	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret != 0) {
		/* Drop our reference to the buffer object. */
		drm_gem_object_put(gem_obj);
		goto err_drm_dev_exit;
	}

	/*
	 * Read out the fake offset allocated by the earlier call to
	 * drm_gem_create_mmap_offset.
	 */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	/* Drop our reference to the buffer object. */
	pvr_gem_object_put(pvr_obj);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return ret;
}
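
/*
 * Example (illustrative sketch): mapping a buffer object created in the
 * previous sketch. Assumes `fd` and `bo_args.handle` from above. The
 * returned offset is only meaningful as the offset argument to mmap() on
 * the DRM file descriptor:
 *
 *	struct drm_pvr_ioctl_get_bo_mmap_offset_args off_args = {
 *		.handle = bo_args.handle,
 *	};
 *	void *map;
 *
 *	if (ioctl(fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &off_args) != 0)
 *		return -1;
 *
 *	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, off_args.offset);
 */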

static __always_inline __maybe_unused u64
pvr_fw_version_packed(u32 major, u32 minor)
{
	return ((u64)major << 32) | minor;
}
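
/*
 * For example, pvr_fw_version_packed(36, 53) yields 0x2400000035, so packed
 * versions compare correctly with plain integer comparison operators.
 */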

static u32
rogue_get_common_store_partition_space_size(struct pvr_device *pvr_dev)
{
	u32 max_partitions = 0;
	u32 tile_size_x = 0;
	u32 tile_size_y = 0;

	PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &tile_size_x);
	PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &tile_size_y);
	PVR_FEATURE_VALUE(pvr_dev, max_partitions, &max_partitions);

	if (tile_size_x == 16 && tile_size_y == 16) {
		u32 usc_min_output_registers_per_pix = 0;

		PVR_FEATURE_VALUE(pvr_dev, usc_min_output_registers_per_pix,
				  &usc_min_output_registers_per_pix);

		return tile_size_x * tile_size_y * max_partitions *
		       usc_min_output_registers_per_pix;
	}

	return max_partitions * 1024;
}
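
/*
 * Worked example with illustrative feature values (not taken from any real
 * BVNC): a core with 16x16 tiles, max_partitions = 8 and
 * usc_min_output_registers_per_pix = 2 gets 16 * 16 * 8 * 2 = 4096 dwords of
 * partition space; a core with any other tile size and max_partitions = 8
 * gets 8 * 1024 = 8192 dwords.
 */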

static u32
rogue_get_common_store_alloc_region_size(struct pvr_device *pvr_dev)
{
	u32 common_store_size_in_dwords = 512 * 4 * 4;
	u32 alloc_region_size;

	PVR_FEATURE_VALUE(pvr_dev, common_store_size_in_dwords, &common_store_size_in_dwords);

	alloc_region_size = common_store_size_in_dwords - (256U * 4U) -
			    rogue_get_common_store_partition_space_size(pvr_dev);

	if (PVR_HAS_QUIRK(pvr_dev, 44079)) {
		u32 common_store_split_point = (768U * 4U * 4U);

		return min(common_store_split_point - (256U * 4U), alloc_region_size);
	}

	return alloc_region_size;
}

static inline u32
rogue_get_num_phantoms(struct pvr_device *pvr_dev)
{
	u32 num_clusters = 1;

	PVR_FEATURE_VALUE(pvr_dev, num_clusters, &num_clusters);

	return ROGUE_REQ_NUM_PHANTOMS(num_clusters);
}

static inline u32
rogue_get_max_coeffs(struct pvr_device *pvr_dev)
{
	u32 max_coeff_additional_portion = ROGUE_MAX_VERTEX_SHARED_REGISTERS;
	u32 pending_allocation_shared_regs = 2U * 1024U;
	u32 pending_allocation_coeff_regs = 0U;
	u32 num_phantoms = rogue_get_num_phantoms(pvr_dev);
	u32 tiles_in_flight = 0;
	u32 max_coeff_pixel_portion;

	PVR_FEATURE_VALUE(pvr_dev, isp_max_tiles_in_flight, &tiles_in_flight);
	max_coeff_pixel_portion = DIV_ROUND_UP(tiles_in_flight, num_phantoms);
	max_coeff_pixel_portion *= ROGUE_MAX_PIXEL_SHARED_REGISTERS;

	/*
	 * Compute tasks on cores with BRN48492 and without compute overlap may lock
	 * up without two additional lines of coeffs.
	 */
	if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap))
		pending_allocation_coeff_regs = 2U * 1024U;

	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38748))
		pending_allocation_shared_regs = 0;

	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38020))
		max_coeff_additional_portion += ROGUE_MAX_COMPUTE_SHARED_REGISTERS;

	return rogue_get_common_store_alloc_region_size(pvr_dev) + pending_allocation_coeff_regs -
		(max_coeff_pixel_portion + max_coeff_additional_portion +
		 pending_allocation_shared_regs);
}

static inline u32
rogue_get_cdm_max_local_mem_size_regs(struct pvr_device *pvr_dev)
{
	u32 available_coeffs_in_dwords = rogue_get_max_coeffs(pvr_dev);

	if (PVR_HAS_QUIRK(pvr_dev, 48492) && PVR_HAS_FEATURE(pvr_dev, roguexe) &&
	    !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) {
		/* Driver must not use the 2 reserved lines. */
		available_coeffs_in_dwords -= ROGUE_CSRM_LINE_SIZE_IN_DWORDS * 2;
	}

	/*
	 * The maximum amount of local memory available to a kernel is the minimum
	 * of the total number of coefficient registers available and the max common
	 * store allocation size which can be made by the CDM.
	 *
	 * If any coeff lines are reserved for tessellation or pixel then we need to
	 * subtract those too.
	 */
	return min(available_coeffs_in_dwords, (u32)ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS);
}

/**
 * pvr_dev_query_gpu_info_get()
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_gpu_info.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 *
 * Returns:
 *  * 0 on success, or if size is requested using a NULL pointer, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_gpu_info_get(struct pvr_device *pvr_dev,
			   struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_gpu_info gpu_info = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_gpu_info);
		return 0;
	}

	gpu_info.gpu_id =
		pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id);
	gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev);

	err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info);
	if (err < 0)
		return err;

	if (args->size > sizeof(gpu_info))
		args->size = sizeof(gpu_info);
	return 0;
}
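
/*
 * Example (illustrative sketch): the two-step pattern userspace uses with
 * %DRM_IOCTL_PVR_DEV_QUERY. A NULL pointer asks the kernel for the expected
 * struct size; a second call fetches the data. Assumes `fd` is an open
 * render node:
 *
 *	struct drm_pvr_dev_query_gpu_info gpu_info;
 *	struct drm_pvr_ioctl_dev_query_args query = {
 *		.type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
 *		.pointer = 0,	// size probe
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &query) != 0)
 *		return -1;
 *	// query.size now holds the kernel's expected struct size.
 *
 *	query.pointer = (uintptr_t)&gpu_info;
 *	query.size = sizeof(gpu_info);
 *	if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &query) != 0)
 *		return -1;
 */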

/**
 * pvr_dev_query_runtime_info_get()
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_runtime_info.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 *
 * Returns:
 *  * 0 on success, or if size is requested using a NULL pointer, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_runtime_info_get(struct pvr_device *pvr_dev,
			       struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_runtime_info runtime_info = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_runtime_info);
		return 0;
	}

	runtime_info.free_list_min_pages =
		pvr_get_free_list_min_pages(pvr_dev);
	runtime_info.free_list_max_pages =
		ROGUE_PM_MAX_FREELIST_SIZE / ROGUE_PM_PAGE_SIZE;
	runtime_info.common_store_alloc_region_size =
		rogue_get_common_store_alloc_region_size(pvr_dev);
	runtime_info.common_store_partition_space_size =
		rogue_get_common_store_partition_space_size(pvr_dev);
	runtime_info.max_coeffs = rogue_get_max_coeffs(pvr_dev);
	runtime_info.cdm_max_local_mem_size_regs =
		rogue_get_cdm_max_local_mem_size_regs(pvr_dev);

	err = PVR_UOBJ_SET(args->pointer, args->size, runtime_info);
	if (err < 0)
		return err;

	if (args->size > sizeof(runtime_info))
		args->size = sizeof(runtime_info);
	return 0;
}

/**
 * pvr_dev_query_quirks_get() - Unpack the array of quirks at the address
 * given in a struct drm_pvr_dev_query_quirks, or get the amount of space
 * required for it.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_quirks.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 * If the userspace pointer in the query object is NULL, or the count is
 * short, no data is copied.
 * The count field will be updated to that copied, or if either pointer is
 * NULL, that which would have been copied.
 * The size field in the query object will be updated to the size copied.
 *
 * Returns:
 *  * 0 on success, or if size/count is requested using a NULL pointer, or
 *  * -%EINVAL if args contained non-zero reserved fields, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_quirks_get(struct pvr_device *pvr_dev,
			 struct drm_pvr_ioctl_dev_query_args *args)
{
	/*
	 * @FIXME - hardcoding of numbers here is intended as an
	 * intermediate step so the UAPI can be fixed, but requires a
	 * refactor in the future to store them in a more appropriate
	 * location
	 */
	static const u32 umd_quirks_musthave[] = {
		47217,
		49927,
		62269,
	};
	static const u32 umd_quirks[] = {
		48545,
		51764,
	};
	struct drm_pvr_dev_query_quirks query;
	u32 out[ARRAY_SIZE(umd_quirks_musthave) + ARRAY_SIZE(umd_quirks)];
	size_t out_musthave_count = 0;
	size_t out_count = 0;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_quirks);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);

	if (err < 0)
		return err;
	if (query._padding_c)
		return -EINVAL;

	for (int i = 0; i < ARRAY_SIZE(umd_quirks_musthave); i++) {
		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks_musthave[i])) {
			out[out_count++] = umd_quirks_musthave[i];
			out_musthave_count++;
		}
	}

	for (int i = 0; i < ARRAY_SIZE(umd_quirks); i++) {
		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks[i]))
			out[out_count++] = umd_quirks[i];
	}

	if (!query.quirks)
		goto copy_out;
	if (query.count < out_count)
		return -E2BIG;

	if (copy_to_user(u64_to_user_ptr(query.quirks), out,
			 out_count * sizeof(u32))) {
		return -EFAULT;
	}

	query.musthave_count = out_musthave_count;

copy_out:
	query.count = out_count;
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}
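
/*
 * Example (illustrative sketch): querying the quirks list from userspace.
 * The first call leaves the array pointer at zero to learn the element
 * count; the second fetches the IDs. Assumes `fd` is an open render node:
 *
 *	struct drm_pvr_dev_query_quirks quirks = { 0 };
 *	struct drm_pvr_ioctl_dev_query_args query = {
 *		.type = DRM_PVR_DEV_QUERY_QUIRKS_GET,
 *		.pointer = (uintptr_t)&quirks,
 *		.size = sizeof(quirks),
 *	};
 *	uint32_t *ids;
 *
 *	if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &query) != 0)
 *		return -1;
 *	// quirks.count now holds the number of quirk IDs available.
 *
 *	ids = calloc(quirks.count, sizeof(*ids));
 *	quirks.quirks = (uintptr_t)ids;
 *	if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &query) != 0)
 *		return -1;
 *	// The first quirks.musthave_count entries of ids[] are must-have
 *	// quirks, since they are written to the output array first above.
 */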

/**
 * pvr_dev_query_enhancements_get() - Unpack the array of enhancements at the
 * address given in a struct drm_pvr_dev_query_enhancements, or get the amount
 * of space required for it.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_enhancements.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 * If the userspace pointer in the query object is NULL, or the count is
 * short, no data is copied.
 * The count field will be updated to that copied, or if either pointer is
 * NULL, that which would have been copied.
 * The size field in the query object will be updated to the size copied.
 *
 * Returns:
 *  * 0 on success, or if size/count is requested using a NULL pointer, or
 *  * -%EINVAL if args contained non-zero reserved fields, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_enhancements_get(struct pvr_device *pvr_dev,
			       struct drm_pvr_ioctl_dev_query_args *args)
{
	/*
	 * @FIXME - hardcoding of numbers here is intended as an
	 * intermediate step so the UAPI can be fixed, but requires a
	 * refactor in the future to store them in a more appropriate
	 * location
	 */
	const u32 umd_enhancements[] = {
		35421,
		42064,
	};
	struct drm_pvr_dev_query_enhancements query;
	u32 out[ARRAY_SIZE(umd_enhancements)];
	size_t out_idx = 0;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_enhancements);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);

	if (err < 0)
		return err;
	if (query._padding_a)
		return -EINVAL;
	if (query._padding_c)
		return -EINVAL;

	for (int i = 0; i < ARRAY_SIZE(umd_enhancements); i++) {
		if (pvr_device_has_uapi_enhancement(pvr_dev, umd_enhancements[i]))
			out[out_idx++] = umd_enhancements[i];
	}

	if (!query.enhancements)
		goto copy_out;
	if (query.count < out_idx)
		return -E2BIG;

	if (copy_to_user(u64_to_user_ptr(query.enhancements), out,
			 out_idx * sizeof(u32))) {
		return -EFAULT;
	}

copy_out:
	query.count = out_idx;
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}

/**
 * pvr_ioctl_dev_query() - IOCTL to copy information about a device
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_dev_query_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DEV_QUERY.
 * If the given receiving struct pointer is NULL, or the indicated size is too
 * small, the expected size of the struct type will be returned in the size
 * argument field.
 *
 * Return:
 *  * 0 on success or when fetching the size with args->pointer == NULL, or
 *  * -%E2BIG if the indicated size of the receiving struct is less than is
 *    required to contain the copied data, or
 *  * -%EINVAL if the indicated struct type is unknown, or
 *  * -%ENOMEM if local memory could not be allocated, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_ioctl_dev_query(struct drm_device *drm_dev, void *raw_args,
		    struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct drm_pvr_ioctl_dev_query_args *args = raw_args;
	int idx;
	int ret = -EINVAL;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	switch ((enum drm_pvr_dev_query)args->type) {
	case DRM_PVR_DEV_QUERY_GPU_INFO_GET:
		ret = pvr_dev_query_gpu_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET:
		ret = pvr_dev_query_runtime_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_QUIRKS_GET:
		ret = pvr_dev_query_quirks_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET:
		ret = pvr_dev_query_enhancements_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
		ret = pvr_heap_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET:
		ret = pvr_static_data_areas_get(pvr_dev, args);
		break;
	}

	drm_dev_exit(idx);

	return ret;
}

/**
 * pvr_ioctl_create_context() - IOCTL to create a context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if provided arguments are invalid, or
 *  * -%EFAULT if arguments can't be copied from userspace, or
 *  * Any error returned by pvr_context_create().
 */
static int
pvr_ioctl_create_context(struct drm_device *drm_dev, void *raw_args,
			 struct drm_file *file)
{
	struct drm_pvr_ioctl_create_context_args *args = raw_args;
	struct pvr_file *pvr_file = file->driver_priv;
	int idx;
	int ret;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	ret = pvr_context_create(pvr_file, args);

	drm_dev_exit(idx);

	return ret;
}

/**
 * pvr_ioctl_destroy_context() - IOCTL to destroy a context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_destroy_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if context not in context list.
 */
static int
pvr_ioctl_destroy_context(struct drm_device *drm_dev, void *raw_args,
			  struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_context_args *args = raw_args;
	struct pvr_file *pvr_file = file->driver_priv;

	if (args->_padding_4)
		return -EINVAL;

	return pvr_context_destroy(pvr_file, args->handle);
}

/**
 * pvr_ioctl_create_free_list() - IOCTL to create a free list
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_free_list_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_FREE_LIST.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_free_list_create().
 */
static int
pvr_ioctl_create_free_list(struct drm_device *drm_dev, void *raw_args,
			   struct drm_file *file)
{
	struct drm_pvr_ioctl_create_free_list_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_free_list *free_list;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	free_list = pvr_free_list_create(pvr_file, args);
	if (IS_ERR(free_list)) {
		err = PTR_ERR(free_list);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->free_list_handles,
		       &args->handle,
		       free_list,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_free_list_put(free_list);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_destroy_free_list() - IOCTL to destroy a free list
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_destroy_free_list_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_FREE_LIST.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if free list not in object list.
 */
static int
pvr_ioctl_destroy_free_list(struct drm_device *drm_dev, void *raw_args,
			    struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_free_list_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_free_list *free_list;

	if (args->_padding_4)
		return -EINVAL;

	free_list = xa_erase(&pvr_file->free_list_handles, args->handle);
	if (!free_list)
		return -EINVAL;

	pvr_free_list_put(free_list);
	return 0;
}

/**
 * pvr_ioctl_create_hwrt_dataset() - IOCTL to create a HWRT dataset
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_hwrt_dataset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_HWRT_DATASET.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_hwrt_dataset_create().
 */
static int
pvr_ioctl_create_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
			      struct drm_file *file)
{
	struct drm_pvr_ioctl_create_hwrt_dataset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_hwrt_dataset *hwrt;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	hwrt = pvr_hwrt_dataset_create(pvr_file, args);
	if (IS_ERR(hwrt)) {
		err = PTR_ERR(hwrt);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->hwrt_handles,
		       &args->handle,
		       hwrt,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_hwrt_dataset_put(hwrt);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_destroy_hwrt_dataset() - IOCTL to destroy a HWRT dataset
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_destroy_hwrt_dataset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if HWRT dataset not in object list.
 */
static int
pvr_ioctl_destroy_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
			       struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_hwrt_dataset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_hwrt_dataset *hwrt;

	if (args->_padding_4)
		return -EINVAL;

	hwrt = xa_erase(&pvr_file->hwrt_handles, args->handle);
	if (!hwrt)
		return -EINVAL;

	pvr_hwrt_dataset_put(hwrt);
	return 0;
}

/**
 * pvr_ioctl_create_vm_context() - IOCTL to create a VM context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_vm_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_VM_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_vm_create_context().
 */
static int
pvr_ioctl_create_vm_context(struct drm_device *drm_dev, void *raw_args,
			    struct drm_file *file)
{
	struct drm_pvr_ioctl_create_vm_context_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	if (args->_padding_4) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	vm_ctx = pvr_vm_create_context(pvr_file->pvr_dev, true);
	if (IS_ERR(vm_ctx)) {
		err = PTR_ERR(vm_ctx);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->vm_ctx_handles,
		       &args->handle,
		       vm_ctx,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_vm_context_put(vm_ctx);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_destroy_vm_context() - IOCTL to destroy a VM context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_destroy_vm_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if object not in object list.
 */
static int
pvr_ioctl_destroy_vm_context(struct drm_device *drm_dev, void *raw_args,
			     struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_vm_context_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	if (args->_padding_4)
		return -EINVAL;

	vm_ctx = xa_erase(&pvr_file->vm_ctx_handles, args->handle);
	if (!vm_ctx)
		return -EINVAL;

	pvr_vm_context_put(vm_ctx);
	return 0;
}

/**
 * pvr_ioctl_vm_map() - IOCTL to map buffer to GPU address space.
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_vm_map_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_VM_MAP.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if &drm_pvr_ioctl_vm_op_map_args.flags is not zero,
 *  * -%EINVAL if the bounds specified by &drm_pvr_ioctl_vm_op_map_args.offset
 *    and &drm_pvr_ioctl_vm_op_map_args.size are not valid or do not fall
 *    within the buffer object specified by
 *    &drm_pvr_ioctl_vm_op_map_args.handle,
 *  * -%EINVAL if the bounds specified by
 *    &drm_pvr_ioctl_vm_op_map_args.device_addr and
 *    &drm_pvr_ioctl_vm_op_map_args.size do not form a valid device-virtual
 *    address range which falls entirely within a single heap, or
 *  * -%ENOENT if &drm_pvr_ioctl_vm_op_map_args.handle does not refer to a
 *    valid PowerVR buffer object.
 */
static int
pvr_ioctl_vm_map(struct drm_device *drm_dev, void *raw_args,
		 struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct drm_pvr_ioctl_vm_map_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	struct pvr_gem_object *pvr_obj;
	size_t pvr_obj_size;

	u64 offset_plus_size;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* Initial validation of args. */
	if (args->_padding_14) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	if (args->flags != 0 ||
	    check_add_overflow(args->offset, args->size, &offset_plus_size) ||
	    !pvr_find_heap_containing(pvr_dev, args->device_addr, args->size)) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (!vm_ctx) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
	if (!pvr_obj) {
		err = -ENOENT;
		goto err_put_vm_context;
	}

	pvr_obj_size = pvr_gem_object_size(pvr_obj);

	/*
	 * Validate offset and size args. The alignment of these will be
	 * checked when mapping; for now just check that they're within valid
	 * bounds
	 */
	if (args->offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) {
		err = -EINVAL;
		goto err_put_pvr_object;
	}

	err = pvr_vm_map(vm_ctx, pvr_obj, args->offset,
			 args->device_addr, args->size);
	if (err)
		goto err_put_pvr_object;

	/*
	 * In order to set up the mapping, we needed a reference to &pvr_obj.
	 * However, pvr_vm_map() obtains and stores its own reference, so we
	 * must release ours before returning.
	 */

err_put_pvr_object:
	pvr_gem_object_put(pvr_obj);

err_put_vm_context:
	pvr_vm_context_put(vm_ctx);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_vm_unmap() - IOCTL to unmap buffer from GPU address space.
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_vm_unmap_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_VM_UNMAP.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if &drm_pvr_ioctl_vm_op_unmap_args.device_addr is not a valid
 *    device page-aligned device-virtual address, or
 *  * -%ENOENT if there is currently no PowerVR buffer object mapped at
 *    &drm_pvr_ioctl_vm_op_unmap_args.device_addr.
 */
static int
pvr_ioctl_vm_unmap(struct drm_device *drm_dev, void *raw_args,
		   struct drm_file *file)
{
	struct drm_pvr_ioctl_vm_unmap_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;
	int err;

	/* Initial validation of args. */
	if (args->_padding_4)
		return -EINVAL;

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (!vm_ctx)
		return -EINVAL;

	err = pvr_vm_unmap(vm_ctx, args->device_addr, args->size);

	pvr_vm_context_put(vm_ctx);

	return err;
}
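
/*
 * Example (illustrative sketch): mapping and unmapping a buffer object from
 * userspace. Assumes `fd`, a 4096-byte BO handle `bo_args.handle`, a VM
 * context handle `vm_handle` obtained via %DRM_IOCTL_PVR_CREATE_VM_CONTEXT,
 * and a device page-aligned device-virtual address `dev_addr` inside one of
 * the heaps reported by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
 *
 *	struct drm_pvr_ioctl_vm_map_args map_args = {
 *		.vm_context_handle = vm_handle,
 *		.device_addr = dev_addr,
 *		.handle = bo_args.handle,
 *		.offset = 0,
 *		.size = 4096,
 *	};
 *	struct drm_pvr_ioctl_vm_unmap_args unmap_args = {
 *		.vm_context_handle = vm_handle,
 *		.device_addr = dev_addr,
 *		.size = 4096,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_PVR_VM_MAP, &map_args) != 0)
 *		return -1;
 *	// ... use the mapping in submitted jobs ...
 *	if (ioctl(fd, DRM_IOCTL_PVR_VM_UNMAP, &unmap_args) != 0)
 *		return -1;
 */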

/**
 * pvr_ioctl_submit_jobs() - IOCTL to submit jobs to the GPU
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_submit_jobs_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_SUBMIT_JOBS.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if arguments are invalid.
 */
static int
pvr_ioctl_submit_jobs(struct drm_device *drm_dev, void *raw_args,
		      struct drm_file *file)
{
	struct drm_pvr_ioctl_submit_jobs_args *args = raw_args;
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file = to_pvr_file(file);
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	err = pvr_submit_jobs(pvr_dev, pvr_file, args);

	drm_dev_exit(idx);

	return err;
}

int
pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out)
{
	if (usr_stride < min_stride)
		return -EINVAL;

	return copy_struct_from_user(out, obj_size, u64_to_user_ptr(usr_ptr), usr_stride);
}

int
pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in)
{
	if (usr_stride < min_stride)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_stride, obj_size)))
		return -EFAULT;

	if (usr_stride > obj_size &&
	    clear_user(u64_to_user_ptr(usr_ptr + obj_size), usr_stride - obj_size)) {
		return -EFAULT;
	}

	return 0;
}
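
/*
 * Worked example of the stride handling above (values are illustrative): if
 * the kernel object is 24 bytes and userspace passes usr_stride == 32,
 * pvr_get_uobj() relies on copy_struct_from_user() to reject any non-zero
 * trailing bytes, while pvr_set_uobj() writes 24 bytes and zero-fills the
 * remaining 8. If userspace instead passes usr_stride == 16 (an older,
 * shorter layout), the get path zero-extends the kernel copy and the set
 * path truncates the write to 16 bytes.
 */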

int
pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size, void **out)
{
	int ret = 0;
	void *out_alloc;

	if (in->stride < min_stride)
		return -EINVAL;

	if (!in->count)
		return 0;

	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
	if (!out_alloc)
		return -ENOMEM;

	if (obj_size == in->stride) {
		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
				   (unsigned long)obj_size * in->count))
			ret = -EFAULT;
	} else {
		void __user *in_ptr = u64_to_user_ptr(in->array);
		void *out_ptr = out_alloc;

		for (u32 i = 0; i < in->count; i++) {
			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
			if (ret)
				break;

			out_ptr += obj_size;
			in_ptr += in->stride;
		}
	}

	if (ret) {
		kvfree(out_alloc);
		return ret;
	}

	*out = out_alloc;
	return 0;
}

int
pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
		   const void *in)
{
	if (out->stride < min_stride)
		return -EINVAL;

	if (!out->count)
		return 0;

	if (obj_size == out->stride) {
		if (copy_to_user(u64_to_user_ptr(out->array), in,
				 (unsigned long)obj_size * out->count))
			return -EFAULT;
	} else {
		u32 cpy_elem_size = min_t(u32, out->stride, obj_size);
		void __user *out_ptr = u64_to_user_ptr(out->array);
		const void *in_ptr = in;

		/*
		 * Copy element by element: user-side elements are out->stride
		 * apart, while kernel-side elements are obj_size apart.
		 */
		for (u32 i = 0; i < out->count; i++) {
			if (copy_to_user(out_ptr, in_ptr, cpy_elem_size))
				return -EFAULT;

			out_ptr += out->stride;
			in_ptr += obj_size;
		}

		if (out->stride > obj_size &&
		    clear_user(u64_to_user_ptr(out->array + obj_size),
			       out->stride - obj_size)) {
			return -EFAULT;
		}
	}

	return 0;
}

#define DRM_PVR_IOCTL(_name, _func, _flags) \
	DRM_IOCTL_DEF_DRV(PVR_##_name, pvr_ioctl_##_func, _flags)

/* clang-format off */

static const struct drm_ioctl_desc pvr_drm_driver_ioctls[] = {
	DRM_PVR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(GET_BO_MMAP_OFFSET, get_bo_mmap_offset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_VM_CONTEXT, create_vm_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_VM_CONTEXT, destroy_vm_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(VM_MAP, vm_map, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(VM_UNMAP, vm_unmap, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_CONTEXT, create_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_CONTEXT, destroy_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_FREE_LIST, create_free_list, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_FREE_LIST, destroy_free_list, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_HWRT_DATASET, create_hwrt_dataset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_HWRT_DATASET, destroy_hwrt_dataset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(SUBMIT_JOBS, submit_jobs, DRM_RENDER_ALLOW),
};

/* clang-format on */

#undef DRM_PVR_IOCTL

/**
 * pvr_drm_driver_open() - Driver callback when a new &struct drm_file is opened
 * @drm_dev: [IN] DRM device.
 * @file: [IN] DRM file private data.
 *
 * Allocates powervr-specific file private data (&struct pvr_file).
 *
 * Registered in &pvr_drm_driver.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOMEM if the allocation of a &struct pvr_file fails, or
 *  * Any error returned by pvr_memory_context_init().
 */
static int
pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file;

	pvr_file = kzalloc(sizeof(*pvr_file), GFP_KERNEL);
	if (!pvr_file)
		return -ENOMEM;

	/*
	 * Store reference to base DRM file private data for use by
	 * from_pvr_file.
	 */
	pvr_file->file = file;

	/*
	 * Store reference to powervr-specific outer device struct in file
	 * private data for convenient access.
	 */
	pvr_file->pvr_dev = pvr_dev;

	INIT_LIST_HEAD(&pvr_file->contexts);

	xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->vm_ctx_handles, XA_FLAGS_ALLOC1);

	/*
	 * Store reference to powervr-specific file private data in DRM file
	 * private data.
	 */
	file->driver_priv = pvr_file;

	return 0;
}

/**
 * pvr_drm_driver_postclose() - One of the driver callbacks when a &struct
 * drm_file is closed.
 * @drm_dev: [IN] DRM device (unused).
 * @file: [IN] DRM file private data.
 *
 * Frees powervr-specific file private data (&struct pvr_file).
 *
 * Registered in &pvr_drm_driver.
 */
static void
pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
			 struct drm_file *file)
{
	struct pvr_file *pvr_file = to_pvr_file(file);

	/* Kill remaining contexts. */
	pvr_destroy_contexts_for_file(pvr_file);

	/* Drop references on any remaining objects. */
	pvr_destroy_free_lists_for_file(pvr_file);
	pvr_destroy_hwrt_datasets_for_file(pvr_file);
	pvr_destroy_vm_contexts_for_file(pvr_file);

	kfree(pvr_file);
	file->driver_priv = NULL;
}

DEFINE_DRM_GEM_FOPS(pvr_drm_driver_fops);

static struct drm_driver pvr_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_GEM_GPUVA | DRIVER_RENDER |
			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
	.open = pvr_drm_driver_open,
	.postclose = pvr_drm_driver_postclose,
	.ioctls = pvr_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(pvr_drm_driver_ioctls),
	.fops = &pvr_drm_driver_fops,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = pvr_debugfs_init,
#endif

	.name = PVR_DRIVER_NAME,
	.desc = PVR_DRIVER_DESC,
	.major = PVR_DRIVER_MAJOR,
	.minor = PVR_DRIVER_MINOR,
	.patchlevel = PVR_DRIVER_PATCHLEVEL,

	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.gem_create_object = pvr_gem_create_object,
};

static int
pvr_probe(struct platform_device *plat_dev)
{
	struct pvr_device *pvr_dev;
	struct drm_device *drm_dev;
	int err;

	pvr_dev = devm_drm_dev_alloc(&plat_dev->dev, &pvr_drm_driver,
				     struct pvr_device, base);
	if (IS_ERR(pvr_dev))
		return PTR_ERR(pvr_dev);

	drm_dev = &pvr_dev->base;

	platform_set_drvdata(plat_dev, drm_dev);

	err = pvr_power_domains_init(pvr_dev);
	if (err)
		return err;

	init_rwsem(&pvr_dev->reset_sem);

	pvr_context_device_init(pvr_dev);

	err = pvr_queue_device_init(pvr_dev);
	if (err)
		goto err_context_fini;

	devm_pm_runtime_enable(&plat_dev->dev);
	pm_runtime_mark_last_busy(&plat_dev->dev);

	pm_runtime_set_autosuspend_delay(&plat_dev->dev, 50);
	pm_runtime_use_autosuspend(&plat_dev->dev);
	pvr_watchdog_init(pvr_dev);

	err = pvr_device_init(pvr_dev);
	if (err)
		goto err_watchdog_fini;

	err = drm_dev_register(drm_dev, 0);
	if (err)
		goto err_device_fini;

	xa_init_flags(&pvr_dev->free_list_ids, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_dev->job_ids, XA_FLAGS_ALLOC1);

	return 0;

err_device_fini:
	pvr_device_fini(pvr_dev);

err_watchdog_fini:
	pvr_watchdog_fini(pvr_dev);

	pvr_queue_device_fini(pvr_dev);

err_context_fini:
	pvr_context_device_fini(pvr_dev);

	pvr_power_domains_fini(pvr_dev);

	return err;
}

static void pvr_remove(struct platform_device *plat_dev)
{
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);

	WARN_ON(!xa_empty(&pvr_dev->job_ids));
	WARN_ON(!xa_empty(&pvr_dev->free_list_ids));

	xa_destroy(&pvr_dev->job_ids);
	xa_destroy(&pvr_dev->free_list_ids);

	pm_runtime_suspend(drm_dev->dev);
	pvr_device_fini(pvr_dev);
	drm_dev_unplug(drm_dev);
	pvr_watchdog_fini(pvr_dev);
	pvr_queue_device_fini(pvr_dev);
	pvr_context_device_fini(pvr_dev);
	pvr_power_domains_fini(pvr_dev);
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "img,img-rogue", .data = NULL },

	/*
	 * This legacy compatible string was introduced early on before the more generic
	 * "img,img-rogue" was added. Keep it around here for compatibility, but never use
	 * "img,img-axe" in new devicetrees.
	 */
	{ .compatible = "img,img-axe", .data = NULL },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops pvr_pm_ops = {
	RUNTIME_PM_OPS(pvr_power_device_suspend, pvr_power_device_resume, pvr_power_device_idle)
};

static struct platform_driver pvr_driver = {
	.probe = pvr_probe,
	.remove = pvr_remove,
	.driver = {
		.name = PVR_DRIVER_NAME,
		.pm = &pvr_pm_ops,
		.of_match_table = dt_match,
	},
};
module_platform_driver(pvr_driver);

MODULE_AUTHOR("Imagination Technologies Ltd.");
MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw");