1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
3 /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
4 /* Copyright 2019 Collabora ltd. */
5 
6 #ifdef CONFIG_ARM_ARCH_TIMER
7 #include <asm/arch_timer.h>
8 #endif
9 
10 #include <linux/list.h>
11 #include <linux/module.h>
12 #include <linux/of_platform.h>
13 #include <linux/pagemap.h>
14 #include <linux/platform_device.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/time64.h>
17 
18 #include <drm/drm_auth.h>
19 #include <drm/drm_debugfs.h>
20 #include <drm/drm_drv.h>
21 #include <drm/drm_exec.h>
22 #include <drm/drm_ioctl.h>
23 #include <drm/drm_syncobj.h>
24 #include <drm/drm_utils.h>
25 #include <drm/gpu_scheduler.h>
26 #include <drm/panthor_drm.h>
27 
28 #include "panthor_device.h"
29 #include "panthor_fw.h"
30 #include "panthor_gem.h"
31 #include "panthor_gpu.h"
32 #include "panthor_heap.h"
33 #include "panthor_mmu.h"
34 #include "panthor_regs.h"
35 #include "panthor_sched.h"
36 
37 /**
38  * DOC: user <-> kernel object copy helpers.
39  */
40 
41 /**
42  * panthor_set_uobj() - Copy kernel object to user object.
43  * @usr_ptr: User pointer.
44  * @usr_size: Size of the user object.
45  * @min_size: Minimum size for this object.
46  * @kern_size: Size of the kernel object.
47  * @in: Address of the kernel object to copy.
48  *
49  * Helper automating kernel -> user object copies.
50  *
51  * Don't use this function directly, use PANTHOR_UOBJ_SET() instead.
52  *
53  * Return: 0 on success, a negative error code otherwise.
54  */
55 static int
56 panthor_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 kern_size, const void *in)
57 {
58 	/* User size shouldn't be smaller than the minimal object size. */
59 	if (usr_size < min_size)
60 		return -EINVAL;
61 
62 	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_size, kern_size)))
63 		return -EFAULT;
64 
65 	/* When the kernel object is smaller than the user object, we fill the gap with
66 	 * zeros.
67 	 */
68 	if (usr_size > kern_size &&
69 	    clear_user(u64_to_user_ptr(usr_ptr + kern_size), usr_size - kern_size)) {
70 		return -EFAULT;
71 	}
72 
73 	return 0;
74 }
75 
76 /**
77  * panthor_get_uobj_array() - Copy a user object array into a kernel accessible object array.
78  * @in: The object array to copy.
79  * @min_stride: Minimum array stride.
80  * @obj_size: Kernel object size.
81  *
82  * Helper automating user -> kernel object copies.
83  *
84  * Don't use this function directly, use PANTHOR_UOBJ_GET_ARRAY() instead.
85  *
86  * Return: newly allocated object array or an ERR_PTR on error.
87  */
88 static void *
89 panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
90 		       u32 obj_size)
91 {
92 	int ret = 0;
93 	void *out_alloc;
94 
95 	if (!in->count)
96 		return NULL;
97 
98 	/* User stride must be at least the minimum object size, otherwise it might
99 	 * lack useful information.
100 	 */
101 	if (in->stride < min_stride)
102 		return ERR_PTR(-EINVAL);
103 
104 	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
105 	if (!out_alloc)
106 		return ERR_PTR(-ENOMEM);
107 
108 	if (obj_size == in->stride) {
109 		/* Fast path when user/kernel have the same uAPI header version. */
110 		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
111 				   (unsigned long)obj_size * in->count))
112 			ret = -EFAULT;
113 	} else {
114 		void __user *in_ptr = u64_to_user_ptr(in->array);
115 		void *out_ptr = out_alloc;
116 
117 		/* If the sizes differ, we need to copy elements one by one. */
118 		for (u32 i = 0; i < in->count; i++) {
119 			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
120 			if (ret)
121 				break;
122 
123 			out_ptr += obj_size;
124 			in_ptr += in->stride;
125 		}
126 	}
127 
128 	if (ret) {
129 		kvfree(out_alloc);
130 		return ERR_PTR(ret);
131 	}
132 
133 	return out_alloc;
134 }
135 
136 /**
137  * PANTHOR_UOBJ_MIN_SIZE_INTERNAL() - Get the minimum user object size
138  * @_typename: Object type.
139  * @_last_mandatory_field: Last mandatory field.
140  *
141  * Get the minimum user object size based on the last mandatory field name,
142  * A.K.A, the name of the last field of the structure at the time this
143  * structure was added to the uAPI.
144  *
145  * Don't use directly, use PANTHOR_UOBJ_DECL() instead.
146  */
147 #define PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
148 	(offsetof(_typename, _last_mandatory_field) + \
149 	 sizeof(((_typename *)NULL)->_last_mandatory_field))
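/* Note: this expands to the same value as the kernel's offsetofend() helper. */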
150 
151 /**
152  * PANTHOR_UOBJ_DECL() - Declare a new uAPI object that is subject to
153  * evolution.
154  * @_typename: Object type.
155  * @_last_mandatory_field: Last mandatory field.
156  *
157  * Should be used to extend the PANTHOR_UOBJ_MIN_SIZE() list.
158  */
159 #define PANTHOR_UOBJ_DECL(_typename, _last_mandatory_field) \
160 	_typename : PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)
161 
162 /**
163  * PANTHOR_UOBJ_MIN_SIZE() - Get the minimum size of a given uAPI object
164  * @_obj_name: Object to get the minimum size of.
165  *
166  * Don't use this macro directly, it's automatically called by
167  * PANTHOR_UOBJ_{SET,GET_ARRAY}().
168  */
169 #define PANTHOR_UOBJ_MIN_SIZE(_obj_name) \
170 	_Generic(_obj_name, \
171 		 PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \
172 		 PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \
173 		 PANTHOR_UOBJ_DECL(struct drm_panthor_timestamp_info, current_timestamp), \
174 		 PANTHOR_UOBJ_DECL(struct drm_panthor_group_priorities_info, pad), \
175 		 PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
176 		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
177 		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
178 		 PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs))
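/*
 * A new versioned uAPI object must get a PANTHOR_UOBJ_DECL() entry in the
 * _Generic() list above before it can be used with PANTHOR_UOBJ_SET() or
 * PANTHOR_UOBJ_GET_ARRAY().
 */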
179 
180 /**
181  * PANTHOR_UOBJ_SET() - Copy a kernel object to a user object.
182  * @_dest_usr_ptr: User pointer to copy to.
183  * @_usr_size: Size of the user object.
184  * @_src_obj: Kernel object to copy (not a pointer).
185  *
186  * Return: 0 on success, a negative error code otherwise.
187  */
188 #define PANTHOR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
189 	panthor_set_uobj(_dest_usr_ptr, _usr_size, \
190 			 PANTHOR_UOBJ_MIN_SIZE(_src_obj), \
191 			 sizeof(_src_obj), &(_src_obj))
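/*
 * Illustrative use (mirroring panthor_ioctl_dev_query() below):
 *
 *	return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);
 */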
192 
193 /**
194  * PANTHOR_UOBJ_GET_ARRAY() - Copy a user object array to a kernel accessible
195  * object array.
196  * @_dest_array: Local variable that will hold the newly allocated kernel
197  * object array.
198  * @_uobj_array: The drm_panthor_obj_array object describing the user object
199  * array.
200  *
201  * Return: 0 on success, a negative error code otherwise.
202  */
203 #define PANTHOR_UOBJ_GET_ARRAY(_dest_array, _uobj_array) \
204 	({ \
205 		typeof(_dest_array) _tmp; \
206 		_tmp = panthor_get_uobj_array(_uobj_array, \
207 					      PANTHOR_UOBJ_MIN_SIZE((_dest_array)[0]), \
208 					      sizeof((_dest_array)[0])); \
209 		if (!IS_ERR(_tmp)) \
210 			_dest_array = _tmp; \
211 		PTR_ERR_OR_ZERO(_tmp); \
212 	})
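/*
 * Illustrative use (mirroring panthor_ioctl_group_submit() below); the caller
 * owns the returned array and must kvfree() it:
 *
 *	struct drm_panthor_queue_submit *jobs_args;
 *	int ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits);
 */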
213 
214 /**
215  * struct panthor_sync_signal - Represent a synchronization object point to attach
216  * our job fence to.
217  *
218  * This structure is here to keep track of fences that are currently bound to
219  * a specific syncobj point.
220  *
221  * At the beginning of a job submission, the fence
222  * is retrieved from the syncobj itself, and can be NULL if no fence was attached
223  * to this point.
224  *
225  * At the end, it points to the fence of the last job that had a
226  * %DRM_PANTHOR_SYNC_OP_SIGNAL on this syncobj.
227  *
228  * With jobs being submitted in batches, the fence might change several times during
229  * the process, allowing one job to wait on a job that's part of the same submission
230  * but appears earlier in the drm_panthor_group_submit::queue_submits array.
231  */
232 struct panthor_sync_signal {
233 	/** @node: list_head to track signal ops within a submit operation */
234 	struct list_head node;
235 
236 	/** @handle: The syncobj handle. */
237 	u32 handle;
238 
239 	/**
240 	 * @point: The syncobj point.
241 	 *
242 	 * Zero for regular syncobjs, and non-zero for timeline syncobjs.
243 	 */
244 	u64 point;
245 
246 	/**
247 	 * @syncobj: The sync object pointed by @handle.
248 	 */
249 	struct drm_syncobj *syncobj;
250 
251 	/**
252 	 * @chain: Chain object used to link the new fence to an existing
253 	 * timeline syncobj.
254 	 *
255 	 * NULL for regular syncobj, non-NULL for timeline syncobjs.
256 	 */
257 	struct dma_fence_chain *chain;
258 
259 	/**
260 	 * @fence: The fence to assign to the syncobj or syncobj-point.
261 	 */
262 	struct dma_fence *fence;
263 };
264 
265 /**
266  * struct panthor_job_ctx - Job context
267  */
268 struct panthor_job_ctx {
269 	/** @job: The job that is about to be submitted to drm_sched. */
270 	struct drm_sched_job *job;
271 
272 	/** @syncops: Array of sync operations. */
273 	struct drm_panthor_sync_op *syncops;
274 
275 	/** @syncop_count: Number of sync operations. */
276 	u32 syncop_count;
277 };
278 
279 /**
280  * struct panthor_submit_ctx - Submission context
281  *
282  * Anything that's related to a submission (%DRM_IOCTL_PANTHOR_VM_BIND or
283  * %DRM_IOCTL_PANTHOR_GROUP_SUBMIT) is kept here, so we can automate the
284  * initialization and cleanup steps.
285  */
286 struct panthor_submit_ctx {
287 	/** @file: DRM file this submission happens on. */
288 	struct drm_file *file;
289 
290 	/**
291 	 * @signals: List of struct panthor_sync_signal.
292 	 *
293 	 * %DRM_PANTHOR_SYNC_OP_SIGNAL operations will be recorded here,
294 	 * and %DRM_PANTHOR_SYNC_OP_WAIT will first check if an entry
295 	 * matching the syncobj+point exists before calling
296 	 * drm_syncobj_find_fence(). This allows us to describe dependencies
297 	 * existing between jobs that are part of the same batch.
298 	 */
299 	struct list_head signals;
300 
301 	/** @jobs: Array of jobs. */
302 	struct panthor_job_ctx *jobs;
303 
304 	/** @job_count: Number of entries in the @jobs array. */
305 	u32 job_count;
306 
307 	/** @exec: drm_exec context used to acquire and prepare resv objects. */
308 	struct drm_exec exec;
309 };
310 
311 #define PANTHOR_SYNC_OP_FLAGS_MASK \
312 	(DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK | DRM_PANTHOR_SYNC_OP_SIGNAL)
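/* Flag bits outside this mask are rejected by panthor_check_sync_op(). */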
313 
314 static bool sync_op_is_signal(const struct drm_panthor_sync_op *sync_op)
315 {
316 	return !!(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
317 }
318 
319 static bool sync_op_is_wait(const struct drm_panthor_sync_op *sync_op)
320 {
321 	/* Note that DRM_PANTHOR_SYNC_OP_WAIT == 0 */
322 	return !(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
323 }
324 
325 /**
326  * panthor_check_sync_op() - Check drm_panthor_sync_op fields
327  * @sync_op: The sync operation to check.
328  *
329  * Return: 0 on success, -EINVAL otherwise.
330  */
331 static int
332 panthor_check_sync_op(const struct drm_panthor_sync_op *sync_op)
333 {
334 	u8 handle_type;
335 
336 	if (sync_op->flags & ~PANTHOR_SYNC_OP_FLAGS_MASK)
337 		return -EINVAL;
338 
339 	handle_type = sync_op->flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK;
340 	if (handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
341 	    handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ)
342 		return -EINVAL;
343 
344 	if (handle_type == DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
345 	    sync_op->timeline_value != 0)
346 		return -EINVAL;
347 
348 	return 0;
349 }
350 
351 /**
352  * panthor_sync_signal_free() - Release resources and free a panthor_sync_signal object
353  * @sig_sync: Signal object to free.
354  */
355 static void
356 panthor_sync_signal_free(struct panthor_sync_signal *sig_sync)
357 {
358 	if (!sig_sync)
359 		return;
360 
361 	drm_syncobj_put(sig_sync->syncobj);
362 	dma_fence_chain_free(sig_sync->chain);
363 	dma_fence_put(sig_sync->fence);
364 	kfree(sig_sync);
365 }
366 
367 /**
368  * panthor_submit_ctx_add_sync_signal() - Add a signal operation to a submit context
369  * @ctx: Context to add the signal operation to.
370  * @handle: Syncobj handle.
371  * @point: Syncobj point.
372  *
373  * Return: 0 on success, otherwise negative error value.
374  */
375 static int
376 panthor_submit_ctx_add_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
377 {
378 	struct panthor_sync_signal *sig_sync;
379 	struct dma_fence *cur_fence;
380 	int ret;
381 
382 	sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL);
383 	if (!sig_sync)
384 		return -ENOMEM;
385 
386 	sig_sync->handle = handle;
387 	sig_sync->point = point;
388 
389 	if (point > 0) {
390 		sig_sync->chain = dma_fence_chain_alloc();
391 		if (!sig_sync->chain) {
392 			ret = -ENOMEM;
393 			goto err_free_sig_sync;
394 		}
395 	}
396 
397 	sig_sync->syncobj = drm_syncobj_find(ctx->file, handle);
398 	if (!sig_sync->syncobj) {
399 		ret = -EINVAL;
400 		goto err_free_sig_sync;
401 	}
402 
403 	/* Retrieve the current fence attached to that point. It's
404 	 * perfectly fine to get a NULL fence here, it just means there's
405 	 * no fence attached to that point yet.
406 	 */
407 	if (!drm_syncobj_find_fence(ctx->file, handle, point, 0, &cur_fence))
408 		sig_sync->fence = cur_fence;
409 
410 	list_add_tail(&sig_sync->node, &ctx->signals);
411 
412 	return 0;
413 
414 err_free_sig_sync:
415 	panthor_sync_signal_free(sig_sync);
416 	return ret;
417 }
418 
419 /**
420  * panthor_submit_ctx_search_sync_signal() - Search an existing signal operation in a
421  * submit context.
422  * @ctx: Context to search the signal operation in.
423  * @handle: Syncobj handle.
424  * @point: Syncobj point.
425  *
426  * Return: A valid panthor_sync_signal object if found, NULL otherwise.
427  */
428 static struct panthor_sync_signal *
429 panthor_submit_ctx_search_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
430 {
431 	struct panthor_sync_signal *sig_sync;
432 
433 	list_for_each_entry(sig_sync, &ctx->signals, node) {
434 		if (handle == sig_sync->handle && point == sig_sync->point)
435 			return sig_sync;
436 	}
437 
438 	return NULL;
439 }
440 
441 /**
442  * panthor_submit_ctx_add_job() - Add a job to a submit context
443  * @ctx: Context to add the job to.
444  * @idx: Index of the job in the context.
445  * @job: Job to add.
446  * @syncs: Sync operations provided by userspace.
447  *
448  * Return: 0 on success, a negative error code otherwise.
449  */
450 static int
451 panthor_submit_ctx_add_job(struct panthor_submit_ctx *ctx, u32 idx,
452 			   struct drm_sched_job *job,
453 			   const struct drm_panthor_obj_array *syncs)
454 {
455 	int ret;
456 
457 	ctx->jobs[idx].job = job;
458 
459 	ret = PANTHOR_UOBJ_GET_ARRAY(ctx->jobs[idx].syncops, syncs);
460 	if (ret)
461 		return ret;
462 
463 	ctx->jobs[idx].syncop_count = syncs->count;
464 	return 0;
465 }
466 
467 /**
468  * panthor_submit_ctx_get_sync_signal() - Search signal operation and add one if none was found.
469  * @ctx: Context to search the signal operation in.
470  * @handle: Syncobj handle.
471  * @point: Syncobj point.
472  *
473  * Return: 0 on success, a negative error code otherwise.
474  */
475 static int
476 panthor_submit_ctx_get_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
477 {
478 	struct panthor_sync_signal *sig_sync;
479 
480 	sig_sync = panthor_submit_ctx_search_sync_signal(ctx, handle, point);
481 	if (sig_sync)
482 		return 0;
483 
484 	return panthor_submit_ctx_add_sync_signal(ctx, handle, point);
485 }
486 
487 /**
488  * panthor_submit_ctx_update_job_sync_signal_fences() - Update fences
489  * on the signal operations specified by a job.
490  * @ctx: Context to search the signal operation in.
491  * @job_idx: Index of the job to operate on.
492  *
493  * Return: 0 on success, a negative error code otherwise.
494  */
495 static int
496 panthor_submit_ctx_update_job_sync_signal_fences(struct panthor_submit_ctx *ctx,
497 						 u32 job_idx)
498 {
499 	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
500 						    struct panthor_device,
501 						    base);
502 	struct dma_fence *done_fence = &ctx->jobs[job_idx].job->s_fence->finished;
503 	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
504 	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
505 
506 	for (u32 i = 0; i < sync_op_count; i++) {
507 		struct dma_fence *old_fence;
508 		struct panthor_sync_signal *sig_sync;
509 
510 		if (!sync_op_is_signal(&sync_ops[i]))
511 			continue;
512 
513 		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
514 								 sync_ops[i].timeline_value);
515 		if (drm_WARN_ON(&ptdev->base, !sig_sync))
516 			return -EINVAL;
517 
518 		old_fence = sig_sync->fence;
519 		sig_sync->fence = dma_fence_get(done_fence);
520 		dma_fence_put(old_fence);
521 
522 		if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
523 			return -EINVAL;
524 	}
525 
526 	return 0;
527 }
528 
529 /**
530  * panthor_submit_ctx_collect_job_signal_ops() - Iterate over all job signal operations
531  * and add them to the context.
532  * @ctx: Context to search the signal operation in.
533  * @job_idx: Index of the job to operate on.
534  *
535  * Return: 0 on success, a negative error code otherwise.
536  */
537 static int
538 panthor_submit_ctx_collect_job_signal_ops(struct panthor_submit_ctx *ctx,
539 					  u32 job_idx)
540 {
541 	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
542 	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
543 
544 	for (u32 i = 0; i < sync_op_count; i++) {
545 		int ret;
546 
547 		if (!sync_op_is_signal(&sync_ops[i]))
548 			continue;
549 
550 		ret = panthor_check_sync_op(&sync_ops[i]);
551 		if (ret)
552 			return ret;
553 
554 		ret = panthor_submit_ctx_get_sync_signal(ctx,
555 							 sync_ops[i].handle,
556 							 sync_ops[i].timeline_value);
557 		if (ret)
558 			return ret;
559 	}
560 
561 	return 0;
562 }
563 
564 /**
565  * panthor_submit_ctx_push_fences() - Iterate over the signal array, and for each entry, push
566  * the currently assigned fence to the associated syncobj.
567  * @ctx: Context to push fences on.
568  *
569  * This is the last step of a submission procedure, and is done once we know the submission
570  * is effective and job fences are guaranteed to be signaled in finite time.
571  */
572 static void
573 panthor_submit_ctx_push_fences(struct panthor_submit_ctx *ctx)
574 {
575 	struct panthor_sync_signal *sig_sync;
576 
577 	list_for_each_entry(sig_sync, &ctx->signals, node) {
578 		if (sig_sync->chain) {
579 			drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
580 					      sig_sync->fence, sig_sync->point);
581 			sig_sync->chain = NULL;
582 		} else {
583 			drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
584 		}
585 	}
586 }
587 
588 /**
589  * panthor_submit_ctx_add_sync_deps_to_job() - Add sync wait operations as
590  * job dependencies.
591  * @ctx: Submit context.
592  * @job_idx: Index of the job to operate on.
593  *
594  * Return: 0 on success, a negative error code otherwise.
595  */
596 static int
597 panthor_submit_ctx_add_sync_deps_to_job(struct panthor_submit_ctx *ctx,
598 					u32 job_idx)
599 {
600 	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
601 						    struct panthor_device,
602 						    base);
603 	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
604 	struct drm_sched_job *job = ctx->jobs[job_idx].job;
605 	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
606 	int ret = 0;
607 
608 	for (u32 i = 0; i < sync_op_count; i++) {
609 		struct panthor_sync_signal *sig_sync;
610 		struct dma_fence *fence;
611 
612 		if (!sync_op_is_wait(&sync_ops[i]))
613 			continue;
614 
615 		ret = panthor_check_sync_op(&sync_ops[i]);
616 		if (ret)
617 			return ret;
618 
619 		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
620 								 sync_ops[i].timeline_value);
621 		if (sig_sync) {
622 			if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
623 				return -EINVAL;
624 
625 			fence = dma_fence_get(sig_sync->fence);
626 		} else {
627 			ret = drm_syncobj_find_fence(ctx->file, sync_ops[i].handle,
628 						     sync_ops[i].timeline_value,
629 						     0, &fence);
630 			if (ret)
631 				return ret;
632 		}
633 
634 		ret = drm_sched_job_add_dependency(job, fence);
635 		if (ret)
636 			return ret;
637 	}
638 
639 	return 0;
640 }
641 
642 /**
643  * panthor_submit_ctx_collect_jobs_signal_ops() - Collect all signal operations
644  * and add them to the submit context.
645  * @ctx: Submit context.
646  *
647  * Return: 0 on success, a negative error code otherwise.
648  */
649 static int
650 panthor_submit_ctx_collect_jobs_signal_ops(struct panthor_submit_ctx *ctx)
651 {
652 	for (u32 i = 0; i < ctx->job_count; i++) {
653 		int ret;
654 
655 		ret = panthor_submit_ctx_collect_job_signal_ops(ctx, i);
656 		if (ret)
657 			return ret;
658 	}
659 
660 	return 0;
661 }
662 
663 /**
664  * panthor_submit_ctx_add_deps_and_arm_jobs() - Add jobs dependencies and arm jobs
665  * @ctx: Submit context.
666  *
667  * Must be called after the resv preparation has been taken care of.
668  *
669  * Return: 0 on success, a negative error code otherwise.
670  */
671 static int
672 panthor_submit_ctx_add_deps_and_arm_jobs(struct panthor_submit_ctx *ctx)
673 {
674 	for (u32 i = 0; i < ctx->job_count; i++) {
675 		int ret;
676 
677 		ret = panthor_submit_ctx_add_sync_deps_to_job(ctx, i);
678 		if (ret)
679 			return ret;
680 
681 		drm_sched_job_arm(ctx->jobs[i].job);
682 
683 		ret = panthor_submit_ctx_update_job_sync_signal_fences(ctx, i);
684 		if (ret)
685 			return ret;
686 	}
687 
688 	return 0;
689 }
690 
691 /**
692  * panthor_submit_ctx_push_jobs() - Push jobs to their scheduling entities.
693  * @ctx: Submit context.
694  * @upd_resvs: Callback used to update reservation objects that were previously
695  * prepared.
696  */
697 static void
698 panthor_submit_ctx_push_jobs(struct panthor_submit_ctx *ctx,
699 			     void (*upd_resvs)(struct drm_exec *, struct drm_sched_job *))
700 {
701 	for (u32 i = 0; i < ctx->job_count; i++) {
702 		upd_resvs(&ctx->exec, ctx->jobs[i].job);
703 		drm_sched_entity_push_job(ctx->jobs[i].job);
704 
705 		/* Job is owned by the scheduler now. */
706 		ctx->jobs[i].job = NULL;
707 	}
708 
709 	panthor_submit_ctx_push_fences(ctx);
710 }
711 
712 /**
713  * panthor_submit_ctx_init() - Initializes a submission context
714  * @ctx: Submit context to initialize.
715  * @file: drm_file this submission happens on.
716  * @job_count: Number of jobs that will be submitted.
717  *
718  * Return: 0 on success, a negative error code otherwise.
719  */
720 static int panthor_submit_ctx_init(struct panthor_submit_ctx *ctx,
721 				   struct drm_file *file, u32 job_count)
722 {
723 	ctx->jobs = kvmalloc_array(job_count, sizeof(*ctx->jobs),
724 				   GFP_KERNEL | __GFP_ZERO);
725 	if (!ctx->jobs)
726 		return -ENOMEM;
727 
728 	ctx->file = file;
729 	ctx->job_count = job_count;
730 	INIT_LIST_HEAD(&ctx->signals);
731 	drm_exec_init(&ctx->exec,
732 		      DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES,
733 		      0);
734 	return 0;
735 }
736 
737 /**
738  * panthor_submit_ctx_cleanup() - Cleanup a submission context
739  * @ctx: Submit context to cleanup.
740  * @job_put: Job put callback.
741  */
742 static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx,
743 				       void (*job_put)(struct drm_sched_job *))
744 {
745 	struct panthor_sync_signal *sig_sync, *tmp;
746 	unsigned long i;
747 
748 	drm_exec_fini(&ctx->exec);
749 
750 	list_for_each_entry_safe(sig_sync, tmp, &ctx->signals, node)
751 		panthor_sync_signal_free(sig_sync);
752 
753 	for (i = 0; i < ctx->job_count; i++) {
754 		job_put(ctx->jobs[i].job);
755 		kvfree(ctx->jobs[i].syncops);
756 	}
757 
758 	kvfree(ctx->jobs);
759 }
760 
761 static int panthor_query_timestamp_info(struct panthor_device *ptdev,
762 					struct drm_panthor_timestamp_info *arg)
763 {
764 	int ret;
765 
766 	ret = panthor_device_resume_and_get(ptdev);
767 	if (ret)
768 		return ret;
769 
770 #ifdef CONFIG_ARM_ARCH_TIMER
771 	arg->timestamp_frequency = arch_timer_get_cntfrq();
772 #else
773 	arg->timestamp_frequency = 0;
774 #endif
775 	arg->current_timestamp = gpu_read64_counter(ptdev, GPU_TIMESTAMP);
776 	arg->timestamp_offset = gpu_read64(ptdev, GPU_TIMESTAMP_OFFSET);
777 
778 	pm_runtime_put(ptdev->base.dev);
779 	return 0;
780 }
781 
782 static int group_priority_permit(struct drm_file *file,
783 				 u8 priority)
784 {
785 	/* Ensure that priority is valid */
786 	if (priority > PANTHOR_GROUP_PRIORITY_REALTIME)
787 		return -EINVAL;
788 
789 	/* Medium priority and below are always allowed */
790 	if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM)
791 		return 0;
792 
793 	/* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
794 	if (capable(CAP_SYS_NICE) || drm_is_current_master(file))
795 		return 0;
796 
797 	return -EACCES;
798 }
799 
800 static void panthor_query_group_priorities_info(struct drm_file *file,
801 						struct drm_panthor_group_priorities_info *arg)
802 {
803 	int prio;
804 
805 	memset(arg, 0, sizeof(*arg));
806 	for (prio = PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) {
807 		if (!group_priority_permit(file, prio))
808 			arg->allowed_mask |= BIT(prio);
809 	}
810 }
811 
812 static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file)
813 {
814 	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
815 	struct drm_panthor_dev_query *args = data;
816 	struct drm_panthor_timestamp_info timestamp_info;
817 	struct drm_panthor_group_priorities_info priorities_info;
818 	int ret;
819 
820 	if (!args->pointer) {
821 		switch (args->type) {
822 		case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
823 			args->size = sizeof(ptdev->gpu_info);
824 			return 0;
825 
826 		case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
827 			args->size = sizeof(ptdev->csif_info);
828 			return 0;
829 
830 		case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO:
831 			args->size = sizeof(timestamp_info);
832 			return 0;
833 
834 		case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO:
835 			args->size = sizeof(priorities_info);
836 			return 0;
837 
838 		default:
839 			return -EINVAL;
840 		}
841 	}
842 
843 	switch (args->type) {
844 	case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
845 		return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);
846 
847 	case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
848 		return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info);
849 
850 	case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO:
851 		ret = panthor_query_timestamp_info(ptdev, &timestamp_info);
852 
853 		if (ret)
854 			return ret;
855 
856 		return PANTHOR_UOBJ_SET(args->pointer, args->size, timestamp_info);
857 
858 	case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO:
859 		panthor_query_group_priorities_info(file, &priorities_info);
860 		return PANTHOR_UOBJ_SET(args->pointer, args->size, priorities_info);
861 
862 	default:
863 		return -EINVAL;
864 	}
865 }
866 
867 #define PANTHOR_VM_CREATE_FLAGS			0
868 
869 static int panthor_ioctl_vm_create(struct drm_device *ddev, void *data,
870 				   struct drm_file *file)
871 {
872 	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
873 	struct panthor_file *pfile = file->driver_priv;
874 	struct drm_panthor_vm_create *args = data;
875 	int cookie, ret;
876 
877 	if (!drm_dev_enter(ddev, &cookie))
878 		return -ENODEV;
879 
880 	ret = panthor_vm_pool_create_vm(ptdev, pfile->vms, args);
881 	if (ret >= 0) {
882 		args->id = ret;
883 		ret = 0;
884 	}
885 
886 	drm_dev_exit(cookie);
887 	return ret;
888 }
889 
890 static int panthor_ioctl_vm_destroy(struct drm_device *ddev, void *data,
891 				    struct drm_file *file)
892 {
893 	struct panthor_file *pfile = file->driver_priv;
894 	struct drm_panthor_vm_destroy *args = data;
895 
896 	if (args->pad)
897 		return -EINVAL;
898 
899 	return panthor_vm_pool_destroy_vm(pfile->vms, args->id);
900 }
901 
902 #define PANTHOR_BO_FLAGS		DRM_PANTHOR_BO_NO_MMAP
903 
904 static int panthor_ioctl_bo_create(struct drm_device *ddev, void *data,
905 				   struct drm_file *file)
906 {
907 	struct panthor_file *pfile = file->driver_priv;
908 	struct drm_panthor_bo_create *args = data;
909 	struct panthor_vm *vm = NULL;
910 	int cookie, ret;
911 
912 	if (!drm_dev_enter(ddev, &cookie))
913 		return -ENODEV;
914 
915 	if (!args->size || args->pad ||
916 	    (args->flags & ~PANTHOR_BO_FLAGS)) {
917 		ret = -EINVAL;
918 		goto out_dev_exit;
919 	}
920 
921 	if (args->exclusive_vm_id) {
922 		vm = panthor_vm_pool_get_vm(pfile->vms, args->exclusive_vm_id);
923 		if (!vm) {
924 			ret = -EINVAL;
925 			goto out_dev_exit;
926 		}
927 	}
928 
929 	ret = panthor_gem_create_with_handle(file, ddev, vm, &args->size,
930 					     args->flags, &args->handle);
931 
932 	panthor_vm_put(vm);
933 
934 out_dev_exit:
935 	drm_dev_exit(cookie);
936 	return ret;
937 }
938 
939 static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
940 					struct drm_file *file)
941 {
942 	struct drm_panthor_bo_mmap_offset *args = data;
943 	struct panthor_gem_object *bo;
944 	struct drm_gem_object *obj;
945 	int ret;
946 
947 	if (args->pad)
948 		return -EINVAL;
949 
950 	obj = drm_gem_object_lookup(file, args->handle);
951 	if (!obj)
952 		return -ENOENT;
953 
954 	bo = to_panthor_bo(obj);
955 	if (bo->flags & DRM_PANTHOR_BO_NO_MMAP) {
956 		ret = -EPERM;
957 		goto out;
958 	}
959 
960 	ret = drm_gem_create_mmap_offset(obj);
961 	if (ret)
962 		goto out;
963 
964 	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
965 
966 out:
967 	drm_gem_object_put(obj);
968 	return ret;
969 }
970 
971 static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
972 				      struct drm_file *file)
973 {
974 	struct panthor_file *pfile = file->driver_priv;
975 	struct drm_panthor_group_submit *args = data;
976 	struct drm_panthor_queue_submit *jobs_args;
977 	struct panthor_submit_ctx ctx;
978 	int ret = 0, cookie;
979 
980 	if (args->pad)
981 		return -EINVAL;
982 
983 	if (!drm_dev_enter(ddev, &cookie))
984 		return -ENODEV;
985 
986 	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits);
987 	if (ret)
988 		goto out_dev_exit;
989 
990 	ret = panthor_submit_ctx_init(&ctx, file, args->queue_submits.count);
991 	if (ret)
992 		goto out_free_jobs_args;
993 
994 	/* Create jobs and attach sync operations */
995 	for (u32 i = 0; i < args->queue_submits.count; i++) {
996 		const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
997 		struct drm_sched_job *job;
998 
999 		job = panthor_job_create(pfile, args->group_handle, qsubmit,
1000 					 file->client_id);
1001 		if (IS_ERR(job)) {
1002 			ret = PTR_ERR(job);
1003 			goto out_cleanup_submit_ctx;
1004 		}
1005 
1006 		ret = panthor_submit_ctx_add_job(&ctx, i, job, &qsubmit->syncs);
1007 		if (ret)
1008 			goto out_cleanup_submit_ctx;
1009 	}
1010 
1011 	/*
1012 	 * Collect signal operations on all jobs, such that each job can pick
1013 	 * from it for its dependencies and update the fence to signal when the
1014 	 * job is submitted.
1015 	 */
1016 	ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
1017 	if (ret)
1018 		goto out_cleanup_submit_ctx;
1019 
1020 	/*
1021 	 * We acquire/prepare resvs on all jobs before proceeding with the
1022 	 * dependency registration.
1023 	 *
1024 	 * This is solving two problems:
1025 	 * 1. drm_sched_job_arm() and drm_sched_entity_push_job() must be
1026 	 *    protected by a lock to make sure no concurrent accesses to the same
1027 	 *    entity get interleaved, which would mess up the fence seqno
1028 	 *    ordering. Luckily, one of the resvs being acquired is the VM resv,
1029 	 *    and a scheduling entity is only bound to a single VM. As soon as
1030 	 *    we acquire the VM resv, we should be safe.
1031 	 * 2. Jobs might depend on fences that were issued by previous jobs in
1032 	 *    the same batch, so we can't add dependencies on all jobs before
1033 	 *    arming previous jobs and registering the fence to the signal
1034 	 *    array, otherwise we might miss dependencies, or point to an
1035 	 *    outdated fence.
1036 	 */
1037 	if (args->queue_submits.count > 0) {
1038 		/* All jobs target the same group, so they also point to the same VM. */
1039 		struct panthor_vm *vm = panthor_job_vm(ctx.jobs[0].job);
1040 
1041 		drm_exec_until_all_locked(&ctx.exec) {
1042 			ret = panthor_vm_prepare_mapped_bos_resvs(&ctx.exec, vm,
1043 								  args->queue_submits.count);
1044 		}
1045 
1046 		if (ret)
1047 			goto out_cleanup_submit_ctx;
1048 	}
1049 
1050 	/*
1051 	 * Now that resvs are locked/prepared, we can iterate over each job to
1052 	 * add the dependencies, arm the job fence, register the job fence to
1053 	 * the signal array.
1054 	 */
1055 	ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
1056 	if (ret)
1057 		goto out_cleanup_submit_ctx;
1058 
1059 	/* Nothing can fail after that point, so we can make our job fences
1060 	 * visible to the outside world. Push jobs and set the job fences to
1061 	 * the resv slots we reserved.  This also pushes the fences to the
1062 	 * syncobjs that are part of the signal array.
1063 	 */
1064 	panthor_submit_ctx_push_jobs(&ctx, panthor_job_update_resvs);
1065 
1066 out_cleanup_submit_ctx:
1067 	panthor_submit_ctx_cleanup(&ctx, panthor_job_put);
1068 
1069 out_free_jobs_args:
1070 	kvfree(jobs_args);
1071 
1072 out_dev_exit:
1073 	drm_dev_exit(cookie);
1074 	return ret;
1075 }
1076 
1077 static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
1078 				       struct drm_file *file)
1079 {
1080 	struct panthor_file *pfile = file->driver_priv;
1081 	struct drm_panthor_group_destroy *args = data;
1082 
1083 	if (args->pad)
1084 		return -EINVAL;
1085 
1086 	return panthor_group_destroy(pfile, args->group_handle);
1087 }
1088 
1089 static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
1090 				      struct drm_file *file)
1091 {
1092 	struct panthor_file *pfile = file->driver_priv;
1093 	struct drm_panthor_group_create *args = data;
1094 	struct drm_panthor_queue_create *queue_args;
1095 	int ret;
1096 
1097 	if (!args->queues.count)
1098 		return -EINVAL;
1099 
1100 	ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
1101 	if (ret)
1102 		return ret;
1103 
1104 	ret = group_priority_permit(file, args->priority);
1105 	if (ret) {
		/* Don't leak the queue_args array allocated above. */
		kvfree(queue_args);
1106 		return ret;
	}
1107 
1108 	ret = panthor_group_create(pfile, args, queue_args);
1109 	if (ret >= 0) {
1110 		args->group_handle = ret;
1111 		ret = 0;
1112 	}
1113 
1114 	kvfree(queue_args);
1115 	return ret;
1116 }
1117 
1118 static int panthor_ioctl_group_get_state(struct drm_device *ddev, void *data,
1119 					 struct drm_file *file)
1120 {
1121 	struct panthor_file *pfile = file->driver_priv;
1122 	struct drm_panthor_group_get_state *args = data;
1123 
1124 	return panthor_group_get_state(pfile, args);
1125 }
1126 
1127 static int panthor_ioctl_tiler_heap_create(struct drm_device *ddev, void *data,
1128 					   struct drm_file *file)
1129 {
1130 	struct panthor_file *pfile = file->driver_priv;
1131 	struct drm_panthor_tiler_heap_create *args = data;
1132 	struct panthor_heap_pool *pool;
1133 	struct panthor_vm *vm;
1134 	int ret;
1135 
1136 	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
1137 	if (!vm)
1138 		return -EINVAL;
1139 
1140 	pool = panthor_vm_get_heap_pool(vm, true);
1141 	if (IS_ERR(pool)) {
1142 		ret = PTR_ERR(pool);
1143 		goto out_put_vm;
1144 	}
1145 
1146 	ret = panthor_heap_create(pool,
1147 				  args->initial_chunk_count,
1148 				  args->chunk_size,
1149 				  args->max_chunks,
1150 				  args->target_in_flight,
1151 				  &args->tiler_heap_ctx_gpu_va,
1152 				  &args->first_heap_chunk_gpu_va);
1153 	if (ret < 0)
1154 		goto out_put_heap_pool;
1155 
1156 	/* Heap pools are per-VM. We combine the VM and HEAP id to make
1157 	 * a unique heap handle.
1158 	 */
1159 	args->handle = (args->vm_id << 16) | ret;
1160 	ret = 0;
1161 
1162 out_put_heap_pool:
1163 	panthor_heap_pool_put(pool);
1164 
1165 out_put_vm:
1166 	panthor_vm_put(vm);
1167 	return ret;
1168 }
1169 
1170 static int panthor_ioctl_tiler_heap_destroy(struct drm_device *ddev, void *data,
1171 					    struct drm_file *file)
1172 {
1173 	struct panthor_file *pfile = file->driver_priv;
1174 	struct drm_panthor_tiler_heap_destroy *args = data;
1175 	struct panthor_heap_pool *pool;
1176 	struct panthor_vm *vm;
1177 	int ret;
1178 
1179 	if (args->pad)
1180 		return -EINVAL;
1181 
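	/* The heap handle packs the VM id in the upper 16 bits and the heap id
	 * in the lower 16 bits (see panthor_ioctl_tiler_heap_create()).
	 */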
1182 	vm = panthor_vm_pool_get_vm(pfile->vms, args->handle >> 16);
1183 	if (!vm)
1184 		return -EINVAL;
1185 
1186 	pool = panthor_vm_get_heap_pool(vm, false);
1187 	if (IS_ERR(pool)) {
1188 		ret = PTR_ERR(pool);
1189 		goto out_put_vm;
1190 	}
1191 
1192 	ret = panthor_heap_destroy(pool, args->handle & GENMASK(15, 0));
1193 	panthor_heap_pool_put(pool);
1194 
1195 out_put_vm:
1196 	panthor_vm_put(vm);
1197 	return ret;
1198 }
1199 
1200 static int panthor_ioctl_vm_bind_async(struct drm_device *ddev,
1201 				       struct drm_panthor_vm_bind *args,
1202 				       struct drm_file *file)
1203 {
1204 	struct panthor_file *pfile = file->driver_priv;
1205 	struct drm_panthor_vm_bind_op *jobs_args;
1206 	struct panthor_submit_ctx ctx;
1207 	struct panthor_vm *vm;
1208 	int ret = 0;
1209 
1210 	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
1211 	if (!vm)
1212 		return -EINVAL;
1213 
1214 	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
1215 	if (ret)
1216 		goto out_put_vm;
1217 
1218 	ret = panthor_submit_ctx_init(&ctx, file, args->ops.count);
1219 	if (ret)
1220 		goto out_free_jobs_args;
1221 
1222 	for (u32 i = 0; i < args->ops.count; i++) {
1223 		struct drm_panthor_vm_bind_op *op = &jobs_args[i];
1224 		struct drm_sched_job *job;
1225 
1226 		job = panthor_vm_bind_job_create(file, vm, op);
1227 		if (IS_ERR(job)) {
1228 			ret = PTR_ERR(job);
1229 			goto out_cleanup_submit_ctx;
1230 		}
1231 
1232 		ret = panthor_submit_ctx_add_job(&ctx, i, job, &op->syncs);
1233 		if (ret)
1234 			goto out_cleanup_submit_ctx;
1235 	}
1236 
1237 	ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
1238 	if (ret)
1239 		goto out_cleanup_submit_ctx;
1240 
1241 	/* Prepare reservation objects for each VM_BIND job. */
1242 	drm_exec_until_all_locked(&ctx.exec) {
1243 		for (u32 i = 0; i < ctx.job_count; i++) {
1244 			ret = panthor_vm_bind_job_prepare_resvs(&ctx.exec, ctx.jobs[i].job);
1245 			drm_exec_retry_on_contention(&ctx.exec);
1246 			if (ret)
1247 				goto out_cleanup_submit_ctx;
1248 		}
1249 	}
1250 
1251 	ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
1252 	if (ret)
1253 		goto out_cleanup_submit_ctx;
1254 
1255 	/* Nothing can fail after that point. */
1256 	panthor_submit_ctx_push_jobs(&ctx, panthor_vm_bind_job_update_resvs);
1257 
1258 out_cleanup_submit_ctx:
1259 	panthor_submit_ctx_cleanup(&ctx, panthor_vm_bind_job_put);
1260 
1261 out_free_jobs_args:
1262 	kvfree(jobs_args);
1263 
1264 out_put_vm:
1265 	panthor_vm_put(vm);
1266 	return ret;
1267 }
1268 
1269 static int panthor_ioctl_vm_bind_sync(struct drm_device *ddev,
1270 				      struct drm_panthor_vm_bind *args,
1271 				      struct drm_file *file)
1272 {
1273 	struct panthor_file *pfile = file->driver_priv;
1274 	struct drm_panthor_vm_bind_op *jobs_args;
1275 	struct panthor_vm *vm;
1276 	int ret;
1277 
1278 	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
1279 	if (!vm)
1280 		return -EINVAL;
1281 
1282 	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
1283 	if (ret)
1284 		goto out_put_vm;
1285 
1286 	for (u32 i = 0; i < args->ops.count; i++) {
1287 		ret = panthor_vm_bind_exec_sync_op(file, vm, &jobs_args[i]);
1288 		if (ret) {
1289 			/* Update ops.count so the user knows where things failed. */
1290 			args->ops.count = i;
1291 			break;
1292 		}
1293 	}
1294 
1295 	kvfree(jobs_args);
1296 
1297 out_put_vm:
1298 	panthor_vm_put(vm);
1299 	return ret;
1300 }
1301 
1302 #define PANTHOR_VM_BIND_FLAGS DRM_PANTHOR_VM_BIND_ASYNC
1303 
1304 static int panthor_ioctl_vm_bind(struct drm_device *ddev, void *data,
1305 				 struct drm_file *file)
1306 {
1307 	struct drm_panthor_vm_bind *args = data;
1308 	int cookie, ret;
1309 
1310 	if (!drm_dev_enter(ddev, &cookie))
1311 		return -ENODEV;
1312 
1313 	if (args->flags & DRM_PANTHOR_VM_BIND_ASYNC)
1314 		ret = panthor_ioctl_vm_bind_async(ddev, args, file);
1315 	else
1316 		ret = panthor_ioctl_vm_bind_sync(ddev, args, file);
1317 
1318 	drm_dev_exit(cookie);
1319 	return ret;
1320 }
1321 
1322 static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
1323 				      struct drm_file *file)
1324 {
1325 	struct panthor_file *pfile = file->driver_priv;
1326 	struct drm_panthor_vm_get_state *args = data;
1327 	struct panthor_vm *vm;
1328 
1329 	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
1330 	if (!vm)
1331 		return -EINVAL;
1332 
1333 	if (panthor_vm_is_unusable(vm))
1334 		args->state = DRM_PANTHOR_VM_STATE_UNUSABLE;
1335 	else
1336 		args->state = DRM_PANTHOR_VM_STATE_USABLE;
1337 
1338 	panthor_vm_put(vm);
1339 	return 0;
1340 }
1341 
1342 static int panthor_ioctl_bo_set_label(struct drm_device *ddev, void *data,
1343 				      struct drm_file *file)
1344 {
1345 	struct drm_panthor_bo_set_label *args = data;
1346 	struct drm_gem_object *obj;
1347 	const char *label = NULL;
1348 	int ret = 0;
1349 
1350 	if (args->pad)
1351 		return -EINVAL;
1352 
1353 	obj = drm_gem_object_lookup(file, args->handle);
1354 	if (!obj)
1355 		return -ENOENT;
1356 
1357 	if (args->label) {
1358 		label = strndup_user((const char __user *)(uintptr_t)args->label,
1359 				     PANTHOR_BO_LABEL_MAXLEN);
1360 		if (IS_ERR(label)) {
1361 			ret = PTR_ERR(label);
1362 			if (ret == -EINVAL)
1363 				ret = -E2BIG;
1364 			goto err_put_obj;
1365 		}
1366 	}
1367 
1368 	/*
1369 	 * We treat passing a label of length 0 and passing a NULL label
1370 	 * differently, because even though they might seem conceptually
1371 	 * similar, future uses of the BO label might expect a different
1372 	 * behaviour in each case.
1373 	 */
1374 	panthor_gem_bo_set_label(obj, label);
1375 
1376 err_put_obj:
1377 	drm_gem_object_put(obj);
1378 
1379 	return ret;
1380 }
1381 
1382 static int panthor_ioctl_set_user_mmio_offset(struct drm_device *ddev,
1383 					      void *data, struct drm_file *file)
1384 {
1385 	struct drm_panthor_set_user_mmio_offset *args = data;
1386 	struct panthor_file *pfile = file->driver_priv;
1387 
1388 	if (args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_32BIT &&
1389 	    args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
1390 		return -EINVAL;
1391 
1392 	WRITE_ONCE(pfile->user_mmio.offset, args->offset);
1393 	return 0;
1394 }
1395 
1396 static int
1397 panthor_open(struct drm_device *ddev, struct drm_file *file)
1398 {
1399 	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
1400 	struct panthor_file *pfile;
1401 	int ret;
1402 
1403 	if (!try_module_get(THIS_MODULE))
1404 		return -EINVAL;
1405 
1406 	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
1407 	if (!pfile) {
1408 		ret = -ENOMEM;
1409 		goto err_put_mod;
1410 	}
1411 
1412 	pfile->ptdev = ptdev;
1413 	pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET;
1414 
1415 #ifdef CONFIG_ARM64
1416 	/*
1417 	 * With 32-bit systems being limited by the 32-bit representation of
1418 	 * mmap2's pgoffset field, we need to make the MMIO offset arch
1419 	 * specific.
1420 	 */
1421 	if (test_tsk_thread_flag(current, TIF_32BIT))
1422 		pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
1423 #endif
1424 
1425 
1426 	ret = panthor_vm_pool_create(pfile);
1427 	if (ret)
1428 		goto err_free_file;
1429 
1430 	ret = panthor_group_pool_create(pfile);
1431 	if (ret)
1432 		goto err_destroy_vm_pool;
1433 
1434 	file->driver_priv = pfile;
1435 	return 0;
1436 
1437 err_destroy_vm_pool:
1438 	panthor_vm_pool_destroy(pfile);
1439 
1440 err_free_file:
1441 	kfree(pfile);
1442 
1443 err_put_mod:
1444 	module_put(THIS_MODULE);
1445 	return ret;
1446 }
1447 
1448 static void
1449 panthor_postclose(struct drm_device *ddev, struct drm_file *file)
1450 {
1451 	struct panthor_file *pfile = file->driver_priv;
1452 
1453 	panthor_group_pool_destroy(pfile);
1454 	panthor_vm_pool_destroy(pfile);
1455 
1456 	kfree(pfile);
1457 	module_put(THIS_MODULE);
1458 }
1459 
1460 static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
1461 #define PANTHOR_IOCTL(n, func, flags) \
1462 	DRM_IOCTL_DEF_DRV(PANTHOR_##n, panthor_ioctl_##func, flags)
1463 
1464 	PANTHOR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
1465 	PANTHOR_IOCTL(VM_CREATE, vm_create, DRM_RENDER_ALLOW),
1466 	PANTHOR_IOCTL(VM_DESTROY, vm_destroy, DRM_RENDER_ALLOW),
1467 	PANTHOR_IOCTL(VM_BIND, vm_bind, DRM_RENDER_ALLOW),
1468 	PANTHOR_IOCTL(VM_GET_STATE, vm_get_state, DRM_RENDER_ALLOW),
1469 	PANTHOR_IOCTL(BO_CREATE, bo_create, DRM_RENDER_ALLOW),
1470 	PANTHOR_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, DRM_RENDER_ALLOW),
1471 	PANTHOR_IOCTL(GROUP_CREATE, group_create, DRM_RENDER_ALLOW),
1472 	PANTHOR_IOCTL(GROUP_DESTROY, group_destroy, DRM_RENDER_ALLOW),
1473 	PANTHOR_IOCTL(GROUP_GET_STATE, group_get_state, DRM_RENDER_ALLOW),
1474 	PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
1475 	PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
1476 	PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
1477 	PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
1478 	PANTHOR_IOCTL(SET_USER_MMIO_OFFSET, set_user_mmio_offset, DRM_RENDER_ALLOW),
1479 };
1480 
1481 static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
1482 {
1483 	struct drm_file *file = filp->private_data;
1484 	struct panthor_file *pfile = file->driver_priv;
1485 	struct panthor_device *ptdev = pfile->ptdev;
1486 	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
1487 	u64 user_mmio_offset;
1488 	int ret, cookie;
1489 
1490 	if (!drm_dev_enter(file->minor->dev, &cookie))
1491 		return -ENODEV;
1492 
1493 	/* Adjust the user MMIO offset to match the offset used kernel side.
1494 	 * We use a local variable with a READ_ONCE() here to make sure
1495 	 * the user_mmio_offset we use for the is_user_mmio_mapping() check
1496 	 * hasn't changed when we do the offset adjustment.
1497 	 */
1498 	user_mmio_offset = READ_ONCE(pfile->user_mmio.offset);
1499 	if (offset >= user_mmio_offset) {
1500 		offset -= user_mmio_offset;
1501 		offset += DRM_PANTHOR_USER_MMIO_OFFSET;
1502 		vma->vm_pgoff = offset >> PAGE_SHIFT;
1503 		ret = panthor_device_mmap_io(ptdev, vma);
1504 	} else {
1505 		ret = drm_gem_mmap(filp, vma);
1506 	}
1507 
1508 	drm_dev_exit(cookie);
1509 	return ret;
1510 }
1511 
1512 static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
1513 				    struct panthor_file *pfile,
1514 				    struct drm_printer *p)
1515 {
1516 	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_ALL)
1517 		panthor_fdinfo_gather_group_samples(pfile);
1518 
1519 	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) {
1520 #ifdef CONFIG_ARM_ARCH_TIMER
1521 		drm_printf(p, "drm-engine-panthor:\t%llu ns\n",
1522 			   DIV_ROUND_UP_ULL((pfile->stats.time * NSEC_PER_SEC),
1523 					    arch_timer_get_cntfrq()));
1524 #endif
1525 	}
1526 	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
1527 		drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles);
1528 
1529 	drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate);
1530 	drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
1531 }
1532 
1533 static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
1534 {
1535 	char *drv_name = file->minor->dev->driver->name;
1536 	struct panthor_file *pfile = file->driver_priv;
1537 	struct drm_memory_stats stats = {0};
1538 
1539 	panthor_fdinfo_gather_group_mem_info(pfile, &stats);
1540 	panthor_vm_heaps_sizes(pfile, &stats);
1541 
1542 	drm_fdinfo_print_size(p, drv_name, "resident", "memory", stats.resident);
1543 	drm_fdinfo_print_size(p, drv_name, "active", "memory", stats.active);
1544 }
1545 
1546 static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
1547 {
1548 	struct drm_device *dev = file->minor->dev;
1549 	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
1550 
1551 	panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
1552 	panthor_show_internal_memory_stats(p, file);
1553 
1554 	drm_show_memory_stats(p, file);
1555 }
1556 
1557 static const struct file_operations panthor_drm_driver_fops = {
1558 	.open = drm_open,
1559 	.release = drm_release,
1560 	.unlocked_ioctl = drm_ioctl,
1561 	.compat_ioctl = drm_compat_ioctl,
1562 	.poll = drm_poll,
1563 	.read = drm_read,
1564 	.llseek = noop_llseek,
1565 	.mmap = panthor_mmap,
1566 	.show_fdinfo = drm_show_fdinfo,
1567 	.fop_flags = FOP_UNSIGNED_OFFSET,
1568 };
1569 
1570 #ifdef CONFIG_DEBUG_FS
1571 static int panthor_gems_show(struct seq_file *m, void *data)
1572 {
1573 	struct drm_info_node *node = m->private;
1574 	struct drm_device *dev = node->minor->dev;
1575 	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
1576 
1577 	panthor_gem_debugfs_print_bos(ptdev, m);
1578 
1579 	return 0;
1580 }
1581 
1582 static struct drm_info_list panthor_debugfs_list[] = {
1583 	{"gems", panthor_gems_show, 0, NULL},
1584 };
1585 
1586 static int panthor_gems_debugfs_init(struct drm_minor *minor)
1587 {
1588 	drm_debugfs_create_files(panthor_debugfs_list,
1589 				 ARRAY_SIZE(panthor_debugfs_list),
1590 				 minor->debugfs_root, minor);
1591 
1592 	return 0;
1593 }
1594 
1595 static void panthor_debugfs_init(struct drm_minor *minor)
1596 {
1597 	panthor_mmu_debugfs_init(minor);
1598 	panthor_gems_debugfs_init(minor);
1599 }
1600 #endif
1601 
1602 /*
1603  * PanCSF driver version:
1604  * - 1.0 - initial interface
1605  * - 1.1 - adds DEV_QUERY_TIMESTAMP_INFO query
1606  * - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query
1607  *       - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
1608  * - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
1609  * - 1.4 - adds DRM_IOCTL_PANTHOR_BO_SET_LABEL ioctl
1610  * - 1.5 - adds DRM_PANTHOR_SET_USER_MMIO_OFFSET ioctl
1611  */
1612 static const struct drm_driver panthor_drm_driver = {
1613 	.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
1614 			   DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
1615 	.open = panthor_open,
1616 	.postclose = panthor_postclose,
1617 	.show_fdinfo = panthor_show_fdinfo,
1618 	.ioctls = panthor_drm_driver_ioctls,
1619 	.num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
1620 	.fops = &panthor_drm_driver_fops,
1621 	.name = "panthor",
1622 	.desc = "Panthor DRM driver",
1623 	.major = 1,
1624 	.minor = 5,
1625 
1626 	.gem_create_object = panthor_gem_create_object,
1627 	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
1628 #ifdef CONFIG_DEBUG_FS
1629 	.debugfs_init = panthor_debugfs_init,
1630 #endif
1631 };
1632 
1633 static int panthor_probe(struct platform_device *pdev)
1634 {
1635 	struct panthor_device *ptdev;
1636 
1637 	ptdev = devm_drm_dev_alloc(&pdev->dev, &panthor_drm_driver,
1638 				   struct panthor_device, base);
1639 	if (IS_ERR(ptdev))
1640 		return -ENOMEM;
1641 
1642 	platform_set_drvdata(pdev, ptdev);
1643 
1644 	return panthor_device_init(ptdev);
1645 }
1646 
1647 static void panthor_remove(struct platform_device *pdev)
1648 {
1649 	struct panthor_device *ptdev = platform_get_drvdata(pdev);
1650 
1651 	panthor_device_unplug(ptdev);
1652 }
1653 
1654 static ssize_t profiling_show(struct device *dev,
1655 			      struct device_attribute *attr,
1656 			      char *buf)
1657 {
1658 	struct panthor_device *ptdev = dev_get_drvdata(dev);
1659 
1660 	return sysfs_emit(buf, "%d\n", ptdev->profile_mask);
1661 }
1662 
1663 static ssize_t profiling_store(struct device *dev,
1664 			       struct device_attribute *attr,
1665 			       const char *buf, size_t len)
1666 {
1667 	struct panthor_device *ptdev = dev_get_drvdata(dev);
1668 	u32 value;
1669 	int err;
1670 
1671 	err = kstrtou32(buf, 0, &value);
1672 	if (err)
1673 		return err;
1674 
1675 	if ((value & ~PANTHOR_DEVICE_PROFILING_ALL) != 0)
1676 		return -EINVAL;
1677 
1678 	ptdev->profile_mask = value;
1679 
1680 	return len;
1681 }
1682 
1683 static DEVICE_ATTR_RW(profiling);
1684 
1685 static struct attribute *panthor_attrs[] = {
1686 	&dev_attr_profiling.attr,
1687 	NULL,
1688 };
1689 
1690 ATTRIBUTE_GROUPS(panthor);
1691 
1692 static const struct of_device_id dt_match[] = {
1693 	{ .compatible = "rockchip,rk3588-mali" },
1694 	{ .compatible = "arm,mali-valhall-csf" },
1695 	{}
1696 };
1697 MODULE_DEVICE_TABLE(of, dt_match);
1698 
1699 static DEFINE_RUNTIME_DEV_PM_OPS(panthor_pm_ops,
1700 				 panthor_device_suspend,
1701 				 panthor_device_resume,
1702 				 NULL);
1703 
1704 static struct platform_driver panthor_driver = {
1705 	.probe = panthor_probe,
1706 	.remove = panthor_remove,
1707 	.driver = {
1708 		.name = "panthor",
1709 		.pm = pm_ptr(&panthor_pm_ops),
1710 		.of_match_table = dt_match,
1711 		.dev_groups = panthor_groups,
1712 	},
1713 };
1714 
1715 /*
1716  * Workqueue used to cleanup stuff.
1717  *
1718  * We create a dedicated workqueue so we can drain on unplug and
1719  * make sure all resources are freed before the module is unloaded.
1720  */
1721 struct workqueue_struct *panthor_cleanup_wq;
1722 
1723 static int __init panthor_init(void)
1724 {
1725 	int ret;
1726 
1727 	ret = panthor_mmu_pt_cache_init();
1728 	if (ret)
1729 		return ret;
1730 
1731 	panthor_cleanup_wq = alloc_workqueue("panthor-cleanup", WQ_UNBOUND, 0);
1732 	if (!panthor_cleanup_wq) {
1733 		pr_err("panthor: Failed to allocate the workqueues");
1734 		ret = -ENOMEM;
1735 		goto err_mmu_pt_cache_fini;
1736 	}
1737 
1738 	ret = platform_driver_register(&panthor_driver);
1739 	if (ret)
1740 		goto err_destroy_cleanup_wq;
1741 
1742 	return 0;
1743 
1744 err_destroy_cleanup_wq:
1745 	destroy_workqueue(panthor_cleanup_wq);
1746 
1747 err_mmu_pt_cache_fini:
1748 	panthor_mmu_pt_cache_fini();
1749 	return ret;
1750 }
1751 module_init(panthor_init);
1752 
1753 static void __exit panthor_exit(void)
1754 {
1755 	platform_driver_unregister(&panthor_driver);
1756 	destroy_workqueue(panthor_cleanup_wq);
1757 	panthor_mmu_pt_cache_fini();
1758 }
1759 module_exit(panthor_exit);
1760 
1761 MODULE_AUTHOR("Panthor Project Developers");
1762 MODULE_DESCRIPTION("Panthor DRM Driver");
1763 MODULE_LICENSE("Dual MIT/GPL");
1764