1 /* SPDX-License-Identifier: GPL-2.0 or MIT */
2 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
3 /* Copyright 2023 Collabora ltd. */
4
5 #ifndef __PANTHOR_GEM_H__
6 #define __PANTHOR_GEM_H__
7
8 #include <drm/drm_gem_shmem_helper.h>
9 #include <drm/drm_mm.h>
10
11 #include <linux/iosys-map.h>
12 #include <linux/rwsem.h>
13
14 struct panthor_vm;
15
16 /**
17 * struct panthor_gem_object - Driver specific GEM object.
18 */
19 struct panthor_gem_object {
20 /** @base: Inherit from drm_gem_shmem_object. */
21 struct drm_gem_shmem_object base;
22
23 /**
24 * @exclusive_vm_root_gem: Root GEM of the exclusive VM this GEM object
25 * is attached to.
26 *
27 * If @exclusive_vm_root_gem != NULL, any attempt to bind the GEM to a
28 * different VM will fail.
29 *
30 * All FW memory objects have this field set to the root GEM of the MCU
31 * VM.
32 */
33 struct drm_gem_object *exclusive_vm_root_gem;
34
35 /**
36 * @gpuva_list_lock: Custom GPUVA lock.
37 *
38 * Used to protect insertion of drm_gpuva elements to the
39 * drm_gem_object.gpuva.list list.
40 *
41 * We can't use the GEM resv for that, because drm_gpuva_link() is
42 * called in a dma-signaling path, where we're not allowed to take
43 * resv locks.
44 */
45 struct mutex gpuva_list_lock;
46
47 /** @flags: Combination of drm_panthor_bo_flags flags. */
48 u32 flags;
49 };
50
51 /**
52 * struct panthor_kernel_bo - Kernel buffer object.
53 *
54 * These objects are only manipulated by the kernel driver and not
55 * directly exposed to the userspace. The GPU address of a kernel
56 * BO might be passed to userspace though.
57 */
58 struct panthor_kernel_bo {
59 /**
60 * @obj: The GEM object backing this kernel buffer object.
61 */
62 struct drm_gem_object *obj;
63
64 /**
65 * @vm: VM this private buffer is attached to.
66 */
67 struct panthor_vm *vm;
68
69 /**
70 * @va_node: VA space allocated to this GEM.
71 */
72 struct drm_mm_node va_node;
73
74 /**
75 * @kmap: Kernel CPU mapping of @gem.
76 */
77 void *kmap;
78 };
79
80 static inline
to_panthor_bo(struct drm_gem_object * obj)81 struct panthor_gem_object *to_panthor_bo(struct drm_gem_object *obj)
82 {
83 return container_of(to_drm_gem_shmem_obj(obj), struct panthor_gem_object, base);
84 }
85
/* Allocate a panthor GEM object of @size bytes (kernel-doc at the definition). */
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size);

/*
 * Create a GEM object and return a userspace handle for it in @handle.
 * When @exclusive_vm is non-NULL, the BO can only ever be bound to that VM.
 */
int
panthor_gem_create_with_handle(struct drm_file *file,
			       struct drm_device *ddev,
			       struct panthor_vm *exclusive_vm,
			       u64 *size, u32 flags, uint32_t *handle);
93
94 static inline u64
panthor_kernel_bo_gpuva(struct panthor_kernel_bo * bo)95 panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
96 {
97 return bo->va_node.start;
98 }
99
100 static inline size_t
panthor_kernel_bo_size(struct panthor_kernel_bo * bo)101 panthor_kernel_bo_size(struct panthor_kernel_bo *bo)
102 {
103 return bo->obj->size;
104 }
105
106 static inline int
panthor_kernel_bo_vmap(struct panthor_kernel_bo * bo)107 panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
108 {
109 struct iosys_map map;
110 int ret;
111
112 if (bo->kmap)
113 return 0;
114
115 ret = drm_gem_vmap_unlocked(bo->obj, &map);
116 if (ret)
117 return ret;
118
119 bo->kmap = map.vaddr;
120 return 0;
121 }
122
123 static inline void
panthor_kernel_bo_vunmap(struct panthor_kernel_bo * bo)124 panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
125 {
126 if (bo->kmap) {
127 struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);
128
129 drm_gem_vunmap_unlocked(bo->obj, &map);
130 bo->kmap = NULL;
131 }
132 }
133
/*
 * Allocate a kernel BO of @size bytes, map it GPU-side in @vm (at @gpu_va
 * when set, otherwise at a VA chosen by the driver). Kernel-doc at the
 * definition.
 */
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
			 size_t size, u32 bo_flags, u32 vm_map_flags,
			 u64 gpu_va);

/* Release a kernel BO created with panthor_kernel_bo_create(). */
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
140
141 #endif /* __PANTHOR_GEM_H__ */
142