/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <drm/drm_gpuvm.h>

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"

struct xe_bo;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;

#define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
#define XE_VMA_FIRST_REBIND	(DRM_GPUVA_USERBITS << 3)
#define XE_VMA_LAST_REBIND	(DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 7)
#define XE_VMA_PTE_64K		(DRM_GPUVA_USERBITS << 8)
#define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 9)

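/*
 * These VMA flags extend the DRM GPUVA flag space starting at
 * DRM_GPUVA_USERBITS and live in the embedded &drm_gpuva's flags field.
 * Minimal illustrative sketch of testing one of them; the helper name is
 * hypothetical and not part of the driver:
 *
 *	static bool example_vma_read_only(struct xe_vma *vma)
 *	{
 *		return vma->gpuva.flags & XE_VMA_READ_ONLY;
 *	}
 */
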
/** struct xe_userptr - User pointer */
struct xe_userptr {
	/** @invalidate_link: Link for the vm::userptr.invalidated list */
	struct list_head invalidate_link;
	/** @repin_link: link into the VM repin list if this is a userptr. */
	struct list_head repin_link;
	/**
	 * @notifier: MMU notifier for user pointer (invalidation call back)
	 */
	struct mmu_interval_notifier notifier;
	/** @sgt: storage for a scatter gather table */
	struct sg_table sgt;
	/** @sg: allocated scatter gather table */
	struct sg_table *sg;
	/** @notifier_seq: notifier sequence number */
	unsigned long notifier_seq;
	/**
	 * @initial_bind: user pointer has been bound at least once.
	 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
	 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
	 */
	bool initial_bind;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
	/** @divisor: divisor for injecting userptr invalidation errors (debug) */
	u32 divisor;
#endif
};
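
/*
 * @notifier and @notifier_seq are used with the generic
 * mmu_interval_notifier read_begin/read_retry pattern. A minimal sketch of
 * that pattern, assuming a hypothetical example_pin_pages() that (re)builds
 * the scatter gather table (this is not the driver's actual repin path):
 *
 *	userptr->notifier_seq = mmu_interval_read_begin(&userptr->notifier);
 *	err = example_pin_pages(userptr);
 *	...
 *	down_read(&vm->userptr.notifier_lock);
 *	if (mmu_interval_read_retry(&userptr->notifier,
 *				    userptr->notifier_seq)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		... pages were invalidated, retry ...
 *	}
 */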

struct xe_vma {
	/** @gpuva: Base GPUVA object */
	struct drm_gpuva gpuva;

	/**
	 * @combined_links: links into lists which are mutually exclusive.
	 * Locking: vm lock in write mode OR vm lock in read mode and the vm's
	 * resv.
	 */
	union {
		/** @rebind: link into VM if this VMA needs rebinding. */
		struct list_head rebind;
		/** @destroy: link to contested list when VM is being closed. */
		struct list_head destroy;
	} combined_links;

	union {
		/** @destroy_cb: callback to destroy VMA when unbind job is done */
		struct dma_fence_cb destroy_cb;
		/** @destroy_work: worker to destroy this VMA */
		struct work_struct destroy_work;
	};

	/** @usm: unified shared memory state */
	struct {
		/** @tile_invalidated: VMA has been invalidated */
		u8 tile_invalidated;
	} usm;

	/** @tile_mask: Tile mask of where to create binding for this VMA */
	u8 tile_mask;

	/**
	 * @tile_present: Tile mask of which bindings are present for this VMA.
	 * Protected by vm->lock, vm->resv and, for userptrs,
	 * vm->userptr.notifier_lock for writing. Needs either for reading,
	 * but if reading is done under the vm->lock only, it needs to be held
	 * in write mode.
	 */
	u8 tile_present;

	/**
	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
	 */
	u16 pat_index;

	/**
	 * @ufence: The user fence that was provided with MAP.
	 * Needs to be signalled before UNMAP can be processed.
	 */
	struct xe_user_fence *ufence;
};
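
/*
 * Illustrative sketch of how the per-tile bitmasks above are consulted,
 * where a tile's id selects the bit (for_each_tile() is the driver's tile
 * iterator; the surrounding code is hypothetical):
 *
 *	for_each_tile(tile, xe, id)
 *		if ((vma->tile_mask & BIT(id)) && !(vma->tile_present & BIT(id)))
 *			... a binding still needs to be created on this tile ...
 */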

/**
 * struct xe_userptr_vma - A userptr vma subclass
 * @vma: The vma.
 * @userptr: Additional userptr information.
 */
struct xe_userptr_vma {
	struct xe_vma vma;
	struct xe_userptr userptr;
};
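
/*
 * struct xe_vma is embedded as the first member, so a &struct xe_vma pointer
 * that is known to be a userptr can be converted back with container_of().
 * Minimal sketch; the driver ships its own helper for this conversion:
 *
 *	static struct xe_userptr_vma *example_to_userptr_vma(struct xe_vma *vma)
 *	{
 *		return container_of(vma, struct xe_userptr_vma, vma);
 *	}
 */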

struct xe_device;

struct xe_vm {
	/** @gpuvm: base GPUVM used to track VMAs */
	struct drm_gpuvm gpuvm;

	/** @xe: xe device */
	struct xe_device *xe;

	/** @q: exec queues used for (un)binding VMAs, one per tile */
	struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];

	/** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
	struct ttm_lru_bulk_move lru_bulk_move;

	/** @size: size of the VM address space */
	u64 size;

	/** @pt_root: page table roots, one per tile */
	struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
	/** @scratch_pt: scratch page tables, per tile and per level */
	struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];

	/**
	 * @flags: flags for this VM, statically set up at creation time aside
	 * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
	 */
#define XE_VM_FLAG_64K			BIT(0)
#define XE_VM_FLAG_LR_MODE		BIT(1)
#define XE_VM_FLAG_MIGRATION		BIT(2)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(3)
#define XE_VM_FLAG_FAULT_MODE		BIT(4)
#define XE_VM_FLAG_BANNED		BIT(5)
#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(7, 6), (tile)->id)
	unsigned long flags;
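
	/*
	 * The tile id of e.g. a migration VM is packed into bits 7:6 of
	 * @flags. Rough sketch of how the two tile-id macros above pair up:
	 *
	 *	flags = XE_VM_FLAG_MIGRATION | XE_VM_FLAG_SET_TILE_ID(tile);
	 *	...
	 *	tile_id = XE_VM_FLAG_TILE_ID(vm->flags);
	 */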

	/** @composite_fence_ctx: context for the composite fence */
	u64 composite_fence_ctx;
	/** @composite_fence_seqno: seqno for composite fence */
	u32 composite_fence_seqno;

	/**
	 * @lock: outermost lock, protects the state of anything attached to
	 * this VM
	 */
	struct rw_semaphore lock;

	/**
	 * @rebind_list: list of VMAs that need rebinding. Protected by the
	 * vm->lock in write mode, OR (the vm->lock in read mode and the
	 * vm resv).
	 */
	struct list_head rebind_list;

	/** @rebind_fence: rebind fence from execbuf */
	struct dma_fence *rebind_fence;

	/**
	 * @destroy_work: worker to destroy the VM, needed because the last put
	 * can happen from dma_fence signaling in irq context while the destroy
	 * needs to be able to sleep.
	 */
	struct work_struct destroy_work;

	/**
	 * @rftree: range fence tree to track updates to page table structure.
	 * Used to implement conflict tracking between independent bind engines.
	 */
	struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];

	/** @async_ops: async VM operations (binds / unbinds) */
	struct {
		/** @pending: list of pending async VM ops */
		struct list_head pending;
		/** @work: worker to execute async VM ops */
		struct work_struct work;
		/** @lock: protects list of pending async VM ops and fences */
		spinlock_t lock;
		/** @fence: fence state */
		struct {
			/** @context: context of async fence */
			u64 context;
			/** @seqno: seqno of async fence */
			u32 seqno;
		} fence;
		/** @error: error state for async VM ops */
		int error;
		/**
		 * @munmap_rebind_inflight: a munmap style VM bind is in the
		 * middle of a set of ops which requires a rebind at the end.
		 */
		bool munmap_rebind_inflight;
	} async_ops;

	/** @pt_ops: page table operations for this VM */
	const struct xe_pt_ops *pt_ops;

	/** @userptr: user pointer state */
	struct {
		/**
		 * @userptr.repin_list: list of VMAs which are user pointers,
		 * and need repinning. Protected by @lock.
		 */
		struct list_head repin_list;
		/**
		 * @userptr.notifier_lock: protects notifier in write mode and
		 * submission in read mode.
		 */
		struct rw_semaphore notifier_lock;
		/**
		 * @userptr.invalidated_lock: Protects the
		 * @userptr.invalidated list.
		 */
		spinlock_t invalidated_lock;
		/**
		 * @userptr.invalidated: List of invalidated userptrs, not yet
		 * picked up for revalidation. Protected from access with the
		 * @userptr.invalidated_lock. Removing items from the list
		 * additionally requires @lock in write mode, and adding
		 * items to the list requires the @userptr.notifier_lock in
		 * write mode.
		 */
		struct list_head invalidated;
	} userptr;
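
	/*
	 * Rough sketch of the locking rules documented for
	 * @userptr.invalidated above; not a verbatim copy of the driver paths:
	 *
	 * adding (invalidation notifier, notifier_lock held in write mode):
	 *	spin_lock(&vm->userptr.invalidated_lock);
	 *	list_move_tail(&userptr->invalidate_link, &vm->userptr.invalidated);
	 *	spin_unlock(&vm->userptr.invalidated_lock);
	 *
	 * removing (revalidation, vm->lock held in write mode):
	 *	spin_lock(&vm->userptr.invalidated_lock);
	 *	list_del_init(&userptr->invalidate_link);
	 *	spin_unlock(&vm->userptr.invalidated_lock);
	 */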

	/** @preempt: preempt state */
	struct {
		/**
		 * @min_run_period_ms: The minimum run period before preempting
		 * an engine again
		 */
		s64 min_run_period_ms;
		/** @exec_queues: list of exec queues attached to this VM */
		struct list_head exec_queues;
		/** @num_exec_queues: number of exec queues attached to this VM */
		int num_exec_queues;
		/**
		 * @rebind_deactivated: Whether rebind has been temporarily deactivated
		 * due to no work available. Protected by the vm resv.
		 */
		bool rebind_deactivated;
		/**
		 * @rebind_work: worker to rebind invalidated userptrs / evicted
		 * BOs
		 */
		struct work_struct rebind_work;
	} preempt;

	/** @usm: unified shared memory state */
	struct {
		/** @asid: address space ID, unique to each VM */
		u32 asid;
		/**
		 * @last_fault_vma: Last fault VMA, used for fast lookup when we
		 * get a flood of faults to the same VMA
		 */
		struct xe_vma *last_fault_vma;
	} usm;

	/** @error_capture: error capture state, allows tracking errors */
	struct {
		/** @capture_once: capture only one error per VM */
		bool capture_once;
	} error_capture;

	/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
	bool batch_invalidate_tlb;
	/** @xef: XE file handle for tracking this VM's drm client */
	struct xe_file *xef;
};
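
/*
 * struct xe_vm embeds its &drm_gpuvm base, so GPUVM core code that hands
 * back a &drm_gpuvm pointer can be converted to the containing VM with
 * container_of(). Minimal sketch; the driver defines its own helper for this:
 *
 *	static struct xe_vm *example_gpuvm_to_vm(struct drm_gpuvm *gpuvm)
 *	{
 *		return container_of(gpuvm, struct xe_vm, gpuvm);
 *	}
 */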

/** struct xe_vma_op_map - VMA map operation */
struct xe_vma_op_map {
	/** @vma: VMA to map */
	struct xe_vma *vma;
	/** @is_null: is NULL binding */
	bool is_null;
	/** @pat_index: The pat index to use for this operation. */
	u16 pat_index;
};

/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
	/** @prev: VMA preceding part of a split mapping */
	struct xe_vma *prev;
	/** @next: VMA subsequent part of a split mapping */
	struct xe_vma *next;
	/** @start: start of the VMA unmap */
	u64 start;
	/** @range: range of the VMA unmap */
	u64 range;
	/** @skip_prev: skip prev rebind */
	bool skip_prev;
	/** @skip_next: skip next rebind */
	bool skip_next;
	/** @unmap_done: unmap operation is done */
	bool unmap_done;
};
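
/*
 * A remap describes an existing mapping being split around an unmapped
 * region: @prev, when present, keeps the portion below @start, the range
 * [@start, @start + @range) is unmapped, and @next, when present, keeps the
 * portion above it. Rough address-space sketch:
 *
 *	old mapping:	[old_start ............................. old_end)
 *	after remap:	[prev ....)[unmapped: @start, @range)[.... next)
 */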

/** struct xe_vma_op_prefetch - VMA prefetch operation */
struct xe_vma_op_prefetch {
	/** @region: memory region to prefetch to */
	u32 region;
};

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
	/** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
	XE_VMA_OP_FIRST			= BIT(0),
	/** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
	XE_VMA_OP_LAST			= BIT(1),
	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
	XE_VMA_OP_COMMITTED		= BIT(2),
	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
	XE_VMA_OP_PREV_COMMITTED	= BIT(3),
	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
	XE_VMA_OP_NEXT_COMMITTED	= BIT(4),
};

/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
	/** @base: GPUVA base operation */
	struct drm_gpuva_op base;
	/**
	 * @ops: GPUVA ops, when set call drm_gpuva_ops_free() after this
	 * operation is processed
	 */
	struct drm_gpuva_ops *ops;
	/** @q: exec queue for this operation */
	struct xe_exec_queue *q;
	/**
	 * @syncs: syncs for this operation, only used on first and last
	 * operation
	 */
	struct xe_sync_entry *syncs;
	/** @num_syncs: number of syncs */
	u32 num_syncs;
	/** @link: async operation link */
	struct list_head link;
	/** @flags: operation flags */
	enum xe_vma_op_flags flags;

	union {
		/** @map: VMA map operation specific data */
		struct xe_vma_op_map map;
		/** @remap: VMA remap operation specific data */
		struct xe_vma_op_remap remap;
		/** @prefetch: VMA prefetch operation specific data */
		struct xe_vma_op_prefetch prefetch;
	};
};
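
/*
 * The union above is selected by @base.op, the DRM GPUVA op type from
 * drm_gpuvm.h. Illustrative dispatch sketch (error handling omitted):
 *
 *	switch (op->base.op) {
 *	case DRM_GPUVA_OP_MAP:
 *		... use op->map ...
 *		break;
 *	case DRM_GPUVA_OP_REMAP:
 *		... use op->remap ...
 *		break;
 *	case DRM_GPUVA_OP_UNMAP:
 *		break;
 *	case DRM_GPUVA_OP_PREFETCH:
 *		... use op->prefetch ...
 *		break;
 *	default:
 *		break;
 *	}
 */
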
#endif