/* SPDX-License-Identifier: GPL-2.0-only OR MIT */

#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__

/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_exec.h>

struct drm_gpuvm;
struct drm_gpuvm_bo;
struct drm_gpuvm_ops;

/**
 * enum drm_gpuva_flags - flags for struct drm_gpuva
 */
enum drm_gpuva_flags {
	/**
	 * @DRM_GPUVA_INVALIDATED:
	 *
	 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
	 */
	DRM_GPUVA_INVALIDATED = (1 << 0),

	/**
	 * @DRM_GPUVA_SPARSE:
	 *
	 * Flag indicating that the &drm_gpuva is a sparse mapping.
	 */
	DRM_GPUVA_SPARSE = (1 << 1),

	/**
	 * @DRM_GPUVA_USERBITS: user defined bits
	 */
	DRM_GPUVA_USERBITS = (1 << 2),
};

/**
 * struct drm_gpuva - structure to track a GPU VA mapping
 *
 * This structure represents a GPU VA mapping and is associated with a
 * &drm_gpuvm.
 *
 * Typically, this structure is embedded in bigger driver structures.
 */
struct drm_gpuva {
	/**
	 * @vm: the &drm_gpuvm this object is associated with
	 */
	struct drm_gpuvm *vm;

	/**
	 * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
	 * &drm_gem_object
	 */
	struct drm_gpuvm_bo *vm_bo;

	/**
	 * @flags: the &drm_gpuva_flags for this mapping
	 */
	enum drm_gpuva_flags flags;

	/**
	 * @va: structure containing the address and range of the &drm_gpuva
	 */
	struct {
		/**
		 * @va.addr: the start address
		 */
		u64 addr;

		/**
		 * @va.range: the range
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @gem.offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @gem.obj: the mapped &drm_gem_object
		 */
		struct drm_gem_object *obj;

		/**
		 * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo
		 */
		struct list_head entry;
	} gem;

	/**
	 * @rb: structure containing data to store &drm_gpuvas in a rb-tree
	 */
	struct {
		/**
		 * @rb.node: the rb-tree node
		 */
		struct rb_node node;

		/**
		 * @rb.entry: The &list_head to additionally connect &drm_gpuvas
		 * in the same order they appear in the interval tree. This is
		 * useful to keep iterating &drm_gpuvas from a start node found
		 * through the rb-tree while doing modifications on the rb-tree
		 * itself.
		 */
		struct list_head entry;

		/**
		 * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
		 */
		u64 __subtree_last;
	} rb;
};

int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);

void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);

struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
				 u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
				       u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);

static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
				  struct drm_gem_object *obj, u64 offset)
{
	va->va.addr = addr;
	va->va.range = range;
	va->gem.obj = obj;
	va->gem.offset = offset;
}
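
/*
 * Example: a minimal sketch of creating a new mapping. The struct
 * my_driver_va and the my_map() helper are hypothetical; drivers typically
 * embed struct drm_gpuva in a larger structure and hold the required locks
 * when inserting and linking.
 *
 *	struct my_driver_va {
 *		struct drm_gpuva base;
 *	};
 *
 *	int my_map(struct drm_gpuvm *gpuvm, struct drm_gpuvm_bo *vm_bo,
 *		   u64 addr, u64 range, u64 offset)
 *	{
 *		struct my_driver_va *mva;
 *		int ret;
 *
 *		mva = kzalloc(sizeof(*mva), GFP_KERNEL);
 *		if (!mva)
 *			return -ENOMEM;
 *
 *		drm_gpuva_init(&mva->base, addr, range, vm_bo->obj, offset);
 *		ret = drm_gpuva_insert(gpuvm, &mva->base);
 *		if (ret) {
 *			kfree(mva);
 *			return ret;
 *		}
 *
 *		drm_gpuva_link(&mva->base, vm_bo);
 *		return 0;
 *	}
 */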

/**
 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
 * invalidated
 * @va: the &drm_gpuva to set the invalidate flag for
 * @invalidate: indicates whether the &drm_gpuva is invalidated
 */
static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
{
	if (invalidate)
		va->flags |= DRM_GPUVA_INVALIDATED;
	else
		va->flags &= ~DRM_GPUVA_INVALIDATED;
}

/**
 * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
 * is invalidated
 * @va: the &drm_gpuva to check
 *
 * Returns: %true if the GPU VA is invalidated, %false otherwise
 */
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
	return va->flags & DRM_GPUVA_INVALIDATED;
}

/**
 * enum drm_gpuvm_flags - flags for struct drm_gpuvm
 */
enum drm_gpuvm_flags {
	/**
	 * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
	 * GPUVM's &dma_resv lock
	 */
	DRM_GPUVM_RESV_PROTECTED = BIT(0),

	/**
	 * @DRM_GPUVM_USERBITS: user defined bits
	 */
	DRM_GPUVM_USERBITS = BIT(1),
};

/**
 * struct drm_gpuvm - DRM GPU VA Manager
 *
 * The DRM GPU VA Manager keeps track of a GPU's virtual address space using
 * an interval tree (rb-tree) of &drm_gpuva structures. Typically, this
 * structure is embedded in bigger driver structures.
 *
 * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
 * pages.
 *
 * There should be one manager instance per GPU virtual address space.
 */
struct drm_gpuvm {
	/**
	 * @name: the name of the DRM GPU VA space
	 */
	const char *name;

	/**
	 * @flags: the &drm_gpuvm_flags of this GPUVM
	 */
	enum drm_gpuvm_flags flags;

	/**
	 * @drm: the &drm_device this VM lives in
	 */
	struct drm_device *drm;

	/**
	 * @mm_start: start of the VA space
	 */
	u64 mm_start;

	/**
	 * @mm_range: length of the VA space
	 */
	u64 mm_range;

	/**
	 * @rb: structures to track &drm_gpuva entries
	 */
	struct {
		/**
		 * @rb.tree: the rb-tree to track GPU VA mappings
		 */
		struct rb_root_cached tree;

		/**
		 * @rb.list: the &list_head to track GPU VA mappings
		 */
		struct list_head list;
	} rb;

	/**
	 * @kref: reference count of this object
	 */
	struct kref kref;

	/**
	 * @kernel_alloc_node:
	 *
	 * &drm_gpuva representing the address space cutout reserved for
	 * the kernel
	 */
	struct drm_gpuva kernel_alloc_node;

	/**
	 * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
	 */
	const struct drm_gpuvm_ops *ops;

	/**
	 * @r_obj: Resv GEM object, representing the GPUVM's common &dma_resv.
	 */
	struct drm_gem_object *r_obj;

	/**
	 * @extobj: structure holding the extobj list
	 */
	struct {
		/**
		 * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
		 * external objects
		 */
		struct list_head list;

		/**
		 * @extobj.local_list: pointer to the local list temporarily
		 * storing entries from the external object list
		 */
		struct list_head *local_list;

		/**
		 * @extobj.lock: spinlock to protect the extobj list
		 */
		spinlock_t lock;
	} extobj;

	/**
	 * @evict: structure holding the evict list and evict list lock
	 */
	struct {
		/**
		 * @evict.list: &list_head storing &drm_gpuvm_bos currently
		 * being evicted
		 */
		struct list_head list;

		/**
		 * @evict.local_list: pointer to the local list temporarily
		 * storing entries from the evicted object list
		 */
		struct list_head *local_list;

		/**
		 * @evict.lock: spinlock to protect the evict list
		 */
		spinlock_t lock;
	} evict;
};

void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
		    enum drm_gpuvm_flags flags,
		    struct drm_device *drm,
		    struct drm_gem_object *r_obj,
		    u64 start_offset, u64 range,
		    u64 reserve_offset, u64 reserve_range,
		    const struct drm_gpuvm_ops *ops);
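
/*
 * Example: a minimal sketch of setting up a GPUVM. The struct my_vm, the
 * my_gpuvm_ops (see the sketch after &struct drm_gpuvm_ops below) and the
 * chosen VA layout are hypothetical; the resv object can be allocated with
 * drm_gpuvm_resv_object_alloc(), declared below.
 *
 *	struct my_vm {
 *		struct drm_gpuvm base;
 *	};
 *
 *	int my_vm_init(struct my_vm *vm, struct drm_device *drm,
 *		       struct drm_gem_object *r_obj)
 *	{
 *		// Manage a 48-bit VA space in bytes, reserving the first
 *		// page for the kernel.
 *		drm_gpuvm_init(&vm->base, "my-vm", 0, drm, r_obj,
 *			       0, 1ull << 48,
 *			       0, PAGE_SIZE,
 *			       &my_gpuvm_ops);
 *		return 0;
 *	}
 */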

/**
 * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
 * @gpuvm: the &drm_gpuvm to acquire the reference of
 *
 * This function acquires an additional reference to @gpuvm. It is illegal to
 * call this without already holding a reference. No locks required.
 *
 * Returns: the &struct drm_gpuvm pointer
 */
static inline struct drm_gpuvm *
drm_gpuvm_get(struct drm_gpuvm *gpuvm)
{
	kref_get(&gpuvm->kref);

	return gpuvm;
}

void drm_gpuvm_put(struct drm_gpuvm *gpuvm);

bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);

struct drm_gem_object *
drm_gpuvm_resv_object_alloc(struct drm_device *drm);

/**
 * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
 * set
 * @gpuvm: the &drm_gpuvm
 *
 * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
 */
static inline bool
drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
{
	return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
}

/**
 * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
 */
#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)

/**
 * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
 * &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
 * &dma_resv
 */
#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)

#define drm_gpuvm_resv_held(gpuvm__) \
	dma_resv_held(drm_gpuvm_resv(gpuvm__))

#define drm_gpuvm_resv_assert_held(gpuvm__) \
	dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))

/**
 * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
 * external object
 * @gpuvm: the &drm_gpuvm to check
 * @obj: the &drm_gem_object to check
 *
 * Returns: true if the &drm_gem_object's &dma_resv differs from the
 * &drm_gpuvm's &dma_resv, false otherwise
 */
static inline bool
drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj)
{
	return obj && obj->resv != drm_gpuvm_resv(gpuvm);
}

static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
	if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
		return list_next_entry(va, rb.entry);

	return NULL;
}

/**
 * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but is using the &drm_gpuvm's internal interval tree to accelerate
 * the search for the starting &drm_gpuva, and hence isn't safe against removal
 * of elements. It assumes that @end__ is within (or is the upper limit of) the
 * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
 * @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = __drm_gpuva_next(va__))
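
/*
 * Example: a sketch of walking all mappings overlapping a range; the
 * pr_info() and the surrounding locking context are illustrative only.
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gpuvm_for_each_va_range(va, gpuvm, 0x1000, 0x2000)
 *		pr_info("mapping at 0x%llx, range 0x%llx\n",
 *			va->va.addr, va->va.range);
 */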

/**
 * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each_safe(), but is using the &drm_gpuvm's internal interval
 * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuvm. This iterator does not skip over the
 * &drm_gpuvm's @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
	     next__ = __drm_gpuva_next(va__); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = next__, next__ = __drm_gpuva_next(va__))

/**
 * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm.
 */
#define drm_gpuvm_for_each_va(va__, gpuvm__) \
	list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)

/**
 * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
 * hence safe against the removal of elements.
 */
#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
	list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)

/**
 * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
 *
 * This structure should be created on the stack as &drm_exec should be.
 *
 * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
 */
struct drm_gpuvm_exec {
	/**
	 * @exec: the &drm_exec structure
	 */
	struct drm_exec exec;

	/**
	 * @flags: the flags for the struct drm_exec
	 */
	u32 flags;

	/**
	 * @vm: the &drm_gpuvm to lock its DMA reservations
	 */
	struct drm_gpuvm *vm;

	/**
	 * @num_fences: the number of fences to reserve for the &dma_resv of the
	 * locked &drm_gem_objects
	 */
	unsigned int num_fences;

	/**
	 * @extra: Callback and corresponding private data for the driver to
	 * lock arbitrary additional &drm_gem_objects.
	 */
	struct {
		/**
		 * @extra.fn: The driver callback to lock additional
		 * &drm_gem_objects.
		 */
		int (*fn)(struct drm_gpuvm_exec *vm_exec);

		/**
		 * @extra.priv: driver private data for the @fn callback
		 */
		void *priv;
	} extra;
};

int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
			 struct drm_exec *exec,
			 unsigned int num_fences);

int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      unsigned int num_fences);

int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
			    struct drm_exec *exec,
			    u64 addr, u64 range,
			    unsigned int num_fences);

int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);

int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
			      struct drm_gem_object **objs,
			      unsigned int num_objs);

int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
			      u64 addr, u64 range);

/**
 * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * Releases all dma-resv locks of all &drm_gem_objects previously acquired
 * through drm_gpuvm_exec_lock() or its variants.
 */
static inline void
drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
{
	drm_exec_fini(&vm_exec->exec);
}

int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage);

/**
 * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
 * @vm_exec: the &drm_gpuvm_exec wrapper
 * @fence: fence to add
 * @private_usage: private dma-resv usage
 * @extobj_usage: extobj dma-resv usage
 *
 * See drm_gpuvm_resv_add_fence().
 */
static inline void
drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage)
{
	drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
				 private_usage, extobj_usage);
}

/**
 * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * See drm_gpuvm_validate().
 *
 * Returns: 0 on success, negative error code on failure.
 */
static inline int
drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}
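
/*
 * Example: a sketch of the typical lock/validate/fence sequence using
 * &drm_gpuvm_exec; my_submit(), the fence and the chosen dma-resv usages
 * are hypothetical driver context.
 *
 *	int my_submit(struct drm_gpuvm *gpuvm, struct dma_fence *fence)
 *	{
 *		struct drm_gpuvm_exec vm_exec = {
 *			.vm = gpuvm,
 *			.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *			.num_fences = 1,
 *		};
 *		int ret;
 *
 *		ret = drm_gpuvm_exec_lock(&vm_exec);
 *		if (ret)
 *			return ret;
 *
 *		ret = drm_gpuvm_exec_validate(&vm_exec);
 *		if (ret)
 *			goto out_unlock;
 *
 *		// run the job, then publish its fence
 *		drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
 *					      DMA_RESV_USAGE_BOOKKEEP,
 *					      DMA_RESV_USAGE_BOOKKEEP);
 *	out_unlock:
 *		drm_gpuvm_exec_unlock(&vm_exec);
 *		return ret;
 *	}
 */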

/**
 * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
 * &drm_gem_object combination
 *
 * This structure is an abstraction representing a &drm_gpuvm and
 * &drm_gem_object combination. It serves as an indirection to accelerate
 * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
 * &drm_gem_object.
 *
 * Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to
 * accelerate validation.
 *
 * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once
 * a GEM object is mapped first in a GPU-VM and release the instance once the
 * last mapping of the GEM object in this GPU-VM is unmapped.
 */
struct drm_gpuvm_bo {
	/**
	 * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
	 * counted pointer.
	 */
	struct drm_gpuvm *vm;

	/**
	 * @obj: The &drm_gem_object being mapped in @vm. This is a reference
	 * counted pointer.
	 */
	struct drm_gem_object *obj;

	/**
	 * @evicted: Indicates whether the &drm_gem_object is evicted; field
	 * protected by the &drm_gem_object's dma-resv lock.
	 */
	bool evicted;

	/**
	 * @kref: The reference count for this &drm_gpuvm_bo.
	 */
	struct kref kref;

	/**
	 * @list: Structure containing all &list_heads.
	 */
	struct {
		/**
		 * @list.gpuva: The list of linked &drm_gpuvas.
		 *
		 * It is safe to access entries from this list as long as the
		 * GEM's gpuva lock is held. See also struct drm_gem_object.
		 */
		struct list_head gpuva;

		/**
		 * @list.entry: Structure containing all &list_heads serving as
		 * entry.
		 */
		struct {
			/**
			 * @list.entry.gem: List entry to attach to the
			 * &drm_gem_object's gpuva list.
			 */
			struct list_head gem;

			/**
			 * @list.entry.extobj: List entry to attach to the
			 * &drm_gpuvm's extobj list.
			 */
			struct list_head extobj;

			/**
			 * @list.entry.evict: List entry to attach to the
			 * &drm_gpuvm's evict list.
			 */
			struct list_head evict;
		} entry;
	} list;
};

struct drm_gpuvm_bo *
drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);

struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
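
/*
 * Example: a sketch of the typical &drm_gpuvm_bo lifecycle around mapping;
 * my_map_one() is hypothetical, and error handling for drm_gpuva_insert()
 * as well as the required locking are elided.
 *
 *	int my_map_one(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
 *		       struct drm_gpuva *va, u64 addr, u64 range, u64 offset)
 *	{
 *		struct drm_gpuvm_bo *vm_bo;
 *
 *		vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *		if (IS_ERR(vm_bo))
 *			return PTR_ERR(vm_bo);
 *
 *		drm_gpuva_init(va, addr, range, obj, offset);
 *		drm_gpuva_insert(gpuvm, va);
 *		drm_gpuva_link(va, vm_bo);
 *
 *		// drm_gpuva_link() takes its own vm_bo reference; drop ours.
 *		drm_gpuvm_bo_put(vm_bo);
 *		return 0;
 *	}
 */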

/**
 * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
 * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
 *
 * This function acquires an additional reference to @vm_bo. It is illegal to
 * call this without already holding a reference. No locks required.
 *
 * Returns: the &struct drm_gpuvm_bo pointer
 */
static inline struct drm_gpuvm_bo *
drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
{
	kref_get(&vm_bo->kref);
	return vm_bo;
}

bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);

struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
		  struct drm_gem_object *obj);

void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);

/**
 * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
 * to/from the &drm_gpuvm's evict list
 * @obj: the &drm_gem_object
 * @evict: indicates whether @obj is evicted
 *
 * See drm_gpuvm_bo_evict().
 */
static inline void
drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
{
	struct drm_gpuvm_bo *vm_bo;

	drm_gem_gpuva_assert_lock_held(obj);
	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
		drm_gpuvm_bo_evict(vm_bo, evict);
}
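
/*
 * Example: a sketch of wiring BO eviction state into GPUVM, e.g. from a
 * driver's move callback; my_bo_move_notify() is hypothetical, and the
 * GEM's gpuva lock must be held, as asserted above.
 *
 *	static void my_bo_move_notify(struct drm_gem_object *obj,
 *				      bool evicted)
 *	{
 *		drm_gpuvm_bo_gem_evict(obj, evicted);
 *	}
 *
 * The BOs flagged evicted here are the ones drm_gpuvm_validate() passes to
 * the &drm_gpuvm_ops.vm_bo_validate callback.
 */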

void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);

/**
 * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
	list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)

/**
 * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
 * &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
	list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
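
/*
 * Example: a sketch of walking all mappings a BO has in one GPU-VM; the
 * GEM's gpuva lock must be held, and the pr_info() is illustrative only.
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gpuvm_bo_for_each_va(va, vm_bo)
 *		pr_info("BO mapped at 0x%llx, range 0x%llx\n",
 *			va->va.addr, va->va.range);
 */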

/**
 * enum drm_gpuva_op_type - GPU VA operation type
 *
 * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
 */
enum drm_gpuva_op_type {
	/**
	 * @DRM_GPUVA_OP_MAP: the map op type
	 */
	DRM_GPUVA_OP_MAP,

	/**
	 * @DRM_GPUVA_OP_REMAP: the remap op type
	 */
	DRM_GPUVA_OP_REMAP,

	/**
	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
	 */
	DRM_GPUVA_OP_UNMAP,

	/**
	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	 */
	DRM_GPUVA_OP_PREFETCH,

	/**
	 * @DRM_GPUVA_OP_DRIVER: the driver defined op type
	 */
	DRM_GPUVA_OP_DRIVER,
};

/**
 * struct drm_gpuva_op_map - GPU VA map operation
 *
 * This structure represents a single map operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_map {
	/**
	 * @va: structure containing address and range of a map
	 * operation
	 */
	struct {
		/**
		 * @va.addr: the base address of the new mapping
		 */
		u64 addr;

		/**
		 * @va.range: the range of the new mapping
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @gem.offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @gem.obj: the &drm_gem_object to map
		 */
		struct drm_gem_object *obj;
	} gem;
};

/**
 * struct drm_gpuva_op_unmap - GPU VA unmap operation
 *
 * This structure represents a single unmap operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_unmap {
	/**
	 * @va: the &drm_gpuva to unmap
	 */
	struct drm_gpuva *va;

	/**
	 * @keep:
	 *
	 * Indicates whether this &drm_gpuva is physically contiguous with the
	 * original mapping request.
	 *
	 * Optionally, if &keep is set, drivers may keep the actual page table
	 * mappings for this &drm_gpuva, adding only the missing page table
	 * entries and updating the &drm_gpuvm accordingly.
	 */
	bool keep;
};

/**
 * struct drm_gpuva_op_remap - GPU VA remap operation
 *
 * This represents a single remap operation generated by the DRM GPU VA manager.
 *
 * A remap operation is generated when an existing GPU VA mapping is split up
 * by inserting a new GPU VA mapping or by partially unmapping existent
 * mapping(s), hence it consists of a maximum of two map and one unmap
 * operation.
 *
 * The @unmap operation takes care of removing the original existing mapping.
 * @prev is used to remap the preceding part, @next the subsequent part.
 *
 * If either a new mapping's start address is aligned with the start address
 * of the old mapping or the new mapping's end address is aligned with the
 * end address of the old mapping, either @prev or @next is NULL.
 *
 * Note, the reason for a dedicated remap operation, rather than arbitrary
 * unmap and map operations, is to give drivers the chance of extracting driver
 * specific data for creating the new mappings from the unmap operation's
 * &drm_gpuva structure which typically is embedded in larger driver specific
 * structures.
 */
struct drm_gpuva_op_remap {
	/**
	 * @prev: the preceding part of a split mapping
	 */
	struct drm_gpuva_op_map *prev;

	/**
	 * @next: the subsequent part of a split mapping
	 */
	struct drm_gpuva_op_map *next;

	/**
	 * @unmap: the unmap operation for the original existing mapping
	 */
	struct drm_gpuva_op_unmap *unmap;
};

/**
 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
 *
 * This structure represents a single prefetch operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_prefetch {
	/**
	 * @va: the &drm_gpuva to prefetch
	 */
	struct drm_gpuva *va;
};

/**
 * struct drm_gpuva_op - GPU VA operation
 *
 * This structure represents a single generic operation.
 *
 * The particular type of the operation is defined by @op.
 */
struct drm_gpuva_op {
	/**
	 * @entry:
	 *
	 * The &list_head used to distribute instances of this struct within
	 * &drm_gpuva_ops.
	 */
	struct list_head entry;

	/**
	 * @op: the type of the operation
	 */
	enum drm_gpuva_op_type op;

	union {
		/**
		 * @map: the map operation
		 */
		struct drm_gpuva_op_map map;

		/**
		 * @remap: the remap operation
		 */
		struct drm_gpuva_op_remap remap;

		/**
		 * @unmap: the unmap operation
		 */
		struct drm_gpuva_op_unmap unmap;

		/**
		 * @prefetch: the prefetch operation
		 */
		struct drm_gpuva_op_prefetch prefetch;
	};
};

/**
 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
 */
struct drm_gpuva_ops {
	/**
	 * @list: the &list_head
	 */
	struct list_head list;
};

/**
 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations.
 */
#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @next: another &drm_gpuva_op to use as temporary storage
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations. It is
 * implemented with list_for_each_entry_safe(), hence safe against removal of
 * elements.
 */
#define drm_gpuva_for_each_op_safe(op, next, ops) \
	list_for_each_entry_safe(op, next, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations beginning
 * from the given operation in reverse order.
 */
#define drm_gpuva_for_each_op_from_reverse(op, ops) \
	list_for_each_entry_from_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_reverse() - iterator to walk over &drm_gpuva_ops in reverse
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations in
 * reverse order.
 */
#define drm_gpuva_for_each_op_reverse(op, ops) \
	list_for_each_entry_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
 */
#define drm_gpuva_first_op(ops) \
	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
 */
#define drm_gpuva_last_op(ops) \
	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)

/**
 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_next_op(op) list_next_entry(op, entry)

struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
			    u64 addr, u64 range,
			    struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);

void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
			struct drm_gpuva_ops *ops);

static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
					  struct drm_gpuva_op_map *op)
{
	drm_gpuva_init(va, op->va.addr, op->va.range,
		       op->gem.obj, op->gem.offset);
}
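
/*
 * Example: a sketch of generating and processing split/merge operations
 * via the ops-list interface; my_bind() is hypothetical and the per-op
 * handling is abbreviated to comments.
 *
 *	int my_bind(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
 *		    u64 addr, u64 range, u64 offset)
 *	{
 *		struct drm_gpuva_ops *ops;
 *		struct drm_gpuva_op *op;
 *
 *		ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
 *						  obj, offset);
 *		if (IS_ERR(ops))
 *			return PTR_ERR(ops);
 *
 *		drm_gpuva_for_each_op(op, ops) {
 *			switch (op->op) {
 *			case DRM_GPUVA_OP_MAP:
 *				// program page tables for op->map
 *				break;
 *			case DRM_GPUVA_OP_REMAP:
 *				// split the mapping per op->remap
 *				break;
 *			case DRM_GPUVA_OP_UNMAP:
 *				// tear down op->unmap.va
 *				break;
 *			default:
 *				break;
 *			}
 *		}
 *
 *		drm_gpuva_ops_free(gpuvm, ops);
 *		return 0;
 *	}
 */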

/**
 * struct drm_gpuvm_ops - callbacks for split/merge steps
 *
 * This structure defines the callbacks used by &drm_gpuvm_sm_map and
 * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
 */
struct drm_gpuvm_ops {
	/**
	 * @vm_free: called when the last reference of a struct drm_gpuvm is
	 * dropped
	 *
	 * This callback is mandatory.
	 */
	void (*vm_free)(struct drm_gpuvm *gpuvm);

	/**
	 * @op_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuva_op *(*op_alloc)(void);

	/**
	 * @op_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*op_free)(struct drm_gpuva_op *op);

	/**
	 * @vm_bo_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuvm_bo *(*vm_bo_alloc)(void);

	/**
	 * @vm_bo_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);

	/**
	 * @vm_bo_validate: called from drm_gpuvm_validate()
	 *
	 * Drivers receive this callback for every evicted &drm_gem_object being
	 * mapped in the corresponding &drm_gpuvm.
	 *
	 * Typically, drivers would call their driver specific variant of
	 * ttm_bo_validate() from within this callback.
	 */
	int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
			      struct drm_exec *exec);

	/**
	 * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
	 * mapping once all previous steps were completed
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if &drm_gpuvm_sm_map is not used.
	 */
	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_remap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to split up an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be split
	 * up. This is the case when either a newly requested mapping overlaps
	 * or is enclosed by an existent mapping or a partial unmap of an
	 * existent mapping is requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to unmap an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be
	 * unmapped. This is the case when either a newly requested mapping
	 * encloses an existent mapping or an unmap of an existent mapping is
	 * requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};
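
/*
 * Example: a minimal sketch of a driver's &drm_gpuvm_ops wiring up the
 * split/merge steps; the my_* callbacks are hypothetical stubs, and struct
 * my_vm is the embedding structure from the init example above.
 *
 *	static void my_vm_free(struct drm_gpuvm *gpuvm)
 *	{
 *		kfree(container_of(gpuvm, struct my_vm, base));
 *	}
 *
 *	static int my_step_map(struct drm_gpuva_op *op, void *priv)
 *	{
 *		// allocate and insert a new mapping from op->map
 *		return 0;
 *	}
 *
 *	static int my_step_remap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		// split the existing mapping per op->remap
 *		return 0;
 *	}
 *
 *	static int my_step_unmap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		// remove the mapping referenced by op->unmap.va
 *		return 0;
 *	}
 *
 *	static const struct drm_gpuvm_ops my_gpuvm_ops = {
 *		.vm_free = my_vm_free,
 *		.sm_step_map = my_step_map,
 *		.sm_step_remap = my_step_remap,
 *		.sm_step_unmap = my_step_unmap,
 *	};
 */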

int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
		     u64 addr, u64 range,
		     struct drm_gem_object *obj, u64 offset);

int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
		       u64 addr, u64 range);

void drm_gpuva_map(struct drm_gpuvm *gpuvm,
		   struct drm_gpuva *va,
		   struct drm_gpuva_op_map *op);

void drm_gpuva_remap(struct drm_gpuva *prev,
		     struct drm_gpuva *next,
		     struct drm_gpuva_op_remap *op);

void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);

/**
 * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
 * the unmap stage of a remap op.
 * @op: Remap op.
 * @start_addr: Output pointer for the start of the required unmap.
 * @range: Output pointer for the length of the required unmap.
 *
 * The given start address and range will be set such that they represent the
 * range of the address space that was previously covered by the mapping being
 * re-mapped, but is now empty.
 */
static inline void
drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
				  u64 *start_addr, u64 *range)
{
	const u64 va_start = op->prev ?
			     op->prev->va.addr + op->prev->va.range :
			     op->unmap->va->va.addr;
	const u64 va_end = op->next ?
			   op->next->va.addr :
			   op->unmap->va->va.addr + op->unmap->va->va.range;

	if (start_addr)
		*start_addr = va_start;
	if (range)
		*range = va_end - va_start;
}
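
/*
 * Example: a sketch of using the helper inside a driver's remap step to
 * flush only the hole left by the split — for instance, a fuller version
 * of the my_step_remap() stub sketched above; my_flush_range() is
 * hypothetical.
 *
 *	static int my_step_remap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		u64 start, range;
 *
 *		drm_gpuva_op_remap_to_unmap_range(&op->remap, &start, &range);
 *		my_flush_range(priv, start, range);
 *		return 0;
 *	}
 */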

#endif /* __DRM_GPUVM_H__ */