1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3 * Copyright (c) 2022 Red Hat.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Danilo Krummrich <dakr@redhat.com>
25 *
26 */
27
28 #include <drm/drm_gpuvm.h>
29
30 #include <linux/export.h>
31 #include <linux/interval_tree_generic.h>
32 #include <linux/mm.h>
33
34 /**
35 * DOC: Overview
36 *
37 * The DRM GPU VA Manager, represented by struct drm_gpuvm keeps track of a
38 * GPU's virtual address (VA) space and manages the corresponding virtual
39 * mappings represented by &drm_gpuva objects. It also keeps track of the
40 * mapping's backing &drm_gem_object buffers.
41 *
42 * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
43 * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
44 *
45 * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
46 * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
47 *
48 * The GPU VA manager internally uses a rb-tree to manage the
49 * &drm_gpuva mappings within a GPU's virtual address space.
50 *
51 * The &drm_gpuvm structure contains a special &drm_gpuva representing the
52 * portion of VA space reserved by the kernel. This node is initialized together
53 * with the GPU VA manager instance and removed when the GPU VA manager is
54 * destroyed.
55 *
56 * In a typical application drivers would embed struct drm_gpuvm and
57 * struct drm_gpuva within their own driver specific structures; hence, the
58 * GPU VA manager does not perform any memory allocations of its own, nor
59 * does it allocate &drm_gpuva entries.
60 *
61 * The data structures needed to store &drm_gpuvas within the &drm_gpuvm are
62 * contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva
63 * entries from within dma-fence signalling critical sections it is enough to
64 * pre-allocate the &drm_gpuva structures.
65 *
66 * &drm_gem_objects which are private to a single VM can share a common
67 * &dma_resv in order to improve locking efficiency (e.g. with &drm_exec).
68 * For this purpose drivers must pass a &drm_gem_object to drm_gpuvm_init(), in
69 * the following called 'resv object', which serves as the container of the
70 * GPUVM's shared &dma_resv. This resv object can be a driver specific
71 * &drm_gem_object, such as the &drm_gem_object containing the root page table,
72 * but it can also be a 'dummy' object, which can be allocated with
73 * drm_gpuvm_resv_object_alloc().
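 *
 * As a minimal sketch (the VA space bounds, the kernel reserved region and
 * &driver_gpuvm_ops are illustrative assumptions), setting up such a GPUVM
 * could look like this::
 *
 *        struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);
 *        struct drm_gpuvm *gpuvm = kzalloc(sizeof(*gpuvm), GFP_KERNEL);
 *
 *        if (!r_obj || !gpuvm)
 *                return -ENOMEM; // error unwinding omitted
 *
 *        drm_gpuvm_init(gpuvm, "example-vm", 0, drm, r_obj,
 *                       0, 1ull << 48,  // managed VA space
 *                       0, SZ_4K,       // kernel reserved VA region
 *                       &driver_gpuvm_ops);
 *        drm_gem_object_put(r_obj); // drm_gpuvm_init() took its own reference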
74 *
75 * In order to connect a struct drm_gpuva to its backing &drm_gem_object each
76 * &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
77 * &drm_gpuvm_bo contains a list of &drm_gpuva structures.
78 *
79 * A &drm_gpuvm_bo is an abstraction that represents a combination of a
80 * &drm_gpuvm and a &drm_gem_object. Every such combination should be unique.
81 * This is ensured by the API through drm_gpuvm_bo_obtain() and
82 * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
83 * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
84 * particular combination. If not existent a new instance is created and linked
85 * to the &drm_gem_object.
86 *
87 * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
88 * as entry for the &drm_gpuvm's lists of external and evicted objects. Those
89 * lists are maintained in order to accelerate locking of dma-resv locks and
90 * validation of evicted objects bound in a &drm_gpuvm. For instance, all
91 * &drm_gem_object's &dma_resv of a given &drm_gpuvm can be locked by calling
92 * drm_gpuvm_exec_lock(). Once locked drivers can call drm_gpuvm_validate() in
93 * order to validate all evicted &drm_gem_objects. It is also possible to lock
94 * additional &drm_gem_objects by providing the corresponding parameters to
95 * drm_gpuvm_exec_lock() as well as open code the &drm_exec loop while making
96 * use of helper functions such as drm_gpuvm_prepare_range() or
97 * drm_gpuvm_prepare_objects().
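 *
 * A minimal sketch of such a lock / validate / fence sequence (declarations
 * and error unwinding omitted; driver_submit_job() is a made up driver
 * specific helper) could look like this::
 *
 *        struct drm_gpuvm_exec vm_exec = {
 *                .vm = gpuvm,
 *                .flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *                .num_fences = 1,
 *        };
 *
 *        ret = drm_gpuvm_exec_lock(&vm_exec);
 *        if (ret)
 *                return ret;
 *
 *        ret = drm_gpuvm_validate(gpuvm, &vm_exec.exec);
 *        if (ret)
 *                goto err_unlock;
 *
 *        fence = driver_submit_job(...);
 *        drm_gpuvm_resv_add_fence(gpuvm, &vm_exec.exec, fence,
 *                                 DMA_RESV_USAGE_BOOKKEEP,
 *                                 DMA_RESV_USAGE_BOOKKEEP);
 *        drm_exec_fini(&vm_exec.exec);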
98 *
99 * Every bound &drm_gem_object is treated as external object when its &dma_resv
100 * structure is different than the &drm_gpuvm's common &dma_resv structure.
101 */
102
103 /**
104 * DOC: Split and Merge
105 *
106 * Besides its capability to manage and represent a GPU VA space, the
107 * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
108 * sequence of operations to satisfy a given map or unmap request.
109 *
110 * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
111 * and merging of existent GPU VA mappings with the ones that are requested to
112 * be mapped or unmapped. This feature is required by the Vulkan API to
113 * implement Vulkan 'Sparse Memory Bindings' - drivers' UAPIs often refer to this
114 * as VM BIND.
115 *
116 * Drivers can call drm_gpuvm_sm_map() to receive a sequence of callbacks
117 * containing map, unmap and remap operations for a given newly requested
118 * mapping. The sequence of callbacks represents the set of operations to
119 * execute in order to integrate the new mapping cleanly into the current state
120 * of the GPU VA space.
121 *
122 * Depending on how the new GPU VA mapping intersects with the existent mappings
123 * of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
124 * of unmap operations, a maximum of two remap operations and a single map
125 * operation. The caller might receive no callback at all if no operation is
126 * required, e.g. if the requested mapping already exists in the exact same way.
127 *
128 * The single map operation represents the original map operation requested by
129 * the caller.
130 *
131 * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
132 * &drm_gpuva to unmap is physically contiguous with the original mapping
133 * request. Optionally, if 'keep' is set, drivers may keep the actual page table
134 * entries for this &drm_gpuva, adding the missing page table entries only and
135 * updating the &drm_gpuvm's view of things accordingly.
136 *
137 * Drivers may do the same optimization, namely delta page table updates, also
138 * for remap operations. This is possible since &drm_gpuva_op_remap consists of
139 * one unmap operation and one or two map operations, such that drivers can
140 * derive the page table update delta accordingly.
141 *
142 * Note that there can't be more than two existent mappings to split up, one at
143 * the beginning and one at the end of the new mapping, hence there is a
144 * maximum of two remap operations.
145 *
146 * Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
147 * call back into the driver in order to unmap a range of GPU VA space. The
148 * logic behind this function is way simpler though: For all existent mappings
149 * enclosed by the given range unmap operations are created. For mappings which
150 * are only partially located within the given range, remap operations are
151 * created such that those mappings are split up and re-mapped partially.
152 *
153 * As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
154 * drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
155 * to directly obtain an instance of struct drm_gpuva_ops containing a list of
156 * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
157 * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
158 * calling drm_gpuvm_sm_map() or drm_gpuvm_sm_unmap(). While this way requires
159 * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
160 * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
161 * allocations are possible (e.g. to allocate GPU page tables) and once in the
162 * dma-fence signalling critical path.
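 *
 * A brief sketch of this two step usage (driver_alloc_page_tables() and
 * driver_commit_op() are made up driver specific helpers)::
 *
 *        ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
 *        if (IS_ERR(ops))
 *                return PTR_ERR(ops);
 *
 *        drm_gpuva_for_each_op(op, ops)
 *                driver_alloc_page_tables(op); // may allocate memory
 *
 *        // later, from the dma-fence signalling critical path
 *        drm_gpuva_for_each_op(op, ops)
 *                driver_commit_op(op); // must not allocate memory
 *
 *        drm_gpuva_ops_free(gpuvm, ops);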
163 *
164 * To update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
165 * drm_gpuva_remove() may be used. These functions can safely be used from
166 * &drm_gpuvm_ops callbacks originating from drm_gpuvm_sm_map() or
167 * drm_gpuvm_sm_unmap(). However, it might be more convenient to use the
168 * provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
169 * drm_gpuva_unmap() instead.
170 *
171 * The following diagram depicts the basic relationships of existent GPU VA
172 * mappings, a newly requested mapping and the resulting mappings as implemented
173 * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
174 *
175 * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
176 * could be kept.
177 *
178 * ::
179 *
180 * 0 a 1
181 * old: |-----------| (bo_offset=n)
182 *
183 * 0 a 1
184 * req: |-----------| (bo_offset=n)
185 *
186 * 0 a 1
187 * new: |-----------| (bo_offset=n)
188 *
189 *
190 * 2) Requested mapping is identical, except for the BO offset, hence replace
191 * the mapping.
192 *
193 * ::
194 *
195 * 0 a 1
196 * old: |-----------| (bo_offset=n)
197 *
198 * 0 a 1
199 * req: |-----------| (bo_offset=m)
200 *
201 * 0 a 1
202 * new: |-----------| (bo_offset=m)
203 *
204 *
205 * 3) Requested mapping is identical, except for the backing BO, hence replace
206 * the mapping.
207 *
208 * ::
209 *
210 * 0 a 1
211 * old: |-----------| (bo_offset=n)
212 *
213 * 0 b 1
214 * req: |-----------| (bo_offset=n)
215 *
216 * 0 b 1
217 * new: |-----------| (bo_offset=n)
218 *
219 *
220 * 4) Existent mapping is a left aligned subset of the requested one, hence
221 * replace the existent one.
222 *
223 * ::
224 *
225 * 0 a 1
226 * old: |-----| (bo_offset=n)
227 *
228 * 0 a 2
229 * req: |-----------| (bo_offset=n)
230 *
231 * 0 a 2
232 * new: |-----------| (bo_offset=n)
233 *
234 * .. note::
235 * We expect to see the same result for a request with a different BO
236 * and/or non-contiguous BO offset.
237 *
238 *
239 * 5) Requested mapping's range is a left aligned subset of the existent one,
240 * but backed by a different BO. Hence, map the requested mapping and split
241 * the existent one adjusting its BO offset.
242 *
243 * ::
244 *
245 * 0 a 2
246 * old: |-----------| (bo_offset=n)
247 *
248 * 0 b 1
249 * req: |-----| (bo_offset=n)
250 *
251 * 0 b 1 a' 2
252 * new: |-----|-----| (b.bo_offset=n, a'.bo_offset=n+1)
253 *
254 * .. note::
255 * We expect to see the same result for a request with a different BO
256 * and/or non-contiguous BO offset.
257 *
258 *
259 * 6) Existent mapping is a superset of the requested mapping. Split it up, but
260 * indicate that the backing PTEs could be kept.
261 *
262 * ::
263 *
264 * 0 a 2
265 * old: |-----------| (bo_offset=n)
266 *
267 * 0 a 1
268 * req: |-----| (bo_offset=n)
269 *
270 * 0 a 1 a' 2
271 * new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
272 *
273 *
274 * 7) Requested mapping's range is a right aligned subset of the existent one,
275 * but backed by a different BO. Hence, map the requested mapping and split
276 * the existent one, without adjusting the BO offset.
277 *
278 * ::
279 *
280 * 0 a 2
281 * old: |-----------| (bo_offset=n)
282 *
283 * 1 b 2
284 * req: |-----| (bo_offset=m)
285 *
286 * 0 a 1 b 2
287 * new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
288 *
289 *
290 * 8) Existent mapping is a superset of the requested mapping. Split it up, but
291 * indicate that the backing PTEs could be kept.
292 *
293 * ::
294 *
295 * 0 a 2
296 * old: |-----------| (bo_offset=n)
297 *
298 * 1 a 2
299 * req: |-----| (bo_offset=n+1)
300 *
301 * 0 a' 1 a 2
302 * new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
303 *
304 *
305 * 9) Existent mapping is overlapped at the end by the requested mapping backed
306 * by a different BO. Hence, map the requested mapping and split up the
307 * existent one, without adjusting the BO offset.
308 *
309 * ::
310 *
311 * 0 a 2
312 * old: |-----------| (bo_offset=n)
313 *
314 * 1 b 3
315 * req: |-----------| (bo_offset=m)
316 *
317 * 0 a 1 b 3
318 * new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
319 *
320 *
321 * 10) Existent mapping is overlapped by the requested mapping, both having the
322 * same backing BO with a contiguous offset. Indicate the backing PTEs of
323 * the old mapping could be kept.
324 *
325 * ::
326 *
327 * 0 a 2
328 * old: |-----------| (bo_offset=n)
329 *
330 * 1 a 3
331 * req: |-----------| (bo_offset=n+1)
332 *
333 * 0 a' 1 a 3
334 * new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
335 *
336 *
337 * 11) Requested mapping's range is a centered subset of the existent one
338 * having a different backing BO. Hence, map the requested mapping and split
339 * up the existent one in two mappings, adjusting the BO offset of the right
340 * one accordingly.
341 *
342 * ::
343 *
344 * 0 a 3
345 * old: |-----------------| (bo_offset=n)
346 *
347 * 1 b 2
348 * req: |-----| (bo_offset=m)
349 *
350 * 0 a 1 b 2 a' 3
351 * new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
352 *
353 *
354 * 12) Requested mapping is a contiguous subset of the existent one. Split it
355 * up, but indicate that the backing PTEs could be kept.
356 *
357 * ::
358 *
359 * 0 a 3
360 * old: |-----------------| (bo_offset=n)
361 *
362 * 1 a 2
363 * req: |-----| (bo_offset=n+1)
364 *
365 * 0 a' 1 a 2 a'' 3
366 * new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
367 *
368 *
369 * 13) Existent mapping is a right aligned subset of the requested one, hence
370 * replace the existent one.
371 *
372 * ::
373 *
374 * 1 a 2
375 * old: |-----| (bo_offset=n+1)
376 *
377 * 0 a 2
378 * req: |-----------| (bo_offset=n)
379 *
380 * 0 a 2
381 * new: |-----------| (bo_offset=n)
382 *
383 * .. note::
384 * We expect to see the same result for a request with a different bo
385 * and/or non-contiguous bo_offset.
386 *
387 *
388 * 14) Existent mapping is a centered subset of the requested one, hence
389 * replace the existent one.
390 *
391 * ::
392 *
393 * 1 a 2
394 * old: |-----| (bo_offset=n+1)
395 *
396 * 0 a 3
397 * req: |----------------| (bo_offset=n)
398 *
399 * 0 a 3
400 * new: |----------------| (bo_offset=n)
401 *
402 * .. note::
403 * We expect to see the same result for a request with a different bo
404 * and/or non-contiguous bo_offset.
405 *
406 *
407 * 15) Existent mapping is overlapped at the beginning by the requested mapping
408 * backed by a different BO. Hence, map the requested mapping and split up
409 * the existent one, adjusting its BO offset accordingly.
410 *
411 * ::
412 *
413 * 1 a 3
414 * old: |-----------| (bo_offset=n)
415 *
416 * 0 b 2
417 * req: |-----------| (bo_offset=m)
418 *
419 * 0 b 2 a' 3
420 * new: |-----------|-----| (b.bo_offset=m,a'.bo_offset=n+2)
421 */
422
423 /**
424 * DOC: Locking
425 *
426 * In terms of managing &drm_gpuva entries DRM GPUVM does not take care of
427 * locking itself; it is the driver's responsibility to take care of locking.
428 * Drivers might want to protect the following operations: inserting, removing
429 * and iterating &drm_gpuva objects as well as generating all kinds of
430 * operations, such as split / merge or prefetch.
431 *
432 * DRM GPUVM also does not take care of the locking of the backing
433 * &drm_gem_object buffers' GPU VA lists and &drm_gpuvm_bo abstractions by
434 * itself; drivers are responsible for enforcing mutual exclusion using either
435 * the GEM's dma_resv lock or alternatively a driver specific external lock. For
436 * the latter see also drm_gem_gpuva_set_lock().
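 *
 * For the external lock case, a rough sketch (the placement of the mutex in a
 * driver specific GEM object structure is an assumption) could be::
 *
 *        mutex_init(&bo->gpuva_lock);
 *        drm_gem_gpuva_set_lock(&bo->base, &bo->gpuva_lock);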
437 *
438 * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
439 * the corresponding lock whenever the &drm_gem_object's GPU VA list is accessed
440 * by functions such as drm_gpuva_link() or drm_gpuva_unlink(), but also
441 * drm_gpuvm_bo_obtain() and drm_gpuvm_bo_put().
442 *
443 * The latter is required since on creation and destruction of a &drm_gpuvm_bo
444 * the &drm_gpuvm_bo is attached to / removed from the &drm_gem_object's gpuva list.
445 * Subsequent calls to drm_gpuvm_bo_obtain() for the same &drm_gpuvm and
446 * &drm_gem_object must be able to observe previous creations and destructions
447 * of &drm_gpuvm_bos in order to keep instances unique.
448 *
449 * The &drm_gpuvm's lists for keeping track of external and evicted objects are
450 * protected against concurrent insertion / removal and iteration internally.
451 *
452 * However, drivers still need to protect concurrent calls to functions
453 * iterating those lists, namely drm_gpuvm_prepare_objects() and
454 * drm_gpuvm_validate().
455 *
456 * Alternatively, drivers can set the &DRM_GPUVM_RESV_PROTECTED flag to indicate
457 * that the corresponding &dma_resv locks are held in order to protect the
458 * lists. If &DRM_GPUVM_RESV_PROTECTED is set, internal locking is disabled and
459 * the corresponding lockdep checks are enabled. This is an optimization for
460 * drivers which are capable of taking the corresponding &dma_resv locks and
461 * hence do not require internal locking.
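 *
 * The flag is passed to drm_gpuvm_init(); a brief sketch (the VA space bounds
 * and &driver_gpuvm_ops are illustrative assumptions)::
 *
 *        drm_gpuvm_init(gpuvm, "example-vm", DRM_GPUVM_RESV_PROTECTED, drm,
 *                       r_obj, 0, 1ull << 48, 0, SZ_4K, &driver_gpuvm_ops);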
462 */
463
464 /**
465 * DOC: Examples
466 *
467 * This section gives two examples on how to let the DRM GPUVA Manager generate
468 * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
469 * make use of them.
470 *
471 * The below code is strictly limited to illustrate the generic usage pattern.
472 * To maintain simplicity, it doesn't make use of any abstractions for common
473 * code, different (asynchronous) stages with fence signalling critical paths,
474 * any other helpers or error handling in terms of freeing memory and dropping
475 * previously taken locks.
476 *
477 * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
478 *
479 * // Allocates a new &drm_gpuva.
480 * struct drm_gpuva * driver_gpuva_alloc(void);
481 *
482 * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
483 * // structure in individual driver structures and lock the dma-resv with
484 * // drm_exec or similar helpers.
485 * int driver_mapping_create(struct drm_gpuvm *gpuvm,
486 * u64 addr, u64 range,
487 * struct drm_gem_object *obj, u64 offset)
488 * {
489 * struct drm_gpuva_ops *ops;
490 * struct drm_gpuva_op *op;
491 * struct drm_gpuvm_bo *vm_bo;
492 *
493 * driver_lock_va_space();
494 * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
495 * obj, offset);
496 * if (IS_ERR(ops))
497 * return PTR_ERR(ops);
498 *
499 * vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
500 * if (IS_ERR(vm_bo))
501 * return PTR_ERR(vm_bo);
502 *
503 * drm_gpuva_for_each_op(op, ops) {
504 * struct drm_gpuva *va;
505 *
506 * switch (op->op) {
507 * case DRM_GPUVA_OP_MAP:
508 * va = driver_gpuva_alloc();
509 * if (!va)
510 * ; // unwind previous VA space updates,
511 * // free memory and unlock
512 *
513 * driver_vm_map();
514 * drm_gpuva_map(gpuvm, va, &op->map);
515 * drm_gpuva_link(va, vm_bo);
516 *
517 * break;
518 * case DRM_GPUVA_OP_REMAP: {
519 * struct drm_gpuva *prev = NULL, *next = NULL;
520 *
521 * va = op->remap.unmap->va;
522 *
523 * if (op->remap.prev) {
524 * prev = driver_gpuva_alloc();
525 * if (!prev)
526 * ; // unwind previous VA space
527 * // updates, free memory and
528 * // unlock
529 * }
530 *
531 * if (op->remap.next) {
532 * next = driver_gpuva_alloc();
533 * if (!next)
534 * ; // unwind previous VA space
535 * // updates, free memory and
536 * // unlock
537 * }
538 *
539 * driver_vm_remap();
540 * drm_gpuva_remap(prev, next, &op->remap);
541 *
542 * if (prev)
543 * drm_gpuva_link(prev, va->vm_bo);
544 * if (next)
545 * drm_gpuva_link(next, va->vm_bo);
546 * drm_gpuva_unlink(va);
547 *
548 * break;
549 * }
550 * case DRM_GPUVA_OP_UNMAP:
551 * va = op->unmap->va;
552 *
553 * driver_vm_unmap();
554 * drm_gpuva_unlink(va);
555 * drm_gpuva_unmap(&op->unmap);
556 *
557 * break;
558 * default:
559 * break;
560 * }
561 * }
562 * drm_gpuvm_bo_put(vm_bo);
563 * driver_unlock_va_space();
564 *
565 * return 0;
566 * }
567 *
568 * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
569 *
570 * struct driver_context {
571 * struct drm_gpuvm *gpuvm;
572 * struct drm_gpuvm_bo *vm_bo;
573 * struct drm_gpuva *new_va;
574 * struct drm_gpuva *prev_va;
575 * struct drm_gpuva *next_va;
576 * };
577 *
578 * // ops to pass to drm_gpuvm_init()
579 * static const struct drm_gpuvm_ops driver_gpuvm_ops = {
580 * .sm_step_map = driver_gpuva_map,
581 * .sm_step_remap = driver_gpuva_remap,
582 * .sm_step_unmap = driver_gpuva_unmap,
583 * };
584 *
585 * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
586 * // structure in individual driver structures and lock the dma-resv with
587 * // drm_exec or similar helpers.
588 * int driver_mapping_create(struct drm_gpuvm *gpuvm,
589 * u64 addr, u64 range,
590 * struct drm_gem_object *obj, u64 offset)
591 * {
592 * struct driver_context ctx;
594 * struct drm_gpuva_ops *ops;
595 * struct drm_gpuva_op *op;
596 * int ret = 0;
597 *
598 * ctx.gpuvm = gpuvm;
599 *
600 * ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
601 * ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
602 * ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
603 * ctx.vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
604 * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va || !ctx.vm_bo) {
605 * ret = -ENOMEM;
606 * goto out;
607 * }
608 *
609 * // Typically protected with a driver specific GEM gpuva lock
610 * // used in the fence signaling path for drm_gpuva_link() and
611 * // drm_gpuva_unlink(), hence pre-allocate.
612 * ctx.vm_bo = drm_gpuvm_bo_obtain_prealloc(ctx.vm_bo);
613 *
614 * driver_lock_va_space();
615 * ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset);
616 * driver_unlock_va_space();
617 *
618 * out:
619 * drm_gpuvm_bo_put(ctx.vm_bo);
620 * kfree(ctx.new_va);
621 * kfree(ctx.prev_va);
622 * kfree(ctx.next_va);
623 * return ret;
624 * }
625 *
626 * int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
627 * {
628 * struct driver_context *ctx = __ctx;
629 *
630 * drm_gpuva_map(ctx->gpuvm, ctx->new_va, &op->map);
631 *
632 * drm_gpuva_link(ctx->new_va, ctx->vm_bo);
633 *
634 * // prevent the new GPUVA from being freed in
635 * // driver_mapping_create()
636 * ctx->new_va = NULL;
637 *
638 * return 0;
639 * }
640 *
641 * int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
642 * {
643 * struct driver_context *ctx = __ctx;
644 * struct drm_gpuva *va = op->remap.unmap->va;
645 *
646 * drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
647 *
648 * if (op->remap.prev) {
649 * drm_gpuva_link(ctx->prev_va, va->vm_bo);
650 * ctx->prev_va = NULL;
651 * }
652 *
653 * if (op->remap.next) {
654 * drm_gpuva_link(ctx->next_va, va->vm_bo);
655 * ctx->next_va = NULL;
656 * }
657 *
658 * drm_gpuva_unlink(va);
659 * kfree(va);
660 *
661 * return 0;
662 * }
663 *
664 * int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
665 * {
666 * drm_gpuva_unlink(op->unmap.va);
667 * drm_gpuva_unmap(&op->unmap);
668 * kfree(op->unmap.va);
669 *
670 * return 0;
671 * }
672 */
673
674 /**
675 * get_next_vm_bo_from_list() - get the next vm_bo element
676 * @__gpuvm: the &drm_gpuvm
677 * @__list_name: the name of the list we're iterating on
678 * @__local_list: a pointer to the local list used to store already iterated items
679 * @__prev_vm_bo: the previous element we got from get_next_vm_bo_from_list()
680 *
681 * This helper is here to provide lockless list iteration. Lockless as in, the
682 * iterator releases the lock immediately after picking the first element from
683 * the list, so list insertion and deletion can happen concurrently.
684 *
685 * Elements popped from the original list are kept in a local list, so removal
686 * and is_empty checks can still happen while we're iterating the list.
687 */
688 #define get_next_vm_bo_from_list(__gpuvm, __list_name, __local_list, __prev_vm_bo) \
689 ({ \
690 struct drm_gpuvm_bo *__vm_bo = NULL; \
691 \
692 drm_gpuvm_bo_put(__prev_vm_bo); \
693 \
694 spin_lock(&(__gpuvm)->__list_name.lock); \
695 if (!(__gpuvm)->__list_name.local_list) \
696 (__gpuvm)->__list_name.local_list = __local_list; \
697 else \
698 drm_WARN_ON((__gpuvm)->drm, \
699 (__gpuvm)->__list_name.local_list != __local_list); \
700 \
701 while (!list_empty(&(__gpuvm)->__list_name.list)) { \
702 __vm_bo = list_first_entry(&(__gpuvm)->__list_name.list, \
703 struct drm_gpuvm_bo, \
704 list.entry.__list_name); \
705 if (kref_get_unless_zero(&__vm_bo->kref)) { \
706 list_move_tail(&(__vm_bo)->list.entry.__list_name, \
707 __local_list); \
708 break; \
709 } else { \
710 list_del_init(&(__vm_bo)->list.entry.__list_name); \
711 __vm_bo = NULL; \
712 } \
713 } \
714 spin_unlock(&(__gpuvm)->__list_name.lock); \
715 \
716 __vm_bo; \
717 })
718
719 /**
720 * for_each_vm_bo_in_list() - internal vm_bo list iterator
721 * @__gpuvm: the &drm_gpuvm
722 * @__list_name: the name of the list we're iterating on
723 * @__local_list: a pointer to the local list used to store already iterated items
724 * @__vm_bo: the struct drm_gpuvm_bo to assign in each iteration step
725 *
726 * This helper is here to provide lockless list iteration. Lockless as in, the
727 * iterator releases the lock immediately after picking the first element from the
728 * list, hence list insertion and deletion can happen concurrently.
729 *
730 * It is not allowed to re-assign the vm_bo pointer from inside this loop.
731 *
732 * Typical use:
733 *
734 * struct drm_gpuvm_bo *vm_bo;
735 * LIST_HEAD(my_local_list);
736 *
737 * ret = 0;
738 * for_each_vm_bo_in_list(gpuvm, <list_name>, &my_local_list, vm_bo) {
739 * ret = do_something_with_vm_bo(..., vm_bo);
740 * if (ret)
741 * break;
742 * }
743 * // Drop ref in case we break out of the loop.
744 * drm_gpuvm_bo_put(vm_bo);
745 * restore_vm_bo_list(gpuvm, <list_name>, &my_local_list);
746 *
747 *
748 * Only used for internal list iterations, not meant to be exposed to the outside
749 * world.
750 */
751 #define for_each_vm_bo_in_list(__gpuvm, __list_name, __local_list, __vm_bo) \
752 for (__vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \
753 __local_list, NULL); \
754 __vm_bo; \
755 __vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \
756 __local_list, __vm_bo))
757
758 static void
759 __restore_vm_bo_list(struct drm_gpuvm *gpuvm, spinlock_t *lock,
760 struct list_head *list, struct list_head **local_list)
761 {
762 /* Merge back the two lists, moving local list elements to the
763 * head to preserve previous ordering, in case it matters.
764 */
765 spin_lock(lock);
766 if (*local_list) {
767 list_splice(*local_list, list);
768 *local_list = NULL;
769 }
770 spin_unlock(lock);
771 }
772
773 /**
774 * restore_vm_bo_list() - move vm_bo elements back to their original list
775 * @__gpuvm: the &drm_gpuvm
776 * @__list_name: the name of the list we're iterating on
777 *
778 * When we're done iterating a vm_bo list, we should call restore_vm_bo_list()
779 * to restore the original state and let new iterations take place.
780 */
781 #define restore_vm_bo_list(__gpuvm, __list_name) \
782 __restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock, \
783 &(__gpuvm)->__list_name.list, \
784 &(__gpuvm)->__list_name.local_list)
785
786 static void
787 cond_spin_lock(spinlock_t *lock, bool cond)
788 {
789 if (cond)
790 spin_lock(lock);
791 }
792
793 static void
794 cond_spin_unlock(spinlock_t *lock, bool cond)
795 {
796 if (cond)
797 spin_unlock(lock);
798 }
799
800 static void
801 __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
802 struct list_head *entry, struct list_head *list)
803 {
804 cond_spin_lock(lock, !!lock);
805 if (list_empty(entry))
806 list_add_tail(entry, list);
807 cond_spin_unlock(lock, !!lock);
808 }
809
810 /**
811 * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
812 * @__vm_bo: the &drm_gpuvm_bo
813 * @__list_name: the name of the list to insert into
814 * @__lock: whether to lock with the internal spinlock
815 *
816 * Inserts the given @__vm_bo into the list specified by @__list_name.
817 */
818 #define drm_gpuvm_bo_list_add(__vm_bo, __list_name, __lock) \
819 __drm_gpuvm_bo_list_add((__vm_bo)->vm, \
820 __lock ? &(__vm_bo)->vm->__list_name.lock : \
821 NULL, \
822 &(__vm_bo)->list.entry.__list_name, \
823 &(__vm_bo)->vm->__list_name.list)
824
825 static void
826 __drm_gpuvm_bo_list_del(struct drm_gpuvm *gpuvm, spinlock_t *lock,
827 struct list_head *entry, bool init)
828 {
829 cond_spin_lock(lock, !!lock);
830 if (init) {
831 if (!list_empty(entry))
832 list_del_init(entry);
833 } else {
834 list_del(entry);
835 }
836 cond_spin_unlock(lock, !!lock);
837 }
838
839 /**
840 * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list
841 * @__vm_bo: the &drm_gpuvm_bo
842 * @__list_name: the name of the list to insert into
843 * @__lock: whether to lock with the internal spinlock
844 *
845 * Removes the given @__vm_bo from the list specified by @__list_name.
846 */
847 #define drm_gpuvm_bo_list_del_init(__vm_bo, __list_name, __lock) \
848 __drm_gpuvm_bo_list_del((__vm_bo)->vm, \
849 __lock ? &(__vm_bo)->vm->__list_name.lock : \
850 NULL, \
851 &(__vm_bo)->list.entry.__list_name, \
852 true)
853
854 /**
855 * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list
856 * @__vm_bo: the &drm_gpuvm_bo
857 * @__list_name: the name of the list to insert into
858 * @__lock: whether to lock with the internal spinlock
859 *
860 * Removes the given @__vm_bo from the list specified by @__list_name.
861 */
862 #define drm_gpuvm_bo_list_del(__vm_bo, __list_name, __lock) \
863 __drm_gpuvm_bo_list_del((__vm_bo)->vm, \
864 __lock ? &(__vm_bo)->vm->__list_name.lock : \
865 NULL, \
866 &(__vm_bo)->list.entry.__list_name, \
867 false)
868
869 #define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node)
870
871 #define GPUVA_START(node) ((node)->va.addr)
872 #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
873
874 /* We do not actually use drm_gpuva_it_next(); tell the compiler not to complain
875 * about this.
876 */
877 INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
878 GPUVA_START, GPUVA_LAST, static __maybe_unused,
879 drm_gpuva_it)
880
881 static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
882 struct drm_gpuva *va);
883 static void __drm_gpuva_remove(struct drm_gpuva *va);
884
885 static bool
886 drm_gpuvm_check_overflow(u64 addr, u64 range)
887 {
888 u64 end;
889
890 return check_add_overflow(addr, range, &end);
891 }
892
893 static bool
894 drm_gpuvm_warn_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
895 {
896 return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range),
897 "GPUVA address limited to %zu bytes.\n", sizeof(addr));
898 }
899
900 static bool
901 drm_gpuvm_in_mm_range(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
902 {
903 u64 end = addr + range;
904 u64 mm_start = gpuvm->mm_start;
905 u64 mm_end = mm_start + gpuvm->mm_range;
906
907 return addr >= mm_start && end <= mm_end;
908 }
909
910 static bool
911 drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
912 {
913 u64 end = addr + range;
914 u64 kstart = gpuvm->kernel_alloc_node.va.addr;
915 u64 krange = gpuvm->kernel_alloc_node.va.range;
916 u64 kend = kstart + krange;
917
918 return krange && addr < kend && kstart < end;
919 }
920
921 /**
922 * drm_gpuvm_range_valid() - checks whether the given range is valid for the
923 * given &drm_gpuvm
924 * @gpuvm: the GPUVM to check the range for
925 * @addr: the base address
926 * @range: the range starting from the base address
927 *
928 * Checks whether the range is within the GPUVM's managed boundaries.
929 *
930 * Returns: true for a valid range, false otherwise
931 */
932 bool
933 drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
934 u64 addr, u64 range)
935 {
936 return !drm_gpuvm_check_overflow(addr, range) &&
937 drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
938 !drm_gpuvm_in_kernel_node(gpuvm, addr, range);
939 }
940 EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid);
941
942 static void
943 drm_gpuvm_gem_object_free(struct drm_gem_object *obj)
944 {
945 drm_gem_object_release(obj);
946 kfree(obj);
947 }
948
949 static const struct drm_gem_object_funcs drm_gpuvm_object_funcs = {
950 .free = drm_gpuvm_gem_object_free,
951 };
952
953 /**
954 * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object
955 * @drm: the drivers &drm_device
956 *
957 * Allocates a dummy &drm_gem_object which can be passed to drm_gpuvm_init() in
958 * order to serve as root GEM object providing the &dma_resv shared across
959 * &drm_gem_objects local to a single GPUVM.
960 *
961 * Returns: the &drm_gem_object on success, NULL on failure
962 */
963 struct drm_gem_object *
964 drm_gpuvm_resv_object_alloc(struct drm_device *drm)
965 {
966 struct drm_gem_object *obj;
967
968 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
969 if (!obj)
970 return NULL;
971
972 obj->funcs = &drm_gpuvm_object_funcs;
973 drm_gem_private_object_init(drm, obj, 0);
974
975 return obj;
976 }
977 EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc);
978
979 /**
980 * drm_gpuvm_init() - initialize a &drm_gpuvm
981 * @gpuvm: pointer to the &drm_gpuvm to initialize
982 * @name: the name of the GPU VA space
983 * @flags: the &drm_gpuvm_flags for this GPUVM
984 * @drm: the &drm_device this VM resides in
985 * @r_obj: the resv &drm_gem_object providing the GPUVM's common &dma_resv
986 * @start_offset: the start offset of the GPU VA space
987 * @range: the size of the GPU VA space
988 * @reserve_offset: the start of the kernel reserved GPU VA area
989 * @reserve_range: the size of the kernel reserved GPU VA area
990 * @ops: &drm_gpuvm_ops called on &drm_gpuvm_sm_map / &drm_gpuvm_sm_unmap
991 *
992 * The &drm_gpuvm must be initialized with this function before use.
993 *
994 * Note that @gpuvm must be cleared to 0 before calling this function. The given
995 * &name is expected to be managed by the surrounding driver structures.
996 */
997 void
998 drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
999 enum drm_gpuvm_flags flags,
1000 struct drm_device *drm,
1001 struct drm_gem_object *r_obj,
1002 u64 start_offset, u64 range,
1003 u64 reserve_offset, u64 reserve_range,
1004 const struct drm_gpuvm_ops *ops)
1005 {
1006 gpuvm->rb.tree = RB_ROOT_CACHED;
1007 INIT_LIST_HEAD(&gpuvm->rb.list);
1008
1009 INIT_LIST_HEAD(&gpuvm->extobj.list);
1010 spin_lock_init(&gpuvm->extobj.lock);
1011
1012 INIT_LIST_HEAD(&gpuvm->evict.list);
1013 spin_lock_init(&gpuvm->evict.lock);
1014
1015 kref_init(&gpuvm->kref);
1016
1017 gpuvm->name = name ? name : "unknown";
1018 gpuvm->flags = flags;
1019 gpuvm->ops = ops;
1020 gpuvm->drm = drm;
1021 gpuvm->r_obj = r_obj;
1022
1023 drm_gem_object_get(r_obj);
1024
1025 drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range);
1026 gpuvm->mm_start = start_offset;
1027 gpuvm->mm_range = range;
1028
1029 memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
1030 if (reserve_range) {
1031 gpuvm->kernel_alloc_node.va.addr = reserve_offset;
1032 gpuvm->kernel_alloc_node.va.range = reserve_range;
1033
1034 if (likely(!drm_gpuvm_warn_check_overflow(gpuvm, reserve_offset,
1035 reserve_range)))
1036 __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
1037 }
1038 }
1039 EXPORT_SYMBOL_GPL(drm_gpuvm_init);
1040
1041 static void
1042 drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
1043 {
1044 gpuvm->name = NULL;
1045
1046 if (gpuvm->kernel_alloc_node.va.range)
1047 __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
1048
1049 drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
1050 "GPUVA tree is not empty, potentially leaking memory.\n");
1051
1052 drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list),
1053 "Extobj list should be empty.\n");
1054 drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
1055 "Evict list should be empty.\n");
1056
1057 drm_gem_object_put(gpuvm->r_obj);
1058 }
1059
1060 static void
1061 drm_gpuvm_free(struct kref *kref)
1062 {
1063 struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
1064
1065 drm_gpuvm_fini(gpuvm);
1066
1067 if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
1068 return;
1069
1070 gpuvm->ops->vm_free(gpuvm);
1071 }
1072
1073 /**
1074 * drm_gpuvm_put() - drop a struct drm_gpuvm reference
1075 * @gpuvm: the &drm_gpuvm to release the reference of
1076 *
1077 * This releases a reference to @gpuvm.
1078 *
1079 * This function may be called from atomic context.
1080 */
1081 void
1082 drm_gpuvm_put(struct drm_gpuvm *gpuvm)
1083 {
1084 if (gpuvm)
1085 kref_put(&gpuvm->kref, drm_gpuvm_free);
1086 }
1087 EXPORT_SYMBOL_GPL(drm_gpuvm_put);
1088
1089 static int
1090 exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
1091 unsigned int num_fences)
1092 {
1093 return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) :
1094 drm_exec_lock_obj(exec, obj);
1095 }
1096
1097 /**
1098 * drm_gpuvm_prepare_vm() - prepare the GPUVM's common dma-resv
1099 * @gpuvm: the &drm_gpuvm
1100 * @exec: the &drm_exec context
1101 * @num_fences: the amount of &dma_fences to reserve
1102 *
1103 * Calls drm_exec_prepare_obj() for the GPUVM's dummy &drm_gem_object; if
1104 * @num_fences is zero drm_exec_lock_obj() is called instead.
1105 *
1106 * Using this function directly, it is the driver's responsibility to call
1107 * drm_exec_init() and drm_exec_fini() accordingly.
1108 *
1109 * Returns: 0 on success, negative error code on failure.
1110 */
1111 int
1112 drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
1113 struct drm_exec *exec,
1114 unsigned int num_fences)
1115 {
1116 return exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
1117 }
1118 EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm);
1119
1120 static int
1121 __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
1122 struct drm_exec *exec,
1123 unsigned int num_fences)
1124 {
1125 struct drm_gpuvm_bo *vm_bo;
1126 LIST_HEAD(extobjs);
1127 int ret = 0;
1128
1129 for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
1130 ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
1131 if (ret)
1132 break;
1133 }
1134 /* Drop ref in case we break out of the loop. */
1135 drm_gpuvm_bo_put(vm_bo);
1136 restore_vm_bo_list(gpuvm, extobj);
1137
1138 return ret;
1139 }
1140
1141 static int
1142 drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
1143 struct drm_exec *exec,
1144 unsigned int num_fences)
1145 {
1146 struct drm_gpuvm_bo *vm_bo;
1147 int ret = 0;
1148
1149 drm_gpuvm_resv_assert_held(gpuvm);
1150 list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
1151 ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
1152 if (ret)
1153 break;
1154
1155 if (vm_bo->evicted)
1156 drm_gpuvm_bo_list_add(vm_bo, evict, false);
1157 }
1158
1159 return ret;
1160 }
1161
1162 /**
1163 * drm_gpuvm_prepare_objects() - prepare all associated BOs
1164 * @gpuvm: the &drm_gpuvm
1165 * @exec: the &drm_exec locking context
1166 * @num_fences: the amount of &dma_fences to reserve
1167 *
1168 * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given
1169 * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj()
1170 * is called instead.
1171 *
1172 * Using this function directly, it is the driver's responsibility to call
1173 * drm_exec_init() and drm_exec_fini() accordingly.
1174 *
1175 * Note: This function is safe against concurrent insertion and removal of
1176 * external objects, however it is not safe against concurrent usage itself.
1177 *
1178 * Drivers need to make sure to protect this case with either an outer VM lock
1179 * or by calling drm_gpuvm_prepare_vm() before this function within the
1180 * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures
1181 * mutual exclusion.
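 *
 * A minimal sketch of the latter (error unwinding, i.e. dropping the locks
 * via drm_exec_fini() from the err label, is omitted) could look like this::
 *
 *        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *
 *        drm_exec_until_all_locked(&exec) {
 *                ret = drm_gpuvm_prepare_vm(gpuvm, &exec, num_fences);
 *                drm_exec_retry_on_contention(&exec);
 *                if (ret)
 *                        goto err;
 *
 *                ret = drm_gpuvm_prepare_objects(gpuvm, &exec, num_fences);
 *                drm_exec_retry_on_contention(&exec);
 *                if (ret)
 *                        goto err;
 *        }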
1182 *
1183 * Returns: 0 on success, negative error code on failure.
1184 */
1185 int
1186 drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
1187 struct drm_exec *exec,
1188 unsigned int num_fences)
1189 {
1190 if (drm_gpuvm_resv_protected(gpuvm))
1191 return drm_gpuvm_prepare_objects_locked(gpuvm, exec,
1192 num_fences);
1193 else
1194 return __drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
1195 }
1196 EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects);
1197
1198 /**
1199 * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range
1200 * @gpuvm: the &drm_gpuvm
1201 * @exec: the &drm_exec locking context
1202 * @addr: the start address within the VA space
1203 * @range: the range to iterate within the VA space
1204 * @num_fences: the amount of &dma_fences to reserve
1205 *
1206 * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr
1207 * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called
1208 * instead.
1209 *
1210 * Returns: 0 on success, negative error code on failure.
1211 */
1212 int
1213 drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
1214 u64 addr, u64 range, unsigned int num_fences)
1215 {
1216 struct drm_gpuva *va;
1217 u64 end = addr + range;
1218 int ret;
1219
1220 drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
1221 struct drm_gem_object *obj = va->gem.obj;
1222
1223 ret = exec_prepare_obj(exec, obj, num_fences);
1224 if (ret)
1225 return ret;
1226 }
1227
1228 return 0;
1229 }
1230 EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
1231
1232 /**
1233 * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
1234 * @vm_exec: the &drm_gpuvm_exec wrapper
1235 *
1236 * Acquires all dma-resv locks of all &drm_gem_objects the given
1237 * &drm_gpuvm contains mappings of.
1238 *
1239 * Additionally, when calling this function with struct drm_gpuvm_exec::extra
1240 * being set the driver receives the given @fn callback to lock additional
1241 * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
1242 * would call drm_exec_prepare_obj() from within this callback.
1243 *
1244 * Returns: 0 on success, negative error code on failure.
1245 */
1246 int
1247 drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec)
1248 {
1249 struct drm_gpuvm *gpuvm = vm_exec->vm;
1250 struct drm_exec *exec = &vm_exec->exec;
1251 unsigned int num_fences = vm_exec->num_fences;
1252 int ret;
1253
1254 drm_exec_init(exec, vm_exec->flags, 0);
1255
1256 drm_exec_until_all_locked(exec) {
1257 ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
1258 drm_exec_retry_on_contention(exec);
1259 if (ret)
1260 goto err;
1261
1262 ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
1263 drm_exec_retry_on_contention(exec);
1264 if (ret)
1265 goto err;
1266
1267 if (vm_exec->extra.fn) {
1268 ret = vm_exec->extra.fn(vm_exec);
1269 drm_exec_retry_on_contention(exec);
1270 if (ret)
1271 goto err;
1272 }
1273 }
1274
1275 return 0;
1276
1277 err:
1278 drm_exec_fini(exec);
1279 return ret;
1280 }
1281 EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock);
1282
1283 static int
1284 fn_lock_array(struct drm_gpuvm_exec *vm_exec)
1285 {
1286 struct {
1287 struct drm_gem_object **objs;
1288 unsigned int num_objs;
1289 } *args = vm_exec->extra.priv;
1290
1291 return drm_exec_prepare_array(&vm_exec->exec, args->objs,
1292 args->num_objs, vm_exec->num_fences);
1293 }
1294
1295 /**
1296 * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
1297 * @vm_exec: the &drm_gpuvm_exec wrapper
1298 * @objs: additional &drm_gem_objects to lock
1299 * @num_objs: the number of additional &drm_gem_objects to lock
1300 *
1301 * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm
1302 * contains mappings of, plus the ones given through @objs.
1303 *
1304 * Returns: 0 on success, negative error code on failure.
1305 */
1306 int
1307 drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
1308 struct drm_gem_object **objs,
1309 unsigned int num_objs)
1310 {
1311 struct {
1312 struct drm_gem_object **objs;
1313 unsigned int num_objs;
1314 } args;
1315
1316 args.objs = objs;
1317 args.num_objs = num_objs;
1318
1319 vm_exec->extra.fn = fn_lock_array;
1320 vm_exec->extra.priv = &args;
1321
1322 return drm_gpuvm_exec_lock(vm_exec);
1323 }
1324 EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_array);
1325
1326 /**
1327 * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range
1328 * @vm_exec: the &drm_gpuvm_exec wrapper
1329 * @addr: the start address within the VA space
1330 * @range: the range to iterate within the VA space
1331 *
1332 * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and
1333 * @addr + @range.
1334 *
1335 * Returns: 0 on success, negative error code on failure.
1336 */
1337 int
1338 drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
1339 u64 addr, u64 range)
1340 {
1341 struct drm_gpuvm *gpuvm = vm_exec->vm;
1342 struct drm_exec *exec = &vm_exec->exec;
1343 int ret;
1344
1345 drm_exec_init(exec, vm_exec->flags, 0);
1346
1347 drm_exec_until_all_locked(exec) {
1348 ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
1349 vm_exec->num_fences);
1350 drm_exec_retry_on_contention(exec);
1351 if (ret)
1352 goto err;
1353 }
1354
1355 return ret;
1356
1357 err:
1358 drm_exec_fini(exec);
1359 return ret;
1360 }
1361 EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_range);
1362
1363 static int
1364 __drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1365 {
1366 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1367 struct drm_gpuvm_bo *vm_bo;
1368 LIST_HEAD(evict);
1369 int ret = 0;
1370
1371 for_each_vm_bo_in_list(gpuvm, evict, &evict, vm_bo) {
1372 ret = ops->vm_bo_validate(vm_bo, exec);
1373 if (ret)
1374 break;
1375 }
1376 /* Drop ref in case we break out of the loop. */
1377 drm_gpuvm_bo_put(vm_bo);
1378 restore_vm_bo_list(gpuvm, evict);
1379
1380 return ret;
1381 }
1382
1383 static int
1384 drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1385 {
1386 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1387 struct drm_gpuvm_bo *vm_bo, *next;
1388 int ret = 0;
1389
1390 drm_gpuvm_resv_assert_held(gpuvm);
1391
1392 list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
1393 list.entry.evict) {
1394 ret = ops->vm_bo_validate(vm_bo, exec);
1395 if (ret)
1396 break;
1397
1398 dma_resv_assert_held(vm_bo->obj->resv);
1399 if (!vm_bo->evicted)
1400 drm_gpuvm_bo_list_del_init(vm_bo, evict, false);
1401 }
1402
1403 return ret;
1404 }
1405
1406 /**
1407 * drm_gpuvm_validate() - validate all BOs marked as evicted
1408 * @gpuvm: the &drm_gpuvm to validate evicted BOs
1409 * @exec: the &drm_exec instance used for locking the GPUVM
1410 *
1411 * Calls the &drm_gpuvm_ops::vm_bo_validate callback for all evicted buffer
1412 * objects being mapped in the given &drm_gpuvm.
1413 *
1414 * Returns: 0 on success, negative error code on failure.
1415 */
1416 int
1417 drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1418 {
1419 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1420
1421 if (unlikely(!ops || !ops->vm_bo_validate))
1422 return -EOPNOTSUPP;
1423
1424 if (drm_gpuvm_resv_protected(gpuvm))
1425 return drm_gpuvm_validate_locked(gpuvm, exec);
1426 else
1427 return __drm_gpuvm_validate(gpuvm, exec);
1428 }
1429 EXPORT_SYMBOL_GPL(drm_gpuvm_validate);
1430
1431 /**
1432 * drm_gpuvm_resv_add_fence() - add fence to private and all extobj
1433 * dma-resv
1434 * @gpuvm: the &drm_gpuvm to add a fence to
1435 * @exec: the &drm_exec locking context
1436 * @fence: fence to add
1437 * @private_usage: private dma-resv usage
1438 * @extobj_usage: extobj dma-resv usage
1439 */
1440 void
1441 drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
1442 struct drm_exec *exec,
1443 struct dma_fence *fence,
1444 enum dma_resv_usage private_usage,
1445 enum dma_resv_usage extobj_usage)
1446 {
1447 struct drm_gem_object *obj;
1448 unsigned long index;
1449
1450 drm_exec_for_each_locked_object(exec, index, obj) {
1451 dma_resv_assert_held(obj->resv);
1452 dma_resv_add_fence(obj->resv, fence,
1453 drm_gpuvm_is_extobj(gpuvm, obj) ?
1454 extobj_usage : private_usage);
1455 }
1456 }
1457 EXPORT_SYMBOL_GPL(drm_gpuvm_resv_add_fence);
1458
1459 /**
1460 * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo
1461 * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1462 * @obj: The &drm_gem_object being mapped in the @gpuvm.
1463 *
1464 * If provided by the driver, this function uses the &drm_gpuvm_ops
1465 * vm_bo_alloc() callback to allocate.
1466 *
1467 * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
1468 */
1469 struct drm_gpuvm_bo *
1470 drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
1471 struct drm_gem_object *obj)
1472 {
1473 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1474 struct drm_gpuvm_bo *vm_bo;
1475
1476 if (ops && ops->vm_bo_alloc)
1477 vm_bo = ops->vm_bo_alloc();
1478 else
1479 vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);
1480
1481 if (unlikely(!vm_bo))
1482 return NULL;
1483
1484 vm_bo->vm = drm_gpuvm_get(gpuvm);
1485 vm_bo->obj = obj;
1486 drm_gem_object_get(obj);
1487
1488 kref_init(&vm_bo->kref);
1489 INIT_LIST_HEAD(&vm_bo->list.gpuva);
1490 INIT_LIST_HEAD(&vm_bo->list.entry.gem);
1491
1492 INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
1493 INIT_LIST_HEAD(&vm_bo->list.entry.evict);
1494
1495 return vm_bo;
1496 }
1497 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
1498
1499 static void
1500 drm_gpuvm_bo_destroy(struct kref *kref)
1501 {
1502 struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1503 kref);
1504 struct drm_gpuvm *gpuvm = vm_bo->vm;
1505 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1506 struct drm_gem_object *obj = vm_bo->obj;
1507 bool lock = !drm_gpuvm_resv_protected(gpuvm);
1508
1509 if (!lock)
1510 drm_gpuvm_resv_assert_held(gpuvm);
1511
1512 drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
1513 drm_gpuvm_bo_list_del(vm_bo, evict, lock);
1514
1515 drm_gem_gpuva_assert_lock_held(obj);
1516 list_del(&vm_bo->list.entry.gem);
1517
1518 if (ops && ops->vm_bo_free)
1519 ops->vm_bo_free(vm_bo);
1520 else
1521 kfree(vm_bo);
1522
1523 drm_gpuvm_put(gpuvm);
1524 drm_gem_object_put(obj);
1525 }
1526
1527 /**
1528 * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
1529 * @vm_bo: the &drm_gpuvm_bo to release the reference of
1530 *
1531 * This releases a reference to @vm_bo.
1532 *
1533 * If the reference count drops to zero, the &drm_gpuvm_bo is destroyed, which
1534 * includes removing it from the GEM's gpuva list. Hence, if a call to this
1535 * function can potentially let the reference count drop to zero the caller must
1536 * hold the dma-resv or driver specific GEM gpuva lock.
1537 *
1538 * This function may only be called from non-atomic context.
1539 *
1540 * Returns: true if vm_bo was destroyed, false otherwise.
1541 */
1542 bool
1543 drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
1544 {
1545 might_sleep();
1546
1547 if (vm_bo)
1548 return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
1549
1550 return false;
1551 }
1552 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
1553
1554 static struct drm_gpuvm_bo *
1555 __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
1556 struct drm_gem_object *obj)
1557 {
1558 struct drm_gpuvm_bo *vm_bo;
1559
1560 drm_gem_gpuva_assert_lock_held(obj);
1561 drm_gem_for_each_gpuvm_bo(vm_bo, obj)
1562 if (vm_bo->vm == gpuvm)
1563 return vm_bo;
1564
1565 return NULL;
1566 }
1567
1568 /**
1569 * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given
1570 * &drm_gpuvm and &drm_gem_object
1571 * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1572 * @obj: The &drm_gem_object being mapped in the @gpuvm.
1573 *
1574 * Find the &drm_gpuvm_bo representing the combination of the given
1575 * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1576 * count of the &drm_gpuvm_bo accordingly.
1577 *
1578 * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
1579 */
1580 struct drm_gpuvm_bo *
1581 drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
1582 struct drm_gem_object *obj)
1583 {
1584 struct drm_gpuvm_bo *vm_bo = __drm_gpuvm_bo_find(gpuvm, obj);
1585
1586 return vm_bo ? drm_gpuvm_bo_get(vm_bo) : NULL;
1587 }
1588 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
1589
1590 /**
1591 * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
1592 * given &drm_gpuvm and &drm_gem_object
1593 * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1594 * @obj: The &drm_gem_object being mapped in the @gpuvm.
1595 *
1596 * Find the &drm_gpuvm_bo representing the combination of the given
1597 * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1598 * count of the &drm_gpuvm_bo accordingly. If not found, allocates a new
1599 * &drm_gpuvm_bo.
1600 *
1601 * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
1602 *
1603 * Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure
1604 */
1605 struct drm_gpuvm_bo *
1606 drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
1607 struct drm_gem_object *obj)
1608 {
1609 struct drm_gpuvm_bo *vm_bo;
1610
1611 vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
1612 if (vm_bo)
1613 return vm_bo;
1614
1615 vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
1616 if (!vm_bo)
1617 return ERR_PTR(-ENOMEM);
1618
1619 drm_gem_gpuva_assert_lock_held(obj);
1620 list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
1621
1622 return vm_bo;
1623 }
1624 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
1625
1626 /**
1627 * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
1628 * for the given &drm_gpuvm and &drm_gem_object
1629 * @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
1630 *
1631 * Find the &drm_gpuvm_bo representing the combination of the given
1632 * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1633 * count of the found &drm_gpuvm_bo accordingly, while the @__vm_bo reference
1634 * count is decreased. If not found, @__vm_bo is returned without further
1635 * increase of the reference count.
1636 *
1637 * If not found, @__vm_bo is added to the GEM's gpuva list.
1638 *
1639 * Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
1640 * &drm_gpuvm_bo was found
1641 */
1642 struct drm_gpuvm_bo *
1643 drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
1644 {
1645 struct drm_gpuvm *gpuvm = __vm_bo->vm;
1646 struct drm_gem_object *obj = __vm_bo->obj;
1647 struct drm_gpuvm_bo *vm_bo;
1648
1649 vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
1650 if (vm_bo) {
1651 drm_gpuvm_bo_put(__vm_bo);
1652 return vm_bo;
1653 }
1654
1655 drm_gem_gpuva_assert_lock_held(obj);
1656 list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
1657
1658 return __vm_bo;
1659 }
1660 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_prealloc);
1661
1662 /**
1663 * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's
1664 * extobj list
1665 * @vm_bo: The &drm_gpuvm_bo to add to its &drm_gpuvm's extobj list.
1666 *
1667 * Adds the given @vm_bo to its &drm_gpuvm's extobj list if it is not on the
1668 * list already and if the corresponding &drm_gem_object actually is an
1669 * external object.
1670 */
1671 void
1672 drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo)
1673 {
1674 struct drm_gpuvm *gpuvm = vm_bo->vm;
1675 bool lock = !drm_gpuvm_resv_protected(gpuvm);
1676
1677 if (!lock)
1678 drm_gpuvm_resv_assert_held(gpuvm);
1679
1680 if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj))
1681 drm_gpuvm_bo_list_add(vm_bo, extobj, lock);
1682 }
1683 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
1684
1685 /**
1686 * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the &drm_gpuvm's
1687 * evicted list
1688 * @vm_bo: the &drm_gpuvm_bo to add or remove
1689 * @evict: indicates whether the object is evicted
1690 *
1691 * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
1692 */
1693 void
1694 drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
1695 {
1696 struct drm_gpuvm *gpuvm = vm_bo->vm;
1697 struct drm_gem_object *obj = vm_bo->obj;
1698 bool lock = !drm_gpuvm_resv_protected(gpuvm);
1699
1700 dma_resv_assert_held(obj->resv);
1701 vm_bo->evicted = evict;
1702
1703 /* Can't add external objects to the evicted list directly if not using
1704 * internal spinlocks, since in this case the evicted list is protected
1705 * with the VM's common dma-resv lock.
1706 */
1707 if (drm_gpuvm_is_extobj(gpuvm, obj) && !lock)
1708 return;
1709
1710 if (evict)
1711 drm_gpuvm_bo_list_add(vm_bo, evict, lock);
1712 else
1713 drm_gpuvm_bo_list_del_init(vm_bo, evict, lock);
1714 }
1715 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_evict);
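
/*
 * Example (editorial sketch, not part of the original sources): marking all
 * mappings of a GEM as evicted from a (hypothetical) driver eviction path,
 * with the GEM's dma-resv lock held and also protecting the GEM's list of
 * vm_bos.
 *
 *	struct drm_gpuvm_bo *vm_bo;
 *
 *	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
 *		drm_gpuvm_bo_evict(vm_bo, true);
 */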
1716
1717 static int
1718 __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
1719 struct drm_gpuva *va)
1720 {
1721 struct rb_node *node;
1722 struct list_head *head;
1723
1724 if (drm_gpuva_it_iter_first(&gpuvm->rb.tree,
1725 GPUVA_START(va),
1726 GPUVA_LAST(va)))
1727 return -EEXIST;
1728
1729 va->vm = gpuvm;
1730
1731 drm_gpuva_it_insert(va, &gpuvm->rb.tree);
1732
1733 node = rb_prev(&va->rb.node);
1734 if (node)
1735 head = &(to_drm_gpuva(node))->rb.entry;
1736 else
1737 head = &gpuvm->rb.list;
1738
1739 list_add(&va->rb.entry, head);
1740
1741 return 0;
1742 }
1743
1744 /**
1745 * drm_gpuva_insert() - insert a &drm_gpuva
1746 * @gpuvm: the &drm_gpuvm to insert the &drm_gpuva in
1747 * @va: the &drm_gpuva to insert
1748 *
1749 * Insert a &drm_gpuva with a given address and range into a
1750 * &drm_gpuvm.
1751 *
1752 * It is safe to use this function while iterating the GPU VA space with the
1753 * safe iterator variants, such as drm_gpuvm_for_each_va_safe() and
1754 * drm_gpuvm_for_each_va_range_safe().
1755 *
1756 * Returns: 0 on success, negative error code on failure.
1757 */
1758 int
1759 drm_gpuva_insert(struct drm_gpuvm *gpuvm,
1760 struct drm_gpuva *va)
1761 {
1762 u64 addr = va->va.addr;
1763 u64 range = va->va.range;
1764 int ret;
1765
1766 if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
1767 return -EINVAL;
1768
1769 ret = __drm_gpuva_insert(gpuvm, va);
1770 if (likely(!ret))
1771 /* Take a reference of the GPUVM for the successfully inserted
1772 * drm_gpuva. We can't take the reference in
1773 * __drm_gpuva_insert() itself, since we don't want to increase
1774 * the reference count for the GPUVM's kernel_alloc_node.
1775 */
1776 drm_gpuvm_get(gpuvm);
1777
1778 return ret;
1779 }
1780 EXPORT_SYMBOL_GPL(drm_gpuva_insert);
1781
1782 static void
1783 __drm_gpuva_remove(struct drm_gpuva *va)
1784 {
1785 drm_gpuva_it_remove(va, &va->vm->rb.tree);
1786 list_del_init(&va->rb.entry);
1787 }
1788
1789 /**
1790 * drm_gpuva_remove() - remove a &drm_gpuva
1791 * @va: the &drm_gpuva to remove
1792 *
1793 * This removes the given &va from the underlying tree.
1794 *
1795 * It is safe to use this function while iterating the GPU VA space with the
1796 * safe iterator variants, such as drm_gpuvm_for_each_va_safe() and
1797 * drm_gpuvm_for_each_va_range_safe().
1798 */
1799 void
1800 drm_gpuva_remove(struct drm_gpuva *va)
1801 {
1802 struct drm_gpuvm *gpuvm = va->vm;
1803
1804 if (unlikely(va == &gpuvm->kernel_alloc_node)) {
1805 drm_WARN(gpuvm->drm, 1,
1806 "Can't destroy kernel reserved node.\n");
1807 return;
1808 }
1809
1810 __drm_gpuva_remove(va);
1811 drm_gpuvm_put(va->vm);
1812 }
1813 EXPORT_SYMBOL_GPL(drm_gpuva_remove);
1814
1815 /**
1816 * drm_gpuva_link() - link a &drm_gpuva
1817 * @va: the &drm_gpuva to link
1818 * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to
1819 *
1820 * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
1821 * &drm_gpuvm_bo to the &drm_gem_object it is associated with.
1822 *
1823 * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional
1824 * reference of the latter is taken.
1825 *
1826 * This function expects the caller to protect the GEM's GPUVA list against
1827 * concurrent access using either the GEM's dma_resv lock or a driver specific
1828 * lock set through drm_gem_gpuva_set_lock().
1829 */
1830 void
1831 drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
1832 {
1833 struct drm_gem_object *obj = va->gem.obj;
1834 struct drm_gpuvm *gpuvm = va->vm;
1835
1836 if (unlikely(!obj))
1837 return;
1838
1839 drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj);
1840
1841 va->vm_bo = drm_gpuvm_bo_get(vm_bo);
1842
1843 drm_gem_gpuva_assert_lock_held(obj);
1844 list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
1845 }
1846 EXPORT_SYMBOL_GPL(drm_gpuva_link);
1847
1848 /**
1849 * drm_gpuva_unlink() - unlink a &drm_gpuva
1850 * @va: the &drm_gpuva to unlink
1851 *
1852 * This removes the given &va from the GPU VA list of the &drm_gem_object it is
1853 * associated with.
1854 *
1855 * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
1856 * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case
1857 * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo.
1858 *
1859 * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of
1860 * the latter is dropped.
1861 *
1862 * This function expects the caller to protect the GEM's GPUVA list against
1863 * concurrent access using either the GEM's dma_resv lock or a driver specific
1864 * lock set through drm_gem_gpuva_set_lock().
1865 */
1866 void
1867 drm_gpuva_unlink(struct drm_gpuva *va)
1868 {
1869 struct drm_gem_object *obj = va->gem.obj;
1870 struct drm_gpuvm_bo *vm_bo = va->vm_bo;
1871
1872 if (unlikely(!obj))
1873 return;
1874
1875 drm_gem_gpuva_assert_lock_held(obj);
1876 list_del_init(&va->gem.entry);
1877
1878 va->vm_bo = NULL;
1879 drm_gpuvm_bo_put(vm_bo);
1880 }
1881 EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
1882
1883 /**
1884 * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
1885 * @gpuvm: the &drm_gpuvm to search in
1886 * @addr: the &drm_gpuva's address
1887 * @range: the &drm_gpuva's range
1888 *
1889 * Returns: the first &drm_gpuva within the given range
1890 */
1891 struct drm_gpuva *
1892 drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
1893 u64 addr, u64 range)
1894 {
1895 u64 last = addr + range - 1;
1896
1897 return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last);
1898 }
1899 EXPORT_SYMBOL_GPL(drm_gpuva_find_first);
1900
1901 /**
1902 * drm_gpuva_find() - find a &drm_gpuva
1903 * @gpuvm: the &drm_gpuvm to search in
1904 * @addr: the &drm_gpuva's address
1905 * @range: the &drm_gpuva's range
1906 *
1907 * Returns: the &drm_gpuva at a given @addr and with a given @range
1908 */
1909 struct drm_gpuva *
1910 drm_gpuva_find(struct drm_gpuvm *gpuvm,
1911 u64 addr, u64 range)
1912 {
1913 struct drm_gpuva *va;
1914
1915 va = drm_gpuva_find_first(gpuvm, addr, range);
1916 if (!va)
1917 goto out;
1918
1919 if (va->va.addr != addr ||
1920 va->va.range != range)
1921 goto out;
1922
1923 return va;
1924
1925 out:
1926 return NULL;
1927 }
1928 EXPORT_SYMBOL_GPL(drm_gpuva_find);
1929
1930 /**
1931 * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
1932 * @gpuvm: the &drm_gpuvm to search in
1933 * @start: the given GPU VA's start address
1934 *
1935 * Find the adjacent &drm_gpuva before the GPU VA with given @start address.
1936 *
1937 * Note that if there is any free space between the GPU VA mappings no mapping
1938 * is returned.
1939 *
1940 * Returns: a pointer to the found &drm_gpuva or NULL if none was found
1941 */
1942 struct drm_gpuva *
1943 drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start)
1944 {
1945 if (!drm_gpuvm_range_valid(gpuvm, start - 1, 1))
1946 return NULL;
1947
1948 return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start);
1949 }
1950 EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
1951
1952 /**
1953 * drm_gpuva_find_next() - find the &drm_gpuva after the given address
1954 * @gpuvm: the &drm_gpuvm to search in
1955 * @end: the given GPU VA's end address
1956 *
1957 * Find the adjacent &drm_gpuva after the GPU VA with given @end address.
1958 *
1959 * Note that if there is any free space between the GPU VA mappings no mapping
1960 * is returned.
1961 *
1962 * Returns: a pointer to the found &drm_gpuva or NULL if none was found
1963 */
1964 struct drm_gpuva *
1965 drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end)
1966 {
1967 if (!drm_gpuvm_range_valid(gpuvm, end, 1))
1968 return NULL;
1969
1970 return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1);
1971 }
1972 EXPORT_SYMBOL_GPL(drm_gpuva_find_next);
1973
1974 /**
1975 * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space
1976 * is empty
1977 * @gpuvm: the &drm_gpuvm to check the range for
1978 * @addr: the start address of the range
1979 * @range: the range of the interval
1980 *
1981 * Returns: true if the interval is empty, false otherwise
1982 */
1983 bool
1984 drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
1985 {
1986 return !drm_gpuva_find_first(gpuvm, addr, range);
1987 }
1988 EXPORT_SYMBOL_GPL(drm_gpuvm_interval_empty);
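
/*
 * Example (editorial sketch, not part of the original sources): a driver that
 * refuses overlapping userspace mappings could reject a (hypothetical) bind
 * request 'req' up front:
 *
 *	if (!drm_gpuvm_interval_empty(gpuvm, req->addr, req->range))
 *		return -EEXIST;
 */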
1989
1990 /**
1991 * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
1992 * &drm_gpuva_op_map
1993 * @gpuvm: the &drm_gpuvm
1994 * @va: the &drm_gpuva to insert
1995 * @op: the &drm_gpuva_op_map to initialize @va with
1996 *
1997 * Initializes the @va from the @op and inserts it into the given @gpuvm.
1998 */
1999 void
2000 drm_gpuva_map(struct drm_gpuvm *gpuvm,
2001 struct drm_gpuva *va,
2002 struct drm_gpuva_op_map *op)
2003 {
2004 drm_gpuva_init_from_op(va, op);
2005 drm_gpuva_insert(gpuvm, va);
2006 }
2007 EXPORT_SYMBOL_GPL(drm_gpuva_map);
2008
2009 /**
2010 * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
2011 * &drm_gpuva_op_remap
2012 * @prev: the &drm_gpuva to remap when keeping the start of a mapping
2013 * @next: the &drm_gpuva to remap when keeping the end of a mapping
2014 * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
2015 *
2016 * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
2017 * @next.
2018 */
2019 void
2020 drm_gpuva_remap(struct drm_gpuva *prev,
2021 struct drm_gpuva *next,
2022 struct drm_gpuva_op_remap *op)
2023 {
2024 struct drm_gpuva *va = op->unmap->va;
2025 struct drm_gpuvm *gpuvm = va->vm;
2026
2027 drm_gpuva_remove(va);
2028
2029 if (op->prev) {
2030 drm_gpuva_init_from_op(prev, op->prev);
2031 drm_gpuva_insert(gpuvm, prev);
2032 }
2033
2034 if (op->next) {
2035 drm_gpuva_init_from_op(next, op->next);
2036 drm_gpuva_insert(gpuvm, next);
2037 }
2038 }
2039 EXPORT_SYMBOL_GPL(drm_gpuva_remap);
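
/*
 * Example (editorial sketch, not part of the original sources): a driver's
 * &drm_gpuvm_ops.sm_step_remap callback using drm_gpuva_remap() with two
 * pre-allocated &drm_gpuva structures. 'struct my_bind_job', its
 * 'prev_va'/'next_va' members and my_unmap_range() are hypothetical driver
 * constructs.
 *
 *	static int my_sm_step_remap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct my_bind_job *job = priv;
 *
 *		// Tear down the PTEs of the part that is no longer mapped ...
 *		my_unmap_range(job, op->remap.unmap->va);
 *
 *		// ... and re-insert the kept head and/or tail mapping(s).
 *		drm_gpuva_remap(job->prev_va, job->next_va, &op->remap);
 *
 *		return 0;
 *	}
 */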
2040
2041 /**
2042 * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
2043 * &drm_gpuva_op_unmap
2044 * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
2045 *
2046 * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
2047 */
2048 void
2049 drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
2050 {
2051 drm_gpuva_remove(op->va);
2052 }
2053 EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
2054
2055 static int
2056 op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
2057 u64 addr, u64 range,
2058 struct drm_gem_object *obj, u64 offset)
2059 {
2060 struct drm_gpuva_op op = {};
2061
2062 op.op = DRM_GPUVA_OP_MAP;
2063 op.map.va.addr = addr;
2064 op.map.va.range = range;
2065 op.map.gem.obj = obj;
2066 op.map.gem.offset = offset;
2067
2068 return fn->sm_step_map(&op, priv);
2069 }
2070
2071 static int
2072 op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
2073 struct drm_gpuva_op_map *prev,
2074 struct drm_gpuva_op_map *next,
2075 struct drm_gpuva_op_unmap *unmap)
2076 {
2077 struct drm_gpuva_op op = {};
2078 struct drm_gpuva_op_remap *r;
2079
2080 op.op = DRM_GPUVA_OP_REMAP;
2081 r = &op.remap;
2082 r->prev = prev;
2083 r->next = next;
2084 r->unmap = unmap;
2085
2086 return fn->sm_step_remap(&op, priv);
2087 }
2088
2089 static int
2090 op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
2091 struct drm_gpuva *va, bool merge)
2092 {
2093 struct drm_gpuva_op op = {};
2094
2095 op.op = DRM_GPUVA_OP_UNMAP;
2096 op.unmap.va = va;
2097 op.unmap.keep = merge;
2098
2099 return fn->sm_step_unmap(&op, priv);
2100 }
2101
2102 static int
2103 __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
2104 const struct drm_gpuvm_ops *ops, void *priv,
2105 u64 req_addr, u64 req_range,
2106 struct drm_gem_object *req_obj, u64 req_offset)
2107 {
2108 struct drm_gpuva *va, *next;
2109 u64 req_end = req_addr + req_range;
2110 int ret;
2111
2112 if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
2113 return -EINVAL;
2114
2115 drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
2116 struct drm_gem_object *obj = va->gem.obj;
2117 u64 offset = va->gem.offset;
2118 u64 addr = va->va.addr;
2119 u64 range = va->va.range;
2120 u64 end = addr + range;
2121 bool merge = !!va->gem.obj;
2122
2123 if (addr == req_addr) {
2124 merge &= obj == req_obj &&
2125 offset == req_offset;
2126
2127 if (end == req_end) {
2128 ret = op_unmap_cb(ops, priv, va, merge);
2129 if (ret)
2130 return ret;
2131 break;
2132 }
2133
2134 if (end < req_end) {
2135 ret = op_unmap_cb(ops, priv, va, merge);
2136 if (ret)
2137 return ret;
2138 continue;
2139 }
2140
2141 if (end > req_end) {
2142 struct drm_gpuva_op_map n = {
2143 .va.addr = req_end,
2144 .va.range = range - req_range,
2145 .gem.obj = obj,
2146 .gem.offset = offset + req_range,
2147 };
2148 struct drm_gpuva_op_unmap u = {
2149 .va = va,
2150 .keep = merge,
2151 };
2152
2153 ret = op_remap_cb(ops, priv, NULL, &n, &u);
2154 if (ret)
2155 return ret;
2156 break;
2157 }
2158 } else if (addr < req_addr) {
2159 u64 ls_range = req_addr - addr;
2160 struct drm_gpuva_op_map p = {
2161 .va.addr = addr,
2162 .va.range = ls_range,
2163 .gem.obj = obj,
2164 .gem.offset = offset,
2165 };
2166 struct drm_gpuva_op_unmap u = { .va = va };
2167
2168 merge &= obj == req_obj &&
2169 offset + ls_range == req_offset;
2170 u.keep = merge;
2171
2172 if (end == req_end) {
2173 ret = op_remap_cb(ops, priv, &p, NULL, &u);
2174 if (ret)
2175 return ret;
2176 break;
2177 }
2178
2179 if (end < req_end) {
2180 ret = op_remap_cb(ops, priv, &p, NULL, &u);
2181 if (ret)
2182 return ret;
2183 continue;
2184 }
2185
2186 if (end > req_end) {
2187 struct drm_gpuva_op_map n = {
2188 .va.addr = req_end,
2189 .va.range = end - req_end,
2190 .gem.obj = obj,
2191 .gem.offset = offset + ls_range +
2192 req_range,
2193 };
2194
2195 ret = op_remap_cb(ops, priv, &p, &n, &u);
2196 if (ret)
2197 return ret;
2198 break;
2199 }
2200 } else if (addr > req_addr) {
2201 merge &= obj == req_obj &&
2202 offset == req_offset +
2203 (addr - req_addr);
2204
2205 if (end == req_end) {
2206 ret = op_unmap_cb(ops, priv, va, merge);
2207 if (ret)
2208 return ret;
2209 break;
2210 }
2211
2212 if (end < req_end) {
2213 ret = op_unmap_cb(ops, priv, va, merge);
2214 if (ret)
2215 return ret;
2216 continue;
2217 }
2218
2219 if (end > req_end) {
2220 struct drm_gpuva_op_map n = {
2221 .va.addr = req_end,
2222 .va.range = end - req_end,
2223 .gem.obj = obj,
2224 .gem.offset = offset + req_end - addr,
2225 };
2226 struct drm_gpuva_op_unmap u = {
2227 .va = va,
2228 .keep = merge,
2229 };
2230
2231 ret = op_remap_cb(ops, priv, NULL, &n, &u);
2232 if (ret)
2233 return ret;
2234 break;
2235 }
2236 }
2237 }
2238
2239 return op_map_cb(ops, priv,
2240 req_addr, req_range,
2241 req_obj, req_offset);
2242 }
2243
2244 static int
2245 __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
2246 const struct drm_gpuvm_ops *ops, void *priv,
2247 u64 req_addr, u64 req_range)
2248 {
2249 struct drm_gpuva *va, *next;
2250 u64 req_end = req_addr + req_range;
2251 int ret;
2252
2253 if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
2254 return -EINVAL;
2255
2256 drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
2257 struct drm_gpuva_op_map prev = {}, next = {};
2258 bool prev_split = false, next_split = false;
2259 struct drm_gem_object *obj = va->gem.obj;
2260 u64 offset = va->gem.offset;
2261 u64 addr = va->va.addr;
2262 u64 range = va->va.range;
2263 u64 end = addr + range;
2264
2265 if (addr < req_addr) {
2266 prev.va.addr = addr;
2267 prev.va.range = req_addr - addr;
2268 prev.gem.obj = obj;
2269 prev.gem.offset = offset;
2270
2271 prev_split = true;
2272 }
2273
2274 if (end > req_end) {
2275 next.va.addr = req_end;
2276 next.va.range = end - req_end;
2277 next.gem.obj = obj;
2278 next.gem.offset = offset + (req_end - addr);
2279
2280 next_split = true;
2281 }
2282
2283 if (prev_split || next_split) {
2284 struct drm_gpuva_op_unmap unmap = { .va = va };
2285
2286 ret = op_remap_cb(ops, priv,
2287 prev_split ? &prev : NULL,
2288 next_split ? &next : NULL,
2289 &unmap);
2290 if (ret)
2291 return ret;
2292 } else {
2293 ret = op_unmap_cb(ops, priv, va, false);
2294 if (ret)
2295 return ret;
2296 }
2297 }
2298
2299 return 0;
2300 }
2301
2302 /**
2303 * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
2304 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2305 * @priv: pointer to a driver private data structure
2306 * @req_addr: the start address of the new mapping
2307 * @req_range: the range of the new mapping
2308 * @req_obj: the &drm_gem_object to map
2309 * @req_offset: the offset within the &drm_gem_object
2310 *
2311 * This function iterates the given range of the GPU VA space. It utilizes the
2312 * &drm_gpuvm_ops to call back into the driver providing the split and merge
2313 * steps.
2314 *
2315 * Drivers may use these callbacks to update the GPU VA space right away within
2316 * the callback. In case the driver decides to copy and store the operations for
2317 * later processing, neither this function nor &drm_gpuvm_sm_unmap is allowed to
2318 * be called before the &drm_gpuvm's view of the GPU VA space has been
2319 * updated with the previous set of operations. To update the
2320 * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2321 * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2322 * used.
2323 *
2324 * A sequence of callbacks can contain map, unmap and remap operations, but
2325 * the sequence of callbacks might also be empty if no operation is required,
2326 * e.g. if the requested mapping already exists in the exact same way.
2327 *
2328 * There can be an arbitrary number of unmap operations, a maximum of two remap
2329 * operations and a single map operation. The latter one represents the original
2330 * map operation requested by the caller.
2331 *
2332 * Returns: 0 on success or a negative error code
2333 */
2334 int
2335 drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
2336 u64 req_addr, u64 req_range,
2337 struct drm_gem_object *req_obj, u64 req_offset)
2338 {
2339 const struct drm_gpuvm_ops *ops = gpuvm->ops;
2340
2341 if (unlikely(!(ops && ops->sm_step_map &&
2342 ops->sm_step_remap &&
2343 ops->sm_step_unmap)))
2344 return -EINVAL;
2345
2346 return __drm_gpuvm_sm_map(gpuvm, ops, priv,
2347 req_addr, req_range,
2348 req_obj, req_offset);
2349 }
2350 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
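
/*
 * Example (editorial sketch, not part of the original sources): wiring up the
 * split/merge callbacks and driving them synchronously from a (hypothetical)
 * bind path. The my_sm_step_*() callbacks, 'job', 'op' and 'obj' are driver
 * specific; the ops structure is the one passed to drm_gpuvm_init().
 *
 *	static const struct drm_gpuvm_ops my_gpuvm_ops = {
 *		.sm_step_map = my_sm_step_map,
 *		.sm_step_remap = my_sm_step_remap,
 *		.sm_step_unmap = my_sm_step_unmap,
 *	};
 *
 *	// 'job' is handed to every callback as the 'priv' argument.
 *	ret = drm_gpuvm_sm_map(gpuvm, job, op->addr, op->range,
 *			       obj, op->obj_offset);
 */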
2351
2352 /**
2353 * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
2354 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2355 * @priv: pointer to a driver private data structure
2356 * @req_addr: the start address of the range to unmap
2357 * @req_range: the range of the mappings to unmap
2358 *
2359 * This function iterates the given range of the GPU VA space. It utilizes the
2360 * &drm_gpuvm_ops to call back into the driver providing the operations to
2361 * unmap and, if required, split existent mappings.
2362 *
2363 * Drivers may use these callbacks to update the GPU VA space right away within
2364 * the callback. In case the driver decides to copy and store the operations for
2365 * later processing, neither this function nor &drm_gpuvm_sm_map is allowed to be
2366 * called before the &drm_gpuvm's view of the GPU VA space has been updated
2367 * with the previous set of operations. To update the &drm_gpuvm's view
2368 * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
2369 * drm_gpuva_destroy_unlocked() should be used.
2370 *
2371 * A sequence of callbacks can contain unmap and remap operations, depending on
2372 * whether there are actual overlapping mappings to split.
2373 *
2374 * There can be an arbitrary number of unmap operations and a maximum of two
2375 * remap operations.
2376 *
2377 * Returns: 0 on success or a negative error code
2378 */
2379 int
2380 drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
2381 u64 req_addr, u64 req_range)
2382 {
2383 const struct drm_gpuvm_ops *ops = gpuvm->ops;
2384
2385 if (unlikely(!(ops && ops->sm_step_remap &&
2386 ops->sm_step_unmap)))
2387 return -EINVAL;
2388
2389 return __drm_gpuvm_sm_unmap(gpuvm, ops, priv,
2390 req_addr, req_range);
2391 }
2392 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
2393
2394 static int
2395 drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
2396 {
2397 struct drm_exec *exec = priv;
2398
2399 switch (op->op) {
2400 case DRM_GPUVA_OP_REMAP:
2401 if (op->remap.unmap->va->gem.obj)
2402 return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
2403 return 0;
2404 case DRM_GPUVA_OP_UNMAP:
2405 if (op->unmap.va->gem.obj)
2406 return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
2407 return 0;
2408 default:
2409 return 0;
2410 }
2411 }
2412
2413 static const struct drm_gpuvm_ops lock_ops = {
2414 .sm_step_map = drm_gpuva_sm_step_lock,
2415 .sm_step_remap = drm_gpuva_sm_step_lock,
2416 .sm_step_unmap = drm_gpuva_sm_step_lock,
2417 };
2418
2419 /**
2420 * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
2421 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2422 * @exec: the &drm_exec locking context
2423 * @num_fences: for newly mapped objects, the # of fences to reserve
2424 * @req_addr: the start address of the new mapping
2425 * @req_range: the range of the new mapping
2426 * @req_obj: the &drm_gem_object to map
2427 * @req_offset: the offset within the &drm_gem_object
2428 *
2429 * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
2430 * remapped, and locks+prepares (drm_exec_prepare_obj()) objects that
2431 * will be newly mapped.
2432 *
2433 * The expected usage is:
2434 *
2435 * vm_bind {
2436 * struct drm_exec exec;
2437 *
2438 * // IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended:
2439 * drm_exec_init(&exec, IGNORE_DUPLICATES | INTERRUPTIBLE_WAIT, 0);
2440 *
2441 * drm_exec_until_all_locked (&exec) {
2442 * for_each_vm_bind_operation {
2443 * switch (op->op) {
2444 * case DRIVER_OP_UNMAP:
2445 * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
2446 * break;
2447 * case DRIVER_OP_MAP:
2448 * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
2449 * op->addr, op->range,
2450 * obj, op->obj_offset);
2451 * break;
2452 * }
2453 *
2454 * drm_exec_retry_on_contention(&exec);
2455 * if (ret)
2456 * return ret;
2457 * }
2458 * }
2459 * }
2460 *
2461 * This enables all locking to be performed before the driver begins modifying
2462 * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
2463 * where an earlier op can alter the sequence of steps generated for a later
2464 * op, because the later altered step will involve the same GEM object(s)
2465 * already seen in the earlier locking step. For example:
2466 *
2467 * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
2468 * DRM_GPUVA_OP_REMAP/UNMAP step. This is safe because we've already
2469 * locked the GEM object in the earlier DRIVER_OP_UNMAP op.
2470 *
2471 * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
2472 * op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
2473 * required without the earlier DRIVER_OP_MAP. This is safe because we've
2474 * already locked the GEM object in the earlier DRIVER_OP_MAP step.
2475 *
2476 * Returns: 0 on success or a negative error code
2477 */
2478 int
2479 drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
2480 struct drm_exec *exec, unsigned int num_fences,
2481 u64 req_addr, u64 req_range,
2482 struct drm_gem_object *req_obj, u64 req_offset)
2483 {
2484 if (req_obj) {
2485 int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
2486 if (ret)
2487 return ret;
2488 }
2489
2490 return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
2491 req_addr, req_range,
2492 req_obj, req_offset);
2493
2494 }
2495 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
2496
2497 /**
2498 * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
2499 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2500 * @exec: the &drm_exec locking context
2501 * @req_addr: the start address of the range to unmap
2502 * @req_range: the range of the mappings to unmap
2503 *
2504 * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
2505 * remapped by drm_gpuvm_sm_unmap().
2506 *
2507 * See drm_gpuvm_sm_map_exec_lock() for expected usage.
2508 *
2509 * Returns: 0 on success or a negative error code
2510 */
2511 int
2512 drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
2513 u64 req_addr, u64 req_range)
2514 {
2515 return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
2516 req_addr, req_range);
2517 }
2518 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);
2519
2520 static struct drm_gpuva_op *
2521 gpuva_op_alloc(struct drm_gpuvm *gpuvm)
2522 {
2523 const struct drm_gpuvm_ops *fn = gpuvm->ops;
2524 struct drm_gpuva_op *op;
2525
2526 if (fn && fn->op_alloc)
2527 op = fn->op_alloc();
2528 else
2529 op = kzalloc(sizeof(*op), GFP_KERNEL);
2530
2531 if (unlikely(!op))
2532 return NULL;
2533
2534 return op;
2535 }
2536
2537 static void
2538 gpuva_op_free(struct drm_gpuvm *gpuvm,
2539 struct drm_gpuva_op *op)
2540 {
2541 const struct drm_gpuvm_ops *fn = gpuvm->ops;
2542
2543 if (fn && fn->op_free)
2544 fn->op_free(op);
2545 else
2546 kfree(op);
2547 }
2548
2549 static int
2550 drm_gpuva_sm_step(struct drm_gpuva_op *__op,
2551 void *priv)
2552 {
2553 struct {
2554 struct drm_gpuvm *vm;
2555 struct drm_gpuva_ops *ops;
2556 } *args = priv;
2557 struct drm_gpuvm *gpuvm = args->vm;
2558 struct drm_gpuva_ops *ops = args->ops;
2559 struct drm_gpuva_op *op;
2560
2561 op = gpuva_op_alloc(gpuvm);
2562 if (unlikely(!op))
2563 goto err;
2564
2565 memcpy(op, __op, sizeof(*op));
2566
2567 if (op->op == DRM_GPUVA_OP_REMAP) {
2568 struct drm_gpuva_op_remap *__r = &__op->remap;
2569 struct drm_gpuva_op_remap *r = &op->remap;
2570
2571 r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
2572 GFP_KERNEL);
2573 if (unlikely(!r->unmap))
2574 goto err_free_op;
2575
2576 if (__r->prev) {
2577 r->prev = kmemdup(__r->prev, sizeof(*r->prev),
2578 GFP_KERNEL);
2579 if (unlikely(!r->prev))
2580 goto err_free_unmap;
2581 }
2582
2583 if (__r->next) {
2584 r->next = kmemdup(__r->next, sizeof(*r->next),
2585 GFP_KERNEL);
2586 if (unlikely(!r->next))
2587 goto err_free_prev;
2588 }
2589 }
2590
2591 list_add_tail(&op->entry, &ops->list);
2592
2593 return 0;
2594
2595 err_free_prev:
2596 kfree(op->remap.prev);
2597 err_free_unmap:
2598 kfree(op->remap.unmap);
2599 err_free_op:
2600 gpuva_op_free(gpuvm, op);
2601 err:
2602 return -ENOMEM;
2603 }
2604
2605 static const struct drm_gpuvm_ops gpuvm_list_ops = {
2606 .sm_step_map = drm_gpuva_sm_step,
2607 .sm_step_remap = drm_gpuva_sm_step,
2608 .sm_step_unmap = drm_gpuva_sm_step,
2609 };
2610
2611 /**
2612 * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
2613 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2614 * @req_addr: the start address of the new mapping
2615 * @req_range: the range of the new mapping
2616 * @req_obj: the &drm_gem_object to map
2617 * @req_offset: the offset within the &drm_gem_object
2618 *
2619 * This function creates a list of operations to perform splitting and merging
2620 * of existent mapping(s) with the newly requested one.
2621 *
2622 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2623 * in the given order. It can contain map, unmap and remap operations, but it
2624 * also can be empty if no operation is required, e.g. if the requested mapping
2625 * already exists in the exact same way.
2626 *
2627 * There can be an arbitrary number of unmap operations, a maximum of two remap
2628 * operations and a single map operation. The latter one represents the original
2629 * map operation requested by the caller.
2630 *
2631 * Note that before calling this function again with another mapping request it
2632 * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2633 * previously obtained operations must be either processed or abandoned. To
2634 * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2635 * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2636 * used.
2637 *
2638 * After the caller finished processing the returned &drm_gpuva_ops, they must
2639 * be freed with &drm_gpuva_ops_free.
2640 *
2641 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2642 */
2643 struct drm_gpuva_ops *
2644 drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
2645 u64 req_addr, u64 req_range,
2646 struct drm_gem_object *req_obj, u64 req_offset)
2647 {
2648 struct drm_gpuva_ops *ops;
2649 struct {
2650 struct drm_gpuvm *vm;
2651 struct drm_gpuva_ops *ops;
2652 } args;
2653 int ret;
2654
2655 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2656 if (unlikely(!ops))
2657 return ERR_PTR(-ENOMEM);
2658
2659 INIT_LIST_HEAD(&ops->list);
2660
2661 args.vm = gpuvm;
2662 args.ops = ops;
2663
2664 ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
2665 req_addr, req_range,
2666 req_obj, req_offset);
2667 if (ret)
2668 goto err_free_ops;
2669
2670 return ops;
2671
2672 err_free_ops:
2673 drm_gpuva_ops_free(gpuvm, ops);
2674 return ERR_PTR(ret);
2675 }
2676 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
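
/*
 * Example (editorial sketch, not part of the original sources): the
 * list-based alternative to drm_gpuvm_sm_map() as a (hypothetical) driver
 * might use it: create the ops, process them in order, then free them.
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *			// program PTEs, then drm_gpuva_map() and drm_gpuva_link()
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			// drm_gpuva_remap() with pre-allocated &drm_gpuvas
 *			break;
 *		case DRM_GPUVA_OP_UNMAP:
 *			// tear down PTEs, then drm_gpuva_unmap() and drm_gpuva_unlink()
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);
 */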
2677
2678 /**
2679 * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
2680 * unmap
2681 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2682 * @req_addr: the start address of the range to unmap
2683 * @req_range: the range of the mappings to unmap
2684 *
2685 * This function creates a list of operations to perform unmapping and, if
2686 * required, splitting of the mappings overlapping the unmap range.
2687 *
2688 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2689 * in the given order. It can contain unmap and remap operations, depending on
2690 * whether there are actual overlapping mappings to split.
2691 *
2692 * There can be an arbitrary number of unmap operations and a maximum of two
2693 * remap operations.
2694 *
2695 * Note that before calling this function again with another range to unmap it
2696 * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2697 * previously obtained operations must be processed or abandoned. To update the
2698 * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2699 * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2700 * used.
2701 *
2702 * After the caller finished processing the returned &drm_gpuva_ops, they must
2703 * be freed with &drm_gpuva_ops_free.
2704 *
2705 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2706 */
2707 struct drm_gpuva_ops *
2708 drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
2709 u64 req_addr, u64 req_range)
2710 {
2711 struct drm_gpuva_ops *ops;
2712 struct {
2713 struct drm_gpuvm *vm;
2714 struct drm_gpuva_ops *ops;
2715 } args;
2716 int ret;
2717
2718 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2719 if (unlikely(!ops))
2720 return ERR_PTR(-ENOMEM);
2721
2722 INIT_LIST_HEAD(&ops->list);
2723
2724 args.vm = gpuvm;
2725 args.ops = ops;
2726
2727 ret = __drm_gpuvm_sm_unmap(gpuvm, &gpuvm_list_ops, &args,
2728 req_addr, req_range);
2729 if (ret)
2730 goto err_free_ops;
2731
2732 return ops;
2733
2734 err_free_ops:
2735 drm_gpuva_ops_free(gpuvm, ops);
2736 return ERR_PTR(ret);
2737 }
2738 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_ops_create);
2739
2740 /**
2741 * drm_gpuvm_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
2742 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2743 * @addr: the start address of the range to prefetch
2744 * @range: the range of the mappings to prefetch
2745 *
2746 * This function creates a list of operations to perform prefetching.
2747 *
2748 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2749 * in the given order. It can contain prefetch operations.
2750 *
2751 * There can be an arbitrary number of prefetch operations.
2752 *
2753 * After the caller finished processing the returned &drm_gpuva_ops, they must
2754 * be freed with &drm_gpuva_ops_free.
2755 *
2756 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2757 */
2758 struct drm_gpuva_ops *
2759 drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
2760 u64 addr, u64 range)
2761 {
2762 struct drm_gpuva_ops *ops;
2763 struct drm_gpuva_op *op;
2764 struct drm_gpuva *va;
2765 u64 end = addr + range;
2766 int ret;
2767
2768 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2769 if (!ops)
2770 return ERR_PTR(-ENOMEM);
2771
2772 INIT_LIST_HEAD(&ops->list);
2773
2774 drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
2775 op = gpuva_op_alloc(gpuvm);
2776 if (!op) {
2777 ret = -ENOMEM;
2778 goto err_free_ops;
2779 }
2780
2781 op->op = DRM_GPUVA_OP_PREFETCH;
2782 op->prefetch.va = va;
2783 list_add_tail(&op->entry, &ops->list);
2784 }
2785
2786 return ops;
2787
2788 err_free_ops:
2789 drm_gpuva_ops_free(gpuvm, ops);
2790 return ERR_PTR(ret);
2791 }
2792 EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
2793
2794 /**
2795 * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
2796 * @vm_bo: the &drm_gpuvm_bo abstraction
2797 *
2798 * This function creates a list of operations to perform unmapping for every
2799 * GPUVA attached to a GEM.
2800 *
2801 * The list can be iterated with &drm_gpuva_for_each_op and consists of an
2802 * arbitrary number of unmap operations.
2803 *
2804 * After the caller finished processing the returned &drm_gpuva_ops, they must
2805 * be freed with &drm_gpuva_ops_free.
2806 *
2807 * It is the caller's responsibility to protect the GEM's GPUVA list against
2808 * concurrent access using the GEM's dma_resv lock.
2809 *
2810 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2811 */
2812 struct drm_gpuva_ops *
2813 drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
2814 {
2815 struct drm_gpuva_ops *ops;
2816 struct drm_gpuva_op *op;
2817 struct drm_gpuva *va;
2818 int ret;
2819
2820 drm_gem_gpuva_assert_lock_held(vm_bo->obj);
2821
2822 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2823 if (!ops)
2824 return ERR_PTR(-ENOMEM);
2825
2826 INIT_LIST_HEAD(&ops->list);
2827
2828 drm_gpuvm_bo_for_each_va(va, vm_bo) {
2829 op = gpuva_op_alloc(vm_bo->vm);
2830 if (!op) {
2831 ret = -ENOMEM;
2832 goto err_free_ops;
2833 }
2834
2835 op->op = DRM_GPUVA_OP_UNMAP;
2836 op->unmap.va = va;
2837 list_add_tail(&op->entry, &ops->list);
2838 }
2839
2840 return ops;
2841
2842 err_free_ops:
2843 drm_gpuva_ops_free(vm_bo->vm, ops);
2844 return ERR_PTR(ret);
2845 }
2846 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap_ops_create);
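
/*
 * Example (editorial sketch, not part of the original sources): unmapping
 * every GPU VA of a GEM in this VM, e.g. from a (hypothetical) driver purge
 * path, with the GEM's dma-resv lock protecting its GPUVA list throughout.
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	dma_resv_lock(obj->resv, NULL);
 *
 *	ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
 *	if (IS_ERR(ops)) {
 *		dma_resv_unlock(obj->resv);
 *		return PTR_ERR(ops);
 *	}
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		// all ops are DRM_GPUVA_OP_UNMAP: tear down the PTEs, then
 *		// drm_gpuva_unmap() and drm_gpuva_unlink()
 *	}
 *
 *	dma_resv_unlock(obj->resv);
 *	drm_gpuva_ops_free(vm_bo->vm, ops);
 */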
2847
2848 /**
2849 * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
2850 * @gpuvm: the &drm_gpuvm the ops were created for
2851 * @ops: the &drm_gpuva_ops to free
2852 *
2853 * Frees the given &drm_gpuva_ops structure including all the ops associated
2854 * with it.
2855 */
2856 void
2857 drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
2858 struct drm_gpuva_ops *ops)
2859 {
2860 struct drm_gpuva_op *op, *next;
2861
2862 drm_gpuva_for_each_op_safe(op, next, ops) {
2863 list_del(&op->entry);
2864
2865 if (op->op == DRM_GPUVA_OP_REMAP) {
2866 kfree(op->remap.prev);
2867 kfree(op->remap.next);
2868 kfree(op->remap.unmap);
2869 }
2870
2871 gpuva_op_free(gpuvm, op);
2872 }
2873
2874 kfree(ops);
2875 }
2876 EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
2877
2878 MODULE_DESCRIPTION("DRM GPUVM");
2879 MODULE_LICENSE("GPL");
2880