// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_vm.h"

#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_mmu.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_heap_config.h"

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

#include <linux/bug.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp_types.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/stddef.h>

/**
 * DOC: Memory context
 *
 * This is the "top level" datatype in the VM code. It's exposed in the public
 * API as an opaque handle.
 */

/**
 * struct pvr_vm_context - Context type used to represent a single VM.
 */
struct pvr_vm_context {
	/**
	 * @pvr_dev: The PowerVR device to which this context is bound.
	 * This binding is immutable for the life of the context.
	 */
	struct pvr_device *pvr_dev;

	/** @mmu_ctx: The context for binding to physical memory. */
	struct pvr_mmu_context *mmu_ctx;

	/** @gpuvm_mgr: GPUVM object associated with this context. */
	struct drm_gpuvm gpuvm_mgr;

	/** @lock: Global lock on this VM. */
	struct mutex lock;

	/**
	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
	 * context.
	 */
	struct pvr_fw_object *fw_mem_ctx_obj;

	/** @ref_count: Reference count of object. */
	struct kref ref_count;

	/**
	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
	 * should use the @dummy_gem.resv and not their own _resv field.
	 */
	struct drm_gem_object dummy_gem;
};

static inline
struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
}

struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
{
	if (vm_ctx)
		kref_get(&vm_ctx->ref_count);

	return vm_ctx;
}

/**
 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
 * page table structure behind a VM context.
 * @vm_ctx: Target VM context.
 */
dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
{
	return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
}

/**
 * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
 * @vm_ctx: Target VM context.
 *
 * This is used to allow private BOs to share a dma_resv for faster fence
 * updates.
 *
 * Returns: The dma_resv pointer.
 */
struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
{
	return vm_ctx->dummy_gem.resv;
}

/**
 * DOC: Memory mappings
 */

/**
 * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
 */
struct pvr_vm_gpuva {
	/** @base: The wrapped drm_gpuva object. */
	struct drm_gpuva base;
};

#define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base)

enum pvr_vm_bind_type {
	PVR_VM_BIND_TYPE_MAP,
	PVR_VM_BIND_TYPE_UNMAP,
};

/**
 * struct pvr_vm_bind_op - Context of a map/unmap operation.
 */
struct pvr_vm_bind_op {
	/** @type: Map or unmap. */
	enum pvr_vm_bind_type type;

	/** @pvr_obj: Object associated with mapping (map only). */
	struct pvr_gem_object *pvr_obj;

	/**
	 * @vm_ctx: VM context where the mapping will be created or destroyed.
	 */
	struct pvr_vm_context *vm_ctx;

	/** @mmu_op_ctx: MMU op context. */
	struct pvr_mmu_op_context *mmu_op_ctx;

	/** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
	struct drm_gpuvm_bo *gpuvm_bo;

	/**
	 * @new_va: Prealloced VA mapping object (init in callback).
	 * Used when creating a mapping.
	 */
	struct pvr_vm_gpuva *new_va;

	/**
	 * @prev_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the beginning into a new mapping.
	 */
	struct pvr_vm_gpuva *prev_va;

	/**
	 * @next_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the end into a new mapping.
	 */
	struct pvr_vm_gpuva *next_va;

	/** @offset: Offset into @pvr_obj to begin mapping from. */
	u64 offset;

	/** @device_addr: Device-virtual address at the start of the mapping. */
	u64 device_addr;

	/** @size: Size of the desired mapping. */
	u64 size;
};

/**
 * pvr_vm_bind_op_exec() - Execute a single bind op.
 * @bind_op: Bind op context.
 *
 * Returns:
 *  * 0 on success,
 *  * Any error code returned by drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap(), or
 *    a callback function.
 */
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
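	/*
	 * drm_gpuvm_sm_map()/drm_gpuvm_sm_unmap() drive the GPUVM split/merge
	 * state machine, which calls back into the sm_step_* handlers of
	 * &pvr_vm_gpuva_ops below with @bind_op passed through as the opaque
	 * priv pointer.
	 */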
	switch (bind_op->type) {
	case PVR_VM_BIND_TYPE_MAP: {
		const struct drm_gpuvm_map_req map_req = {
			.map.va.addr = bind_op->device_addr,
			.map.va.range = bind_op->size,
			.map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
			.map.gem.offset = bind_op->offset,
		};

		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
					bind_op, &map_req);
	}

	case PVR_VM_BIND_TYPE_UNMAP:
		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
					  bind_op, bind_op->device_addr,
					  bind_op->size);
	}

	/*
	 * This shouldn't happen unless something went wrong
	 * in drm_sched.
	 */
	WARN_ON(1);
	return -EINVAL;
}

static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
{
	drm_gpuvm_bo_put(bind_op->gpuvm_bo);

	kfree(bind_op->new_va);
	kfree(bind_op->prev_va);
	kfree(bind_op->next_va);

	if (bind_op->pvr_obj)
		pvr_gem_object_put(bind_op->pvr_obj);

	if (bind_op->mmu_op_ctx)
		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
}

static int
pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
			struct pvr_vm_context *vm_ctx,
			struct pvr_gem_object *pvr_obj, u64 offset,
			u64 device_addr, u64 size)
{
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
	struct sg_table *sgt;
	u64 offset_plus_size;
	int err;

	if (check_add_overflow(offset, size, &offset_plus_size))
		return -EINVAL;

	if (is_user &&
	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
		return -EINVAL;
	}

	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
		return -EINVAL;

	bind_op->type = PVR_VM_BIND_TYPE_MAP;

	dma_resv_lock(obj->resv, NULL);
	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
	dma_resv_unlock(obj->resv);
	if (IS_ERR(bind_op->gpuvm_bo))
		return PTR_ERR(bind_op->gpuvm_bo);

	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
		err = -ENOMEM;
		goto err_bind_op_fini;
	}

	/* Pin pages so they're ready for use. */
	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
	err = PTR_ERR_OR_ZERO(sgt);
	if (err)
		goto err_bind_op_fini;

	bind_op->mmu_op_ctx =
		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
	if (err) {
		bind_op->mmu_op_ctx = NULL;
		goto err_bind_op_fini;
	}

	bind_op->pvr_obj = pvr_obj;
	bind_op->vm_ctx = vm_ctx;
	bind_op->device_addr = device_addr;
	bind_op->size = size;
	bind_op->offset = offset;

	return 0;

err_bind_op_fini:
	pvr_vm_bind_op_fini(bind_op);

	return err;
}

static int
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
			  struct pvr_vm_context *vm_ctx,
			  struct pvr_gem_object *pvr_obj,
			  u64 device_addr, u64 size)
{
	int err;

	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
		return -EINVAL;

	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;

	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
	if (!bind_op->prev_va || !bind_op->next_va) {
		err = -ENOMEM;
		goto err_bind_op_fini;
	}

	bind_op->mmu_op_ctx =
		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
	if (err) {
		bind_op->mmu_op_ctx = NULL;
		goto err_bind_op_fini;
	}

	bind_op->pvr_obj = pvr_obj;
	bind_op->vm_ctx = vm_ctx;
	bind_op->device_addr = device_addr;
	bind_op->size = size;

	return 0;

err_bind_op_fini:
	pvr_vm_bind_op_fini(bind_op);

	return err;
}

/**
 * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
 * @op: gpuva op containing the map details.
 * @op_ctx: Operation context.
 *
 * Context: Called by drm_gpuvm_sm_map following a successful mapping while
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_map().
 */
static int
pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
	struct pvr_vm_bind_op *ctx = op_ctx;
	int err;

	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
		return -EINVAL;

	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
			  op->map.va.addr);
	if (err)
		return err;

	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
	ctx->new_va = NULL;

	return 0;
}

/**
 * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
 * @op: gpuva op containing the unmap details.
 * @op_ctx: Operation context.
 *
 * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_unmap().
 */
static int
pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_vm_bind_op *ctx = op_ctx;

	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
				op->unmap.va->va.range);

	if (err)
		return err;

	drm_gpuva_unmap(&op->unmap);
	drm_gpuva_unlink(op->unmap.va);
	kfree(to_pvr_vm_gpuva(op->unmap.va));

	return 0;
}

/**
 * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
 * @op: gpuva op containing the remap details.
 * @op_ctx: Operation context.
 *
 * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
 * mapping or unmapping operation causes a region to be split. The
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_unmap().
 */
static int
pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_vm_bind_op *ctx = op_ctx;
	u64 va_start = 0, va_range = 0;
	int err;

	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
	if (err)
		return err;

	/* No actual remap required: the page table tree depth is fixed to 3,
	 * and we use 4k page table entries only for now.
	 */
	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);

	if (op->remap.prev) {
		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
		ctx->prev_va = NULL;
	}

	if (op->remap.next) {
		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
		ctx->next_va = NULL;
	}

	drm_gpuva_unlink(op->remap.unmap->va);
	kfree(to_pvr_vm_gpuva(op->remap.unmap->va));

	return 0;
}

/*
 * Public API
 *
 * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
 */

/**
 * pvr_device_addr_is_valid() - Tests whether a device-virtual address
 * is valid.
 * @device_addr: Virtual device address to test.
 *
 * Return:
 *  * %true if @device_addr is within the valid range for a device page
 *    table and is aligned to the device page size, or
 *  * %false otherwise.
 */
bool
pvr_device_addr_is_valid(u64 device_addr)
{
	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
}

/**
 * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
 * address and associated size are both valid.
 * @vm_ctx: Target VM context.
 * @device_addr: Virtual device address to test.
 * @size: Size of the range based at @device_addr to test.
 *
 * Calling pvr_device_addr_is_valid() twice (once on @device_addr, and again
 * on @device_addr + @size) to verify a device-virtual address range initially
 * seems intuitive, but it produces a false-negative when the address range
 * is right at the end of device-virtual address space.
 *
 * This function catches that corner case, as well as checking that
 * @size is non-zero.
 *
 * Return:
 *  * %true if @device_addr is device page aligned; @size is device page
 *    aligned; the range specified by @device_addr and @size is within the
 *    bounds of the device-virtual address space, and @size is non-zero, or
 *  * %false otherwise.
 */
bool
pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
				   u64 device_addr, u64 size)
{
	return pvr_device_addr_is_valid(device_addr) &&
	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
}
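
/*
 * Illustrative example of the corner case described above (not taken from
 * the driver, and using a hypothetical 1 GiB device-virtual address space):
 * a range starting at (1 GiB - one device page) with a size of one device
 * page is perfectly valid, yet @device_addr + @size equals 1 GiB, which
 * pvr_device_addr_is_valid() would reject because it lies one byte past the
 * last addressable byte. Hence the explicit "<=" comparison against
 * PVR_PAGE_TABLE_ADDR_SPACE_SIZE above instead of a second call to
 * pvr_device_addr_is_valid().
 */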

static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
{
	kfree(to_pvr_vm_context(gpuvm));
}

static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
	.vm_free = pvr_gpuvm_free,
	.sm_step_map = pvr_vm_gpuva_map,
	.sm_step_remap = pvr_vm_gpuva_remap,
	.sm_step_unmap = pvr_vm_gpuva_unmap,
};

static void
fw_mem_context_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
	struct pvr_vm_context *vm_ctx = priv;

	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
}

/**
 * pvr_vm_create_context() - Create a new VM context.
 * @pvr_dev: Target PowerVR device.
 * @is_userspace_context: %true if this context is for userspace. This will
 *                        create a firmware memory context for the VM context
 *                        and disable warnings when tearing down mappings.
 *
 * Return:
 *  * A handle to the newly-minted VM context on success,
 *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
 *    missing or has an unsupported value,
 *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
 *    or
 *  * Any error encountered while setting up internal structures.
 */
struct pvr_vm_context *
pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);

	struct pvr_vm_context *vm_ctx;
	u16 device_addr_bits;

	int err;

	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
				&device_addr_bits);
	if (err) {
		drm_err(drm_dev,
			"Failed to get device virtual address space bits\n");
		return ERR_PTR(err);
	}

	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
		drm_err(drm_dev,
			"Device has unsupported virtual address space size\n");
		return ERR_PTR(-EINVAL);
	}

	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
	if (!vm_ctx)
		return ERR_PTR(-ENOMEM);

	vm_ctx->pvr_dev = pvr_dev;

	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
	if (err)
		goto err_free;

	if (is_userspace_context) {
		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);

		if (err)
			goto err_page_table_destroy;
	}

	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
		       is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
		       0, &pvr_dev->base, &vm_ctx->dummy_gem,
		       0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);

	mutex_init(&vm_ctx->lock);
	kref_init(&vm_ctx->ref_count);

	return vm_ctx;

err_page_table_destroy:
	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);

err_free:
	kfree(vm_ctx);

	return ERR_PTR(err);
}
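
/*
 * Example usage (illustrative sketch, not code from this driver): the caller
 * owns the initial reference returned here and is expected to drop it with
 * pvr_vm_context_put() when the context is no longer needed:
 *
 *	struct pvr_vm_context *vm_ctx;
 *
 *	vm_ctx = pvr_vm_create_context(pvr_dev, false);
 *	if (IS_ERR(vm_ctx))
 *		return PTR_ERR(vm_ctx);
 *	...
 *	pvr_vm_context_put(vm_ctx);
 */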

/**
 * pvr_vm_context_release() - Teardown a VM context.
 * @ref_count: Pointer to reference counter of the VM context.
 *
 * This function also ensures that no mappings are left dangling by calling
 * pvr_vm_unmap_all().
 */
static void
pvr_vm_context_release(struct kref *ref_count)
{
	struct pvr_vm_context *vm_ctx =
		container_of(ref_count, struct pvr_vm_context, ref_count);

	if (vm_ctx->fw_mem_ctx_obj)
		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);

	pvr_vm_unmap_all(vm_ctx);

	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
	mutex_destroy(&vm_ctx->lock);

	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
}

/**
 * pvr_vm_context_lookup() - Look up VM context from handle
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Object handle.
 *
 * Takes reference on VM context object. Call pvr_vm_context_put() to release.
 *
 * Returns:
 *  * The requested object on success, or
 *  * %NULL on failure (object does not exist in list, or is not a VM context)
 */
struct pvr_vm_context *
pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_vm_context *vm_ctx;

	xa_lock(&pvr_file->vm_ctx_handles);
	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
	pvr_vm_context_get(vm_ctx);
	xa_unlock(&pvr_file->vm_ctx_handles);

	return vm_ctx;
}
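
/*
 * Example usage (illustrative sketch, not code from this driver; the handle
 * field name below is a placeholder): an ioctl handler resolving a userspace
 * handle pairs the lookup with pvr_vm_context_put() once it is done:
 *
 *	struct pvr_vm_context *vm_ctx;
 *
 *	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
 *	if (!vm_ctx)
 *		return -EINVAL;
 *	...
 *	pvr_vm_context_put(vm_ctx);
 */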

/**
 * pvr_vm_context_put() - Release a reference on a VM context
 * @vm_ctx: Target VM context.
 *
 * Returns:
 *  * %true if the VM context was destroyed, or
 *  * %false if there are any references still remaining.
 */
bool
pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
{
	if (vm_ctx)
		return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);

	return true;
}

/**
 * pvr_destroy_vm_contexts_for_file: Destroy any VM contexts associated with the
 * given file.
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all vm_contexts associated with @pvr_file from the device VM context
 * list and drops initial references. vm_contexts will then be destroyed once
 * all outstanding references are dropped.
 */
void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_vm_context *vm_ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
		/* vm_ctx is not used here because that would create a race with xa_erase */
		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
	}
}

static int
pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
{
	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;

	/* Acquire lock on the GEM object being mapped/unmapped. */
	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
}

/**
 * pvr_vm_map() - Map a section of physical memory into a section of
 * device-virtual memory.
 * @vm_ctx: Target VM context.
 * @pvr_obj: Target PowerVR memory object.
 * @pvr_obj_offset: Offset into @pvr_obj to map from.
 * @device_addr: Virtual device address at the start of the requested mapping.
 * @size: Size of the requested mapping.
 *
 * No handle is returned to represent the mapping. Instead, callers should
 * remember @device_addr and use that as a handle.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 *    address; the region specified by @pvr_obj_offset and @size does not fall
 *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
 *    is not device-virtual page-aligned,
 *  * Any error encountered while performing internal operations required to
 *    create the mapping (returned from pvr_vm_gpuva_map or
 *    pvr_vm_gpuva_remap).
 */
int
pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
	   u64 pvr_obj_offset, u64 device_addr, u64 size)
{
	struct pvr_vm_bind_op bind_op = {0};
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm_ctx->gpuvm_mgr,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = pvr_vm_lock_extra,
			.priv = &bind_op,
		},
	};

	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
					  pvr_obj_offset, device_addr,
					  size);

	if (err)
		return err;

	pvr_gem_object_get(pvr_obj);

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_cleanup;

	err = pvr_vm_bind_op_exec(&bind_op);

	drm_gpuvm_exec_unlock(&vm_exec);

err_cleanup:
	pvr_vm_bind_op_fini(&bind_op);

	return err;
}
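
/*
 * Example usage (illustrative sketch, not code from this driver): since no
 * mapping handle is returned, the device-virtual address doubles as the
 * handle when the mapping is torn down again; the offset, address and size
 * values below are placeholders:
 *
 *	err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);
 *	if (err)
 *		return err;
 *	...
 *	err = pvr_vm_unmap(vm_ctx, device_addr, size);
 */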

/**
 * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
 * memory.
 * @vm_ctx: Target VM context.
 * @pvr_obj: Target PowerVR memory object.
 * @device_addr: Virtual device address at the start of the target mapping.
 * @size: Size of the target mapping.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 *    address,
 *  * Any error encountered while performing internal operations required to
 *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
 *    pvr_vm_gpuva_remap).
 *
 * The vm_ctx->lock must be held when calling this function.
 */
static int
pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
			struct pvr_gem_object *pvr_obj,
			u64 device_addr, u64 size)
{
	struct pvr_vm_bind_op bind_op = {0};
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm_ctx->gpuvm_mgr,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = pvr_vm_lock_extra,
			.priv = &bind_op,
		},
	};

	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
					    device_addr, size);
	if (err)
		return err;

	pvr_gem_object_get(pvr_obj);

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_cleanup;

	err = pvr_vm_bind_op_exec(&bind_op);

	drm_gpuvm_exec_unlock(&vm_exec);

err_cleanup:
	pvr_vm_bind_op_fini(&bind_op);

	return err;
}

/**
 * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
 * memory.
 * @vm_ctx: Target VM context.
 * @pvr_obj: Target PowerVR memory object.
 * @device_addr: Virtual device address at the start of the target mapping.
 * @size: Size of the target mapping.
 *
 * Return:
 *  * 0 on success,
 *  * Any error encountered by pvr_vm_unmap_obj_locked().
 */
int
pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
		 u64 device_addr, u64 size)
{
	int err;

	mutex_lock(&vm_ctx->lock);
	err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
	mutex_unlock(&vm_ctx->lock);

	return err;
}

/**
 * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
 * @vm_ctx: Target VM context.
 * @device_addr: Virtual device address at the start of the target mapping.
 * @size: Size of the target mapping.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOENT if drm_gpuva_find() finds no mapping matching @device_addr and
 *    @size,
 *  * Any error encountered by pvr_vm_unmap_obj_locked().
 */
int
pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
{
	struct pvr_gem_object *pvr_obj;
	struct drm_gpuva *va;
	int err;

	mutex_lock(&vm_ctx->lock);

	va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
	if (va) {
		pvr_obj = gem_to_pvr_gem(va->gem.obj);
		err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
					      va->va.addr, va->va.range);
	} else {
		err = -ENOENT;
	}

	mutex_unlock(&vm_ctx->lock);

	return err;
}

/**
 * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
 * @vm_ctx: Target VM context.
 *
 * This function ensures that no mappings are left dangling by unmapping them
 * all in order of ascending device-virtual address.
 */
void
pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
{
	mutex_lock(&vm_ctx->lock);

	for (;;) {
		struct pvr_gem_object *pvr_obj;
		struct drm_gpuva *va;

		va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
					  vm_ctx->gpuvm_mgr.mm_start,
					  vm_ctx->gpuvm_mgr.mm_range);
		if (!va)
			break;

		pvr_obj = gem_to_pvr_gem(va->gem.obj);

		WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
						va->va.addr, va->va.range));
	}

	mutex_unlock(&vm_ctx->lock);
}

/* Static data areas are determined by firmware. */
static const struct drm_pvr_static_data_area static_data_areas[] = {
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
		.location_heap_id = DRM_PVR_HEAP_GENERAL,
		.offset = 0,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
		.location_heap_id = DRM_PVR_HEAP_GENERAL,
		.offset = 128,
		.size = 1024,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
		.offset = 0,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
		.offset = 128,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
		.offset = 0,
		.size = 128,
	},
};

#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)

/*
 * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
 * static data area for each heap.
 */
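
/*
 * Worked example (illustrative): for DRM_PVR_HEAP_GENERAL the last static
 * data area entry above has offset 128 and size 1024, so
 * GET_RESERVED_SIZE(128, 1024) == round_up(1152, PAGE_SIZE), i.e. a single
 * page on a typical 4 KiB PAGE_SIZE.
 */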
static const struct drm_pvr_heap pvr_heaps[] = {
	[DRM_PVR_HEAP_GENERAL] = {
		.base = ROGUE_GENERAL_HEAP_BASE,
		.size = ROGUE_GENERAL_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_USC_CODE] = {
		.base = ROGUE_USCCODE_HEAP_BASE,
		.size = ROGUE_USCCODE_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_RGNHDR] = {
		.base = ROGUE_RGNHDR_HEAP_BASE,
		.size = ROGUE_RGNHDR_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_VIS_TEST] = {
		.base = ROGUE_VISTEST_HEAP_BASE,
		.size = ROGUE_VISTEST_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
};

int
pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
			  struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_static_data_areas query = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);
	if (err < 0)
		return err;

	if (!query.static_data_areas.array) {
		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
		goto copy_out;
	}

	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);

	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
	if (err < 0)
		return err;

copy_out:
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}
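
/*
 * Summary of the query pattern used above and in pvr_heap_info_get() below
 * (descriptive note): userspace first issues the query with a NULL pointer
 * to learn the expected struct size, then calls again with a buffer of that
 * size; a NULL array pointer inside the query struct returns only the
 * element count and stride, so the caller can size its array before a final
 * call that copies the entries out.
 */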

int
pvr_heap_info_get(const struct pvr_device *pvr_dev,
		  struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_heap_info query = {0};
	u64 dest;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);
	if (err < 0)
		return err;

	if (!query.heaps.array) {
		query.heaps.count = ARRAY_SIZE(pvr_heaps);
		query.heaps.stride = sizeof(struct drm_pvr_heap);
		goto copy_out;
	}

	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
		query.heaps.count = ARRAY_SIZE(pvr_heaps);

	/* Region header heap is only present if BRN63142 is present. */
	dest = query.heaps.array;
	for (size_t i = 0; i < query.heaps.count; i++) {
		struct drm_pvr_heap heap = pvr_heaps[i];

		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
			heap.size = 0;

		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
		if (err < 0)
			return err;

		dest += query.heaps.stride;
	}

copy_out:
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}

/**
 * pvr_heap_contains_range() - Determine if a given heap contains the specified
 * device-virtual address range.
 * @pvr_heap: Target heap.
 * @start: Inclusive start of the target range.
 * @end: Inclusive end of the target range.
 *
 * It is an error to call this function with values of @start and @end that do
 * not satisfy the condition @start <= @end.
 */
static __always_inline bool
pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
{
	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
}
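
/*
 * Illustrative example: for a heap with base 0x1000000 and size 0x100000,
 * the last byte it contains is 0x10fffff, so a range with start 0x10ff000
 * and inclusive end 0x10fffff is accepted, while any range whose inclusive
 * end is 0x1100000 or above is rejected by the strict "<" comparison.
 */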

/**
 * pvr_find_heap_containing() - Find a heap which contains the specified
 * device-virtual address range.
 * @pvr_dev: Target PowerVR device.
 * @start: Start of the target range.
 * @size: Size of the target range.
 *
 * Return:
 *  * A pointer to a constant instance of struct drm_pvr_heap representing the
 *    heap containing the entire range specified by @start and @size on
 *    success, or
 *  * %NULL if no such heap exists.
 */
const struct drm_pvr_heap *
pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
{
	u64 end;

	if (check_add_overflow(start, size - 1, &end))
		return NULL;

	/*
	 * There are no guarantees about the order of address ranges in
	 * &pvr_heaps, so iterate over the entire array for a heap whose
	 * range completely encompasses the given range.
	 */
	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
		/* Filter heaps that present only with an associated quirk */
		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
			continue;
		}

		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
			return &pvr_heaps[heap_id];
	}

	return NULL;
}

/**
 * pvr_vm_find_gem_object() - Look up a buffer object from a given
 * device-virtual address.
 * @vm_ctx: [IN] Target VM context.
 * @device_addr: [IN] Virtual device address at the start of the required
 *               object.
 * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
 *                     of the mapped region within the buffer object. May be
 *                     %NULL if this information is not required.
 * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
 *                   region. May be %NULL if this information is not required.
 *
 * If successful, a reference will be taken on the buffer object. The caller
 * must drop the reference with pvr_gem_object_put().
 *
 * Return:
 *  * The PowerVR buffer object mapped at @device_addr if one exists, or
 *  * %NULL otherwise.
 */
struct pvr_gem_object *
pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
		       u64 *mapped_offset_out, u64 *mapped_size_out)
{
	struct pvr_gem_object *pvr_obj;
	struct drm_gpuva *va;

	mutex_lock(&vm_ctx->lock);

	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
	if (!va)
		goto err_unlock;

	pvr_obj = gem_to_pvr_gem(va->gem.obj);
	pvr_gem_object_get(pvr_obj);

	if (mapped_offset_out)
		*mapped_offset_out = va->gem.offset;
	if (mapped_size_out)
		*mapped_size_out = va->va.range;

	mutex_unlock(&vm_ctx->lock);

	return pvr_obj;

err_unlock:
	mutex_unlock(&vm_ctx->lock);

	return NULL;
}
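
/*
 * Example usage (illustrative sketch, not code from this driver): callers
 * must balance the reference taken on success with pvr_gem_object_put():
 *
 *	u64 mapped_size;
 *	struct pvr_gem_object *pvr_obj;
 *
 *	pvr_obj = pvr_vm_find_gem_object(vm_ctx, device_addr, NULL, &mapped_size);
 *	if (!pvr_obj)
 *		return -EINVAL;
 *	...
 *	pvr_gem_object_put(pvr_obj);
 */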

/**
 * pvr_vm_get_fw_mem_context: Get object representing firmware memory context
 * @vm_ctx: Target VM context.
 *
 * Returns:
 *  * FW object representing firmware memory context, or
 *  * %NULL if this VM context does not have a firmware memory context.
 */
struct pvr_fw_object *
pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
{
	return vm_ctx->fw_mem_ctx_obj;
}