Lines Matching +full:iommu +full:- +full:ctx
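These hits are excerpts from the ivpu accel driver's GEM buffer-object code (apparently drivers/accel/ivpu/ivpu_gem.c): the BO debug helper, pinning and VPU address allocation, unbind/teardown, the create/info/wait ioctls, and the debugfs BO listing. Code elided between hits inside a function is marked with /* ... */.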

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/dma-buf.h>
/* in ivpu_dbg_bo(): */
	ivpu_dbg(vdev, BO,
		 "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
		 action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
		 (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
		 (bool)bo->base.base.import_attach);
/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
 *
 * This function pins the physical memory pages, then maps them into the
 * IOMMU address space and finally updates the VPU MMU page tables so that
 * the VPU can translate a VPU address into an IOMMU address.
 */
/* in ivpu_bo_pin(): */
	mutex_lock(&bo->lock);

	drm_WARN_ON(&vdev->drm, !bo->ctx);

	if (!bo->mmu_mapped) {
		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);

		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
			goto unlock;
		}

		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
					       ivpu_bo_is_snooped(bo));
		if (!ret)
			bo->mmu_mapped = true;
	}
unlock:
	mutex_unlock(&bo->lock);
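A minimal usage sketch (not taken from the driver): the order visible in these excerpts is to reserve a VPU address for the BO in a context and only then pin it. The names used below (ivpu_bo_alloc_vpu_addr(), file_priv->ctx, vdev->hw->ranges.user, ivpu_bo_pin()) all appear in the excerpts; the surrounding error handling is illustrative only.

	/* Illustrative only: reserve a VPU address in this file's context, then pin. */
	ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, &vdev->hw->ranges.user);
	if (ret)
		return ret;

	ret = ivpu_bo_pin(bo);	/* pages -> IOMMU -> VPU MMU page tables */
	if (ret)
		return ret;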
static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
		       struct ivpu_addr_range *range)
{
	/* ... */
	if (!drm_dev_enter(&vdev->drm, &idx))
		return -ENODEV;

	mutex_lock(&bo->lock);

	ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
	if (!ret) {
		bo->ctx = ctx;
		bo->vpu_addr = bo->mm_node.start;
	} else {
		ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
	}
	/* ... */
	mutex_unlock(&bo->lock);
/* in ivpu_bo_unbind_locked(): */
	lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));

	if (bo->mmu_mapped) {
		drm_WARN_ON(&vdev->drm, !bo->ctx);
		drm_WARN_ON(&vdev->drm, !bo->vpu_addr);
		drm_WARN_ON(&vdev->drm, !bo->base.sgt);
		ivpu_mmu_context_unmap_sgt(vdev, bo->ctx, bo->vpu_addr, bo->base.sgt);
		bo->mmu_mapped = false;
	}

	if (bo->ctx) {
		ivpu_mmu_context_remove_node(bo->ctx, &bo->mm_node);
		bo->ctx = NULL;
	}

	if (bo->base.base.import_attach)
		return;

	dma_resv_lock(bo->base.base.resv, NULL);
	if (bo->base.sgt) {
		dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
		sg_free_table(bo->base.sgt);
		kfree(bo->base.sgt);
		bo->base.sgt = NULL;
	}
	dma_resv_unlock(bo->base.base.resv);
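/*
 * Teardown order above: the VPU MMU mapping is removed first, then the BO's
 * VPU address range (mm_node) is released, and only for BOs that are not
 * dma-buf imports is the sg_table DMA-unmapped and freed under the
 * reservation lock.
 */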
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo *bo;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return;

	mutex_lock(&vdev->bo_list_lock);
	list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
		mutex_lock(&bo->lock);
		if (bo->ctx == ctx) {
			/* ... */
		}
		mutex_unlock(&bo->lock);
	}
	mutex_unlock(&vdev->bo_list_lock);
}
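/*
 * Context teardown walks the device-wide BO list under bo_list_lock and,
 * with each BO's own lock held, detaches every BO still bound to the dying
 * context.
 */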
/* in ivpu_gem_create_object(): */
	if (size == 0 || !PAGE_ALIGNED(size))
		return ERR_PTR(-EINVAL);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->base.base.funcs = &ivpu_gem_funcs;
	bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */

	INIT_LIST_HEAD(&bo->bo_list_node);
	mutex_init(&bo->lock);

	return &bo->base.base;
/* in ivpu_gem_prime_import(): */
	struct device *attach_dev = dev->dev;
	/* ... */
	obj->import_attach = attach;
	obj->resv = dma_buf->resv;
/* in ivpu_bo_alloc(): */
	/* ... flag validation ... */
		return ERR_PTR(-EINVAL);

	shmem = drm_gem_shmem_create(&vdev->drm, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_ivpu_bo(&shmem->base);
	bo->base.map_wc = flags & DRM_IVPU_BO_WC;
	bo->flags = flags;

	mutex_lock(&vdev->bo_list_lock);
	list_add_tail(&bo->bo_list_node, &vdev->bo_list);
	mutex_unlock(&vdev->bo_list_lock);
/* in ivpu_gem_bo_open(): */
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	/* ... */
	if (bo->ctx) {
		ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
			  file_priv->ctx.id, bo->ctx->id);
		return -EALREADY;
	}

	if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
		range = &vdev->hw->ranges.shave;
	else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
		range = &vdev->hw->ranges.dma;
	else
		range = &vdev->hw->ranges.user;

	return ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, range);
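/*
 * Range selection: DRM_IVPU_BO_SHAVE_MEM and DRM_IVPU_BO_DMA_MEM steer the
 * BO into the SHAVE and DMA regions of the VPU address space; everything
 * else lands in the generic user range.
 */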
/* in ivpu_gem_bo_free(): */
	struct ivpu_device *vdev = to_ivpu_device(obj->dev);
	/* ... */
	mutex_lock(&vdev->bo_list_lock);
	list_del(&bo->bo_list_node);
	mutex_unlock(&vdev->bo_list_lock);

	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
	/* ... */
	mutex_destroy(&bo->lock);

	drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
	drm_gem_shmem_free(&bo->base);
/* in ivpu_bo_create_ioctl(): */
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	u64 size = PAGE_ALIGN(args->size);
	/* ... */
	if (args->flags & ~DRM_IVPU_BO_FLAGS)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	bo = ivpu_bo_alloc(vdev, size, args->flags);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
			 bo, file_priv->ctx.id, args->size, args->flags);
		return PTR_ERR(bo);
	}

	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
	if (!ret)
		args->vpu_addr = bo->vpu_addr;

	drm_gem_object_put(&bo->base.base);
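From userspace the same path is reached through the BO-create ioctl. A minimal sketch, assuming the uapi names from include/uapi/drm/ivpu_accel.h (struct drm_ivpu_bo_create, DRM_IOCTL_IVPU_BO_CREATE) and an already-open accel device fd; only the size/flags/handle/vpu_addr fields used by the handler above are relied on:

	struct drm_ivpu_bo_create args = {
		.size  = 4096,			/* the handler rounds this up with PAGE_ALIGN() */
		.flags = DRM_IVPU_BO_MAPPABLE,
	};

	if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args) == 0)
		printf("handle %u vpu_addr 0x%llx\n",
		       args.handle, (unsigned long long)args.vpu_addr);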
struct ivpu_bo *
ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
	       struct ivpu_addr_range *range, u64 size, u32 flags)
{
	/* ... */
	if (drm_WARN_ON(&vdev->drm, !range))
		return NULL;

	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->start));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

	bo = ivpu_bo_alloc(vdev, size, flags);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, /* ... */
			 bo, range->start, size, flags);
		return NULL;
	}

	ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range);
	/* ... */
	if (flags & DRM_IVPU_BO_MAPPABLE) {
		dma_resv_lock(bo->base.base.resv, NULL);
		ret = drm_gem_shmem_vmap(&bo->base, &map);
		dma_resv_unlock(bo->base.base.resv);
	}
	/* ... error path: */
	drm_gem_object_put(&bo->base.base);
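/*
 * Kernel-internal BOs created with DRM_IVPU_BO_MAPPABLE are also vmapped
 * under the reservation lock so the driver can reach them through
 * bo->base.vaddr; ivpu_bo_free() below undoes that mapping before dropping
 * the reference.
 */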
/* in ivpu_bo_create_global(): */
	return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags);
/* in ivpu_bo_free(): */
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);

	if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
		dma_resv_lock(bo->base.base.resv, NULL);
		drm_gem_shmem_vunmap(&bo->base, &map);
		dma_resv_unlock(bo->base.base.resv);
	}

	drm_gem_object_put(&bo->base.base);
/* in ivpu_bo_info_ioctl(): */
	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_ivpu_bo(obj);

	mutex_lock(&bo->lock);
	args->flags = bo->flags;
	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	args->vpu_addr = bo->vpu_addr;
	args->size = obj->size;
	mutex_unlock(&bo->lock);
/* in ivpu_bo_wait_ioctl(): */
	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
	/* ... */
	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -EINVAL;

	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
		args->job_status = to_ivpu_bo(obj)->job_status;
	}
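/*
 * The wait ioctl blocks on the BO's reservation object (DMA_RESV_USAGE_READ)
 * until the requested timeout; once the fences have signalled, the job
 * status recorded on the BO is returned to userspace, otherwise -ETIMEDOUT.
 */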
/* in ivpu_bo_print_info(): */
	mutex_lock(&bo->lock);

	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
		   bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
		   bo->flags, kref_read(&bo->base.base.refcount));

	if (bo->base.pages)
		drm_printf(p, " has_pages");

	if (bo->mmu_mapped)
		drm_printf(p, " mmu_mapped");

	if (bo->base.base.import_attach)
		drm_printf(p, " imported");
	/* ... */
	mutex_unlock(&bo->lock);
/* in ivpu_bo_list(): */
	drm_printf(p, "%-9s %-3s %-14s %-10s %-10s %-4s %s\n",
		   "bo", "ctx", "vpu_addr", "size", "flags", "refs", "attribs");

	mutex_lock(&vdev->bo_list_lock);
	list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
		ivpu_bo_print_info(bo, p);
	mutex_unlock(&vdev->bo_list_lock);
/* in ivpu_bo_list_print(): */
	struct drm_printer p = drm_info_printer(dev->dev);