Lines Matching +full:iommu +full:- +full:ctx
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
8 #include <linux/dma-mapping.h>
9 #include <linux/fault-inject.h>
28 * - 1.0.0 - initial interface
29 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
30 * - 1.2.0 - adds explicit fence support for submit ioctl
31 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
34 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
36 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
37 * - 1.6.0 - Syncobj support
38 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
39 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
40 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
41 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
42 * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
43 * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
52 MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
70 struct drm_device *ddev = priv->dev; in msm_drm_uninit()
76 * msm_drm_init, drm_dev->registered is used as an indicator that the in msm_drm_uninit()
79 if (ddev->registered) { in msm_drm_uninit()
81 if (priv->kms) in msm_drm_uninit()
86 * work before msm_irq_uninstall() to avoid work re-enabling an in msm_drm_uninit()
90 flush_workqueue(priv->wq); in msm_drm_uninit()
97 if (priv->kms) in msm_drm_uninit()
104 ddev->dev_private = NULL; in msm_drm_uninit()
107 destroy_workqueue(priv->wq); in msm_drm_uninit()
114 struct msm_drm_private *priv = dev->dev_private; in msm_use_mmu()
118 * On other platforms IOMMU can be specified either for the in msm_use_mmu()
121 return priv->is_a2xx || in msm_use_mmu()
122 device_iommu_mapped(dev->dev) || in msm_use_mmu()
123 device_iommu_mapped(dev->dev->parent); in msm_use_mmu()
128 struct msm_drm_private *priv = dev->dev_private; in msm_init_vram()
133 /* In the device-tree world, we could have a 'memory-region' in msm_init_vram()
138 * 1) device with no IOMMU, in which case we need exclusive in msm_init_vram()
141 * 2) device with IOMMU, but where the bootloader puts up in msm_init_vram()
150 node = of_parse_phandle(dev->dev->of_node, "memory-region", 0); in msm_init_vram()
157 size = r.end - r.start + 1; in msm_init_vram()
160 /* if we have no IOMMU, then we need to use carveout allocator. in msm_init_vram()
162 * mach-msm: in msm_init_vram()
173 priv->vram.size = size; in msm_init_vram()
175 drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); in msm_init_vram()
176 spin_lock_init(&priv->vram.lock); in msm_init_vram()
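
The two calls above stand up a drm_mm range allocator over the VRAM carveout, sized in pages. A minimal sketch of how a buffer could later be carved from that pool, under the spinlock initialized above (npages is an illustrative parameter, not a name from this file):

    struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
    int ret;

    if (!node)
            return -ENOMEM;

    spin_lock(&priv->vram.lock);
    ret = drm_mm_insert_node(&priv->vram.mm, node, npages); /* size in pages */
    spin_unlock(&priv->vram.lock);
    /* on success, node->start is the page offset into the carveout */
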
181 /* note that for no-kernel-mapping, the vaddr returned in msm_init_vram()
182 * is bogus, but non-null if allocation succeeded: in msm_init_vram()
184 p = dma_alloc_attrs(dev->dev, size, in msm_init_vram()
185 &priv->vram.paddr, GFP_KERNEL, attrs); in msm_init_vram()
187 DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n"); in msm_init_vram()
188 priv->vram.paddr = 0; in msm_init_vram()
189 return -ENOMEM; in msm_init_vram()
192 DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n", in msm_init_vram()
193 (uint32_t)priv->vram.paddr, in msm_init_vram()
194 (uint32_t)(priv->vram.paddr + size)); in msm_init_vram()
202 struct msm_drm_private *priv = ddev->dev_private; in msm_deinit_vram()
205 if (!priv->vram.paddr) in msm_deinit_vram()
208 drm_mm_takedown(&priv->vram.mm); in msm_deinit_vram()
209 dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr, in msm_deinit_vram()
220 return -ENODEV; in msm_drm_init()
227 ddev->dev_private = priv; in msm_drm_init()
228 priv->dev = ddev; in msm_drm_init()
230 priv->wq = alloc_ordered_workqueue("msm", 0); in msm_drm_init()
231 if (!priv->wq) { in msm_drm_init()
232 ret = -ENOMEM; in msm_drm_init()
236 INIT_LIST_HEAD(&priv->objects); in msm_drm_init()
237 mutex_init(&priv->obj_lock); in msm_drm_init()
242 mutex_init(&priv->lru.lock); in msm_drm_init()
243 drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock); in msm_drm_init()
244 drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock); in msm_drm_init()
245 drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock); in msm_drm_init()
246 drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock); in msm_drm_init()
250 might_lock(&priv->lru.lock); in msm_drm_init()
253 if (priv->kms_init) { in msm_drm_init()
265 /* Bind all our sub-components: */ in msm_drm_init()
274 if (priv->kms_init) { in msm_drm_init()
280 WARN_ON(dev->of_node); in msm_drm_init()
281 ddev->driver_features &= ~DRIVER_MODESET; in msm_drm_init()
282 ddev->driver_features &= ~DRIVER_ATOMIC; in msm_drm_init()
293 if (priv->kms_init) { in msm_drm_init()
308 destroy_workqueue(priv->wq); in msm_drm_init()
322 struct msm_drm_private *priv = dev->dev_private; in load_gpu()
326 if (!priv->gpu) in load_gpu()
327 priv->gpu = adreno_load_gpu(dev); in load_gpu()
335 struct msm_drm_private *priv = dev->dev_private; in context_init()
336 struct msm_file_private *ctx; in context_init() local
338 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); in context_init()
339 if (!ctx) in context_init()
340 return -ENOMEM; in context_init()
342 INIT_LIST_HEAD(&ctx->submitqueues); in context_init()
343 rwlock_init(&ctx->queuelock); in context_init()
345 kref_init(&ctx->ref); in context_init()
346 msm_submitqueue_init(dev, ctx); in context_init()
348 ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current); in context_init()
349 file->driver_priv = ctx; in context_init()
351 ctx->seqno = atomic_inc_return(&ident); in context_init()
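
The kref_init() above pairs with a release callback on the last put; a sketch of the standard pattern (the real driver routes this through msm_file_private_put(), seen below; the callback name here is illustrative):

    static void context_release(struct kref *kref)
    {
            struct msm_file_private *ctx =
                    container_of(kref, struct msm_file_private, ref);

            kfree(ctx);
    }

    /* drop a reference; frees the context when the count hits zero */
    kref_put(&ctx->ref, context_release);
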
366 static void context_close(struct msm_file_private *ctx) in context_close() argument
368 msm_submitqueue_close(ctx); in context_close()
369 msm_file_private_put(ctx); in context_close()
374 struct msm_drm_private *priv = dev->dev_private; in msm_postclose()
375 struct msm_file_private *ctx = file->driver_priv; in msm_postclose() local
378 * It is not possible to set sysprof param to non-zero if gpu in msm_postclose()
381 if (priv->gpu) in msm_postclose()
382 msm_file_private_set_sysprof(ctx, priv->gpu, 0); in msm_postclose()
384 context_close(ctx); in msm_postclose()
394 struct msm_drm_private *priv = dev->dev_private; in msm_ioctl_get_param()
401 if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0)) in msm_ioctl_get_param()
402 return -EINVAL; in msm_ioctl_get_param()
404 gpu = priv->gpu; in msm_ioctl_get_param()
407 return -ENXIO; in msm_ioctl_get_param()
409 return gpu->funcs->get_param(gpu, file->driver_priv, in msm_ioctl_get_param()
410 args->param, &args->value, &args->len); in msm_ioctl_get_param()
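
For reference, userspace reaches get_param through libdrm. A hedged sketch, assuming an open render-node fd and the msm_drm.h UAPI header (error handling elided):

    #include <xf86drm.h>      /* drmCommandWriteRead(); header paths vary */
    #include "msm_drm.h"

    struct drm_msm_param req = {
            .pipe = MSM_PIPE_3D0,
            .param = MSM_PARAM_GPU_ID,
    };

    int ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
    /* on success, req.value holds the queried parameter */
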
416 struct msm_drm_private *priv = dev->dev_private; in msm_ioctl_set_param()
420 if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0)) in msm_ioctl_set_param()
421 return -EINVAL; in msm_ioctl_set_param()
423 gpu = priv->gpu; in msm_ioctl_set_param()
426 return -ENXIO; in msm_ioctl_set_param()
428 return gpu->funcs->set_param(gpu, file->driver_priv, in msm_ioctl_set_param()
429 args->param, args->value, args->len); in msm_ioctl_set_param()
436 uint32_t flags = args->flags; in msm_ioctl_gem_new()
438 if (args->flags & ~MSM_BO_FLAGS) { in msm_ioctl_gem_new()
439 DRM_ERROR("invalid flags: %08x\n", args->flags); in msm_ioctl_gem_new()
440 return -EINVAL; in msm_ioctl_gem_new()
455 if (should_fail(&fail_gem_alloc, args->size)) in msm_ioctl_gem_new()
456 return -ENOMEM; in msm_ioctl_gem_new()
458 return msm_gem_new_handle(dev, file, args->size, in msm_ioctl_gem_new()
459 args->flags, &args->handle, NULL); in msm_ioctl_gem_new()
472 ktime_t timeout = to_ktime(args->timeout); in msm_ioctl_gem_cpu_prep()
475 if (args->op & ~MSM_PREP_FLAGS) { in msm_ioctl_gem_cpu_prep()
476 DRM_ERROR("invalid op: %08x\n", args->op); in msm_ioctl_gem_cpu_prep()
477 return -EINVAL; in msm_ioctl_gem_cpu_prep()
480 obj = drm_gem_object_lookup(file, args->handle); in msm_ioctl_gem_cpu_prep()
482 return -ENOENT; in msm_ioctl_gem_cpu_prep()
484 ret = msm_gem_cpu_prep(obj, args->op, &timeout); in msm_ioctl_gem_cpu_prep()
498 obj = drm_gem_object_lookup(file, args->handle); in msm_ioctl_gem_cpu_fini()
500 return -ENOENT; in msm_ioctl_gem_cpu_fini()
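
GEM_NEW plus the CPU_PREP/CPU_FINI pair bracket CPU access to a buffer. A hedged userspace sketch of that sequence, with the same header and fd assumptions as the GET_PARAM sketch above:

    struct drm_msm_gem_new new_req = { .size = 0x1000, .flags = MSM_BO_WC };
    struct drm_msm_gem_cpu_prep prep = { .op = MSM_PREP_WRITE,
                                         .timeout = { .tv_sec = 1 } };
    struct drm_msm_gem_cpu_fini fini = {};

    drmCommandWriteRead(fd, DRM_MSM_GEM_NEW, &new_req, sizeof(new_req));
    prep.handle = fini.handle = new_req.handle;

    drmCommandWrite(fd, DRM_MSM_GEM_CPU_PREP, &prep, sizeof(prep));
    /* ... CPU reads/writes through the mmap offset ... */
    drmCommandWrite(fd, DRM_MSM_GEM_CPU_FINI, &fini, sizeof(fini));
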
513 struct msm_drm_private *priv = dev->dev_private; in msm_ioctl_gem_info_iova()
514 struct msm_file_private *ctx = file->driver_priv; in msm_ioctl_gem_info_iova() local
516 if (!priv->gpu) in msm_ioctl_gem_info_iova()
517 return -EINVAL; in msm_ioctl_gem_info_iova()
519 if (should_fail(&fail_gem_iova, obj->size)) in msm_ioctl_gem_info_iova()
520 return -ENOMEM; in msm_ioctl_gem_info_iova()
523 * Don't pin the memory here - just get an address so that userspace can in msm_ioctl_gem_info_iova()
526 return msm_gem_get_iova(obj, ctx->aspace, iova); in msm_ioctl_gem_info_iova()
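
Userspace hits this helper via GEM_INFO with MSM_INFO_GET_IOVA; a minimal sketch (fd and handle assumed, headers as above):

    struct drm_msm_gem_info info = {
            .handle = handle,
            .info = MSM_INFO_GET_IOVA,
    };

    int ret = drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &info, sizeof(info));
    /* on success, info.value is the GPU address for this context */
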
533 struct msm_drm_private *priv = dev->dev_private; in msm_ioctl_gem_info_set_iova()
534 struct msm_file_private *ctx = file->driver_priv; in msm_ioctl_gem_info_set_iova() local
536 if (!priv->gpu) in msm_ioctl_gem_info_set_iova()
537 return -EINVAL; in msm_ioctl_gem_info_set_iova()
539 /* Only supported if per-process address space is supported: */ in msm_ioctl_gem_info_set_iova()
540 if (priv->gpu->aspace == ctx->aspace) in msm_ioctl_gem_info_set_iova()
541 return UERR(EOPNOTSUPP, dev, "requires per-process pgtables"); in msm_ioctl_gem_info_set_iova()
543 if (should_fail(&fail_gem_iova, obj->size)) in msm_ioctl_gem_info_set_iova()
544 return -ENOMEM; in msm_ioctl_gem_info_set_iova()
546 return msm_gem_set_iova(obj, ctx->aspace, iova); in msm_ioctl_gem_info_set_iova()
559 return -EOVERFLOW; in msm_ioctl_gem_info_set_metadata()
571 msm_obj->metadata = in msm_ioctl_gem_info_set_metadata()
572 krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL); in msm_ioctl_gem_info_set_metadata()
573 msm_obj->metadata_size = metadata_size; in msm_ioctl_gem_info_set_metadata()
574 memcpy(msm_obj->metadata, buf, metadata_size); in msm_ioctl_gem_info_set_metadata()
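
Note that, as listed, the krealloc() result flows into memcpy() unchecked. A defensive variant, as an editorial sketch rather than the upstream code (locking and cleanup of the surrounding function are elided):

    void *new_md = krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);

    if (!new_md)
            return -ENOMEM;

    msm_obj->metadata = new_md;
    msm_obj->metadata_size = metadata_size;
    memcpy(msm_obj->metadata, buf, metadata_size);
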
601 *metadata_size = msm_obj->metadata_size; in msm_ioctl_gem_info_get_metadata()
610 len = msm_obj->metadata_size; in msm_ioctl_gem_info_get_metadata()
611 buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL); in msm_ioctl_gem_info_get_metadata()
616 ret = -ETOOSMALL; in msm_ioctl_gem_info_get_metadata()
618 ret = -EFAULT; in msm_ioctl_gem_info_get_metadata()
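
The -ETOOSMALL path implies the usual two-step read: query the size with a null buffer, then fetch into allocated storage. A hedged userspace sketch (fd and handle assumed; malloc result unchecked for brevity):

    struct drm_msm_gem_info info = {
            .handle = handle,
            .info = MSM_INFO_GET_METADATA,
    };

    drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &info, sizeof(info)); /* .value == 0: size query */
    info.value = (uintptr_t)malloc(info.len);
    drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &info, sizeof(info)); /* fills the buffer */
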
636 if (args->pad) in msm_ioctl_gem_info()
637 return -EINVAL; in msm_ioctl_gem_info()
639 switch (args->info) { in msm_ioctl_gem_info()
645 if (args->len) in msm_ioctl_gem_info()
646 return -EINVAL; in msm_ioctl_gem_info()
654 return -EINVAL; in msm_ioctl_gem_info()
657 obj = drm_gem_object_lookup(file, args->handle); in msm_ioctl_gem_info()
659 return -ENOENT; in msm_ioctl_gem_info()
663 switch (args->info) { in msm_ioctl_gem_info()
665 args->value = msm_gem_mmap_offset(obj); in msm_ioctl_gem_info()
668 ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value); in msm_ioctl_gem_info()
671 ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value); in msm_ioctl_gem_info()
674 if (obj->import_attach) { in msm_ioctl_gem_info()
675 ret = -EINVAL; in msm_ioctl_gem_info()
678 /* Hide internal kernel-only flags: */ in msm_ioctl_gem_info()
679 args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS; in msm_ioctl_gem_info()
684 if (args->len >= sizeof(msm_obj->name)) { in msm_ioctl_gem_info()
685 ret = -EINVAL; in msm_ioctl_gem_info()
688 if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value), in msm_ioctl_gem_info()
689 args->len)) { in msm_ioctl_gem_info()
690 msm_obj->name[0] = '\0'; in msm_ioctl_gem_info()
691 ret = -EFAULT; in msm_ioctl_gem_info()
694 msm_obj->name[args->len] = '\0'; in msm_ioctl_gem_info()
695 for (i = 0; i < args->len; i++) { in msm_ioctl_gem_info()
696 if (!isprint(msm_obj->name[i])) { in msm_ioctl_gem_info()
697 msm_obj->name[i] = '\0'; in msm_ioctl_gem_info()
703 if (args->value && (args->len < strlen(msm_obj->name))) { in msm_ioctl_gem_info()
704 ret = -ETOOSMALL; in msm_ioctl_gem_info()
707 args->len = strlen(msm_obj->name); in msm_ioctl_gem_info()
708 if (args->value) { in msm_ioctl_gem_info()
709 if (copy_to_user(u64_to_user_ptr(args->value), in msm_ioctl_gem_info()
710 msm_obj->name, args->len)) in msm_ioctl_gem_info()
711 ret = -EFAULT; in msm_ioctl_gem_info()
716 obj, u64_to_user_ptr(args->value), args->len); in msm_ioctl_gem_info()
720 obj, u64_to_user_ptr(args->value), &args->len); in msm_ioctl_gem_info()
735 if (fence_after(fence_id, queue->last_fence)) { in wait_fence()
737 fence_id, queue->last_fence); in wait_fence()
738 return -EINVAL; in wait_fence()
743 * back to underlying dma-fence in wait_fence()
749 spin_lock(&queue->idr_lock); in wait_fence()
750 fence = idr_find(&queue->fence_idr, fence_id); in wait_fence()
753 spin_unlock(&queue->idr_lock); in wait_fence()
763 ret = -ETIMEDOUT; in wait_fence()
764 } else if (ret != -ERESTARTSYS) { in wait_fence()
776 struct msm_drm_private *priv = dev->dev_private; in msm_ioctl_wait_fence()
781 if (args->flags & ~MSM_WAIT_FENCE_FLAGS) { in msm_ioctl_wait_fence()
782 DRM_ERROR("invalid flags: %08x\n", args->flags); in msm_ioctl_wait_fence()
783 return -EINVAL; in msm_ioctl_wait_fence()
786 if (!priv->gpu) in msm_ioctl_wait_fence()
789 queue = msm_submitqueue_get(file->driver_priv, args->queueid); in msm_ioctl_wait_fence()
791 return -ENOENT; in msm_ioctl_wait_fence()
793 ret = wait_fence(queue, args->fence, to_ktime(args->timeout), args->flags); in msm_ioctl_wait_fence()
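
The same path from userspace, via the WAIT_FENCE ioctl; a minimal sketch (queue id and fence id come from SUBMITQUEUE_NEW and a prior submit):

    struct drm_msm_wait_fence wait = {
            .fence = fence_id,
            .queueid = queue_id,
            .timeout = { .tv_sec = 1 },
    };

    int ret = drmCommandWrite(fd, DRM_MSM_WAIT_FENCE, &wait, sizeof(wait));
    /* a negative return maps the -ETIMEDOUT / -EINVAL cases above */
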
807 switch (args->madv) { in msm_ioctl_gem_madvise()
812 return -EINVAL; in msm_ioctl_gem_madvise()
815 obj = drm_gem_object_lookup(file, args->handle); in msm_ioctl_gem_madvise()
817 return -ENOENT; in msm_ioctl_gem_madvise()
820 ret = msm_gem_madvise(obj, args->madv); in msm_ioctl_gem_madvise()
822 args->retained = ret; in msm_ioctl_gem_madvise()
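
And the userspace side of madvise; a hedged sketch (fd and handle assumed):

    struct drm_msm_gem_madvise madv = {
            .handle = handle,
            .madv = MSM_MADV_DONTNEED,
    };

    drmCommandWriteRead(fd, DRM_MSM_GEM_MADVISE, &madv, sizeof(madv));
    /* madv.retained reports whether the backing pages were still present */
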
837 if (args->flags & ~MSM_SUBMITQUEUE_FLAGS) in msm_ioctl_submitqueue_new()
838 return -EINVAL; in msm_ioctl_submitqueue_new()
840 return msm_submitqueue_create(dev, file->driver_priv, args->prio, in msm_ioctl_submitqueue_new()
841 args->flags, &args->id); in msm_ioctl_submitqueue_new()
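
The matching userspace call to create a queue; a hedged sketch (priority 0 assumed valid for the GPU's ring setup):

    struct drm_msm_submitqueue req = { .flags = 0, .prio = 0 };

    int ret = drmCommandWriteRead(fd, DRM_MSM_SUBMITQUEUE_NEW, &req, sizeof(req));
    /* on success, req.id names the queue in SUBMIT and WAIT_FENCE calls */
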
847 return msm_submitqueue_query(dev, file->driver_priv, data); in msm_ioctl_submitqueue_query()
855 return msm_submitqueue_remove(file->driver_priv, id); in msm_ioctl_submitqueue_close()
875 struct drm_device *dev = file->minor->dev; in msm_show_fdinfo()
876 struct msm_drm_private *priv = dev->dev_private; in msm_show_fdinfo()
878 if (!priv->gpu) in msm_show_fdinfo()
881 msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, p); in msm_show_fdinfo()
924 * Identify what components need to be added by parsing which remote-endpoints
932 struct device_node *np = master_dev->of_node; in add_components_mdp()
949 * remote-endpoint isn't a component that we need to add in add_components_mdp()
988 "qcom,msm8917-mdp5",
989 "qcom,msm8937-mdp5",
990 "qcom,msm8953-mdp5",
991 "qcom,msm8996-mdp5",
992 "qcom,sdm630-mdp5",
993 "qcom,sdm660-mdp5",
1000 if (!of_device_is_compatible(dev->of_node, "qcom,mdp5")) in msm_disp_drv_should_bind()
1004 if (!of_device_compatible_match(dev->of_node, msm_mdp5_dpu_migration)) in msm_disp_drv_should_bind()
1018 { .compatible = "qcom,adreno-3xx" },
1020 { .compatible = "qcom,kgsl-3d0" },
1066 return -ENOMEM; in msm_drv_probe()
1068 priv->kms = kms; in msm_drv_probe()
1069 priv->kms_init = kms_init; in msm_drv_probe()
1083 /* on all devices that I am aware of, IOMMUs which can map in msm_drv_probe()
1104 return msm_drv_probe(&pdev->dev, NULL, NULL); in msm_pdev_probe()
1109 component_master_del(&pdev->dev, &msm_drm_ops); in msm_pdev_remove()
1123 return -EINVAL; in msm_drm_register()