Lines Matching +full:iommu +full:- +full:ctx

1 // SPDX-License-Identifier: GPL-2.0-only
7 #include <linux/dma-map-ops.h>
11 #include <linux/dma-buf.h>
28 struct msm_drm_private *priv = obj->dev->dev_private; in physaddr()
29 return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + in physaddr()
30 priv->vram.paddr; in physaddr()
36 return !msm_obj->vram_node; in use_pages()
41 uint64_t total_mem = atomic64_add_return(size, &priv->total_mem); in update_device_mem()
47 struct msm_file_private *ctx = file->driver_priv; in update_ctx_mem() local
48 uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem); in update_ctx_mem()
50 rcu_read_lock(); /* Locks file->pid! */ in update_ctx_mem()
51 trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem); in update_ctx_mem()
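Editor's note: the two lines above follow the usual RCU read pattern for the RCU-protected drm_file::pid pointer; the matching rcu_read_unlock() simply is not among the matched lines. A minimal sketch of that pattern (helper name hypothetical, not part of the driver):

    /* Editor's sketch, not driver code: standard RCU read of drm_file::pid. */
    static pid_t read_file_pid(struct drm_file *file)
    {
            pid_t nr;

            rcu_read_lock();                                /* enter read-side critical section */
            nr = pid_nr(rcu_dereference(file->pid));        /* dereference only while locked */
            rcu_read_unlock();                              /* struct pid may be freed after this */

            return nr;
    }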
58 update_ctx_mem(file, obj->size); in msm_gem_open()
64 update_ctx_mem(file, -obj->size); in msm_gem_close()
68 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
74 * display generation, the display's iommu may be wired up to either
75 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
76 * that here we either have dma-direct or iommu ops.
83 struct device *dev = msm_obj->base.dev->dev; in sync_for_device()
85 dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); in sync_for_device()
90 struct device *dev = msm_obj->base.dev->dev; in sync_for_cpu()
92 dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); in sync_for_cpu()
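Editor's note: as the comment above explains, sync_for_device()/sync_for_cpu() use the streaming dma-mapping calls purely for cache maintenance, so the same code works whether the device ends up with dma-direct or iommu ops. A sketch (not literal driver code) of how the pair brackets the lifetime of a write-combined BO, matching the MSM_BO_WC checks in get_pages()/put_pages() further down:

    /* Editor's sketch, not literal driver code. */
    static void wc_pages_lifecycle(struct msm_gem_object *msm_obj)
    {
            if (msm_obj->flags & MSM_BO_WC)
                    sync_for_device(msm_obj);       /* pages just allocated: flush dirty CPU lines */

            /* ... BO is mapped write-combined and used by GPU/display ... */

            if (msm_obj->flags & MSM_BO_WC)
                    sync_for_cpu(msm_obj);          /* pages about to be freed: drop stale CPU lines */
    }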
97 struct msm_drm_private *priv = obj->dev->dev_private; in update_lru_active()
100 GEM_WARN_ON(!msm_obj->pages); in update_lru_active()
102 if (msm_obj->pin_count) { in update_lru_active()
103 drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj); in update_lru_active()
104 } else if (msm_obj->madv == MSM_MADV_WILLNEED) { in update_lru_active()
105 drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj); in update_lru_active()
107 GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED); in update_lru_active()
109 drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj); in update_lru_active()
115 struct msm_drm_private *priv = obj->dev->dev_private; in update_lru_locked()
118 msm_gem_assert_locked(&msm_obj->base); in update_lru_locked()
120 if (!msm_obj->pages) { in update_lru_locked()
121 GEM_WARN_ON(msm_obj->pin_count); in update_lru_locked()
123 drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj); in update_lru_locked()
131 struct msm_drm_private *priv = obj->dev->dev_private; in update_lru()
133 mutex_lock(&priv->lru.lock); in update_lru()
135 mutex_unlock(&priv->lru.lock); in update_lru()
138 /* allocate pages from VRAM carveout, used when no IOMMU: */
142 struct msm_drm_private *priv = obj->dev->dev_private; in get_pages_vram()
149 return ERR_PTR(-ENOMEM); in get_pages_vram()
151 spin_lock(&priv->vram.lock); in get_pages_vram()
152 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); in get_pages_vram()
153 spin_unlock(&priv->vram.lock); in get_pages_vram()
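Editor's note: get_pages_vram() only partially appears above. The idea (see physaddr() near the top of the listing) is that a drm_mm node reserved inside the VRAM carveout gives a page offset, and priv->vram.paddr turns it into real pages. A rough sketch of the rest of the function, assuming p is the page array allocated earlier in the function (its allocation is not among the matched lines):

    /* Editor's sketch of the carveout idea, not the verbatim function body:
     * after drm_mm_insert_node() has reserved npages of the carveout, each
     * backing page is carveout base + node offset, one page at a time.
     */
    dma_addr_t paddr = physaddr(obj);       /* vram.paddr + (vram_node->start << PAGE_SHIFT) */
    int i;

    for (i = 0; i < npages; i++) {
            p[i] = pfn_to_page(paddr >> PAGE_SHIFT);
            paddr += PAGE_SIZE;
    }
    return p;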
174 if (!msm_obj->pages) { in get_pages()
175 struct drm_device *dev = obj->dev; in get_pages()
177 int npages = obj->size >> PAGE_SHIFT; in get_pages()
185 DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n", in get_pages()
190 update_device_mem(dev->dev_private, obj->size); in get_pages()
192 msm_obj->pages = p; in get_pages()
194 msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages); in get_pages()
195 if (IS_ERR(msm_obj->sgt)) { in get_pages()
196 void *ptr = ERR_CAST(msm_obj->sgt); in get_pages()
198 DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n"); in get_pages()
199 msm_obj->sgt = NULL; in get_pages()
203 /* For non-cached buffers, ensure the new pages are clean in get_pages()
206 if (msm_obj->flags & MSM_BO_WC) in get_pages()
212 return msm_obj->pages; in get_pages()
218 struct msm_drm_private *priv = obj->dev->dev_private; in put_pages_vram()
220 spin_lock(&priv->vram.lock); in put_pages_vram()
221 drm_mm_remove_node(msm_obj->vram_node); in put_pages_vram()
222 spin_unlock(&priv->vram.lock); in put_pages_vram()
224 kvfree(msm_obj->pages); in put_pages_vram()
231 if (msm_obj->pages) { in put_pages()
232 if (msm_obj->sgt) { in put_pages()
233 /* For non-cached buffers, ensure the new in put_pages()
237 if (msm_obj->flags & MSM_BO_WC) in put_pages()
240 sg_free_table(msm_obj->sgt); in put_pages()
241 kfree(msm_obj->sgt); in put_pages()
242 msm_obj->sgt = NULL; in put_pages()
245 update_device_mem(obj->dev->dev_private, -obj->size); in put_pages()
248 drm_gem_put_pages(obj, msm_obj->pages, true, false); in put_pages()
252 msm_obj->pages = NULL; in put_pages()
264 if (msm_obj->madv > madv) { in msm_gem_get_pages_locked()
265 DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n", in msm_gem_get_pages_locked()
266 msm_obj->madv, madv); in msm_gem_get_pages_locked()
267 return ERR_PTR(-EBUSY); in msm_gem_get_pages_locked()
278 struct msm_drm_private *priv = obj->dev->dev_private; in msm_gem_pin_obj_locked()
282 to_msm_bo(obj)->pin_count++; in msm_gem_pin_obj_locked()
283 drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj); in msm_gem_pin_obj_locked()
288 struct msm_drm_private *priv = obj->dev->dev_private; in pin_obj_locked()
290 mutex_lock(&priv->lru.lock); in pin_obj_locked()
292 mutex_unlock(&priv->lru.lock); in pin_obj_locked()
317 if (msm_obj->flags & MSM_BO_WC) in msm_gem_pgprot()
324 struct vm_area_struct *vma = vmf->vma; in msm_gem_fault()
325 struct drm_gem_object *obj = vma->vm_private_data; in msm_gem_fault()
343 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { in msm_gem_fault()
355 /* We don't use vmf->pgoff since that has the fake offset: */ in msm_gem_fault()
356 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; in msm_gem_fault()
360 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, in msm_gem_fault()
363 ret = vmf_insert_pfn(vma, vmf->address, pfn); in msm_gem_fault()
374 struct drm_device *dev = obj->dev; in mmap_offset()
383 DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n"); in mmap_offset()
387 return drm_vma_node_offset_addr(&obj->vma_node); in mmap_offset()
410 return ERR_PTR(-ENOMEM); in add_vma()
412 list_add_tail(&vma->list, &msm_obj->vmas); in add_vma()
425 list_for_each_entry(vma, &msm_obj->vmas, list) { in lookup_vma()
426 if (vma->aspace == aspace) in lookup_vma()
438 list_del(&vma->list); in del_vma()
444 * iova range) in addition to removing the iommu mapping. In the eviction
445 * case (!close), we keep the iova allocated, but only remove the iommu
456 list_for_each_entry(vma, &msm_obj->vmas, list) { in put_iova_spaces()
457 if (vma->aspace) { in put_iova_spaces()
474 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { in put_iova_vmas()
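Editor's note: the comment at lines 444-445 describes put_iova_spaces(): on close, both the iommu mapping and the iova allocation are torn down; on eviction, only the mapping is. A sketch of that split, using hypothetical vma_unmap()/vma_release() helpers rather than the driver's real ones:

    /* Editor's sketch of the close-vs-evict split; helper names are placeholders. */
    static void put_iova_spaces_sketch(struct msm_gem_object *msm_obj, bool close)
    {
            struct msm_gem_vma *vma;

            list_for_each_entry(vma, &msm_obj->vmas, list) {
                    if (!vma->aspace)
                            continue;
                    vma_unmap(vma);                 /* always drop the iommu mapping */
                    if (close)
                            vma_release(vma);       /* only on close: return the iova range */
            }
    }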
496 ret = msm_gem_vma_init(vma, obj->size, in get_vma_locked()
503 GEM_WARN_ON(vma->iova < range_start); in get_vma_locked()
504 GEM_WARN_ON((vma->iova + obj->size) > range_end); in get_vma_locked()
516 if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) in msm_gem_pin_vma_locked()
519 if (msm_obj->flags & MSM_BO_MAP_PRIV) in msm_gem_pin_vma_locked()
522 if (msm_obj->flags & MSM_BO_CACHED_COHERENT) in msm_gem_pin_vma_locked()
531 return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size); in msm_gem_pin_vma_locked()
536 struct msm_drm_private *priv = obj->dev->dev_private; in msm_gem_unpin_locked()
541 mutex_lock(&priv->lru.lock); in msm_gem_unpin_locked()
542 msm_obj->pin_count--; in msm_gem_unpin_locked()
543 GEM_WARN_ON(msm_obj->pin_count < 0); in msm_gem_unpin_locked()
545 mutex_unlock(&priv->lru.lock); in msm_gem_unpin_locked()
548 /* Special unpin path for use in fence-signaling path, avoiding the need
558 msm_obj->pin_count--; in msm_gem_unpin_active()
559 GEM_WARN_ON(msm_obj->pin_count < 0); in msm_gem_unpin_active()
584 *iova = vma->iova; in get_and_pin_iova_range_locked()
630 *iova = vma->iova; in msm_gem_get_iova()
669 vma = get_vma_locked(obj, aspace, iova, iova + obj->size); in msm_gem_set_iova()
672 } else if (GEM_WARN_ON(vma->iova != iova)) { in msm_gem_set_iova()
674 ret = -EBUSY; in msm_gem_set_iova()
703 args->pitch = align_pitch(args->width, args->bpp); in msm_gem_dumb_create()
704 args->size = PAGE_ALIGN(args->pitch * args->height); in msm_gem_dumb_create()
705 return msm_gem_new_handle(dev, file, args->size, in msm_gem_dumb_create()
706 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb"); in msm_gem_dumb_create()
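Editor's note: msm_gem_dumb_create() just derives pitch and size from the requested geometry. A worked example, assuming align_pitch(w, bpp) rounds the width up to 32 pixels and multiplies by bytes per pixel (its definition is not among the lines above):

    /* Editor's worked example for a 1920x1080, 32 bpp dumb buffer:
     *   args->pitch = align_pitch(1920, 32)   = 1920 * 4      = 7680 bytes
     *   args->size  = PAGE_ALIGN(7680 * 1080) = 8294400 bytes = 2025 pages
     */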
718 ret = -ENOENT; in msm_gem_dumb_map_offset()
738 if (obj->import_attach) in get_vaddr()
739 return ERR_PTR(-ENODEV); in get_vaddr()
753 msm_obj->vmap_count++; in get_vaddr()
755 if (!msm_obj->vaddr) { in get_vaddr()
756 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, in get_vaddr()
758 if (msm_obj->vaddr == NULL) { in get_vaddr()
759 ret = -ENOMEM; in get_vaddr()
764 return msm_obj->vaddr; in get_vaddr()
767 msm_obj->vmap_count--; in get_vaddr()
804 GEM_WARN_ON(msm_obj->vmap_count < 1); in msm_gem_put_vaddr_locked()
806 msm_obj->vmap_count--; in msm_gem_put_vaddr_locked()
818 * false or -errno.
822 struct msm_drm_private *priv = obj->dev->dev_private; in msm_gem_madvise()
827 mutex_lock(&priv->lru.lock); in msm_gem_madvise()
829 if (msm_obj->madv != __MSM_MADV_PURGED) in msm_gem_madvise()
830 msm_obj->madv = madv; in msm_gem_madvise()
832 madv = msm_obj->madv; in msm_gem_madvise()
839 mutex_unlock(&priv->lru.lock); in msm_gem_madvise()
848 struct drm_device *dev = obj->dev; in msm_gem_purge()
849 struct msm_drm_private *priv = obj->dev->dev_private; in msm_gem_purge()
855 /* Get rid of any iommu mapping(s): */ in msm_gem_purge()
860 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); in msm_gem_purge()
866 mutex_lock(&priv->lru.lock); in msm_gem_purge()
867 /* A one-way transition: */ in msm_gem_purge()
868 msm_obj->madv = __MSM_MADV_PURGED; in msm_gem_purge()
869 mutex_unlock(&priv->lru.lock); in msm_gem_purge()
878 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); in msm_gem_purge()
880 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, in msm_gem_purge()
881 0, (loff_t)-1); in msm_gem_purge()
889 struct drm_device *dev = obj->dev; in msm_gem_evict()
895 /* Get rid of any iommu mapping(s): */ in msm_gem_evict()
898 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); in msm_gem_evict()
909 if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj))) in msm_gem_vunmap()
912 vunmap(msm_obj->vaddr); in msm_gem_vunmap()
913 msm_obj->vaddr = NULL; in msm_gem_vunmap()
920 if (to_msm_bo(obj)->pin_count) in msm_gem_active()
923 return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true)); in msm_gem_active()
934 dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write), in msm_gem_cpu_prep()
938 ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write), in msm_gem_cpu_prep()
941 return remain == 0 ? -EBUSY : -ETIMEDOUT; in msm_gem_cpu_prep()
961 struct dma_resv *robj = obj->resv; in msm_gem_describe()
963 uint64_t off = drm_vma_node_start(&obj->vma_node); in msm_gem_describe()
968 stats->all.count++; in msm_gem_describe()
969 stats->all.size += obj->size; in msm_gem_describe()
972 stats->active.count++; in msm_gem_describe()
973 stats->active.size += obj->size; in msm_gem_describe()
976 if (msm_obj->pages) { in msm_gem_describe()
977 stats->resident.count++; in msm_gem_describe()
978 stats->resident.size += obj->size; in msm_gem_describe()
981 switch (msm_obj->madv) { in msm_gem_describe()
983 stats->purged.count++; in msm_gem_describe()
984 stats->purged.size += obj->size; in msm_gem_describe()
988 stats->purgeable.count++; in msm_gem_describe()
989 stats->purgeable.size += obj->size; in msm_gem_describe()
999 msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I', in msm_gem_describe()
1000 obj->name, kref_read(&obj->refcount), in msm_gem_describe()
1001 off, msm_obj->vaddr); in msm_gem_describe()
1003 seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name); in msm_gem_describe()
1005 if (!list_empty(&msm_obj->vmas)) { in msm_gem_describe()
1009 list_for_each_entry(vma, &msm_obj->vmas, list) { in msm_gem_describe()
1011 if (vma->aspace) { in msm_gem_describe()
1012 struct msm_gem_address_space *aspace = vma->aspace; in msm_gem_describe()
1014 get_pid_task(aspace->pid, PIDTYPE_PID); in msm_gem_describe()
1016 comm = kstrdup(task->comm, GFP_KERNEL); in msm_gem_describe()
1021 name = aspace->name; in msm_gem_describe()
1027 vma->aspace, vma->iova, in msm_gem_describe()
1028 vma->mapped ? "mapped" : "unmapped"); in msm_gem_describe()
1046 struct drm_gem_object *obj = &msm_obj->base; in msm_gem_describe_objects()
1068 struct drm_device *dev = obj->dev; in msm_gem_free_object()
1069 struct msm_drm_private *priv = dev->dev_private; in msm_gem_free_object()
1071 mutex_lock(&priv->obj_lock); in msm_gem_free_object()
1072 list_del(&msm_obj->node); in msm_gem_free_object()
1073 mutex_unlock(&priv->obj_lock); in msm_gem_free_object()
1077 if (obj->import_attach) { in msm_gem_free_object()
1078 GEM_WARN_ON(msm_obj->vaddr); in msm_gem_free_object()
1083 kvfree(msm_obj->pages); in msm_gem_free_object()
1087 drm_prime_gem_destroy(obj, msm_obj->sgt); in msm_gem_free_object()
1096 kfree(msm_obj->metadata); in msm_gem_free_object()
1105 vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags)); in msm_gem_object_mmap()
1128 /* drop reference from allocate - handle holds it now */ in msm_gem_new_handle()
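Editor's note: the comment above refers to the standard GEM ownership handoff: the allocation returns an object holding one reference, drm_gem_handle_create() takes its own reference for the handle, and the caller then drops the allocation reference whether or not handle creation succeeded. A minimal sketch of that flow (not the verbatim body of msm_gem_new_handle()):

    /* Editor's sketch of the usual handle-creation flow. */
    static int new_handle_sketch(struct drm_device *dev, struct drm_file *file,
                                 uint32_t size, uint32_t flags, uint32_t *handle)
    {
            struct drm_gem_object *obj;
            int ret;

            obj = msm_gem_new(dev, size, flags);            /* comes back with one reference */
            if (IS_ERR(obj))
                    return PTR_ERR(obj);

            ret = drm_gem_handle_create(file, obj, handle); /* handle takes its own reference */

            drm_gem_object_put(obj);                        /* drop the allocation ref either way */

            return ret;
    }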
1139 if (msm_obj->pages) in msm_gem_status()
1142 if (msm_obj->madv == MSM_MADV_DONTNEED) in msm_gem_status()
1172 struct msm_drm_private *priv = dev->dev_private; in msm_gem_new_impl()
1180 if (priv->has_cached_coherent) in msm_gem_new_impl()
1184 DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n", in msm_gem_new_impl()
1186 return -EINVAL; in msm_gem_new_impl()
1191 return -ENOMEM; in msm_gem_new_impl()
1193 msm_obj->flags = flags; in msm_gem_new_impl()
1194 msm_obj->madv = MSM_MADV_WILLNEED; in msm_gem_new_impl()
1196 INIT_LIST_HEAD(&msm_obj->node); in msm_gem_new_impl()
1197 INIT_LIST_HEAD(&msm_obj->vmas); in msm_gem_new_impl()
1199 *obj = &msm_obj->base; in msm_gem_new_impl()
1200 (*obj)->funcs = &msm_gem_object_funcs; in msm_gem_new_impl()
1207 struct msm_drm_private *priv = dev->dev_private; in msm_gem_new()
1217 else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size) in msm_gem_new()
1220 if (GEM_WARN_ON(use_vram && !priv->vram.size)) in msm_gem_new()
1221 return ERR_PTR(-EINVAL); in msm_gem_new()
1227 return ERR_PTR(-EINVAL); in msm_gem_new()
1250 to_msm_bo(obj)->vram_node = &vma->node; in msm_gem_new()
1260 vma->iova = physaddr(obj); in msm_gem_new()
1271 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); in msm_gem_new()
1274 drm_gem_lru_move_tail(&priv->lru.unbacked, obj); in msm_gem_new()
1276 mutex_lock(&priv->obj_lock); in msm_gem_new()
1277 list_add_tail(&msm_obj->node, &priv->objects); in msm_gem_new()
1278 mutex_unlock(&priv->obj_lock); in msm_gem_new()
1294 struct msm_drm_private *priv = dev->dev_private; in msm_gem_import()
1300 /* if we don't have IOMMU, don't bother pretending we can import: */ in msm_gem_import()
1302 DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n"); in msm_gem_import()
1303 return ERR_PTR(-EINVAL); in msm_gem_import()
1306 size = PAGE_ALIGN(dmabuf->size); in msm_gem_import()
1318 msm_obj->sgt = sgt; in msm_gem_import()
1319 msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); in msm_gem_import()
1320 if (!msm_obj->pages) { in msm_gem_import()
1322 ret = -ENOMEM; in msm_gem_import()
1326 ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages); in msm_gem_import()
1334 drm_gem_lru_move_tail(&priv->lru.pinned, obj); in msm_gem_import()
1336 mutex_lock(&priv->obj_lock); in msm_gem_import()
1337 list_add_tail(&msm_obj->node, &priv->objects); in msm_gem_import()
1338 mutex_unlock(&priv->obj_lock); in msm_gem_import()
1406 vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap); in msm_gem_object_set_name()