Lines matching "per-device" in kernel/dma/coherent.c
1 // SPDX-License-Identifier: GPL-2.0
3 * Coherent per-device memory handling.
10 #include <linux/dma-direct.h>
11 #include <linux/dma-map-ops.h>
25 static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev) in dev_get_coherent_memory()
27 if (dev && dev->dma_mem) in dev_get_coherent_memory()
28 return dev->dma_mem; in dev_get_coherent_memory()
32 static inline dma_addr_t dma_get_device_base(struct device *dev, in dma_get_device_base()
35 if (mem->use_dev_dma_pfn_offset) in dma_get_device_base()
36 return phys_to_dma(dev, PFN_PHYS(mem->pfn_base)); in dma_get_device_base()
37 return mem->device_base; in dma_get_device_base()
51 ret = -EINVAL; in dma_init_coherent_memory()
57 ret = -EINVAL; in dma_init_coherent_memory()
62 ret = -ENOMEM; in dma_init_coherent_memory()
65 dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); in dma_init_coherent_memory()
66 if (!dma_mem->bitmap) { in dma_init_coherent_memory()
67 ret = -ENOMEM; in dma_init_coherent_memory()
71 dma_mem->virt_base = mem_base; in dma_init_coherent_memory()
72 dma_mem->device_base = device_addr; in dma_init_coherent_memory()
73 dma_mem->pfn_base = PFN_DOWN(phys_addr); in dma_init_coherent_memory()
74 dma_mem->size = pages; in dma_init_coherent_memory()
75 spin_lock_init(&dma_mem->spinlock); in dma_init_coherent_memory()
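The elided lines above size the allocator's bookkeeping from the region itself: one bitmap bit per page. A minimal sketch of that arithmetic (not the verbatim source; `pages` and `bitmap_size` follow the names visible in the fragments):

	/* One bit tracks one PAGE_SIZE unit; a 1 MiB pool with 4 KiB pages
	 * therefore needs 256 bits, rounded up to whole longs for the
	 * kzalloc() seen above. */
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);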
92 memunmap(mem->virt_base); in dma_release_coherent_memory()
93 kfree(mem->bitmap); in dma_release_coherent_memory()
97 static int dma_assign_coherent_memory(struct device *dev, in dma_assign_coherent_memory()
101 return -ENODEV; in dma_assign_coherent_memory()
103 if (dev->dma_mem) in dma_assign_coherent_memory()
104 return -EBUSY; in dma_assign_coherent_memory()
106 dev->dma_mem = mem; in dma_assign_coherent_memory()
112 * is asked for coherent memory for this device. This shall only be used
113 * from platform code, usually based on the device tree description.
118 * device_addr is the DMA address the device needs to be programmed with to
125 * be declared per device.
127 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, in dma_declare_coherent_memory()
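Per the comment fragments above, phys_addr is the CPU physical address of the region, device_addr is the address the device itself must be programmed with, and only one such region may be declared per device. A minimal sketch of a platform-code caller, assuming hypothetical FOO_SRAM_PHYS / FOO_SRAM_DMA_ADDR constants and a made-up foo_probe():

	#include <linux/dma-map-ops.h>
	#include <linux/platform_device.h>
	#include <linux/sizes.h>

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		/* Hand a 1 MiB on-chip SRAM region to the DMA layer; later
		 * dma_alloc_coherent() calls on this device draw from it. */
		ret = dma_declare_coherent_memory(&pdev->dev, FOO_SRAM_PHYS,
						  FOO_SRAM_DMA_ADDR, SZ_1M);
		if (ret)
			return ret;

		return 0;
	}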
143 static void *__dma_alloc_from_coherent(struct device *dev, in __dma_alloc_from_coherent()
152 spin_lock_irqsave(&mem->spinlock, flags); in __dma_alloc_from_coherent()
154 if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT))) in __dma_alloc_from_coherent()
157 pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); in __dma_alloc_from_coherent()
166 ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT); in __dma_alloc_from_coherent()
167 spin_unlock_irqrestore(&mem->spinlock, flags); in __dma_alloc_from_coherent()
171 spin_unlock_irqrestore(&mem->spinlock, flags); in __dma_alloc_from_coherent()
176 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
177 * @dev: device from which we allocate memory
183 * This function should be only called from per-arch dma_alloc_coherent()
184 * to support allocation from per-device coherent memory pools.
189 int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, in dma_alloc_from_dev_coherent()
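The return convention matters here: non-zero means the device has a per-device pool and the value left in *ret (possibly NULL on pool exhaustion) is final, while zero tells the caller to fall back to its normal allocator. A sketch of that calling pattern, loosely modeled on the generic allocation path (arch_alloc_example() is hypothetical; dma_direct_alloc() stands in for the fallback):

	void *arch_alloc_example(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp)
	{
		void *vaddr;

		/* Pool exists: the result is final, even if NULL. */
		if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
			return vaddr;

		/* No per-device pool: allocate from the usual path. */
		return dma_direct_alloc(dev, size, dma_handle, gfp, 0);
	}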
201 void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, in dma_alloc_from_global_coherent()
214 if (mem && vaddr >= mem->virt_base && vaddr < in __dma_release_from_coherent()
215 (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) { in __dma_release_from_coherent()
216 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; in __dma_release_from_coherent()
219 spin_lock_irqsave(&mem->spinlock, flags); in __dma_release_from_coherent()
220 bitmap_release_region(mem->bitmap, page, order); in __dma_release_from_coherent()
221 spin_unlock_irqrestore(&mem->spinlock, flags); in __dma_release_from_coherent()
228 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
229 * @dev: device from which the memory was allocated
233 * This checks whether the memory was allocated from the per-device
239 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) in dma_release_from_dev_coherent()
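The free side mirrors the allocation side: a non-zero return means the buffer belonged to the device's pool and has already been released, zero means the caller still owns the free. A sketch of the matching free path (arch_free_example() is hypothetical):

	void arch_free_example(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle)
	{
		int order = get_order(size);

		/* Non-zero iff vaddr came from dev's coherent pool. */
		if (dma_release_from_dev_coherent(dev, order, vaddr))
			return;

		dma_direct_free(dev, size, vaddr, dma_handle, 0);
	}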
258 if (mem && vaddr >= mem->virt_base && vaddr + size <= in __dma_mmap_from_coherent()
259 (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) { in __dma_mmap_from_coherent()
260 unsigned long off = vma->vm_pgoff; in __dma_mmap_from_coherent()
261 int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; in __dma_mmap_from_coherent()
265 *ret = -ENXIO; in __dma_mmap_from_coherent()
266 if (off < count && user_count <= count - off) { in __dma_mmap_from_coherent()
267 unsigned long pfn = mem->pfn_base + start + off; in __dma_mmap_from_coherent()
268 *ret = remap_pfn_range(vma, vma->vm_start, pfn, in __dma_mmap_from_coherent()
270 vma->vm_page_prot); in __dma_mmap_from_coherent()
278 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
279 * @dev: device from which the memory was allocated
285 * This checks whether the memory was allocated from the per-device
288 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
292 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, in dma_mmap_from_dev_coherent()
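As with the alloc/release pair, the caller first offers the mapping to the per-device pool; when the helper returns 1 it has already performed (or refused) the remap and *ret carries the result. A sketch of that hand-off (arch_mmap_example() is hypothetical):

	int arch_mmap_example(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, size_t size)
	{
		int ret = -ENXIO;

		/* Returns 1 when cpu_addr belongs to dev's pool; *ret then
		 * holds remap_pfn_range()'s result or -ENXIO. */
		if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
			return ret;

		return -ENXIO;	/* not pool memory: generic mmap path goes here */
	}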
311 * Support for reserved memory regions defined in device tree
320 static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) in rmem_dma_device_init()
322 struct dma_coherent_mem *mem = rmem->priv; in rmem_dma_device_init()
326 ret = dma_init_coherent_memory(rmem->base, rmem->base, in rmem_dma_device_init()
327 rmem->size, &mem); in rmem_dma_device_init()
330 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_dma_device_init()
334 mem->use_dev_dma_pfn_offset = true; in rmem_dma_device_init()
335 rmem->priv = mem; in rmem_dma_device_init()
341 struct device *dev) in rmem_dma_device_release()
344 dev->dma_mem = NULL; in rmem_dma_device_release()
354 unsigned long node = rmem->fdt_node; in rmem_dma_setup()
357 return -EINVAL; in rmem_dma_setup()
360 if (!of_get_flat_dt_prop(node, "no-map", NULL)) { in rmem_dma_setup()
361 pr_err("Reserved memory: regions without no-map are not yet supported\n"); in rmem_dma_setup()
362 return -EINVAL; in rmem_dma_setup()
365 if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) { in rmem_dma_setup()
372 rmem->ops = &rmem_dma_ops; in rmem_dma_setup()
374 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_dma_setup()
384 return -ENOMEM; in dma_init_reserved_memory()
386 ops = dma_reserved_default_memory->ops; in dma_init_reserved_memory()
390 * dma_assign_coherent_memory() for "NULL" device. in dma_init_reserved_memory()
392 ret = ops->device_init(dma_reserved_default_memory, NULL); in dma_init_reserved_memory()
395 dma_coherent_default_memory = dma_reserved_default_memory->priv; in dma_init_reserved_memory()
404 RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
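For completeness, a hypothetical device-tree fragment of the kind rmem_dma_setup() and rmem_dma_device_init() above would accept: the addresses are made up, "no-map" is mandatory per the check above, and adding "linux,dma-default" to the pool node would instead make it the global fallback handled by dma_init_reserved_memory().

	reserved-memory {
		#address-cells = <1>;
		#size-cells = <1>;
		ranges;

		dma_pool: shared-dma-pool@48000000 {
			compatible = "shared-dma-pool";
			reg = <0x48000000 0x100000>;	/* 1 MiB */
			no-map;
		};
	};

	some_device@40000000 {
		/* assigns the pool to this device via rmem_dma_device_init() */
		memory-region = <&dma_pool>;
	};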