Lines Matching +full:reserved +full:- +full:memory

1 // SPDX-License-Identifier: GPL-2.0+
3 * Contiguous Memory Allocator for DMA mapping framework
4 * Copyright (c) 2010-2011 by Samsung Electronics.
9 * Contiguous Memory Allocator
11 * The Contiguous Memory Allocator (CMA) makes it possible to
12 * allocate big contiguous chunks of memory after the system has
17 * Various devices on embedded systems have no scatter-gather and/or
18 * IO map support and require contiguous blocks of memory to
22 * Such devices often require big memory buffers (a full HD frame
24 * MB of memory), which makes mechanisms such as kmalloc() or
27 * At the same time, a solution where a big memory region is
28 * reserved for a device is suboptimal since often more memory is
29 * reserved than strictly required and, moreover, the memory is
32 * CMA tries to solve this issue by operating on memory regions
34 * can use the memory for pagecache and when device driver requests
45 #include <linux/dma-map-ops.h>
61 * The size can be set in bytes or as a percentage of the total memory
68 static phys_addr_t size_cmdline __initdata = -1;
76 return -EINVAL; in early_cma()
83 if (*p != '-') { in early_cma()
203 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
204 * @limit: End address of the reserved memory (optional, 0 for any).
206 * This function reserves memory from early allocator. It should be
208 * has been activated and all other subsystems have already allocated/reserved
209 * memory.
222 if (size_cmdline != -1) { in dma_contiguous_reserve()
257 * dma_contiguous_reserve_area() - reserve custom contiguous area
258 * @size: Size of the reserved area (in bytes),
259 * @base: Base address of the reserved area optional, use 0 for any
260 * @limit: End address of the reserved memory (optional, 0 for any).
262 * @fixed: hint about where to place the reserved area
264 * This function reserves memory from early allocator. It should be
266 * has been activated and all other subsystems have already allocated/reserved
267 * memory. This function allows creating custom reserved areas for specific in dma_contiguous_reserve_area()
280 "reserved", res_cma); in dma_contiguous_reserve_area()
284 /* Architecture specific contiguous memory fixup. */ in dma_contiguous_reserve_area()
292 * dma_alloc_from_contiguous() - allocate pages from contiguous area
298 * This function allocates memory buffer for specified device. It uses
299 * device specific contiguous memory area if available or the default
313 * dma_release_from_contiguous() - release allocated pages
318 * This function releases memory allocated by dma_alloc_from_contiguous().
336 * dma_alloc_contiguous() - allocate contiguous pages
341 * tries to use device specific contiguous memory area if available, or it
342 * tries to use per-numa cma, if the allocation fails, it will fallback to
345 * Note that it bypasses one-page size of allocations from the per-numa and in dma_alloc_contiguous()
359 if (dev->cma_area) in dma_alloc_contiguous()
360 return cma_alloc_aligned(dev->cma_area, size, gfp); in dma_alloc_contiguous()
390 * dma_free_contiguous() - release allocated pages
395 * This function releases memory allocated by dma_alloc_contiguous(). As the
398 * upon a false-return.
405 if (dev->cma_area) { in dma_free_contiguous()
406 if (cma_release(dev->cma_area, page, count)) in dma_free_contiguous()
410 * otherwise, page is from either per-numa cma or default cma in dma_free_contiguous()
429 * Support for reserved memory regions defined in device tree
441 dev->cma_area = rmem->priv; in rmem_cma_device_init()
448 dev->cma_area = NULL; in rmem_cma_device_release()
458 unsigned long node = rmem->fdt_node; in rmem_cma_setup()
459 bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL); in rmem_cma_setup()
463 if (size_cmdline != -1 && default_cma) { in rmem_cma_setup()
464 pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n", in rmem_cma_setup()
465 rmem->name); in rmem_cma_setup()
466 return -EBUSY; in rmem_cma_setup()
470 of_get_flat_dt_prop(node, "no-map", NULL)) in rmem_cma_setup()
471 return -EINVAL; in rmem_cma_setup()
473 if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) { in rmem_cma_setup()
474 pr_err("Reserved memory: incorrect alignment of CMA region\n"); in rmem_cma_setup()
475 return -EINVAL; in rmem_cma_setup()
478 err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma); in rmem_cma_setup()
480 pr_err("Reserved memory: unable to setup CMA region\n"); in rmem_cma_setup()
483 /* Architecture specific contiguous memory fixup. */ in rmem_cma_setup()
484 dma_contiguous_early_fixup(rmem->base, rmem->size); in rmem_cma_setup()
489 rmem->ops = &rmem_cma_ops; in rmem_cma_setup()
490 rmem->priv = cma; in rmem_cma_setup()
492 pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n", in rmem_cma_setup()
493 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_cma_setup()
497 RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);