/linux/mm/

cma_debug.c

    /* CMA DebugFS Interface */

    #include <linux/cma.h>
    #include "cma.h"

    /* in cma_used_get() */
    struct cma *cma = data;

    spin_lock_irq(&cma->lock);
    *val = cma->count - cma->available_count;
    spin_unlock_irq(&cma->lock);

    /* in cma_maxchunk_get() */
    struct cma *cma = data;
    ...
    spin_lock_irq(&cma->lock);
    for (r = 0; r < cma->nranges; r++) {
    [all …]

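Both getters follow the standard debugfs u64-attribute pattern: take the area lock, compute the value, and return it through *val. A minimal sketch of how such a getter is typically exposed, assuming DEFINE_DEBUGFS_ATTRIBUTE wiring and a hypothetical registration helper (the "used" file name is illustrative):

    #include <linux/debugfs.h>
    #include <linux/spinlock.h>
    #include "cma.h"

    /* Compute "pages currently allocated" under the area lock and hand
     * it back through *val, per the debugfs u64-attribute convention. */
    static int cma_used_get(void *data, u64 *val)
    {
        struct cma *cma = data;

        spin_lock_irq(&cma->lock);
        *val = cma->count - cma->available_count;
        spin_unlock_irq(&cma->lock);
        return 0;
    }
    DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

    /* Hypothetical registration helper: expose the getter as a
     * read-only "used" file in the per-area debugfs directory. */
    static void cma_debugfs_add_used(struct cma *cma, struct dentry *dir)
    {
        debugfs_create_file_unsafe("used", 0444, dir, cma, &cma_used_fops);
    }

Reading <debugfs>/cma/<name>/used then reports how many pages are currently allocated from the area.
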
cma.c

    #define pr_fmt(fmt) "cma: " fmt

    #include <linux/cma.h>
    #include <trace/events/cma.h>
    #include "cma.h"

    struct cma cma_areas[MAX_CMA_AREAS];

    phys_addr_t cma_get_base(const struct cma *cma)
    {
        WARN_ON_ONCE(cma->nranges != 1);
        return PFN_PHYS(cma->ranges[0].base_pfn);
    }

    unsigned long cma_get_size(const struct cma *cma)
    {
        return cma->count << PAGE_SHIFT;
    }
    [all …]

cma_sysfs.c

    /* CMA SysFS Interface */

    #include <linux/cma.h>
    #include "cma.h"

    void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages)
    {
        atomic64_add(nr_pages, &cma->nr_pages_succeeded);
    }

    void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages)
    {
        atomic64_add(nr_pages, &cma->nr_pages_failed);
    }

    void cma_sysfs_account_release_pages(struct cma *cma, unsigned long nr_pages)
    {
        atomic64_add(nr_pages, &cma->nr_pages_released);
    }

    static inline struct cma *cma_from_kobj(struct kobject *kobj)
    [all …]

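The counters updated above are read back through kobject attributes under /sys/kernel/mm/cma/<name>/ (see the ABI entry further down). A hedged sketch of what a matching show() routine could look like, reusing the truncated cma_from_kobj() helper from this file:

    #include <linux/atomic.h>
    #include <linux/kobject.h>
    #include <linux/sysfs.h>
    #include "cma.h"

    /* Render the success counter for
     * /sys/kernel/mm/cma/<name>/alloc_pages_success. */
    static ssize_t alloc_pages_success_show(struct kobject *kobj,
                                            struct kobj_attribute *attr,
                                            char *buf)
    {
        struct cma *cma = cma_from_kobj(kobj);

        return sysfs_emit(buf, "%llu\n",
                          (u64)atomic64_read(&cma->nr_pages_succeeded));
    }
    static struct kobj_attribute alloc_pages_success_attr =
        __ATTR_RO(alloc_pages_success);
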
cma.h

    struct cma *cma;        /* struct member */

    struct cma {
        ...
        /* the number of successful CMA page allocations */
        ...
        /* the number of CMA page allocation failures */
        ...
        /* the number of CMA pages released */
        ...
    };

    extern struct cma cma_areas[MAX_CMA_AREAS];

    static inline unsigned long cma_bitmap_maxno(struct cma *cma,
                                                 struct cma_memrange *cmr)
    {
        return cmr->count >> cma->order_per_bit;
    }

    void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages);
    void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages);
    [all …]

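The shift in cma_bitmap_maxno() is the whole sizing rule: one bitmap bit tracks 2^order_per_bit pages, so a range of count pages needs count >> order_per_bit bits. A standalone toy calculation (userspace C; the numbers are illustrative, not taken from any real area):

    #include <stdio.h>

    /* One bitmap bit tracks 2^order_per_bit pages, so a range of
     * `count` pages needs count >> order_per_bit bits. */
    static unsigned long bitmap_maxno(unsigned long count,
                                      unsigned int order_per_bit)
    {
        return count >> order_per_bit;
    }

    int main(void)
    {
        /* e.g. a 64 MiB range of 4 KiB pages = 16384 pages; with
         * order_per_bit = 2, each bit covers 4 pages -> 4096 bits. */
        printf("%lu\n", bitmap_maxno(16384, 2));
        return 0;
    }
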
hugetlb_cma.c

    #include <linux/cma.h>

    static struct cma *hugetlb_cma[MAX_NUMNODES];

    /* in hugetlb_cma_alloc_bootmem() */
    struct cma *cma;

    cma = hugetlb_cma[*nid];
    m = cma_reserve_early(cma, huge_page_size(h));
    ...
    cma = hugetlb_cma[node];
    if (!cma || node == *nid)
        ...
    m = cma_reserve_early(cma, huge_page_size(h));
    ...
    m->cma = cma;

    /* in hugetlb_cma_reserve():
     * HugeTLB CMA reservation is required for gigantic ... */
    [all …]

Kconfig

        depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
    ...
        def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
    ...
    config CMA
        ...
          CMA reserves a region of memory and allows only movable pages to
        ...

    config CMA_DEBUGFS
        bool "CMA debugfs interface"
        depends on CMA && DEBUG_FS
        help
          Turns on the DebugFS interface for CMA.

    config CMA_SYSFS
        bool "CMA information through sysfs interface"
        depends on CMA && SYSFS
        ...
          from CMA.
    [all …]

/linux/include/linux/

cma.h

    struct cma;

    extern phys_addr_t cma_get_base(const struct cma *cma);
    extern unsigned long cma_get_size(const struct cma *cma);
    extern const char *cma_get_name(const struct cma *cma);
    ...
            bool fixed, const char *name, struct cma **res_cma,
    ...
    /* in cma_declare_contiguous() */
            bool fixed, const char *name, struct cma **res_cma)
    ...
            const char *name, struct cma **res_cma, int nid);
    ...
            struct cma **res_cma);
    extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
            bool no_warn);
    extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
    [all …]

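This header is the consumer-facing contract: declare an area once during early boot, then allocate and release page ranges from it at runtime (cma_release() is also declared here, past the truncation). A hedged sketch of that lifecycle, where the area name "example", the 16 MiB size, and the helper names are all illustrative:

    #include <linux/cma.h>
    #include <linux/init.h>
    #include <linux/sizes.h>

    static struct cma *example_cma;

    /* Must run early, while memblock is still available: carve out a
     * 16 MiB movable-only region anywhere below the physical limit. */
    static int __init example_cma_setup(void)
    {
        return cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
                                      "example", &example_cma);
    }

    /* Runtime: take and return physically contiguous page ranges. */
    static struct page *example_grab(unsigned long nr_pages)
    {
        return cma_alloc(example_cma, nr_pages, 0, false);
    }

    static void example_put(struct page *pages, unsigned long nr_pages)
    {
        cma_release(example_cma, pages, nr_pages);
    }
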
dma-map-ops.h

    struct cma;
    ...
    extern struct cma *dma_contiguous_default_area;

    static inline struct cma *dev_get_cma_area(struct device *dev)
    ...
            phys_addr_t limit, struct cma **res_cma, bool fixed);
    ...
    /* stub variant when CONFIG_DMA_CMA is disabled */
    static inline struct cma *dev_get_cma_area(struct device *dev)
    ...
    /* in dma_contiguous_reserve_area() */
            phys_addr_t base, phys_addr_t limit, struct cma **res_cma,

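The two dev_get_cma_area() variants above are the selection point between a device-private area and the global default. A simplified sketch of the fallback shape the CONFIG_DMA_CMA variant implements (dev->cma_area only exists in struct device when CONFIG_DMA_CMA is enabled; the function name here is illustrative):

    #include <linux/device.h>
    #include <linux/dma-map-ops.h>

    /* Prefer a CMA area attached to the device itself; otherwise fall
     * back to the global default area set up at boot. */
    static struct cma *example_get_cma_area(struct device *dev)
    {
        if (dev && dev->cma_area)
            return dev->cma_area;
        return dma_contiguous_default_area;
    }
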
/linux/Documentation/ABI/testing/

sysfs-kernel-mm-cma

    What:           /sys/kernel/mm/cma/
    Description:
                /sys/kernel/mm/cma/ contains a subdirectory for each CMA
                heap name (also sometimes called a CMA area).

                Each CMA heap subdirectory (that is, each
                /sys/kernel/mm/cma/<cma-heap-name> directory) contains the
                ...

    What:           /sys/kernel/mm/cma/<cma-heap-name>/alloc_pages_success
    Description:
                the number of pages the CMA API succeeded in allocating

    What:           /sys/kernel/mm/cma/<cma-heap-name>/alloc_pages_fail
    Description:
                the number of pages the CMA API failed to allocate

    What:           /sys/kernel/mm/cma/<cma-heap-name>/release_pages_success
    [all …]

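These counters are plain text files, so reading them from userspace needs nothing special. A small standalone sketch; the heap name "reserved" is an assumption, so substitute whatever directory actually exists under /sys/kernel/mm/cma/ on the target system:

    #include <stdio.h>

    int main(void)
    {
        /* Heap name is illustrative; list /sys/kernel/mm/cma/ to see
         * which areas are present on this system. */
        const char *path =
            "/sys/kernel/mm/cma/reserved/alloc_pages_success";
        unsigned long long pages;
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fscanf(f, "%llu", &pages) == 1)
            printf("alloc_pages_success: %llu\n", pages);
        fclose(f);
        return 0;
    }
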
/linux/kernel/dma/

contiguous.c

    /*
     * The Contiguous Memory Allocator (CMA) makes it possible to ...
     * ...
     * CMA tries to solve this issue by operating on memory regions ...
     */

    #define pr_fmt(fmt) "cma: " fmt

    #include <linux/cma.h>

    struct cma *dma_contiguous_default_area;

    /*
     * The default global CMA area size can be defined in the kernel's
     * .config. ...
     * Users who want to set the size of the global CMA area for their
     * system should use the cma= kernel parameter.
     */

    early_param("cma", early_cma);

    static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
    [all …]

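early_param() runs its handler during command-line parsing, long before initcalls, which is how a boot-time request like cma=64M reaches the reservation path. A hedged sketch of the handler shape (the size_cmdline variable and memparse-only parsing are simplifications; the real handler also accepts additional syntax):

    #include <linux/init.h>
    #include <linux/kernel.h>

    static phys_addr_t size_cmdline __initdata = -1;

    /* Parse "cma=<size>[MG]" from the kernel command line. Returning
     * -EINVAL rejects a missing argument. */
    static int __init early_cma(char *p)
    {
        if (!p)
            return -EINVAL;
        size_cmdline = memparse(p, &p);
        return 0;
    }
    early_param("cma", early_cma);
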
Kconfig

        depends on HAVE_DMA_CONTIGUOUS && CMA
        ...
          You can disable CMA by specifying "cma=0" on the kernel's command
          line.
        ...
          Enable this option to get per-NUMA CMA areas, so that NUMA devices
          ...
          You can set the size of the per-NUMA CMA with "cma_pernuma=size",
          or set a node id and its CMA size with "numa_cma= ...
        ...
          Memory Allocator. If a size of 0 is selected, CMA is disabled by
          default, but it can be enabled by passing cma=size[MG] to the
          kernel.
        ...
          If 0 percent is selected, CMA is disabled by default, but it can
          be enabled by passing cma=size[MG] to the kernel.

pool.c

    #include <linux/cma.h>

    /* in cma_in_zone() */
    struct cma *cma;

    cma = dev_get_cma_area(NULL);
    if (!cma)
        ...
    size = cma_get_size(cma);
    ...
    /* CMA can't cross zone boundaries, see cma_activate_area() */
    end = cma_get_base(cma) + size - 1;

/linux/Documentation/admin-guide/mm/

cma_debugfs.rst

    CMA Debugfs Interface
    =====================

    The CMA debugfs interface is useful for retrieving basic information
    about the different CMA areas and for testing allocation/release in
    each of the areas.

    Each CMA area is a directory under <debugfs>/cma/, named after the
    area:

        <debugfs>/cma/<cma_name>

    - [RO] base_pfn: The base PFN (Page Frame Number) of the CMA area.
    - [RO] count: Amount of memory in the CMA area.
    - ... in the CMA area.
    - ... range N in the CMA area.
    [all …]

/linux/drivers/dma-buf/heaps/

Kconfig

        bool "DMA-BUF CMA Heap"
        ...
          Choose this option to enable the dma-buf CMA heap. This heap is
          backed by the Contiguous Memory Allocator (CMA). If your system
          has these ...

        bool "Legacy DMA-BUF CMA Heap"
        ...
          Add a duplicate CMA-backed dma-buf heap with legacy naming
          derived from the CMA area's devicetree node, or "reserved" if
          the area is not

cma_heap.c

    /* DMABUF CMA heap exporter */

    #include <linux/cma.h>

    struct cma *cma;        /* member of the heap struct */

    /* in cma_heap_dma_buf_release() */
    cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);

    /* in cma_heap_allocate() */
    cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
    ...
    /* Clear the cma pages */
    ...
    cma_release(cma_heap->cma, cma_pages, pagecount);

    static int __init __add_cma_heap(struct cma *cma, const char *name)
    ...
    cma_heap->cma = cma;

    /* in add_default_cma_heap() */
    struct cma *default_cma = dev_get_cma_area(NULL);
    [all …]

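The alloc/release pairing above is the entire lifecycle of a heap buffer's backing memory: cma_alloc() when the buffer is created, cma_release() on the dma-buf's final put or on the error-unwind path. A condensed sketch of that shape (struct names and the page-clearing loop are simplifications of the real exporter):

    #include <linux/cma.h>
    #include <linux/highmem.h>

    /* Simplified backing-memory lifecycle for a CMA-backed heap. */
    struct example_heap { struct cma *cma; };

    static struct page *example_buffer_alloc(struct example_heap *heap,
                                             unsigned long pagecount,
                                             unsigned int align)
    {
        struct page *pages;
        unsigned long i;

        pages = cma_alloc(heap->cma, pagecount, align, false);
        if (!pages)
            return NULL;

        /* Clear the pages so stale data never reaches userspace. */
        for (i = 0; i < pagecount; i++)
            clear_highpage(pages + i);

        return pages;
    }

    static void example_buffer_free(struct example_heap *heap,
                                    struct page *pages,
                                    unsigned long pagecount)
    {
        cma_release(heap->cma, pages, pagecount);
    }
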
/linux/Documentation/userspace-api/

dma-buf-heaps.rst

    - The ``cma`` heap allocates physically contiguous, cacheable
      buffers. It is only present if a CMA region is available. Such a
      region is ... via the ``cma`` kernel parameter, a memory-region
      Device-Tree node with the ``linux,cma-default`` property set, or
      through the ``CMA_SIZE_MBYTES`` or ...
      ... ``reserved``, ``linux,cma``, or ``default-pool``.

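Allocation from the ``cma`` heap goes through the dma-heap character device with DMA_HEAP_IOCTL_ALLOC, which hands the buffer back as a dma-buf file descriptor. A minimal standalone sketch (the 4 MiB length is illustrative, and /dev/dma_heap/cma only exists when the heap is enabled on the running kernel):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/dma-heap.h>

    int main(void)
    {
        struct dma_heap_allocation_data alloc = {
            .len = 4 * 1024 * 1024,     /* illustrative size */
            .fd_flags = O_RDWR | O_CLOEXEC,
        };
        int heap = open("/dev/dma_heap/cma", O_RDONLY | O_CLOEXEC);

        if (heap < 0) {
            perror("/dev/dma_heap/cma");
            return 1;
        }
        if (ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
            perror("DMA_HEAP_IOCTL_ALLOC");
            close(heap);
            return 1;
        }
        printf("got dma-buf fd %u\n", alloc.fd);
        close(alloc.fd);
        close(heap);
        return 0;
    }
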
/linux/Documentation/driver-api/cxl/allocation/

hugepages.rst

    CXL memory onlined as SystemRAM during early boot is eligible for use
    by CMA, as the NUMA node hosting that capacity will be `Online` at
    the time CMA ...
    ... capacity allocated by CMA, as the NUMA node hosting the capacity
    is `Offline` at :code:`__init` time, when CMA carves out contiguous
    capacity.

/linux/kernel/

crash_reserve.c

    #include <linux/cma.h>

    [SUFFIX_CMA] = ",cma",

    /* crashkernel=size,[high|low|cma] */

    /* in parse_crashkernel(): optional CMA reservation */

    /* in reserve_crashkernel_cma() */
    struct cma *res;
    ...
    pr_warn("crashkernel CMA reservation failed: %lld MB requested, %lld MB reserved in %d ranges\n", ...);
    ...
    pr_info("crashkernel CMA reserved: %lld MB in %d ranges\n", ...);
    ...
    pr_warn("crashkernel CMA reservation not supported\n");

/linux/arch/s390/mm/

init.c

    #include <linux/cma.h>

    /* Prevent memory blocks which contain cma regions from going offline */
    ...
    static int s390_cma_check_range(struct cma *cma, void *data)
    {
        ...
        if (cma_intersects(cma, mem_data->start, mem_data->end))
        ...

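The truncated callback above is the per-area half of an offline veto: a memory-hotplug notifier walks every CMA area and refuses the offline if any area intersects the block being removed. A hedged sketch of how such a check can be driven, using the real cma_for_each_area() and cma_intersects() helpers (the notifier wiring and the mem_data layout here are simplified assumptions):

    #include <linux/cma.h>
    #include <linux/memory.h>
    #include <linux/mm.h>
    #include <linux/notifier.h>

    struct example_mem_data {
        unsigned long start;
        unsigned long end;
    };

    /* Returning non-zero stops the walk as soon as one area intersects. */
    static int example_cma_check_range(struct cma *cma, void *data)
    {
        struct example_mem_data *mem_data = data;

        if (cma_intersects(cma, mem_data->start, mem_data->end))
            return -EBUSY;
        return 0;
    }

    static int example_mem_notifier(struct notifier_block *nb,
                                    unsigned long action, void *arg)
    {
        struct memory_notify *mn = arg;
        struct example_mem_data mem_data;
        int rc = 0;

        if (action == MEM_GOING_OFFLINE) {
            mem_data.start = mn->start_pfn << PAGE_SHIFT;
            mem_data.end = mem_data.start +
                           ((unsigned long)mn->nr_pages << PAGE_SHIFT);
            rc = cma_for_each_area(example_cma_check_range, &mem_data);
        }
        return notifier_from_errno(rc);
    }
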
/linux/Documentation/driver-api/cxl/linux/

early-boot.rst

    The contiguous memory allocator (CMA) enables reservation of
    contiguous memory regions on NUMA nodes during early boot. However,
    CMA cannot reserve memory ...
    ... This means that if users intend to defer management of CXL memory
    to the driver, CMA ...
    ... SystemRAM in `ZONE_NORMAL` during early boot, per-node CMA
    reservations can be

/linux/arch/arm64/boot/dts/cix/

sky1-orion-o6.dts

    linux,cma {
        ...
        linux,cma-default;
        ...
    };

/linux/Documentation/core-api/kho/

concepts.rst

    The scratch regions are declared as CMA when the page allocator is
    initialized, so that their memory can be used during the system's
    lifetime. CMA gives us the ...
    ... must be at a static physical memory location, and CMA enforces
    that only

/linux/arch/xtensa/boot/dts/

kc705.dts

    linux,cma {
        ...
        linux,cma-default;
        ...
    };

/linux/arch/loongarch/boot/dts/

loongson-2k0500-ref.dts

    linux,cma {
        ...
        linux,cma-default;
        ...
    };

/linux/Documentation/devicetree/bindings/media/

nuvoton,npcm-vcd.yaml

    CMA pool to use for buffer allocation instead of the default CMA
    pool.