Lines Matching full:cma
15 #define pr_fmt(fmt) "cma: " fmt
26 #include <linux/cma.h>
30 #include <trace/events/cma.h>
33 #include "cma.h"
35 struct cma cma_areas[MAX_CMA_AREAS];
41 bool fixed, const char *name, struct cma **res_cma,
44 phys_addr_t cma_get_base(const struct cma *cma) in cma_get_base() argument
46 WARN_ON_ONCE(cma->nranges != 1); in cma_get_base()
47 return PFN_PHYS(cma->ranges[0].base_pfn); in cma_get_base()
50 unsigned long cma_get_size(const struct cma *cma) in cma_get_size() argument
52 return cma->count << PAGE_SHIFT; in cma_get_size()
55 const char *cma_get_name(const struct cma *cma) in cma_get_name() argument
57 return cma->name; in cma_get_name()
60 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, in cma_bitmap_aligned_mask() argument
63 if (align_order <= cma->order_per_bit) in cma_bitmap_aligned_mask()
65 return (1UL << (align_order - cma->order_per_bit)) - 1; in cma_bitmap_aligned_mask()
72 static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, in cma_bitmap_aligned_offset() argument
77 >> cma->order_per_bit; in cma_bitmap_aligned_offset()
80 static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, in cma_bitmap_pages_to_bits() argument
83 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
86 static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr, in cma_clear_bitmap() argument
92 bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit; in cma_clear_bitmap()
93 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_clear_bitmap()
95 spin_lock_irqsave(&cma->lock, flags); in cma_clear_bitmap()
97 cma->available_count += count; in cma_clear_bitmap()
98 spin_unlock_irqrestore(&cma->lock, flags); in cma_clear_bitmap()
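The bitmap helpers above pack 2^order_per_bit pages into each bitmap bit. A minimal, runnable userspace sketch of that arithmetic, mirroring cma_bitmap_pages_to_bits() and cma_bitmap_aligned_mask() as shown in the matched lines (ALIGN() is open-coded here and the struct cma context is omitted; the sample numbers are illustrative):

	#include <stdio.h>

	/* Mirror of the kernel's ALIGN(): round x up to power-of-two boundary a. */
	#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

	/* One bitmap bit covers 2^order_per_bit pages (cma_bitmap_pages_to_bits()). */
	static unsigned long pages_to_bits(unsigned long pages, unsigned int order_per_bit)
	{
		return ALIGN(pages, 1UL << order_per_bit) >> order_per_bit;
	}

	/* Search mask in bitmap granules (cma_bitmap_aligned_mask()). */
	static unsigned long aligned_mask(unsigned int align_order, unsigned int order_per_bit)
	{
		if (align_order <= order_per_bit)
			return 0;
		return (1UL << (align_order - order_per_bit)) - 1;
	}

	int main(void)
	{
		/* 100 pages at order_per_bit = 2 need 25 bits; an order-4 (16-page)
		 * alignment request against that granularity yields mask 0x3. */
		printf("bits = %lu\n", pages_to_bits(100, 2));
		printf("mask = %#lx\n", aligned_mask(4, 2));
		return 0;
	}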
102 * Check if a CMA area contains no ranges that intersect with
106 bool cma_validate_zones(struct cma *cma) in cma_validate_zones() argument
119 valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags); in cma_validate_zones()
120 if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags)) in cma_validate_zones()
123 for (r = 0; r < cma->nranges; r++) { in cma_validate_zones()
124 cmr = &cma->ranges[r]; in cma_validate_zones()
130 * CMA resv range to be in the same zone. in cma_validate_zones()
133 if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) { in cma_validate_zones()
134 set_bit(CMA_ZONES_INVALID, &cma->flags); in cma_validate_zones()
139 set_bit(CMA_ZONES_VALID, &cma->flags); in cma_validate_zones()
144 static void __init cma_activate_area(struct cma *cma) in cma_activate_area() argument
151 for (allocrange = 0; allocrange < cma->nranges; allocrange++) { in cma_activate_area()
152 cmr = &cma->ranges[allocrange]; in cma_activate_area()
153 cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr), in cma_activate_area()
159 if (!cma_validate_zones(cma)) in cma_activate_area()
162 for (r = 0; r < cma->nranges; r++) { in cma_activate_area()
163 cmr = &cma->ranges[r]; in cma_activate_area()
166 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_activate_area()
175 spin_lock_init(&cma->lock); in cma_activate_area()
177 mutex_init(&cma->alloc_mutex); in cma_activate_area()
180 INIT_HLIST_HEAD(&cma->mem_head); in cma_activate_area()
181 spin_lock_init(&cma->mem_head_lock); in cma_activate_area()
183 set_bit(CMA_ACTIVATED, &cma->flags); in cma_activate_area()
189 bitmap_free(cma->ranges[r].bitmap); in cma_activate_area()
191 /* Expose all pages to the buddy, they are useless for CMA. */ in cma_activate_area()
192 if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) { in cma_activate_area()
194 cmr = &cma->ranges[r]; in cma_activate_area()
200 totalcma_pages -= cma->count; in cma_activate_area()
201 cma->available_count = cma->count = 0; in cma_activate_area()
202 pr_err("CMA area %s could not be activated\n", cma->name); in cma_activate_area()
216 void __init cma_reserve_pages_on_error(struct cma *cma) in cma_reserve_pages_on_error() argument
218 set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags); in cma_reserve_pages_on_error()
223 struct cma **res_cma) in cma_new_area()
225 struct cma *cma; in cma_new_area() local
228 pr_err("Not enough slots for CMA reserved regions!\n"); in cma_new_area()
236 cma = &cma_areas[cma_area_count]; in cma_new_area()
240 snprintf(cma->name, CMA_MAX_NAME, "%s", name); in cma_new_area()
242 snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); in cma_new_area()
244 cma->available_count = cma->count = size >> PAGE_SHIFT; in cma_new_area()
245 cma->order_per_bit = order_per_bit; in cma_new_area()
246 *res_cma = cma; in cma_new_area()
247 totalcma_pages += cma->count; in cma_new_area()
252 static void __init cma_drop_area(struct cma *cma) in cma_drop_area() argument
254 totalcma_pages -= cma->count; in cma_drop_area()
266 * @res_cma: Pointer to store the created cma region.
273 struct cma **res_cma) in cma_init_reserved_mem()
275 struct cma *cma; in cma_init_reserved_mem() local
283 * CMA uses CMA_MIN_ALIGNMENT_BYTES as alignment requirement which in cma_init_reserved_mem()
295 ret = cma_new_area(name, size, order_per_bit, &cma); in cma_init_reserved_mem()
299 cma->ranges[0].base_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
300 cma->ranges[0].early_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
301 cma->ranges[0].count = cma->count; in cma_init_reserved_mem()
302 cma->nranges = 1; in cma_init_reserved_mem()
303 cma->nid = NUMA_NO_NODE; in cma_init_reserved_mem()
305 *res_cma = cma; in cma_init_reserved_mem()
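cma_init_reserved_mem() above turns memory that is already carved out into a single-range CMA area; the usual caller is the devicetree reserved-memory path, which supplies the physical base and size. A simplified sketch in that style, assuming a hypothetical "my-vendor,my-cma" compatible and my_cma handle (the real hook lives in kernel/dma/contiguous.c and does additional alignment checks):

	#include <linux/init.h>
	#include <linux/cma.h>
	#include <linux/of_reserved_mem.h>

	static struct cma *my_cma;	/* hypothetical handle for the new area */

	static int __init my_rmem_cma_setup(struct reserved_mem *rmem)
	{
		int ret;

		/* Base and size come from the reserved-memory node;
		 * order_per_bit 0 means one bitmap bit per page. */
		ret = cma_init_reserved_mem(rmem->base, rmem->size, 0,
					    rmem->name, &my_cma);
		if (ret)
			pr_err("could not register %s as CMA: %d\n",
			       rmem->name, ret);
		return ret;
	}
	RESERVEDMEM_OF_DECLARE(my_cma_area, "my-vendor,my-cma", my_rmem_cma_setup);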
312 * which one(s) to use for a CMA area.
321 * Work array used during CMA initialization.
361 * Create CMA areas with a total size of @total_size. A normal allocation
371 const char *name, struct cma **res_cma, int nid) in cma_declare_contiguous_multi()
382 struct cma *cma; in cma_declare_contiguous_multi() local
412 ret = cma_new_area(name, total_size, order_per_bit, &cma); in cma_declare_contiguous_multi()
477 cma_drop_area(cma); in cma_declare_contiguous_multi()
498 * Walk the final list, and add a CMA range for in cma_declare_contiguous_multi()
518 cmrp = &cma->ranges[nr++]; in cma_declare_contiguous_multi()
535 cma_drop_area(cma); in cma_declare_contiguous_multi()
540 cma->nranges = nr; in cma_declare_contiguous_multi()
541 cma->nid = nid; in cma_declare_contiguous_multi()
542 *res_cma = cma; in cma_declare_contiguous_multi()
560 * @alignment: Alignment for the CMA area, should be power of 2 or zero
564 * @res_cma: Pointer to store the created cma region.
578 bool fixed, const char *name, struct cma **res_cma, in cma_declare_contiguous_nid()
598 bool fixed, const char *name, struct cma **res_cma, in __cma_declare_contiguous_nid()
619 pr_err("Not enough slots for CMA reserved regions!\n"); in __cma_declare_contiguous_nid()
685 * It will place the new cma area close to the start of the node in __cma_declare_contiguous_nid()
687 * cma area and not into it. in __cma_declare_contiguous_nid()
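A boot-time caller that does not yet own a physical range uses cma_declare_contiguous_nid() instead and lets CMA place the area. A minimal sketch, assuming a hypothetical my_cma handle and an illustrative 32 MiB size on node 0; it must run from early setup code, before the page allocator is up:

	#include <linux/init.h>
	#include <linux/cma.h>
	#include <linux/sizes.h>

	static struct cma *my_cma;	/* hypothetical handle */

	static int __init my_cma_declare(void)
	{
		/* base = 0, fixed = false: let CMA pick a suitable range;
		 * limit = 0: no extra upper bound; alignment and
		 * order_per_bit left at their minimum (0). */
		return cma_declare_contiguous_nid(0, SZ_32M, 0, 0, 0, false,
						  "my-area", &my_cma, 0);
	}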
739 static void cma_debug_show_areas(struct cma *cma) in cma_debug_show_areas() argument
748 spin_lock_irq(&cma->lock); in cma_debug_show_areas()
750 for (r = 0; r < cma->nranges; r++) { in cma_debug_show_areas()
751 cmr = &cma->ranges[r]; in cma_debug_show_areas()
754 nbits = cma_bitmap_maxno(cma, cmr); in cma_debug_show_areas()
765 nr_part = nr_zero << cma->order_per_bit; in cma_debug_show_areas()
772 pr_cont("=> %lu free of %lu total pages\n", cma->available_count, in cma_debug_show_areas()
773 cma->count); in cma_debug_show_areas()
774 spin_unlock_irq(&cma->lock); in cma_debug_show_areas()
777 static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr, in cma_range_alloc() argument
788 mask = cma_bitmap_aligned_mask(cma, align); in cma_range_alloc()
789 offset = cma_bitmap_aligned_offset(cma, cmr, align); in cma_range_alloc()
790 bitmap_maxno = cma_bitmap_maxno(cma, cmr); in cma_range_alloc()
791 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_range_alloc()
797 spin_lock_irq(&cma->lock); in cma_range_alloc()
802 if (count > cma->available_count) { in cma_range_alloc()
803 spin_unlock_irq(&cma->lock); in cma_range_alloc()
810 spin_unlock_irq(&cma->lock); in cma_range_alloc()
814 cma->available_count -= count; in cma_range_alloc()
820 spin_unlock_irq(&cma->lock); in cma_range_alloc()
822 pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit); in cma_range_alloc()
823 mutex_lock(&cma->alloc_mutex); in cma_range_alloc()
825 mutex_unlock(&cma->alloc_mutex); in cma_range_alloc()
831 cma_clear_bitmap(cma, cmr, pfn, count); in cma_range_alloc()
838 trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), in cma_range_alloc()
848 static struct page *__cma_alloc(struct cma *cma, unsigned long count, in __cma_alloc() argument
854 const char *name = cma ? cma->name : NULL; in __cma_alloc()
858 if (!cma || !cma->count) in __cma_alloc()
861 pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__, in __cma_alloc()
862 (void *)cma, cma->name, count, align); in __cma_alloc()
867 for (r = 0; r < cma->nranges; r++) { in __cma_alloc()
870 ret = cma_range_alloc(cma, &cma->ranges[r], count, align, in __cma_alloc()
877 * CMA can allocate multiple page blocks, which results in different in __cma_alloc()
888 __func__, cma->name, count, ret); in __cma_alloc()
889 cma_debug_show_areas(cma); in __cma_alloc()
897 cma_sysfs_account_success_pages(cma, count); in __cma_alloc()
900 cma_sysfs_account_fail_pages(cma, count); in __cma_alloc()
908 * @cma: Contiguous memory region for which the allocation is performed.
916 struct page *cma_alloc(struct cma *cma, unsigned long count, in cma_alloc() argument
919 return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0)); in cma_alloc()
922 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp) in cma_alloc_folio() argument
929 page = __cma_alloc(cma, 1 << order, order, gfp); in cma_alloc_folio()
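A consumer allocates whole pages from its area with cma_alloc() and must later return them with cma_release() (see the sketch after the release lines below). A minimal sketch, assuming a hypothetical my_grab_buffer() helper and a my_cma handle set up at boot:

	#include <linux/cma.h>
	#include <linux/mm.h>

	/* Allocate nr_pages physically contiguous pages from the area. */
	static struct page *my_grab_buffer(struct cma *my_cma, unsigned long nr_pages)
	{
		struct page *page;

		/* align = 0: no alignment beyond page granularity;
		 * no_warn = false: log on failure. */
		page = cma_alloc(my_cma, nr_pages, 0, false);
		if (!page)
			return NULL;	/* area exhausted or migration failed */

		/* These are ordinary struct pages; map or DMA-map as needed. */
		return page;
	}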
934 bool cma_pages_valid(struct cma *cma, const struct page *pages, in cma_pages_valid() argument
942 if (!cma || !pages || count > cma->count) in cma_pages_valid()
948 for (r = 0; r < cma->nranges; r++) { in cma_pages_valid()
949 cmr = &cma->ranges[r]; in cma_pages_valid()
966 * @cma: Contiguous memory region for which the allocation is performed.
974 bool cma_release(struct cma *cma, const struct page *pages, in cma_release() argument
983 if (!cma_pages_valid(cma, pages, count)) in cma_release()
989 for (r = 0; r < cma->nranges; r++) { in cma_release()
990 cmr = &cma->ranges[r]; in cma_release()
998 if (r == cma->nranges) in cma_release()
1002 cma_clear_bitmap(cma, cmr, pfn, count); in cma_release()
1003 cma_sysfs_account_release_pages(cma, count); in cma_release()
1004 trace_cma_release(cma->name, pfn, pages, count); in cma_release()
1009 bool cma_free_folio(struct cma *cma, const struct folio *folio) in cma_free_folio() argument
1014 return cma_release(cma, &folio->page, folio_nr_pages(folio)); in cma_free_folio()
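The folio variants pair the same way. A minimal round-trip sketch, assuming the same hypothetical my_cma handle and an illustrative order-4 allocation:

	#include <linux/cma.h>
	#include <linux/mm.h>

	static int my_folio_roundtrip(struct cma *my_cma)
	{
		/* Order-4 folio: 16 contiguous pages from the CMA area. */
		struct folio *folio = cma_alloc_folio(my_cma, 4, GFP_KERNEL);

		if (!folio)
			return -ENOMEM;

		/* ... use the folio ... */

		/* cma_free_folio() returns false if the folio does not
		 * belong to this area. */
		if (!cma_free_folio(my_cma, folio))
			return -EINVAL;
		return 0;
	}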
1017 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) in cma_for_each_area() argument
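cma_for_each_area() walks every registered area and is a natural place to use the cma_get_name()/cma_get_base()/cma_get_size() getters from the top of the file. A minimal sketch with a hypothetical my_print_area() callback (note that cma_get_base() warns once on multi-range areas, per the WARN_ON_ONCE above):

	#include <linux/cma.h>

	static int my_print_area(struct cma *cma, void *data)
	{
		phys_addr_t base = cma_get_base(cma);

		pr_info("cma %s: base %pa, size %lu bytes\n",
			cma_get_name(cma), &base, cma_get_size(cma));
		return 0;	/* returning non-zero stops the walk */
	}

	/* cma_for_each_area(my_print_area, NULL); */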
1031 bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end) in cma_intersects() argument
1037 for (r = 0; r < cma->nranges; r++) { in cma_intersects()
1038 cmr = &cma->ranges[r]; in cma_intersects()
1053 * Very basic function to reserve memory from a CMA area that has not
1060 * The CMA bitmaps have not yet been allocated, so just start
1067 * set the migrate type and CMA stats the pageblocks that were reserved.
1069 * If the CMA area fails to activate later, memory obtained through
1074 void __init *cma_reserve_early(struct cma *cma, unsigned long size) in cma_reserve_early() argument
1081 if (!cma || !cma->count) in cma_reserve_early()
1086 if (test_bit(CMA_ACTIVATED, &cma->flags)) in cma_reserve_early()
1092 if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit))) in cma_reserve_early()
1097 if (size > cma->available_count) in cma_reserve_early()
1100 for (r = 0; r < cma->nranges; r++) { in cma_reserve_early()
1101 cmr = &cma->ranges[r]; in cma_reserve_early()
1106 cma->available_count -= size; in cma_reserve_early()
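cma_reserve_early() hands out memory from an area that has been declared but not yet activated, as the comments above describe (hugetlb's early CMA reservation is an in-tree caller). A minimal sketch, assuming a hypothetical my_early_carveout() helper and an already-declared my_cma area; the 2 MiB size is illustrative and must satisfy the PAGE_SIZE << order_per_bit alignment check seen above:

	#include <linux/init.h>
	#include <linux/cma.h>
	#include <linux/sizes.h>

	/* Called during early boot, after the area has been declared but
	 * before the CMA areas are activated. */
	static void __init my_early_carveout(struct cma *my_cma)
	{
		void *buf;

		/* Size in bytes; must be a multiple of
		 * PAGE_SIZE << order_per_bit for this area. */
		buf = cma_reserve_early(my_cma, SZ_2M);
		if (!buf)
			pr_warn("early CMA reservation failed\n");
		/* See the comments above for what happens to this memory if
		 * the area fails to activate later. */
	}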