Lines Matching +full:align +full:- +full:end

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (c) 2010-2011 by Samsung Electronics.
46 WARN_ON_ONCE(cma->nranges != 1); in cma_get_base()
47 return PFN_PHYS(cma->ranges[0].base_pfn); in cma_get_base()
52 return cma->count << PAGE_SHIFT; in cma_get_size()
57 return cma->name; in cma_get_name()
63 if (align_order <= cma->order_per_bit) in cma_bitmap_aligned_mask()
65 return (1UL << (align_order - cma->order_per_bit)) - 1; in cma_bitmap_aligned_mask()
76 return (cmr->base_pfn & ((1UL << align_order) - 1)) in cma_bitmap_aligned_offset()
77 >> cma->order_per_bit; in cma_bitmap_aligned_offset()
83 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
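
The three bitmap helpers matched above express everything in units of 1 << order_per_bit pages: cma_bitmap_aligned_mask() builds the alignment mask later fed to bitmap_find_next_zero_area_off(), and cma_bitmap_pages_to_bits() rounds a page count up to whole bits. A worked sketch of that arithmetic only (not part of mm/cma.c), with assumed values order_per_bit = 2 and a 16-page-aligned, 32-page request:

#include <linux/align.h>
#include <linux/printk.h>

/* Worked example only: the same arithmetic the helpers above perform. */
static void example_cma_bitmap_math(void)
{
	unsigned long order_per_bit = 2;	/* one bitmap bit covers 4 pages */
	unsigned long align_order = 4;		/* alignment order: 2^4 = 16 pages */
	unsigned long pages = 32;		/* request size, in pages */

	/* cma_bitmap_aligned_mask(): low bits that must be clear -> 0x3 */
	unsigned long mask = (1UL << (align_order - order_per_bit)) - 1;

	/* cma_bitmap_pages_to_bits(): pages rounded up to whole bits -> 8 */
	unsigned long bits = ALIGN(pages, 1UL << order_per_bit) >> order_per_bit;

	pr_info("mask=%#lx bits=%lu\n", mask, bits);
}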
92 bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit; in cma_clear_bitmap()
95 spin_lock_irqsave(&cma->lock, flags); in cma_clear_bitmap()
96 bitmap_clear(cmr->bitmap, bitmap_no, bitmap_count); in cma_clear_bitmap()
97 cma->available_count += count; in cma_clear_bitmap()
98 spin_unlock_irqrestore(&cma->lock, flags); in cma_clear_bitmap()
119 valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags); in cma_validate_zones()
120 if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags)) in cma_validate_zones()
123 for (r = 0; r < cma->nranges; r++) { in cma_validate_zones()
124 cmr = &cma->ranges[r]; in cma_validate_zones()
125 base_pfn = cmr->base_pfn; in cma_validate_zones()
133 if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) { in cma_validate_zones()
134 set_bit(CMA_ZONES_INVALID, &cma->flags); in cma_validate_zones()
139 set_bit(CMA_ZONES_VALID, &cma->flags); in cma_validate_zones()
151 for (allocrange = 0; allocrange < cma->nranges; allocrange++) { in cma_activate_area()
152 cmr = &cma->ranges[allocrange]; in cma_activate_area()
153 cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr), in cma_activate_area()
155 if (!cmr->bitmap) in cma_activate_area()
162 for (r = 0; r < cma->nranges; r++) { in cma_activate_area()
163 cmr = &cma->ranges[r]; in cma_activate_area()
164 if (cmr->early_pfn != cmr->base_pfn) { in cma_activate_area()
165 count = cmr->early_pfn - cmr->base_pfn; in cma_activate_area()
167 bitmap_set(cmr->bitmap, 0, bitmap_count); in cma_activate_area()
170 for (pfn = cmr->early_pfn; pfn < cmr->base_pfn + cmr->count; in cma_activate_area()
175 spin_lock_init(&cma->lock); in cma_activate_area()
177 mutex_init(&cma->alloc_mutex); in cma_activate_area()
180 INIT_HLIST_HEAD(&cma->mem_head); in cma_activate_area()
181 spin_lock_init(&cma->mem_head_lock); in cma_activate_area()
183 set_bit(CMA_ACTIVATED, &cma->flags); in cma_activate_area()
189 bitmap_free(cma->ranges[r].bitmap); in cma_activate_area()
192 if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) { in cma_activate_area()
194 cmr = &cma->ranges[r]; in cma_activate_area()
195 end_pfn = cmr->base_pfn + cmr->count; in cma_activate_area()
196 for (pfn = cmr->early_pfn; pfn < end_pfn; pfn++) in cma_activate_area()
200 totalcma_pages -= cma->count; in cma_activate_area()
201 cma->available_count = cma->count = 0; in cma_activate_area()
202 pr_err("CMA area %s could not be activated\n", cma->name); in cma_activate_area()
218 set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags); in cma_reserve_pages_on_error()
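
cma_reserve_pages_on_error() only sets CMA_RESERVE_PAGES_ON_ERROR, so the cma_activate_area() error path matched above skips freeing the pages back to the buddy allocator. A sketch of a caller that wants the physical range kept reserved even if activation fails; mydev_cma and the wrapper are made-up names:

#include <linux/cma.h>

static struct cma *mydev_cma;	/* hypothetical, set up by earlier reservation code */

/* Keep the range reserved rather than released if activation later fails. */
static void __init mydev_keep_range_on_error(void)
{
	cma_reserve_pages_on_error(mydev_cma);
}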
229 return -ENOSPC; in cma_new_area()
240 snprintf(cma->name, CMA_MAX_NAME, "%s", name); in cma_new_area()
242 snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); in cma_new_area()
244 cma->available_count = cma->count = size >> PAGE_SHIFT; in cma_new_area()
245 cma->order_per_bit = order_per_bit; in cma_new_area()
247 totalcma_pages += cma->count; in cma_new_area()
254 totalcma_pages -= cma->count; in cma_drop_area()
255 cma_area_count--; in cma_drop_area()
259 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
280 return -EINVAL; in cma_init_reserved_mem()
288 return -EINVAL; in cma_init_reserved_mem()
293 return -EINVAL; in cma_init_reserved_mem()
299 cma->ranges[0].base_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
300 cma->ranges[0].early_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
301 cma->ranges[0].count = cma->count; in cma_init_reserved_mem()
302 cma->nranges = 1; in cma_init_reserved_mem()
303 cma->nid = NUMA_NO_NODE; in cma_init_reserved_mem()
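
cma_init_reserved_mem() turns an already-reserved physical range into a single-range CMA area (nranges = 1, nid = NUMA_NO_NODE). A sketch loosely modeled on the reserved-memory setup path; my_rmem_setup and my_cma are assumptions, and base/size must already be CMA_MIN_ALIGNMENT_BYTES aligned:

#include <linux/cma.h>

static struct cma *my_cma;	/* hypothetical */

/* Error handling trimmed; a non-zero return means the range was rejected. */
static int __init my_rmem_setup(phys_addr_t base, phys_addr_t size)
{
	return cma_init_reserved_mem(base, size, 0, "my-region", &my_cma);
}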
328 return mlp->size > mrp->size; in revsizecmp()
334 return mlp->base < mrp->base; in basecmp()
349 list_add(&mrp->list, ranges); in list_insert_sorted()
356 __list_add(&mrp->list, mlp->list.prev, &mlp->list); in list_insert_sorted()
370 phys_addr_t align, unsigned int order_per_bit, in cma_declare_contiguous_multi() argument
373 phys_addr_t start = 0, end; in cma_declare_contiguous_multi() local
387 ret = __cma_declare_contiguous_nid(&start, total_size, 0, align, in cma_declare_contiguous_multi()
389 if (ret != -ENOMEM) in cma_declare_contiguous_multi()
400 * - @align is a power of 2 in cma_declare_contiguous_multi()
401 * - @align is >= pageblock alignment in cma_declare_contiguous_multi()
402 * - @size is aligned to @align and to @order_per_bit in cma_declare_contiguous_multi()
405 * aligned to @align, and a size that is aligned to in cma_declare_contiguous_multi()
406 * both @align and @order_per_bit, things will work out. in cma_declare_contiguous_multi()
416 align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES); in cma_declare_contiguous_multi()
420 for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) { in cma_declare_contiguous_multi()
424 start = ALIGN(start, align); in cma_declare_contiguous_multi()
425 if (start >= end) in cma_declare_contiguous_multi()
428 end = ALIGN_DOWN(end, align); in cma_declare_contiguous_multi()
429 if (end <= start) in cma_declare_contiguous_multi()
432 size = end - start; in cma_declare_contiguous_multi()
438 pr_debug("consider %016llx - %016llx\n", (u64)start, (u64)end); in cma_declare_contiguous_multi()
446 * not, re-use the smallest element. in cma_declare_contiguous_multi()
453 if (size < mrp->size) in cma_declare_contiguous_multi()
455 list_del(&mrp->list); in cma_declare_contiguous_multi()
456 sizesum -= mrp->size; in cma_declare_contiguous_multi()
457 pr_debug("deleted %016llx - %016llx from the list\n", in cma_declare_contiguous_multi()
458 (u64)mrp->base, (u64)mrp->base + size); in cma_declare_contiguous_multi()
460 mrp->base = start; in cma_declare_contiguous_multi()
461 mrp->size = size; in cma_declare_contiguous_multi()
467 pr_debug("added %016llx - %016llx to the list\n", in cma_declare_contiguous_multi()
468 (u64)mrp->base, (u64)mrp->base + size); in cma_declare_contiguous_multi()
478 ret = -ENOMEM; in cma_declare_contiguous_multi()
485 * want to mimic a bottom-up memblock allocation. in cma_declare_contiguous_multi()
492 sizesum += mlp->size; in cma_declare_contiguous_multi()
505 size = min(sizeleft, mlp->size); in cma_declare_contiguous_multi()
506 if (memblock_reserve(mlp->base, size)) { in cma_declare_contiguous_multi()
516 pr_debug("created region %d: %016llx - %016llx\n", in cma_declare_contiguous_multi()
517 nr, (u64)mlp->base, (u64)mlp->base + size); in cma_declare_contiguous_multi()
518 cmrp = &cma->ranges[nr++]; in cma_declare_contiguous_multi()
519 cmrp->base_pfn = PHYS_PFN(mlp->base); in cma_declare_contiguous_multi()
520 cmrp->early_pfn = cmrp->base_pfn; in cma_declare_contiguous_multi()
521 cmrp->count = size >> PAGE_SHIFT; in cma_declare_contiguous_multi()
523 sizeleft -= size; in cma_declare_contiguous_multi()
533 memblock_phys_free(mlp->base, mlp->size); in cma_declare_contiguous_multi()
536 ret = -ENOMEM; in cma_declare_contiguous_multi()
540 cma->nranges = nr; in cma_declare_contiguous_multi()
541 cma->nid = nid; in cma_declare_contiguous_multi()
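
cma_declare_contiguous_multi() stitches the area together from several memblock ranges when a single range of the full size cannot be found; the comment matched above spells out the alignment contract (@align a power of two and at least pageblock alignment, @size aligned to both @align and @order_per_bit). A rough sketch of a boot-time caller, assuming the recent-kernel argument order (size, align, order_per_bit, name, res_cma, nid); my_reserve_big_area and big_cma are made-up names:

#include <linux/cma.h>
#include <linux/printk.h>

static struct cma *big_cma;	/* hypothetical */

/* The request may be satisfied from several discontiguous memblock ranges. */
static void __init my_reserve_big_area(phys_addr_t size, int nid)
{
	int err;

	err = cma_declare_contiguous_multi(size, CMA_MIN_ALIGNMENT_BYTES, 0,
					   "my-multi", &big_cma, nid);
	if (err)
		pr_warn("my-multi: CMA reservation failed: %d\n", err);
}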
556 * cma_declare_contiguous_nid() - reserve custom contiguous area
559 * @limit: End address of the reserved memory (optional, 0 for any).
612 highmem_start = __pa(high_memory - 1) + 1; in __cma_declare_contiguous_nid()
620 return -ENOSPC; in __cma_declare_contiguous_nid()
624 return -EINVAL; in __cma_declare_contiguous_nid()
627 return -EINVAL; in __cma_declare_contiguous_nid()
634 if (fixed && base & (alignment - 1)) { in __cma_declare_contiguous_nid()
637 return -EINVAL; in __cma_declare_contiguous_nid()
639 base = ALIGN(base, alignment); in __cma_declare_contiguous_nid()
640 size = ALIGN(size, alignment); in __cma_declare_contiguous_nid()
641 limit &= ~(alignment - 1); in __cma_declare_contiguous_nid()
648 return -EINVAL; in __cma_declare_contiguous_nid()
657 return -EINVAL; in __cma_declare_contiguous_nid()
661 * If the limit is unspecified or above the memblock end, its effective in __cma_declare_contiguous_nid()
662 * value will be the memblock end. Set it explicitly to simplify further in __cma_declare_contiguous_nid()
671 return -EINVAL; in __cma_declare_contiguous_nid()
678 return -EBUSY; in __cma_declare_contiguous_nid()
684 * If there is enough memory, try a bottom-up allocation first. in __cma_declare_contiguous_nid()
716 return -ENOMEM; in __cma_declare_contiguous_nid()
733 (*res_cma)->nid = nid; in __cma_declare_contiguous_nid()
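
__cma_declare_contiguous_nid() is the worker behind cma_declare_contiguous_nid(): it clamps an unspecified limit to the memblock end, keeps the area out of highmem, and tries a bottom-up allocation first when there is enough low memory. A sketch of the public wrapper reserving one area below a DMA limit, loosely modeled on dma_contiguous_reserve_area(); my_dma_reserve and dma_area are assumptions:

#include <linux/cma.h>
#include <linux/numa.h>

static struct cma *dma_area;	/* hypothetical */

/*
 * base = 0 and fixed = false let memblock choose the placement; an
 * alignment of 0 is raised to CMA_MIN_ALIGNMENT_BYTES internally.
 */
static int __init my_dma_reserve(phys_addr_t size, phys_addr_t limit)
{
	return cma_declare_contiguous_nid(0, size, limit, 0, 0, false,
					  "my-dma", &dma_area, NUMA_NO_NODE);
}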
748 spin_lock_irq(&cma->lock); in cma_debug_show_areas()
750 for (r = 0; r < cma->nranges; r++) { in cma_debug_show_areas()
751 cmr = &cma->ranges[r]; in cma_debug_show_areas()
758 next_zero_bit = find_next_zero_bit(cmr->bitmap, in cma_debug_show_areas()
762 next_set_bit = find_next_bit(cmr->bitmap, nbits, in cma_debug_show_areas()
764 nr_zero = next_set_bit - next_zero_bit; in cma_debug_show_areas()
765 nr_part = nr_zero << cma->order_per_bit; in cma_debug_show_areas()
772 pr_cont("=> %lu free of %lu total pages\n", cma->available_count, in cma_debug_show_areas()
773 cma->count); in cma_debug_show_areas()
774 spin_unlock_irq(&cma->lock); in cma_debug_show_areas()
778 unsigned long count, unsigned int align, in cma_range_alloc() argument
782 unsigned long pfn = -1; in cma_range_alloc()
785 int ret = -EBUSY; in cma_range_alloc()
788 mask = cma_bitmap_aligned_mask(cma, align); in cma_range_alloc()
789 offset = cma_bitmap_aligned_offset(cma, cmr, align); in cma_range_alloc()
797 spin_lock_irq(&cma->lock); in cma_range_alloc()
802 if (count > cma->available_count) { in cma_range_alloc()
803 spin_unlock_irq(&cma->lock); in cma_range_alloc()
806 bitmap_no = bitmap_find_next_zero_area_off(cmr->bitmap, in cma_range_alloc()
810 spin_unlock_irq(&cma->lock); in cma_range_alloc()
813 bitmap_set(cmr->bitmap, bitmap_no, bitmap_count); in cma_range_alloc()
814 cma->available_count -= count; in cma_range_alloc()
820 spin_unlock_irq(&cma->lock); in cma_range_alloc()
822 pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit); in cma_range_alloc()
823 mutex_lock(&cma->alloc_mutex); in cma_range_alloc()
825 mutex_unlock(&cma->alloc_mutex); in cma_range_alloc()
832 if (ret != -EBUSY) in cma_range_alloc()
838 trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), in cma_range_alloc()
839 count, align); in cma_range_alloc()
849 unsigned int align, gfp_t gfp) in __cma_alloc() argument
852 int ret = -ENOMEM, r; in __cma_alloc()
854 const char *name = cma ? cma->name : NULL; in __cma_alloc()
856 trace_cma_alloc_start(name, count, align); in __cma_alloc()
858 if (!cma || !cma->count) in __cma_alloc()
861 pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__, in __cma_alloc()
862 (void *)cma, cma->name, count, align); in __cma_alloc()
867 for (r = 0; r < cma->nranges; r++) { in __cma_alloc()
870 ret = cma_range_alloc(cma, &cma->ranges[r], count, align, in __cma_alloc()
872 if (ret != -EBUSY || page) in __cma_alloc()
887 pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n", in __cma_alloc()
888 __func__, cma->name, count, ret); in __cma_alloc()
894 page, count, align, ret); in __cma_alloc()
907 * cma_alloc() - allocate pages from contiguous area
910 * @align: Requested alignment of pages (in PAGE_SIZE order).
917 unsigned int align, bool no_warn) in cma_alloc() argument
919 return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0)); in cma_alloc()
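
cma_alloc() walks the ranges in order and retries a range on -EBUSY, and @align is given as a PAGE_SIZE order rather than in bytes. A small usage sketch pairing it with cma_release(); the wrapper names are made up:

#include <linux/cma.h>
#include <linux/mm.h>

/* Hypothetical wrappers showing the allocation/release pairing. */
static struct page *my_grab_buffer(struct cma *cma, unsigned long npages)
{
	/* @align is a PAGE_SIZE order; 0 means no alignment beyond one page */
	return cma_alloc(cma, npages, 0, false);
}

static void my_put_buffer(struct cma *cma, struct page *page,
			  unsigned long npages)
{
	/* cma_release() returns false if the pages are not part of @cma */
	WARN_ON(!cma_release(cma, page, npages));
}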
937 unsigned long pfn, end; in cma_pages_valid() local
942 if (!cma || !pages || count > cma->count) in cma_pages_valid()
948 for (r = 0; r < cma->nranges; r++) { in cma_pages_valid()
949 cmr = &cma->ranges[r]; in cma_pages_valid()
950 end = cmr->base_pfn + cmr->count; in cma_pages_valid()
951 if (pfn >= cmr->base_pfn && pfn < end) { in cma_pages_valid()
952 ret = pfn + count <= end; in cma_pages_valid()
965 * cma_release() - release allocated pages
989 for (r = 0; r < cma->nranges; r++) { in cma_release()
990 cmr = &cma->ranges[r]; in cma_release()
991 if (pfn >= cmr->base_pfn && in cma_release()
992 pfn < (cmr->base_pfn + cmr->count)) { in cma_release()
993 VM_BUG_ON(end_pfn > cmr->base_pfn + cmr->count); in cma_release()
998 if (r == cma->nranges) in cma_release()
1004 trace_cma_release(cma->name, pfn, pages, count); in cma_release()
1014 return cma_release(cma, &folio->page, folio_nr_pages(folio)); in cma_free_folio()
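
cma_free_folio() is the folio-flavoured release and, as the matched line shows, simply forwards to cma_release(). A sketch assuming the companion allocator cma_alloc_folio(cma, order, gfp) that recent kernels provide; the wrapper names are made up:

#include <linux/cma.h>
#include <linux/mm.h>

/* Hypothetical wrappers for the folio-based interface. */
static struct folio *my_grab_folio(struct cma *cma, int order)
{
	return cma_alloc_folio(cma, order, GFP_KERNEL);
}

static void my_put_folio(struct cma *cma, struct folio *folio)
{
	/* forwards to cma_release(&folio->page, folio_nr_pages(folio)) */
	WARN_ON(!cma_free_folio(cma, folio));
}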
1031 bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end) in cma_intersects() argument
1037 for (r = 0; r < cma->nranges; r++) { in cma_intersects()
1038 cmr = &cma->ranges[r]; in cma_intersects()
1040 rstart = PFN_PHYS(cmr->base_pfn); in cma_intersects()
1041 rend = PFN_PHYS(cmr->base_pfn + cmr->count); in cma_intersects()
1042 if (end < rstart) in cma_intersects()
1055 * system is single-threaded, so there is no locking. The alignment
1056 * checking is restrictive - only pageblock-aligned areas
1065 * in the area properly, since this just points to memblock-allocated
1071 * the responsibility of the caller (e.g. like normal memblock-allocated
1081 if (!cma || !cma->count) in cma_reserve_early()
1086 if (test_bit(CMA_ACTIVATED, &cma->flags)) in cma_reserve_early()
1092 if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit))) in cma_reserve_early()
1097 if (size > cma->available_count) in cma_reserve_early()
1100 for (r = 0; r < cma->nranges; r++) { in cma_reserve_early()
1101 cmr = &cma->ranges[r]; in cma_reserve_early()
1102 available = cmr->count - (cmr->early_pfn - cmr->base_pfn); in cma_reserve_early()
1104 ret = phys_to_virt(PFN_PHYS(cmr->early_pfn)); in cma_reserve_early()
1105 cmr->early_pfn += size; in cma_reserve_early()
1106 cma->available_count -= size; in cma_reserve_early()
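
cma_reserve_early() hands out memory from a CMA area before it has been activated, which is why the comment above stresses that there is no locking and that the result must be treated like ordinary memblock-allocated memory. A rough sketch of a boot-time caller inside mm/, assuming the prototype void *cma_reserve_early(struct cma *cma, unsigned long size) with @size in bytes and order_per_bit small enough that four pages satisfy the alignment check; my_early_carve_out is made up:

/* Memory comes straight out of the memblock-reserved range. */
static void __init my_early_carve_out(struct cma *cma)
{
	/* @size must be a multiple of PAGE_SIZE << cma->order_per_bit */
	void *buf = cma_reserve_early(cma, 4 * PAGE_SIZE);

	if (!buf)
		pr_warn("early CMA carve-out failed\n");
}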