Lines matching full:order in mm/page_alloc.c (each hit shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark hits on parameter and local-variable declarations)
217 static void __free_pages_ok(struct page *page, unsigned int order,
292 static bool page_contains_unaccepted(struct page *page, unsigned int order);
293 static bool cond_accept_memory(struct zone *zone, unsigned int order,
319 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
321 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
329 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
514 static inline unsigned int order_to_pindex(int migratetype, int order) in order_to_pindex() argument
519 if (order > PAGE_ALLOC_COSTLY_ORDER) { in order_to_pindex()
520 VM_BUG_ON(order != HPAGE_PMD_ORDER); in order_to_pindex()
527 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in order_to_pindex()
530 return (MIGRATE_PCPTYPES * order) + migratetype; in order_to_pindex()
535 int order = pindex / MIGRATE_PCPTYPES; in pindex_to_order() local
539 order = HPAGE_PMD_ORDER; in pindex_to_order()
541 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in pindex_to_order()
544 return order; in pindex_to_order()
547 static inline bool pcp_allowed_order(unsigned int order) in pcp_allowed_order() argument
549 if (order <= PAGE_ALLOC_COSTLY_ORDER) in pcp_allowed_order()
552 if (order == HPAGE_PMD_ORDER) in pcp_allowed_order()
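
As an aside on the mapping above: order_to_pindex() packs (order, migratetype) into a single per-CPU list index and pindex_to_order() reverses it. Below is a minimal userspace sketch of the round trip, leaving out the THP special case; the MIGRATE_PCPTYPES and PAGE_ALLOC_COSTLY_ORDER values are assumptions for illustration only.

/* Minimal sketch of the order <-> pindex mapping (THP case omitted). */
#include <stdio.h>

#define MIGRATE_PCPTYPES        3   /* assumed value, illustration only */
#define PAGE_ALLOC_COSTLY_ORDER 3   /* assumed value, illustration only */

static int order_to_pindex(int migratetype, int order)
{
        return MIGRATE_PCPTYPES * order + migratetype;
}

static int pindex_to_order(int pindex)
{
        return pindex / MIGRATE_PCPTYPES;
}

int main(void)
{
        for (int order = 0; order <= PAGE_ALLOC_COSTLY_ORDER; order++)
                for (int mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
                        int pindex = order_to_pindex(mt, order);

                        printf("order %d, mt %d -> pindex %d -> order %d\n",
                               order, mt, pindex, pindex_to_order(pindex));
                }
        return 0;
}
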
559 * Higher-order pages are called "compound pages". They are structured thusly:
566 * The first tail page's ->compound_order holds the order of allocation.
567 * This usage means that zero-order pages may not be compound.
570 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
573 int nr_pages = 1 << order; in prep_compound_page()
579 prep_compound_head(page, order); in prep_compound_page()
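
A minimal userspace model of the compound-page layout described in the comment above: every tail page points back at the head, and the first tail page records the allocation order, which is why an order-0 page (having no tail) cannot be compound. This is a simplified sketch, not the real struct page layout.

#include <stdio.h>

struct toy_page {
        struct toy_page *head;  /* set on tail pages */
        unsigned int order;     /* meaningful on the first tail page only */
};

static void prep_compound(struct toy_page *page, unsigned int order)
{
        unsigned long nr = 1UL << order;

        for (unsigned long i = 1; i < nr; i++)
                page[i].head = &page[0];
        page[1].order = order;  /* the "->compound_order" of the comment */
}

static unsigned int compound_order(struct toy_page *head)
{
        return head[1].order;   /* needs a tail page: order-0 can't be compound */
}

int main(void)
{
        static struct toy_page pages[8];

        prep_compound(pages, 3);
        printf("compound order: %u\n", compound_order(&pages[0]));
        return 0;
}
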
582 static inline void set_buddy_order(struct page *page, unsigned int order) in set_buddy_order() argument
584 set_page_private(page, order); in set_buddy_order()
601 int order, int migratetype) in compaction_capture() argument
603 if (!capc || order != capc->cc->order) in compaction_capture()
612 * Do not let lower order allocations pollute a movable pageblock in compaction_capture()
616 * have trouble finding a high-order free page. in compaction_capture()
618 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE && in compaction_capture()
623 trace_mm_page_alloc_extfrag(page, capc->cc->order, order, in compaction_capture()
638 int order, int migratetype) in compaction_capture() argument
663 unsigned int order, int migratetype, in __add_to_free_list() argument
666 struct free_area *area = &zone->free_area[order]; in __add_to_free_list()
667 int nr_pages = 1 << order; in __add_to_free_list()
679 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) in __add_to_free_list()
689 unsigned int order, int old_mt, int new_mt) in move_to_free_list() argument
691 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
692 int nr_pages = 1 << order; in move_to_free_list()
704 if (order >= pageblock_order && in move_to_free_list()
713 unsigned int order, int migratetype) in __del_page_from_free_list() argument
715 int nr_pages = 1 << order; in __del_page_from_free_list()
728 zone->free_area[order].nr_free--; in __del_page_from_free_list()
730 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) in __del_page_from_free_list()
735 unsigned int order, int migratetype) in del_page_from_free_list() argument
737 __del_page_from_free_list(page, zone, order, migratetype); in del_page_from_free_list()
738 account_freepages(zone, -(1 << order), migratetype); in del_page_from_free_list()
750 * of the next-higher order is free. If it is, it's possible
754 * as a 2-level higher order page
758 struct page *page, unsigned int order) in buddy_merge_likely() argument
763 if (order >= MAX_PAGE_ORDER - 1) in buddy_merge_likely()
769 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, in buddy_merge_likely()
786 * free pages of length of (1 << order) and marked with PageBuddy.
787 * Page's order is recorded in page_private(page) field.
799 struct zone *zone, unsigned int order, in __free_one_page() argument
812 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
815 account_freepages(zone, 1 << order, migratetype); in __free_one_page()
817 while (order < MAX_PAGE_ORDER) { in __free_one_page()
820 if (compaction_capture(capc, page, order, migratetype)) { in __free_one_page()
821 account_freepages(zone, -(1 << order), migratetype); in __free_one_page()
825 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); in __free_one_page()
829 if (unlikely(order >= pageblock_order)) { in __free_one_page()
846 * merge with it and move up one order. in __free_one_page()
849 clear_page_guard(zone, buddy, order); in __free_one_page()
851 __del_page_from_free_list(buddy, zone, order, buddy_mt); in __free_one_page()
865 order++; in __free_one_page()
869 set_buddy_order(page, order); in __free_one_page()
873 else if (is_shuffle_order(order)) in __free_one_page()
876 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); in __free_one_page()
878 __add_to_free_list(page, zone, order, migratetype, to_tail); in __free_one_page()
882 page_reporting_notify_free(order); in __free_one_page()
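
The merge loop in __free_one_page() (and the look-ahead in buddy_merge_likely()) hinges on the buddy of an order-n block being the block whose pfn differs only in bit n. Below is a minimal userspace sketch of that loop, with the free lists reduced to one flag per (pfn, order); it is an illustration, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_ORDER 10
#define TOY_NR_PAGES  (1UL << TOY_MAX_ORDER)

static bool free_map[TOY_NR_PAGES][TOY_MAX_ORDER + 1]; /* free_map[pfn][order] */

static void free_one(unsigned long pfn, unsigned int order)
{
        while (order < TOY_MAX_ORDER) {
                unsigned long buddy = pfn ^ (1UL << order);

                if (!free_map[buddy][order])
                        break;                  /* buddy busy: stop merging */
                free_map[buddy][order] = false; /* take the buddy off its list */
                pfn &= buddy;                   /* merged block starts at the lower pfn */
                order++;
        }
        free_map[pfn][order] = true;            /* list the block at its final order */
        printf("free block: pfn %lu, order %u\n", pfn, order);
}

int main(void)
{
        free_one(2, 0);  /* stays an order-0 block at pfn 2 */
        free_one(3, 0);  /* merges with pfn 2 into an order-1 block at pfn 2 */
        return 0;
}
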
1172 unsigned int order) in free_pages_prepare() argument
1182 trace_mm_page_free(page, order); in free_pages_prepare()
1183 kmsan_free_page(page, order); in free_pages_prepare()
1186 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1202 if (unlikely(PageHWPoison(page)) && !order) { in free_pages_prepare()
1204 reset_page_owner(page, order); in free_pages_prepare()
1205 page_table_check_free(page, order); in free_pages_prepare()
1206 pgalloc_tag_sub(page, 1 << order); in free_pages_prepare()
1217 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
1221 * avoid checking PageCompound for order-0 pages. in free_pages_prepare()
1223 if (unlikely(order)) { in free_pages_prepare()
1232 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
1246 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); in free_pages_prepare()
1258 reset_page_owner(page, order); in free_pages_prepare()
1259 page_table_check_free(page, order); in free_pages_prepare()
1260 pgalloc_tag_sub(page, 1 << order); in free_pages_prepare()
1264 PAGE_SIZE << order); in free_pages_prepare()
1266 PAGE_SIZE << order); in free_pages_prepare()
1269 kernel_poison_pages(page, 1 << order); in free_pages_prepare()
1280 kasan_poison_pages(page, order, init); in free_pages_prepare()
1287 kernel_init_pages(page, 1 << order); in free_pages_prepare()
1294 arch_free_page(page, order); in free_pages_prepare()
1296 debug_pagealloc_unmap_pages(page, 1 << order); in free_pages_prepare()
1311 unsigned int order; in free_pcppages_bulk() local
1336 order = pindex_to_order(pindex); in free_pcppages_bulk()
1337 nr_pages = 1 << order; in free_pcppages_bulk()
1351 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); in free_pcppages_bulk()
1352 trace_mm_page_pcpu_drain(page, order, mt); in free_pcppages_bulk()
1361 unsigned long pfn, int order, fpi_t fpi) in split_large_buddy() argument
1363 unsigned long end = pfn + (1 << order); in split_large_buddy()
1365 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order)); in split_large_buddy()
1369 if (order > pageblock_order) in split_large_buddy()
1370 order = pageblock_order; in split_large_buddy()
1375 __free_one_page(page, pfn, zone, order, mt, fpi); in split_large_buddy()
1376 pfn += 1 << order; in split_large_buddy()
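
split_large_buddy() above hands a block larger than a pageblock back to the buddy allocator one pageblock at a time. A minimal sketch of that loop; the pageblock order of 9 is an assumed, typical value.

#include <stdio.h>

#define TOY_PAGEBLOCK_ORDER 9   /* assumed typical value, illustration only */

static void split_large_buddy(unsigned long pfn, unsigned int order)
{
        unsigned long end = pfn + (1UL << order);

        if (order > TOY_PAGEBLOCK_ORDER)
                order = TOY_PAGEBLOCK_ORDER;

        while (pfn < end) {
                printf("free pfn %lu at order %u\n", pfn, order);
                pfn += 1UL << order;
        }
}

int main(void)
{
        split_large_buddy(0, 10);  /* an order-10 block becomes two pageblocks */
        return 0;
}
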
1384 unsigned int order) in add_page_to_zone_llist() argument
1386 /* Remember the order */ in add_page_to_zone_llist()
1387 page->order = order; in add_page_to_zone_llist()
1393 unsigned long pfn, unsigned int order, in free_one_page() argument
1401 add_page_to_zone_llist(zone, page, order); in free_one_page()
1416 unsigned int p_order = p->order; in free_one_page()
1422 split_large_buddy(zone, page, pfn, order, fpi_flags); in free_one_page()
1425 __count_vm_events(PGFREE, 1 << order); in free_one_page()
1428 static void __free_pages_ok(struct page *page, unsigned int order, in __free_pages_ok() argument
1434 if (free_pages_prepare(page, order)) in __free_pages_ok()
1435 free_one_page(zone, page, pfn, order, fpi_flags); in __free_pages_ok()
1438 void __meminit __free_pages_core(struct page *page, unsigned int order, in __free_pages_core() argument
1441 unsigned int nr_pages = 1 << order; in __free_pages_core()
1472 if (page_contains_unaccepted(page, order)) { in __free_pages_core()
1473 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) in __free_pages_core()
1476 accept_memory(page_to_phys(page), PAGE_SIZE << order); in __free_pages_core()
1483 __free_pages_ok(page, order, FPI_TO_TAIL); in __free_pages_core()
1503 * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which
1539 * The order of subdivision here is critical for the IO subsystem.
1540 * Please do not alter this order without good reasons and regression
1542 * the order in which smaller blocks are delivered depends on the order
1544 * influencing the order in which pages are delivered to the IO
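
The "order of subdivision" comment refers to expand(): when an order-high block is carved down to a requested order-low allocation, the unused upper half is returned to the free list at each step, so the remainder is handed back as progressively smaller buddies. A minimal sketch of that halving:

#include <stdio.h>

static void expand(unsigned long pfn, unsigned int high, unsigned int low)
{
        unsigned long size = 1UL << high;

        while (high > low) {
                high--;
                size >>= 1;
                /* the upper half goes back on the order-`high` free list */
                printf("put back pfn %lu at order %u\n", pfn + size, high);
        }
        printf("allocated pfn %lu at order %u\n", pfn, low);
}

int main(void)
{
        expand(0, 3, 0);  /* carve an order-0 page out of an order-3 block */
        return 0;
}
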
1617 static inline bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
1620 for (int i = 0; i < (1 << order); i++) { in check_new_pages()
1659 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
1669 arch_alloc_page(page, order); in post_alloc_hook()
1670 debug_pagealloc_map_pages(page, 1 << order); in post_alloc_hook()
1677 kernel_unpoison_pages(page, 1 << order); in post_alloc_hook()
1691 for (i = 0; i != 1 << order; ++i) in post_alloc_hook()
1698 kasan_unpoison_pages(page, order, init)) { in post_alloc_hook()
1707 for (i = 0; i != 1 << order; ++i) in post_alloc_hook()
1712 kernel_init_pages(page, 1 << order); in post_alloc_hook()
1714 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
1715 page_table_check_alloc(page, order); in post_alloc_hook()
1716 pgalloc_tag_add(page, current, 1 << order); in post_alloc_hook()
1719 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
1722 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
1724 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
1725 prep_compound_page(page, order); in prep_new_page()
1744 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1752 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { in __rmqueue_smallest()
1758 page_del_and_expand(zone, page, order, current_order, in __rmqueue_smallest()
1760 trace_mm_page_alloc_zone_locked(page, order, migratetype, in __rmqueue_smallest()
1761 pcp_allowed_order(order) && in __rmqueue_smallest()
1771 * This array describes the order lists are fallen back to when
1784 unsigned int order) in __rmqueue_cma_fallback() argument
1786 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
1790 unsigned int order) { return NULL; } in __rmqueue_cma_fallback() argument
1802 unsigned int order; in __move_freepages_block() local
1819 order = buddy_order(page); in __move_freepages_block()
1821 move_to_free_list(page, zone, order, old_mt, new_mt); in __move_freepages_block()
1823 pfn += 1 << order; in __move_freepages_block()
1824 pages_moved += 1 << order; in __move_freepages_block()
1897 int order = 0; in find_large_buddy() local
1903 if (++order > MAX_PAGE_ORDER) in find_large_buddy()
1905 pfn &= ~0UL << order; in find_large_buddy()
1953 int order = buddy_order(buddy); in move_freepages_block_isolate() local
1955 del_page_from_free_list(buddy, zone, order, in move_freepages_block_isolate()
1958 split_large_buddy(zone, buddy, pfn, order, FPI_NONE); in move_freepages_block_isolate()
1964 int order = buddy_order(page); in move_freepages_block_isolate() local
1966 del_page_from_free_list(page, zone, order, in move_freepages_block_isolate()
1969 split_large_buddy(zone, page, pfn, order, FPI_NONE); in move_freepages_block_isolate()
2033 static bool should_try_claim_block(unsigned int order, int start_mt) in should_try_claim_block() argument
2036 * Leaving this order check is intended, although there is in should_try_claim_block()
2037 * relaxed order check in next check. The reason is that in should_try_claim_block()
2042 if (order >= pageblock_order) in should_try_claim_block()
2049 if (order >= pageblock_order / 2) in should_try_claim_block()
2076 * Check whether there is a suitable fallback freepage with requested order.
2083 int find_suitable_fallback(struct free_area *area, unsigned int order, in find_suitable_fallback() argument
2098 if (should_try_claim_block(order, migratetype)) in find_suitable_fallback()
2109 * This function implements actual block claiming behaviour. If order is large
2117 int current_order, int order, int start_type, in try_to_claim_block() argument
2129 nr_added = expand(zone, page, order, current_order, start_type); in try_to_claim_block()
2175 return __rmqueue_smallest(zone, order, start_type); in try_to_claim_block()
2185 * The use of signed ints for order and current_order is a deliberate
2190 __rmqueue_claim(struct zone *zone, int order, int start_migratetype, in __rmqueue_claim() argument
2195 int min_order = order; in __rmqueue_claim()
2205 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) in __rmqueue_claim()
2225 page = try_to_claim_block(zone, page, current_order, order, in __rmqueue_claim()
2229 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_claim()
2243 __rmqueue_steal(struct zone *zone, int order, int start_migratetype) in __rmqueue_steal() argument
2251 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { in __rmqueue_steal()
2259 page_del_and_expand(zone, page, order, current_order, fallback_mt); in __rmqueue_steal()
2260 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_steal()
2280 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2294 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2311 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2317 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2325 page = __rmqueue_claim(zone, order, migratetype, alloc_flags); in __rmqueue()
2334 page = __rmqueue_steal(zone, order, migratetype); in __rmqueue()
2349 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2364 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2371 * physical page order. The page is added to the tail of in rmqueue_bulk()
2375 * head, thus also in the physical page order. This is useful in rmqueue_bulk()
2579 /* Free as much as possible if batch freeing high-order pages. */ in nr_pcp_free()
2650 unsigned int order, fpi_t fpi_flags) in free_frozen_page_commit() argument
2662 __count_vm_events(PGFREE, 1 << order); in free_frozen_page_commit()
2663 pindex = order_to_pindex(migratetype, order); in free_frozen_page_commit()
2665 pcp->count += 1 << order; in free_frozen_page_commit()
2669 * As high-order pages other than THP's stored on PCP can contribute in free_frozen_page_commit()
2674 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { in free_frozen_page_commit()
2684 pcp->free_count += (1 << order); in free_frozen_page_commit()
2707 static void __free_frozen_pages(struct page *page, unsigned int order, in __free_frozen_pages() argument
2716 if (!pcp_allowed_order(order)) { in __free_frozen_pages()
2717 __free_pages_ok(page, order, fpi_flags); in __free_frozen_pages()
2721 if (!free_pages_prepare(page, order)) in __free_frozen_pages()
2735 free_one_page(zone, page, pfn, order, fpi_flags); in __free_frozen_pages()
2743 add_page_to_zone_llist(zone, page, order); in __free_frozen_pages()
2749 free_frozen_page_commit(zone, pcp, page, migratetype, order, fpi_flags); in __free_frozen_pages()
2752 free_one_page(zone, page, pfn, order, fpi_flags); in __free_frozen_pages()
2757 void free_frozen_pages(struct page *page, unsigned int order) in free_frozen_pages() argument
2759 __free_frozen_pages(page, order, FPI_NONE); in free_frozen_pages()
2776 unsigned int order = folio_order(folio); in free_unref_folios() local
2778 if (!free_pages_prepare(&folio->page, order)) in free_unref_folios()
2784 if (!pcp_allowed_order(order)) { in free_unref_folios()
2786 pfn, order, FPI_NONE); in free_unref_folios()
2789 folio->private = (void *)(unsigned long)order; in free_unref_folios()
2800 unsigned int order = (unsigned long)folio->private; in free_unref_folios() local
2822 order, FPI_NONE); in free_unref_folios()
2835 order, FPI_NONE); in free_unref_folios()
2850 order, FPI_NONE); in free_unref_folios()
2861 * split_page takes a non-compound higher-order page, and splits it into
2862 * n (1<<order) sub-pages: page[0..n]
2868 void split_page(struct page *page, unsigned int order) in split_page() argument
2875 for (i = 1; i < (1 << order); i++) in split_page()
2877 split_page_owner(page, order, 0); in split_page()
2878 pgalloc_tag_split(page_folio(page), order, 0); in split_page()
2879 split_page_memcg(page, order); in split_page()
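
split_page() turns one non-compound order-N allocation into 1 << N independently refcounted order-0 pages that can then be freed one by one. A kernel-style usage sketch (a sketch only: it assumes a sleepable GFP_KERNEL context and keeps error handling to a minimum):

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *split_page_example(void)
{
        unsigned int order = 2;
        struct page *page = alloc_pages(GFP_KERNEL, order); /* no __GFP_COMP */

        if (!page)
                return NULL;

        /* Give each of the 1 << order sub-pages its own reference count. */
        split_page(page, order);

        /* The sub-pages can now be released individually. */
        __free_page(page + 1);
        __free_page(page + 2);
        __free_page(page + 3);

        return page;    /* caller keeps only the first sub-page */
}
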
2883 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
2892 * emulate a high-order watermark check with a raised order-0 in __isolate_free_page()
2893 * watermark, because we already know our high-order page in __isolate_free_page()
2896 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
2901 del_page_from_free_list(page, zone, order, mt); in __isolate_free_page()
2907 if (order >= pageblock_order - 1) { in __isolate_free_page()
2908 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
2921 return 1UL << order; in __isolate_free_page()
2927 * @order: Order of the isolated page
2933 void __putback_isolated_page(struct page *page, unsigned int order, int mt) in __putback_isolated_page() argument
2941 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
2973 unsigned int order, unsigned int alloc_flags, in rmqueue_buddy() argument
2988 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
2992 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm); in rmqueue_buddy()
2996 * order-0 (atomic) allocs access to HIGHATOMIC in rmqueue_buddy()
2998 * high-order atomic allocation in the future. in rmqueue_buddy()
3001 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
3009 } while (check_new_pages(page, order)); in rmqueue_buddy()
3011 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_buddy()
3017 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) in nr_pcp_alloc() argument
3031 if (order) in nr_pcp_alloc()
3043 if (!order) { in nr_pcp_alloc()
3047 * subsequent allocation of order-0 pages without any freeing. in nr_pcp_alloc()
3056 * Scale batch relative to order if batch implies free pages in nr_pcp_alloc()
3062 batch = max(batch >> order, 2); in nr_pcp_alloc()
3069 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, in __rmqueue_pcplist() argument
3079 int batch = nr_pcp_alloc(pcp, zone, order); in __rmqueue_pcplist()
3082 alloced = rmqueue_bulk(zone, order, in __rmqueue_pcplist()
3086 pcp->count += alloced << order; in __rmqueue_pcplist()
3093 pcp->count -= 1 << order; in __rmqueue_pcplist()
3094 } while (check_new_pages(page, order)); in __rmqueue_pcplist()
3101 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
3123 list = &pcp->lists[order_to_pindex(migratetype, order)]; in rmqueue_pcplist()
3124 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3128 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_pcplist()
3136 * Use pcplists for THP or "cheap" high-order allocations.
3148 struct zone *zone, unsigned int order, in rmqueue() argument
3154 if (likely(pcp_allowed_order(order))) { in rmqueue()
3155 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
3161 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, in rmqueue()
3178 * exclusive use of high-order atomic allocations if there are no
3179 * empty page blocks that contain a page with a suitable order
3181 static void reserve_highatomic_pageblock(struct page *page, int order, in reserve_highatomic_pageblock() argument
3211 if (order < pageblock_order) { in reserve_highatomic_pageblock()
3216 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); in reserve_highatomic_pageblock()
3217 zone->nr_reserved_highatomic += 1 << order; in reserve_highatomic_pageblock()
3226 * potentially hurts the reliability of high-order allocations when under
3241 int order; in unreserve_highatomic_pageblock() local
3255 for (order = 0; order < NR_PAGE_ORDERS; order++) { in unreserve_highatomic_pageblock()
3256 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
3263 size = max(pageblock_nr_pages, 1UL << order); in unreserve_highatomic_pageblock()
3284 if (order < pageblock_order) in unreserve_highatomic_pageblock()
3289 move_to_free_list(page, zone, order, in unreserve_highatomic_pageblock()
3292 change_pageblock_range(page, order, in unreserve_highatomic_pageblock()
3313 unsigned int order, unsigned int alloc_flags) in __zone_watermark_unusable_free() argument
3315 long unusable_free = (1 << order) - 1; in __zone_watermark_unusable_free()
3334 * Return true if free base pages are above 'mark'. For high-order checks it
3335 * will return true if the order-0 watermark is reached and there is at least in __zone_watermark_ok()
3339 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok() argument
3347 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); in __zone_watermark_ok()
3379 * Check watermarks for an order-0 allocation request. If these in __zone_watermark_ok()
3380 * are not met, then a high-order request also cannot go ahead in __zone_watermark_ok()
3386 /* If this is an order-0 request then the watermark is fine */ in __zone_watermark_ok()
3387 if (!order) in __zone_watermark_ok()
3390 /* For a high-order request, check at least one suitable page is free */ in __zone_watermark_ok()
3391 for (o = order; o < NR_PAGE_ORDERS; o++) { in __zone_watermark_ok()
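
A minimal userspace model of the watermark check described above: the order-0 watermark must hold first, and a high-order request additionally needs at least one free block of a suitable order. Reserves, migratetype scanning and the other refinements of __zone_watermark_ok() are deliberately omitted, and NR_ORDERS is an assumed value.

#include <stdbool.h>
#include <stdio.h>

#define NR_ORDERS 11    /* assumed: orders 0..10 */

/* free_area[o] = number of free blocks of order o (all types pooled) */
static bool watermark_ok(long free_pages, long mark, unsigned int order,
                         const long free_area[NR_ORDERS])
{
        if (free_pages <= mark)         /* order-0 check must pass first */
                return false;
        if (order == 0)
                return true;
        for (unsigned int o = order; o < NR_ORDERS; o++)
                if (free_area[o] > 0)   /* at least one suitably large block */
                        return true;
        return false;
}

int main(void)
{
        long free_area[NR_ORDERS] = { [0] = 512, [3] = 1 };

        printf("%d\n", watermark_ok(1024, 256, 3, free_area)); /* 1 */
        printf("%d\n", watermark_ok(1024, 256, 4, free_area)); /* 0 */
        return 0;
}
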
3417 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
3420 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_ok()
3424 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast() argument
3433 * Fast check for order-0 only. If this fails then the reserves in zone_watermark_fast()
3436 if (!order) { in zone_watermark_fast()
3449 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_fast()
3454 * Ignore watermark boosting for __GFP_HIGH order-0 allocations in zone_watermark_fast()
3459 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost in zone_watermark_fast()
3462 return __zone_watermark_ok(z, order, mark, highest_zoneidx, in zone_watermark_fast()
3545 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
3615 cond_accept_memory(zone, order, alloc_flags); in get_page_from_freelist()
3628 if (zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3637 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3642 if (cond_accept_memory(zone, order, alloc_flags)) in get_page_from_freelist()
3650 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3662 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3672 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3681 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, in get_page_from_freelist()
3684 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
3687 * If this is a high-order atomic allocation then check in get_page_from_freelist()
3691 reserve_highatomic_pageblock(page, order, zone); in get_page_from_freelist()
3695 if (cond_accept_memory(zone, order, alloc_flags)) in get_page_from_freelist()
3700 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3763 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
3769 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3776 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3782 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
3790 .order = order, in __alloc_pages_may_oom()
3814 ~__GFP_DIRECT_RECLAIM, order, in __alloc_pages_may_oom()
3822 /* The OOM killer will not help higher order allocs */ in __alloc_pages_may_oom()
3823 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
3860 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
3875 /* Try memory compaction for high-order allocations before reclaim */
3877 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
3885 if (!order) in __alloc_pages_direct_compact()
3892 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
3909 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
3913 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
3919 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
3936 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, in should_compact_retry() argument
3947 if (!order) in should_compact_retry()
3954 * Compaction was skipped due to a lack of free order-0 in should_compact_retry()
3958 ret = compaction_zonelist_suitable(ac, order, alloc_flags); in should_compact_retry()
3976 if (order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
3988 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? in should_compact_retry()
3997 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); in should_compact_retry()
4002 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4011 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, in should_compact_retry() argument
4019 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
4025 * Let's give them a good hope and keep retrying while the order-0 in should_compact_retry()
4123 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
4136 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
4149 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
4158 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
4163 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
4182 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds() argument
4192 reclaim_order = max(order, pageblock_order); in wake_all_kswapds()
4194 reclaim_order = order; in wake_all_kswapds()
4208 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) in gfp_to_alloc_flags() argument
4237 if (order > 0) in gfp_to_alloc_flags()
4312 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
4322 * their order will become available due to high fragmentation so in should_reclaim_retry()
4325 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) in should_reclaim_retry()
4359 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4361 trace_reclaim_retry_zone(z, order, reclaimable, in should_reclaim_retry()
4422 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
4428 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; in __alloc_pages_slowpath()
4443 * allocate greater than order-1 page units with __GFP_NOFAIL. in __alloc_pages_slowpath()
4445 WARN_ON_ONCE(order > 1); in __alloc_pages_slowpath()
4472 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); in __alloc_pages_slowpath()
4499 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4505 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4512 * movable high-order allocations, do that as well, as compaction will in __alloc_pages_slowpath()
4520 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
4522 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
4538 * order, fail immediately unless the allocator has in __alloc_pages_slowpath()
4544 * bursty high order allocations, in __alloc_pages_slowpath()
4575 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4594 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4607 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4613 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4623 * Do not retry costly high order allocations unless they are in __alloc_pages_slowpath()
4630 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
4635 * It doesn't make any sense to retry for the compaction if the order-0 in __alloc_pages_slowpath()
4641 should_compact_retry(ac, order, alloc_flags, in __alloc_pages_slowpath()
4661 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4706 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); in __alloc_pages_slowpath()
4715 "page allocation failure: order:%u", order); in __alloc_pages_slowpath()
4720 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
4749 should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
4769 * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
4938 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, in __alloc_frozen_pages_noprof() argument
4947 * There are several places where we assume that the order value is sane in __alloc_frozen_pages_noprof()
4950 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) in __alloc_frozen_pages_noprof()
4963 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, in __alloc_frozen_pages_noprof()
4974 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); in __alloc_frozen_pages_noprof()
4987 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); in __alloc_frozen_pages_noprof()
4991 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { in __alloc_frozen_pages_noprof()
4992 free_frozen_pages(page, order); in __alloc_frozen_pages_noprof()
4996 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); in __alloc_frozen_pages_noprof()
4997 kmsan_alloc_page(page, order, alloc_gfp); in __alloc_frozen_pages_noprof()
5003 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, in __alloc_pages_noprof() argument
5008 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); in __alloc_pages_noprof()
5015 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, in __folio_alloc_noprof() argument
5018 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, in __folio_alloc_noprof()
5029 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) in get_free_pages_noprof() argument
5033 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); in get_free_pages_noprof()
5049 * @order: The order of the allocation.
5053 * pages. It does not check that the @order passed in matches that of
5067 static void ___free_pages(struct page *page, unsigned int order, in ___free_pages() argument
5076 __free_frozen_pages(page, order, fpi_flags); in ___free_pages()
5078 pgalloc_tag_sub_pages(tag, (1 << order) - 1); in ___free_pages()
5079 while (order-- > 0) in ___free_pages()
5080 __free_frozen_pages(page + (1 << order), order, in ___free_pages()
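
The while (order-- > 0) loop above releases everything except the first page of an order-N block as progressively smaller buddies. A minimal sketch showing which page ranges get freed for an order-3 block:

#include <stdio.h>

int main(void)
{
        unsigned int order = 3;

        while (order-- > 0)
                printf("free pages [%u, %u) as an order-%u block\n",
                       1u << order, 2u << order, order);
        /* pages [4,8) at order 2, [2,4) at order 1, [1,2) at order 0;
         * the head page (index 0) is left for its remaining reference holder. */
        return 0;
}
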
5084 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
5086 ___free_pages(page, order, FPI_NONE); in __free_pages()
5094 void free_pages_nolock(struct page *page, unsigned int order) in free_pages_nolock() argument
5096 ___free_pages(page, order, FPI_TRYLOCK); in free_pages_nolock()
5099 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
5103 __free_pages(virt_to_page((void *)addr), order); in free_pages()
5109 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
5117 split_page_owner(page, order, 0); in make_alloc_exact()
5118 pgalloc_tag_split(page_folio(page), order, 0); in make_alloc_exact()
5119 split_page_memcg(page, order); in make_alloc_exact()
5123 last = page + (1UL << order); in make_alloc_exact()
5147 unsigned int order = get_order(size); in alloc_pages_exact_noprof() local
5153 addr = get_free_pages_noprof(gfp_mask, order); in alloc_pages_exact_noprof()
5154 return make_alloc_exact(addr, order, size); in alloc_pages_exact_noprof()
5172 unsigned int order = get_order(size); in alloc_pages_exact_nid_noprof() local
5178 p = alloc_pages_node_noprof(nid, gfp_mask, order); in alloc_pages_exact_nid_noprof()
5181 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid_noprof()
5442 pr_info("Fallback order for Node %d: ", local_node); in build_zonelists()
5583 * needs the percpu allocator in order to allocate its pagesets in build_all_zonelists_init()
5674 * fragmented and becoming unavailable for high-order allocations. in zone_batchsize()
5868 * consecutive high-order pages freeing without allocation. in zone_pcp_update_cacheinfo()
6610 int order; in split_free_pages() local
6612 for (order = 0; order < NR_PAGE_ORDERS; order++) { in split_free_pages()
6614 int nr_pages = 1 << order; in split_free_pages()
6616 list_for_each_entry_safe(page, next, &list[order], lru) { in split_free_pages()
6619 post_alloc_hook(page, order, gfp_mask); in split_free_pages()
6621 if (!order) in split_free_pages()
6624 split_page(page, order); in split_free_pages()
6626 /* Add all subpages to the order-0 head, in sequence. */ in split_free_pages()
6699 .order = -1, in alloc_contig_range_noprof()
6714 * MIGRATE_ISOLATE. Because pageblock and max order pages may in alloc_contig_range_noprof()
6773 * page allocator holds, ie. they can be part of higher order in alloc_contig_range_noprof()
6805 int order = ilog2(end - start); in alloc_contig_range_noprof() local
6807 check_new_pages(head, order); in alloc_contig_range_noprof()
6808 prep_new_page(head, order, gfp_mask, 0); in alloc_contig_range_noprof()
7005 unsigned int order; in __offline_isolated_pages() local
7035 order = buddy_order(page); in __offline_isolated_pages()
7036 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); in __offline_isolated_pages()
7037 pfn += (1 << order); in __offline_isolated_pages()
7051 unsigned int order; in is_free_buddy_page() local
7053 for (order = 0; order < NR_PAGE_ORDERS; order++) { in is_free_buddy_page()
7054 const struct page *head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
7057 buddy_order_unsafe(head) >= order) in is_free_buddy_page()
7061 return order <= MAX_PAGE_ORDER; in is_free_buddy_page()
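
is_free_buddy_page() and take_page_off_buddy() locate the candidate block head by clearing the low order bits of the pfn (the page - (pfn & ((1 << order) - 1)) expression above). A minimal illustration of that alignment arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long pfn = 0x12345;

        for (unsigned int order = 0; order <= 4; order++)
                printf("order %u: head pfn 0x%lx\n",
                       order, pfn & ~((1UL << order) - 1));
        return 0;
}
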
7067 unsigned int order, int migratetype, in add_to_free_list() argument
7070 __add_to_free_list(page, zone, order, migratetype, tail); in add_to_free_list()
7071 account_freepages(zone, 1 << order, migratetype); in add_to_free_list()
7075 * Break down a higher-order page in sub-pages, and keep our target out of
7112 unsigned int order; in take_page_off_buddy() local
7116 for (order = 0; order < NR_PAGE_ORDERS; order++) { in take_page_off_buddy()
7117 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
7120 if (PageBuddy(page_head) && page_order >= order) { in take_page_off_buddy()
7199 static bool page_contains_unaccepted(struct page *page, unsigned int order) in page_contains_unaccepted() argument
7203 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); in page_contains_unaccepted()
7254 static bool cond_accept_memory(struct zone *zone, unsigned int order, in cond_accept_memory() argument
7280 __zone_watermark_unusable_free(zone, order, 0) - in cond_accept_memory()
7313 static bool page_contains_unaccepted(struct page *page, unsigned int order) in page_contains_unaccepted() argument
7318 static bool cond_accept_memory(struct zone *zone, unsigned int order, in cond_accept_memory() argument
7335 * @order: allocation order size
7337 * Allocates pages of a given order from the given node. This is safe to
7346 struct page *try_alloc_pages_noprof(int nid, unsigned int order) in try_alloc_pages_noprof() argument
7387 if (!pcp_allowed_order(order)) in try_alloc_pages_noprof()
7397 prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac, in try_alloc_pages_noprof()
7404 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); in try_alloc_pages_noprof()
7412 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) { in try_alloc_pages_noprof()
7413 free_pages_nolock(page, order); in try_alloc_pages_noprof()
7416 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); in try_alloc_pages_noprof()
7417 kmsan_alloc_page(page, order, alloc_gfp); in try_alloc_pages_noprof()