Lines matching full:order in mm/page_alloc.c

114  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
152 static void __free_pages_ok(struct page *page, unsigned int order);
324 * Higher-order pages are called "compound pages". They are structured thusly:
334 * put_page() function. Its ->lru.prev holds the order of allocation.
335 * This usage means that zero-order pages may not be compound.
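The comment quoted above (lines 324-335) says where a compound page keeps its order: in the first tail page, whose otherwise-unused ->lru.prev field is pressed into service, which is also why an order-0 page, having no tail page, can never be compound. A minimal userspace sketch of that bookkeeping; struct page_model, set_order and get_order_of are invented for the demo and only model the one field involved:

    #include <stdio.h>

    /* Hypothetical model of the field the comment refers to; not the real struct page. */
    struct page_model {
        struct { void *next, *prev; } lru;
    };

    /* The order is stashed in the first tail page, page[1], as the comment describes. */
    static void set_order(struct page_model *head, unsigned long order)
    {
        head[1].lru.prev = (void *)order;
    }

    static unsigned long get_order_of(struct page_model *head)
    {
        return (unsigned long)head[1].lru.prev;
    }

    int main(void)
    {
        struct page_model pages[4];           /* an order-2 block: 1 << 2 pages */

        set_order(pages, 2);
        printf("compound order = %lu (%d pages)\n",
               get_order_of(pages), 1 << get_order_of(pages));
        return 0;
    }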
343 void prep_compound_page(struct page *page, unsigned long order) in prep_compound_page() argument
346 int nr_pages = 1 << order; in prep_compound_page()
349 set_compound_order(page, order); in prep_compound_page()
360 static int destroy_compound_page(struct page *page, unsigned long order) in destroy_compound_page() argument
363 int nr_pages = 1 << order; in destroy_compound_page()
366 if (unlikely(compound_order(page) != order) || in destroy_compound_page()
387 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) in prep_zero_page() argument
396 for (i = 0; i < (1 << order); i++) in prep_zero_page()
431 static inline void set_page_order(struct page *page, int order) in set_page_order() argument
433 set_page_private(page, order); in set_page_order()
447 * 1) Any buddy B1 will have an order O twin B2 which satisfies
450 * For example, if the starting buddy (buddy2) is #8 its order
454 * 2) Any buddy B will have an order O+1 parent P which
461 __find_buddy_index(unsigned long page_idx, unsigned int order) in __find_buddy_index() argument
463 return page_idx ^ (1 << order); in __find_buddy_index()
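The XOR at line 463 is the heart of the buddy scheme: flipping bit O of a block-aligned page index gives the index of its order-O buddy, and the bitwise AND of the two indices (used as combined_idx later in __free_one_page) gives the head of the merged order O+1 block. A standalone sketch working through the comment's own example of index 8; find_buddy_index here is a local re-implementation, not the kernel symbol:

    #include <stdio.h>

    static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
    {
        return page_idx ^ (1UL << order);
    }

    int main(void)
    {
        unsigned int order;

        /* Start from index 8, as in the comment above. */
        for (order = 0; order < 3; order++)
            printf("order %u: buddy of 8 is %lu, combined index %lu\n",
                   order, find_buddy_index(8, order),
                   find_buddy_index(8, order) & 8);
        return 0;
    }

For order 0 the buddy of 8 is 9, for order 1 it is 10, for order 2 it is 12; in every case the combined index is 8, the lower of the pair.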
471 * (c) a page and its buddy have the same order &&
477 * For recording page's order, we use page_private(page).
480 int order) in page_is_buddy() argument
488 if (page_is_guard(buddy) && page_order(buddy) == order) { in page_is_buddy()
493 if (PageBuddy(buddy) && page_order(buddy) == order) { in page_is_buddy()
513 * free pages of length of (1 << order) and marked with _mapcount -2. Page's
514 * order is recorded in page_private(page) field.
525 struct zone *zone, unsigned int order, in __free_one_page() argument
534 if (unlikely(destroy_compound_page(page, order))) in __free_one_page()
541 VM_BUG_ON(page_idx & ((1 << order) - 1)); in __free_one_page()
544 while (order < MAX_ORDER-1) { in __free_one_page()
545 buddy_idx = __find_buddy_index(page_idx, order); in __free_one_page()
547 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
551 * merge with it and move up one order. in __free_one_page()
556 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); in __free_one_page()
559 zone->free_area[order].nr_free--; in __free_one_page()
565 order++; in __free_one_page()
567 set_page_order(page, order); in __free_one_page()
571 * of the next-highest order is free. If it is, it's possible in __free_one_page()
575 * as a higher order page in __free_one_page()
577 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { in __free_one_page()
581 buddy_idx = __find_buddy_index(combined_idx, order + 1); in __free_one_page()
583 if (page_is_buddy(higher_page, higher_buddy, order + 1)) { in __free_one_page()
585 &zone->free_area[order].free_list[migratetype]); in __free_one_page()
590 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
592 zone->free_area[order].nr_free++; in __free_one_page()
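Stripped of the zone lock, guard-page handling and vmstat accounting, the merge loop visible in the __free_one_page hits above reduces to: while the order-O buddy is also free, remove it from its free list, keep the lower of the two indices as the new block head, and retry one order higher. A hedged userspace simulation of only that index arithmetic; free_at[] and free_one_block are invented stand-ins for the free lists and the real function:

    #include <stdio.h>

    #define MAX_ORDER 11
    #define NR_PAGES  64

    /* free_at[idx] holds the order of a free block starting at idx, or -1. */
    static int free_at[NR_PAGES];

    /* Free one order-'order' block at 'page_idx', merging with free buddies. */
    static void free_one_block(unsigned long page_idx, unsigned int order)
    {
        while (order < MAX_ORDER - 1) {
            unsigned long buddy_idx = page_idx ^ (1UL << order);

            if (buddy_idx >= NR_PAGES || free_at[buddy_idx] != (int)order)
                break;                     /* buddy not free at this order */
            free_at[buddy_idx] = -1;       /* take the buddy off its "free list" */
            page_idx &= buddy_idx;         /* merged block starts at the lower index */
            order++;
        }
        free_at[page_idx] = order;
        printf("free block: idx %lu, order %u\n", page_idx, order);
    }

    int main(void)
    {
        int i;

        for (i = 0; i < NR_PAGES; i++)
            free_at[i] = -1;

        free_one_block(8, 0);   /* idx 8 becomes a free order-0 block   */
        free_one_block(9, 0);   /* buddy of 8: merges to order 1 at 8   */
        free_one_block(10, 1);  /* order-1 buddy of 8: order 2 at idx 8 */
        return 0;
    }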
623 * Assumes all pages on list are in same zone, and of same order.
678 static void free_one_page(struct zone *zone, struct page *page, int order, in free_one_page() argument
685 __free_one_page(page, zone, order, migratetype); in free_one_page()
686 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); in free_one_page()
690 static bool free_pages_prepare(struct page *page, unsigned int order) in free_pages_prepare() argument
695 trace_mm_page_free(page, order); in free_pages_prepare()
696 kmemcheck_free_shadow(page, order); in free_pages_prepare()
700 for (i = 0; i < (1 << order); i++) in free_pages_prepare()
706 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); in free_pages_prepare()
708 PAGE_SIZE << order); in free_pages_prepare()
710 arch_free_page(page, order); in free_pages_prepare()
711 kernel_map_pages(page, 1 << order, 0); in free_pages_prepare()
716 static void __free_pages_ok(struct page *page, unsigned int order) in __free_pages_ok() argument
721 if (!free_pages_prepare(page, order)) in __free_pages_ok()
727 __count_vm_events(PGFREE, 1 << order); in __free_pages_ok()
728 free_one_page(page_zone(page), page, order, in __free_pages_ok()
733 void __meminit __free_pages_bootmem(struct page *page, unsigned int order) in __free_pages_bootmem() argument
735 unsigned int nr_pages = 1 << order; in __free_pages_bootmem()
749 __free_pages(page, order); in __free_pages_bootmem()
754 * The order of subdivision here is critical for the IO subsystem.
755 * Please do not alter this order without good reasons and regression
757 * the order in which smaller blocks are delivered depends on the order
759 * influencing the order in which pages are delivered to the IO
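The comment starting at line 754 belongs to expand(), the inverse of the merge loop: when __rmqueue_smallest has to take a block larger than requested, each surplus upper half goes back onto the free list of the next order down, and the warning about the IO subsystem is about exactly that delivery order. A minimal sketch of the subdivision; expand_block and its printf "free lists" are illustrative only:

    #include <stdio.h>

    /*
     * Split an order-'high' block down to order 'low', handing each unused
     * upper half back to its free list (modelled here by a printf).
     */
    static void expand_block(unsigned long idx, unsigned int low, unsigned int high)
    {
        unsigned long size = 1UL << high;

        while (high > low) {
            high--;
            size >>= 1;
            /* The upper half [idx + size, idx + 2*size) goes back as a free block. */
            printf("free list[%u] <- block at idx %lu (%lu pages)\n",
                   high, idx + size, size);
        }
        printf("allocated: idx %lu, order %u\n", idx, low);
    }

    int main(void)
    {
        /* Satisfy an order-0 request from an order-3 block starting at idx 0. */
        expand_block(0, 0, 3);
        return 0;
    }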
817 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) in prep_new_page() argument
821 for (i = 0; i < (1 << order); i++) { in prep_new_page()
830 arch_alloc_page(page, order); in prep_new_page()
831 kernel_map_pages(page, 1 << order, 1); in prep_new_page()
834 prep_zero_page(page, order, gfp_flags); in prep_new_page()
836 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
837 prep_compound_page(page, order); in prep_new_page()
847 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
855 for (current_order = order; current_order < MAX_ORDER; ++current_order) { in __rmqueue_smallest()
865 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
874 * This array describes the order lists are fallen back to when
894 unsigned long order; in move_freepages() local
922 order = page_order(page); in move_freepages()
924 &zone->free_area[order].free_list[migratetype]); in move_freepages()
925 page += 1 << order; in move_freepages()
926 pages_moved += 1 << order; in move_freepages()
966 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) in __rmqueue_fallback() argument
974 for (current_order = MAX_ORDER-1; current_order >= order; in __rmqueue_fallback()
1022 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_fallback()
1024 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
1038 static struct page *__rmqueue(struct zone *zone, unsigned int order, in __rmqueue() argument
1044 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
1047 page = __rmqueue_fallback(zone, order, migratetype); in __rmqueue()
1060 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
1069 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
1077 struct page *page = __rmqueue(zone, order, migratetype); in rmqueue_bulk()
1083 * in physical page order. The page is added to the callers and in rmqueue_bulk()
1097 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
1177 int order, t; in mark_free_pages() local
1194 for_each_migratetype_order(order, t) { in mark_free_pages()
1195 list_for_each(curr, &zone->free_area[order].free_list[t]) { in mark_free_pages()
1199 for (i = 0; i < (1UL << order); i++) in mark_free_pages()
1208 * Free a 0-order page
1260 * Free a list of 0-order pages
1273 * split_page takes a non-compound higher-order page, and splits it into
1274 * n (1<<order) sub-pages: page[0..n]
1280 void split_page(struct page *page, unsigned int order) in split_page() argument
1293 split_page(virt_to_page(page[0].shadow), order); in split_page()
1296 for (i = 1; i < (1 << order); i++) in split_page()
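Per its comment at lines 1273-1274, split_page turns one non-compound order-n allocation into 1 << n independent order-0 pages so that each can be freed (or mapped) on its own. A hedged in-kernel usage sketch, assuming nothing beyond the alloc_pages/split_page/__free_page interfaces; split_page_example is a made-up caller and error handling is trimmed:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Illustrative only: grab four contiguous pages, then release them one by one. */
    static void split_page_example(void)
    {
        struct page *page = alloc_pages(GFP_KERNEL, 2);   /* order-2: 4 pages */
        int i;

        if (!page)
            return;

        split_page(page, 2);            /* each sub-page now has its own refcount */
        for (i = 0; i < 4; i++)
            __free_page(page + i);      /* legal only because of the split */
    }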
1312 unsigned int order; in split_free_page() local
1319 order = page_order(page); in split_free_page()
1322 watermark = low_wmark_pages(zone) + (1 << order); in split_free_page()
1328 zone->free_area[order].nr_free--; in split_free_page()
1330 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order)); in split_free_page()
1334 split_page(page, order); in split_free_page()
1336 if (order >= pageblock_order - 1) { in split_free_page()
1337 struct page *endpage = page + (1 << order) - 1; in split_free_page()
1342 return 1 << order; in split_free_page()
1347 * we cheat by calling it from here, in the order > 0 path. Saves a branch
1352 struct zone *zone, int order, gfp_t gfp_flags, in buffered_rmqueue() argument
1360 if (likely(order == 0)) { in buffered_rmqueue()
1391 * allocate greater than order-1 page units with in buffered_rmqueue()
1394 WARN_ON_ONCE(order > 1); in buffered_rmqueue()
1397 page = __rmqueue(zone, order, migratetype); in buffered_rmqueue()
1401 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)); in buffered_rmqueue()
1404 __count_zone_vm_events(PGALLOC, zone, 1 << order); in buffered_rmqueue()
1409 if (prep_new_page(page, order, gfp_flags)) in buffered_rmqueue()
1452 static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1454 if (order < fail_page_alloc.min_order) in should_fail_alloc_page()
1463 return should_fail(&fail_page_alloc.attr, 1 << order); in should_fail_alloc_page()
1484 if (!debugfs_create_u32("min-order", mode, dir, in fail_page_alloc_debugfs()
1501 static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1509 * Return true if free pages are above 'mark'. This takes into account the order
1512 static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, in __zone_watermark_ok() argument
1519 free_pages -= (1 << order) - 1; in __zone_watermark_ok()
1527 for (o = 0; o < order; o++) { in __zone_watermark_ok()
1528 /* At the next order, this order's pages become unavailable */ in __zone_watermark_ok()
1531 /* Require fewer higher order pages to be free */ in __zone_watermark_ok()
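The __zone_watermark_ok fragments (lines 1509-1531) encode the high-order part of the watermark test: the requested block is discounted up front, then for every order below the request the free pages of that order are subtracted from the pool while the required reserve is halved (the min >>= 1 step sits between the two quoted loop lines), so a zone full of fragmented order-0 pages can still fail an order-3 check. A standalone model of that arithmetic; nr_free[], the mark of 128 and the omission of the ALLOC_HIGH/ALLOC_HARDER adjustments are all demo choices:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_ORDER 11

    /* Free-block counts per order; a deliberately fragmented zone for the demo. */
    static unsigned long nr_free[MAX_ORDER] = { 4096 };

    static bool watermark_ok(unsigned int order, long mark, long lowmem_reserve)
    {
        long min = mark;
        long free_pages = 0;
        unsigned int o;

        for (o = 0; o < MAX_ORDER; o++)
            free_pages += (long)(nr_free[o] << o);

        free_pages -= (1L << order) - 1;        /* the block being requested */
        if (free_pages <= min + lowmem_reserve)
            return false;
        for (o = 0; o < order; o++) {
            /* At the next order, this order's pages become unavailable. */
            free_pages -= (long)(nr_free[o] << o);
            min >>= 1;                          /* require fewer higher-order pages */
            if (free_pages <= min)
                return false;
        }
        return true;
    }

    int main(void)
    {
        printf("order 0: %s\n", watermark_ok(0, 128, 0) ? "ok" : "fail");
        printf("order 3: %s\n", watermark_ok(3, 128, 0) ? "ok" : "fail");
        return 0;
    }

With 4096 free order-0 pages and nothing larger, the order-0 check passes easily while the order-3 check fails as soon as the loop discounts the order-0 pool.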
1540 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, in zone_watermark_ok() argument
1543 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_ok()
1547 bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, in zone_watermark_ok_safe() argument
1555 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_ok_safe()
1703 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, in get_page_from_freelist() argument
1765 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
1791 ret = zone_reclaim(zone, gfp_mask, order); in get_page_from_freelist()
1801 if (!zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
1808 page = buffered_rmqueue(preferred_zone, zone, order, in get_page_from_freelist()
1843 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...) in warn_alloc_failed() argument
1877 pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n", in warn_alloc_failed()
1878 current->comm, order, gfp_mask); in warn_alloc_failed()
1886 should_alloc_retry(gfp_t gfp_mask, unsigned int order, in should_alloc_retry() argument
1907 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER in should_alloc_retry()
1911 if (order <= PAGE_ALLOC_COSTLY_ORDER) in should_alloc_retry()
1915 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is in should_alloc_retry()
1917 * (above), or we've reclaimed an order of pages at least as in should_alloc_retry()
1918 * large as the allocation's order. In both cases, if the in should_alloc_retry()
1921 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) in should_alloc_retry()
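The should_alloc_retry hits (lines 1886-1921) boil down to three rules: never loop with __GFP_NORETRY, keep looping for cheap requests (order <= PAGE_ALLOC_COSTLY_ORDER), and loop for costly requests only when __GFP_REPEAT is set and less than a full order's worth of pages has been reclaimed. A compact standalone model; the MY_GFP_* bits are hypothetical stand-ins, and the __GFP_NOFAIL and suspend cases of the real function are left out:

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3

    /* Hypothetical stand-ins for the gfp bits discussed above. */
    #define MY_GFP_NORETRY (1u << 0)
    #define MY_GFP_REPEAT  (1u << 1)

    static bool should_retry(unsigned int gfp, unsigned int order,
                             unsigned long pages_reclaimed)
    {
        if (gfp & MY_GFP_NORETRY)
            return false;
        if (order <= PAGE_ALLOC_COSTLY_ORDER)
            return true;                  /* cheap requests keep trying */
        /* Costly requests retry only on request, and only until enough reclaim. */
        return (gfp & MY_GFP_REPEAT) && pages_reclaimed < (1UL << order);
    }

    int main(void)
    {
        printf("order 1, no flags:         %d\n", should_retry(0, 1, 0));
        printf("order 5, no flags:         %d\n", should_retry(0, 5, 0));
        printf("order 5, __GFP_REPEAT, 16: %d\n", should_retry(MY_GFP_REPEAT, 5, 16));
        printf("order 5, __GFP_REPEAT, 64: %d\n", should_retry(MY_GFP_REPEAT, 5, 64));
        return 0;
    }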
1928 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
1947 order, zonelist, high_zoneidx, in __alloc_pages_may_oom()
1954 /* The OOM killer will not help higher order allocs */ in __alloc_pages_may_oom()
1955 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
1971 out_of_memory(zonelist, gfp_mask, order, nodemask); in __alloc_pages_may_oom()
1979 /* Try memory compaction for high-order allocations before reclaim */
1981 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
1990 if (!order) in __alloc_pages_direct_compact()
1999 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, in __alloc_pages_direct_compact()
2009 order, zonelist, high_zoneidx, in __alloc_pages_direct_compact()
2040 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
2053 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
2071 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); in __alloc_pages_direct_reclaim()
2087 page = get_page_from_freelist(gfp_mask, nodemask, order, in __alloc_pages_direct_reclaim()
2110 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, in __alloc_pages_high_priority() argument
2118 page = get_page_from_freelist(gfp_mask, nodemask, order, in __alloc_pages_high_priority()
2130 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist, in wake_all_kswapd() argument
2138 wakeup_kswapd(zone, order, classzone_idx); in wake_all_kswapd()
2184 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
2198 * In the slowpath, we sanity check order to avoid ever trying to in __alloc_pages_slowpath()
2200 * be using allocators in order of preference for an area that is in __alloc_pages_slowpath()
2203 if (order >= MAX_ORDER) { in __alloc_pages_slowpath()
2221 wake_all_kswapd(order, zonelist, high_zoneidx, in __alloc_pages_slowpath()
2241 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, in __alloc_pages_slowpath()
2249 page = __alloc_pages_high_priority(gfp_mask, order, in __alloc_pages_slowpath()
2272 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
2284 * If compaction is deferred for high-order allocations, it is because in __alloc_pages_slowpath()
2293 page = __alloc_pages_direct_reclaim(gfp_mask, order, in __alloc_pages_slowpath()
2309 page = __alloc_pages_may_oom(gfp_mask, order, in __alloc_pages_slowpath()
2318 * The oom killer is not called for high-order in __alloc_pages_slowpath()
2323 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_slowpath()
2340 if (should_alloc_retry(gfp_mask, order, did_some_progress, in __alloc_pages_slowpath()
2347 * High-order allocations do not necessarily loop after in __alloc_pages_slowpath()
2351 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
2363 warn_alloc_failed(gfp_mask, order, NULL); in __alloc_pages_slowpath()
2367 kmemcheck_pagealloc_alloc(page, order, gfp_mask); in __alloc_pages_slowpath()
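Read top to bottom, the __alloc_pages_slowpath hits (lines 2184-2367) give the order of last resorts once the fast path has failed; roughly:

    1. wake kswapd for every zone in the zonelist
    2. retry the freelists with relaxed watermarks
    3. callers allowed to ignore watermarks entirely (PF_MEMALLOC, the OOM victim) go through __alloc_pages_high_priority
    4. callers that cannot sleep give up here
    5. try memory compaction (order > 0 only), then direct reclaim, then the freelists again
    6. if reclaim made no progress, consider the OOM killer -- skipped for order > PAGE_ALLOC_COSTLY_ORDER
    7. should_alloc_retry() decides whether to loop back; otherwise one more compaction attempt, then warn_alloc_failed()

This is a summary of the call sites visible above, not a complete description of the function.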
2376 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, in __alloc_pages_nodemask() argument
2390 if (should_fail_alloc_page(gfp_mask, order)) in __alloc_pages_nodemask()
2412 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, in __alloc_pages_nodemask()
2416 page = __alloc_pages_slowpath(gfp_mask, order, in __alloc_pages_nodemask()
2421 trace_mm_page_alloc(page, order, gfp_mask, migratetype); in __alloc_pages_nodemask()
2429 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
2439 page = alloc_pages(gfp_mask, order); in __get_free_pages()
2452 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
2455 if (order == 0) in __free_pages()
2458 __free_pages_ok(page, order); in __free_pages()
2464 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
2468 __free_pages(virt_to_page((void *)addr), order); in free_pages()
2474 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) in make_alloc_exact() argument
2477 unsigned long alloc_end = addr + (PAGE_SIZE << order); in make_alloc_exact()
2480 split_page(virt_to_page((void *)addr), order); in make_alloc_exact()
2504 unsigned int order = get_order(size); in alloc_pages_exact() local
2507 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
2508 return make_alloc_exact(addr, order, size); in alloc_pages_exact()
2526 unsigned order = get_order(size); in alloc_pages_exact_nid() local
2527 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
2530 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid()
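alloc_pages_exact (line 2504 onward) rounds the request up to a whole power-of-two block via get_order, splits it with split_page inside make_alloc_exact, and frees every page beyond the requested size, so a 5-page request ends up holding 5 pages instead of 8. A small standalone check of that arithmetic; order_for is a local stand-in for get_order, and PAGE_SIZE is assumed to be 4096:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Minimal re-implementation of get_order() for the demo. */
    static unsigned int order_for(unsigned long size)
    {
        unsigned int order = 0;

        size = (size + PAGE_SIZE - 1) / PAGE_SIZE;   /* pages needed */
        while ((1UL << order) < size)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long size = 5 * PAGE_SIZE;
        unsigned int order = order_for(size);
        unsigned long used = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        unsigned long allocated = 1UL << order;

        printf("request %lu bytes -> order %u (%lu pages), keep %lu, free %lu tail pages\n",
               size, order, allocated, used, allocated - used);
        return 0;
    }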
2771 unsigned long nr[MAX_ORDER], flags, order, total = 0; in show_free_areas() local
2779 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
2780 nr[order] = zone->free_area[order].nr_free; in show_free_areas()
2781 total += nr[order] << order; in show_free_areas()
2784 for (order = 0; order < MAX_ORDER; order++) in show_free_areas()
2785 printk("%lu*%lukB ", nr[order], K(1UL) << order); in show_free_areas()
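The two loops above produce the familiar per-zone "N*MkB" lines in OOM and allocation-failure reports: nr_free for each order, multiplied out into kilobytes. Reading one back is plain arithmetic; the counts below are made up and 4 KiB pages are assumed:

    #include <stdio.h>

    #define MAX_ORDER 11

    int main(void)
    {
        /* Example per-order free counts, as a show_free_areas() line would report. */
        unsigned long nr[MAX_ORDER] = { 4, 3, 2, 1 };
        unsigned long total = 0, kb = 4;   /* 4 KiB pages assumed */
        unsigned int order;

        for (order = 0; order < MAX_ORDER; order++) {
            printf("%lu*%lukB ", nr[order], kb << order);
            total += nr[order] << order;
        }
        printf("= %lukB total\n", total * kb);
        return 0;
    }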
2830 * 1 = order by ([node] distance, -zonetype)
2831 * 2 = order by (-zonetype, [node] distance)
2840 /* zonelist order in the kernel.
2858 * = "[nN]ode - order by node locality, then by zone within node
2859 * = "[zZ]one - order by zone, then by locality within zone
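For a two-node machine with Normal and DMA zones, the two policies quoted above would walk the zones roughly as follows (illustrative, not dumped from a real system):

    node order:  N0/Normal, N0/DMA, N1/Normal, N1/DMA
    zone order:  N0/Normal, N1/Normal, N0/DMA, N1/DMA

Node order stays on the local node even if that dips into its DMA zone; zone order exhausts every Normal zone first so the small DMA zone is touched last, matching the "DMA last!" comment at line 3184.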
3071 * This function detect ZONE_DMA/DMA32 size and configures zone order. in default_zonelist_order()
3085 * If any node has only lowmem, then node order in default_zonelist_order()
3139 int order = current_zonelist_order; in build_zonelists() local
3177 if (order == ZONELIST_ORDER_NODE) in build_zonelists()
3180 node_order[j++] = node; /* remember order */ in build_zonelists()
3183 if (order == ZONELIST_ORDER_ZONE) { in build_zonelists()
3184 /* calculate node order -- i.e., DMA last! */ in build_zonelists()
3326 * needs the percpu allocator in order to allocate its pagesets in __build_all_zonelists()
3384 printk("Built %i zonelists in %s order, mobility grouping %s. " in build_all_zonelists()
3504 * Reserve blocks are generally in place to help high-order atomic in setup_zone_migrate_reserve()
3626 int order, t; in zone_init_free_lists() local
3627 for_each_migratetype_order(order, t) { in zone_init_free_lists()
3628 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
3629 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
3682 * fragmented and becoming unavailable for high-order allocations. in zone_batchsize()
4015 * zones within a node are in order of monotonic increases memory addresses
4204 /* Return a sensible default order for the pageblock size. */
4214 static inline void __init set_pageblock_order(unsigned int order) in set_pageblock_order() argument
4221 * Assume the largest contiguous order of interest is a huge page. in set_pageblock_order()
4224 pageblock_order = order; in set_pageblock_order()
4234 static inline int pageblock_default_order(unsigned int order) in pageblock_default_order() argument
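When pageblock_order is tied to the huge page size, as the comment at line 4221 assumes, the arithmetic is simple: with 4 KiB base pages and 2 MiB huge pages, 2 MiB / 4 KiB = 512 = 1 << 9, so pageblock_order is 9 and migratetype bookkeeping is done in 2 MiB chunks. (These constants are an x86-64 example, not something stated in the hits above.)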
4350 * aligned but the node_mem_map endpoints must be in order in alloc_node_mem_map()
4887 * in order to balance the zones. in calculate_totalreserve_pages()
5221 /* Make sure we've got at least a 0-order allocation.. */ in alloc_large_system_hash()
5268 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n", in alloc_large_system_hash()
5518 int order, i; in __offline_isolated_pages() local
5538 order = page_order(page); in __offline_isolated_pages()
5541 pfn, 1 << order, end_pfn); in __offline_isolated_pages()
5545 zone->free_area[order].nr_free--; in __offline_isolated_pages()
5547 - (1UL << order)); in __offline_isolated_pages()
5548 for (i = 0; i < (1 << order); i++) in __offline_isolated_pages()
5550 pfn += (1 << order); in __offline_isolated_pages()
5562 int order; in is_free_buddy_page() local
5565 for (order = 0; order < MAX_ORDER; order++) { in is_free_buddy_page()
5566 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
5568 if (PageBuddy(page_head) && page_order(page_head) >= order) in is_free_buddy_page()
5573 return order < MAX_ORDER; in is_free_buddy_page()
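is_free_buddy_page (lines 5562-5573) checks every order in turn: masking the pfn with (1 << order) - 1 locates the candidate block head, and the page lies inside a free block iff some head is PageBuddy with at least that order. The masking step is easy to verify by hand; the pfn below is arbitrary:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pfn = 0x12345;   /* arbitrary page frame number */
        unsigned int order;

        for (order = 0; order < 4; order++)
            printf("order %u: candidate block head pfn = 0x%lx\n",
                   order, pfn & ~((1UL << order) - 1));
        return 0;
    }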