Lines matching full:order in mm/compaction.c
53 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
54 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
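These two macros clamp a pfn to the boundaries of an order-sized, naturally aligned block. A minimal user-space sketch of the same arithmetic, with round_down() and ALIGN() re-implemented locally for illustration (the toy_* names and sample values are made up; both helpers require a power-of-two alignment):

#include <stdio.h>

/* Local stand-ins for the kernel's round_down()/ALIGN() macros. */
#define toy_round_down(x, a)            ((x) & ~((a) - 1))
#define toy_align(x, a)                 (((x) + (a) - 1) & ~((a) - 1))

#define toy_block_start_pfn(pfn, order) toy_round_down((pfn), 1UL << (order))
#define toy_block_end_pfn(pfn, order)   toy_align((pfn) + 1, 1UL << (order))

int main(void)
{
        unsigned long pfn = 1234567;
        unsigned int order = 9;         /* a common pageblock order */

        /* Prints the half-open [start, end) pfn range of pfn's block. */
        printf("[%lu, %lu)\n",
               toy_block_start_pfn(pfn, order),
               toy_block_end_pfn(pfn, order));
        return 0;
}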
57 * Page order with respect to which proactive compaction
87 unsigned int i, order, nr_pages; in split_map_pages() local
94 order = page_private(page); in split_map_pages()
95 nr_pages = 1 << order; in split_map_pages()
97 post_alloc_hook(page, order, __GFP_MOVABLE); in split_map_pages()
98 if (order) in split_map_pages()
99 split_page(page, order); in split_map_pages()
153 static void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
158 if (order < zone->compact_order_failed) in defer_compaction()
159 zone->compact_order_failed = order; in defer_compaction()
164 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
168 static bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
172 if (order < zone->compact_order_failed) in compaction_deferred()
181 trace_mm_compaction_deferred(zone, order); in compaction_deferred()
187 * Update defer tracking counters after successful compaction of given order,
191 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument
198 if (order >= zone->compact_order_failed) in compaction_defer_reset()
199 zone->compact_order_failed = order + 1; in compaction_defer_reset()
201 trace_mm_compaction_defer_reset(zone, order); in compaction_defer_reset()
205 static bool compaction_restarting(struct zone *zone, int order) in compaction_restarting() argument
207 if (order < zone->compact_order_failed) in compaction_restarting()
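defer_compaction(), compaction_deferred() and compaction_defer_reset() above implement a per-zone exponential back-off: each failure widens the window of skipped requests and remembers the lowest failed order, and a success at or above that order clears the state (compaction_restarting() consults the same counters). A simplified user-space model of that bookkeeping; the struct, constant and toy_* names are invented here and only mirror the kernel fields, they are not the real struct zone:

#include <stdbool.h>

#define TOY_MAX_DEFER_SHIFT     6       /* mirrors COMPACT_MAX_DEFER_SHIFT */

struct toy_zone {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
};

/* A failure widens the back-off window and records the failed order. */
void toy_defer_compaction(struct toy_zone *z, int order)
{
        z->compact_considered = 0;
        if (z->compact_defer_shift < TOY_MAX_DEFER_SHIFT)
                z->compact_defer_shift++;
        if (order < z->compact_order_failed)
                z->compact_order_failed = order;
}

/* Skip requests at or above the failed order until the window elapses. */
bool toy_compaction_deferred(struct toy_zone *z, int order)
{
        unsigned int defer_limit = 1U << z->compact_defer_shift;

        if (order < z->compact_order_failed)
                return false;
        if (++z->compact_considered >= defer_limit)
                return false;
        return true;
}

/* A success at this order clears the back-off state. */
void toy_compaction_defer_reset(struct toy_zone *z, int order)
{
        z->compact_considered = 0;
        z->compact_defer_shift = 0;
        if (order >= z->compact_order_failed)
                z->compact_order_failed = order + 1;
}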
286 * released. It is always pointless to compact pages of such order (if they are
595 unsigned int order; in isolate_freepages_block() local
626 const unsigned int order = compound_order(page); in isolate_freepages_block() local
628 if (blockpfn + (1UL << order) <= end_pfn) { in isolate_freepages_block()
629 blockpfn += (1UL << order) - 1; in isolate_freepages_block()
630 page += (1UL << order) - 1; in isolate_freepages_block()
631 nr_scanned += (1UL << order) - 1; in isolate_freepages_block()
650 /* Found a free page, will break it into order-0 pages */ in isolate_freepages_block()
651 order = buddy_order(page); in isolate_freepages_block()
652 isolated = __isolate_free_page(page, order); in isolate_freepages_block()
655 set_page_private(page, order); in isolate_freepages_block()
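The set_page_private(page, order) call above is the hand-off to split_map_pages() shown near the top of the listing (source lines 94-99): an isolated free page carries its buddy order in the private field until it is split back into order-0 pages. A toy model of that round trip; the struct and field are invented for illustration and are not the kernel's struct page:

#include <stdio.h>

struct toy_page {
        unsigned long private;
};

/* Isolation side: remember the buddy order the page was taken at. */
void toy_isolate(struct toy_page *page, unsigned int order)
{
        page->private = order;          /* set_page_private(page, order) */
}

/* Split side: read the order back and expand into order-0 pages. */
unsigned long toy_split(struct toy_page *page)
{
        unsigned int order = page->private;     /* page_private(page) */

        return 1UL << order;                    /* nr_pages handed out */
}

int main(void)
{
        struct toy_page p;

        toy_isolate(&p, 3);
        printf("%lu order-0 pages\n", toy_split(&p));   /* prints 8 */
        return 0;
}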
741 * is more than pageblock order. In this case, we adjust in isolate_freepages_range()
768 * pageblock_nr_pages for some non-negative n. (Max order in isolate_freepages_range()
880 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
890 * previous order-aligned block, and did not skip it due in isolate_migratepages_block()
898 * We failed to isolate in the previous order-aligned in isolate_migratepages_block()
901 * next_skip_pfn by 1 << order, as low_pfn might have in isolate_migratepages_block()
903 * a compound or a high-order buddy page in the in isolate_migratepages_block()
906 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
990 * Skip if free. We read page order here without zone lock in isolate_migratepages_block()
1000 * a valid page order. Consider only values in the in isolate_migratepages_block()
1001 * valid order range to prevent low_pfn overflow. in isolate_migratepages_block()
1019 const unsigned int order = compound_order(page); in isolate_migratepages_block() local
1021 if (likely(order <= MAX_PAGE_ORDER)) { in isolate_migratepages_block()
1022 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
1023 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
1224 * instead of migrating, as we cannot form the cc->order buddy in isolate_migratepages_block()
1243 next_skip_pfn += 1UL << cc->order; in isolate_migratepages_block()
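The next_skip_pfn bookkeeping above is an optimization: once isolation fails anywhere inside a cc->order aligned block, the rest of that block can no longer form the requested buddy, so the migrate scanner jumps straight to the next block boundary. A stand-alone illustration with block_end_pfn() expanded by hand and made-up values:

#include <stdio.h>

/* Same arithmetic as block_end_pfn(pfn, order) at the top of the listing. */
#define toy_next_boundary(pfn, order) \
        ((((pfn) + 1) + (1UL << (order)) - 1) & ~((1UL << (order)) - 1))

int main(void)
{
        unsigned long low_pfn = 4100;   /* isolation failed at this pfn */
        unsigned int order = 4;         /* cc->order: 16-page blocks */

        /* Skip the remainder of the current block and resume scanning. */
        printf("resume at pfn %lu\n", toy_next_boundary(low_pfn, order));
        return 0;
}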
1371 * pageblock, so it's not worth to check order for valid range. in suitable_migration_target()
1469 static int next_search_order(struct compact_control *cc, int order) in next_search_order() argument
1471 order--; in next_search_order()
1472 if (order < 0) in next_search_order()
1473 order = cc->order - 1; in next_search_order()
1476 if (order == cc->search_order) { in next_search_order()
1479 cc->search_order = cc->order - 1; in next_search_order()
1483 return order; in next_search_order()
1495 int order; in fast_isolate_freepages() local
1497 /* Full compaction passes in a negative order */ in fast_isolate_freepages()
1498 if (cc->order <= 0) in fast_isolate_freepages()
1522 * Search starts from the last successful isolation order or the next in fast_isolate_freepages()
1523 * order to search after a previous failure in fast_isolate_freepages()
1525 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); in fast_isolate_freepages()
1527 for (order = cc->search_order; in fast_isolate_freepages()
1528 !page && order >= 0; in fast_isolate_freepages()
1529 order = next_search_order(cc, order)) { in fast_isolate_freepages()
1530 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1555 cc->search_order = order; in fast_isolate_freepages()
1584 if (__isolate_free_page(page, order)) { in fast_isolate_freepages()
1585 set_page_private(page, order); in fast_isolate_freepages()
1586 nr_isolated = 1 << order; in fast_isolate_freepages()
1594 order = cc->search_order + 1; in fast_isolate_freepages()
1606 * Smaller scan on next order so the total scan is related in fast_isolate_freepages()
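Taken together, the search_order handling above visits the free lists below cc->order starting from the last successful isolation order, walking downward and wrapping around to cc->order - 1 until the cycle closes. A simplified walk of that visiting sequence (a plain loop, not the kernel's next_search_order(); the starting values are made up):

#include <stdio.h>

int main(void)
{
        int target_order = 9;   /* cc->order */
        int search_order = 3;   /* last order that isolated successfully */

        /* Visit each order below target_order once, starting at search_order. */
        for (int order = search_order, visited = 0;
             visited < target_order; visited++) {
                printf("scan free_area[%d]\n", order);
                if (--order < 0)
                        order = target_order - 1;       /* wrap around */
        }
        return 0;
}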
1873 int order; in fast_find_migrateblock() local
1900 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) in fast_find_migrateblock()
1923 for (order = cc->order - 1; in fast_find_migrateblock()
1924 order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; in fast_find_migrateblock()
1925 order--) { in fast_find_migrateblock()
1926 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
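fast_find_migrateblock() only probes the costly free lists below the target order and gives up after a small scan budget so the fast path stays cheap. A sketch of that bounded descent; the budget parameter here is a made-up placeholder for the kernel's real scan limit, and the body only counts probes:

#define TOY_COSTLY_ORDER        3       /* mirrors PAGE_ALLOC_COSTLY_ORDER */

/* Walk free lists from just below the target order down to the costly
 * threshold, stopping once the scan budget is spent. Returns how many
 * lists were probed; the real code looks for a reusable source block. */
int toy_fast_find(int target_order, int limit)
{
        int nr_scanned = 0;

        for (int order = target_order - 1;
             order >= TOY_COSTLY_ORDER && nr_scanned < limit;
             order--)
                nr_scanned++;

        return nr_scanned;
}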
2090 * order == -1 is expected when compacting proactively via
2095 static inline bool is_via_compact_memory(int order) in is_via_compact_memory() argument
2097 return order == -1; in is_via_compact_memory()
2194 unsigned int order; in __compact_finished() local
2237 if (is_via_compact_memory(cc->order)) in __compact_finished()
2251 for (order = cc->order; order < NR_PAGE_ORDERS; order++) { in __compact_finished()
2252 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2269 if (find_suitable_fallback(area, order, migratetype, in __compact_finished()
2294 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
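The loop at source lines 2251-2269 is the termination test: compaction is finished as soon as some free list at or above the requested order already holds a page the allocation's migratetype can use, directly or via a suitable fallback. A reduced version of that check; the per-order free counts are invented and fallback handling is folded into a single condition:

#include <stdbool.h>

#define TOY_NR_ORDERS   11      /* stands in for NR_PAGE_ORDERS */

/* Done when any list at or above 'order' has a usable page. */
bool toy_compact_finished(const unsigned long nr_free[TOY_NR_ORDERS],
                          unsigned int order)
{
        for (unsigned int o = order; o < TOY_NR_ORDERS; o++)
                if (nr_free[o])
                        return true;
        return false;
}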
2301 static bool __compaction_suitable(struct zone *zone, int order, in __compaction_suitable() argument
2307 * Watermarks for order-0 must be met for compaction to be able to in __compaction_suitable()
2320 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? in __compaction_suitable()
2322 watermark += compact_gap(order); in __compaction_suitable()
2330 bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) in compaction_suitable() argument
2335 suitable = __compaction_suitable(zone, order, highest_zoneidx, in compaction_suitable()
2342 * watermarks, but we already failed the high-order watermark check in compaction_suitable()
2355 if (order > PAGE_ALLOC_COSTLY_ORDER) { in compaction_suitable()
2356 int fragindex = fragmentation_index(zone, order); in compaction_suitable()
2368 trace_mm_compaction_suitable(zone, order, compact_result); in compaction_suitable()
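__compaction_suitable() asks a narrower question than the allocation itself: are there enough order-0 free pages above the base watermark, plus a gap of compact_gap(order) pages, for the migration scanners to work with; for costly orders the fragmentation index then decides whether the shortage is really fragmentation (worth compacting) or a plain lack of memory (reclaim instead). A stripped-down model of the free-page side of that test, assuming compact_gap(order) is 2 << order as in current kernels and leaving the fragindex handling out:

#include <stdbool.h>

/* Room for both the pages being migrated and their destinations. */
unsigned long toy_compact_gap(unsigned int order)
{
        return 2UL << order;
}

/* Free-page side of the suitability test; fragindex handling omitted. */
bool toy_compaction_suitable(unsigned long nr_free, unsigned long watermark,
                             unsigned int order)
{
        return nr_free >= watermark + toy_compact_gap(order);
}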
2373 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, in compaction_zonelist_suitable() argument
2389 * want to trash just for a single high order allocation which in compaction_zonelist_suitable()
2393 available = zone_reclaimable_pages(zone) / order; in compaction_zonelist_suitable()
2395 if (__compaction_suitable(zone, order, ac->highest_zoneidx, in compaction_zonelist_suitable()
2404 * Should we do compaction for the target allocation order?
2405 * Return COMPACT_SUCCESS if allocation for target order can be already
2407 * Return COMPACT_SKIPPED if compaction for target order is likely to fail
2408 * Return COMPACT_CONTINUE if compaction for target order should be run
2411 compaction_suit_allocation_order(struct zone *zone, unsigned int order, in compaction_suit_allocation_order() argument
2417 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, in compaction_suit_allocation_order()
2421 if (!compaction_suitable(zone, order, highest_zoneidx)) in compaction_suit_allocation_order()
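compaction_suit_allocation_order() reduces to a three-way decision matching the comment above, and only the COMPACT_CONTINUE case lets compact_zone() keep going. A compact sketch of that ladder with the two checks passed in as booleans (toy names, simplified bodies):

#include <stdbool.h>

enum toy_result { TOY_SKIPPED, TOY_CONTINUE, TOY_SUCCESS };

/* watermark_ok: the allocation would already succeed as-is.
 * suitable:     enough free pages exist for compaction to operate. */
enum toy_result toy_suit_allocation_order(bool watermark_ok, bool suitable)
{
        if (watermark_ok)
                return TOY_SUCCESS;     /* nothing to do */
        if (!suitable)
                return TOY_SKIPPED;     /* reclaim first; compaction would fail */
        return TOY_CONTINUE;            /* worth running compaction */
}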
2451 if (!is_via_compact_memory(cc->order)) { in compact_zone()
2452 ret = compaction_suit_allocation_order(cc->zone, cc->order, in compact_zone()
2463 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2545 * previous cc->order aligned block. in compact_zone()
2593 if (cc->order == COMPACTION_HPAGE_ORDER) in compact_zone()
2609 * cc->order aligned block where we migrated from? If yes, in compact_zone()
2614 if (cc->order > 0 && last_migrated_pfn) { in compact_zone()
2616 block_start_pfn(cc->migrate_pfn, cc->order); in compact_zone()
2657 static enum compact_result compact_zone_order(struct zone *zone, int order, in compact_zone_order() argument
2664 .order = order, in compact_zone_order()
2665 .search_order = order, in compact_zone_order()
2712 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2714 * @order: The order of the current allocation
2722 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, in try_to_compact_pages() argument
2733 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); in try_to_compact_pages()
2741 && compaction_deferred(zone, order)) { in try_to_compact_pages()
2746 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
2758 compaction_defer_reset(zone, order, false); in try_to_compact_pages()
2770 defer_compaction(zone, order); in try_to_compact_pages()
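try_to_compact_pages() ties the defer machinery together per zone: deferred zones are skipped for non-forcing priorities, a success resets the back-off, and a failed full attempt extends it. A caller-side miniature reusing the toy_* helpers sketched after the defer-tracking lines above; toy_run_compaction() is a hypothetical stand-in for compact_zone_order(), not a real kernel function:

#include <stdbool.h>

struct toy_zone;                                /* from the defer sketch above */
bool toy_compaction_deferred(struct toy_zone *z, int order);
void toy_compaction_defer_reset(struct toy_zone *z, int order);
void toy_defer_compaction(struct toy_zone *z, int order);
bool toy_run_compaction(struct toy_zone *z, int order); /* hypothetical */

void toy_try_one_zone(struct toy_zone *z, int order)
{
        if (toy_compaction_deferred(z, order))
                return;                         /* still inside the back-off window */

        if (toy_run_compaction(z, order))
                toy_compaction_defer_reset(z, order);   /* success: clear back-off */
        else
                toy_defer_compaction(z, order);         /* failure: widen back-off */
}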
2799 .order = -1, in proactive_compact_node()
2830 .order = -1, in compact_node()
2972 * order is allocatable. in kcompactd_do_work()
2977 .order = pgdat->kcompactd_max_order, in kcompactd_do_work()
2986 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, in kcompactd_do_work()
2997 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
3001 cc.order, zoneid, ALLOC_WMARK_MIN); in kcompactd_do_work()
3012 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
3017 * order >= cc.order. This is ratelimited by the in kcompactd_do_work()
3026 defer_compaction(zone, cc.order); in kcompactd_do_work()
3037 * the requested order/highest_zoneidx in case it was higher/tighter in kcompactd_do_work()
3040 if (pgdat->kcompactd_max_order <= cc.order) in kcompactd_do_work()
3046 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) in wakeup_kcompactd() argument
3048 if (!order) in wakeup_kcompactd()
3051 if (pgdat->kcompactd_max_order < order) in wakeup_kcompactd()
3052 pgdat->kcompactd_max_order = order; in wakeup_kcompactd()
3067 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, in wakeup_kcompactd()
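wakeup_kcompactd() and kcompactd_do_work() communicate through pgdat->kcompactd_max_order: failed allocations latch the highest order they needed, the worker compacts for that order, and it drops the latch only if no larger request arrived in the meantime (source line 3040 above). A single-threaded toy of that handshake; the real fields live in pg_data_t and are serialized differently:

#include <stdio.h>

static int toy_kcompactd_max_order;

/* Allocation side: record the largest order that could not be satisfied. */
void toy_wakeup_kcompactd(int order)
{
        if (!order)
                return;
        if (toy_kcompactd_max_order < order)
                toy_kcompactd_max_order = order;
}

/* Worker side: compact for the latched order, then clear the latch unless
 * a bigger request raced in while the work was running. */
void toy_kcompactd_do_work(void)
{
        int order = toy_kcompactd_max_order;    /* cc.order for this pass */

        printf("compacting for order %d\n", order);

        if (toy_kcompactd_max_order <= order)
                toy_kcompactd_max_order = 0;
}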