Lines matching full:zone in mm/compaction.c
59 * the "fragmentation score" of a node/zone.
153 static void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
155 zone->compact_considered = 0; in defer_compaction()
156 zone->compact_defer_shift++; in defer_compaction()
158 if (order < zone->compact_order_failed) in defer_compaction()
159 zone->compact_order_failed = order; in defer_compaction()
161 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
162 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
164 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
168 static bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
170 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
172 if (order < zone->compact_order_failed) in compaction_deferred()
176 if (++zone->compact_considered >= defer_limit) { in compaction_deferred()
177 zone->compact_considered = defer_limit; in compaction_deferred()
181 trace_mm_compaction_deferred(zone, order); in compaction_deferred()
191 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument
195 zone->compact_considered = 0; in compaction_defer_reset()
196 zone->compact_defer_shift = 0; in compaction_defer_reset()
198 if (order >= zone->compact_order_failed) in compaction_defer_reset()
199 zone->compact_order_failed = order + 1; in compaction_defer_reset()
201 trace_mm_compaction_defer_reset(zone, order); in compaction_defer_reset()
205 static bool compaction_restarting(struct zone *zone, int order) in compaction_restarting() argument
207 if (order < zone->compact_order_failed) in compaction_restarting()
210 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && in compaction_restarting()
211 zone->compact_considered >= 1UL << zone->compact_defer_shift; in compaction_restarting()
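Read together, the defer_compaction() / compaction_deferred() / compaction_defer_reset() / compaction_restarting() matches above implement an exponential backoff on repeated compaction failures: each failure clears the attempt counter and doubles the number of attempts to skip (capped at COMPACT_MAX_DEFER_SHIFT), while a success clears the backoff and marks that order and lower orders as not having failed. The standalone C sketch below models only those counters, under assumed simplifications (no alloc_success flag, a hard-coded cap); it illustrates the scheme rather than reproducing the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* assumed cap for this sketch */

/* Simplified stand-in for the deferral fields of struct zone. */
struct zone_defer {
	unsigned int compact_considered;	/* attempts skipped so far  */
	unsigned int compact_defer_shift;	/* log2 of attempts to skip */
	int          compact_order_failed;	/* lowest order that failed */
};

/* Record a failure: restart the skip counter and widen the backoff window. */
static void defer_compaction(struct zone_defer *z, int order)
{
	z->compact_considered = 0;
	z->compact_defer_shift++;
	if (order < z->compact_order_failed)
		z->compact_order_failed = order;
	if (z->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		z->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Should this attempt be skipped?  True until 2^shift attempts have passed. */
static bool compaction_deferred(struct zone_defer *z, int order)
{
	unsigned long defer_limit = 1UL << z->compact_defer_shift;

	if (order < z->compact_order_failed)
		return false;			/* smaller orders never failed */
	if (++z->compact_considered >= defer_limit) {
		z->compact_considered = defer_limit;
		return false;			/* window elapsed, try again   */
	}
	return true;
}

/* Record a success: clear the backoff and re-arm orders up to this one. */
static void compaction_defer_reset(struct zone_defer *z, int order)
{
	z->compact_considered = 0;
	z->compact_defer_shift = 0;
	if (order >= z->compact_order_failed)
		z->compact_order_failed = order + 1;
}

int main(void)
{
	struct zone_defer z = { .compact_order_failed = 3 };

	defer_compaction(&z, 3);			/* first failure at order 3 */
	printf("%d\n", compaction_deferred(&z, 3));	/* 1: still deferred        */
	printf("%d\n", compaction_deferred(&z, 3));	/* 0: window of 2 elapsed   */
	compaction_defer_reset(&z, 3);
	return 0;
}

compaction_restarting() is the complementary check: once the shift has hit the cap and the window has elapsed, compact_zone() resets the pageblock skip hints (see the __reset_isolation_suitable() matches) instead of merely retrying.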
224 static void reset_cached_positions(struct zone *zone) in reset_cached_positions() argument
226 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; in reset_cached_positions()
227 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; in reset_cached_positions()
228 zone->compact_cached_free_pfn = in reset_cached_positions()
229 pageblock_start_pfn(zone_end_pfn(zone) - 1); in reset_cached_positions()
303 __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, in __reset_isolation_pfn() argument
313 if (zone != page_zone(page)) in __reset_isolation_pfn()
333 /* Ensure the start of the pageblock or zone is online and valid */ in __reset_isolation_pfn()
335 block_pfn = max(block_pfn, zone->zone_start_pfn); in __reset_isolation_pfn()
342 /* Ensure the end of the pageblock or zone is online and valid */ in __reset_isolation_pfn()
344 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); in __reset_isolation_pfn()
376 static void __reset_isolation_suitable(struct zone *zone) in __reset_isolation_suitable() argument
378 unsigned long migrate_pfn = zone->zone_start_pfn; in __reset_isolation_suitable()
379 unsigned long free_pfn = zone_end_pfn(zone) - 1; in __reset_isolation_suitable()
386 if (!zone->compact_blockskip_flush) in __reset_isolation_suitable()
389 zone->compact_blockskip_flush = false; in __reset_isolation_suitable()
392 * Walk the zone and update pageblock skip information. Source looks in __reset_isolation_suitable()
402 if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) && in __reset_isolation_suitable()
406 zone->compact_init_migrate_pfn = reset_migrate; in __reset_isolation_suitable()
407 zone->compact_cached_migrate_pfn[0] = reset_migrate; in __reset_isolation_suitable()
408 zone->compact_cached_migrate_pfn[1] = reset_migrate; in __reset_isolation_suitable()
412 if (__reset_isolation_pfn(zone, free_pfn, free_set, true) && in __reset_isolation_suitable()
416 zone->compact_init_free_pfn = reset_free; in __reset_isolation_suitable()
417 zone->compact_cached_free_pfn = reset_free; in __reset_isolation_suitable()
423 zone->compact_cached_migrate_pfn[0] = migrate_pfn; in __reset_isolation_suitable()
424 zone->compact_cached_migrate_pfn[1] = migrate_pfn; in __reset_isolation_suitable()
425 zone->compact_cached_free_pfn = free_pfn; in __reset_isolation_suitable()
434 struct zone *zone = &pgdat->node_zones[zoneid]; in reset_isolation_suitable() local
435 if (!populated_zone(zone)) in reset_isolation_suitable()
438 __reset_isolation_suitable(zone); in reset_isolation_suitable()
463 struct zone *zone = cc->zone; in update_cached_migrate() local
472 if (pfn > zone->compact_cached_migrate_pfn[0]) in update_cached_migrate()
473 zone->compact_cached_migrate_pfn[0] = pfn; in update_cached_migrate()
475 pfn > zone->compact_cached_migrate_pfn[1]) in update_cached_migrate()
476 zone->compact_cached_migrate_pfn[1] = pfn; in update_cached_migrate()
486 struct zone *zone = cc->zone; in update_pageblock_skip() local
493 if (pfn < zone->compact_cached_free_pfn) in update_pageblock_skip()
494 zone->compact_cached_free_pfn = pfn; in update_pageblock_skip()
613 && compact_unlock_should_abort(&cc->zone->lock, flags, in isolate_freepages_block()
642 locked = compact_lock_irqsave(&cc->zone->lock, in isolate_freepages_block()
678 spin_unlock_irqrestore(&cc->zone->lock, flags); in isolate_freepages_block()
712 * Non-free pages, invalid PFNs, or zone boundaries within the
729 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_freepages_range()
730 block_start_pfn = cc->zone->zone_start_pfn; in isolate_freepages_range()
752 block_end_pfn, cc->zone)) in isolate_freepages_range()
789 pg_data_t *pgdat = cc->zone->zone_pgdat; in too_many_isolated()
840 pg_data_t *pgdat = cc->zone->zone_pgdat; in isolate_migratepages_block()
941 low_pfn == cc->zone->zone_start_pfn)) { in isolate_migratepages_block()
990 * Skip if free. We read page order here without zone lock in isolate_migratepages_block()
1313 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages_range()
1314 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages_range()
1324 block_end_pfn, cc->zone)) in isolate_migratepages_range()
1369 * We are checking page_order without zone->lock taken. But in suitable_migration_target()
1454 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); in fast_isolate_around()
1455 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); in fast_isolate_around()
1457 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); in fast_isolate_around()
1505 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { in fast_isolate_freepages()
1530 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1540 spin_lock_irqsave(&cc->zone->lock, flags); in fast_isolate_freepages()
1551 cc->zone->zone_start_pfn); in fast_isolate_freepages()
1599 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_isolate_freepages()
1631 zone_end_pfn(cc->zone)), in fast_isolate_freepages()
1632 cc->zone); in fast_isolate_freepages()
1642 if (highest && highest >= cc->zone->compact_cached_free_pfn) { in fast_isolate_freepages()
1644 cc->zone->compact_cached_free_pfn = highest; in fast_isolate_freepages()
1661 struct zone *zone = cc->zone; in isolate_freepages() local
1677 * successfully isolated from, zone-cached value, or the end of the in isolate_freepages()
1678 * zone when isolating for the first time. For looping we also need in isolate_freepages()
1682 * zone which ends in the middle of a pageblock. in isolate_freepages()
1689 zone_end_pfn(zone)); in isolate_freepages()
1705 * This can iterate a massively long zone without finding any in isolate_freepages()
1712 zone); in isolate_freepages()
1805 * freelist. All pages on the freelist are from the same zone, so there is no
1888 * If the migrate_pfn is not at the start of a zone or the start in fast_find_migrateblock()
1892 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) in fast_find_migrateblock()
1919 if (cc->migrate_pfn != cc->zone->zone_start_pfn) in fast_find_migrateblock()
1926 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
1934 spin_lock_irqsave(&cc->zone->lock, flags); in fast_find_migrateblock()
1960 if (pfn < cc->zone->zone_start_pfn) in fast_find_migrateblock()
1961 pfn = cc->zone->zone_start_pfn; in fast_find_migrateblock()
1967 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_find_migrateblock()
2000 * Start at where we last stopped, or beginning of the zone as in isolate_migratepages()
2006 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages()
2007 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages()
2030 * This can potentially iterate a massively long zone with in isolate_migratepages()
2038 block_end_pfn, cc->zone); in isolate_migratepages()
2056 low_pfn == cc->zone->zone_start_pfn) && in isolate_migratepages()
2118 * A zone's fragmentation score is the external fragmentation wrt the
2121 static unsigned int fragmentation_score_zone(struct zone *zone) in fragmentation_score_zone() argument
2123 return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); in fragmentation_score_zone()
2127 * A weighted zone's fragmentation score is the external fragmentation
2128 * wrt the COMPACTION_HPAGE_ORDER scaled by the zone's size. It
2136 static unsigned int fragmentation_score_zone_weighted(struct zone *zone) in fragmentation_score_zone_weighted() argument
2140 score = zone->present_pages * fragmentation_score_zone(zone); in fragmentation_score_zone_weighted()
2141 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); in fragmentation_score_zone_weighted()
2157 struct zone *zone; in fragmentation_score_node() local
2159 zone = &pgdat->node_zones[zoneid]; in fragmentation_score_node()
2160 if (!populated_zone(zone)) in fragmentation_score_node()
2162 score += fragmentation_score_zone_weighted(zone); in fragmentation_score_node()
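The fragmentation_score_* matches describe a two-level calculation: a zone's raw score is its external fragmentation at COMPACTION_HPAGE_ORDER (roughly, the share of free memory sitting in blocks too small for that order, 0-100), and the node score sums the zones' raw scores weighted by each zone's share of the node's present pages. As a rough worked example with made-up sizes, a 3 GiB zone with raw score 40 on a 4 GiB node contributes about 40 * 3/4 = 30. A self-contained sketch of the weighting follows; the struct names and field layout are invented stand-ins for struct zone / pg_data_t.

#include <stdio.h>

/* Hypothetical stand-ins for the fields used by the weighted score. */
struct znode;
struct zzone {
	unsigned long present_pages;
	unsigned int  raw_score;	/* extfrag_for_order() result, 0..100 */
	struct znode *node;
};
struct znode {
	unsigned long node_present_pages;
	struct zzone *zones;
	int           nr_zones;
};

/* Zone score scaled by the zone's share of the node's present pages. */
static unsigned int frag_score_zone_weighted(const struct zzone *z)
{
	unsigned long long score =
		(unsigned long long)z->present_pages * z->raw_score;

	/* "+ 1" avoids a divide-by-zero on an empty node, as in the listing. */
	return (unsigned int)(score / (z->node->node_present_pages + 1));
}

/* Node score: sum of the weighted scores of its populated zones. */
static unsigned int frag_score_node(const struct znode *n)
{
	unsigned int score = 0;

	for (int i = 0; i < n->nr_zones; i++)
		if (n->zones[i].present_pages)
			score += frag_score_zone_weighted(&n->zones[i]);
	return score;
}

int main(void)
{
	struct znode node = { .node_present_pages = 1048576 };	/* 4 GiB in 4 KiB pages */
	struct zzone zones[2] = {
		{ .present_pages = 786432, .raw_score = 40, .node = &node },	/* 3 GiB */
		{ .present_pages = 262144, .raw_score = 80, .node = &node },	/* 1 GiB */
	};

	node.zones = zones;
	node.nr_zones = 2;
	printf("node score = %u\n", frag_score_node(&node));	/* 48 (29 + 19 after integer division) */
	return 0;
}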
2201 reset_cached_positions(cc->zone); in __compact_finished()
2210 cc->zone->compact_blockskip_flush = true; in __compact_finished()
2222 pgdat = cc->zone->zone_pgdat; in __compact_finished()
2226 score = fragmentation_score_zone(cc->zone); in __compact_finished()
2252 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2294 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
2301 static bool __compaction_suitable(struct zone *zone, int order, in __compaction_suitable() argument
2321 low_wmark_pages(zone) : min_wmark_pages(zone); in __compaction_suitable()
2323 return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, in __compaction_suitable()
2328 * compaction_suitable: Is this suitable to run compaction on this zone now?
2330 bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) in compaction_suitable() argument
2335 suitable = __compaction_suitable(zone, order, highest_zoneidx, in compaction_suitable()
2336 zone_page_state(zone, NR_FREE_PAGES)); in compaction_suitable()
2356 int fragindex = fragmentation_index(zone, order); in compaction_suitable()
2368 trace_mm_compaction_suitable(zone, order, compact_result); in compaction_suitable()
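The compaction_suitable() matches reduce to two questions: does the zone hold enough order-0 free pages above a watermark to serve as migration targets, and, for costly orders, does the fragmentation index indicate the allocation is failing because of fragmentation (compaction can help) rather than a plain shortage of free memory (reclaim should run first)? Below is a hedged sketch of that decision; the watermark check and fragmentation index are assumed to have been computed by the caller, and the threshold models sysctl_extfrag_threshold.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3	/* as in the kernel's page allocator */

/*
 * Sketch of the compaction_suitable() decision flow from the listing:
 *  - the zone must hold enough order-0 free pages above a watermark to
 *    serve as migration targets (the __zone_watermark_ok() check);
 *  - for costly orders, a low fragmentation index means free memory is
 *    simply scarce, so compaction is unlikely to help and reclaim should
 *    run first.
 */
static bool compaction_worth_trying(bool watermark_ok, int order,
				    int fragindex, int extfrag_threshold)
{
	if (!watermark_ok)
		return false;

	if (order > PAGE_ALLOC_COSTLY_ORDER &&
	    fragindex >= 0 && fragindex <= extfrag_threshold)
		return false;	/* failure is from lack of memory, not fragmentation */

	return true;
}

int main(void)
{
	/* Order-9 request, watermark met, fragindex 200 <= threshold 500:
	 * the sketch says reclaim rather than compaction (prints 0). */
	printf("%d\n", compaction_worth_trying(true, 9, 200, 500));
	/* Same request with fragindex 900: fragmentation is the problem (prints 1). */
	printf("%d\n", compaction_worth_trying(true, 9, 900, 500));
	return 0;
}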
2376 struct zone *zone; in compaction_zonelist_suitable() local
2380 * Make sure at least one zone would pass __compaction_suitable if we continue in compaction_zonelist_suitable()
2383 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in compaction_zonelist_suitable()
2393 available = zone_reclaimable_pages(zone) / order; in compaction_zonelist_suitable()
2394 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in compaction_zonelist_suitable()
2395 if (__compaction_suitable(zone, order, ac->highest_zoneidx, in compaction_zonelist_suitable()
2411 compaction_suit_allocation_order(struct zone *zone, unsigned int order, in compaction_suit_allocation_order() argument
2416 watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in compaction_suit_allocation_order()
2417 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, in compaction_suit_allocation_order()
2421 if (!compaction_suitable(zone, order, highest_zoneidx)) in compaction_suit_allocation_order()
2431 unsigned long start_pfn = cc->zone->zone_start_pfn; in compact_zone()
2432 unsigned long end_pfn = zone_end_pfn(cc->zone); in compact_zone()
2439 * These counters track activities during zone compaction. Initialize in compact_zone()
2440 * them before compacting a new zone. in compact_zone()
2452 ret = compaction_suit_allocation_order(cc->zone, cc->order, in compact_zone()
2463 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2464 __reset_isolation_suitable(cc->zone); in compact_zone()
2467 * Setup to move all movable pages to the end of the zone. Used cached in compact_zone()
2469 * want to compact the whole zone), but check that it is initialised in compact_zone()
2470 * by ensuring the values are within zone boundaries. in compact_zone()
2477 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; in compact_zone()
2478 cc->free_pfn = cc->zone->compact_cached_free_pfn; in compact_zone()
2481 cc->zone->compact_cached_free_pfn = cc->free_pfn; in compact_zone()
2485 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; in compact_zone()
2486 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; in compact_zone()
2489 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) in compact_zone()
2504 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; in compact_zone()
2538 cc->zone->compact_cached_migrate_pfn[1] = in compact_zone()
2539 cc->zone->compact_cached_migrate_pfn[0]; in compact_zone()
2550 last_migrated_pfn = max(cc->zone->zone_start_pfn, in compact_zone()
2619 lru_add_drain_cpu_zone(cc->zone); in compact_zone()
2640 * already reset to zone end in compact_finished() in compact_zone()
2642 if (free_pfn > cc->zone->compact_cached_free_pfn) in compact_zone()
2643 cc->zone->compact_cached_free_pfn = free_pfn; in compact_zone()
2657 static enum compact_result compact_zone_order(struct zone *zone, int order, in compact_zone_order() argument
2667 .zone = zone, in compact_zone_order()
2727 struct zone *zone; in try_to_compact_pages() local
2735 /* Compact each zone in the list */ in try_to_compact_pages()
2736 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in try_to_compact_pages()
2741 && compaction_deferred(zone, order)) { in try_to_compact_pages()
2746 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
2753 * We think the allocation will succeed in this zone, in try_to_compact_pages()
2756 * succeeds in this zone. in try_to_compact_pages()
2758 compaction_defer_reset(zone, order, false); in try_to_compact_pages()
2766 * We think that allocation won't succeed in this zone in try_to_compact_pages()
2770 defer_compaction(zone, order); in try_to_compact_pages()
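The try_to_compact_pages() matches spell out the per-zone policy: a zone still inside its backoff window is skipped, a zone where compaction is expected to satisfy the allocation has its deferral state reset, and a zone where compaction failed is deferred further. A hypothetical, self-contained rendering of that loop follows; the result codes and helpers are simplified stand-ins for the kernel's enum compact_result and the functions matched above.

#include <stdbool.h>
#include <stdio.h>

/* Simplified result codes; the kernel's enum compact_result is richer. */
enum cres { RES_SKIPPED, RES_DEFERRED, RES_FAILED, RES_SUCCESS };

struct zref { int id; bool deferred; bool will_succeed; };

/* Trivial stand-ins for compaction_deferred(), compact_zone_order(), etc. */
static bool zone_deferred(struct zref *z, int order)	{ (void)order; return z->deferred; }
static enum cres compact_one(struct zref *z, int order)
{ (void)order; return z->will_succeed ? RES_SUCCESS : RES_FAILED; }
static void defer(struct zref *z, int order)		{ (void)order; z->deferred = true; }
static void reset_defer(struct zref *z, int order)	{ (void)order; z->deferred = false; }

/* Sketch of the try_to_compact_pages() policy loop described by the comments. */
static enum cres compact_zonelist(struct zref *zones, int nr, int order, bool may_defer)
{
	enum cres rc = RES_SKIPPED;

	for (int i = 0; i < nr; i++) {
		struct zref *z = &zones[i];
		enum cres status;

		if (may_defer && zone_deferred(z, order)) {
			if (rc < RES_DEFERRED)
				rc = RES_DEFERRED;	/* backoff window still open */
			continue;
		}

		status = compact_one(z, order);
		if (status == RES_SUCCESS) {
			/* "We think the allocation will succeed in this zone": re-arm it. */
			reset_defer(z, order);
			return status;
		}
		if (status == RES_FAILED && may_defer)
			defer(z, order);	/* "allocation won't succeed": back off further */
		if (status > rc)
			rc = status;
	}
	return rc;
}

int main(void)
{
	struct zref zones[] = { { 0, true, false }, { 1, false, false }, { 2, false, true } };

	printf("%d\n", compact_zonelist(zones, 3, 4, true));	/* 3: zone 2 succeeded */
	return 0;
}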
2786 * Compact all zones within a node till each zone's fragmentation score
2792 * per-zone locks.
2797 struct zone *zone; in proactive_compact_node() local
2808 zone = &pgdat->node_zones[zoneid]; in proactive_compact_node()
2809 if (!populated_zone(zone)) in proactive_compact_node()
2812 cc.zone = zone; in proactive_compact_node()
2828 struct zone *zone; in compact_node() local
2840 zone = &pgdat->node_zones[zoneid]; in compact_node()
2841 if (!populated_zone(zone)) in compact_node()
2844 cc.zone = zone; in compact_node()
2948 struct zone *zone; in kcompactd_node_suitable() local
2953 zone = &pgdat->node_zones[zoneid]; in kcompactd_node_suitable()
2955 if (!populated_zone(zone)) in kcompactd_node_suitable()
2958 ret = compaction_suit_allocation_order(zone, in kcompactd_node_suitable()
2975 struct zone *zone; in kcompactd_do_work() local
2993 zone = &pgdat->node_zones[zoneid]; in kcompactd_do_work()
2994 if (!populated_zone(zone)) in kcompactd_do_work()
2997 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
3000 ret = compaction_suit_allocation_order(zone, in kcompactd_do_work()
3008 cc.zone = zone; in kcompactd_do_work()
3012 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
3016 * otherwise coalesce on the zone's free area for in kcompactd_do_work()
3020 drain_all_pages(zone); in kcompactd_do_work()
3026 defer_compaction(zone, cc.order); in kcompactd_do_work()
3113 * as the condition of the zone changing substantially in kcompactd()