Lines Matching full:order
58 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
61 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
64 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
65 * order-0 pages and then compact the zone
97 int order; member
101 * enough memory in total, i.e., the mode for high-order allocation.
374 * reclaim/compaction. Depending on the order, we will either set the in set_reclaim_mode()
375 * sync mode or just reclaim order-0 pages later. in set_reclaim_mode()
387 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) in set_reclaim_mode()
389 else if (sc->order && priority < DEF_PRIORITY - 2) in set_reclaim_mode()
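Taken together with the RECLAIM_MODE_* descriptions at lines 58-65, the two tests above decide whether a reclaim pass keeps lumpy reclaim / reclaim+compaction for a high-order request or falls back to order-0-only reclaim. A minimal userspace sketch of that decision, assuming the usual PAGE_ALLOC_COSTLY_ORDER of 3 and DEF_PRIORITY of 12, with the sync/async flag handling omitted:

    #include <stdbool.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3   /* costly-order threshold in kernels of this era */
    #define DEF_PRIORITY            12  /* initial scan priority; counts down under pressure */

    /* true: keep lumpy reclaim / reclaim+compaction for the high-order request;
     * false: reclaim only order-0 pages (RECLAIM_MODE_SINGLE). */
    static bool use_lumpy_or_compaction(int order, int priority)
    {
        if (order > PAGE_ALLOC_COSTLY_ORDER)
            return true;                /* costly allocation */
        if (order && priority < DEF_PRIORITY - 2)
            return true;                /* smaller high-order request, mounting pressure */
        return false;
    }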
421 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) in may_write_to_queue()
548 * Must be careful with the order of the tests. When someone has in __remove_mapping()
563 * Reversing the order of the tests ensures such a situation cannot in __remove_mapping()
1141 * @order: The caller's attempted allocation order
1150 unsigned long *nr_scanned, int order, isolate_mode_t mode, in isolate_lru_pages() argument
1197 if (!order) in isolate_lru_pages()
1201 * Attempt to take all pages in the order aligned region in isolate_lru_pages()
1204 * round the target page pfn down to the requested order in isolate_lru_pages()
1211 pfn = page_pfn & ~((1 << order) - 1); in isolate_lru_pages()
1212 end_pfn = pfn + (1 << order); in isolate_lru_pages()
1278 trace_mm_vmscan_lru_isolate(order, in isolate_lru_pages()
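The pfn arithmetic quoted at lines 1211-1212 rounds the target page's pfn down to the start of its order-aligned block and scans forward to the end of that block. A standalone demonstration of the same arithmetic (the pfn and order values are arbitrary examples):

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_pfn = 0x12345;   /* pfn of the page that was isolated */
        unsigned int order = 4;             /* caller wants a 2^4 = 16 page block */

        /* round down to the order-aligned block containing page_pfn */
        unsigned long pfn = page_pfn & ~((1UL << order) - 1);
        unsigned long end_pfn = pfn + (1UL << order);

        printf("scan pfns 0x%lx .. 0x%lx\n", pfn, end_pfn - 1);   /* 0x12340 .. 0x1234f */
        return 0;
    }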
1460 * This will stall high-order allocations noticeably. Only do that when really
1483 * For high-order allocations, there are two stall thresholds. in should_reclaim_stall()
1485 * order allocations such as stacks require the scanning in should_reclaim_stall()
1488 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) in should_reclaim_stall()
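A best-effort sketch of the two stall thresholds the comment describes, assuming DEF_PRIORITY is 12, PAGE_ALLOC_COSTLY_ORDER is 3, and that priority counts down from DEF_PRIORITY as reclaim passes fail: costly orders may stall on synchronous writeback almost immediately, while cheaper high-order requests such as kernel stacks stall only after the priority value has fallen to about a third of its starting point.

    #include <stdbool.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3
    #define DEF_PRIORITY            12

    static bool lumpy_should_stall(int order, int priority)
    {
        int stall_priority = (order > PAGE_ALLOC_COSTLY_ORDER)
                                ? DEF_PRIORITY        /* costly: stall right away */
                                : DEF_PRIORITY / 3;   /* e.g. stacks: stall much later */

        return priority <= stall_priority;
    }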
1537 &nr_scanned, sc->order, in shrink_inactive_list()
1715 &nr_scanned, sc->order, in shrink_active_list()
2014 * disruption to the system, a small number of order-0 pages continue to be
2058 pages_for_compaction = (2UL << sc->order); in should_continue_reclaim()
2067 switch (compaction_suitable(mz->zone, sc->order)) { in should_continue_reclaim()
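A simplified, hedged reading of the continuation test around line 2058: reclaim keeps going until roughly twice the requested block size has been reclaimed, unless the inactive lists no longer hold even that much, at which point compaction_suitable() decides whether compaction takes over. The parameter names below are illustrative, not the kernel's:

    #include <stdbool.h>

    static bool keep_reclaiming_for_compaction(int order,
                                               unsigned long nr_reclaimed,
                                               unsigned long inactive_lru_pages)
    {
        unsigned long pages_for_compaction = 2UL << order;   /* 2 * 2^order pages */

        /* not enough reclaimed yet, and the inactive lists can still supply it */
        return nr_reclaimed < pages_for_compaction &&
               inactive_lru_pages > pages_for_compaction;
    }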
2172 /* Returns true if compaction should go ahead for a high-order request */
2179 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER) in compaction_ready()
2191 watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order); in compaction_ready()
2202 if (!compaction_suitable(zone, sc->order)) in compaction_ready()
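The early exit at line 2179 means only costly-order requests can skip reclaim in favour of compaction, and line 2191 only allows that when free memory clears the high watermark plus a balance gap plus twice the requested block. A sketch under the assumption that a plain free-page comparison stands in for zone_watermark_ok_safe():

    #include <stdbool.h>

    static bool compaction_ready_sketch(unsigned long free_pages,
                                        unsigned long high_wmark,
                                        unsigned long balance_gap,
                                        int order)
    {
        unsigned long watermark = high_wmark + balance_gap + (2UL << order);

        return free_pages >= watermark;   /* enough headroom to compact instead */
    }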
2215 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
2225 * high-order allocation and compaction is ready to begin. This indicates to
2256 * non-zero order, only frequent costly order in shrink_zones()
2274 sc->order, sc->gfp_mask, in shrink_zones()
2428 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, in try_to_free_pages() argument
2438 .order = order, in try_to_free_pages()
2446 trace_mm_vmscan_direct_reclaim_begin(order, in try_to_free_pages()
2470 .order = 0, in mem_cgroup_shrink_node_zone()
2512 .order = 0, in try_to_free_mem_cgroup_pages()
2567 * pgdat_balanced is used when checking if a node is balanced for high-order
2596 static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining, in sleeping_prematurely() argument
2625 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), in sleeping_prematurely()
2633 * For high-order requests, the balanced zones must contain at least in sleeping_prematurely()
2634 * 25% of the node's pages for kswapd to sleep. For order-0, all zones in sleeping_prematurely()
2637 if (order) in sleeping_prematurely()
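The 25% rule the comment describes is what pgdat_balanced() (line 2567) supplies for the high-order case. A hedged illustration, with made-up parameter names standing in for the per-node counters the kernel walks:

    #include <stdbool.h>

    static bool node_balanced_enough(int order, bool all_zones_ok,
                                     unsigned long balanced_pages,
                                     unsigned long node_present_pages)
    {
        if (order)                                              /* high-order request */
            return balanced_pages > node_present_pages / 4;     /* > 25% of the node */
        return all_zones_ok;                                    /* order-0: every zone */
    }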
2647 * Returns the final order kswapd was reclaiming at
2664 static unsigned long balance_pgdat(pg_data_t *pgdat, int order, in balance_pgdat() argument
2685 .order = order, in balance_pgdat()
2727 if (!zone_watermark_ok_safe(zone, order, in balance_pgdat()
2772 order, sc.gfp_mask, in balance_pgdat()
2789 if (!zone_watermark_ok_safe(zone, order, in balance_pgdat()
2818 if (!zone_watermark_ok_safe(zone, order, in balance_pgdat()
2826 if (!zone_watermark_ok_safe(zone, order, in balance_pgdat()
2843 if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx))) in balance_pgdat()
2868 * order-0: All zones must meet high watermark for a balanced node in balance_pgdat()
2869 * high-order: Balanced zones must make up at least 25% of the node in balance_pgdat()
2872 if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) { in balance_pgdat()
2879 * rebalanced for high-order allocations in all zones. in balance_pgdat()
2882 * not balanced. For high-order allocations, there is in balance_pgdat()
2886 * Instead, recheck all watermarks at order-0 as they in balance_pgdat()
2888 * back to sleep. High-order users can still perform direct in balance_pgdat()
2892 order = sc.order = 0; in balance_pgdat()
2898 * If kswapd was reclaiming at a higher order, it has the option of in balance_pgdat()
2900 * ensure that the watermarks for order-0 on *all* zones are met and in balance_pgdat()
2905 if (order) { in balance_pgdat()
2915 /* Confirm the zone is balanced for order-0 */ in balance_pgdat()
2918 order = sc.order = 0; in balance_pgdat()
2930 * Return the order we were reclaiming at so sleeping_prematurely() in balance_pgdat()
2931 * makes a decision on the order we were last reclaiming at. However, in balance_pgdat()
2933 * was awake, order will remain at the higher level in balance_pgdat()
2936 return order; in balance_pgdat()
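Reading lines 2843-2936 together, the end-of-loop behaviour amounts to this: if the node could not be balanced for the requested high order, drop back to order-0 so kswapd can eventually sleep (leaving high-order callers to direct reclaim/compaction), and in any case return the order that was actually being reclaimed so sleeping_prematurely() judges the nap against it. A compressed sketch of that outcome, with the retry loop and per-zone watermark rechecks elided:

    #include <stdbool.h>

    static int balance_outcome(int order, bool all_zones_ok, bool node_balanced)
    {
        /* High order could not be satisfied: recheck and report at order-0
         * rather than looping; direct reclaim handles the high-order caller. */
        if (order && !(all_zones_ok || node_balanced))
            return 0;

        return order;   /* the order kswapd was last reclaiming at */
    }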
2939 static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) in kswapd_try_to_sleep() argument
2950 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { in kswapd_try_to_sleep()
2960 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { in kswapd_try_to_sleep()
2998 unsigned long order, new_order; in kswapd() local
3023 * you need a small amount of memory in order to be able to in kswapd()
3031 order = new_order = 0; in kswapd()
3051 if (order < new_order || classzone_idx > new_classzone_idx) { in kswapd()
3053 * Don't sleep if someone wants a larger 'order' in kswapd()
3056 order = new_order; in kswapd()
3061 order = pgdat->kswapd_max_order; in kswapd()
3063 new_order = order; in kswapd()
3078 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order); in kswapd()
3080 balanced_order = balance_pgdat(pgdat, order, in kswapd()
3090 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) in wakeup_kswapd() argument
3100 if (pgdat->kswapd_max_order < order) { in wakeup_kswapd()
3101 pgdat->kswapd_max_order = order; in wakeup_kswapd()
3106 if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0)) in wakeup_kswapd()
3109 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); in wakeup_kswapd()
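The fragments at lines 3100-3106 show the two halves of the wake-up policy: remember the largest order anyone has requested since kswapd last ran, and skip the wake-up entirely when the zone already meets its low watermark for this order. A small sketch with an illustrative struct and a boolean standing in for zone_watermark_ok_safe():

    #include <stdbool.h>

    struct node_state {
        int kswapd_max_order;   /* highest order requested so far */
    };

    static bool should_wake_kswapd(struct node_state *pgdat, int order,
                                   bool low_watermark_ok)
    {
        if (pgdat->kswapd_max_order < order)
            pgdat->kswapd_max_order = order;

        return !low_watermark_ok;   /* zone already balanced: let kswapd sleep */
    }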
3154 * LRU order by reclaiming preferentially
3167 .order = 0, in shrink_all_memory()
3339 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) in __zone_reclaim() argument
3341 /* Minimum pages needed in order to stay on node */ in __zone_reclaim()
3342 const unsigned long nr_pages = 1 << order; in __zone_reclaim()
3353 .order = order, in __zone_reclaim()
3424 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) in zone_reclaim() argument
3465 ret = __zone_reclaim(zone, gfp_mask, order); in zone_reclaim()