1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16 
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/highmem.h>
20 #include <linux/interrupt.h>
21 #include <linux/jiffies.h>
22 #include <linux/compiler.h>
23 #include <linux/kernel.h>
24 #include <linux/kasan.h>
25 #include <linux/kmsan.h>
26 #include <linux/module.h>
27 #include <linux/suspend.h>
28 #include <linux/ratelimit.h>
29 #include <linux/oom.h>
30 #include <linux/topology.h>
31 #include <linux/sysctl.h>
32 #include <linux/cpu.h>
33 #include <linux/cpuset.h>
34 #include <linux/folio_batch.h>
35 #include <linux/memory_hotplug.h>
36 #include <linux/nodemask.h>
37 #include <linux/vmstat.h>
38 #include <linux/fault-inject.h>
39 #include <linux/compaction.h>
40 #include <trace/events/kmem.h>
41 #include <trace/events/oom.h>
42 #include <linux/prefetch.h>
43 #include <linux/mm_inline.h>
44 #include <linux/mmu_notifier.h>
45 #include <linux/migrate.h>
46 #include <linux/sched/mm.h>
47 #include <linux/page_owner.h>
48 #include <linux/page_table_check.h>
49 #include <linux/memcontrol.h>
50 #include <linux/ftrace.h>
51 #include <linux/lockdep.h>
52 #include <linux/psi.h>
53 #include <linux/khugepaged.h>
54 #include <linux/delayacct.h>
55 #include <linux/cacheinfo.h>
56 #include <linux/pgalloc_tag.h>
57 #include <asm/div64.h>
58 #include "internal.h"
59 #include "shuffle.h"
60 #include "page_reporting.h"
61 
62 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
63 typedef int __bitwise fpi_t;
64 
65 /* No special request */
66 #define FPI_NONE		((__force fpi_t)0)
67 
68 /*
69  * Skip free page reporting notification for the (possibly merged) page.
70  * This does not hinder free page reporting from grabbing the page,
71  * reporting it and marking it "reported" -  it only skips notifying
72  * the free page reporting infrastructure about a newly freed page. For
73  * example, used when temporarily pulling a page from a freelist and
74  * putting it back unmodified.
75  */
76 #define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))
77 
78 /*
79  * Place the (possibly merged) page to the tail of the freelist. Will ignore
80  * page shuffling (relevant code - e.g., memory onlining - is expected to
81  * shuffle the whole zone).
82  *
83  * Note: No code should rely on this flag for correctness - it's purely
84  *       to allow for optimizations when handing back either fresh pages
85  *       (memory onlining) or untouched pages (page isolation, free page
86  *       reporting).
87  */
88 #define FPI_TO_TAIL		((__force fpi_t)BIT(1))
89 
90 /* Free the page without taking locks. Rely on trylock only. */
91 #define FPI_TRYLOCK		((__force fpi_t)BIT(2))
92 
93 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
94 static DEFINE_MUTEX(pcp_batch_high_lock);
95 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
96 
97 /*
98  * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
99  * a migration causing the wrong PCP to be locked and remote memory being
100  * potentially allocated, pin the task to the CPU for the lookup+lock.
101  * preempt_disable is used on !RT because it is faster than migrate_disable.
102  * migrate_disable is used on RT because otherwise RT spinlock usage is
103  * interfered with and a high priority task cannot preempt the allocator.
104  */
105 #ifndef CONFIG_PREEMPT_RT
106 #define pcpu_task_pin()		preempt_disable()
107 #define pcpu_task_unpin()	preempt_enable()
108 #else
109 #define pcpu_task_pin()		migrate_disable()
110 #define pcpu_task_unpin()	migrate_enable()
111 #endif
112 
113 /*
114  * A helper to lookup and trylock pcp with embedded spinlock.
115  * The return value should be used with the unlock helper.
116  * NULL return value means the trylock failed.
117  */
118 #ifdef CONFIG_SMP
119 #define pcp_spin_trylock(ptr)						\
120 ({									\
121 	struct per_cpu_pages *_ret;					\
122 	pcpu_task_pin();						\
123 	_ret = this_cpu_ptr(ptr);					\
124 	if (!spin_trylock(&_ret->lock)) {				\
125 		pcpu_task_unpin();					\
126 		_ret = NULL;						\
127 	}								\
128 	_ret;								\
129 })
130 
131 #define pcp_spin_unlock(ptr)						\
132 ({									\
133 	spin_unlock(&ptr->lock);					\
134 	pcpu_task_unpin();						\
135 })
136 
137 /*
138  * On CONFIG_SMP=n the UP implementation of spin_trylock() never fails and thus
139  * is not compatible with our locking scheme. However we do not need pcp for
140  * scalability in the first place, so just make all the trylocks fail and take
141  * the slow path unconditionally.
142  */
143 #else
144 #define pcp_spin_trylock(ptr)		\
145 		NULL
146 
147 #define pcp_spin_unlock(ptr)		\
148 		BUG_ON(1)
149 #endif
150 
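/*
 * Illustrative usage sketch only (not a definition in this file): callers pair
 * the trylock with the unlock and fall back to the zone->lock slow path when
 * the trylock fails, roughly:
 *
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *	if (!pcp)
 *		goto failed;		// take the buddy/slow path instead
 *	... work on pcp->lists / pcp->count ...
 *	pcp_spin_unlock(pcp);
 */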
151 /*
152  * In some cases we do not need to pin the task to the CPU because we are
153  * already given a specific cpu's pcp pointer.
154  */
155 #define pcp_spin_lock_nopin(ptr)			\
156 		spin_lock(&(ptr)->lock)
157 #define pcp_spin_unlock_nopin(ptr)			\
158 		spin_unlock(&(ptr)->lock)
159 
160 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
161 DEFINE_PER_CPU(int, numa_node);
162 EXPORT_PER_CPU_SYMBOL(numa_node);
163 #endif
164 
165 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
166 
167 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
168 /*
169  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
170  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
171  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
172  * defined in <linux/topology.h>.
173  */
174 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
175 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
176 #endif
177 
178 static DEFINE_MUTEX(pcpu_drain_mutex);
179 
180 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
181 volatile unsigned long latent_entropy __latent_entropy;
182 EXPORT_SYMBOL(latent_entropy);
183 #endif
184 
185 /*
186  * Array of node states.
187  */
188 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
189 	[N_POSSIBLE] = NODE_MASK_ALL,
190 	[N_ONLINE] = { { [0] = 1UL } },
191 #ifndef CONFIG_NUMA
192 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
193 #ifdef CONFIG_HIGHMEM
194 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
195 #endif
196 	[N_MEMORY] = { { [0] = 1UL } },
197 	[N_CPU] = { { [0] = 1UL } },
198 #endif	/* NUMA */
199 };
200 EXPORT_SYMBOL(node_states);
201 
202 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
203 
204 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
205 unsigned int pageblock_order __read_mostly;
206 #endif
207 
208 static void __free_pages_ok(struct page *page, unsigned int order,
209 			    fpi_t fpi_flags);
210 static void reserve_highatomic_pageblock(struct page *page, int order,
211 					 struct zone *zone);
212 
213 /*
214  * results with 256, 32 in the lowmem_reserve sysctl:
215  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
216  *	1G machine -> (16M dma, 784M normal, 224M high)
217  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
218  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
219  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
220  *
221  * TBD: should special case ZONE_DMA32 machines here - in those we normally
222  * don't need any ZONE_NORMAL reservation
223  */
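/*
 * Rough worked example of the ratios above: with a ratio of 256 for ZONE_DMA,
 * a NORMAL allocation on the 1G machine leaves 784M/256 ~= 3M of ZONE_DMA
 * unavailable to it; a HIGHMEM allocation leaves 224M/32 = 7M of ZONE_NORMAL
 * and (224M+784M)/256 ~= 4M of ZONE_DMA reserved.
 */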
224 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
225 #ifdef CONFIG_ZONE_DMA
226 	[ZONE_DMA] = 256,
227 #endif
228 #ifdef CONFIG_ZONE_DMA32
229 	[ZONE_DMA32] = 256,
230 #endif
231 	[ZONE_NORMAL] = 32,
232 #ifdef CONFIG_HIGHMEM
233 	[ZONE_HIGHMEM] = 0,
234 #endif
235 	[ZONE_MOVABLE] = 0,
236 };
237 
238 char * const zone_names[MAX_NR_ZONES] = {
239 #ifdef CONFIG_ZONE_DMA
240 	 "DMA",
241 #endif
242 #ifdef CONFIG_ZONE_DMA32
243 	 "DMA32",
244 #endif
245 	 "Normal",
246 #ifdef CONFIG_HIGHMEM
247 	 "HighMem",
248 #endif
249 	 "Movable",
250 #ifdef CONFIG_ZONE_DEVICE
251 	 "Device",
252 #endif
253 };
254 
255 const char * const migratetype_names[MIGRATE_TYPES] = {
256 	"Unmovable",
257 	"Movable",
258 	"Reclaimable",
259 	"HighAtomic",
260 #ifdef CONFIG_CMA
261 	"CMA",
262 #endif
263 #ifdef CONFIG_MEMORY_ISOLATION
264 	"Isolate",
265 #endif
266 };
267 
268 int min_free_kbytes = 1024;
269 int user_min_free_kbytes = -1;
270 static int watermark_boost_factor __read_mostly = 15000;
271 static int watermark_scale_factor = 10;
272 int defrag_mode;
273 
274 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
275 int movable_zone;
276 EXPORT_SYMBOL(movable_zone);
277 
278 #if MAX_NUMNODES > 1
279 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
280 unsigned int nr_online_nodes __read_mostly = 1;
281 EXPORT_SYMBOL(nr_node_ids);
282 EXPORT_SYMBOL(nr_online_nodes);
283 #endif
284 
285 static bool page_contains_unaccepted(struct page *page, unsigned int order);
286 static bool cond_accept_memory(struct zone *zone, unsigned int order,
287 			       int alloc_flags);
288 static bool __free_unaccepted(struct page *page);
289 
290 int page_group_by_mobility_disabled __read_mostly;
291 
292 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
293 /*
294  * During boot we initialize deferred pages on-demand, as needed, but once
295  * page_alloc_init_late() has finished, the deferred pages are all initialized,
296  * and we can permanently disable that path.
297  */
298 DEFINE_STATIC_KEY_TRUE(deferred_pages);
299 
300 static inline bool deferred_pages_enabled(void)
301 {
302 	return static_branch_unlikely(&deferred_pages);
303 }
304 
305 /*
306  * deferred_grow_zone() is __init, but it is called from
307  * get_page_from_freelist() during early boot until deferred_pages permanently
308  * disables this call. This is why we have a refdata wrapper to avoid the warning,
309  * and to ensure that the function body gets unloaded.
310  */
311 static bool __ref
312 _deferred_grow_zone(struct zone *zone, unsigned int order)
313 {
314 	return deferred_grow_zone(zone, order);
315 }
316 #else
317 static inline bool deferred_pages_enabled(void)
318 {
319 	return false;
320 }
321 
322 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
323 {
324 	return false;
325 }
326 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
327 
328 /* Return a pointer to the bitmap storing bits affecting a block of pages */
329 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
330 							unsigned long pfn)
331 {
332 #ifdef CONFIG_SPARSEMEM
333 	return section_to_usemap(__pfn_to_section(pfn));
334 #else
335 	return page_zone(page)->pageblock_flags;
336 #endif /* CONFIG_SPARSEMEM */
337 }
338 
339 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
340 {
341 #ifdef CONFIG_SPARSEMEM
342 	pfn &= (PAGES_PER_SECTION-1);
343 #else
344 	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
345 #endif /* CONFIG_SPARSEMEM */
346 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
347 }
348 
349 static __always_inline bool is_standalone_pb_bit(enum pageblock_bits pb_bit)
350 {
351 	return pb_bit >= PB_compact_skip && pb_bit < __NR_PAGEBLOCK_BITS;
352 }
353 
354 static __always_inline void
355 get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn,
356 			   unsigned long **bitmap_word, unsigned long *bitidx)
357 {
358 	unsigned long *bitmap;
359 	unsigned long word_bitidx;
360 
361 #ifdef CONFIG_MEMORY_ISOLATION
362 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8);
363 #else
364 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
365 #endif
366 	BUILD_BUG_ON(__MIGRATE_TYPE_END > MIGRATETYPE_MASK);
367 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
368 
369 	bitmap = get_pageblock_bitmap(page, pfn);
370 	*bitidx = pfn_to_bitidx(page, pfn);
371 	word_bitidx = *bitidx / BITS_PER_LONG;
372 	*bitidx &= (BITS_PER_LONG - 1);
373 	*bitmap_word = &bitmap[word_bitidx];
374 }
375 
376 
377 /**
378  * __get_pfnblock_flags_mask - Return the requested group of flags for
379  * a pageblock_nr_pages block of pages
380  * @page: The page within the block of interest
381  * @pfn: The target page frame number
382  * @mask: mask of bits that the caller is interested in
383  *
384  * Return: pageblock_bits flags
385  */
386 static unsigned long __get_pfnblock_flags_mask(const struct page *page,
387 					       unsigned long pfn,
388 					       unsigned long mask)
389 {
390 	unsigned long *bitmap_word;
391 	unsigned long bitidx;
392 	unsigned long word;
393 
394 	get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
395 	/*
396 	 * This races, without locks, with set_pfnblock_migratetype(). Ensure
397 	 * a consistent read of the memory array, so that results, even though
398 	 * racy, are not corrupted.
399 	 */
400 	word = READ_ONCE(*bitmap_word);
401 	return (word >> bitidx) & mask;
402 }
403 
404 /**
405  * get_pfnblock_bit - Check if a standalone bit of a pageblock is set
406  * @page: The page within the block of interest
407  * @pfn: The target page frame number
408  * @pb_bit: pageblock bit to check
409  *
410  * Return: true if the bit is set, otherwise false
411  */
412 bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
413 		      enum pageblock_bits pb_bit)
414 {
415 	unsigned long *bitmap_word;
416 	unsigned long bitidx;
417 
418 	if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
419 		return false;
420 
421 	get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
422 
423 	return test_bit(bitidx + pb_bit, bitmap_word);
424 }
425 
426 /**
427  * get_pfnblock_migratetype - Return the migratetype of a pageblock
428  * @page: The page within the block of interest
429  * @pfn: The target page frame number
430  *
431  * Return: The migratetype of the pageblock
432  *
433  * Use get_pfnblock_migratetype() if caller already has both @page and @pfn
434  * to save a call to page_to_pfn().
435  */
436 __always_inline enum migratetype
437 get_pfnblock_migratetype(const struct page *page, unsigned long pfn)
438 {
439 	unsigned long mask = MIGRATETYPE_AND_ISO_MASK;
440 	unsigned long flags;
441 
442 	flags = __get_pfnblock_flags_mask(page, pfn, mask);
443 
444 #ifdef CONFIG_MEMORY_ISOLATION
445 	if (flags & BIT(PB_migrate_isolate))
446 		return MIGRATE_ISOLATE;
447 #endif
448 	return flags & MIGRATETYPE_MASK;
449 }
450 
451 /**
452  * __set_pfnblock_flags_mask - Set the requested group of flags for
453  * a pageblock_nr_pages block of pages
454  * @page: The page within the block of interest
455  * @pfn: The target page frame number
456  * @flags: The flags to set
457  * @mask: mask of bits that the caller is interested in
458  */
459 static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn,
460 				      unsigned long flags, unsigned long mask)
461 {
462 	unsigned long *bitmap_word;
463 	unsigned long bitidx;
464 	unsigned long word;
465 
466 	get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
467 
468 	mask <<= bitidx;
469 	flags <<= bitidx;
470 
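	/*
	 * Lockless read-modify-write of the pageblock bitmap word: on failure
	 * try_cmpxchg() refreshes 'word' with the current value, so the empty
	 * loop body simply retries until the masked bits are swapped in
	 * without clobbering concurrent updates to neighbouring bits.
	 */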
471 	word = READ_ONCE(*bitmap_word);
472 	do {
473 	} while (!try_cmpxchg(bitmap_word, &word, (word & ~mask) | flags));
474 }
475 
476 /**
477  * set_pfnblock_bit - Set a standalone bit of a pageblock
478  * @page: The page within the block of interest
479  * @pfn: The target page frame number
480  * @pb_bit: pageblock bit to set
481  */
482 void set_pfnblock_bit(const struct page *page, unsigned long pfn,
483 		      enum pageblock_bits pb_bit)
484 {
485 	unsigned long *bitmap_word;
486 	unsigned long bitidx;
487 
488 	if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
489 		return;
490 
491 	get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
492 
493 	set_bit(bitidx + pb_bit, bitmap_word);
494 }
495 
496 /**
497  * clear_pfnblock_bit - Clear a standalone bit of a pageblock
498  * @page: The page within the block of interest
499  * @pfn: The target page frame number
500  * @pb_bit: pageblock bit to clear
501  */
502 void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
503 			enum pageblock_bits pb_bit)
504 {
505 	unsigned long *bitmap_word;
506 	unsigned long bitidx;
507 
508 	if (WARN_ON_ONCE(!is_standalone_pb_bit(pb_bit)))
509 		return;
510 
511 	get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
512 
513 	clear_bit(bitidx + pb_bit, bitmap_word);
514 }
515 
516 /**
517  * set_pageblock_migratetype - Set the migratetype of a pageblock
518  * @page: The page within the block of interest
519  * @migratetype: migratetype to set
520  */
521 static void set_pageblock_migratetype(struct page *page,
522 				      enum migratetype migratetype)
523 {
524 	if (unlikely(page_group_by_mobility_disabled &&
525 		     migratetype < MIGRATE_PCPTYPES))
526 		migratetype = MIGRATE_UNMOVABLE;
527 
528 #ifdef CONFIG_MEMORY_ISOLATION
529 	if (migratetype == MIGRATE_ISOLATE) {
530 		VM_WARN_ONCE(1,
531 			"Use set_pageblock_isolate() for pageblock isolation");
532 		return;
533 	}
534 	VM_WARN_ONCE(get_pageblock_isolate(page),
535 		     "Use clear_pageblock_isolate() to unisolate pageblock");
536 	/* MIGRATETYPE_AND_ISO_MASK clears PB_migrate_isolate if it is set */
537 #endif
538 	__set_pfnblock_flags_mask(page, page_to_pfn(page),
539 				  (unsigned long)migratetype,
540 				  MIGRATETYPE_AND_ISO_MASK);
541 }
542 
543 void __meminit init_pageblock_migratetype(struct page *page,
544 					  enum migratetype migratetype,
545 					  bool isolate)
546 {
547 	unsigned long flags;
548 
549 	if (unlikely(page_group_by_mobility_disabled &&
550 		     migratetype < MIGRATE_PCPTYPES))
551 		migratetype = MIGRATE_UNMOVABLE;
552 
553 	flags = migratetype;
554 
555 #ifdef CONFIG_MEMORY_ISOLATION
556 	if (migratetype == MIGRATE_ISOLATE) {
557 		VM_WARN_ONCE(
558 			1,
559 			"Set isolate=true to isolate pageblock with a migratetype");
560 		return;
561 	}
562 	if (isolate)
563 		flags |= BIT(PB_migrate_isolate);
564 #endif
565 	__set_pfnblock_flags_mask(page, page_to_pfn(page), flags,
566 				  MIGRATETYPE_AND_ISO_MASK);
567 }
568 
569 #ifdef CONFIG_DEBUG_VM
570 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
571 {
572 	int ret;
573 	unsigned seq;
574 	unsigned long pfn = page_to_pfn(page);
575 	unsigned long sp, start_pfn;
576 
577 	do {
578 		seq = zone_span_seqbegin(zone);
579 		start_pfn = zone->zone_start_pfn;
580 		sp = zone->spanned_pages;
581 		ret = !zone_spans_pfn(zone, pfn);
582 	} while (zone_span_seqretry(zone, seq));
583 
584 	if (ret)
585 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
586 			pfn, zone_to_nid(zone), zone->name,
587 			start_pfn, start_pfn + sp);
588 
589 	return ret;
590 }
591 
592 /*
593  * Temporary debugging check for pages not lying within a given zone.
594  */
595 static bool __maybe_unused bad_range(struct zone *zone, struct page *page)
596 {
597 	if (page_outside_zone_boundaries(zone, page))
598 		return true;
599 	if (zone != page_zone(page))
600 		return true;
601 
602 	return false;
603 }
604 #else
605 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page)
606 {
607 	return false;
608 }
609 #endif
610 
611 static void bad_page(struct page *page, const char *reason)
612 {
613 	static unsigned long resume;
614 	static unsigned long nr_shown;
615 	static unsigned long nr_unshown;
616 
617 	/*
618 	 * Allow a burst of 60 reports, then keep quiet for that minute;
619 	 * or allow a steady drip of one report per second.
620 	 */
621 	if (nr_shown == 60) {
622 		if (time_before(jiffies, resume)) {
623 			nr_unshown++;
624 			goto out;
625 		}
626 		if (nr_unshown) {
627 			pr_alert(
628 			      "BUG: Bad page state: %lu messages suppressed\n",
629 				nr_unshown);
630 			nr_unshown = 0;
631 		}
632 		nr_shown = 0;
633 	}
634 	if (nr_shown++ == 0)
635 		resume = jiffies + 60 * HZ;
636 
637 	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
638 		current->comm, page_to_pfn(page));
639 	dump_page(page, reason);
640 
641 	print_modules();
642 	dump_stack();
643 out:
644 	/* Leave bad fields for debug, except PageBuddy could make trouble */
645 	if (PageBuddy(page))
646 		__ClearPageBuddy(page);
647 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
648 }
649 
650 static inline unsigned int order_to_pindex(int migratetype, int order)
651 {
652 
653 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
654 	bool movable;
655 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
656 		VM_BUG_ON(!is_pmd_order(order));
657 
658 		movable = migratetype == MIGRATE_MOVABLE;
659 
660 		return NR_LOWORDER_PCP_LISTS + movable;
661 	}
662 #else
663 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
664 #endif
665 
666 	return (MIGRATE_PCPTYPES * order) + migratetype;
667 }
668 
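/*
 * Worked example of the mapping above (assuming MIGRATE_PCPTYPES == 3 and
 * MIGRATE_MOVABLE == 1, as in current <linux/mmzone.h>): an order-1 MOVABLE
 * request maps to pindex 3 * 1 + 1 == 4, while PMD-order requests land on the
 * two dedicated lists past NR_LOWORDER_PCP_LISTS.
 */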
669 static inline int pindex_to_order(unsigned int pindex)
670 {
671 	int order = pindex / MIGRATE_PCPTYPES;
672 
673 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
674 	if (pindex >= NR_LOWORDER_PCP_LISTS)
675 		order = HPAGE_PMD_ORDER;
676 #else
677 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
678 #endif
679 
680 	return order;
681 }
682 
683 static inline bool pcp_allowed_order(unsigned int order)
684 {
685 	if (order <= PAGE_ALLOC_COSTLY_ORDER)
686 		return true;
687 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
688 	if (is_pmd_order(order))
689 		return true;
690 #endif
691 	return false;
692 }
693 
694 /*
695  * Higher-order pages are called "compound pages".  They are structured thusly:
696  *
697  * The first PAGE_SIZE page is called the "head page" and has PG_head set.
698  *
699  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
700  * in bit 0 of page->compound_info. The rest of the bits are a pointer to the head page.
701  *
702  * The first tail page's ->compound_order holds the order of allocation.
703  * This usage means that zero-order pages may not be compound.
704  */
705 
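/*
 * Sketch of the tail-page encoding described above (illustrative only; the
 * real accessors are the compound_head()/set_compound_head() helpers in
 * <linux/page-flags.h>):
 *
 *	tail->compound_info = (unsigned long)head | 1;	// bit 0 => PageTail()
 *	head = (struct page *)(tail->compound_info & ~1UL);
 */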
706 void prep_compound_page(struct page *page, unsigned int order)
707 {
708 	int i;
709 	int nr_pages = 1 << order;
710 
711 	__SetPageHead(page);
712 	for (i = 1; i < nr_pages; i++)
713 		prep_compound_tail(page + i, page, order);
714 
715 	prep_compound_head(page, order);
716 }
717 
718 static inline void set_buddy_order(struct page *page, unsigned int order)
719 {
720 	set_page_private(page, order);
721 	__SetPageBuddy(page);
722 }
723 
724 #ifdef CONFIG_COMPACTION
725 static inline struct capture_control *task_capc(struct zone *zone)
726 {
727 	struct capture_control *capc = current->capture_control;
728 
729 	return unlikely(capc) &&
730 		!(current->flags & PF_KTHREAD) &&
731 		!capc->page &&
732 		capc->cc->zone == zone ? capc : NULL;
733 }
734 
735 static inline bool
736 compaction_capture(struct capture_control *capc, struct page *page,
737 		   int order, int migratetype)
738 {
739 	if (!capc || order != capc->cc->order)
740 		return false;
741 
742 	/* Do not accidentally pollute CMA or isolated regions */
743 	if (is_migrate_cma(migratetype) ||
744 	    is_migrate_isolate(migratetype))
745 		return false;
746 
747 	/*
748 	 * Do not let lower order allocations pollute a movable pageblock
749 	 * unless compaction is also requesting movable pages.
750 	 * This might let an unmovable request use a reclaimable pageblock
751 	 * and vice-versa but no more than normal fallback logic which can
752 	 * have trouble finding a high-order free page.
753 	 */
754 	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE &&
755 	    capc->cc->migratetype != MIGRATE_MOVABLE)
756 		return false;
757 
758 	if (migratetype != capc->cc->migratetype)
759 		trace_mm_page_alloc_extfrag(page, capc->cc->order, order,
760 					    capc->cc->migratetype, migratetype);
761 
762 	capc->page = page;
763 	return true;
764 }
765 
766 #else
767 static inline struct capture_control *task_capc(struct zone *zone)
768 {
769 	return NULL;
770 }
771 
772 static inline bool
773 compaction_capture(struct capture_control *capc, struct page *page,
774 		   int order, int migratetype)
775 {
776 	return false;
777 }
778 #endif /* CONFIG_COMPACTION */
779 
780 static inline void account_freepages(struct zone *zone, int nr_pages,
781 				     int migratetype)
782 {
783 	lockdep_assert_held(&zone->lock);
784 
785 	if (is_migrate_isolate(migratetype))
786 		return;
787 
788 	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
789 
790 	if (is_migrate_cma(migratetype))
791 		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
792 	else if (migratetype == MIGRATE_HIGHATOMIC)
793 		WRITE_ONCE(zone->nr_free_highatomic,
794 			   zone->nr_free_highatomic + nr_pages);
795 }
796 
797 /* Used for pages not on another list */
798 static inline void __add_to_free_list(struct page *page, struct zone *zone,
799 				      unsigned int order, int migratetype,
800 				      bool tail)
801 {
802 	struct free_area *area = &zone->free_area[order];
803 	int nr_pages = 1 << order;
804 
805 	VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
806 		     "page type is %d, passed migratetype is %d (nr=%d)\n",
807 		     get_pageblock_migratetype(page), migratetype, nr_pages);
808 
809 	if (tail)
810 		list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
811 	else
812 		list_add(&page->buddy_list, &area->free_list[migratetype]);
813 	area->nr_free++;
814 
815 	if (order >= pageblock_order && !is_migrate_isolate(migratetype))
816 		__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
817 }
818 
819 /*
820  * Used for pages which are on another list. Move the pages to the tail
821  * of the list - so the moved pages won't immediately be considered for
822  * allocation again (e.g., optimization for memory onlining).
823  */
824 static inline void move_to_free_list(struct page *page, struct zone *zone,
825 				     unsigned int order, int old_mt, int new_mt)
826 {
827 	struct free_area *area = &zone->free_area[order];
828 	int nr_pages = 1 << order;
829 
830 	/* Free page moving can fail, so it happens before the type update */
831 	VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
832 		     "page type is %d, passed migratetype is %d (nr=%d)\n",
833 		     get_pageblock_migratetype(page), old_mt, nr_pages);
834 
835 	list_move_tail(&page->buddy_list, &area->free_list[new_mt]);
836 
837 	account_freepages(zone, -nr_pages, old_mt);
838 	account_freepages(zone, nr_pages, new_mt);
839 
840 	if (order >= pageblock_order &&
841 	    is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) {
842 		if (!is_migrate_isolate(old_mt))
843 			nr_pages = -nr_pages;
844 		__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
845 	}
846 }
847 
848 static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
849 					     unsigned int order, int migratetype)
850 {
851 	int nr_pages = 1 << order;
852 
853         VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
854 		     "page type is %d, passed migratetype is %d (nr=%d)\n",
855 		     get_pageblock_migratetype(page), migratetype, nr_pages);
856 
857 	/* clear reported state and update reported page count */
858 	if (page_reported(page))
859 		__ClearPageReported(page);
860 
861 	list_del(&page->buddy_list);
862 	__ClearPageBuddy(page);
863 	set_page_private(page, 0);
864 	zone->free_area[order].nr_free--;
865 
866 	if (order >= pageblock_order && !is_migrate_isolate(migratetype))
867 		__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages);
868 }
869 
870 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
871 					   unsigned int order, int migratetype)
872 {
873 	__del_page_from_free_list(page, zone, order, migratetype);
874 	account_freepages(zone, -(1 << order), migratetype);
875 }
876 
877 static inline struct page *get_page_from_free_area(struct free_area *area,
878 					    int migratetype)
879 {
880 	return list_first_entry_or_null(&area->free_list[migratetype],
881 					struct page, buddy_list);
882 }
883 
884 /*
885  * If this is less than the 2nd largest possible page, check if the buddy
886  * of the next-higher order is free. If it is, it's possible
887  * that pages are being freed that will coalesce soon. If that is
888  * happening, add the free page to the tail of the list so it's less
889  * likely to be used soon and more likely to be merged as a
890  * 2-level higher order page.
891  */
892 static inline bool
893 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
894 		   struct page *page, unsigned int order)
895 {
896 	unsigned long higher_page_pfn;
897 	struct page *higher_page;
898 
899 	if (order >= MAX_PAGE_ORDER - 1)
900 		return false;
901 
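	/* buddy_pfn & pfn is the lower, aligned pfn: the start of the order + 1 block */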
902 	higher_page_pfn = buddy_pfn & pfn;
903 	higher_page = page + (higher_page_pfn - pfn);
904 
905 	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
906 			NULL) != NULL;
907 }
908 
909 static void change_pageblock_range(struct page *pageblock_page,
910 				   int start_order, int migratetype)
911 {
912 	int nr_pageblocks = 1 << (start_order - pageblock_order);
913 
914 	while (nr_pageblocks--) {
915 		set_pageblock_migratetype(pageblock_page, migratetype);
916 		pageblock_page += pageblock_nr_pages;
917 	}
918 }
919 
920 /*
921  * Freeing function for a buddy system allocator.
922  *
923  * The concept of a buddy system is to maintain direct-mapped table
924  * (containing bit values) for memory blocks of various "orders".
925  * The bottom level table contains the map for the smallest allocatable
926  * units of memory (here, pages), and each level above it describes
927  * pairs of units from the levels below, hence, "buddies".
928  * At a high level, all that happens here is marking the table entry
929  * at the bottom level available, and propagating the changes upward
930  * as necessary, plus some accounting needed to play nicely with other
931  * parts of the VM system.
932  * At each level, we keep a list of pages, which are heads of contiguous
933  * free pages of length (1 << order) and marked with PageBuddy.
934  * Page's order is recorded in page_private(page) field.
935  * So when we are allocating or freeing one, we can derive the state of the
936  * other.  That is, if we allocate a small block, and both were
937  * free, the remainder of the region must be split into blocks.
938  * If a block is freed, and its buddy is also free, then this
939  * triggers coalescing into a block of larger size.
940  *
941  * -- nyc
942  */
943 
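/*
 * Buddy arithmetic used by the merge loop below (a sketch; the real lookup is
 * find_buddy_page_pfn()/__find_buddy_pfn() in mm/internal.h):
 *
 *	buddy_pfn    = pfn ^ (1UL << order);	// flip the order'th bit
 *	combined_pfn = buddy_pfn & pfn;		// start of the merged 2^(order+1) block
 */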
944 static inline void __free_one_page(struct page *page,
945 		unsigned long pfn,
946 		struct zone *zone, unsigned int order,
947 		int migratetype, fpi_t fpi_flags)
948 {
949 	struct capture_control *capc = task_capc(zone);
950 	unsigned long buddy_pfn = 0;
951 	unsigned long combined_pfn;
952 	struct page *buddy;
953 	bool to_tail;
954 
955 	VM_BUG_ON(!zone_is_initialized(zone));
956 	VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page);
957 
958 	VM_BUG_ON(migratetype == -1);
959 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
960 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
961 
962 	account_freepages(zone, 1 << order, migratetype);
963 
964 	while (order < MAX_PAGE_ORDER) {
965 		int buddy_mt = migratetype;
966 
967 		if (compaction_capture(capc, page, order, migratetype)) {
968 			account_freepages(zone, -(1 << order), migratetype);
969 			return;
970 		}
971 
972 		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
973 		if (!buddy)
974 			goto done_merging;
975 
976 		if (unlikely(order >= pageblock_order)) {
977 			/*
978 			 * We want to prevent merge between freepages on pageblock
979 			 * without fallbacks and normal pageblock. Without this,
980 			 * pageblock isolation could cause incorrect freepage or CMA
981 			 * accounting or HIGHATOMIC accounting.
982 			 */
983 			buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
984 
985 			if (migratetype != buddy_mt &&
986 			    (!migratetype_is_mergeable(migratetype) ||
987 			     !migratetype_is_mergeable(buddy_mt)))
988 				goto done_merging;
989 		}
990 
991 		/*
992 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
993 		 * merge with it and move up one order.
994 		 */
995 		if (page_is_guard(buddy))
996 			clear_page_guard(zone, buddy, order);
997 		else
998 			__del_page_from_free_list(buddy, zone, order, buddy_mt);
999 
1000 		if (unlikely(buddy_mt != migratetype)) {
1001 			/*
1002 			 * Match buddy type. This ensures that an
1003 			 * expand() down the line puts the sub-blocks
1004 			 * on the right freelists.
1005 			 */
1006 			change_pageblock_range(buddy, order, migratetype);
1007 		}
1008 
1009 		combined_pfn = buddy_pfn & pfn;
1010 		page = page + (combined_pfn - pfn);
1011 		pfn = combined_pfn;
1012 		order++;
1013 	}
1014 
1015 done_merging:
1016 	set_buddy_order(page, order);
1017 
1018 	if (fpi_flags & FPI_TO_TAIL)
1019 		to_tail = true;
1020 	else if (is_shuffle_order(order))
1021 		to_tail = shuffle_pick_tail();
1022 	else
1023 		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1024 
1025 	__add_to_free_list(page, zone, order, migratetype, to_tail);
1026 
1027 	/* Notify page reporting subsystem of freed page */
1028 	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1029 		page_reporting_notify_free(order);
1030 }
1031 
1032 /*
1033  * A bad page could be due to a number of fields. Instead of multiple branches,
1034  * try and check multiple fields with one check. The caller must do a detailed
1035  * check if necessary.
1036  */
1037 static inline bool page_expected_state(struct page *page,
1038 					unsigned long check_flags)
1039 {
1040 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1041 		return false;
1042 
1043 	if (unlikely((unsigned long)page->mapping |
1044 			page_ref_count(page) |
1045 #ifdef CONFIG_MEMCG
1046 			page->memcg_data |
1047 #endif
1048 			(page->flags.f & check_flags)))
1049 		return false;
1050 
1051 	return true;
1052 }
1053 
1054 static const char *page_bad_reason(struct page *page, unsigned long flags)
1055 {
1056 	const char *bad_reason = NULL;
1057 
1058 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1059 		bad_reason = "nonzero mapcount";
1060 	if (unlikely(page->mapping != NULL))
1061 		bad_reason = "non-NULL mapping";
1062 	if (unlikely(page_ref_count(page) != 0))
1063 		bad_reason = "nonzero _refcount";
1064 	if (unlikely(page->flags.f & flags)) {
1065 		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1066 			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1067 		else
1068 			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1069 	}
1070 #ifdef CONFIG_MEMCG
1071 	if (unlikely(page->memcg_data))
1072 		bad_reason = "page still charged to cgroup";
1073 #endif
1074 	return bad_reason;
1075 }
1076 
1077 static inline bool free_page_is_bad(struct page *page)
1078 {
1079 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1080 		return false;
1081 
1082 	/* Something has gone sideways, find it */
1083 	bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1084 	return true;
1085 }
1086 
1087 static inline bool is_check_pages_enabled(void)
1088 {
1089 	return static_branch_unlikely(&check_pages_enabled);
1090 }
1091 
1092 static int free_tail_page_prepare(struct page *head_page, struct page *page)
1093 {
1094 	struct folio *folio = (struct folio *)head_page;
1095 	int ret = 1;
1096 
1097 	/*
1098 	 * We rely on page->lru.next never having bit 0 set, unless the page
1099 	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1100 	 */
1101 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1102 
1103 	if (!is_check_pages_enabled()) {
1104 		ret = 0;
1105 		goto out;
1106 	}
1107 	switch (page - head_page) {
1108 	case 1:
1109 		/* the first tail page: these may be in place of ->mapping */
1110 		if (unlikely(folio_large_mapcount(folio))) {
1111 			bad_page(page, "nonzero large_mapcount");
1112 			goto out;
1113 		}
1114 		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) &&
1115 		    unlikely(atomic_read(&folio->_nr_pages_mapped))) {
1116 			bad_page(page, "nonzero nr_pages_mapped");
1117 			goto out;
1118 		}
1119 		if (IS_ENABLED(CONFIG_MM_ID)) {
1120 			if (unlikely(folio->_mm_id_mapcount[0] != -1)) {
1121 				bad_page(page, "nonzero mm mapcount 0");
1122 				goto out;
1123 			}
1124 			if (unlikely(folio->_mm_id_mapcount[1] != -1)) {
1125 				bad_page(page, "nonzero mm mapcount 1");
1126 				goto out;
1127 			}
1128 		}
1129 		if (IS_ENABLED(CONFIG_64BIT)) {
1130 			if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
1131 				bad_page(page, "nonzero entire_mapcount");
1132 				goto out;
1133 			}
1134 			if (unlikely(atomic_read(&folio->_pincount))) {
1135 				bad_page(page, "nonzero pincount");
1136 				goto out;
1137 			}
1138 		}
1139 		break;
1140 	case 2:
1141 		/* the second tail page: deferred_list overlaps ->mapping */
1142 		if (unlikely(!list_empty(&folio->_deferred_list))) {
1143 			bad_page(page, "on deferred list");
1144 			goto out;
1145 		}
1146 		if (!IS_ENABLED(CONFIG_64BIT)) {
1147 			if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
1148 				bad_page(page, "nonzero entire_mapcount");
1149 				goto out;
1150 			}
1151 			if (unlikely(atomic_read(&folio->_pincount))) {
1152 				bad_page(page, "nonzero pincount");
1153 				goto out;
1154 			}
1155 		}
1156 		break;
1157 	case 3:
1158 		/* the third tail page: hugetlb specifics overlap ->mappings */
1159 		if (IS_ENABLED(CONFIG_HUGETLB_PAGE))
1160 			break;
1161 		fallthrough;
1162 	default:
1163 		if (page->mapping != TAIL_MAPPING) {
1164 			bad_page(page, "corrupted mapping in tail page");
1165 			goto out;
1166 		}
1167 		break;
1168 	}
1169 	if (unlikely(!PageTail(page))) {
1170 		bad_page(page, "PageTail not set");
1171 		goto out;
1172 	}
1173 	if (unlikely(compound_head(page) != head_page)) {
1174 		bad_page(page, "compound_head not consistent");
1175 		goto out;
1176 	}
1177 	ret = 0;
1178 out:
1179 	page->mapping = NULL;
1180 	clear_compound_head(page);
1181 	return ret;
1182 }
1183 
1184 /*
1185  * Skip KASAN memory poisoning when either:
1186  *
1187  * 1. For generic KASAN: deferred memory initialization has not yet completed.
1188  *    Tag-based KASAN modes skip pages freed via deferred memory initialization
1189  *    using page tags instead (see below).
1190  * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
1191  *    that error detection is disabled for accesses via the page address.
1192  *
1193  * Pages will have match-all tags in the following circumstances:
1194  *
1195  * 1. Pages are being initialized for the first time, including during deferred
1196  *    memory init; see the call to page_kasan_tag_reset in __init_single_page.
1197  * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
1198  *    exception of pages unpoisoned by kasan_unpoison_vmalloc.
1199  * 3. The allocation was excluded from being checked due to sampling,
1200  *    see the call to kasan_unpoison_pages.
1201  *
1202  * Poisoning pages during deferred memory init will greatly lengthen the
1203  * process and cause problems in large memory systems, as the deferred page
1204  * initialization is done with interrupts disabled.
1205  *
1206  * Assuming that there will be no reference to those newly initialized
1207  * pages before they are ever allocated, this should have no effect on
1208  * KASAN memory tracking as the poison will be properly inserted at page
1209  * allocation time. The only corner case is when pages are allocated by
1210  * on-demand allocation and then freed again before the deferred pages
1211  * initialization is done, but this is not likely to happen.
1212  */
1213 static inline bool should_skip_kasan_poison(struct page *page)
1214 {
1215 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1216 		return deferred_pages_enabled();
1217 
1218 	return page_kasan_tag(page) == KASAN_TAG_KERNEL;
1219 }
1220 
1221 static void kernel_init_pages(struct page *page, int numpages)
1222 {
1223 	int i;
1224 
1225 	/* s390's use of memset() could override KASAN redzones. */
1226 	kasan_disable_current();
1227 	for (i = 0; i < numpages; i++)
1228 		clear_highpage_kasan_tagged(page + i);
1229 	kasan_enable_current();
1230 }
1231 
1232 #ifdef CONFIG_MEM_ALLOC_PROFILING
1233 
1234 /* Should be called only if mem_alloc_profiling_enabled() */
1235 void __clear_page_tag_ref(struct page *page)
1236 {
1237 	union pgtag_ref_handle handle;
1238 	union codetag_ref ref;
1239 
1240 	if (get_page_tag_ref(page, &ref, &handle)) {
1241 		set_codetag_empty(&ref);
1242 		update_page_tag_ref(handle, &ref);
1243 		put_page_tag_ref(handle);
1244 	}
1245 }
1246 
1247 /* Should be called only if mem_alloc_profiling_enabled() */
1248 static noinline
1249 void __pgalloc_tag_add(struct page *page, struct task_struct *task,
1250 		       unsigned int nr)
1251 {
1252 	union pgtag_ref_handle handle;
1253 	union codetag_ref ref;
1254 
1255 	if (get_page_tag_ref(page, &ref, &handle)) {
1256 		alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
1257 		update_page_tag_ref(handle, &ref);
1258 		put_page_tag_ref(handle);
1259 	}
1260 }
1261 
1262 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
1263 				   unsigned int nr)
1264 {
1265 	if (mem_alloc_profiling_enabled())
1266 		__pgalloc_tag_add(page, task, nr);
1267 }
1268 
1269 /* Should be called only if mem_alloc_profiling_enabled() */
1270 static noinline
1271 void __pgalloc_tag_sub(struct page *page, unsigned int nr)
1272 {
1273 	union pgtag_ref_handle handle;
1274 	union codetag_ref ref;
1275 
1276 	if (get_page_tag_ref(page, &ref, &handle)) {
1277 		alloc_tag_sub(&ref, PAGE_SIZE * nr);
1278 		update_page_tag_ref(handle, &ref);
1279 		put_page_tag_ref(handle);
1280 	}
1281 }
1282 
1283 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
1284 {
1285 	if (mem_alloc_profiling_enabled())
1286 		__pgalloc_tag_sub(page, nr);
1287 }
1288 
1289 /* When tag is not NULL, assuming mem_alloc_profiling_enabled */
1290 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
1291 {
1292 	if (tag)
1293 		this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
1294 }
1295 
1296 #else /* CONFIG_MEM_ALLOC_PROFILING */
1297 
1298 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
1299 				   unsigned int nr) {}
1300 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
1301 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
1302 
1303 #endif /* CONFIG_MEM_ALLOC_PROFILING */
1304 
1305 __always_inline bool __free_pages_prepare(struct page *page,
1306 					  unsigned int order, fpi_t fpi_flags)
1307 {
1308 	int bad = 0;
1309 	bool skip_kasan_poison = should_skip_kasan_poison(page);
1310 	bool init = want_init_on_free();
1311 	bool compound = PageCompound(page);
1312 	struct folio *folio = page_folio(page);
1313 
1314 	VM_BUG_ON_PAGE(PageTail(page), page);
1315 
1316 	trace_mm_page_free(page, order);
1317 	kmsan_free_page(page, order);
1318 
1319 	if (memcg_kmem_online() && PageMemcgKmem(page))
1320 		__memcg_kmem_uncharge_page(page, order);
1321 
1322 	/*
1323 	 * In rare cases, when truncation or holepunching raced with
1324 	 * munlock after VM_LOCKED was cleared, Mlocked may still be
1325 	 * found set here.  This does not indicate a problem, unless
1326 	 * "unevictable_pgs_cleared" appears worryingly large.
1327 	 */
1328 	if (unlikely(folio_test_mlocked(folio))) {
1329 		long nr_pages = folio_nr_pages(folio);
1330 
1331 		__folio_clear_mlocked(folio);
1332 		zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
1333 		count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
1334 	}
1335 
1336 	if (unlikely(PageHWPoison(page)) && !order) {
1337 		/* Do not let hwpoison pages hit pcplists/buddy */
1338 		reset_page_owner(page, order);
1339 		page_table_check_free(page, order);
1340 		pgalloc_tag_sub(page, 1 << order);
1341 
1342 		/*
1343 		 * The page is isolated and accounted for.
1344 		 * Mark the codetag as empty to avoid accounting error
1345 		 * when the page is freed by unpoison_memory().
1346 		 */
1347 		clear_page_tag_ref(page);
1348 		return false;
1349 	}
1350 
1351 	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1352 
1353 	/*
1354 	 * Check tail pages before head page information is cleared to
1355 	 * avoid checking PageCompound for order-0 pages.
1356 	 */
1357 	if (unlikely(order)) {
1358 		int i;
1359 
1360 		if (compound) {
1361 			page[1].flags.f &= ~PAGE_FLAGS_SECOND;
1362 #ifdef NR_PAGES_IN_LARGE_FOLIO
1363 			folio->_nr_pages = 0;
1364 #endif
1365 		}
1366 		for (i = 1; i < (1 << order); i++) {
1367 			if (compound)
1368 				bad += free_tail_page_prepare(page, page + i);
1369 			if (is_check_pages_enabled()) {
1370 				if (free_page_is_bad(page + i)) {
1371 					bad++;
1372 					continue;
1373 				}
1374 			}
1375 			(page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
1376 		}
1377 	}
1378 	if (folio_test_anon(folio)) {
1379 		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
1380 		folio->mapping = NULL;
1381 	}
1382 	if (unlikely(page_has_type(page))) {
1383 		/* networking expects to clear its page type before releasing */
1384 		if (is_check_pages_enabled()) {
1385 			if (unlikely(PageNetpp(page))) {
1386 				bad_page(page, "page_pool leak");
1387 				return false;
1388 			}
1389 		}
1390 		/* Reset the page_type (which overlays _mapcount) */
1391 		page->page_type = UINT_MAX;
1392 	}
1393 
1394 	if (is_check_pages_enabled()) {
1395 		if (free_page_is_bad(page))
1396 			bad++;
1397 		if (bad)
1398 			return false;
1399 	}
1400 
1401 	page_cpupid_reset_last(page);
1402 	page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
1403 	page->private = 0;
1404 	reset_page_owner(page, order);
1405 	page_table_check_free(page, order);
1406 	pgalloc_tag_sub(page, 1 << order);
1407 
1408 	if (!PageHighMem(page) && !(fpi_flags & FPI_TRYLOCK)) {
1409 		debug_check_no_locks_freed(page_address(page),
1410 					   PAGE_SIZE << order);
1411 		debug_check_no_obj_freed(page_address(page),
1412 					   PAGE_SIZE << order);
1413 	}
1414 
1415 	kernel_poison_pages(page, 1 << order);
1416 
1417 	/*
1418 	 * As memory initialization might be integrated into KASAN,
1419 	 * KASAN poisoning and memory initialization code must be
1420 	 * kept together to avoid discrepancies in behavior.
1421 	 *
1422 	 * With hardware tag-based KASAN, memory tags must be set before the
1423 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
1424 	 */
1425 	if (!skip_kasan_poison) {
1426 		kasan_poison_pages(page, order, init);
1427 
1428 		/* Memory is already initialized if KASAN did it internally. */
1429 		if (kasan_has_integrated_init())
1430 			init = false;
1431 	}
1432 	if (init)
1433 		kernel_init_pages(page, 1 << order);
1434 
1435 	/*
1436 	 * arch_free_page() can make the page's contents inaccessible.  s390
1437 	 * does this.  So nothing which can access the page's contents should
1438 	 * happen after this.
1439 	 */
1440 	arch_free_page(page, order);
1441 
1442 	debug_pagealloc_unmap_pages(page, 1 << order);
1443 
1444 	return true;
1445 }
1446 
1447 bool free_pages_prepare(struct page *page, unsigned int order)
1448 {
1449 	return __free_pages_prepare(page, order, FPI_NONE);
1450 }
1451 
1452 /*
1453  * Frees a number of pages from the PCP lists
1454  * Assumes all pages on list are in same zone.
1455  * count is the number of pages to free.
1456  */
1457 static void free_pcppages_bulk(struct zone *zone, int count,
1458 					struct per_cpu_pages *pcp,
1459 					int pindex)
1460 {
1461 	unsigned long flags;
1462 	unsigned int order;
1463 	struct page *page;
1464 
1465 	/*
1466 	 * Ensure a proper count is passed; otherwise we would get stuck in
1467 	 * the while (list_empty(list)) loop below.
1468 	 */
1469 	count = min(pcp->count, count);
1470 
1471 	/* Ensure requested pindex is drained first. */
1472 	pindex = pindex - 1;
1473 
1474 	spin_lock_irqsave(&zone->lock, flags);
1475 
1476 	while (count > 0) {
1477 		struct list_head *list;
1478 		int nr_pages;
1479 
1480 		/* Remove pages from lists in a round-robin fashion. */
1481 		do {
1482 			if (++pindex > NR_PCP_LISTS - 1)
1483 				pindex = 0;
1484 			list = &pcp->lists[pindex];
1485 		} while (list_empty(list));
1486 
1487 		order = pindex_to_order(pindex);
1488 		nr_pages = 1 << order;
1489 		do {
1490 			unsigned long pfn;
1491 			int mt;
1492 
1493 			page = list_last_entry(list, struct page, pcp_list);
1494 			pfn = page_to_pfn(page);
1495 			mt = get_pfnblock_migratetype(page, pfn);
1496 
1497 			/* must delete to avoid corrupting pcp list */
1498 			list_del(&page->pcp_list);
1499 			count -= nr_pages;
1500 			pcp->count -= nr_pages;
1501 
1502 			__free_one_page(page, pfn, zone, order, mt, FPI_NONE);
1503 			trace_mm_page_pcpu_drain(page, order, mt);
1504 		} while (count > 0 && !list_empty(list));
1505 	}
1506 
1507 	spin_unlock_irqrestore(&zone->lock, flags);
1508 }
1509 
1510 /* Split a multi-block free page into its individual pageblocks. */
1511 static void split_large_buddy(struct zone *zone, struct page *page,
1512 			      unsigned long pfn, int order, fpi_t fpi)
1513 {
1514 	unsigned long end = pfn + (1 << order);
1515 
1516 	VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
1517 	/* Caller removed page from freelist, buddy info cleared! */
1518 	VM_WARN_ON_ONCE(PageBuddy(page));
1519 
1520 	if (order > pageblock_order)
1521 		order = pageblock_order;
1522 
1523 	do {
1524 		int mt = get_pfnblock_migratetype(page, pfn);
1525 
1526 		__free_one_page(page, pfn, zone, order, mt, fpi);
1527 		pfn += 1 << order;
1528 		if (pfn == end)
1529 			break;
1530 		page = pfn_to_page(pfn);
1531 	} while (1);
1532 }
1533 
1534 static void add_page_to_zone_llist(struct zone *zone, struct page *page,
1535 				   unsigned int order)
1536 {
1537 	/* Remember the order */
1538 	page->private = order;
1539 	/* Add the page to the free list */
1540 	llist_add(&page->pcp_llist, &zone->trylock_free_pages);
1541 }
1542 
1543 static void free_one_page(struct zone *zone, struct page *page,
1544 			  unsigned long pfn, unsigned int order,
1545 			  fpi_t fpi_flags)
1546 {
1547 	struct llist_head *llhead;
1548 	unsigned long flags;
1549 
1550 	if (unlikely(fpi_flags & FPI_TRYLOCK)) {
1551 		if (!spin_trylock_irqsave(&zone->lock, flags)) {
1552 			add_page_to_zone_llist(zone, page, order);
1553 			return;
1554 		}
1555 	} else {
1556 		spin_lock_irqsave(&zone->lock, flags);
1557 	}
1558 
1559 	/* The lock succeeded. Process deferred pages. */
1560 	llhead = &zone->trylock_free_pages;
1561 	if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK))) {
1562 		struct llist_node *llnode;
1563 		struct page *p, *tmp;
1564 
1565 		llnode = llist_del_all(llhead);
1566 		llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) {
1567 			unsigned int p_order = p->private;
1568 
1569 			split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags);
1570 			__count_vm_events(PGFREE, 1 << p_order);
1571 		}
1572 	}
1573 	split_large_buddy(zone, page, pfn, order, fpi_flags);
1574 	spin_unlock_irqrestore(&zone->lock, flags);
1575 
1576 	__count_vm_events(PGFREE, 1 << order);
1577 }
1578 
1579 static void __free_pages_ok(struct page *page, unsigned int order,
1580 			    fpi_t fpi_flags)
1581 {
1582 	unsigned long pfn = page_to_pfn(page);
1583 	struct zone *zone = page_zone(page);
1584 
1585 	if (__free_pages_prepare(page, order, fpi_flags))
1586 		free_one_page(zone, page, pfn, order, fpi_flags);
1587 }
1588 
1589 void __meminit __free_pages_core(struct page *page, unsigned int order,
1590 		enum meminit_context context)
1591 {
1592 	unsigned int nr_pages = 1 << order;
1593 	struct page *p = page;
1594 	unsigned int loop;
1595 
1596 	/*
1597 	 * When initializing the memmap, __init_single_page() sets the refcount
1598 	 * of all pages to 1 ("allocated"/"not free"). We have to set the
1599 	 * refcount of all involved pages to 0.
1600 	 *
1601 	 * Note that hotplugged memory pages are initialized to PageOffline().
1602 	 * Pages freed from memblock might be marked as reserved.
1603 	 */
1604 	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
1605 	    unlikely(context == MEMINIT_HOTPLUG)) {
1606 		for (loop = 0; loop < nr_pages; loop++, p++) {
1607 			VM_WARN_ON_ONCE(PageReserved(p));
1608 			__ClearPageOffline(p);
1609 			set_page_count(p, 0);
1610 		}
1611 
1612 		adjust_managed_page_count(page, nr_pages);
1613 	} else {
1614 		for (loop = 0; loop < nr_pages; loop++, p++) {
1615 			__ClearPageReserved(p);
1616 			set_page_count(p, 0);
1617 		}
1618 
1619 		/* memblock adjusts totalram_pages() manually. */
1620 		atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1621 	}
1622 
1623 	if (page_contains_unaccepted(page, order)) {
1624 		if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
1625 			return;
1626 
1627 		accept_memory(page_to_phys(page), PAGE_SIZE << order);
1628 	}
1629 
1630 	/*
1631 	 * Bypass PCP and place fresh pages right to the tail, primarily
1632 	 * relevant for memory onlining.
1633 	 */
1634 	__free_pages_ok(page, order, FPI_TO_TAIL);
1635 }
1636 
1637 /*
1638  * Check that the whole (or subset of) a pageblock given by the interval of
1639  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1640  * with the migration or free compaction scanner.
1641  *
1642  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1643  *
1644  * It's possible on some configurations to have a setup like node0 node1 node0
1645  * i.e. it's possible that all pages within a zones range of pages do not
1646  * belong to a single zone. We assume that a border between node0 and node1
1647  * can occur within a single pageblock, but not a node0 node1 node0
1648  * interleaving within a single pageblock. It is therefore sufficient to check
1649  * the first and last page of a pageblock and avoid checking each individual
1650  * page in a pageblock.
1651  *
1652  * Note: the function may return non-NULL struct page even for a page block
1653  * which contains a memory hole (i.e. there is no physical memory for a subset
1654  * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, the
1655  * block will span 2 sub-sections, and the end pfn of the pageblock may fall in a
1656  * hole even though the start pfn is online and valid. This should be safe most of
1657  * the time because struct pages are still initialized via init_unavailable_range()
1658  * and pfn walkers shouldn't touch any physical memory range for which they do
1659  * not recognize any specific metadata in struct pages.
1660  */
1661 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1662 				     unsigned long end_pfn, struct zone *zone)
1663 {
1664 	struct page *start_page;
1665 	struct page *end_page;
1666 
1667 	/* end_pfn is one past the range we are checking */
1668 	end_pfn--;
1669 
1670 	if (!pfn_valid(end_pfn))
1671 		return NULL;
1672 
1673 	start_page = pfn_to_online_page(start_pfn);
1674 	if (!start_page)
1675 		return NULL;
1676 
1677 	if (page_zone(start_page) != zone)
1678 		return NULL;
1679 
1680 	end_page = pfn_to_page(end_pfn);
1681 
1682 	/* This gives a shorter code than deriving page_zone(end_page) */
1683 	if (page_zone_id(start_page) != page_zone_id(end_page))
1684 		return NULL;
1685 
1686 	return start_page;
1687 }
1688 
1689 /*
1690  * The order of subdivision here is critical for the IO subsystem.
1691  * Please do not alter this order without good reasons and regression
1692  * testing. Specifically, as large blocks of memory are subdivided,
1693  * the order in which smaller blocks are delivered depends on the order
1694  * they're subdivided in this function. This is the primary factor
1695  * influencing the order in which pages are delivered to the IO
1696  * subsystem according to empirical testing, and this is also justified
1697  * by considering the behavior of a buddy system containing a single
1698  * large block of memory acted on by a series of small allocations.
1699  * This behavior is a critical factor in sglist merging's success.
1700  *
1701  * -- nyc
1702  */
1703 static inline unsigned int expand(struct zone *zone, struct page *page, int low,
1704 				  int high, int migratetype)
1705 {
1706 	unsigned int size = 1 << high;
1707 	unsigned int nr_added = 0;
1708 
1709 	while (high > low) {
1710 		high--;
1711 		size >>= 1;
1712 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1713 
1714 		/*
1715 		 * Mark as guard page(s); this lets the range merge back
1716 		 * into the allocator when the buddy is freed. The
1717 		 * corresponding page table entries are not touched; the
1718 		 * pages stay not-present in the virtual address space.
1719 		 */
1720 		if (set_page_guard(zone, &page[size], high))
1721 			continue;
1722 
1723 		__add_to_free_list(&page[size], zone, high, migratetype, false);
1724 		set_buddy_order(&page[size], high);
1725 		nr_added += size;
1726 	}
1727 
1728 	return nr_added;
1729 }
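/*
 * Worked example (illustrative, derived from the loop above): expand(zone,
 * page, low = 0, high = 3, mt) splits an order-3 block for an order-0
 * request. The upper halves of order 2, 1 and 0 go back on the freelists
 * (4 + 2 + 1 = 7 pages reported via nr_added), leaving page[0] as the single
 * order-0 page for the caller.
 */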
1730 
1731 static __always_inline void page_del_and_expand(struct zone *zone,
1732 						struct page *page, int low,
1733 						int high, int migratetype)
1734 {
1735 	int nr_pages = 1 << high;
1736 
1737 	__del_page_from_free_list(page, zone, high, migratetype);
1738 	nr_pages -= expand(zone, page, low, high, migratetype);
1739 	account_freepages(zone, -nr_pages, migratetype);
1740 }
1741 
1742 static void check_new_page_bad(struct page *page)
1743 {
1744 	if (unlikely(PageHWPoison(page))) {
1745 		/* Don't complain about hwpoisoned pages */
1746 		if (PageBuddy(page))
1747 			__ClearPageBuddy(page);
1748 		return;
1749 	}
1750 
1751 	bad_page(page,
1752 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
1753 }
1754 
1755 /*
1756  * This page is about to be returned from the page allocator
1757  */
1758 static bool check_new_page(struct page *page)
1759 {
1760 	if (likely(page_expected_state(page,
1761 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1762 		return false;
1763 
1764 	check_new_page_bad(page);
1765 	return true;
1766 }
1767 
1768 static inline bool check_new_pages(struct page *page, unsigned int order)
1769 {
1770 	if (is_check_pages_enabled()) {
1771 		for (int i = 0; i < (1 << order); i++) {
1772 			struct page *p = page + i;
1773 
1774 			if (check_new_page(p))
1775 				return true;
1776 		}
1777 	}
1778 
1779 	return false;
1780 }
1781 
1782 static inline bool should_skip_kasan_unpoison(gfp_t flags)
1783 {
1784 	/* Don't skip if a software KASAN mode is enabled. */
1785 	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
1786 	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
1787 		return false;
1788 
1789 	/* Skip, if hardware tag-based KASAN is not enabled. */
1790 	if (!kasan_hw_tags_enabled())
1791 		return true;
1792 
1793 	/*
1794 	 * With hardware tag-based KASAN enabled, skip if this has been
1795 	 * requested via __GFP_SKIP_KASAN.
1796 	 */
1797 	return flags & __GFP_SKIP_KASAN;
1798 }
1799 
1800 static inline bool should_skip_init(gfp_t flags)
1801 {
1802 	/* Don't skip, if hardware tag-based KASAN is not enabled. */
1803 	if (!kasan_hw_tags_enabled())
1804 		return false;
1805 
1806 	/* For hardware tag-based KASAN, skip if requested. */
1807 	return (flags & __GFP_SKIP_ZERO);
1808 }
1809 
1810 inline void post_alloc_hook(struct page *page, unsigned int order,
1811 				gfp_t gfp_flags)
1812 {
1813 	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
1814 			!should_skip_init(gfp_flags);
1815 	bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
1816 	int i;
1817 
1818 	set_page_private(page, 0);
1819 
1820 	arch_alloc_page(page, order);
1821 	debug_pagealloc_map_pages(page, 1 << order);
1822 
1823 	/*
1824 	 * Page unpoisoning must happen before memory initialization.
1825 	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
1826 	 * allocations and the page unpoisoning code will complain.
1827 	 */
1828 	kernel_unpoison_pages(page, 1 << order);
1829 
1830 	/*
1831 	 * As memory initialization might be integrated into KASAN,
1832 	 * KASAN unpoisoning and memory initialization code must be
1833 	 * kept together to avoid discrepancies in behavior.
1834 	 */
1835 
1836 	/*
1837 	 * If memory tags should be zeroed
1838 	 * (which happens only when memory should be initialized as well).
1839 	 */
1840 	if (zero_tags)
1841 		init = !tag_clear_highpages(page, 1 << order);
1842 
1843 	if (!should_skip_kasan_unpoison(gfp_flags) &&
1844 	    kasan_unpoison_pages(page, order, init)) {
1845 		/* Take note that memory was initialized by KASAN. */
1846 		if (kasan_has_integrated_init())
1847 			init = false;
1848 	} else {
1849 		/*
1850 		 * If memory tags have not been set by KASAN, reset the page
1851 		 * tags to ensure page_address() dereferencing does not fault.
1852 		 */
1853 		for (i = 0; i != 1 << order; ++i)
1854 			page_kasan_tag_reset(page + i);
1855 	}
1856 	/* If memory is still not initialized, initialize it now. */
1857 	if (init)
1858 		kernel_init_pages(page, 1 << order);
1859 
1860 	set_page_owner(page, order, gfp_flags);
1861 	page_table_check_alloc(page, order);
1862 	pgalloc_tag_add(page, current, 1 << order);
1863 }
1864 
1865 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1866 							unsigned int alloc_flags)
1867 {
1868 	post_alloc_hook(page, order, gfp_flags);
1869 
1870 	if (order && (gfp_flags & __GFP_COMP))
1871 		prep_compound_page(page, order);
1872 
1873 	/*
1874 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1875 	 * allocate the page. The expectation is that the caller is taking
1876 	 * steps that will free more memory. The caller should avoid the page
1877 	 * being used for !PFMEMALLOC purposes.
1878 	 */
1879 	if (alloc_flags & ALLOC_NO_WATERMARKS)
1880 		set_page_pfmemalloc(page);
1881 	else
1882 		clear_page_pfmemalloc(page);
1883 }
1884 
1885 /*
1886  * Go through the free lists for the given migratetype and remove
1887  * the smallest available page from the freelists
1888  */
1889 static __always_inline
1890 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1891 						int migratetype)
1892 {
1893 	unsigned int current_order;
1894 	struct free_area *area;
1895 	struct page *page;
1896 
1897 	/* Find a page of the appropriate size in the preferred list */
1898 	for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
1899 		area = &(zone->free_area[current_order]);
1900 		page = get_page_from_free_area(area, migratetype);
1901 		if (!page)
1902 			continue;
1903 
1904 		page_del_and_expand(zone, page, order, current_order,
1905 				    migratetype);
1906 		trace_mm_page_alloc_zone_locked(page, order, migratetype,
1907 				pcp_allowed_order(order) &&
1908 				migratetype < MIGRATE_PCPTYPES);
1909 		return page;
1910 	}
1911 
1912 	return NULL;
1913 }
1914 
1915 
1916 /*
1917  * This array describes the order in which other migratetypes' free lists
1918  * are fallen back to when the free lists for the desired migratetype are depleted.
1919  *
1920  * The other migratetypes do not have fallbacks.
1921  */
1922 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
1923 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE   },
1924 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
1925 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE   },
1926 };
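/*
 * Example (reads directly off the table above): a MIGRATE_UNMOVABLE request
 * whose own freelists are empty falls back to MIGRATE_RECLAIMABLE first and
 * MIGRATE_MOVABLE second, per fallbacks[MIGRATE_UNMOVABLE].
 */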
1927 
1928 #ifdef CONFIG_CMA
1929 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1930 					unsigned int order)
1931 {
1932 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1933 }
1934 #else
1935 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1936 					unsigned int order) { return NULL; }
1937 #endif
1938 
1939 /*
1940  * Move all free pages of a block to new type's freelist. Caller needs to
1941  * change the block type.
1942  */
1943 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
1944 				  int old_mt, int new_mt)
1945 {
1946 	struct page *page;
1947 	unsigned long pfn, end_pfn;
1948 	unsigned int order;
1949 	int pages_moved = 0;
1950 
1951 	VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
1952 	end_pfn = pageblock_end_pfn(start_pfn);
1953 
1954 	for (pfn = start_pfn; pfn < end_pfn;) {
1955 		page = pfn_to_page(pfn);
1956 		if (!PageBuddy(page)) {
1957 			pfn++;
1958 			continue;
1959 		}
1960 
1961 		/* Make sure we are not inadvertently changing nodes */
1962 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1963 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1964 
1965 		order = buddy_order(page);
1966 
1967 		move_to_free_list(page, zone, order, old_mt, new_mt);
1968 
1969 		pfn += 1 << order;
1970 		pages_moved += 1 << order;
1971 	}
1972 
1973 	return pages_moved;
1974 }
1975 
1976 static bool prep_move_freepages_block(struct zone *zone, struct page *page,
1977 				      unsigned long *start_pfn,
1978 				      int *num_free, int *num_movable)
1979 {
1980 	unsigned long pfn, start, end;
1981 
1982 	pfn = page_to_pfn(page);
1983 	start = pageblock_start_pfn(pfn);
1984 	end = pageblock_end_pfn(pfn);
1985 
1986 	/*
1987 	 * The caller only has the lock for @zone, don't touch ranges
1988 	 * that straddle into other zones. While we could move part of
1989 	 * the range that's inside the zone, this call is usually
1990 	 * accompanied by other operations such as migratetype updates
1991 	 * which also should be locked.
1992 	 */
1993 	if (!zone_spans_pfn(zone, start))
1994 		return false;
1995 	if (!zone_spans_pfn(zone, end - 1))
1996 		return false;
1997 
1998 	*start_pfn = start;
1999 
2000 	if (num_free) {
2001 		*num_free = 0;
2002 		*num_movable = 0;
2003 		for (pfn = start; pfn < end;) {
2004 			page = pfn_to_page(pfn);
2005 			if (PageBuddy(page)) {
2006 				int nr = 1 << buddy_order(page);
2007 
2008 				*num_free += nr;
2009 				pfn += nr;
2010 				continue;
2011 			}
2012 			/*
2013 			 * We assume that pages that could be isolated for
2014 			 * migration are movable. But we don't actually try
2015 			 * isolating, as that would be expensive.
2016 			 */
2017 			if (PageLRU(page) || page_has_movable_ops(page))
2018 				(*num_movable)++;
2019 			pfn++;
2020 		}
2021 	}
2022 
2023 	return true;
2024 }
2025 
2026 static int move_freepages_block(struct zone *zone, struct page *page,
2027 				int old_mt, int new_mt)
2028 {
2029 	unsigned long start_pfn;
2030 	int res;
2031 
2032 	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
2033 		return -1;
2034 
2035 	res = __move_freepages_block(zone, start_pfn, old_mt, new_mt);
2036 	set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
2037 
2038 	return res;
2039 
2040 }
2041 
2042 #ifdef CONFIG_MEMORY_ISOLATION
2043 /* Look for a buddy that straddles start_pfn */
2044 static unsigned long find_large_buddy(unsigned long start_pfn)
2045 {
2046 	/*
2047 	 * If start_pfn is not an order-0 PageBuddy, next PageBuddy containing
2048 	 * start_pfn has minimal order of __ffs(start_pfn) + 1. Start checking
2049 	 * the order with __ffs(start_pfn). If start_pfn is order-0 PageBuddy,
2050 	 * the starting order does not matter.
2051 	 */
2052 	int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER;
2053 	struct page *page;
2054 	unsigned long pfn = start_pfn;
2055 
2056 	while (!PageBuddy(page = pfn_to_page(pfn))) {
2057 		/* Nothing found */
2058 		if (++order > MAX_PAGE_ORDER)
2059 			return start_pfn;
2060 		pfn &= ~0UL << order;
2061 	}
2062 
2063 	/*
2064 	 * Found a preceding buddy, but does it straddle?
2065 	 */
2066 	if (pfn + (1 << buddy_order(page)) > start_pfn)
2067 		return pfn;
2068 
2069 	/* Nothing found */
2070 	return start_pfn;
2071 }
2072 
2073 static inline void toggle_pageblock_isolate(struct page *page, bool isolate)
2074 {
2075 	if (isolate)
2076 		set_pageblock_isolate(page);
2077 	else
2078 		clear_pageblock_isolate(page);
2079 }
2080 
2081 /**
2082  * __move_freepages_block_isolate - move free pages in block for page isolation
2083  * @zone: the zone
2084  * @page: the pageblock page
2085  * @isolate: to isolate the given pageblock or unisolate it
2086  *
2087  * This is similar to move_freepages_block(), but handles the special
2088  * case encountered in page isolation, where the block of interest
2089  * might be part of a larger buddy spanning multiple pageblocks.
2090  *
2091  * Unlike the regular page allocator path, which moves pages while
2092  * stealing buddies off the freelist, page isolation is interested in
2093  * arbitrary pfn ranges that may have overlapping buddies on both ends.
2094  *
2095  * This function handles that. Straddling buddies are split into
2096  * individual pageblocks. Only the block of interest is moved.
2097  *
2098  * Returns %true if pages could be moved, %false otherwise.
2099  */
2100 static bool __move_freepages_block_isolate(struct zone *zone,
2101 		struct page *page, bool isolate)
2102 {
2103 	unsigned long start_pfn, buddy_pfn;
2104 	int from_mt;
2105 	int to_mt;
2106 	struct page *buddy;
2107 
2108 	if (isolate == get_pageblock_isolate(page)) {
2109 		VM_WARN_ONCE(1, "%s a pageblock that is already in that state",
2110 			     isolate ? "Isolate" : "Unisolate");
2111 		return false;
2112 	}
2113 
2114 	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
2115 		return false;
2116 
2117 	/* No splits needed if buddies can't span multiple blocks */
2118 	if (pageblock_order == MAX_PAGE_ORDER)
2119 		goto move;
2120 
2121 	buddy_pfn = find_large_buddy(start_pfn);
2122 	buddy = pfn_to_page(buddy_pfn);
2123 	/* We're a part of a larger buddy */
2124 	if (PageBuddy(buddy) && buddy_order(buddy) > pageblock_order) {
2125 		int order = buddy_order(buddy);
2126 
2127 		del_page_from_free_list(buddy, zone, order,
2128 					get_pfnblock_migratetype(buddy, buddy_pfn));
2129 		toggle_pageblock_isolate(page, isolate);
2130 		split_large_buddy(zone, buddy, buddy_pfn, order, FPI_NONE);
2131 		return true;
2132 	}
2133 
2134 move:
2135 	/* Use MIGRATETYPE_MASK to get non-isolate migratetype */
2136 	if (isolate) {
2137 		from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
2138 						    MIGRATETYPE_MASK);
2139 		to_mt = MIGRATE_ISOLATE;
2140 	} else {
2141 		from_mt = MIGRATE_ISOLATE;
2142 		to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
2143 						  MIGRATETYPE_MASK);
2144 	}
2145 
2146 	__move_freepages_block(zone, start_pfn, from_mt, to_mt);
2147 	toggle_pageblock_isolate(pfn_to_page(start_pfn), isolate);
2148 
2149 	return true;
2150 }
2151 
2152 bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
2153 {
2154 	return __move_freepages_block_isolate(zone, page, true);
2155 }
2156 
2157 bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page)
2158 {
2159 	return __move_freepages_block_isolate(zone, page, false);
2160 }
2161 
2162 #endif /* CONFIG_MEMORY_ISOLATION */
2163 
2164 static inline bool boost_watermark(struct zone *zone)
2165 {
2166 	unsigned long max_boost;
2167 
2168 	if (!watermark_boost_factor)
2169 		return false;
2170 	/*
2171 	 * Don't bother in zones that are unlikely to produce results.
2172 	 * On small machines, including kdump capture kernels running
2173 	 * in a small area, boosting the watermark can cause an out of
2174 	 * memory situation immediately.
2175 	 */
2176 	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2177 		return false;
2178 
2179 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2180 			watermark_boost_factor, 10000);
2181 
2182 	/*
2183 	 * The high watermark may be uninitialised if fragmentation occurs
2184 	 * very early in boot, so do not boost. We do not fall
2185 	 * through and boost by pageblock_nr_pages because failing
2186 	 * allocations that early means that reclaim is not going
2187 	 * to help, and it may even be impossible to reclaim to the
2188 	 * boosted watermark, resulting in a hang.
2189 	 */
2190 	if (!max_boost)
2191 		return false;
2192 
2193 	max_boost = max(pageblock_nr_pages, max_boost);
2194 
2195 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2196 		max_boost);
2197 
2198 	return true;
2199 }
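/*
 * Worked example with illustrative numbers (not taken from any particular
 * system): with watermark_boost_factor = 15000 and a high watermark of 12800
 * pages, max_boost = mult_frac(12800, 15000, 10000) = 19200 pages. Each call
 * then raises watermark_boost by one pageblock (e.g. 512 pages with 2MB
 * pageblocks and 4K base pages), clamped to that cap.
 */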
2200 
2201 /*
2202  * When we are falling back to another migratetype during allocation, should we
2203  * try to claim an entire block to satisfy further allocations, instead of
2204  * polluting multiple pageblocks?
2205  */
2206 static bool should_try_claim_block(unsigned int order, int start_mt)
2207 {
2208 	/*
2209 	 * Leaving this order check in place is intentional, even though the
2210 	 * next check uses a more relaxed order threshold. If this condition
2211 	 * is met we can always claim the whole pageblock, whereas the check
2212 	 * below is only a heuristic that does not guarantee it and may be
2213 	 * changed at any time.
2214 	 */
2215 	if (order >= pageblock_order)
2216 		return true;
2217 
2218 	/*
2219 	 * Above a certain threshold, always try to claim, as it's likely there
2220 	 * will be more free pages in the pageblock.
2221 	 */
2222 	if (order >= pageblock_order / 2)
2223 		return true;
2224 
2225 	/*
2226 	 * Unmovable/reclaimable allocations would cause permanent
2227 	 * fragmentations if they fell back to allocating from a movable block
2228 	 * (polluting it), so we try to claim the whole block regardless of the
2229 	 * allocation size. Later movable allocations can always steal from this
2230 	 * block, which is less problematic.
2231 	 */
2232 	if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE)
2233 		return true;
2234 
2235 	if (page_group_by_mobility_disabled)
2236 		return true;
2237 
2238 	/*
2239 	 * Movable pages won't cause permanent fragmentation, so when you alloc
2240 	 * small pages, we just need to temporarily steal unmovable or
2241 	 * reclaimable pages that are closest to the request size. After a
2242 	 * while, memory compaction may occur to form large contiguous pages,
2243 	 * and the next movable allocation may not need to steal.
2244 	 */
2245 	return false;
2246 }
2247 
2248 /*
2249  * Check whether there is a suitable fallback freepage with requested order.
2250  * If claimable is true, this function returns fallback_mt only if
2251  * we would claim the whole block. This helps to reduce
2252  * fragmentation due to mixed migratetype pages in one pageblock.
2253  */
2254 int find_suitable_fallback(struct free_area *area, unsigned int order,
2255 			   int migratetype, bool claimable)
2256 {
2257 	int i;
2258 
2259 	if (claimable && !should_try_claim_block(order, migratetype))
2260 		return -2;
2261 
2262 	if (area->nr_free == 0)
2263 		return -1;
2264 
2265 	for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
2266 		int fallback_mt = fallbacks[migratetype][i];
2267 
2268 		if (!free_area_empty(area, fallback_mt))
2269 			return fallback_mt;
2270 	}
2271 
2272 	return -1;
2273 }
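/*
 * Example of how the return values are consumed (see the callers below):
 * __rmqueue_claim() scans orders from MAX_PAGE_ORDER downwards, treats -1 as
 * "no usable fallback at this order, keep scanning", and stops on -2 because
 * the remaining orders are too small to be worth claiming. __rmqueue_steal()
 * passes claimable == false and therefore only ever sees -1 or a usable
 * fallback migratetype.
 */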
2274 
2275 /*
2276  * This function implements actual block claiming behaviour. If order is large
2277  * enough, we can claim the whole pageblock for the requested migratetype. If
2278  * not, we check the pageblock for constituent pages; if at least half of the
2279  * pages are free or compatible, we can still claim the whole block, so pages
2280  * freed in the future will be put on the correct free list.
2281  */
2282 static struct page *
2283 try_to_claim_block(struct zone *zone, struct page *page,
2284 		   int current_order, int order, int start_type,
2285 		   int block_type, unsigned int alloc_flags)
2286 {
2287 	int free_pages, movable_pages, alike_pages;
2288 	unsigned long start_pfn;
2289 
2290 	/* Take ownership for orders >= pageblock_order */
2291 	if (current_order >= pageblock_order) {
2292 		unsigned int nr_added;
2293 
2294 		del_page_from_free_list(page, zone, current_order, block_type);
2295 		change_pageblock_range(page, current_order, start_type);
2296 		nr_added = expand(zone, page, order, current_order, start_type);
2297 		account_freepages(zone, nr_added, start_type);
2298 		return page;
2299 	}
2300 
2301 	/*
2302 	 * Boost watermarks to increase reclaim pressure to reduce the
2303 	 * likelihood of future fallbacks. Wake kswapd now as the node
2304 	 * may be balanced overall and kswapd will not wake naturally.
2305 	 */
2306 	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2307 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2308 
2309 	/* moving whole block can fail due to zone boundary conditions */
2310 	if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
2311 				       &movable_pages))
2312 		return NULL;
2313 
2314 	/*
2315 	 * Determine how many pages are compatible with our allocation.
2316 	 * For movable allocation, it's the number of movable pages which
2317 	 * we just obtained. For other types it's a bit more tricky.
2318 	 */
2319 	if (start_type == MIGRATE_MOVABLE) {
2320 		alike_pages = movable_pages;
2321 	} else {
2322 		/*
2323 		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2324 		 * to MOVABLE pageblock, consider all non-movable pages as
2325 		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2326 		 * vice versa, be conservative since we can't distinguish the
2327 		 * exact migratetype of non-movable pages.
2328 		 */
2329 		if (block_type == MIGRATE_MOVABLE)
2330 			alike_pages = pageblock_nr_pages
2331 						- (free_pages + movable_pages);
2332 		else
2333 			alike_pages = 0;
2334 	}
2335 	/*
2336 	 * If a sufficient number of pages in the block are either free or of
2337 	 * a migratetype compatible with our allocation, claim the whole block.
2338 	 */
2339 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2340 			page_group_by_mobility_disabled) {
2341 		__move_freepages_block(zone, start_pfn, block_type, start_type);
2342 		set_pageblock_migratetype(pfn_to_page(start_pfn), start_type);
2343 		return __rmqueue_smallest(zone, order, start_type);
2344 	}
2345 
2346 	return NULL;
2347 }
2348 
2349 /*
2350  * Try to allocate from some fallback migratetype by claiming the entire block,
2351  * i.e. converting it to the allocation's start migratetype.
2352  *
2353  * The use of signed ints for order and current_order is a deliberate
2354  * deviation from the rest of this file, to make the for loop
2355  * condition simpler.
2356  */
2357 static __always_inline struct page *
2358 __rmqueue_claim(struct zone *zone, int order, int start_migratetype,
2359 						unsigned int alloc_flags)
2360 {
2361 	struct free_area *area;
2362 	int current_order;
2363 	int min_order = order;
2364 	struct page *page;
2365 	int fallback_mt;
2366 
2367 	/*
2368 	 * Do not steal pages from freelists belonging to other pageblocks
2369 	 * i.e. orders < pageblock_order. If there are no local zones free,
2370 	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2371 	 */
2372 	if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
2373 		min_order = pageblock_order;
2374 
2375 	/*
2376 	 * Find the largest available free page in the other list. This roughly
2377 	 * approximates finding the pageblock with the most free pages, which
2378 	 * would be too costly to do exactly.
2379 	 */
2380 	for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
2381 				--current_order) {
2382 		area = &(zone->free_area[current_order]);
2383 		fallback_mt = find_suitable_fallback(area, current_order,
2384 						     start_migratetype, true);
2385 
2386 		/* No block in that order */
2387 		if (fallback_mt == -1)
2388 			continue;
2389 
2390 		/* Advanced into orders too low to claim, abort */
2391 		if (fallback_mt == -2)
2392 			break;
2393 
2394 		page = get_page_from_free_area(area, fallback_mt);
2395 		page = try_to_claim_block(zone, page, current_order, order,
2396 					  start_migratetype, fallback_mt,
2397 					  alloc_flags);
2398 		if (page) {
2399 			trace_mm_page_alloc_extfrag(page, order, current_order,
2400 						    start_migratetype, fallback_mt);
2401 			return page;
2402 		}
2403 	}
2404 
2405 	return NULL;
2406 }
2407 
2408 /*
2409  * Try to steal a single page from some fallback migratetype. Leave the rest of
2410  * the block as its current migratetype, potentially causing fragmentation.
2411  */
2412 static __always_inline struct page *
2413 __rmqueue_steal(struct zone *zone, int order, int start_migratetype)
2414 {
2415 	struct free_area *area;
2416 	int current_order;
2417 	struct page *page;
2418 	int fallback_mt;
2419 
2420 	for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
2421 		area = &(zone->free_area[current_order]);
2422 		fallback_mt = find_suitable_fallback(area, current_order,
2423 						     start_migratetype, false);
2424 		if (fallback_mt == -1)
2425 			continue;
2426 
2427 		page = get_page_from_free_area(area, fallback_mt);
2428 		page_del_and_expand(zone, page, order, current_order, fallback_mt);
2429 		trace_mm_page_alloc_extfrag(page, order, current_order,
2430 					    start_migratetype, fallback_mt);
2431 		return page;
2432 	}
2433 
2434 	return NULL;
2435 }
2436 
2437 enum rmqueue_mode {
2438 	RMQUEUE_NORMAL,
2439 	RMQUEUE_CMA,
2440 	RMQUEUE_CLAIM,
2441 	RMQUEUE_STEAL,
2442 };
2443 
2444 /*
2445  * Do the hard work of removing an element from the buddy allocator.
2446  * Call me with the zone->lock already held.
2447  */
2448 static __always_inline struct page *
2449 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2450 	  unsigned int alloc_flags, enum rmqueue_mode *mode)
2451 {
2452 	struct page *page;
2453 
2454 	if (IS_ENABLED(CONFIG_CMA)) {
2455 		/*
2456 		 * Balance movable allocations between regular and CMA areas by
2457 		 * allocating from CMA when over half of the zone's free memory
2458 		 * is in the CMA area.
2459 		 */
2460 		if (alloc_flags & ALLOC_CMA &&
2461 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
2462 		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
2463 			page = __rmqueue_cma_fallback(zone, order);
2464 			if (page)
2465 				return page;
2466 		}
2467 	}
2468 
2469 	/*
2470 	 * First try the freelists of the requested migratetype, then try
2471 	 * fallback modes with increasing levels of fragmentation risk.
2472 	 *
2473 	 * The fallback logic is expensive and rmqueue_bulk() calls in
2474 	 * a loop with the zone->lock held, meaning the freelists are
2475 	 * not subject to any outside changes. Remember in *mode where
2476 	 * we found pay dirt, to save us the search on the next call.
2477 	 */
2478 	switch (*mode) {
2479 	case RMQUEUE_NORMAL:
2480 		page = __rmqueue_smallest(zone, order, migratetype);
2481 		if (page)
2482 			return page;
2483 		fallthrough;
2484 	case RMQUEUE_CMA:
2485 		if (alloc_flags & ALLOC_CMA) {
2486 			page = __rmqueue_cma_fallback(zone, order);
2487 			if (page) {
2488 				*mode = RMQUEUE_CMA;
2489 				return page;
2490 			}
2491 		}
2492 		fallthrough;
2493 	case RMQUEUE_CLAIM:
2494 		page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
2495 		if (page) {
2496 			/* Replenished preferred freelist, back to normal mode. */
2497 			*mode = RMQUEUE_NORMAL;
2498 			return page;
2499 		}
2500 		fallthrough;
2501 	case RMQUEUE_STEAL:
2502 		if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
2503 			page = __rmqueue_steal(zone, order, migratetype);
2504 			if (page) {
2505 				*mode = RMQUEUE_STEAL;
2506 				return page;
2507 			}
2508 		}
2509 	}
2510 	return NULL;
2511 }
2512 
2513 /*
2514  * Obtain a specified number of elements from the buddy allocator, all under
2515  * a single hold of the lock, for efficiency.  Add them to the supplied list.
2516  * Returns the number of new pages which were placed at *list.
2517  */
2518 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2519 			unsigned long count, struct list_head *list,
2520 			int migratetype, unsigned int alloc_flags)
2521 {
2522 	enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
2523 	unsigned long flags;
2524 	int i;
2525 
2526 	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
2527 		if (!spin_trylock_irqsave(&zone->lock, flags))
2528 			return 0;
2529 	} else {
2530 		spin_lock_irqsave(&zone->lock, flags);
2531 	}
2532 	for (i = 0; i < count; ++i) {
2533 		struct page *page = __rmqueue(zone, order, migratetype,
2534 					      alloc_flags, &rmqm);
2535 		if (unlikely(page == NULL))
2536 			break;
2537 
2538 		/*
2539 		 * Split buddy pages returned by expand() are received here in
2540 		 * physical page order. The page is added to the tail of the
2541 		 * caller's list, so from the caller's perspective the list is
2542 		 * (under some conditions) ordered by page number. IO devices
2543 		 * that walk the list from the head therefore also see pages in
2544 		 * physical order, which lets them merge IO requests when the
2545 		 * physical pages are properly ordered.
2547 		 */
2548 		list_add_tail(&page->pcp_list, list);
2549 	}
2550 	spin_unlock_irqrestore(&zone->lock, flags);
2551 
2552 	return i;
2553 }
2554 
2555 /*
2556  * Called from the vmstat counter updater to decay the PCP high.
2557  * Return whether there is additional work to do.
2558  */
2559 bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
2560 {
2561 	int high_min, to_drain, to_drain_batched, batch;
2562 	bool todo = false;
2563 
2564 	high_min = READ_ONCE(pcp->high_min);
2565 	batch = READ_ONCE(pcp->batch);
2566 	/*
2567 	 * Decrease pcp->high periodically to try to free possibly
2568 	 * idle PCP pages, but avoid freeing too many pages at once to
2569 	 * control latency.  This also caps the pcp->high decrement.
2570 	 */
2571 	if (pcp->high > high_min) {
2572 		pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2573 				 pcp->high - (pcp->high >> 3), high_min);
2574 		if (pcp->high > high_min)
2575 			todo = true;
2576 	}
2577 
2578 	to_drain = pcp->count - pcp->high;
2579 	while (to_drain > 0) {
2580 		to_drain_batched = min(to_drain, batch);
2581 		pcp_spin_lock_nopin(pcp);
2582 		free_pcppages_bulk(zone, to_drain_batched, pcp, 0);
2583 		pcp_spin_unlock_nopin(pcp);
2584 		todo = true;
2585 
2586 		to_drain -= to_drain_batched;
2587 	}
2588 
2589 	return todo;
2590 }
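/*
 * Worked example with illustrative numbers: high_min = 100, pcp->high = 1000,
 * pcp->count = 1200, batch = 64, CONFIG_PCP_BATCH_SCALE_MAX = 5. The new
 * pcp->high = max3(1200 - 2048, 1000 - 125, 100) = 875, so to_drain =
 * 1200 - 875 = 325 pages are freed in chunks of at most 64 per lock hold.
 */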
2591 
2592 #ifdef CONFIG_NUMA
2593 /*
2594  * Called from the vmstat counter updater to drain pagesets of this
2595  * currently executing processor on remote nodes after they have
2596  * expired.
2597  */
2598 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2599 {
2600 	int to_drain, batch;
2601 
2602 	batch = READ_ONCE(pcp->batch);
2603 	to_drain = min(pcp->count, batch);
2604 	if (to_drain > 0) {
2605 		pcp_spin_lock_nopin(pcp);
2606 		free_pcppages_bulk(zone, to_drain, pcp, 0);
2607 		pcp_spin_unlock_nopin(pcp);
2608 	}
2609 }
2610 #endif
2611 
2612 /*
2613  * Drain pcplists of the indicated processor and zone.
2614  */
2615 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2616 {
2617 	struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2618 	int count;
2619 
2620 	do {
2621 		pcp_spin_lock_nopin(pcp);
2622 		count = pcp->count;
2623 		if (count) {
2624 			int to_drain = min(count,
2625 				pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
2626 
2627 			free_pcppages_bulk(zone, to_drain, pcp, 0);
2628 			count -= to_drain;
2629 		}
2630 		pcp_spin_unlock_nopin(pcp);
2631 	} while (count);
2632 }
2633 
2634 /*
2635  * Drain pcplists of all zones on the indicated processor.
2636  */
2637 static void drain_pages(unsigned int cpu)
2638 {
2639 	struct zone *zone;
2640 
2641 	for_each_populated_zone(zone) {
2642 		drain_pages_zone(cpu, zone);
2643 	}
2644 }
2645 
2646 /*
2647  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2648  */
2649 void drain_local_pages(struct zone *zone)
2650 {
2651 	int cpu = smp_processor_id();
2652 
2653 	if (zone)
2654 		drain_pages_zone(cpu, zone);
2655 	else
2656 		drain_pages(cpu);
2657 }
2658 
2659 /*
2660  * The implementation of drain_all_pages(), exposing an extra parameter to
2661  * drain on all cpus.
2662  *
2663  * drain_all_pages() is optimized to only execute on cpus where pcplists are
2664  * not empty. The check for non-emptiness can however race with a free to
2665  * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2666  * that need the guarantee that every CPU has drained can disable the
2667  * optimizing racy check.
2668  */
2669 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
2670 {
2671 	int cpu;
2672 
2673 	/*
2674 	 * Allocate in the BSS so we won't require allocation in
2675 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2676 	 */
2677 	static cpumask_t cpus_with_pcps;
2678 
2679 	/*
2680 	 * Do not drain if one is already in progress unless it's specific to
2681 	 * a zone. Such callers are primarily CMA and memory hotplug and need
2682 	 * the drain to be complete when the call returns.
2683 	 */
2684 	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2685 		if (!zone)
2686 			return;
2687 		mutex_lock(&pcpu_drain_mutex);
2688 	}
2689 
2690 	/*
2691 	 * We don't care about racing with CPU hotplug events,
2692 	 * as the offline notification will cause the notified
2693 	 * cpu to drain that CPU's pcps, and on_each_cpu_mask
2694 	 * disables preemption as part of its processing.
2695 	 */
2696 	for_each_online_cpu(cpu) {
2697 		struct per_cpu_pages *pcp;
2698 		struct zone *z;
2699 		bool has_pcps = false;
2700 
2701 		if (force_all_cpus) {
2702 			/*
2703 			 * The pcp.count check is racy, some callers need a
2704 			 * guarantee that no cpu is missed.
2705 			 */
2706 			has_pcps = true;
2707 		} else if (zone) {
2708 			pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2709 			if (pcp->count)
2710 				has_pcps = true;
2711 		} else {
2712 			for_each_populated_zone(z) {
2713 				pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2714 				if (pcp->count) {
2715 					has_pcps = true;
2716 					break;
2717 				}
2718 			}
2719 		}
2720 
2721 		if (has_pcps)
2722 			cpumask_set_cpu(cpu, &cpus_with_pcps);
2723 		else
2724 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
2725 	}
2726 
2727 	for_each_cpu(cpu, &cpus_with_pcps) {
2728 		if (zone)
2729 			drain_pages_zone(cpu, zone);
2730 		else
2731 			drain_pages(cpu);
2732 	}
2733 
2734 	mutex_unlock(&pcpu_drain_mutex);
2735 }
2736 
2737 /*
2738  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2739  *
2740  * When zone parameter is non-NULL, spill just the single zone's pages.
2741  */
2742 void drain_all_pages(struct zone *zone)
2743 {
2744 	__drain_all_pages(zone, false);
2745 }
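/*
 * Example (per the comment in __drain_all_pages() above): memory offlining
 * and contiguous/CMA allocation call drain_all_pages(zone) for the zone they
 * are isolating, so that per-CPU pages cannot keep the target range busy;
 * passing NULL drains every populated zone instead.
 */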
2746 
2747 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high)
2748 {
2749 	int min_nr_free, max_nr_free;
2750 
2751 	/* Free as much as possible if batch freeing high-order pages. */
2752 	if (unlikely(free_high))
2753 		return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX);
2754 
2755 	/* Check for PCP disabled or boot pageset */
2756 	if (unlikely(high < batch))
2757 		return 1;
2758 
2759 	/* Leave at least pcp->batch pages on the list */
2760 	min_nr_free = batch;
2761 	max_nr_free = high - batch;
2762 
2763 	/*
2764 	 * Increase the batch number to the number of the consecutive
2765 	 * freed pages to reduce zone lock contention.
2766 	 */
2767 	batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free);
2768 
2769 	return batch;
2770 }
2771 
2772 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2773 		       int batch, bool free_high)
2774 {
2775 	int high, high_min, high_max;
2776 
2777 	high_min = READ_ONCE(pcp->high_min);
2778 	high_max = READ_ONCE(pcp->high_max);
2779 	high = pcp->high = clamp(pcp->high, high_min, high_max);
2780 
2781 	if (unlikely(!high))
2782 		return 0;
2783 
2784 	if (unlikely(free_high)) {
2785 		pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2786 				high_min);
2787 		return 0;
2788 	}
2789 
2790 	/*
2791 	 * If reclaim is active, limit the number of pages that can be
2792 	 * stored on pcp lists
2793 	 */
2794 	if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) {
2795 		int free_count = max_t(int, pcp->free_count, batch);
2796 
2797 		pcp->high = max(high - free_count, high_min);
2798 		return min(batch << 2, pcp->high);
2799 	}
2800 
2801 	if (high_min == high_max)
2802 		return high;
2803 
2804 	if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) {
2805 		int free_count = max_t(int, pcp->free_count, batch);
2806 
2807 		pcp->high = max(high - free_count, high_min);
2808 		high = max(pcp->count, high_min);
2809 	} else if (pcp->count >= high) {
2810 		int need_high = pcp->free_count + batch;
2811 
2812 		/* pcp->high should be large enough to hold batch freed pages */
2813 		if (pcp->high < need_high)
2814 			pcp->high = clamp(need_high, high_min, high_max);
2815 	}
2816 
2817 	return high;
2818 }
2819 
2820 /*
2821  * Tune the pcp alloc factor and adjust count & free_count. Free pages to bring
2822  * the pcp's count back below its high watermark.
2823  *
2824  * May return with the pcp unlocked if, during page freeing, the pcp spinlock
2825  * cannot be reacquired. Returns true if the pcp is still locked, false otherwise.
2826  */
2827 static bool free_frozen_page_commit(struct zone *zone,
2828 		struct per_cpu_pages *pcp, struct page *page, int migratetype,
2829 		unsigned int order, fpi_t fpi_flags)
2830 {
2831 	int high, batch;
2832 	int to_free, to_free_batched;
2833 	int pindex;
2834 	int cpu = smp_processor_id();
2835 	int ret = true;
2836 	bool free_high = false;
2837 
2838 	/*
2839 	 * On freeing, reduce the number of pages that are batch allocated.
2840 	 * See nr_pcp_alloc() where alloc_factor is increased for subsequent
2841 	 * allocations.
2842 	 */
2843 	pcp->alloc_factor >>= 1;
2844 	__count_vm_events(PGFREE, 1 << order);
2845 	pindex = order_to_pindex(migratetype, order);
2846 	list_add(&page->pcp_list, &pcp->lists[pindex]);
2847 	pcp->count += 1 << order;
2848 
2849 	batch = READ_ONCE(pcp->batch);
2850 	/*
2851 	 * As high-order pages other than THP's stored on PCP can contribute
2852 	 * to fragmentation, limit the number stored when PCP is heavily
2853 	 * freeing without allocation. The remainder after bulk freeing
2854 	 * stops will be drained from vmstat refresh context.
2855 	 */
2856 	if (order && order <= PAGE_ALLOC_COSTLY_ORDER) {
2857 		free_high = (pcp->free_count >= (batch + pcp->high_min / 2) &&
2858 			     (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) &&
2859 			     (!(pcp->flags & PCPF_FREE_HIGH_BATCH) ||
2860 			      pcp->count >= batch));
2861 		pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER;
2862 	} else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) {
2863 		pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER;
2864 	}
2865 	if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX))
2866 		pcp->free_count += (1 << order);
2867 
2868 	if (unlikely(fpi_flags & FPI_TRYLOCK)) {
2869 		/*
2870 		 * Do not attempt to take a zone lock. Let pcp->count get
2871 		 * over high mark temporarily.
2872 		 */
2873 		return true;
2874 	}
2875 
2876 	high = nr_pcp_high(pcp, zone, batch, free_high);
2877 	if (pcp->count < high)
2878 		return true;
2879 
2880 	to_free = nr_pcp_free(pcp, batch, high, free_high);
2881 	while (to_free > 0 && pcp->count > 0) {
2882 		to_free_batched = min(to_free, batch);
2883 		free_pcppages_bulk(zone, to_free_batched, pcp, pindex);
2884 		to_free -= to_free_batched;
2885 
2886 		if (to_free == 0 || pcp->count == 0)
2887 			break;
2888 
2889 		pcp_spin_unlock(pcp);
2890 
2891 		pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2892 		if (!pcp) {
2893 			ret = false;
2894 			break;
2895 		}
2896 
2897 		/*
2898 		 * Check if this thread has been migrated to a different CPU.
2899 		 * If that is the case, give up and indicate that the pcp is
2900 		 * returned in an unlocked state.
2901 		 */
2902 		if (smp_processor_id() != cpu) {
2903 			pcp_spin_unlock(pcp);
2904 			ret = false;
2905 			break;
2906 		}
2907 	}
2908 
2909 	if (test_bit(ZONE_BELOW_HIGH, &zone->flags) &&
2910 	    zone_watermark_ok(zone, 0, high_wmark_pages(zone),
2911 			      ZONE_MOVABLE, 0)) {
2912 		struct pglist_data *pgdat = zone->zone_pgdat;
2913 		clear_bit(ZONE_BELOW_HIGH, &zone->flags);
2914 
2915 		/*
2916 		 * Assume that memory pressure on this node is gone and may be
2917 		 * in a reclaimable state. If a memory fallback node exists,
2918 		 * direct reclaim may not have been triggered, causing a
2919 		 * 'hopeless node' to stay in that state for a while.  Let
2920 		 * kswapd work again by resetting kswapd_failures.
2921 		 */
2922 		if (kswapd_test_hopeless(pgdat) &&
2923 		    next_memory_node(pgdat->node_id) < MAX_NUMNODES)
2924 			kswapd_clear_hopeless(pgdat, KSWAPD_CLEAR_HOPELESS_PCP);
2925 	}
2926 	return ret;
2927 }
2928 
2929 /*
2930  * Free a pcp page
2931  */
2932 static void __free_frozen_pages(struct page *page, unsigned int order,
2933 				fpi_t fpi_flags)
2934 {
2935 	struct per_cpu_pages *pcp;
2936 	struct zone *zone;
2937 	unsigned long pfn = page_to_pfn(page);
2938 	int migratetype;
2939 
2940 	if (!pcp_allowed_order(order)) {
2941 		__free_pages_ok(page, order, fpi_flags);
2942 		return;
2943 	}
2944 
2945 	if (!__free_pages_prepare(page, order, fpi_flags))
2946 		return;
2947 
2948 	/*
2949 	 * We only track unmovable, reclaimable and movable on pcp lists.
2950 	 * Place ISOLATE pages on the isolated list because they are being
2951 	 * offlined but treat HIGHATOMIC and CMA as movable pages so we can
2952 	 * get those areas back if necessary. Otherwise, we may have to free
2953 	 * excessively into the page allocator
2954 	 */
2955 	zone = page_zone(page);
2956 	migratetype = get_pfnblock_migratetype(page, pfn);
2957 	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
2958 		if (unlikely(is_migrate_isolate(migratetype))) {
2959 			free_one_page(zone, page, pfn, order, fpi_flags);
2960 			return;
2961 		}
2962 		migratetype = MIGRATE_MOVABLE;
2963 	}
2964 
2965 	if (unlikely((fpi_flags & FPI_TRYLOCK) && IS_ENABLED(CONFIG_PREEMPT_RT)
2966 		     && (in_nmi() || in_hardirq()))) {
2967 		add_page_to_zone_llist(zone, page, order);
2968 		return;
2969 	}
2970 	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2971 	if (pcp) {
2972 		if (!free_frozen_page_commit(zone, pcp, page, migratetype,
2973 						order, fpi_flags))
2974 			return;
2975 		pcp_spin_unlock(pcp);
2976 	} else {
2977 		free_one_page(zone, page, pfn, order, fpi_flags);
2978 	}
2979 }
2980 
2981 void free_frozen_pages(struct page *page, unsigned int order)
2982 {
2983 	__free_frozen_pages(page, order, FPI_NONE);
2984 }
2985 
2986 void free_frozen_pages_nolock(struct page *page, unsigned int order)
2987 {
2988 	__free_frozen_pages(page, order, FPI_TRYLOCK);
2989 }
2990 
2991 /*
2992  * Free a batch of folios
2993  */
2994 void free_unref_folios(struct folio_batch *folios)
2995 {
2996 	struct per_cpu_pages *pcp = NULL;
2997 	struct zone *locked_zone = NULL;
2998 	int i, j;
2999 
3000 	/* Prepare folios for freeing */
3001 	for (i = 0, j = 0; i < folios->nr; i++) {
3002 		struct folio *folio = folios->folios[i];
3003 		unsigned long pfn = folio_pfn(folio);
3004 		unsigned int order = folio_order(folio);
3005 
3006 		if (!__free_pages_prepare(&folio->page, order, FPI_NONE))
3007 			continue;
3008 		/*
3009 		 * Free orders not handled on the PCP directly to the
3010 		 * allocator.
3011 		 */
3012 		if (!pcp_allowed_order(order)) {
3013 			free_one_page(folio_zone(folio), &folio->page,
3014 				      pfn, order, FPI_NONE);
3015 			continue;
3016 		}
3017 		folio->private = (void *)(unsigned long)order;
3018 		if (j != i)
3019 			folios->folios[j] = folio;
3020 		j++;
3021 	}
3022 	folios->nr = j;
3023 
3024 	for (i = 0; i < folios->nr; i++) {
3025 		struct folio *folio = folios->folios[i];
3026 		struct zone *zone = folio_zone(folio);
3027 		unsigned long pfn = folio_pfn(folio);
3028 		unsigned int order = (unsigned long)folio->private;
3029 		int migratetype;
3030 
3031 		folio->private = NULL;
3032 		migratetype = get_pfnblock_migratetype(&folio->page, pfn);
3033 
3034 		/* Different zone requires a different pcp lock */
3035 		if (zone != locked_zone ||
3036 		    is_migrate_isolate(migratetype)) {
3037 			if (pcp) {
3038 				pcp_spin_unlock(pcp);
3039 				locked_zone = NULL;
3040 				pcp = NULL;
3041 			}
3042 
3043 			/*
3044 			 * Free isolated pages directly to the
3045 			 * allocator, see comment in free_frozen_pages.
3046 			 */
3047 			if (is_migrate_isolate(migratetype)) {
3048 				free_one_page(zone, &folio->page, pfn,
3049 					      order, FPI_NONE);
3050 				continue;
3051 			}
3052 
3053 			/*
3054 			 * trylock is necessary as folios may be getting freed
3055 			 * from IRQ or SoftIRQ context after an IO completion.
3056 			 */
3057 			pcp = pcp_spin_trylock(zone->per_cpu_pageset);
3058 			if (unlikely(!pcp)) {
3059 				free_one_page(zone, &folio->page, pfn,
3060 					      order, FPI_NONE);
3061 				continue;
3062 			}
3063 			locked_zone = zone;
3064 		}
3065 
3066 		/*
3067 		 * Non-isolated types over MIGRATE_PCPTYPES get added
3068 		 * to the MIGRATE_MOVABLE pcp list.
3069 		 */
3070 		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3071 			migratetype = MIGRATE_MOVABLE;
3072 
3073 		trace_mm_page_free_batched(&folio->page);
3074 		if (!free_frozen_page_commit(zone, pcp, &folio->page,
3075 				migratetype, order, FPI_NONE)) {
3076 			pcp = NULL;
3077 			locked_zone = NULL;
3078 		}
3079 	}
3080 
3081 	if (pcp)
3082 		pcp_spin_unlock(pcp);
3083 	folio_batch_reinit(folios);
3084 }
3085 
3086 static void __split_page(struct page *page, unsigned int order)
3087 {
3088 	VM_WARN_ON_PAGE(PageCompound(page), page);
3089 
3090 	split_page_owner(page, order, 0);
3091 	pgalloc_tag_split(page_folio(page), order, 0);
3092 	split_page_memcg(page, order);
3093 }
3094 
3095 /*
3096  * split_page takes a non-compound higher-order page, and splits it into
3097  * n (1<<order) sub-pages: page[0..n-1].
3098  * Each sub-page must be freed individually.
3099  *
3100  * Note: this is probably too low level an operation for use in drivers.
3101  * Please consult with lkml before using this in your driver.
3102  */
3103 void split_page(struct page *page, unsigned int order)
3104 {
3105 	int i;
3106 
3107 	VM_WARN_ON_PAGE(!page_count(page), page);
3108 
3109 	for (i = 1; i < (1 << order); i++)
3110 		set_page_refcounted(page + i);
3111 
3112 	__split_page(page, order);
3113 }
3114 EXPORT_SYMBOL_GPL(split_page);
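/*
 * Illustrative sketch, not part of this file: a typical split_page() caller.
 * The helper name demo_alloc_split is hypothetical; it assumes a sleepable
 * GFP_KERNEL context.
 */
static inline struct page *demo_alloc_split(void)
{
	/* A non-compound order-2 allocation: four contiguous pages. */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;

	/* Give each of the four sub-pages its own reference count. */
	split_page(page, 2);

	/* Keep page[0]; the remaining sub-pages must be freed individually. */
	__free_page(page + 1);
	__free_page(page + 2);
	__free_page(page + 3);

	return page;
}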
3115 
3116 int __isolate_free_page(struct page *page, unsigned int order)
3117 {
3118 	struct zone *zone = page_zone(page);
3119 	int mt = get_pageblock_migratetype(page);
3120 
3121 	if (!is_migrate_isolate(mt)) {
3122 		unsigned long watermark;
3123 		/*
3124 		 * Obey watermarks as if the page was being allocated. We can
3125 		 * emulate a high-order watermark check with a raised order-0
3126 		 * watermark, because we already know our high-order page
3127 		 * exists.
3128 		 */
3129 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3130 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3131 			return 0;
3132 	}
3133 
3134 	del_page_from_free_list(page, zone, order, mt);
3135 
3136 	/*
3137 	 * Set the pageblock if the isolated page is at least half of a
3138 	 * pageblock
3139 	 */
3140 	if (order >= pageblock_order - 1) {
3141 		struct page *endpage = page + (1 << order) - 1;
3142 		for (; page < endpage; page += pageblock_nr_pages) {
3143 			int mt = get_pageblock_migratetype(page);
3144 			/*
3145 			 * Only change normal pageblocks (i.e., they can merge
3146 			 * with others)
3147 			 */
3148 			if (migratetype_is_mergeable(mt))
3149 				move_freepages_block(zone, page, mt,
3150 						     MIGRATE_MOVABLE);
3151 		}
3152 	}
3153 
3154 	return 1UL << order;
3155 }
3156 
3157 /**
3158  * __putback_isolated_page - Return a now-isolated page back where we got it
3159  * @page: Page that was isolated
3160  * @order: Order of the isolated page
3161  * @mt: The page's pageblock's migratetype
3162  *
3163  * This function is meant to return a page pulled from the free lists via
3164  * __isolate_free_page back to the free lists they were pulled from.
3165  */
3166 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3167 {
3168 	struct zone *zone = page_zone(page);
3169 
3170 	/* zone lock should be held when this function is called */
3171 	lockdep_assert_held(&zone->lock);
3172 
3173 	/* Return isolated page to tail of freelist. */
3174 	__free_one_page(page, page_to_pfn(page), zone, order, mt,
3175 			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3176 }
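/*
 * Illustrative sketch, not part of this file: pairing __isolate_free_page()
 * with __putback_isolated_page(), roughly how free page reporting borrows a
 * free buddy page of @order off the freelist. The helper name
 * demo_borrow_free_page is hypothetical; zone->lock protects both calls.
 */
static __maybe_unused bool demo_borrow_free_page(struct zone *zone,
						 struct page *page,
						 unsigned int order)
{
	unsigned long flags;
	bool isolated;
	int mt;

	spin_lock_irqsave(&zone->lock, flags);
	mt = get_pageblock_migratetype(page);
	isolated = __isolate_free_page(page, order) != 0;
	if (isolated) {
		/* ... inspect/report the page while it is off the freelist ... */
		__putback_isolated_page(page, order, mt);
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return isolated;
}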
3177 
3178 /*
3179  * Update NUMA hit/miss statistics
3180  */
3181 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3182 				   long nr_account)
3183 {
3184 #ifdef CONFIG_NUMA
3185 	enum numa_stat_item local_stat = NUMA_LOCAL;
3186 
3187 	/* skip numa counters update if numa stats is disabled */
3188 	if (!static_branch_likely(&vm_numa_stat_key))
3189 		return;
3190 
3191 	if (zone_to_nid(z) != numa_node_id())
3192 		local_stat = NUMA_OTHER;
3193 
3194 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3195 		__count_numa_events(z, NUMA_HIT, nr_account);
3196 	else {
3197 		__count_numa_events(z, NUMA_MISS, nr_account);
3198 		__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3199 	}
3200 	__count_numa_events(z, local_stat, nr_account);
3201 #endif
3202 }
3203 
3204 static __always_inline
3205 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
3206 			   unsigned int order, unsigned int alloc_flags,
3207 			   int migratetype)
3208 {
3209 	struct page *page;
3210 	unsigned long flags;
3211 
3212 	do {
3213 		page = NULL;
3214 		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
3215 			if (!spin_trylock_irqsave(&zone->lock, flags))
3216 				return NULL;
3217 		} else {
3218 			spin_lock_irqsave(&zone->lock, flags);
3219 		}
3220 		if (alloc_flags & ALLOC_HIGHATOMIC)
3221 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3222 		if (!page) {
3223 			enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
3224 
3225 			page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
3226 
3227 			/*
3228 			 * If the allocation fails, allow OOM handling and
3229 			 * order-0 (atomic) allocs access to HIGHATOMIC
3230 			 * reserves as failing now is worse than failing a
3231 			 * high-order atomic allocation in the future.
3232 			 */
3233 			if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK)))
3234 				page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3235 
3236 			if (!page) {
3237 				spin_unlock_irqrestore(&zone->lock, flags);
3238 				return NULL;
3239 			}
3240 		}
3241 		spin_unlock_irqrestore(&zone->lock, flags);
3242 	} while (check_new_pages(page, order));
3243 
3244 	/*
3245 	 * If this is a high-order atomic allocation then check
3246 	 * if the pageblock should be reserved for the future
3247 	 */
3248 	if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
3249 		reserve_highatomic_pageblock(page, order, zone);
3250 
3251 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3252 	zone_statistics(preferred_zone, zone, 1);
3253 
3254 	return page;
3255 }
3256 
3257 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
3258 {
3259 	int high, base_batch, batch, max_nr_alloc;
3260 	int high_max, high_min;
3261 
3262 	base_batch = READ_ONCE(pcp->batch);
3263 	high_min = READ_ONCE(pcp->high_min);
3264 	high_max = READ_ONCE(pcp->high_max);
3265 	high = pcp->high = clamp(pcp->high, high_min, high_max);
3266 
3267 	/* Check for PCP disabled or boot pageset */
3268 	if (unlikely(high < base_batch))
3269 		return 1;
3270 
3271 	if (order)
3272 		batch = base_batch;
3273 	else
3274 		batch = (base_batch << pcp->alloc_factor);
3275 
3276 	/*
3277 	 * If pcp->high were larger, we could avoid allocating from the
3278 	 * zone.
3279 	 */
3280 	if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags))
3281 		high = pcp->high = min(high + batch, high_max);
3282 
3283 	if (!order) {
3284 		max_nr_alloc = max(high - pcp->count - base_batch, base_batch);
3285 		/*
3286 		 * Double the number of pages allocated each time there is
3287 		 * subsequent allocation of order-0 pages without any freeing.
3288 		 */
3289 		if (batch <= max_nr_alloc &&
3290 		    pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX)
3291 			pcp->alloc_factor++;
3292 		batch = min(batch, max_nr_alloc);
3293 	}
3294 
3295 	/*
3296 	 * Scale batch relative to order if batch implies free pages
3297 	 * can be stored on the PCP. Batch can be 1 for small zones or
3298 	 * for boot pagesets which should never store free pages as
3299 	 * the pages may belong to arbitrary zones.
3300 	 */
3301 	if (batch > 1)
3302 		batch = max(batch >> order, 2);
3303 
3304 	return batch;
3305 }
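/*
 * Editorial example (not part of the source), assuming pcp->batch == 63 and
 * a pcp->high comfortably above it: an order-3 refill starts from
 * batch = 63 and the final scaling step returns max(63 >> 3, 2) == 7, i.e.
 * seven order-3 pages are pulled from the zone at once. For order-0 the
 * starting point is 63 << pcp->alloc_factor instead, so a run of order-0
 * allocations with no intervening frees roughly doubles the refill size on
 * each miss, bounded by max_nr_alloc derived from pcp->high.
 */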
3306 
3307 /* Remove page from the per-cpu list, caller must protect the list */
3308 static inline
3309 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3310 			int migratetype,
3311 			unsigned int alloc_flags,
3312 			struct per_cpu_pages *pcp,
3313 			struct list_head *list)
3314 {
3315 	struct page *page;
3316 
3317 	do {
3318 		if (list_empty(list)) {
3319 			int batch = nr_pcp_alloc(pcp, zone, order);
3320 			int alloced;
3321 
3322 			/*
3323 			 * Don't refill the list for a higher order atomic
3324 			 * allocation under memory pressure, as this would
3325 			 * not build up any HIGHATOMIC reserves, which
3326 			 * might be needed soon.
3327 			 *
3328 			 * Instead, direct it towards the reserves by
3329 			 * returning NULL, which will make the caller fall
3330 			 * back to rmqueue_buddy. This will try to use the
3331 			 * reserves first and grow them if needed.
3332 			 */
3333 			if (alloc_flags & ALLOC_HIGHATOMIC)
3334 				return NULL;
3335 
3336 			alloced = rmqueue_bulk(zone, order,
3337 					batch, list,
3338 					migratetype, alloc_flags);
3339 
3340 			pcp->count += alloced << order;
3341 			if (unlikely(list_empty(list)))
3342 				return NULL;
3343 		}
3344 
3345 		page = list_first_entry(list, struct page, pcp_list);
3346 		list_del(&page->pcp_list);
3347 		pcp->count -= 1 << order;
3348 	} while (check_new_pages(page, order));
3349 
3350 	return page;
3351 }
3352 
3353 /* Lock and remove page from the per-cpu list */
3354 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3355 			struct zone *zone, unsigned int order,
3356 			int migratetype, unsigned int alloc_flags)
3357 {
3358 	struct per_cpu_pages *pcp;
3359 	struct list_head *list;
3360 	struct page *page;
3361 
3362 	/* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
3363 	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
3364 	if (!pcp)
3365 		return NULL;
3366 
3367 	/*
3368 	 * On allocation, reduce the number of pages that are batch freed.
3369 	 * See nr_pcp_free() where free_factor is increased for subsequent
3370 	 * frees.
3371 	 */
3372 	pcp->free_count >>= 1;
3373 	list = &pcp->lists[order_to_pindex(migratetype, order)];
3374 	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3375 	pcp_spin_unlock(pcp);
3376 	if (page) {
3377 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3378 		zone_statistics(preferred_zone, zone, 1);
3379 	}
3380 	return page;
3381 }
3382 
3383 /*
3384  * Allocate a page from the given zone.
3385  * Use pcplists for THP or "cheap" high-order allocations.
3386  */
3387 
3388 /*
3389  * Do not instrument rmqueue() with KMSAN. This function may call
3390  * __msan_poison_alloca() through a call to set_pfnblock_migratetype().
3391  * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
3392  * may call rmqueue() again, which will result in a deadlock.
3393  */
3394 __no_sanitize_memory
3395 static inline
3396 struct page *rmqueue(struct zone *preferred_zone,
3397 			struct zone *zone, unsigned int order,
3398 			gfp_t gfp_flags, unsigned int alloc_flags,
3399 			int migratetype)
3400 {
3401 	struct page *page;
3402 
3403 	if (likely(pcp_allowed_order(order))) {
3404 		page = rmqueue_pcplist(preferred_zone, zone, order,
3405 				       migratetype, alloc_flags);
3406 		if (likely(page))
3407 			goto out;
3408 	}
3409 
3410 	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3411 							migratetype);
3412 
3413 out:
3414 	/* Separate test+clear to avoid unnecessary atomics */
3415 	if ((alloc_flags & ALLOC_KSWAPD) &&
3416 	    unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
3417 		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3418 		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3419 	}
3420 
3421 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3422 	return page;
3423 }
3424 
3425 /*
3426  * Reserve the pageblock(s) surrounding an allocation request for
3427  * exclusive use of high-order atomic allocations if there are no
3428  * empty page blocks that contain a page with a suitable order
3429  */
3430 static void reserve_highatomic_pageblock(struct page *page, int order,
3431 					 struct zone *zone)
3432 {
3433 	int mt;
3434 	unsigned long max_managed, flags;
3435 
3436 	/*
3437 	 * The amount reserved: the minimum is 1 pageblock, the maximum is
3438 	 * roughly 1% of a zone. But if 1% of a zone falls below a
3439 	 * pageblock size, then don't reserve any pageblocks.
3440 	 * Check is race-prone but harmless.
3441 	 */
3442 	if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
3443 		return;
3444 	max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
3445 	if (zone->nr_reserved_highatomic >= max_managed)
3446 		return;
3447 
3448 	spin_lock_irqsave(&zone->lock, flags);
3449 
3450 	/* Recheck the nr_reserved_highatomic limit under the lock */
3451 	if (zone->nr_reserved_highatomic >= max_managed)
3452 		goto out_unlock;
3453 
3454 	/* Yoink! */
3455 	mt = get_pageblock_migratetype(page);
3456 	/* Only reserve normal pageblocks (i.e., they can merge with others) */
3457 	if (!migratetype_is_mergeable(mt))
3458 		goto out_unlock;
3459 
3460 	if (order < pageblock_order) {
3461 		if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
3462 			goto out_unlock;
3463 		zone->nr_reserved_highatomic += pageblock_nr_pages;
3464 	} else {
3465 		change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
3466 		zone->nr_reserved_highatomic += 1 << order;
3467 	}
3468 
3469 out_unlock:
3470 	spin_unlock_irqrestore(&zone->lock, flags);
3471 }
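/*
 * Editorial example (not part of the source), assuming 4 KiB pages and
 * pageblock_nr_pages == 512 (2 MiB pageblocks): a zone managing 4 GiB
 * (1,048,576 pages) has 1% == 10,485 pages, which exceeds one pageblock,
 * so reserving is allowed; max_managed = ALIGN(10485, 512) == 10,752
 * pages, i.e. at most 21 pageblocks can sit in MIGRATE_HIGHATOMIC. A zone
 * managing less than about 200 MiB skips reserving entirely because 1% of
 * it is smaller than a single pageblock.
 */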
3472 
3473 /*
3474  * Used when an allocation is about to fail under memory pressure. This
3475  * potentially hurts the reliability of high-order allocations when under
3476  * intense memory pressure but failed atomic allocations should be easier
3477  * to recover from than an OOM.
3478  *
3479  * If @force is true, try to unreserve pageblocks even though highatomic
3480  * pageblock is exhausted.
3481  */
3482 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
3483 						bool force)
3484 {
3485 	struct zonelist *zonelist = ac->zonelist;
3486 	unsigned long flags;
3487 	struct zoneref *z;
3488 	struct zone *zone;
3489 	struct page *page;
3490 	int order;
3491 	int ret;
3492 
3493 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
3494 								ac->nodemask) {
3495 		/*
3496 		 * Preserve at least one pageblock unless memory pressure
3497 		 * is really high.
3498 		 */
3499 		if (!force && zone->nr_reserved_highatomic <=
3500 					pageblock_nr_pages)
3501 			continue;
3502 
3503 		spin_lock_irqsave(&zone->lock, flags);
3504 		for (order = 0; order < NR_PAGE_ORDERS; order++) {
3505 			struct free_area *area = &(zone->free_area[order]);
3506 			unsigned long size;
3507 
3508 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
3509 			if (!page)
3510 				continue;
3511 
3512 			size = max(pageblock_nr_pages, 1UL << order);
3513 			/*
3514 			 * It should never happen but changes to
3515 			 * locking could inadvertently allow a per-cpu
3516 			 * drain to add pages to MIGRATE_HIGHATOMIC
3517 			 * while unreserving so be safe and watch for
3518 			 * underflows.
3519 			 */
3520 			if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic))
3521 				size = zone->nr_reserved_highatomic;
3522 			zone->nr_reserved_highatomic -= size;
3523 
3524 			/*
3525 			 * Convert to ac->migratetype and avoid the normal
3526 			 * pageblock stealing heuristics. Minimally, the caller
3527 			 * is doing the work and needs the pages. More
3528 			 * importantly, if the block was always converted to
3529 			 * MIGRATE_UNMOVABLE or another type then the number
3530 			 * of pageblocks that cannot be completely freed
3531 			 * may increase.
3532 			 */
3533 			if (order < pageblock_order)
3534 				ret = move_freepages_block(zone, page,
3535 							   MIGRATE_HIGHATOMIC,
3536 							   ac->migratetype);
3537 			else {
3538 				move_to_free_list(page, zone, order,
3539 						  MIGRATE_HIGHATOMIC,
3540 						  ac->migratetype);
3541 				change_pageblock_range(page, order,
3542 						       ac->migratetype);
3543 				ret = 1;
3544 			}
3545 			/*
3546 			 * Reserving the block(s) already succeeded,
3547 			 * so this should not fail on zone boundaries.
3548 			 */
3549 			WARN_ON_ONCE(ret == -1);
3550 			if (ret > 0) {
3551 				spin_unlock_irqrestore(&zone->lock, flags);
3552 				return ret;
3553 			}
3554 		}
3555 		spin_unlock_irqrestore(&zone->lock, flags);
3556 	}
3557 
3558 	return false;
3559 }
3560 
3561 static inline long __zone_watermark_unusable_free(struct zone *z,
3562 				unsigned int order, unsigned int alloc_flags)
3563 {
3564 	long unusable_free = (1 << order) - 1;
3565 
3566 	/*
3567 	 * If the caller does not have rights to reserves below the min
3568 	 * watermark then subtract the free pages reserved for highatomic.
3569 	 */
3570 	if (likely(!(alloc_flags & ALLOC_RESERVES)))
3571 		unusable_free += READ_ONCE(z->nr_free_highatomic);
3572 
3573 #ifdef CONFIG_CMA
3574 	/* If allocation can't use CMA areas don't use free CMA pages */
3575 	if (!(alloc_flags & ALLOC_CMA))
3576 		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3577 #endif
3578 
3579 	return unusable_free;
3580 }
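/*
 * Editorial example (not part of the source), assuming CONFIG_CMA: for an
 * order-3 request without ALLOC_RESERVES or ALLOC_CMA, on a zone with
 * nr_free_highatomic == 1024 and 2048 free CMA pages, the unusable slice
 * is (1 << 3) - 1 + 1024 + 2048 == 3079 pages, so the caller's watermark
 * check effectively runs against free_pages - 3079.
 */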
3581 
3582 /*
3583  * Return true if free base pages are above 'mark'. For high-order checks it
3584  * will return true of the order-0 watermark is reached and there is at least
3585  * one free page of a suitable size. Checking now avoids taking the zone lock
3586  * to check in the allocation paths if no pages are free.
3587  */
3588 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3589 			 int highest_zoneidx, unsigned int alloc_flags,
3590 			 long free_pages)
3591 {
3592 	long min = mark;
3593 	int o;
3594 
3595 	/* free_pages may go negative - that's OK */
3596 	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3597 
3598 	if (unlikely(alloc_flags & ALLOC_RESERVES)) {
3599 		/*
3600 		 * __GFP_HIGH allows access to 50% of the min reserve as well
3601 		 * as OOM.
3602 		 */
3603 		if (alloc_flags & ALLOC_MIN_RESERVE) {
3604 			min -= min / 2;
3605 
3606 			/*
3607 			 * Non-blocking allocations (e.g. GFP_ATOMIC) can
3608 			 * access more reserves than just __GFP_HIGH. Other
3609 			 * non-blocking allocation requests such as GFP_NOWAIT
3610 			 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
3611 			 * access to the min reserve.
3612 			 */
3613 			if (alloc_flags & ALLOC_NON_BLOCK)
3614 				min -= min / 4;
3615 		}
3616 
3617 		/*
3618 		 * OOM victims can try even harder than the normal reserve
3619 		 * users on the grounds that it's definitely going to be in
3620 		 * the exit path shortly and free memory. Any allocation it
3621 		 * makes during the free path will be small and short-lived.
3622 		 */
3623 		if (alloc_flags & ALLOC_OOM)
3624 			min -= min / 2;
3625 	}
3626 
3627 	/*
3628 	 * Check watermarks for an order-0 allocation request. If these
3629 	 * are not met, then a high-order request also cannot go ahead
3630 	 * even if a suitable page happened to be free.
3631 	 */
3632 	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3633 		return false;
3634 
3635 	/* If this is an order-0 request then the watermark is fine */
3636 	if (!order)
3637 		return true;
3638 
3639 	/* For a high-order request, check at least one suitable page is free */
3640 	for (o = order; o < NR_PAGE_ORDERS; o++) {
3641 		struct free_area *area = &z->free_area[o];
3642 		int mt;
3643 
3644 		if (!area->nr_free)
3645 			continue;
3646 
3647 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3648 			if (!free_area_empty(area, mt))
3649 				return true;
3650 		}
3651 
3652 #ifdef CONFIG_CMA
3653 		if ((alloc_flags & ALLOC_CMA) &&
3654 		    !free_area_empty(area, MIGRATE_CMA)) {
3655 			return true;
3656 		}
3657 #endif
3658 		if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
3659 		    !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
3660 			return true;
3661 		}
3662 	}
3663 	return false;
3664 }
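/*
 * Editorial example (not part of the source): with mark == 2048, a request
 * carrying ALLOC_MIN_RESERVE drops min to 1024, and adding ALLOC_NON_BLOCK
 * (the GFP_ATOMIC case) drops it further to 768, so up to 5/8 of the min
 * reserve may be consumed. An OOM victim (ALLOC_OOM) halves whatever
 * remains once more; combined with the above that leaves an effective
 * floor of 384 pages plus the lowmem_reserve for the requested zone index.
 */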
3665 
3666 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3667 		      int highest_zoneidx, unsigned int alloc_flags)
3668 {
3669 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3670 					zone_page_state(z, NR_FREE_PAGES));
3671 }
3672 
3673 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3674 				unsigned long mark, int highest_zoneidx,
3675 				unsigned int alloc_flags, gfp_t gfp_mask)
3676 {
3677 	long free_pages;
3678 
3679 	free_pages = zone_page_state(z, NR_FREE_PAGES);
3680 
3681 	/*
3682 	 * Fast check for order-0 only. If this fails then the reserves
3683 	 * need to be calculated.
3684 	 */
3685 	if (!order) {
3686 		long usable_free;
3687 		long reserved;
3688 
3689 		usable_free = free_pages;
3690 		reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
3691 
3692 		/* reserved may over estimate high-atomic reserves. */
3693 		/* reserved may overestimate high-atomic reserves. */
3694 		if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
3695 			return true;
3696 	}
3697 
3698 	if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3699 					free_pages))
3700 		return true;
3701 
3702 	/*
3703 	 * Ignore watermark boosting for __GFP_HIGH order-0 allocations
3704 	 * when checking the min watermark. The min watermark is the
3705 	 * point where boosting is ignored so that kswapd is woken up
3706 	 * when below the low watermark.
3707 	 */
3708 	if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
3709 		&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3710 		mark = z->_watermark[WMARK_MIN];
3711 		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3712 					alloc_flags, free_pages);
3713 	}
3714 
3715 	return false;
3716 }
3717 
3718 #ifdef CONFIG_NUMA
3719 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
3720 
3721 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3722 {
3723 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3724 				node_reclaim_distance;
3725 }
3726 #else	/* CONFIG_NUMA */
3727 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3728 {
3729 	return true;
3730 }
3731 #endif	/* CONFIG_NUMA */
3732 
3733 /*
3734  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3735  * fragmentation is subtle. If the preferred zone was HIGHMEM then
3736  * premature use of a lower zone may cause lowmem pressure problems that
3737  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3738  * probably too small. It only makes sense to spread allocations to avoid
3739  * fragmentation between the Normal and DMA32 zones.
3740  */
3741 static inline unsigned int
3742 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3743 {
3744 	unsigned int alloc_flags;
3745 
3746 	/*
3747 	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3748 	 * to save a branch.
3749 	 */
3750 	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3751 
3752 	if (defrag_mode) {
3753 		alloc_flags |= ALLOC_NOFRAGMENT;
3754 		return alloc_flags;
3755 	}
3756 
3757 #ifdef CONFIG_ZONE_DMA32
3758 	if (!zone)
3759 		return alloc_flags;
3760 
3761 	if (zone_idx(zone) != ZONE_NORMAL)
3762 		return alloc_flags;
3763 
3764 	/*
3765 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3766 	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3767 	 * on UMA that if Normal is populated then so is DMA32.
3768 	 */
3769 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3770 	if (nr_online_nodes > 1 && !populated_zone(--zone))
3771 		return alloc_flags;
3772 
3773 	alloc_flags |= ALLOC_NOFRAGMENT;
3774 #endif /* CONFIG_ZONE_DMA32 */
3775 	return alloc_flags;
3776 }
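/*
 * Editorial example (not part of the source), assuming defrag_mode is off:
 * on a single-node x86-64 box with populated ZONE_DMA32 and ZONE_NORMAL, a
 * GFP_KERNEL allocation whose preferred zone is ZONE_NORMAL leaves here
 * with ALLOC_KSWAPD | ALLOC_NOFRAGMENT, so get_page_from_freelist() would
 * rather spill into ZONE_DMA32 than commit a fragmenting fallback in
 * ZONE_NORMAL. For any other preferred zone only the kswapd-wakeup bit
 * survives.
 */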
3777 
3778 /* Must be called after current_gfp_context() which can change gfp_mask */
3779 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3780 						  unsigned int alloc_flags)
3781 {
3782 #ifdef CONFIG_CMA
3783 	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3784 		alloc_flags |= ALLOC_CMA;
3785 #endif
3786 	return alloc_flags;
3787 }
3788 
3789 /*
3790  * get_page_from_freelist goes through the zonelist trying to allocate
3791  * a page.
3792  */
3793 static struct page *
3794 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3795 						const struct alloc_context *ac)
3796 {
3797 	struct zoneref *z;
3798 	struct zone *zone;
3799 	struct pglist_data *last_pgdat = NULL;
3800 	bool last_pgdat_dirty_ok = false;
3801 	bool no_fallback;
3802 	bool skip_kswapd_nodes = nr_online_nodes > 1;
3803 	bool skipped_kswapd_nodes = false;
3804 
3805 retry:
3806 	/*
3807 	 * Scan zonelist, looking for a zone with enough free.
3808 	 * See also cpuset_current_node_allowed() comment in kernel/cgroup/cpuset.c.
3809 	 */
3810 	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3811 	z = ac->preferred_zoneref;
3812 	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3813 					ac->nodemask) {
3814 		struct page *page;
3815 		unsigned long mark;
3816 
3817 		if (cpusets_enabled() &&
3818 			(alloc_flags & ALLOC_CPUSET) &&
3819 			!__cpuset_zone_allowed(zone, gfp_mask))
3820 				continue;
3821 		/*
3822 		 * When allocating a page cache page for writing, we
3823 		 * want to get it from a node that is within its dirty
3824 		 * limit, such that no single node holds more than its
3825 		 * proportional share of globally allowed dirty pages.
3826 		 * The dirty limits take into account the node's
3827 		 * lowmem reserves and high watermark so that kswapd
3828 		 * should be able to balance it without having to
3829 		 * write pages from its LRU list.
3830 		 *
3831 		 * XXX: For now, allow allocations to potentially
3832 		 * exceed the per-node dirty limit in the slowpath
3833 		 * (spread_dirty_pages unset) before going into reclaim,
3834 		 * which is important when on a NUMA setup the allowed
3835 		 * nodes are together not big enough to reach the
3836 		 * global limit.  The proper fix for these situations
3837 		 * will require awareness of nodes in the
3838 		 * dirty-throttling and the flusher threads.
3839 		 */
3840 		if (ac->spread_dirty_pages) {
3841 			if (last_pgdat != zone->zone_pgdat) {
3842 				last_pgdat = zone->zone_pgdat;
3843 				last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3844 			}
3845 
3846 			if (!last_pgdat_dirty_ok)
3847 				continue;
3848 		}
3849 
3850 		if (no_fallback && !defrag_mode && nr_online_nodes > 1 &&
3851 		    zone != zonelist_zone(ac->preferred_zoneref)) {
3852 			int local_nid;
3853 
3854 			/*
3855 			 * If moving to a remote node, retry but allow
3856 			 * fragmenting fallbacks. Locality is more important
3857 			 * than fragmentation avoidance.
3858 			 */
3859 			local_nid = zonelist_node_idx(ac->preferred_zoneref);
3860 			if (zone_to_nid(zone) != local_nid) {
3861 				alloc_flags &= ~ALLOC_NOFRAGMENT;
3862 				goto retry;
3863 			}
3864 		}
3865 
3866 		/*
3867 		 * If kswapd is already active on a node, keep looking
3868 		 * for other nodes that might be idle. This can happen
3869 		 * if another process has NUMA bindings and is causing
3870 		 * kswapd wakeups on only some nodes. Avoid accidental
3871 		 * "node_reclaim_mode"-like behavior in this case.
3872 		 */
3873 		if (skip_kswapd_nodes &&
3874 		    !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) {
3875 			skipped_kswapd_nodes = true;
3876 			continue;
3877 		}
3878 
3879 		cond_accept_memory(zone, order, alloc_flags);
3880 
3881 		/*
3882 		 * Detect whether the number of free pages is below the high
3883 		 * watermark.  If so, we will decrease pcp->high and free
3884 		 * PCP pages in the free path to reduce the possibility of
3885 		 * premature page reclaim.  Detection is done here to
3886 		 * avoid doing it in the hotter free path.
3887 		 */
3888 		if (test_bit(ZONE_BELOW_HIGH, &zone->flags))
3889 			goto check_alloc_wmark;
3890 
3891 		mark = high_wmark_pages(zone);
3892 		if (zone_watermark_fast(zone, order, mark,
3893 					ac->highest_zoneidx, alloc_flags,
3894 					gfp_mask))
3895 			goto try_this_zone;
3896 		else
3897 			set_bit(ZONE_BELOW_HIGH, &zone->flags);
3898 
3899 check_alloc_wmark:
3900 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3901 		if (!zone_watermark_fast(zone, order, mark,
3902 				       ac->highest_zoneidx, alloc_flags,
3903 				       gfp_mask)) {
3904 			int ret;
3905 
3906 			if (cond_accept_memory(zone, order, alloc_flags))
3907 				goto try_this_zone;
3908 
3909 			/*
3910 			 * Watermark failed for this zone, but see if we can
3911 			 * grow this zone if it contains deferred pages.
3912 			 */
3913 			if (deferred_pages_enabled()) {
3914 				if (_deferred_grow_zone(zone, order))
3915 					goto try_this_zone;
3916 			}
3917 			/* Checked here to keep the fast path fast */
3918 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3919 			if (alloc_flags & ALLOC_NO_WATERMARKS)
3920 				goto try_this_zone;
3921 
3922 			if (!node_reclaim_enabled() ||
3923 			    !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
3924 				continue;
3925 
3926 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3927 			switch (ret) {
3928 			case NODE_RECLAIM_NOSCAN:
3929 				/* did not scan */
3930 				continue;
3931 			case NODE_RECLAIM_FULL:
3932 				/* scanned but unreclaimable */
3933 				continue;
3934 			default:
3935 				/* did we reclaim enough */
3936 				if (zone_watermark_ok(zone, order, mark,
3937 					ac->highest_zoneidx, alloc_flags))
3938 					goto try_this_zone;
3939 
3940 				continue;
3941 			}
3942 		}
3943 
3944 try_this_zone:
3945 		page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
3946 				gfp_mask, alloc_flags, ac->migratetype);
3947 		if (page) {
3948 			prep_new_page(page, order, gfp_mask, alloc_flags);
3949 
3950 			return page;
3951 		} else {
3952 			if (cond_accept_memory(zone, order, alloc_flags))
3953 				goto try_this_zone;
3954 
3955 			/* Try again if zone has deferred pages */
3956 			if (deferred_pages_enabled()) {
3957 				if (_deferred_grow_zone(zone, order))
3958 					goto try_this_zone;
3959 			}
3960 		}
3961 	}
3962 
3963 	/*
3964 	 * If we skipped over nodes with active kswapds and found no
3965 	 * idle nodes, retry and place anywhere the watermarks permit.
3966 	 */
3967 	if (skip_kswapd_nodes && skipped_kswapd_nodes) {
3968 		skip_kswapd_nodes = false;
3969 		goto retry;
3970 	}
3971 
3972 	/*
3973 	 * It's possible on a UMA machine to get through all zones that are
3974 	 * fragmented. If avoiding fragmentation, reset and try again.
3975 	 */
3976 	if (no_fallback && !defrag_mode) {
3977 		alloc_flags &= ~ALLOC_NOFRAGMENT;
3978 		goto retry;
3979 	}
3980 
3981 	return NULL;
3982 }
3983 
3984 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3985 {
3986 	unsigned int filter = SHOW_MEM_FILTER_NODES;
3987 
3988 	/*
3989 	 * This documents exceptions given to allocations in certain
3990 	 * contexts that are allowed to allocate outside current's set
3991 	 * of allowed nodes.
3992 	 */
3993 	if (!(gfp_mask & __GFP_NOMEMALLOC))
3994 		if (tsk_is_oom_victim(current) ||
3995 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
3996 			filter &= ~SHOW_MEM_FILTER_NODES;
3997 	if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3998 		filter &= ~SHOW_MEM_FILTER_NODES;
3999 
4000 	__show_mem(filter, nodemask, gfp_zone(gfp_mask));
4001 	mem_cgroup_show_protected_memory(NULL);
4002 }
4003 
4004 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4005 {
4006 	struct va_format vaf;
4007 	va_list args;
4008 	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4009 
4010 	if ((gfp_mask & __GFP_NOWARN) ||
4011 	     !__ratelimit(&nopage_rs) ||
4012 	     ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
4013 		return;
4014 
4015 	va_start(args, fmt);
4016 	vaf.fmt = fmt;
4017 	vaf.va = &args;
4018 	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4019 			current->comm, &vaf, gfp_mask, &gfp_mask,
4020 			nodemask_pr_args(nodemask));
4021 	va_end(args);
4022 
4023 	cpuset_print_current_mems_allowed();
4024 	pr_cont("\n");
4025 	dump_stack();
4026 	warn_alloc_show_mem(gfp_mask, nodemask);
4027 }
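/*
 * Editorial sketch (not part of the kernel source): warn_alloc() is the
 * rate-limited reporter used when an allocation is about to fail. A caller
 * giving up on a request might invoke it roughly like this; the format
 * string is only an example and "ac" stands for the caller's alloc_context.
 */
#if 0	/* illustrative only, not compiled */
	warn_alloc(gfp_mask, ac->nodemask,
		   "page allocation failure: order:%u", order);
#endif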
4028 
4029 static inline struct page *
4030 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4031 			      unsigned int alloc_flags,
4032 			      const struct alloc_context *ac)
4033 {
4034 	struct page *page;
4035 
4036 	page = get_page_from_freelist(gfp_mask, order,
4037 			alloc_flags|ALLOC_CPUSET, ac);
4038 	/*
4039 	 * fallback to ignore cpuset restriction if our nodes
4040 	 * are depleted
4041 	 */
4042 	if (!page)
4043 		page = get_page_from_freelist(gfp_mask, order,
4044 				alloc_flags, ac);
4045 	return page;
4046 }
4047 
4048 static inline struct page *
4049 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4050 	const struct alloc_context *ac, unsigned long *did_some_progress)
4051 {
4052 	struct oom_control oc = {
4053 		.zonelist = ac->zonelist,
4054 		.nodemask = ac->nodemask,
4055 		.memcg = NULL,
4056 		.gfp_mask = gfp_mask,
4057 		.order = order,
4058 	};
4059 	struct page *page;
4060 
4061 	*did_some_progress = 0;
4062 
4063 	/*
4064 	 * Acquire the oom lock.  If that fails, somebody else is
4065 	 * making progress for us.
4066 	 */
4067 	if (!mutex_trylock(&oom_lock)) {
4068 		*did_some_progress = 1;
4069 		schedule_timeout_uninterruptible(1);
4070 		return NULL;
4071 	}
4072 
4073 	/*
4074 	 * Go through the zonelist yet one more time, keep very high watermark
4075 	 * here, this is only to catch a parallel oom killing, we must fail if
4076 	 * we're still under heavy pressure. But make sure that this reclaim
4077 	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4078 	 * allocation which will never fail due to oom_lock already held.
4079 	 */
4080 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4081 				      ~__GFP_DIRECT_RECLAIM, order,
4082 				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4083 	if (page)
4084 		goto out;
4085 
4086 	/* Coredumps can quickly deplete all memory reserves */
4087 	if (current->flags & PF_DUMPCORE)
4088 		goto out;
4089 	/* The OOM killer will not help higher order allocs */
4090 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4091 		goto out;
4092 	/*
4093 	 * We have already exhausted all our reclaim opportunities without any
4094 	 * success so it is time to admit defeat. We will skip the OOM killer
4095 	 * because it is very likely that the caller has a more reasonable
4096 	 * fallback than shooting a random task.
4097 	 *
4098 	 * The OOM killer may not free memory on a specific node.
4099 	 */
4100 	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4101 		goto out;
4102 	/* The OOM killer does not needlessly kill tasks for lowmem */
4103 	if (ac->highest_zoneidx < ZONE_NORMAL)
4104 		goto out;
4105 	if (pm_suspended_storage())
4106 		goto out;
4107 	/*
4108 	 * XXX: GFP_NOFS allocations should rather fail than rely on
4109 	 * other requests to make forward progress.
4110 	 * We are in an unfortunate situation where out_of_memory cannot
4111 	 * do much for this context but let's try it to at least get
4112 	 * access to memory reserved if the current task is killed (see
4113 	 * access to memory reserves if the current task is killed (see
4114 	 * failures more gracefully we should just bail out here.
4115 	 */
4116 
4117 	/* Exhausted what can be done so it's blame time */
4118 	if (out_of_memory(&oc) ||
4119 	    WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
4120 		*did_some_progress = 1;
4121 
4122 		/*
4123 		 * Help non-failing allocations by giving them access to memory
4124 		 * reserves
4125 		 */
4126 		if (gfp_mask & __GFP_NOFAIL)
4127 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4128 					ALLOC_NO_WATERMARKS, ac);
4129 	}
4130 out:
4131 	mutex_unlock(&oom_lock);
4132 	return page;
4133 }
4134 
4135 /*
4136  * Maximum number of compaction retries with progress before the OOM
4137  * killer is considered the only way to move forward.
4138  */
4139 #define MAX_COMPACT_RETRIES 16
4140 
4141 #ifdef CONFIG_COMPACTION
4142 /* Try memory compaction for high-order allocations before reclaim */
4143 static struct page *
4144 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4145 		unsigned int alloc_flags, const struct alloc_context *ac,
4146 		enum compact_priority prio, enum compact_result *compact_result)
4147 {
4148 	struct page *page = NULL;
4149 	unsigned long pflags;
4150 	unsigned int noreclaim_flag;
4151 
4152 	if (!order)
4153 		return NULL;
4154 
4155 	psi_memstall_enter(&pflags);
4156 	delayacct_compact_start();
4157 	noreclaim_flag = memalloc_noreclaim_save();
4158 
4159 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4160 								prio, &page);
4161 
4162 	memalloc_noreclaim_restore(noreclaim_flag);
4163 	psi_memstall_leave(&pflags);
4164 	delayacct_compact_end();
4165 
4166 	if (*compact_result == COMPACT_SKIPPED)
4167 		return NULL;
4168 	/*
4169 	 * At least in one zone compaction wasn't deferred or skipped, so let's
4170 	 * count a compaction stall
4171 	 */
4172 	count_vm_event(COMPACTSTALL);
4173 
4174 	/* Prep a captured page if available */
4175 	if (page)
4176 		prep_new_page(page, order, gfp_mask, alloc_flags);
4177 
4178 	/* Try get a page from the freelist if available */
4179 	if (!page)
4180 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4181 
4182 	if (page) {
4183 		struct zone *zone = page_zone(page);
4184 
4185 		zone->compact_blockskip_flush = false;
4186 		compaction_defer_reset(zone, order, true);
4187 		count_vm_event(COMPACTSUCCESS);
4188 		return page;
4189 	}
4190 
4191 	/*
4192 	 * It's bad if a compaction run occurs and fails. The most likely reason
4193 	 * is that pages exist, but not enough to satisfy watermarks.
4194 	 */
4195 	count_vm_event(COMPACTFAIL);
4196 
4197 	cond_resched();
4198 
4199 	return NULL;
4200 }
4201 
4202 static inline bool
4203 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4204 		     enum compact_result compact_result,
4205 		     enum compact_priority *compact_priority,
4206 		     int *compaction_retries)
4207 {
4208 	int max_retries = MAX_COMPACT_RETRIES;
4209 	int min_priority;
4210 	bool ret = false;
4211 	int retries = *compaction_retries;
4212 	enum compact_priority priority = *compact_priority;
4213 
4214 	if (!order)
4215 		return false;
4216 
4217 	if (fatal_signal_pending(current))
4218 		return false;
4219 
4220 	/*
4221 	 * Compaction was skipped due to a lack of free order-0
4222 	 * migration targets. Continue if reclaim can help.
4223 	 */
4224 	if (compact_result == COMPACT_SKIPPED) {
4225 		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4226 		goto out;
4227 	}
4228 
4229 	/*
4230 	 * Compaction managed to coalesce some page blocks, but the
4231 	 * allocation failed presumably due to a race. Retry some.
4232 	 */
4233 	if (compact_result == COMPACT_SUCCESS) {
4234 		/*
4235 		 * !costly requests are much more important than
4236 		 * __GFP_RETRY_MAYFAIL costly ones because they are de
4237 		 * facto nofail and invoke OOM killer to move on while
4238 		 * costly can fail and users are ready to cope with
4239 		 * that. 1/4 retries is rather arbitrary but we would
4240 		 * need much more detailed feedback from compaction to
4241 		 * make a better decision.
4242 		 */
4243 		if (order > PAGE_ALLOC_COSTLY_ORDER)
4244 			max_retries /= 4;
4245 
4246 		if (++(*compaction_retries) <= max_retries) {
4247 			ret = true;
4248 			goto out;
4249 		}
4250 	}
4251 
4252 	/*
4253 	 * Compaction failed. Retry with increasing priority.
4254 	 */
4255 	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4256 			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4257 
4258 	if (*compact_priority > min_priority) {
4259 		(*compact_priority)--;
4260 		*compaction_retries = 0;
4261 		ret = true;
4262 	}
4263 out:
4264 	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4265 	return ret;
4266 }
4267 #else
4268 static inline struct page *
4269 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4270 		unsigned int alloc_flags, const struct alloc_context *ac,
4271 		enum compact_priority prio, enum compact_result *compact_result)
4272 {
4273 	*compact_result = COMPACT_SKIPPED;
4274 	return NULL;
4275 }
4276 
4277 static inline bool
4278 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4279 		     enum compact_result compact_result,
4280 		     enum compact_priority *compact_priority,
4281 		     int *compaction_retries)
4282 {
4283 	struct zone *zone;
4284 	struct zoneref *z;
4285 
4286 	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4287 		return false;
4288 
4289 	/*
4290 	 * There are setups with compaction disabled which would prefer to loop
4291 	 * inside the allocator rather than hit the oom killer prematurely.
4292 	 * Let's give them some hope and keep retrying while the order-0
4293 	 * watermarks are OK.
4294 	 */
4295 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4296 				ac->highest_zoneidx, ac->nodemask) {
4297 		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4298 					ac->highest_zoneidx, alloc_flags))
4299 			return true;
4300 	}
4301 	return false;
4302 }
4303 #endif /* CONFIG_COMPACTION */
4304 
4305 #ifdef CONFIG_LOCKDEP
4306 static struct lockdep_map __fs_reclaim_map =
4307 	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4308 
4309 static bool __need_reclaim(gfp_t gfp_mask)
4310 {
4311 	/* no reclaim without waiting on it */
4312 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4313 		return false;
4314 
4315 	/* this guy won't enter reclaim */
4316 	if (current->flags & PF_MEMALLOC)
4317 		return false;
4318 
4319 	if (gfp_mask & __GFP_NOLOCKDEP)
4320 		return false;
4321 
4322 	return true;
4323 }
4324 
4325 void __fs_reclaim_acquire(unsigned long ip)
4326 {
4327 	lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4328 }
4329 
4330 void __fs_reclaim_release(unsigned long ip)
4331 {
4332 	lock_release(&__fs_reclaim_map, ip);
4333 }
4334 
4335 void fs_reclaim_acquire(gfp_t gfp_mask)
4336 {
4337 	gfp_mask = current_gfp_context(gfp_mask);
4338 
4339 	if (__need_reclaim(gfp_mask)) {
4340 		if (gfp_mask & __GFP_FS)
4341 			__fs_reclaim_acquire(_RET_IP_);
4342 
4343 #ifdef CONFIG_MMU_NOTIFIER
4344 		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4345 		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4346 #endif
4347 
4348 	}
4349 }
4350 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4351 
4352 void fs_reclaim_release(gfp_t gfp_mask)
4353 {
4354 	gfp_mask = current_gfp_context(gfp_mask);
4355 
4356 	if (__need_reclaim(gfp_mask)) {
4357 		if (gfp_mask & __GFP_FS)
4358 			__fs_reclaim_release(_RET_IP_);
4359 	}
4360 }
4361 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4362 #endif
4363 
4364 /*
4365  * Zonelists may change due to hotplug during allocation. Detect when zonelists
4366  * have been rebuilt so the allocation can be retried. The reader side does not
4367  * lock and simply retries the allocation if the zonelist changed. The writer
4368  * side is protected by the embedded spin_lock.
4369  */
4370 static DEFINE_SEQLOCK(zonelist_update_seq);
4371 
4372 static unsigned int zonelist_iter_begin(void)
4373 {
4374 	if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4375 		return read_seqbegin(&zonelist_update_seq);
4376 
4377 	return 0;
4378 }
4379 
4380 static unsigned int check_retry_zonelist(unsigned int seq)
4381 {
4382 	if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4383 		return read_seqretry(&zonelist_update_seq, seq);
4384 
4385 	return seq;
4386 }
4387 
4388 /* Perform direct synchronous page reclaim */
4389 static unsigned long
4390 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4391 					const struct alloc_context *ac)
4392 {
4393 	unsigned int noreclaim_flag;
4394 	unsigned long progress;
4395 
4396 	cond_resched();
4397 
4398 	/* We now go into synchronous reclaim */
4399 	cpuset_memory_pressure_bump();
4400 	fs_reclaim_acquire(gfp_mask);
4401 	noreclaim_flag = memalloc_noreclaim_save();
4402 
4403 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4404 								ac->nodemask);
4405 
4406 	memalloc_noreclaim_restore(noreclaim_flag);
4407 	fs_reclaim_release(gfp_mask);
4408 
4409 	cond_resched();
4410 
4411 	return progress;
4412 }
4413 
4414 /* The really slow allocator path where we enter direct reclaim */
4415 static inline struct page *
4416 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4417 		unsigned int alloc_flags, const struct alloc_context *ac,
4418 		unsigned long *did_some_progress)
4419 {
4420 	struct page *page = NULL;
4421 	unsigned long pflags;
4422 	bool drained = false;
4423 
4424 	psi_memstall_enter(&pflags);
4425 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4426 	if (unlikely(!(*did_some_progress)))
4427 		goto out;
4428 
4429 retry:
4430 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4431 
4432 	/*
4433 	 * If an allocation failed after direct reclaim, it could be because
4434 	 * pages are pinned on the per-cpu lists or in high alloc reserves.
4435 	 * Shrink them and try again
4436 	 */
4437 	if (!page && !drained) {
4438 		unreserve_highatomic_pageblock(ac, false);
4439 		drain_all_pages(NULL);
4440 		drained = true;
4441 		goto retry;
4442 	}
4443 out:
4444 	psi_memstall_leave(&pflags);
4445 
4446 	return page;
4447 }
4448 
4449 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4450 			     const struct alloc_context *ac)
4451 {
4452 	struct zoneref *z;
4453 	struct zone *zone;
4454 	pg_data_t *last_pgdat = NULL;
4455 	enum zone_type highest_zoneidx = ac->highest_zoneidx;
4456 	unsigned int reclaim_order;
4457 
4458 	if (defrag_mode)
4459 		reclaim_order = max(order, pageblock_order);
4460 	else
4461 		reclaim_order = order;
4462 
4463 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4464 					ac->nodemask) {
4465 		if (!managed_zone(zone))
4466 			continue;
4467 		if (last_pgdat == zone->zone_pgdat)
4468 			continue;
4469 		wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx);
4470 		last_pgdat = zone->zone_pgdat;
4471 	}
4472 }
4473 
4474 static inline unsigned int
4475 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
4476 {
4477 	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4478 
4479 	/*
4480 	 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE
4481 	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4482 	 * to save two branches.
4483 	 */
4484 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
4485 	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4486 
4487 	/*
4488 	 * The caller may dip into page reserves a bit more if the caller
4489 	 * cannot run direct reclaim, or if the caller has realtime scheduling
4490 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
4491 	 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
4492 	 */
4493 	alloc_flags |= (__force int)
4494 		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4495 
4496 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
4497 		/*
4498 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4499 		 * if it can't schedule.
4500 		 */
4501 		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
4502 			alloc_flags |= ALLOC_NON_BLOCK;
4503 
4504 			if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE))
4505 				alloc_flags |= ALLOC_HIGHATOMIC;
4506 		}
4507 
4508 		/*
4509 		 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
4510 		 * GFP_ATOMIC) rather than fail, see the comment for
4511 		 * cpuset_current_node_allowed().
4512 		 */
4513 		if (alloc_flags & ALLOC_MIN_RESERVE)
4514 			alloc_flags &= ~ALLOC_CPUSET;
4515 	} else if (unlikely(rt_or_dl_task(current)) && in_task())
4516 		alloc_flags |= ALLOC_MIN_RESERVE;
4517 
4518 	alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4519 
4520 	if (defrag_mode)
4521 		alloc_flags |= ALLOC_NOFRAGMENT;
4522 
4523 	return alloc_flags;
4524 }
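/*
 * Editorial example (not part of the source): GFP_ATOMIC is
 * __GFP_HIGH | __GFP_KSWAPD_RECLAIM, so an order-0 GFP_ATOMIC request that
 * is not __GFP_NOMEMALLOC comes out of here with ALLOC_WMARK_MIN |
 * ALLOC_MIN_RESERVE | ALLOC_KSWAPD | ALLOC_NON_BLOCK and with ALLOC_CPUSET
 * cleared; a high-order GFP_ATOMIC request additionally gains
 * ALLOC_HIGHATOMIC. Leaving possible ALLOC_CMA and defrag_mode adjustments
 * aside, a plain GFP_KERNEL request keeps the conservative
 * ALLOC_WMARK_MIN | ALLOC_CPUSET | ALLOC_KSWAPD set instead.
 */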
4525 
4526 static bool oom_reserves_allowed(struct task_struct *tsk)
4527 {
4528 	if (!tsk_is_oom_victim(tsk))
4529 		return false;
4530 
4531 	/*
4532 	 * !MMU doesn't have oom reaper so give access to memory reserves
4533 	 * only to the thread with TIF_MEMDIE set
4534 	 */
4535 	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4536 		return false;
4537 
4538 	return true;
4539 }
4540 
4541 /*
4542  * Distinguish requests which really need access to full memory
4543  * reserves from oom victims which can live with a portion of it
4544  */
4545 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4546 {
4547 	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4548 		return 0;
4549 	if (gfp_mask & __GFP_MEMALLOC)
4550 		return ALLOC_NO_WATERMARKS;
4551 	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4552 		return ALLOC_NO_WATERMARKS;
4553 	if (!in_interrupt()) {
4554 		if (current->flags & PF_MEMALLOC)
4555 			return ALLOC_NO_WATERMARKS;
4556 		else if (oom_reserves_allowed(current))
4557 			return ALLOC_OOM;
4558 	}
4559 
4560 	return 0;
4561 }
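/*
 * Editorial example (not part of the source): __GFP_MEMALLOC, or a
 * PF_MEMALLOC task allocating outside interrupt context, maps to
 * ALLOC_NO_WATERMARKS; an OOM victim gets the weaker ALLOC_OOM; and
 * __GFP_NOMEMALLOC forces 0 regardless, letting callers explicitly decline
 * the emergency reserves they would otherwise be entitled to.
 */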
4562 
4563 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4564 {
4565 	return !!__gfp_pfmemalloc_flags(gfp_mask);
4566 }
4567 
4568 /*
4569  * Checks whether it makes sense to retry the reclaim to make forward progress
4570  * for the given allocation request.
4571  *
4572  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4573  * without success, or when we couldn't even meet the watermark if we
4574  * reclaimed all remaining pages on the LRU lists.
4575  *
4576  * Returns true if a retry is viable or false to enter the oom path.
4577  */
4578 static inline bool
4579 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4580 		     struct alloc_context *ac, int alloc_flags,
4581 		     bool did_some_progress, int *no_progress_loops)
4582 {
4583 	struct zone *zone;
4584 	struct zoneref *z;
4585 	bool ret = false;
4586 
4587 	/*
4588 	 * Costly allocations might have made progress but this doesn't mean
4589 	 * their order will become available due to high fragmentation so
4590 	 * always increment the no progress counter for them
4591 	 */
4592 	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4593 		*no_progress_loops = 0;
4594 	else
4595 		(*no_progress_loops)++;
4596 
4597 	if (*no_progress_loops > MAX_RECLAIM_RETRIES)
4598 		goto out;
4599 
4600 
4601 	/*
4602 	 * Keep reclaiming pages while there is a chance this will lead
4603 	 * somewhere.  If none of the target zones can satisfy our allocation
4604 	 * request even if all reclaimable pages are considered then we are
4605 	 * screwed and have to go OOM.
4606 	 */
4607 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4608 				ac->highest_zoneidx, ac->nodemask) {
4609 		unsigned long available;
4610 		unsigned long reclaimable;
4611 		unsigned long min_wmark = min_wmark_pages(zone);
4612 		bool wmark;
4613 
4614 		if (cpusets_enabled() &&
4615 			(alloc_flags & ALLOC_CPUSET) &&
4616 			!__cpuset_zone_allowed(zone, gfp_mask))
4617 				continue;
4618 
4619 		available = reclaimable = zone_reclaimable_pages(zone);
4620 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4621 
4622 		/*
4623 		 * Would the allocation succeed if we reclaimed all
4624 		 * reclaimable pages?
4625 		 */
4626 		wmark = __zone_watermark_ok(zone, order, min_wmark,
4627 				ac->highest_zoneidx, alloc_flags, available);
4628 		trace_reclaim_retry_zone(z, order, reclaimable,
4629 				available, min_wmark, *no_progress_loops, wmark);
4630 		if (wmark) {
4631 			ret = true;
4632 			break;
4633 		}
4634 	}
4635 
4636 	/*
4637 	 * Memory allocation/reclaim might be called from a WQ context and the
4638 	 * current implementation of the WQ concurrency control doesn't
4639 	 * recognize that a particular WQ is congested if the worker thread is
4640 	 * looping without ever sleeping. Therefore we have to do a short sleep
4641 	 * here rather than calling cond_resched().
4642 	 */
4643 	if (current->flags & PF_WQ_WORKER)
4644 		schedule_timeout_uninterruptible(1);
4645 	else
4646 		cond_resched();
4647 out:
4648 	/* Before OOM, exhaust highatomic_reserve */
4649 	if (!ret)
4650 		return unreserve_highatomic_pageblock(ac, true);
4651 
4652 	return ret;
4653 }
4654 
4655 static inline bool
4656 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4657 {
4658 	/*
4659 	 * It's possible that cpuset's mems_allowed and the nodemask from
4660 	 * mempolicy don't intersect. This should be normally dealt with by
4661 	 * policy_nodemask(), but it's possible to race with cpuset update in
4662 	 * such a way the check therein was true, and then it became false
4663 	 * before we got our cpuset_mems_cookie here.
4664 	 * This assumes that for all allocations, ac->nodemask can come only
4665 	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4666 	 * when it does not intersect with the cpuset restrictions) or the
4667 	 * caller can deal with a violated nodemask.
4668 	 */
4669 	if (cpusets_enabled() && ac->nodemask &&
4670 			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4671 		ac->nodemask = NULL;
4672 		return true;
4673 	}
4674 
4675 	/*
4676 	 * When updating a task's mems_allowed or mempolicy nodemask, it is
4677 	 * possible to race with parallel threads in such a way that our
4678 	 * allocation can fail while the mask is being updated. If we are about
4679 	 * to fail, check if the cpuset changed during allocation and if so,
4680 	 * retry.
4681 	 */
4682 	if (read_mems_allowed_retry(cpuset_mems_cookie))
4683 		return true;
4684 
4685 	return false;
4686 }
4687 
4688 static inline struct page *
4689 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4690 						struct alloc_context *ac)
4691 {
4692 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4693 	bool can_compact = can_direct_reclaim && gfp_compaction_allowed(gfp_mask);
4694 	bool nofail = gfp_mask & __GFP_NOFAIL;
4695 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4696 	struct page *page = NULL;
4697 	unsigned int alloc_flags;
4698 	unsigned long did_some_progress;
4699 	enum compact_priority compact_priority;
4700 	enum compact_result compact_result;
4701 	int compaction_retries;
4702 	int no_progress_loops;
4703 	unsigned int cpuset_mems_cookie;
4704 	unsigned int zonelist_iter_cookie;
4705 	int reserve_flags;
4706 	bool compact_first = false;
4707 	bool can_retry_reserves = true;
4708 
4709 	if (unlikely(nofail)) {
4710 		/*
4711 		 * Also, we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM;
4712 		 * otherwise we may end up in a lockup.
4713 		 */
4714 		WARN_ON_ONCE(!can_direct_reclaim);
4715 		/*
4716 		 * PF_MEMALLOC request from this context is rather bizarre
4717 		 * because we cannot reclaim anything and only can loop waiting
4718 		 * for somebody to do a work for us.
4719 		 */
4720 		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4721 	}
4722 
4723 restart:
4724 	compaction_retries = 0;
4725 	no_progress_loops = 0;
4726 	compact_result = COMPACT_SKIPPED;
4727 	compact_priority = DEF_COMPACT_PRIORITY;
4728 	cpuset_mems_cookie = read_mems_allowed_begin();
4729 	zonelist_iter_cookie = zonelist_iter_begin();
4730 
4731 	/*
4732 	 * For costly allocations, try direct compaction first, as it's likely
4733 	 * that we have enough base pages and don't need to reclaim. For non-
4734 	 * movable high-order allocations, do that as well, as compaction will
4735 	 * try prevent permanent fragmentation by migrating from blocks of the
4736 	 * try to prevent permanent fragmentation by migrating from blocks of the
4737 	 */
4738 	if (can_compact && (costly_order || (order > 0 &&
4739 					ac->migratetype != MIGRATE_MOVABLE))) {
4740 		compact_first = true;
4741 		compact_priority = INIT_COMPACT_PRIORITY;
4742 	}
4743 
4744 	/*
4745 	 * The fast path uses conservative alloc_flags to succeed only until
4746 	 * kswapd needs to be woken up, and to avoid the cost of setting up
4747 	 * alloc_flags precisely. So we do that now.
4748 	 */
4749 	alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
4750 
4751 	/*
4752 	 * We need to recalculate the starting point for the zonelist iterator
4753 	 * because we might have used different nodemask in the fast path, or
4754 	 * there was a cpuset modification and we are retrying - otherwise we
4755 	 * could end up iterating over non-eligible zones endlessly.
4756 	 */
4757 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4758 					ac->highest_zoneidx, ac->nodemask);
4759 	if (!zonelist_zone(ac->preferred_zoneref))
4760 		goto nopage;
4761 
4762 	/*
4763 	 * Check for insane configurations where the cpuset doesn't contain
4764 	 * any suitable zone to satisfy the request - e.g. non-movable
4765 	 * GFP_HIGHUSER allocations from MOVABLE nodes only.
4766 	 */
4767 	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4768 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
4769 					ac->highest_zoneidx,
4770 					&cpuset_current_mems_allowed);
4771 		if (!zonelist_zone(z))
4772 			goto nopage;
4773 	}
4774 
4775 retry:
4776 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4777 	if (alloc_flags & ALLOC_KSWAPD)
4778 		wake_all_kswapds(order, gfp_mask, ac);
4779 
4780 	/*
4781 	 * The adjusted alloc_flags might result in immediate success, so try
4782 	 * that first
4783 	 */
4784 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4785 	if (page)
4786 		goto got_pg;
4787 
4788 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4789 	if (reserve_flags)
4790 		alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4791 					  (alloc_flags & ALLOC_KSWAPD);
4792 
4793 	/*
4794 	 * Reset the nodemask and zonelist iterators if memory policies can be
4795 	 * ignored. These allocations are high priority and system rather than
4796 	 * user oriented.
4797 	 */
4798 	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4799 		ac->nodemask = NULL;
4800 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4801 					ac->highest_zoneidx, ac->nodemask);
4802 
4803 		/*
4804 		 * The first time we adjust anything due to being allowed to
4805 		 * ignore memory policies or watermarks, retry immediately. This
4806 		 * allows us to keep the first allocation attempt optimistic so
4807 		 * it can succeed in a zone that is still above watermarks.
4808 		 */
4809 		if (can_retry_reserves) {
4810 			can_retry_reserves = false;
4811 			goto retry;
4812 		}
4813 	}
4814 
4815 	/* Caller is not willing to reclaim, we can't balance anything */
4816 	if (!can_direct_reclaim)
4817 		goto nopage;
4818 
4819 	/* Avoid recursion of direct reclaim */
4820 	if (current->flags & PF_MEMALLOC)
4821 		goto nopage;
4822 
4823 	/* Try direct reclaim and then allocating */
4824 	if (!compact_first) {
4825 		page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags,
4826 							ac, &did_some_progress);
4827 		if (page)
4828 			goto got_pg;
4829 	}
4830 
4831 	/* Try direct compaction and then allocating */
4832 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4833 					compact_priority, &compact_result);
4834 	if (page)
4835 		goto got_pg;
4836 
4837 	if (compact_first) {
4838 		/*
4839 		 * THP page faults may attempt local node only first, but are
4840 		 * then allowed to only compact, not reclaim, see
4841 		 * alloc_pages_mpol().
4842 		 *
4843 		 * Compaction has failed above and we don't want such THP
4844 		 * allocations to put reclaim pressure on a single node in a
4845 		 * situation where other nodes might have plenty of available
4846 		 * memory.
4847 		 */
4848 		if (gfp_has_flags(gfp_mask, __GFP_NORETRY | __GFP_THISNODE))
4849 			goto nopage;
4850 
4851 		/*
4852 		 * For the initial compaction attempt we have lowered its
4853 		 * priority. Restore it for further retries, if those are
4854 		 * allowed. With __GFP_NORETRY there will be a single round of
4855 		 * reclaim and compaction with the lowered priority.
4856 		 */
4857 		if (!(gfp_mask & __GFP_NORETRY))
4858 			compact_priority = DEF_COMPACT_PRIORITY;
4859 
4860 		compact_first = false;
4861 		goto retry;
4862 	}
4863 
4864 	/* Do not loop if specifically requested */
4865 	if (gfp_mask & __GFP_NORETRY)
4866 		goto nopage;
4867 
4868 	/*
4869 	 * Do not retry costly high order allocations unless they are
4870 	 * __GFP_RETRY_MAYFAIL and we can compact
4871 	 */
4872 	if (costly_order && (!can_compact ||
4873 			     !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4874 		goto nopage;
4875 
4876 	/*
4877 	 * Deal with possible cpuset update races or zonelist updates to avoid
4878 	 * infinite retries. No "goto retry;" can be placed above this check
4879 	 * unless it can execute just once.
4880 	 */
4881 	if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4882 	    check_retry_zonelist(zonelist_iter_cookie))
4883 		goto restart;
4884 
4885 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4886 				 did_some_progress > 0, &no_progress_loops))
4887 		goto retry;
4888 
4889 	/*
4890 	 * It doesn't make any sense to retry compaction if order-0 reclaim is
4891 	 * not able to make any progress, because the current implementation of
4892 	 * compaction depends on a sufficient amount of free memory
4893 	 * (see __compaction_suitable)
4894 	 */
4895 	if (did_some_progress > 0 && can_compact &&
4896 			should_compact_retry(ac, order, alloc_flags,
4897 				compact_result, &compact_priority,
4898 				&compaction_retries))
4899 		goto retry;
4900 
4901 	/* Reclaim/compaction failed to prevent the fallback */
4902 	if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) {
4903 		alloc_flags &= ~ALLOC_NOFRAGMENT;
4904 		goto retry;
4905 	}
4906 
4907 	/*
4908 	 * Deal with possible cpuset update races or zonelist updates to avoid
4909 	 * an unnecessary OOM kill.
4910 	 */
4911 	if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4912 	    check_retry_zonelist(zonelist_iter_cookie))
4913 		goto restart;
4914 
4915 	/* Reclaim has failed us, start killing things */
4916 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4917 	if (page)
4918 		goto got_pg;
4919 
4920 	/* Avoid allocations with no watermarks from looping endlessly */
4921 	if (tsk_is_oom_victim(current) &&
4922 	    (alloc_flags & ALLOC_OOM ||
4923 	     (gfp_mask & __GFP_NOMEMALLOC)))
4924 		goto nopage;
4925 
4926 	/* Retry as long as the OOM killer is making progress */
4927 	if (did_some_progress) {
4928 		no_progress_loops = 0;
4929 		goto retry;
4930 	}
4931 
4932 nopage:
4933 	/*
4934 	 * Deal with possible cpuset update races or zonelist updates to avoid
4935 	 * an unnecessary OOM kill.
4936 	 */
4937 	if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4938 	    check_retry_zonelist(zonelist_iter_cookie))
4939 		goto restart;
4940 
4941 	/*
4942 	 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4943 	 * we always retry
4944 	 */
4945 	if (unlikely(nofail)) {
4946 		/*
4947 		 * Lacking direct_reclaim we can't do anything to reclaim memory,
4948 		 * so we disregard these unreasonable nofail requests and still
4949 		 * return NULL
4950 		 */
4951 		if (!can_direct_reclaim)
4952 			goto fail;
4953 
4954 		/*
4955 		 * Help non-failing allocations by giving some access to memory
4956 		 * reserves normally used for high priority non-blocking
4957 		 * allocations but do not use ALLOC_NO_WATERMARKS because this
4958 		 * could deplete whole memory reserves which would just make
4959 		 * the situation worse.
4960 		 */
4961 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4962 		if (page)
4963 			goto got_pg;
4964 
4965 		cond_resched();
4966 		goto retry;
4967 	}
4968 fail:
4969 	warn_alloc(gfp_mask, ac->nodemask,
4970 			"page allocation failure: order:%u", order);
4971 got_pg:
4972 	return page;
4973 }
4974 
4975 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4976 		int preferred_nid, nodemask_t *nodemask,
4977 		struct alloc_context *ac, gfp_t *alloc_gfp,
4978 		unsigned int *alloc_flags)
4979 {
4980 	ac->highest_zoneidx = gfp_zone(gfp_mask);
4981 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4982 	ac->nodemask = nodemask;
4983 	ac->migratetype = gfp_migratetype(gfp_mask);
4984 
4985 	if (cpusets_enabled()) {
4986 		*alloc_gfp |= __GFP_HARDWALL;
4987 		/*
4988 		 * When we are in the interrupt context, it is irrelevant
4989 		 * to the current task context. It means that any node is OK.
4990 		 */
4991 		if (in_task() && !ac->nodemask)
4992 			ac->nodemask = &cpuset_current_mems_allowed;
4993 		else
4994 			*alloc_flags |= ALLOC_CPUSET;
4995 	}
4996 
4997 	might_alloc(gfp_mask);
4998 
4999 	/*
5000 	 * Don't invoke should_fail logic, since it may call
5001 	 * get_random_u32() and printk() which need to spin_lock.
5002 	 */
5003 	if (!(*alloc_flags & ALLOC_TRYLOCK) &&
5004 	    should_fail_alloc_page(gfp_mask, order))
5005 		return false;
5006 
5007 	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5008 
5009 	/* Dirty zone balancing only done in the fast path */
5010 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5011 
5012 	/*
5013 	 * The preferred zone is used for statistics but crucially it is
5014 	 * also used as the starting point for the zonelist iterator. It
5015 	 * may get reset for allocations that ignore memory policies.
5016 	 */
5017 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5018 					ac->highest_zoneidx, ac->nodemask);
5019 
5020 	return true;
5021 }
5022 
5023 /*
5024  * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
5025  * @gfp: GFP flags for the allocation
5026  * @preferred_nid: The preferred NUMA node ID to allocate from
5027  * @nodemask: Set of nodes to allocate from, may be NULL
5028  * @nr_pages: The number of pages desired in the array
5029  * @page_array: Array to store the pages
5030  *
5031  * This is a batched version of the page allocator that attempts to allocate
5032  * @nr_pages quickly.  Pages are added to @page_array.
5033  *
5034  * Note that only the elements in @page_array that were cleared to %NULL on
5035  * entry are populated with newly allocated pages. @nr_pages is the maximum
5036  * number of pages that will be stored in the array.
5037  *
5038  * Returns the number of pages in @page_array, including ones already
5039  * allocated on entry.  This can be less than the number requested in @nr_pages,
5040  * but all empty slots are filled from the beginning.  I.e., if all slots in
5041  * @page_array were set to %NULL on entry, the slots from 0 to the return value
5042  * - 1 will be filled.
5043  */
 */
5044 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
5045 			nodemask_t *nodemask, int nr_pages,
5046 			struct page **page_array)
5047 {
5048 	struct page *page;
5049 	struct zone *zone;
5050 	struct zoneref *z;
5051 	struct per_cpu_pages *pcp;
5052 	struct list_head *pcp_list;
5053 	struct alloc_context ac;
5054 	gfp_t alloc_gfp;
5055 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
5056 	int nr_populated = 0, nr_account = 0;
5057 
5058 	/*
5059 	 * Skip populated array elements to determine if any pages need
5060 	 * to be allocated before disabling IRQs.
5061 	 */
5062 	while (nr_populated < nr_pages && page_array[nr_populated])
5063 		nr_populated++;
5064 
5065 	/* No pages requested? */
5066 	if (unlikely(nr_pages <= 0))
5067 		goto out;
5068 
5069 	/* Already populated array? */
5070 	if (unlikely(nr_pages - nr_populated == 0))
5071 		goto out;
5072 
5073 	/* Bulk allocator does not support memcg accounting. */
5074 	if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
5075 		goto failed;
5076 
5077 	/* Use the single page allocator for one page. */
5078 	if (nr_pages - nr_populated == 1)
5079 		goto failed;
5080 
5081 #ifdef CONFIG_PAGE_OWNER
5082 	/*
5083 	 * PAGE_OWNER may recurse into the allocator to allocate space to
5084 	 * save the stack with pagesets.lock held. Releasing/reacquiring
5085 	 * removes much of the performance benefit of bulk allocation so
5086 	 * force the caller to allocate one page at a time, as that will perform
5087 	 * about as well as adding the complexity to the bulk allocator.
5088 	 */
5089 	if (static_branch_unlikely(&page_owner_inited))
5090 		goto failed;
5091 #endif
5092 
5093 	/* May set ALLOC_NOFRAGMENT; a fragmenting fallback returns only 1 page. */
5094 	gfp &= gfp_allowed_mask;
5095 	alloc_gfp = gfp;
5096 	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5097 		goto out;
5098 	gfp = alloc_gfp;
5099 
5100 	/* Find an allowed local zone that meets the low watermark. */
5101 	z = ac.preferred_zoneref;
5102 	for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
5103 		unsigned long mark;
5104 
5105 		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5106 		    !__cpuset_zone_allowed(zone, gfp)) {
5107 			continue;
5108 		}
5109 
5110 		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
5111 		    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
5112 			goto failed;
5113 		}
5114 
5115 		cond_accept_memory(zone, 0, alloc_flags);
5116 retry_this_zone:
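		/*
		 * Demand headroom above the watermark for the whole remaining
		 * request, so that handing out the batch cannot push the zone
		 * below its low watermark.
		 */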
5117 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages - nr_populated;
5118 		if (zone_watermark_fast(zone, 0,  mark,
5119 				zonelist_zone_idx(ac.preferred_zoneref),
5120 				alloc_flags, gfp)) {
5121 			break;
5122 		}
5123 
5124 		if (cond_accept_memory(zone, 0, alloc_flags))
5125 			goto retry_this_zone;
5126 
5127 		/* Try again if zone has deferred pages */
5128 		if (deferred_pages_enabled()) {
5129 			if (_deferred_grow_zone(zone, 0))
5130 				goto retry_this_zone;
5131 		}
5132 	}
5133 
5134 	/*
5135 	 * If there are no allowed local zones that meet the watermarks then
5136 	 * try to allocate a single page and reclaim if necessary.
5137 	 */
5138 	if (unlikely(!zone))
5139 		goto failed;
5140 
5141 	/* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
5142 	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
5143 	if (!pcp)
5144 		goto failed;
5145 
5146 	/* Attempt the batch allocation */
5147 	pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
5148 	while (nr_populated < nr_pages) {
5149 
5150 		/* Skip existing pages */
5151 		if (page_array[nr_populated]) {
5152 			nr_populated++;
5153 			continue;
5154 		}
5155 
5156 		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5157 								pcp, pcp_list);
5158 		if (unlikely(!page)) {
5159 			/* Try and allocate at least one page */
5160 			if (!nr_account) {
5161 				pcp_spin_unlock(pcp);
5162 				goto failed;
5163 			}
5164 			break;
5165 		}
5166 		nr_account++;
5167 
5168 		prep_new_page(page, 0, gfp, 0);
5169 		set_page_refcounted(page);
5170 		page_array[nr_populated++] = page;
5171 	}
5172 
5173 	pcp_spin_unlock(pcp);
5174 
5175 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5176 	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
5177 
5178 out:
5179 	return nr_populated;
5180 
5181 failed:
5182 	page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
5183 	if (page)
5184 		page_array[nr_populated++] = page;
5185 	goto out;
5186 }
5187 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
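
/*
 * A minimal usage sketch for the bulk allocator (illustrative only, not part
 * of the kernel tree; real callers normally go through the alloc_pages_bulk*
 * wrappers in gfp.h rather than calling the _noprof variant directly):
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long nr, i;
 *
 *	nr = alloc_pages_bulk_noprof(GFP_KERNEL, numa_mem_id(), NULL,
 *				     ARRAY_SIZE(pages), pages);
 *	for (i = 0; i < nr; i++)
 *		__free_pages(pages[i], 0);
 */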
5188 
5189 /*
5190  * This is the 'heart' of the zoned buddy allocator.
5191  */
5192 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
5193 		int preferred_nid, nodemask_t *nodemask)
5194 {
5195 	struct page *page;
5196 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
5197 	gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5198 	struct alloc_context ac = { };
5199 
5200 	/*
5201 	 * There are several places where we assume that the order value is sane
5202 	 * so bail out early if the request is out of bound.
5203 	 */
5204 	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
5205 		return NULL;
5206 
5207 	gfp &= gfp_allowed_mask;
5208 	/*
5209 	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5210 	 * resp. GFP_NOIO which has to be inherited for all allocation requests
5211 	 * from a particular context which has been marked by
5212 	 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5213 	 * movable zones are not used during allocation.
5214 	 */
5215 	gfp = current_gfp_context(gfp);
5216 	alloc_gfp = gfp;
5217 	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5218 			&alloc_gfp, &alloc_flags))
5219 		return NULL;
5220 
5221 	/*
5222 	 * Forbid the first pass from falling back to types that fragment
5223 	 * memory until all local zones are considered.
5224 	 */
5225 	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
5226 
5227 	/* First allocation attempt */
5228 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5229 	if (likely(page))
5230 		goto out;
5231 
5232 	alloc_gfp = gfp;
5233 	ac.spread_dirty_pages = false;
5234 
5235 	/*
5236 	 * Restore the original nodemask if it was potentially replaced with
5237 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5238 	 */
5239 	ac.nodemask = nodemask;
5240 
5241 	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5242 
5243 out:
5244 	if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
5245 	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5246 		free_frozen_pages(page, order);
5247 		page = NULL;
5248 	}
5249 
5250 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5251 	kmsan_alloc_page(page, order, alloc_gfp);
5252 
5253 	return page;
5254 }
5255 EXPORT_SYMBOL(__alloc_frozen_pages_noprof);
5256 
5257 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
5258 		int preferred_nid, nodemask_t *nodemask)
5259 {
5260 	struct page *page;
5261 
5262 	page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask);
5263 	if (page)
5264 		set_page_refcounted(page);
5265 	return page;
5266 }
5267 EXPORT_SYMBOL(__alloc_pages_noprof);
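
/*
 * A minimal usage sketch (illustrative only; most callers use the
 * alloc_pages()/folio_alloc() wrappers from gfp.h rather than the _noprof
 * variants directly):
 *
 *	struct page *page = __alloc_pages_noprof(GFP_KERNEL | __GFP_ZERO, 2,
 *						 numa_mem_id(), NULL);
 *	if (page) {
 *		void *buf = page_address(page);	/* 4 contiguous zeroed pages */
 *		...
 *		__free_pages(page, 2);
 *	}
 */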
5268 
5269 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
5270 		nodemask_t *nodemask)
5271 {
5272 	struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order,
5273 					preferred_nid, nodemask);
5274 	return page_rmappable_folio(page);
5275 }
5276 EXPORT_SYMBOL(__folio_alloc_noprof);
5277 
5278 /*
5279  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5280  * address cannot represent highmem pages. Use alloc_pages and then kmap if
5281  * you need to access high mem.
5282  */
5283 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order)
5284 {
5285 	struct page *page;
5286 
5287 	page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order);
5288 	if (!page)
5289 		return 0;
5290 	return (unsigned long) page_address(page);
5291 }
5292 EXPORT_SYMBOL(get_free_pages_noprof);
5293 
5294 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask)
5295 {
5296 	return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0);
5297 }
5298 EXPORT_SYMBOL(get_zeroed_page_noprof);
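
/*
 * A minimal usage sketch for the address-based helpers (illustrative only;
 * callers normally use the __get_free_pages()/get_zeroed_page() wrappers and
 * release the memory with free_pages()/free_page()):
 *
 *	unsigned long addr = get_zeroed_page_noprof(GFP_KERNEL);
 *
 *	if (addr) {
 *		...
 *		free_page(addr);
 *	}
 */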
5299 
5300 static void ___free_pages(struct page *page, unsigned int order,
5301 			  fpi_t fpi_flags)
5302 {
5303 	/* get PageHead before we drop reference */
5304 	int head = PageHead(page);
5305 	/* get alloc tag in case the page is released by others */
5306 	struct alloc_tag *tag = pgalloc_tag_get(page);
5307 
5308 	if (put_page_testzero(page))
5309 		__free_frozen_pages(page, order, fpi_flags);
5310 	else if (!head) {
5311 		pgalloc_tag_sub_pages(tag, (1 << order) - 1);
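		/*
		 * Only the first page is pinned by the remaining speculative
		 * reference, so free the rest of the non-compound allocation
		 * as power-of-two blocks: e.g. for order 3 this frees page + 4
		 * at order 2, page + 2 at order 1 and page + 1 at order 0,
		 * leaving page 0 to whoever holds the last reference.
		 */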
5312 		while (order-- > 0) {
5313 			/*
5314 			 * The "tail" pages of this non-compound high-order
5315 			 * page will have no code tags, so to avoid warnings
5316 			 * mark them as empty.
5317 			 */
5318 			clear_page_tag_ref(page + (1 << order));
5319 			__free_frozen_pages(page + (1 << order), order,
5320 					    fpi_flags);
5321 		}
5322 	}
5323 }
5324 
5325 /**
5326  * __free_pages - Free pages allocated with alloc_pages().
5327  * @page: The page pointer returned from alloc_pages().
5328  * @order: The order of the allocation.
5329  *
5330  * This function can free multi-page allocations that are not compound
5331  * pages.  It does not check that the @order passed in matches that of
5332  * the allocation, so it is easy to leak memory.  Freeing more memory
5333  * than was allocated will probably emit a warning.
5334  *
5335  * If the last reference to this page is speculative, it will be released
5336  * by put_page() which only frees the first page of a non-compound
5337  * allocation.  To prevent the remaining pages from being leaked, we free
5338  * the subsequent pages here.  If you want to use the page's reference
5339  * count to decide when to free the allocation, you should allocate a
5340  * compound page, and use put_page() instead of __free_pages().
5341  *
5342  * Context: May be called in interrupt context or while holding a normal
5343  * spinlock, but not in NMI context or while holding a raw spinlock.
5344  */
 */
5345 void __free_pages(struct page *page, unsigned int order)
5346 {
5347 	___free_pages(page, order, FPI_NONE);
5348 }
5349 EXPORT_SYMBOL(__free_pages);
5350 
5351 /*
5352  * Can be called while holding raw_spin_lock or from IRQ and NMI for any
5353  * page type (not only those that came from alloc_pages_nolock)
5354  */
5355 void free_pages_nolock(struct page *page, unsigned int order)
5356 {
5357 	___free_pages(page, order, FPI_TRYLOCK);
5358 }
5359 
5360 /**
5361  * free_pages - Free pages allocated with __get_free_pages().
5362  * @addr: The virtual address tied to a page returned from __get_free_pages().
5363  * @order: The order of the allocation.
5364  *
5365  * This function behaves the same as __free_pages(). Use this function
5366  * to free pages when you only have a valid virtual address. If you have
5367  * the page, call __free_pages() instead.
5368  */
 */
5369 void free_pages(unsigned long addr, unsigned int order)
5370 {
5371 	if (addr != 0) {
5372 		VM_BUG_ON(!virt_addr_valid((void *)addr));
5373 		__free_pages(virt_to_page((void *)addr), order);
5374 	}
5375 }
5376 
5377 EXPORT_SYMBOL(free_pages);
5378 
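/*
 * Trim a power-of-two allocation down to an exact number of pages: split the
 * high-order block, keep the first DIV_ROUND_UP(size, PAGE_SIZE) pages and
 * hand the tail pages back to the buddy allocator one by one. For example
 * (a sketch), a 5-page request served from an order-3 block keeps pages 0-4
 * and frees pages 5-7 individually.
 */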
5379 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5380 		size_t size)
5381 {
5382 	if (addr) {
5383 		unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
5384 		struct page *page = virt_to_page((void *)addr);
5385 		struct page *last = page + nr;
5386 
5387 		__split_page(page, order);
5388 		while (page < --last)
5389 			set_page_refcounted(last);
5390 
5391 		last = page + (1UL << order);
5392 		for (page += nr; page < last; page++)
5393 			__free_pages_ok(page, 0, FPI_TO_TAIL);
5394 	}
5395 	return (void *)addr;
5396 }
5397 
5398 /**
5399  * alloc_pages_exact - allocate an exact number physically-contiguous pages.
5400  * @size: the number of bytes to allocate
5401  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5402  *
5403  * This function is similar to alloc_pages(), except that it allocates the
5404  * minimum number of pages to satisfy the request.  alloc_pages() can only
5405  * allocate memory in power-of-two pages.
5406  *
5407  * This function is also limited by MAX_PAGE_ORDER.
5408  *
5409  * Memory allocated by this function must be released by free_pages_exact().
5410  *
5411  * Return: pointer to the allocated area or %NULL in case of error.
5412  */
 */
5413 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
5414 {
5415 	unsigned int order = get_order(size);
5416 	unsigned long addr;
5417 
5418 	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5419 		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5420 
5421 	addr = get_free_pages_noprof(gfp_mask, order);
5422 	return make_alloc_exact(addr, order, size);
5423 }
5424 EXPORT_SYMBOL(alloc_pages_exact_noprof);
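
/*
 * A minimal usage sketch (illustrative only; callers typically use the
 * alloc_pages_exact() wrapper): request 3 pages worth of memory, which is
 * internally served from an order-2 block and trimmed back down, and release
 * it with free_pages_exact() using the same size.
 *
 *	void *buf = alloc_pages_exact_noprof(3 * PAGE_SIZE, GFP_KERNEL);
 *
 *	if (buf) {
 *		...
 *		free_pages_exact(buf, 3 * PAGE_SIZE);
 *	}
 */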
5425 
5426 /**
5427  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5428  *			   pages on a node.
5429  * @nid: the preferred node ID where memory should be allocated
5430  * @size: the number of bytes to allocate
5431  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5432  *
5433  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5434  * back.
5435  *
5436  * Return: pointer to the allocated area or %NULL in case of error.
5437  */
 */
5438 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
5439 {
5440 	unsigned int order = get_order(size);
5441 	struct page *p;
5442 
5443 	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5444 		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5445 
5446 	p = alloc_pages_node_noprof(nid, gfp_mask, order);
5447 	if (!p)
5448 		return NULL;
5449 	return make_alloc_exact((unsigned long)page_address(p), order, size);
5450 }
5451 
5452 /**
5453  * free_pages_exact - release memory allocated via alloc_pages_exact()
5454  * @virt: the value returned by alloc_pages_exact.
5455  * @size: size of allocation, same value as passed to alloc_pages_exact().
5456  *
5457  * Release the memory allocated by a previous call to alloc_pages_exact.
5458  */
 */
5459 void free_pages_exact(void *virt, size_t size)
5460 {
5461 	unsigned long addr = (unsigned long)virt;
5462 	unsigned long end = addr + PAGE_ALIGN(size);
5463 
5464 	while (addr < end) {
5465 		free_page(addr);
5466 		addr += PAGE_SIZE;
5467 	}
5468 }
5469 EXPORT_SYMBOL(free_pages_exact);
5470 
5471 /**
5472  * nr_free_zone_pages - count number of pages beyond high watermark
5473  * @offset: The zone index of the highest zone
5474  *
5475  * nr_free_zone_pages() counts the number of pages which are beyond the
5476  * high watermark within all zones at or below a given zone index.  For each
5477  * zone, the number of pages is calculated as:
5478  *
5479  *     nr_free_zone_pages = managed_pages - high_pages
5480  *
5481  * Return: number of pages beyond high watermark.
5482  */
 */
5483 static unsigned long nr_free_zone_pages(int offset)
5484 {
5485 	struct zoneref *z;
5486 	struct zone *zone;
5487 
5488 	/* Just pick one node, since fallback list is circular */
5489 	unsigned long sum = 0;
5490 
5491 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5492 
5493 	for_each_zone_zonelist(zone, z, zonelist, offset) {
5494 		unsigned long size = zone_managed_pages(zone);
5495 		unsigned long high = high_wmark_pages(zone);
5496 		if (size > high)
5497 			sum += size - high;
5498 	}
5499 
5500 	return sum;
5501 }
5502 
5503 /**
5504  * nr_free_buffer_pages - count number of pages beyond high watermark
5505  *
5506  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5507  * watermark within ZONE_DMA and ZONE_NORMAL.
5508  *
5509  * Return: number of pages beyond high watermark within ZONE_DMA and
5510  * ZONE_NORMAL.
5511  */
 */
5512 unsigned long nr_free_buffer_pages(void)
5513 {
5514 	return nr_free_zone_pages(gfp_zone(GFP_USER));
5515 }
5516 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5517 
5518 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5519 {
5520 	zoneref->zone = zone;
5521 	zoneref->zone_idx = zone_idx(zone);
5522 }
5523 
5524 /*
5525  * Builds allocation fallback zone lists.
5526  *
5527  * Add all populated zones of a node to the zonelist.
5528  */
5529 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5530 {
5531 	struct zone *zone;
5532 	enum zone_type zone_type = MAX_NR_ZONES;
5533 	int nr_zones = 0;
5534 
5535 	do {
5536 		zone_type--;
5537 		zone = pgdat->node_zones + zone_type;
5538 		if (populated_zone(zone)) {
5539 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5540 			check_highest_zone(zone_type);
5541 		}
5542 	} while (zone_type);
5543 
5544 	return nr_zones;
5545 }
5546 
5547 #ifdef CONFIG_NUMA
5548 
5549 static int __parse_numa_zonelist_order(char *s)
5550 {
5551 	/*
5552 	 * We used to support different zonelist modes but they turned
5553 	 * out to be just not useful. Let's keep the warning in place
5554 	 * if somebody still uses the cmdline parameter so that we do
5555 	 * not fail it silently
5556 	 */
5557 	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5558 		pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
5559 		return -EINVAL;
5560 	}
5561 	return 0;
5562 }
5563 
5564 static char numa_zonelist_order[] = "Node";
5565 #define NUMA_ZONELIST_ORDER_LEN	16
5566 /*
5567  * sysctl handler for numa_zonelist_order
5568  */
5569 static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
5570 		void *buffer, size_t *length, loff_t *ppos)
5571 {
5572 	if (write)
5573 		return __parse_numa_zonelist_order(buffer);
5574 	return proc_dostring(table, write, buffer, length, ppos);
5575 }
5576 
5577 static int node_load[MAX_NUMNODES];
5578 
5579 /**
5580  * find_next_best_node - find the next node that should appear in a given node's fallback list
5581  * @node: node whose fallback list we're appending
5582  * @used_node_mask: nodemask_t of already used nodes
5583  *
5584  * We use a number of factors to determine which is the next node that should
5585  * appear on a given node's fallback list.  The node should not have appeared
5586  * already in @node's fallback list, and it should be the next closest node
5587  * according to the distance array (which contains arbitrary distance values
5588  * from each node to each node in the system), and should also prefer nodes
5589  * with no CPUs, since presumably they'll have very little allocation pressure
5590  * on them otherwise.
5591  *
5592  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5593  */
 */
5594 int find_next_best_node(int node, nodemask_t *used_node_mask)
5595 {
5596 	int n, val;
5597 	int min_val = INT_MAX;
5598 	int best_node = NUMA_NO_NODE;
5599 
5600 	/*
5601 	 * Use the local node if we haven't already, but for memoryless local
5602 	 * node, we should skip it and fall back to other nodes.
5603 	 */
5604 	if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) {
5605 		node_set(node, *used_node_mask);
5606 		return node;
5607 	}
5608 
5609 	for_each_node_state(n, N_MEMORY) {
5610 
5611 		/* Don't want a node to appear more than once */
5612 		if (node_isset(n, *used_node_mask))
5613 			continue;
5614 
5615 		/* Use the distance array to find the distance */
5616 		val = node_distance(node, n);
5617 
5618 		/* Penalize nodes under us ("prefer the next node") */
5619 		val += (n < node);
5620 
5621 		/* Give preference to headless and unused nodes */
5622 		if (!cpumask_empty(cpumask_of_node(n)))
5623 			val += PENALTY_FOR_NODE_WITH_CPUS;
5624 
5625 		/* Slight preference for less loaded node */
5626 		val *= MAX_NUMNODES;
5627 		val += node_load[n];
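		/*
		 * Because the distance-based score is scaled by MAX_NUMNODES
		 * above, node_load is intended only to break ties between
		 * otherwise equally preferable nodes rather than to override
		 * a shorter distance.
		 */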
5628 
5629 		if (val < min_val) {
5630 			min_val = val;
5631 			best_node = n;
5632 		}
5633 	}
5634 
5635 	if (best_node >= 0)
5636 		node_set(best_node, *used_node_mask);
5637 
5638 	return best_node;
5639 }
5640 
5641 
5642 /*
5643  * Build zonelists ordered by node and zones within node.
5644  * This results in maximum locality--normal zone overflows into local
5645  * DMA zone, if any--but risks exhausting DMA zone.
5646  */
5647 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5648 		unsigned nr_nodes)
5649 {
5650 	struct zoneref *zonerefs;
5651 	int i;
5652 
5653 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5654 
5655 	for (i = 0; i < nr_nodes; i++) {
5656 		int nr_zones;
5657 
5658 		pg_data_t *node = NODE_DATA(node_order[i]);
5659 
5660 		nr_zones = build_zonerefs_node(node, zonerefs);
5661 		zonerefs += nr_zones;
5662 	}
5663 	zonerefs->zone = NULL;
5664 	zonerefs->zone_idx = 0;
5665 }
5666 
5667 /*
5668  * Build __GFP_THISNODE zonelists
5669  */
5670 static void build_thisnode_zonelists(pg_data_t *pgdat)
5671 {
5672 	struct zoneref *zonerefs;
5673 	int nr_zones;
5674 
5675 	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5676 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
5677 	zonerefs += nr_zones;
5678 	zonerefs->zone = NULL;
5679 	zonerefs->zone_idx = 0;
5680 }
5681 
5682 static void build_zonelists(pg_data_t *pgdat)
5683 {
5684 	static int node_order[MAX_NUMNODES];
5685 	int node, nr_nodes = 0;
5686 	nodemask_t used_mask = NODE_MASK_NONE;
5687 	int local_node, prev_node;
5688 
5689 	/* NUMA-aware ordering of nodes */
5690 	local_node = pgdat->node_id;
5691 	prev_node = local_node;
5692 
5693 	memset(node_order, 0, sizeof(node_order));
5694 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5695 		/*
5696 		 * We don't want to pressure a particular node.
5697 		 * So add a penalty to the first node in the same
5698 		 * distance group to make the selection round-robin.
5699 		 */
5700 		if (node_distance(local_node, node) !=
5701 		    node_distance(local_node, prev_node))
5702 			node_load[node] += 1;
5703 
5704 		node_order[nr_nodes++] = node;
5705 		prev_node = node;
5706 	}
5707 
5708 	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5709 	build_thisnode_zonelists(pgdat);
5710 	pr_info("Fallback order for Node %d: ", local_node);
5711 	for (node = 0; node < nr_nodes; node++)
5712 		pr_cont("%d ", node_order[node]);
5713 	pr_cont("\n");
5714 }
5715 
5716 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5717 /*
5718  * Return node id of node used for "local" allocations.
5719  * I.e., first node id of first zone in arg node's generic zonelist.
5720  * Used for initializing percpu 'numa_mem', which is used primarily
5721  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5722  */
5723 int local_memory_node(int node)
5724 {
5725 	struct zoneref *z;
5726 
5727 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5728 				   gfp_zone(GFP_KERNEL),
5729 				   NULL);
5730 	return zonelist_node_idx(z);
5731 }
5732 #endif
5733 
5734 static void setup_min_unmapped_ratio(void);
5735 static void setup_min_slab_ratio(void);
5736 #else	/* CONFIG_NUMA */
5737 
5738 static void build_zonelists(pg_data_t *pgdat)
5739 {
5740 	struct zoneref *zonerefs;
5741 	int nr_zones;
5742 
5743 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5744 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
5745 	zonerefs += nr_zones;
5746 
5747 	zonerefs->zone = NULL;
5748 	zonerefs->zone_idx = 0;
5749 }
5750 
5751 #endif	/* CONFIG_NUMA */
5752 
5753 /*
5754  * Boot pageset table. One per cpu which is going to be used for all
5755  * zones and all nodes. The parameters will be set in such a way
5756  * that an item put on a list will immediately be handed over to
5757  * the buddy list. This is safe since pageset manipulation is done
5758  * with interrupts disabled.
5759  *
5760  * The boot_pagesets must be kept even after bootup is complete for
5761  * unused processors and/or zones. They do play a role for bootstrapping
5762  * hotplugged processors.
5763  *
5764  * zoneinfo_show() and maybe other functions do
5765  * not check if the processor is online before following the pageset pointer.
5766  * Other parts of the kernel may not check if the zone is available.
5767  */
5768 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5769 /* These effectively disable the pcplists in the boot pageset completely */
5770 #define BOOT_PAGESET_HIGH	0
5771 #define BOOT_PAGESET_BATCH	1
5772 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
5773 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
5774 
5775 static void __build_all_zonelists(void *data)
5776 {
5777 	int nid;
5778 	int __maybe_unused cpu;
5779 	pg_data_t *self = data;
5780 	unsigned long flags;
5781 
5782 	/*
5783 	 * The zonelist_update_seq must be acquired with irqsave because the
5784 	 * reader can be invoked from IRQ with GFP_ATOMIC.
5785 	 */
5786 	write_seqlock_irqsave(&zonelist_update_seq, flags);
5787 	/*
5788 	 * Also disable synchronous printk() to prevent any printk() from
5789 	 * trying to hold port->lock, for
5790 	 * tty_insert_flip_string_and_push_buffer() on other CPU might be
5791 	 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
5792 	 */
5793 	printk_deferred_enter();
5794 
5795 #ifdef CONFIG_NUMA
5796 	memset(node_load, 0, sizeof(node_load));
5797 #endif
5798 
5799 	/*
5800 	 * This node is hotadded and no memory is yet present.   So just
5801 	 * building zonelists is fine - no need to touch other nodes.
5802 	 */
5803 	if (self && !node_online(self->node_id)) {
5804 		build_zonelists(self);
5805 	} else {
5806 		/*
5807 		 * All possible nodes have pgdat preallocated
5808 		 * in free_area_init
5809 		 */
5810 		for_each_node(nid) {
5811 			pg_data_t *pgdat = NODE_DATA(nid);
5812 
5813 			build_zonelists(pgdat);
5814 		}
5815 
5816 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5817 		/*
5818 		 * We now know the "local memory node" for each node--
5819 		 * i.e., the node of the first zone in the generic zonelist.
5820 		 * Set up numa_mem percpu variable for on-line cpus.  During
5821 		 * boot, only the boot cpu should be on-line;  we'll init the
5822 		 * secondary cpus' numa_mem as they come on-line.  During
5823 		 * node/memory hotplug, we'll fixup all on-line cpus.
5824 		 */
5825 		for_each_online_cpu(cpu)
5826 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5827 #endif
5828 	}
5829 
5830 	printk_deferred_exit();
5831 	write_sequnlock_irqrestore(&zonelist_update_seq, flags);
5832 }
5833 
5834 static noinline void __init
5835 build_all_zonelists_init(void)
5836 {
5837 	int cpu;
5838 
5839 	__build_all_zonelists(NULL);
5840 
5841 	/*
5842 	 * Initialize the boot_pagesets that are going to be used
5843 	 * for bootstrapping processors. The real pagesets for
5844 	 * each zone will be allocated later when the per cpu
5845 	 * allocator is available.
5846 	 *
5847 	 * boot_pagesets are used also for bootstrapping offline
5848 	 * cpus if the system is already booted because the pagesets
5849 	 * are needed to initialize allocators on a specific cpu too.
5850 	 * F.e. the percpu allocator needs the page allocator which
5851 	 * needs the percpu allocator in order to allocate its pagesets
5852 	 * (a chicken-egg dilemma).
5853 	 */
5854 	for_each_possible_cpu(cpu)
5855 		per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
5856 
5857 	mminit_verify_zonelist();
5858 	cpuset_init_current_mems_allowed();
5859 }
5860 
5861 /*
5862  * unless system_state == SYSTEM_BOOTING.
5863  *
5864  * __ref due to call of __init annotated helper build_all_zonelists_init
5865  * [protected by SYSTEM_BOOTING].
5866  */
5867 void __ref build_all_zonelists(pg_data_t *pgdat)
5868 {
5869 	unsigned long vm_total_pages;
5870 
5871 	if (system_state == SYSTEM_BOOTING) {
5872 		build_all_zonelists_init();
5873 	} else {
5874 		__build_all_zonelists(pgdat);
5875 		/* cpuset refresh routine should be here */
5876 	}
5877 	/* Get the number of free pages beyond high watermark in all zones. */
5878 	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
5879 	/*
5880 	 * Disable grouping by mobility if the number of pages in the
5881 	 * system is too low to allow the mechanism to work. It would be
5882 	 * more accurate, but expensive to check per-zone. This check is
5883 	 * made on memory-hotadd so a system can start with mobility
5884 	 * disabled and enable it later
5885 	 */
5886 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5887 		page_group_by_mobility_disabled = 1;
5888 	else
5889 		page_group_by_mobility_disabled = 0;
5890 
5891 	pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
5892 		nr_online_nodes,
5893 		str_off_on(page_group_by_mobility_disabled),
5894 		vm_total_pages);
5895 #ifdef CONFIG_NUMA
5896 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5897 #endif
5898 }
5899 
5900 static int zone_batchsize(struct zone *zone)
5901 {
5902 #ifdef CONFIG_MMU
5903 	int batch;
5904 
5905 	/*
5906 	 * The number of pages to batch allocate is either ~0.025%
5907 	 * of the zone or 256KB, whichever is smaller. The batch
5908 	 * size is striking a balance between allocation latency
5909 	 * and zone lock contention.
5910 	 */
5911 	batch = min(zone_managed_pages(zone) >> 12, SZ_256K / PAGE_SIZE);
5912 	if (batch <= 1)
5913 		return 1;
5914 
5915 	/*
5916 	 * Clamp the batch to a 2^n - 1 value. Having a power
5917 	 * of 2 value was found to be more likely to have
5918 	 * suboptimal cache aliasing properties in some cases.
5919 	 *
5920 	 * For example if 2 tasks are alternately allocating
5921 	 * batches of pages, one task can end up with a lot
5922 	 * of pages of one half of the possible page colors
5923 	 * and the other with pages of the other colors.
5924 	 */
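	/*
	 * Worked example (a sketch): with 4KiB pages, a zone with 2^20 managed
	 * pages gives min(2^20 >> 12, SZ_256K / PAGE_SIZE) = min(256, 64) = 64,
	 * and the clamp below turns that into
	 * rounddown_pow_of_two(64 + 32) - 1 = 63.
	 */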
5925 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
5926 
5927 	return batch;
5928 
5929 #else
5930 	/* The deferral and batching of frees should be suppressed under NOMMU
5931 	 * conditions.
5932 	 *
5933 	 * The problem is that NOMMU needs to be able to allocate large chunks
5934 	 * of contiguous memory as there's no hardware page translation to
5935 	 * assemble apparent contiguous memory from discontiguous pages.
5936 	 *
5937 	 * Queueing large contiguous runs of pages for batching, however,
5938 	 * causes the pages to actually be freed in smaller chunks.  As there
5939 	 * can be a significant delay between the individual batches being
5940 	 * recycled, this leads to the once large chunks of space being
5941 	 * fragmented and becoming unavailable for high-order allocations.
5942 	 */
5943 	return 1;
5944 #endif
5945 }
5946 
5947 static int percpu_pagelist_high_fraction;
5948 static int zone_highsize(struct zone *zone, int batch, int cpu_online,
5949 			 int high_fraction)
5950 {
5951 #ifdef CONFIG_MMU
5952 	int high;
5953 	int nr_split_cpus;
5954 	unsigned long total_pages;
5955 
5956 	if (!high_fraction) {
5957 		/*
5958 		 * By default, the high value of the pcp is based on the zone
5959 		 * low watermark so that if they are full then background
5960 		 * reclaim will not be started prematurely.
5961 		 */
5962 		total_pages = low_wmark_pages(zone);
5963 	} else {
5964 		/*
5965 		 * If percpu_pagelist_high_fraction is configured, the high
5966 		 * value is based on a fraction of the managed pages in the
5967 		 * zone.
5968 		 */
5969 		total_pages = zone_managed_pages(zone) / high_fraction;
5970 	}
5971 
5972 	/*
5973 	 * Split the high value across all online CPUs local to the zone. Note
5974 	 * that early in boot that CPUs may not be online yet and that during
5975 	 * that early in boot CPUs may not be online yet, and that during
5976 	 * CPU hotplug the cpumask is not yet updated when a CPU is being
5977 	 * across all online CPUs to mitigate the risk that reclaim is triggered
5978 	 * prematurely due to pages stored on pcp lists.
5979 	 */
5980 	nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
5981 	if (!nr_split_cpus)
5982 		nr_split_cpus = num_online_cpus();
5983 	high = total_pages / nr_split_cpus;
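	/*
	 * For example (a sketch), a zone whose low watermark is 16384 pages
	 * and which has 8 local CPUs ends up with roughly 2048 pages of pcp
	 * "high" per CPU, subject to the batch-based floor applied below.
	 */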
5984 
5985 	/*
5986 	 * Ensure high is at least batch*4. The multiple is based on the
5987 	 * historical relationship between high and batch.
5988 	 */
5989 	high = max(high, batch << 2);
5990 
5991 	return high;
5992 #else
5993 	return 0;
5994 #endif
5995 }
5996 
5997 /*
5998  * pcp->high and pcp->batch values are related and generally batch is lower
5999  * than high. They are also related to pcp->count such that count is lower
6000  * than high, and as soon as it reaches high, the pcplist is flushed.
6001  *
6002  * However, guaranteeing these relations at all times would require e.g. write
6003  * barriers here but also careful usage of read barriers at the read side, and
6004  * thus be prone to error and bad for performance. Thus the update only prevents
6005  * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max
6006  * should ensure they can cope with those fields changing asynchronously, and
6007  * fully trust only the pcp->count field on the local CPU with interrupts
6008  * disabled.
6009  *
6010  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6011  * outside of boot time (or some other assurance that no concurrent updaters
6012  * exist).
6013  */
6014 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min,
6015 			   unsigned long high_max, unsigned long batch)
6016 {
6017 	WRITE_ONCE(pcp->batch, batch);
6018 	WRITE_ONCE(pcp->high_min, high_min);
6019 	WRITE_ONCE(pcp->high_max, high_max);
6020 }
6021 
6022 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
6023 {
6024 	int pindex;
6025 
6026 	memset(pcp, 0, sizeof(*pcp));
6027 	memset(pzstats, 0, sizeof(*pzstats));
6028 
6029 	spin_lock_init(&pcp->lock);
6030 	for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
6031 		INIT_LIST_HEAD(&pcp->lists[pindex]);
6032 
6033 	/*
6034 	 * Set batch and high values safe for a boot pageset. A true percpu
6035 	 * pageset's initialization will update them subsequently. Here we don't
6036 	 * need to be as careful as pageset_update() as nobody can access the
6037 	 * pageset yet.
6038 	 */
6039 	pcp->high_min = BOOT_PAGESET_HIGH;
6040 	pcp->high_max = BOOT_PAGESET_HIGH;
6041 	pcp->batch = BOOT_PAGESET_BATCH;
6042 }
6043 
6044 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min,
6045 					      unsigned long high_max, unsigned long batch)
6046 {
6047 	struct per_cpu_pages *pcp;
6048 	int cpu;
6049 
6050 	for_each_possible_cpu(cpu) {
6051 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6052 		pageset_update(pcp, high_min, high_max, batch);
6053 	}
6054 }
6055 
6056 /*
6057  * Calculate and set new high and batch values for all per-cpu pagesets of a
6058  * zone based on the zone's size.
6059  */
6060 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
6061 {
6062 	int new_high_min, new_high_max, new_batch;
6063 
6064 	new_batch = zone_batchsize(zone);
6065 	if (percpu_pagelist_high_fraction) {
6066 		new_high_min = zone_highsize(zone, new_batch, cpu_online,
6067 					     percpu_pagelist_high_fraction);
6068 		/*
6069 		 * PCP high is tuned manually, disable auto-tuning via
6070 		 * setting high_min and high_max to the manual value.
6071 		 */
6072 		new_high_max = new_high_min;
6073 	} else {
6074 		new_high_min = zone_highsize(zone, new_batch, cpu_online, 0);
6075 		new_high_max = zone_highsize(zone, new_batch, cpu_online,
6076 					     MIN_PERCPU_PAGELIST_HIGH_FRACTION);
6077 	}
6078 
6079 	if (zone->pageset_high_min == new_high_min &&
6080 	    zone->pageset_high_max == new_high_max &&
6081 	    zone->pageset_batch == new_batch)
6082 		return;
6083 
6084 	zone->pageset_high_min = new_high_min;
6085 	zone->pageset_high_max = new_high_max;
6086 	zone->pageset_batch = new_batch;
6087 
6088 	__zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
6089 					  new_batch);
6090 }
6091 
6092 void __meminit setup_zone_pageset(struct zone *zone)
6093 {
6094 	int cpu;
6095 
6096 	/* Size may be 0 on !SMP && !NUMA */
6097 	if (sizeof(struct per_cpu_zonestat) > 0)
6098 		zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
6099 
6100 	zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
6101 	for_each_possible_cpu(cpu) {
6102 		struct per_cpu_pages *pcp;
6103 		struct per_cpu_zonestat *pzstats;
6104 
6105 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6106 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6107 		per_cpu_pages_init(pcp, pzstats);
6108 	}
6109 
6110 	zone_set_pageset_high_and_batch(zone, 0);
6111 }
6112 
6113 /*
6114  * The zone indicated has a new number of managed_pages; batch sizes and percpu
6115  * page high values need to be recalculated.
6116  */
6117 static void zone_pcp_update(struct zone *zone, int cpu_online)
6118 {
6119 	mutex_lock(&pcp_batch_high_lock);
6120 	zone_set_pageset_high_and_batch(zone, cpu_online);
6121 	mutex_unlock(&pcp_batch_high_lock);
6122 }
6123 
6124 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
6125 {
6126 	struct per_cpu_pages *pcp;
6127 	struct cpu_cacheinfo *cci;
6128 
6129 	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
6130 	cci = get_cpu_cacheinfo(cpu);
6131 	/*
6132 	 * If the CPU's data cache slice is large enough, up to "pcp->batch"
6133 	 * pages can be kept on the PCP before it is drained when consecutive
6134 	 * high-order pages are freed without intervening allocations.
6135 	 * This reduces zone lock contention without hurting the sharing of
6136 	 * cache-hot pages.
6137 	 */
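	/*
	 * For example (a sketch), with 4KiB pages and pcp->batch == 63 the
	 * flag is enabled once the per-CPU data cache slice exceeds
	 * 3 * 63 = 189 pages, i.e. roughly 756KiB.
	 */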
6138 	pcp_spin_lock_nopin(pcp);
6139 	if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
6140 		pcp->flags |= PCPF_FREE_HIGH_BATCH;
6141 	else
6142 		pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
6143 	pcp_spin_unlock_nopin(pcp);
6144 }
6145 
6146 void setup_pcp_cacheinfo(unsigned int cpu)
6147 {
6148 	struct zone *zone;
6149 
6150 	for_each_populated_zone(zone)
6151 		zone_pcp_update_cacheinfo(zone, cpu);
6152 }
6153 
6154 /*
6155  * Allocate per cpu pagesets and initialize them.
6156  * Before this call only boot pagesets were available.
6157  */
6158 void __init setup_per_cpu_pageset(void)
6159 {
6160 	struct pglist_data *pgdat;
6161 	struct zone *zone;
6162 	int __maybe_unused cpu;
6163 
6164 	for_each_populated_zone(zone)
6165 		setup_zone_pageset(zone);
6166 
6167 #ifdef CONFIG_NUMA
6168 	/*
6169 	 * Unpopulated zones continue using the boot pagesets.
6170 	 * The numa stats for these pagesets need to be reset.
6171 	 * Otherwise, they will end up skewing the stats of
6172 	 * the nodes these zones are associated with.
6173 	 */
6174 	for_each_possible_cpu(cpu) {
6175 		struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
6176 		memset(pzstats->vm_numa_event, 0,
6177 		       sizeof(pzstats->vm_numa_event));
6178 	}
6179 #endif
6180 
6181 	for_each_online_pgdat(pgdat)
6182 		pgdat->per_cpu_nodestats =
6183 			alloc_percpu(struct per_cpu_nodestat);
6184 }
6185 
6186 __meminit void zone_pcp_init(struct zone *zone)
6187 {
6188 	/*
6189 	 * per cpu subsystem is not up at this point. The following code
6190 	 * relies on the ability of the linker to provide the
6191 	 * offset of a (static) per cpu variable into the per cpu area.
6192 	 */
6193 	zone->per_cpu_pageset = &boot_pageset;
6194 	zone->per_cpu_zonestats = &boot_zonestats;
6195 	zone->pageset_high_min = BOOT_PAGESET_HIGH;
6196 	zone->pageset_high_max = BOOT_PAGESET_HIGH;
6197 	zone->pageset_batch = BOOT_PAGESET_BATCH;
6198 
6199 	if (populated_zone(zone))
6200 		pr_debug("  %s zone: %lu pages, LIFO batch:%u\n", zone->name,
6201 			 zone->present_pages, zone_batchsize(zone));
6202 }
6203 
6204 static void setup_per_zone_lowmem_reserve(void);
6205 
6206 void adjust_managed_page_count(struct page *page, long count)
6207 {
6208 	atomic_long_add(count, &page_zone(page)->managed_pages);
6209 	totalram_pages_add(count);
6210 	setup_per_zone_lowmem_reserve();
6211 }
6212 EXPORT_SYMBOL(adjust_managed_page_count);
6213 
6214 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
6215 {
6216 	void *pos;
6217 	unsigned long pages = 0;
6218 
6219 	start = (void *)PAGE_ALIGN((unsigned long)start);
6220 	end = (void *)((unsigned long)end & PAGE_MASK);
6221 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
6222 		struct page *page = virt_to_page(pos);
6223 		void *direct_map_addr;
6224 
6225 		/*
6226 		 * 'direct_map_addr' might be different from 'pos'
6227 		 * because virt_to_page() on some architectures
6228 		 * works with aliases.  Getting the direct map
6229 		 * address ensures that we get a _writeable_
6230 		 * alias for the memset().
6231 		 */
6232 		direct_map_addr = page_address(page);
6233 		/*
6234 		 * Perform a kasan-unchecked memset() since this memory
6235 		 * has not been initialized.
6236 		 */
6237 		direct_map_addr = kasan_reset_tag(direct_map_addr);
6238 		if ((unsigned int)poison <= 0xFF)
6239 			memset(direct_map_addr, poison, PAGE_SIZE);
6240 
6241 		free_reserved_page(page);
6242 	}
6243 
6244 	if (pages && s)
6245 		pr_info("Freeing %s memory: %ldK\n", s, K(pages));
6246 
6247 	return pages;
6248 }
6249 
6250 void free_reserved_page(struct page *page)
6251 {
6252 	clear_page_tag_ref(page);
6253 	ClearPageReserved(page);
6254 	init_page_count(page);
6255 	__free_page(page);
6256 	adjust_managed_page_count(page, 1);
6257 }
6258 EXPORT_SYMBOL(free_reserved_page);
6259 
6260 static int page_alloc_cpu_dead(unsigned int cpu)
6261 {
6262 	struct zone *zone;
6263 
6264 	lru_add_drain_cpu(cpu);
6265 	mlock_drain_remote(cpu);
6266 	drain_pages(cpu);
6267 
6268 	/*
6269 	 * Spill the event counters of the dead processor
6270 	 * into the current processors event counters.
6271 	 * This artificially elevates the count of the current
6272 	 * processor.
6273 	 */
6274 	vm_events_fold_cpu(cpu);
6275 
6276 	/*
6277 	 * Zero the differential counters of the dead processor
6278 	 * so that the vm statistics are consistent.
6279 	 *
6280 	 * This is only okay since the processor is dead and cannot
6281 	 * race with what we are doing.
6282 	 */
6283 	cpu_vm_stats_fold(cpu);
6284 
6285 	for_each_populated_zone(zone)
6286 		zone_pcp_update(zone, 0);
6287 
6288 	return 0;
6289 }
6290 
6291 static int page_alloc_cpu_online(unsigned int cpu)
6292 {
6293 	struct zone *zone;
6294 
6295 	for_each_populated_zone(zone)
6296 		zone_pcp_update(zone, 1);
6297 	return 0;
6298 }
6299 
6300 void __init page_alloc_init_cpuhp(void)
6301 {
6302 	int ret;
6303 
6304 	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
6305 					"mm/page_alloc:pcp",
6306 					page_alloc_cpu_online,
6307 					page_alloc_cpu_dead);
6308 	WARN_ON(ret < 0);
6309 }
6310 
6311 /*
6312  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
6313  *	or min_free_kbytes changes.
6314  */
6315 static void calculate_totalreserve_pages(void)
6316 {
6317 	struct pglist_data *pgdat;
6318 	unsigned long reserve_pages = 0;
6319 	enum zone_type i, j;
6320 
6321 	for_each_online_pgdat(pgdat) {
6322 
6323 		pgdat->totalreserve_pages = 0;
6324 
6325 		for (i = 0; i < MAX_NR_ZONES; i++) {
6326 			struct zone *zone = pgdat->node_zones + i;
6327 			long max = 0;
6328 			unsigned long managed_pages = zone_managed_pages(zone);
6329 
6330 			/*
6331 			 * lowmem_reserve[j] is monotonically non-decreasing
6332 			 * in j for a given zone (see
6333 			 * setup_per_zone_lowmem_reserve()). The maximum
6334 			 * valid reserve lives at the highest index with a
6335 			 * non-zero value, so scan backwards and stop at the
6336 			 * first hit.
6337 			 */
6338 			for (j = MAX_NR_ZONES - 1; j > i; j--) {
6339 				if (!zone->lowmem_reserve[j])
6340 					continue;
6341 
6342 				max = zone->lowmem_reserve[j];
6343 				break;
6344 			}
6345 			/* we treat the high watermark as reserved pages. */
6346 			max += high_wmark_pages(zone);
6347 
6348 			max = min_t(unsigned long, max, managed_pages);
6349 
6350 			pgdat->totalreserve_pages += max;
6351 
6352 			reserve_pages += max;
6353 		}
6354 	}
6355 	totalreserve_pages = reserve_pages;
6356 	trace_mm_calculate_totalreserve_pages(totalreserve_pages);
6357 }
6358 
6359 /*
6360  * setup_per_zone_lowmem_reserve - called whenever
6361  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
6362  *	has a correct pages reserved value, so an adequate number of
6363  *	pages are left in the zone after a successful __alloc_pages().
6364  */
6365 static void setup_per_zone_lowmem_reserve(void)
6366 {
6367 	struct pglist_data *pgdat;
6368 	enum zone_type i, j;
6369 	/*
6370 	 * For a given zone node_zones[i], lowmem_reserve[j] (j > i)
6371 	 * represents how many pages in zone i must effectively be kept
6372 	 * in reserve when deciding whether an allocation class that is
6373 	 * allowed to allocate from zones up to j may fall back into
6374 	 * zone i.
6375 	 *
6376 	 * As j increases, the allocation class can use a strictly larger
6377 	 * set of fallback zones and therefore must not be allowed to
6378 	 * deplete low zones more aggressively than a less flexible one.
6379 	 * As a result, lowmem_reserve[j] is required to be monotonically
6380 	 * non-decreasing in j for each zone i. Callers such as
6381 	 * calculate_totalreserve_pages() rely on this monotonicity when
6382 	 * selecting the maximum reserve entry.
6383 	 */
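	/*
	 * Worked example (a sketch): with sysctl_lowmem_reserve_ratio[i] == 256
	 * and 2^20 managed pages in the zones above zone i, zone i reserves
	 * 2^20 / 256 = 4096 pages (16MiB with 4KiB pages) against allocations
	 * that could also have been satisfied from those higher zones.
	 */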
6384 	for_each_online_pgdat(pgdat) {
6385 		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
6386 			struct zone *zone = &pgdat->node_zones[i];
6387 			int ratio = sysctl_lowmem_reserve_ratio[i];
6388 			bool clear = !ratio || !zone_managed_pages(zone);
6389 			unsigned long managed_pages = 0;
6390 
6391 			for (j = i + 1; j < MAX_NR_ZONES; j++) {
6392 				struct zone *upper_zone = &pgdat->node_zones[j];
6393 
6394 				managed_pages += zone_managed_pages(upper_zone);
6395 
6396 				if (clear)
6397 					zone->lowmem_reserve[j] = 0;
6398 				else
6399 					zone->lowmem_reserve[j] = managed_pages / ratio;
6400 				trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone,
6401 								       zone->lowmem_reserve[j]);
6402 			}
6403 		}
6404 	}
6405 
6406 	/* update totalreserve_pages */
6407 	calculate_totalreserve_pages();
6408 }
6409 
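/*
 * Illustrative example (the numbers are assumptions, not taken from this
 * file): with sysctl_lowmem_reserve_ratio[ZONE_DMA32] = 256 and 1,000,000
 * managed pages in ZONE_NORMAL above it, setup_per_zone_lowmem_reserve()
 * sets DMA32's lowmem_reserve[ZONE_NORMAL] = 1,000,000 / 256 = 3906 pages
 * (~15 MiB with 4 KiB pages) that allocations able to use ZONE_NORMAL must
 * leave free when falling back into ZONE_DMA32.
 */
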
6410 static void __setup_per_zone_wmarks(void)
6411 {
6412 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6413 	unsigned long lowmem_pages = 0;
6414 	struct zone *zone;
6415 	unsigned long flags;
6416 
6417 	/* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
6418 	for_each_zone(zone) {
6419 		if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
6420 			lowmem_pages += zone_managed_pages(zone);
6421 	}
6422 
6423 	for_each_zone(zone) {
6424 		u64 tmp;
6425 
6426 		spin_lock_irqsave(&zone->lock, flags);
6427 		tmp = (u64)pages_min * zone_managed_pages(zone);
6428 		tmp = div64_ul(tmp, lowmem_pages);
6429 		if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
6430 			/*
6431 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6432 			 * need pages from highmem and movable zones, so cap
6433 			 * pages_min to a small value here.
6434 			 *
6435 			 * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN)
6436 			 * deltas control async page reclaim, and so should
6437 			 * not be capped for highmem and movable zones.
6438 			 */
6439 			unsigned long min_pages;
6440 
6441 			min_pages = zone_managed_pages(zone) / 1024;
6442 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
6443 			zone->_watermark[WMARK_MIN] = min_pages;
6444 		} else {
6445 			/*
6446 			 * If it's a lowmem zone, reserve a number of pages
6447 			 * proportionate to the zone's size.
6448 			 */
6449 			zone->_watermark[WMARK_MIN] = tmp;
6450 		}
6451 
6452 		/*
6453 		 * Set the kswapd watermarks distance according to the
6454 		 * scale factor in proportion to available memory, but
6455 		 * ensure a minimum size on small systems.
6456 		 */
6457 		tmp = max_t(u64, tmp >> 2,
6458 			    mult_frac(zone_managed_pages(zone),
6459 				      watermark_scale_factor, 10000));
6460 
6461 		zone->watermark_boost = 0;
6462 		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
6463 		zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
6464 		zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
6465 		trace_mm_setup_per_zone_wmarks(zone);
6466 
6467 		spin_unlock_irqrestore(&zone->lock, flags);
6468 	}
6469 
6470 	/* update totalreserve_pages */
6471 	calculate_totalreserve_pages();
6472 }
6473 
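/*
 * Illustrative example (the numbers are assumptions): for a single lowmem
 * zone with 4,000,000 managed pages, min_free_kbytes = 16384 and the
 * default watermark_scale_factor of 10, __setup_per_zone_wmarks() computes
 * pages_min = 16384 >> (PAGE_SHIFT - 10) = 4096 with 4 KiB pages, so
 * WMARK_MIN = 4096, tmp = max(4096 / 4, 4,000,000 * 10 / 10000) = 4000,
 * WMARK_LOW = 8096, WMARK_HIGH = 12096 and WMARK_PROMO = 16096.
 */
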
6474 /**
6475  * setup_per_zone_wmarks - called when min_free_kbytes changes
6476  * or when memory is hot-{added|removed}
6477  *
6478  * Ensures that the watermark[min,low,high] values for each zone are set
6479  * correctly with respect to min_free_kbytes.
6480  */
6481 void setup_per_zone_wmarks(void)
6482 {
6483 	struct zone *zone;
6484 	static DEFINE_SPINLOCK(lock);
6485 
6486 	spin_lock(&lock);
6487 	__setup_per_zone_wmarks();
6488 	spin_unlock(&lock);
6489 
6490 	/*
6491 	 * The watermark sizes have changed so update the pcpu batch
6492 	 * and high limits, or the limits may be inappropriate.
6493 	 */
6494 	for_each_zone(zone)
6495 		zone_pcp_update(zone, 0);
6496 }
6497 
6498 /*
6499  * Initialise min_free_kbytes.
6500  *
6501  * For small machines we want it small (128k min).  For large machines
6502  * we want it large (256MB max).  But it is not linear, because network
6503  * bandwidth does not increase linearly with machine size.  We use
6504  *
6505  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
6506  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
6507  *
6508  * which yields
6509  *
6510  * 16MB:	512k
6511  * 32MB:	724k
6512  * 64MB:	1024k
6513  * 128MB:	1448k
6514  * 256MB:	2048k
6515  * 512MB:	2896k
6516  * 1024MB:	4096k
6517  * 2048MB:	5792k
6518  * 4096MB:	8192k
6519  * 8192MB:	11584k
6520  * 16384MB:	16384k
6521  */
6522 void calculate_min_free_kbytes(void)
6523 {
6524 	unsigned long lowmem_kbytes;
6525 	int new_min_free_kbytes;
6526 
6527 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
6528 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6529 
6530 	if (new_min_free_kbytes > user_min_free_kbytes)
6531 		min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
6532 	else
6533 		pr_warn_ratelimited("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6534 				    new_min_free_kbytes, user_min_free_kbytes);
6535 
6536 }
6537 
6538 int __meminit init_per_zone_wmark_min(void)
6539 {
6540 	calculate_min_free_kbytes();
6541 	setup_per_zone_wmarks();
6542 	refresh_zone_stat_thresholds();
6543 	setup_per_zone_lowmem_reserve();
6544 
6545 #ifdef CONFIG_NUMA
6546 	setup_min_unmapped_ratio();
6547 	setup_min_slab_ratio();
6548 #endif
6549 
6550 	khugepaged_min_free_kbytes_update();
6551 
6552 	return 0;
6553 }
6554 postcore_initcall(init_per_zone_wmark_min)
6555 
6556 /*
6557  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
6558  *	that we can call two helper functions whenever min_free_kbytes
6559  *	changes.
6560  */
6561 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
6562 		void *buffer, size_t *length, loff_t *ppos)
6563 {
6564 	int rc;
6565 
6566 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6567 	if (rc)
6568 		return rc;
6569 
6570 	if (write) {
6571 		user_min_free_kbytes = min_free_kbytes;
6572 		setup_per_zone_wmarks();
6573 	}
6574 	return 0;
6575 }
6576 
6577 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
6578 		void *buffer, size_t *length, loff_t *ppos)
6579 {
6580 	int rc;
6581 
6582 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6583 	if (rc)
6584 		return rc;
6585 
6586 	if (write)
6587 		setup_per_zone_wmarks();
6588 
6589 	return 0;
6590 }
6591 
6592 #ifdef CONFIG_NUMA
6593 static void setup_min_unmapped_ratio(void)
6594 {
6595 	pg_data_t *pgdat;
6596 	struct zone *zone;
6597 
6598 	for_each_online_pgdat(pgdat)
6599 		pgdat->min_unmapped_pages = 0;
6600 
6601 	for_each_zone(zone)
6602 		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
6603 						         sysctl_min_unmapped_ratio) / 100;
6604 }
6605 
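/*
 * Illustrative example (the numbers are assumptions): a node whose zones
 * manage 2,000,000 pages in total with sysctl_min_unmapped_ratio = 1 ends
 * up with pgdat->min_unmapped_pages = roughly 2,000,000 * 1 / 100 = 20,000
 * pages, the unmapped file page threshold used by node reclaim.
 */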
6606 
6607 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
6608 		void *buffer, size_t *length, loff_t *ppos)
6609 {
6610 	int rc;
6611 
6612 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6613 	if (rc)
6614 		return rc;
6615 
6616 	setup_min_unmapped_ratio();
6617 
6618 	return 0;
6619 }
6620 
6621 static void setup_min_slab_ratio(void)
6622 {
6623 	pg_data_t *pgdat;
6624 	struct zone *zone;
6625 
6626 	for_each_online_pgdat(pgdat)
6627 		pgdat->min_slab_pages = 0;
6628 
6629 	for_each_zone(zone)
6630 		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
6631 						     sysctl_min_slab_ratio) / 100;
6632 }
6633 
6634 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
6635 		void *buffer, size_t *length, loff_t *ppos)
6636 {
6637 	int rc;
6638 
6639 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6640 	if (rc)
6641 		return rc;
6642 
6643 	setup_min_slab_ratio();
6644 
6645 	return 0;
6646 }
6647 #endif
6648 
6649 /*
6650  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6651  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6652  *	whenever sysctl_lowmem_reserve_ratio changes.
6653  *
6654  * The reserve ratio obviously has absolutely no relation with the
6655  * minimum watermarks. The lowmem reserve ratio only makes sense
6656  * relative to the boot-time zone sizes.
6657  */
6658 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
6659 		int write, void *buffer, size_t *length, loff_t *ppos)
6660 {
6661 	int i;
6662 
6663 	proc_dointvec_minmax(table, write, buffer, length, ppos);
6664 
6665 	for (i = 0; i < MAX_NR_ZONES; i++) {
6666 		if (sysctl_lowmem_reserve_ratio[i] < 1)
6667 			sysctl_lowmem_reserve_ratio[i] = 0;
6668 	}
6669 
6670 	setup_per_zone_lowmem_reserve();
6671 	return 0;
6672 }
6673 
6674 /*
6675  * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
6676  * cpu. It is the fraction of total pages in each zone that a hot per cpu
6677  * pagelist can have before it gets flushed back to the buddy allocator.
6678  */
6679 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
6680 		int write, void *buffer, size_t *length, loff_t *ppos)
6681 {
6682 	struct zone *zone;
6683 	int old_percpu_pagelist_high_fraction;
6684 	int ret;
6685 
6686 	/*
6687 	 * Avoid using pcp_batch_high_lock for reads as the value is read
6688 	 * atomically and a race with offlining is harmless.
6689 	 */
6690 
6691 	if (!write)
6692 		return proc_dointvec_minmax(table, write, buffer, length, ppos);
6693 
6694 	mutex_lock(&pcp_batch_high_lock);
6695 	old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
6696 
6697 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6698 	if (ret < 0)
6699 		goto out;
6700 
6701 	/* Sanity checking to avoid pcp imbalance */
6702 	if (percpu_pagelist_high_fraction &&
6703 	    percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
6704 		percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
6705 		ret = -EINVAL;
6706 		goto out;
6707 	}
6708 
6709 	/* No change? */
6710 	if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
6711 		goto out;
6712 
6713 	for_each_populated_zone(zone)
6714 		zone_set_pageset_high_and_batch(zone, 0);
6715 out:
6716 	mutex_unlock(&pcp_batch_high_lock);
6717 	return ret;
6718 }
6719 
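/*
 * Illustrative example (a sketch; the exact per-CPU split is computed
 * elsewhere in this file and is not shown here): writing 8 to
 * vm.percpu_pagelist_high_fraction on a zone with 1,000,000 managed pages
 * caps the zone's combined pcp->high budget at roughly 1,000,000 / 8 =
 * 125,000 pages, divided among the CPUs that locally access the zone.
 */
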
6720 static const struct ctl_table page_alloc_sysctl_table[] = {
6721 	{
6722 		.procname	= "min_free_kbytes",
6723 		.data		= &min_free_kbytes,
6724 		.maxlen		= sizeof(min_free_kbytes),
6725 		.mode		= 0644,
6726 		.proc_handler	= min_free_kbytes_sysctl_handler,
6727 		.extra1		= SYSCTL_ZERO,
6728 	},
6729 	{
6730 		.procname	= "watermark_boost_factor",
6731 		.data		= &watermark_boost_factor,
6732 		.maxlen		= sizeof(watermark_boost_factor),
6733 		.mode		= 0644,
6734 		.proc_handler	= proc_dointvec_minmax,
6735 		.extra1		= SYSCTL_ZERO,
6736 	},
6737 	{
6738 		.procname	= "watermark_scale_factor",
6739 		.data		= &watermark_scale_factor,
6740 		.maxlen		= sizeof(watermark_scale_factor),
6741 		.mode		= 0644,
6742 		.proc_handler	= watermark_scale_factor_sysctl_handler,
6743 		.extra1		= SYSCTL_ONE,
6744 		.extra2		= SYSCTL_THREE_THOUSAND,
6745 	},
6746 	{
6747 		.procname	= "defrag_mode",
6748 		.data		= &defrag_mode,
6749 		.maxlen		= sizeof(defrag_mode),
6750 		.mode		= 0644,
6751 		.proc_handler	= proc_dointvec_minmax,
6752 		.extra1		= SYSCTL_ZERO,
6753 		.extra2		= SYSCTL_ONE,
6754 	},
6755 	{
6756 		.procname	= "percpu_pagelist_high_fraction",
6757 		.data		= &percpu_pagelist_high_fraction,
6758 		.maxlen		= sizeof(percpu_pagelist_high_fraction),
6759 		.mode		= 0644,
6760 		.proc_handler	= percpu_pagelist_high_fraction_sysctl_handler,
6761 		.extra1		= SYSCTL_ZERO,
6762 	},
6763 	{
6764 		.procname	= "lowmem_reserve_ratio",
6765 		.data		= &sysctl_lowmem_reserve_ratio,
6766 		.maxlen		= sizeof(sysctl_lowmem_reserve_ratio),
6767 		.mode		= 0644,
6768 		.proc_handler	= lowmem_reserve_ratio_sysctl_handler,
6769 	},
6770 #ifdef CONFIG_NUMA
6771 	{
6772 		.procname	= "numa_zonelist_order",
6773 		.data		= &numa_zonelist_order,
6774 		.maxlen		= NUMA_ZONELIST_ORDER_LEN,
6775 		.mode		= 0644,
6776 		.proc_handler	= numa_zonelist_order_handler,
6777 	},
6778 	{
6779 		.procname	= "min_unmapped_ratio",
6780 		.data		= &sysctl_min_unmapped_ratio,
6781 		.maxlen		= sizeof(sysctl_min_unmapped_ratio),
6782 		.mode		= 0644,
6783 		.proc_handler	= sysctl_min_unmapped_ratio_sysctl_handler,
6784 		.extra1		= SYSCTL_ZERO,
6785 		.extra2		= SYSCTL_ONE_HUNDRED,
6786 	},
6787 	{
6788 		.procname	= "min_slab_ratio",
6789 		.data		= &sysctl_min_slab_ratio,
6790 		.maxlen		= sizeof(sysctl_min_slab_ratio),
6791 		.mode		= 0644,
6792 		.proc_handler	= sysctl_min_slab_ratio_sysctl_handler,
6793 		.extra1		= SYSCTL_ZERO,
6794 		.extra2		= SYSCTL_ONE_HUNDRED,
6795 	},
6796 #endif
6797 };
6798 
6799 void __init page_alloc_sysctl_init(void)
6800 {
6801 	register_sysctl_init("vm", page_alloc_sysctl_table);
6802 }
6803 
6804 #ifdef CONFIG_CONTIG_ALLOC
6805 /* Usage: See admin-guide/dynamic-debug-howto.rst */
6806 static void alloc_contig_dump_pages(struct list_head *page_list)
6807 {
6808 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
6809 
6810 	if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
6811 		struct page *page;
6812 
6813 		dump_stack();
6814 		list_for_each_entry(page, page_list, lru)
6815 			dump_page(page, "migration failure");
6816 	}
6817 }
6818 
6819 /* [start, end) must belong to a single zone. */
6820 static int __alloc_contig_migrate_range(struct compact_control *cc,
6821 					unsigned long start, unsigned long end)
6822 {
6823 	/* This function is based on compact_zone() from compaction.c. */
6824 	unsigned int nr_reclaimed;
6825 	unsigned long pfn = start;
6826 	unsigned int tries = 0;
6827 	int ret = 0;
6828 	struct migration_target_control mtc = {
6829 		.nid = zone_to_nid(cc->zone),
6830 		.gfp_mask = cc->gfp_mask,
6831 		.reason = MR_CONTIG_RANGE,
6832 	};
6833 
6834 	lru_cache_disable();
6835 
6836 	while (pfn < end || !list_empty(&cc->migratepages)) {
6837 		if (fatal_signal_pending(current)) {
6838 			ret = -EINTR;
6839 			break;
6840 		}
6841 
6842 		if (list_empty(&cc->migratepages)) {
6843 			cc->nr_migratepages = 0;
6844 			ret = isolate_migratepages_range(cc, pfn, end);
6845 			if (ret && ret != -EAGAIN)
6846 				break;
6847 			pfn = cc->migrate_pfn;
6848 			tries = 0;
6849 		} else if (++tries == 5) {
6850 			ret = -EBUSY;
6851 			break;
6852 		}
6853 
6854 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6855 							&cc->migratepages);
6856 		cc->nr_migratepages -= nr_reclaimed;
6857 
6858 		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
6859 			NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
6860 
6861 		/*
6862 		 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
6863 		 * to retry again over this error, so do the same here.
6864 		 */
6865 		if (ret == -ENOMEM)
6866 			break;
6867 	}
6868 
6869 	lru_cache_enable();
6870 	if (ret < 0) {
6871 		if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6872 			alloc_contig_dump_pages(&cc->migratepages);
6873 		putback_movable_pages(&cc->migratepages);
6874 	}
6875 
6876 	return (ret < 0) ? ret : 0;
6877 }
6878 
6879 static void split_free_frozen_pages(struct list_head *list, gfp_t gfp_mask)
6880 {
6881 	int order;
6882 
6883 	for (order = 0; order < NR_PAGE_ORDERS; order++) {
6884 		struct page *page, *next;
6885 		int nr_pages = 1 << order;
6886 
6887 		list_for_each_entry_safe(page, next, &list[order], lru) {
6888 			int i;
6889 
6890 			post_alloc_hook(page, order, gfp_mask);
6891 			if (!order)
6892 				continue;
6893 
6894 			__split_page(page, order);
6895 
6896 			/* Add all subpages to the order-0 head, in sequence. */
6897 			list_del(&page->lru);
6898 			for (i = 0; i < nr_pages; i++)
6899 				list_add_tail(&page[i].lru, &list[0]);
6900 		}
6901 	}
6902 }
6903 
6904 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
6905 {
6906 	const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
6907 	const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
6908 				  __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO |
6909 				  __GFP_SKIP_KASAN;
6910 	const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
6911 
6912 	/*
6913 	 * We are given the range to allocate; node, mobility and placement
6914 	 * hints are irrelevant at this point. We'll simply ignore them.
6915 	 */
6916 	gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE |
6917 		      __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE);
6918 
6919 	/*
6920 	 * We only support most reclaim flags (but not NOFAIL/NORETRY), and
6921 	 * selected action flags.
6922 	 */
6923 	if (gfp_mask & ~(reclaim_mask | action_mask))
6924 		return -EINVAL;
6925 
6926 	/*
6927 	 * Flags to control page compaction/migration/reclaim, to free up our
6928 	 * page range. Migratable pages are movable, __GFP_MOVABLE is implied
6929 	 * for them.
6930 	 *
6931 	 * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that
6932 	 * to not degrade callers.
6933 	 */
6934 	*gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
6935 			__GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
6936 	return 0;
6937 }
6938 
6939 static void __free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
6940 {
6941 	for (; nr_pages--; pfn++)
6942 		free_frozen_pages(pfn_to_page(pfn), 0);
6943 }
6944 
6945 /**
6946  * alloc_contig_frozen_range() -- tries to allocate given range of frozen pages
6947  * @start:	start PFN to allocate
6948  * @end:	one-past-the-last PFN to allocate
6949  * @alloc_flags:	allocation information
6950  * @gfp_mask:	GFP mask. Node/zone/placement hints are ignored; only some
6951  *		action and reclaim modifiers are supported. Reclaim modifiers
6952  *		control allocation behavior during compaction/migration/reclaim.
6953  *
6954  * The PFN range does not have to be pageblock aligned. The PFN range must
6955  * belong to a single zone.
6956  *
6957  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
6958  * pageblocks in the range.  Once isolated, the pageblocks should not
6959  * be modified by others.
6960  *
6961  * All frozen pages whose PFN is in [start, end) are allocated for the
6962  * caller and can be freed with free_contig_frozen_range();
6963  * free_frozen_pages() can also be used to free compound frozen pages
6964  * directly.
6965  *
6966  * Return: zero on success or negative error code.
6967  */
6968 int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
6969 		acr_flags_t alloc_flags, gfp_t gfp_mask)
6970 {
6971 	const unsigned int order = ilog2(end - start);
6972 	unsigned long outer_start, outer_end;
6973 	int ret = 0;
6974 
6975 	struct compact_control cc = {
6976 		.nr_migratepages = 0,
6977 		.order = -1,
6978 		.zone = page_zone(pfn_to_page(start)),
6979 		.mode = MIGRATE_SYNC,
6980 		.ignore_skip_hint = true,
6981 		.no_set_skip_hint = true,
6982 		.alloc_contig = true,
6983 	};
6984 	INIT_LIST_HEAD(&cc.migratepages);
6985 	enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ?
6986 					    PB_ISOLATE_MODE_CMA_ALLOC :
6987 					    PB_ISOLATE_MODE_OTHER;
6988 
6989 	/*
6990 	 * In contrast to the buddy, we allow for orders here that exceed
6991 	 * MAX_PAGE_ORDER, so we must manually make sure that we are not
6992 	 * exceeding the maximum folio order.
6993 	 */
6994 	if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER))
6995 		return -EINVAL;
6996 
6997 	gfp_mask = current_gfp_context(gfp_mask);
6998 	if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
6999 		return -EINVAL;
7000 
7001 	/*
7002 	 * What we do here is we mark all pageblocks in range as
7003 	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
7004 	 * have different sizes, and due to the way the page allocator
7005 	 * works, start_isolate_page_range() has special handling for this.
7006 	 *
7007 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
7008 	 * migrate the pages from an unaligned range (i.e. pages that
7009 	 * we are interested in). This will put all the pages in
7010 	 * range back to page allocator as MIGRATE_ISOLATE.
7011 	 *
7012 	 * When this is done, we take the pages in range from page
7013 	 * allocator removing them from the buddy system.  This way
7014 	 * page allocator will never consider using them.
7015 	 *
7016 	 * This lets us mark the pageblocks back as
7017 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
7018 	 * aligned range but not in the unaligned, original range are
7019 	 * put back to page allocator so that buddy can use them.
7020 	 */
7021 
7022 	ret = start_isolate_page_range(start, end, mode);
7023 	if (ret)
7024 		goto done;
7025 
7026 	drain_all_pages(cc.zone);
7027 
7028 	/*
7029 	 * In case of -EBUSY, we'd like to know which page causes the problem.
7030 	 * So, just fall through. test_pages_isolated() has a tracepoint
7031 	 * which will report the busy page.
7032 	 *
7033 	 * It is possible that busy pages could become available before
7034 	 * the call to test_pages_isolated, and the range will actually be
7035 	 * allocated.  So, if we fall through be sure to clear ret so that
7036 	 * -EBUSY is not accidentally used or returned to caller.
7037 	 */
7038 	ret = __alloc_contig_migrate_range(&cc, start, end);
7039 	if (ret && ret != -EBUSY)
7040 		goto done;
7041 
7042 	/*
7043 	 * When in-use hugetlb pages are migrated, they may simply be released
7044 	 * back into the free hugepage pool instead of being returned to the
7045 	 * buddy system.  After the migration of in-use huge pages is completed,
7046 	 * we will invoke replace_free_hugepage_folios() to ensure that these
7047 	 * hugepages are properly released to the buddy system.
7048 	 */
7049 	ret = replace_free_hugepage_folios(start, end);
7050 	if (ret)
7051 		goto done;
7052 
7053 	/*
7054 	 * Pages from [start, end) are within a pageblock_nr_pages
7055 	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
7056 	 * more, all pages in [start, end) are free in page allocator.
7057 	 * What we are going to do is to allocate all pages from
7058 	 * [start, end) (that is remove them from page allocator).
7059 	 *
7060 	 * The only problem is that pages at the beginning and at the
7061 	 * end of the range of interest may not be aligned with pages that
7062 	 * the page allocator holds, i.e. they can be part of higher order
7063 	 * pages.  Because of this, we reserve the bigger range and
7064 	 * once this is done free the pages we are not interested in.
7065 	 *
7066 	 * We don't have to hold zone->lock here because the pages are
7067 	 * isolated thus they won't get removed from buddy.
7068 	 */
7069 	outer_start = find_large_buddy(start);
7070 
7071 	/* Make sure the range is really isolated. */
7072 	if (test_pages_isolated(outer_start, end, mode)) {
7073 		ret = -EBUSY;
7074 		goto done;
7075 	}
7076 
7077 	/* Grab isolated pages from freelists. */
7078 	outer_end = isolate_freepages_range(&cc, outer_start, end);
7079 	if (!outer_end) {
7080 		ret = -EBUSY;
7081 		goto done;
7082 	}
7083 
7084 	if (!(gfp_mask & __GFP_COMP)) {
7085 		split_free_frozen_pages(cc.freepages, gfp_mask);
7086 
7087 		/* Free head and tail (if any) */
7088 		if (start != outer_start)
7089 			__free_contig_frozen_range(outer_start, start - outer_start);
7090 		if (end != outer_end)
7091 			__free_contig_frozen_range(end, outer_end - end);
7092 	} else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
7093 		struct page *head = pfn_to_page(start);
7094 
7095 		check_new_pages(head, order);
7096 		prep_new_page(head, order, gfp_mask, 0);
7097 	} else {
7098 		ret = -EINVAL;
7099 		WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
7100 		     start, end, outer_start, outer_end);
7101 	}
7102 done:
7103 	undo_isolate_page_range(start, end);
7104 	return ret;
7105 }
7106 EXPORT_SYMBOL(alloc_contig_frozen_range_noprof);
7107 
7108 /**
7109  * alloc_contig_range() -- tries to allocate given range of pages
7110  * @start:	start PFN to allocate
7111  * @end:	one-past-the-last PFN to allocate
7112  * @alloc_flags:	allocation information
7113  * @gfp_mask:	GFP mask.
7114  *
7115  * This routine is a wrapper around alloc_contig_frozen_range(); it can't
7116  * be used to allocate compound pages, and the refcount of each allocated page
7117  * will be set to one.
7118  *
7119  * All pages whose PFN is in [start, end) are allocated for the caller,
7120  * and should be freed with free_contig_range() or by manually calling
7121  * __free_page() on each allocated page.
7122  *
7123  * Return: zero on success or negative error code.
7124  */
7125 int alloc_contig_range_noprof(unsigned long start, unsigned long end,
7126 			      acr_flags_t alloc_flags, gfp_t gfp_mask)
7127 {
7128 	int ret;
7129 
7130 	if (WARN_ON(gfp_mask & __GFP_COMP))
7131 		return -EINVAL;
7132 
7133 	ret = alloc_contig_frozen_range_noprof(start, end, alloc_flags, gfp_mask);
7134 	if (!ret)
7135 		set_pages_refcounted(pfn_to_page(start), end - start);
7136 
7137 	return ret;
7138 }
7139 EXPORT_SYMBOL(alloc_contig_range_noprof);
7140 
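/*
 * Minimal usage sketch (illustrative only; the caller, PFN range and GFP
 * flags are assumptions, error handling is elided, and the sketch assumes
 * an alloc_contig_range() wrapper matching the _noprof signature above):
 * claim a specific, already-known physically contiguous PFN range and
 * release it again with free_contig_range().
 *
 *	unsigned long start_pfn = ..., nr = ...;
 *	int err;
 *
 *	err = alloc_contig_range(start_pfn, start_pfn + nr,
 *				 ACR_FLAGS_NONE, GFP_KERNEL | __GFP_NOWARN);
 *	if (err)
 *		return err;
 *	...
 *	free_contig_range(start_pfn, nr);
 */
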
7141 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
7142 				   unsigned long nr_pages, bool skip_hugetlb,
7143 				   bool *skipped_hugetlb)
7144 {
7145 	unsigned long end_pfn = start_pfn + nr_pages;
7146 	struct page *page;
7147 
7148 	while (start_pfn < end_pfn) {
7149 		unsigned long step = 1;
7150 
7151 		page = pfn_to_online_page(start_pfn);
7152 		if (!page)
7153 			return false;
7154 
7155 		if (page_zone(page) != z)
7156 			return false;
7157 
7158 		if (page_is_unmovable(z, page, PB_ISOLATE_MODE_OTHER, &step))
7159 			return false;
7160 
7161 		/*
7162 		 * Only consider ranges containing hugepages if those pages are
7163 		 * smaller than the requested contiguous region.  e.g.:
7164 		 *     Move 2MB pages to free up a 1GB range.
7165 		 *     Don't move 1GB pages to free up a 2MB range.
7166 		 *
7167 		 * This makes contiguous allocation more reliable if multiple
7168 		 * hugepage sizes are used without causing needless movement.
7169 		 */
7170 		if (PageHuge(page)) {
7171 			unsigned int order;
7172 
7173 			if (skip_hugetlb) {
7174 				*skipped_hugetlb = true;
7175 				return false;
7176 			}
7177 
7178 			page = compound_head(page);
7179 			order = compound_order(page);
7180 			if ((order >= MAX_FOLIO_ORDER) ||
7181 			    (nr_pages <= (1 << order)))
7182 				return false;
7183 		}
7184 
7185 		start_pfn += step;
7186 	}
7187 	return true;
7188 }
7189 
7190 static bool zone_spans_last_pfn(const struct zone *zone,
7191 				unsigned long start_pfn, unsigned long nr_pages)
7192 {
7193 	unsigned long last_pfn = start_pfn + nr_pages - 1;
7194 
7195 	return zone_spans_pfn(zone, last_pfn);
7196 }
7197 
7198 /**
7199  * alloc_contig_frozen_pages() -- tries to find and allocate contiguous range of frozen pages
7200  * @nr_pages:	Number of contiguous pages to allocate
7201  * @gfp_mask:	GFP mask. Node/zone/placement hints limit the search; only some
7202  *		action and reclaim modifiers are supported. Reclaim modifiers
7203  *		control allocation behavior during compaction/migration/reclaim.
7204  * @nid:	Target node
7205  * @nodemask:	Mask for other possible nodes
7206  *
7207  * This routine is a wrapper around alloc_contig_frozen_range(). It scans over
7208  * zones on an applicable zonelist to find a contiguous pfn range which can then
7209  * be tried for allocation with alloc_contig_frozen_range(). This routine is
7210  * intended for allocation requests which can not be fulfilled with the buddy
7211  * intended for allocation requests which cannot be fulfilled with the buddy
7212  *
7213  * The allocated memory is always aligned to a page boundary. If nr_pages is a
7214  * power of two, then the allocated range is also guaranteed to be aligned
7215  * to the same nr_pages (e.g. a 1GB request would be aligned to 1GB).
7216  *
7217  * Allocated frozen pages need to be freed with free_contig_frozen_range(),
7218  * or by manually calling free_frozen_pages() on each allocated frozen
7219  * non-compound page; compound frozen pages can be freed with
7220  * free_frozen_pages() directly.
7221  *
7222  * Return: pointer to contiguous frozen pages on success, or NULL if not successful.
7223  */
7224 struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages,
7225 		gfp_t gfp_mask, int nid, nodemask_t *nodemask)
7226 {
7227 	unsigned long ret, pfn, flags;
7228 	struct zonelist *zonelist;
7229 	struct zone *zone;
7230 	struct zoneref *z;
7231 	bool skip_hugetlb = true;
7232 	bool skipped_hugetlb = false;
7233 
7234 retry:
7235 	zonelist = node_zonelist(nid, gfp_mask);
7236 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
7237 					gfp_zone(gfp_mask), nodemask) {
7238 		spin_lock_irqsave(&zone->lock, flags);
7239 
7240 		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
7241 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
7242 			if (pfn_range_valid_contig(zone, pfn, nr_pages,
7243 						   skip_hugetlb,
7244 						   &skipped_hugetlb)) {
7245 				/*
7246 				 * We release the zone lock here because
7247 				 * alloc_contig_frozen_range() will also lock
7248 				 * the zone at some point. If there's an
7249 				 * allocation spinning on this lock, it may
7250 				 * win the race and cause allocation to fail.
7251 				 */
7252 				spin_unlock_irqrestore(&zone->lock, flags);
7253 				ret = alloc_contig_frozen_range_noprof(pfn,
7254 							pfn + nr_pages,
7255 							ACR_FLAGS_NONE,
7256 							gfp_mask);
7257 				if (!ret)
7258 					return pfn_to_page(pfn);
7259 				spin_lock_irqsave(&zone->lock, flags);
7260 			}
7261 			pfn += nr_pages;
7262 		}
7263 		spin_unlock_irqrestore(&zone->lock, flags);
7264 	}
7265 	/*
7266 	 * If we failed, retry the search, but treat regions with HugeTLB pages
7267  * as valid targets.  This retains fast allocations on the first pass
7268 	 * without trying to migrate HugeTLB pages (which may fail). On the
7269 	 * second pass, we will try moving HugeTLB pages when those pages are
7270 	 * smaller than the requested contiguous region size.
7271 	 */
7272 	if (skip_hugetlb && skipped_hugetlb) {
7273 		skip_hugetlb = false;
7274 		goto retry;
7275 	}
7276 	return NULL;
7277 }
7278 EXPORT_SYMBOL(alloc_contig_frozen_pages_noprof);
7279 
7280 /**
7281  * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
7282  * @nr_pages:	Number of contiguous pages to allocate
7283  * @gfp_mask:	GFP mask.
7284  * @nid:	Target node
7285  * @nodemask:	Mask for other possible nodes
7286  *
7287  * This routine is a wrapper around alloc_contig_frozen_pages(); it can't
7288  * be used to allocate compound pages, and the refcount of each allocated page
7289  * will be set to one.
7290  *
7291  * Allocated pages can be freed with free_contig_range() or by manually
7292  * calling __free_page() on each allocated page.
7293  *
7294  * Return: pointer to contiguous pages on success, or NULL if not successful.
7295  */
7296 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
7297 		int nid, nodemask_t *nodemask)
7298 {
7299 	struct page *page;
7300 
7301 	if (WARN_ON(gfp_mask & __GFP_COMP))
7302 		return NULL;
7303 
7304 	page = alloc_contig_frozen_pages_noprof(nr_pages, gfp_mask, nid,
7305 						nodemask);
7306 	if (page)
7307 		set_pages_refcounted(page, nr_pages);
7308 
7309 	return page;
7310 }
7311 EXPORT_SYMBOL(alloc_contig_pages_noprof);
7312 
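/*
 * Minimal usage sketch (illustrative only; node, GFP flags and the buffer
 * size are assumptions, and an alloc_contig_pages() wrapper matching the
 * _noprof signature above is assumed): let the allocator find a naturally
 * aligned 1 GiB contiguous range and free it with free_contig_range() when
 * done.
 *
 *	unsigned long nr = SZ_1G / PAGE_SIZE;
 *	struct page *page;
 *
 *	page = alloc_contig_pages(nr, GFP_KERNEL | __GFP_NOWARN,
 *				  numa_node_id(), NULL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	free_contig_range(page_to_pfn(page), nr);
 */
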
7313 /**
7314  * free_contig_frozen_range() -- free the contiguous range of frozen pages
7315  * @pfn:	start PFN to free
7316  * @nr_pages:	Number of contiguous frozen pages to free
7317  *
7318  * This can be used to free the allocated compound/non-compound frozen pages.
7319  */
7320 void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
7321 {
7322 	struct page *first_page = pfn_to_page(pfn);
7323 	const unsigned int order = ilog2(nr_pages);
7324 
7325 	if (WARN_ON_ONCE(first_page != compound_head(first_page)))
7326 		return;
7327 
7328 	if (PageHead(first_page)) {
7329 		WARN_ON_ONCE(order != compound_order(first_page));
7330 		free_frozen_pages(first_page, order);
7331 		return;
7332 	}
7333 
7334 	__free_contig_frozen_range(pfn, nr_pages);
7335 }
7336 EXPORT_SYMBOL(free_contig_frozen_range);
7337 
7338 /**
7339  * free_contig_range() -- free the contiguous range of pages
7340  * @pfn:	start PFN to free
7341  * @nr_pages:	Number of contiguous pages to free
7342  *
7343  * This can be only used to free the allocated non-compound pages.
7344  */
7345 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
7346 {
7347 	if (WARN_ON_ONCE(PageHead(pfn_to_page(pfn))))
7348 		return;
7349 
7350 	for (; nr_pages--; pfn++)
7351 		__free_page(pfn_to_page(pfn));
7352 }
7353 EXPORT_SYMBOL(free_contig_range);
7354 #endif /* CONFIG_CONTIG_ALLOC */
7355 
7356 /*
7357  * Effectively disable pcplists for the zone by setting the high limit to 0
7358  * and draining all cpus. A concurrent page freeing on another CPU that's about
7359  * to put the page on pcplist will either finish before the drain and the page
7360  * will be drained, or observe the new high limit and skip the pcplist.
7361  *
7362  * Must be paired with a call to zone_pcp_enable().
7363  */
7364 void zone_pcp_disable(struct zone *zone)
7365 {
7366 	mutex_lock(&pcp_batch_high_lock);
7367 	__zone_set_pageset_high_and_batch(zone, 0, 0, 1);
7368 	__drain_all_pages(zone, true);
7369 }
7370 
7371 void zone_pcp_enable(struct zone *zone)
7372 {
7373 	__zone_set_pageset_high_and_batch(zone, zone->pageset_high_min,
7374 		zone->pageset_high_max, zone->pageset_batch);
7375 	mutex_unlock(&pcp_batch_high_lock);
7376 }
7377 
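/*
 * Pairing sketch (illustrative; the work in the middle is an assumption):
 * callers that must not race with pcplists, e.g. page isolation or memory
 * offlining paths, bracket the critical work with the two helpers above.
 *
 *	zone_pcp_disable(zone);
 *	... operate on the zone's free lists without pcplist interference ...
 *	zone_pcp_enable(zone);
 */
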
7378 void zone_pcp_reset(struct zone *zone)
7379 {
7380 	int cpu;
7381 	struct per_cpu_zonestat *pzstats;
7382 
7383 	if (zone->per_cpu_pageset != &boot_pageset) {
7384 		for_each_online_cpu(cpu) {
7385 			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
7386 			drain_zonestat(zone, pzstats);
7387 		}
7388 		free_percpu(zone->per_cpu_pageset);
7389 		zone->per_cpu_pageset = &boot_pageset;
7390 		if (zone->per_cpu_zonestats != &boot_zonestats) {
7391 			free_percpu(zone->per_cpu_zonestats);
7392 			zone->per_cpu_zonestats = &boot_zonestats;
7393 		}
7394 	}
7395 }
7396 
7397 #ifdef CONFIG_MEMORY_HOTREMOVE
7398 /*
7399  * All pages in the range must be in a single zone, must not contain holes,
7400  * must span full sections, and must be isolated before calling this function.
7401  *
7402  * Returns the number of managed (non-PageOffline()) pages in the range: the
7403  * number of pages for which memory offlining code must adjust managed page
7404  * counters using adjust_managed_page_count().
7405  */
7406 unsigned long __offline_isolated_pages(unsigned long start_pfn,
7407 		unsigned long end_pfn)
7408 {
7409 	unsigned long already_offline = 0, flags;
7410 	unsigned long pfn = start_pfn;
7411 	struct page *page;
7412 	struct zone *zone;
7413 	unsigned int order;
7414 
7415 	offline_mem_sections(pfn, end_pfn);
7416 	zone = page_zone(pfn_to_page(pfn));
7417 	spin_lock_irqsave(&zone->lock, flags);
7418 	while (pfn < end_pfn) {
7419 		page = pfn_to_page(pfn);
7420 		/*
7421 		 * The HWPoisoned page may not be in the buddy system, and
7422 		 * page_count() is not 0.
7423 		 */
7424 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7425 			pfn++;
7426 			continue;
7427 		}
7428 		/*
7429 		 * At this point all remaining PageOffline() pages have a
7430 		 * reference count of 0 and can simply be skipped.
7431 		 */
7432 		if (PageOffline(page)) {
7433 			BUG_ON(page_count(page));
7434 			BUG_ON(PageBuddy(page));
7435 			already_offline++;
7436 			pfn++;
7437 			continue;
7438 		}
7439 
7440 		BUG_ON(page_count(page));
7441 		BUG_ON(!PageBuddy(page));
7442 		VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE);
7443 		order = buddy_order(page);
7444 		del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE);
7445 		pfn += (1 << order);
7446 	}
7447 	spin_unlock_irqrestore(&zone->lock, flags);
7448 
7449 	return end_pfn - start_pfn - already_offline;
7450 }
7451 #endif
7452 
7453 /*
7454  * This function returns a stable result only if called under zone lock.
7455  */
7456 bool is_free_buddy_page(const struct page *page)
7457 {
7458 	unsigned long pfn = page_to_pfn(page);
7459 	unsigned int order;
7460 
7461 	for (order = 0; order < NR_PAGE_ORDERS; order++) {
7462 		const struct page *head = page - (pfn & ((1 << order) - 1));
7463 
7464 		if (PageBuddy(head) &&
7465 		    buddy_order_unsafe(head) >= order)
7466 			break;
7467 	}
7468 
7469 	return order <= MAX_PAGE_ORDER;
7470 }
7471 EXPORT_SYMBOL(is_free_buddy_page);
7472 
7473 #ifdef CONFIG_MEMORY_FAILURE
7474 static inline void add_to_free_list(struct page *page, struct zone *zone,
7475 				    unsigned int order, int migratetype,
7476 				    bool tail)
7477 {
7478 	__add_to_free_list(page, zone, order, migratetype, tail);
7479 	account_freepages(zone, 1 << order, migratetype);
7480 }
7481 
7482 /*
7483  * Break down a higher-order page in sub-pages, and keep our target out of
7484  * buddy allocator.
7485  */
7486 static void break_down_buddy_pages(struct zone *zone, struct page *page,
7487 				   struct page *target, int low, int high,
7488 				   int migratetype)
7489 {
7490 	unsigned long size = 1 << high;
7491 	struct page *current_buddy;
7492 
7493 	while (high > low) {
7494 		high--;
7495 		size >>= 1;
7496 
7497 		if (target >= &page[size]) {
7498 			current_buddy = page;
7499 			page = page + size;
7500 		} else {
7501 			current_buddy = page + size;
7502 		}
7503 
7504 		if (set_page_guard(zone, current_buddy, high))
7505 			continue;
7506 
7507 		add_to_free_list(current_buddy, zone, high, migratetype, false);
7508 		set_buddy_order(current_buddy, high);
7509 	}
7510 }
7511 
7512 /*
7513  * Take a page that will be marked as poisoned off the buddy allocator.
7514  */
7515 bool take_page_off_buddy(struct page *page)
7516 {
7517 	struct zone *zone = page_zone(page);
7518 	unsigned long pfn = page_to_pfn(page);
7519 	unsigned long flags;
7520 	unsigned int order;
7521 	bool ret = false;
7522 
7523 	spin_lock_irqsave(&zone->lock, flags);
7524 	for (order = 0; order < NR_PAGE_ORDERS; order++) {
7525 		struct page *page_head = page - (pfn & ((1 << order) - 1));
7526 		int page_order = buddy_order(page_head);
7527 
7528 		if (PageBuddy(page_head) && page_order >= order) {
7529 			unsigned long pfn_head = page_to_pfn(page_head);
7530 			int migratetype = get_pfnblock_migratetype(page_head,
7531 								   pfn_head);
7532 
7533 			del_page_from_free_list(page_head, zone, page_order,
7534 						migratetype);
7535 			break_down_buddy_pages(zone, page_head, page, 0,
7536 						page_order, migratetype);
7537 			SetPageHWPoisonTakenOff(page);
7538 			ret = true;
7539 			break;
7540 		}
7541 		if (page_count(page_head) > 0)
7542 			break;
7543 	}
7544 	spin_unlock_irqrestore(&zone->lock, flags);
7545 	return ret;
7546 }
7547 
7548 /*
7549  * Cancel takeoff done by take_page_off_buddy().
7550  */
7551 bool put_page_back_buddy(struct page *page)
7552 {
7553 	struct zone *zone = page_zone(page);
7554 	unsigned long flags;
7555 	bool ret = false;
7556 
7557 	spin_lock_irqsave(&zone->lock, flags);
7558 	if (put_page_testzero(page)) {
7559 		unsigned long pfn = page_to_pfn(page);
7560 		int migratetype = get_pfnblock_migratetype(page, pfn);
7561 
7562 		ClearPageHWPoisonTakenOff(page);
7563 		__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
7564 		if (TestClearPageHWPoison(page)) {
7565 			ret = true;
7566 		}
7567 	}
7568 	spin_unlock_irqrestore(&zone->lock, flags);
7569 
7570 	return ret;
7571 }
7572 #endif
7573 
7574 bool has_managed_zone(enum zone_type zone)
7575 {
7576 	struct pglist_data *pgdat;
7577 
7578 	for_each_online_pgdat(pgdat) {
7579 		if (managed_zone(&pgdat->node_zones[zone]))
7580 			return true;
7581 	}
7582 	return false;
7583 }
7584 
7585 #ifdef CONFIG_UNACCEPTED_MEMORY
7586 
7587 static bool lazy_accept = true;
7588 
7589 static int __init accept_memory_parse(char *p)
7590 {
7591 	if (!strcmp(p, "lazy")) {
7592 		lazy_accept = true;
7593 		return 0;
7594 	} else if (!strcmp(p, "eager")) {
7595 		lazy_accept = false;
7596 		return 0;
7597 	} else {
7598 		return -EINVAL;
7599 	}
7600 }
7601 early_param("accept_memory", accept_memory_parse);
7602 
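/*
 * Example (boot command line): "accept_memory=lazy" keeps the default
 * on-demand acceptance handled below, while "accept_memory=eager" causes
 * memory to be accepted up front at boot instead of being deferred to
 * first use.
 */
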
7603 static bool page_contains_unaccepted(struct page *page, unsigned int order)
7604 {
7605 	phys_addr_t start = page_to_phys(page);
7606 
7607 	return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
7608 }
7609 
7610 static void __accept_page(struct zone *zone, unsigned long *flags,
7611 			  struct page *page)
7612 {
7613 	list_del(&page->lru);
7614 	account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
7615 	__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
7616 	__ClearPageUnaccepted(page);
7617 	spin_unlock_irqrestore(&zone->lock, *flags);
7618 
7619 	accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
7620 
7621 	__free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
7622 }
7623 
7624 void accept_page(struct page *page)
7625 {
7626 	struct zone *zone = page_zone(page);
7627 	unsigned long flags;
7628 
7629 	spin_lock_irqsave(&zone->lock, flags);
7630 	if (!PageUnaccepted(page)) {
7631 		spin_unlock_irqrestore(&zone->lock, flags);
7632 		return;
7633 	}
7634 
7635 	/* Unlocks zone->lock */
7636 	__accept_page(zone, &flags, page);
7637 }
7638 
7639 static bool try_to_accept_memory_one(struct zone *zone)
7640 {
7641 	unsigned long flags;
7642 	struct page *page;
7643 
7644 	spin_lock_irqsave(&zone->lock, flags);
7645 	page = list_first_entry_or_null(&zone->unaccepted_pages,
7646 					struct page, lru);
7647 	if (!page) {
7648 		spin_unlock_irqrestore(&zone->lock, flags);
7649 		return false;
7650 	}
7651 
7652 	/* Unlocks zone->lock */
7653 	__accept_page(zone, &flags, page);
7654 
7655 	return true;
7656 }
7657 
7658 static bool cond_accept_memory(struct zone *zone, unsigned int order,
7659 			       int alloc_flags)
7660 {
7661 	long to_accept, wmark;
7662 	bool ret = false;
7663 
7664 	if (list_empty(&zone->unaccepted_pages))
7665 		return false;
7666 
7667 	/* Bailout, since try_to_accept_memory_one() needs to take a lock */
7668 	if (alloc_flags & ALLOC_TRYLOCK)
7669 		return false;
7670 
7671 	wmark = promo_wmark_pages(zone);
7672 
7673 	/*
7674 	 * Watermarks have not been initialized yet.
7675 	 *
7676 	 * Accept one MAX_ORDER page to ensure progress.
7677 	 */
7678 	if (!wmark)
7679 		return try_to_accept_memory_one(zone);
7680 
7681 	/* How much to accept to get to promo watermark? */
7682 	to_accept = wmark -
7683 		    (zone_page_state(zone, NR_FREE_PAGES) -
7684 		    __zone_watermark_unusable_free(zone, order, 0) -
7685 		    zone_page_state(zone, NR_UNACCEPTED));
7686 
7687 	while (to_accept > 0) {
7688 		if (!try_to_accept_memory_one(zone))
7689 			break;
7690 		ret = true;
7691 		to_accept -= MAX_ORDER_NR_PAGES;
7692 	}
7693 
7694 	return ret;
7695 }
7696 
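/*
 * Illustrative example (the numbers are assumptions): with a promo
 * watermark of 20,000 pages, 18,000 free pages of which 1,000 are unusable
 * for this allocation and 5,000 are still unaccepted, to_accept =
 * 20,000 - (18,000 - 1,000 - 5,000) = 8,000 pages, so cond_accept_memory()
 * keeps accepting unaccepted MAX_ORDER blocks until at least that many
 * pages have been accepted.
 */
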
7697 static bool __free_unaccepted(struct page *page)
7698 {
7699 	struct zone *zone = page_zone(page);
7700 	unsigned long flags;
7701 
7702 	if (!lazy_accept)
7703 		return false;
7704 
7705 	spin_lock_irqsave(&zone->lock, flags);
7706 	list_add_tail(&page->lru, &zone->unaccepted_pages);
7707 	account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
7708 	__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
7709 	__SetPageUnaccepted(page);
7710 	spin_unlock_irqrestore(&zone->lock, flags);
7711 
7712 	return true;
7713 }
7714 
7715 #else
7716 
7717 static bool page_contains_unaccepted(struct page *page, unsigned int order)
7718 {
7719 	return false;
7720 }
7721 
7722 static bool cond_accept_memory(struct zone *zone, unsigned int order,
7723 			       int alloc_flags)
7724 {
7725 	return false;
7726 }
7727 
7728 static bool __free_unaccepted(struct page *page)
7729 {
7730 	BUILD_BUG();
7731 	return false;
7732 }
7733 
7734 #endif /* CONFIG_UNACCEPTED_MEMORY */
7735 
7736 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
7737 {
7738 	/*
7739 	 * Do not specify __GFP_DIRECT_RECLAIM, since direct reclaim is not allowed.
7740 	 * Do not specify __GFP_KSWAPD_RECLAIM either, since waking up kswapd
7741 	 * is not safe in an arbitrary context.
7742 	 *
7743 	 * These two are the conditions for gfpflags_allow_spinning() being true.
7744 	 *
7745 	 * Specify __GFP_NOWARN since failing alloc_pages_nolock() is not a reason
7746 	 * to warn. Also, a warning would trigger printk() which is unsafe from
7747 	 * various contexts. We cannot use printk_deferred_enter() to mitigate,
7748 	 * since the running context is unknown.
7749 	 *
7750 	 * Specify __GFP_ZERO to make sure that call to kmsan_alloc_page() below
7751 	 * is safe in any context. Also zeroing the page is mandatory for
7752 	 * BPF use cases.
7753 	 *
7754 	 * Though __GFP_NOMEMALLOC is not checked in the code path below,
7755 	 * specify it here to highlight that alloc_pages_nolock()
7756 	 * doesn't want to deplete reserves.
7757 	 */
7758 	gfp_t alloc_gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_NOMEMALLOC | __GFP_COMP
7759 			| gfp_flags;
7760 	unsigned int alloc_flags = ALLOC_TRYLOCK;
7761 	struct alloc_context ac = { };
7762 	struct page *page;
7763 
7764 	VM_WARN_ON_ONCE(gfp_flags & ~__GFP_ACCOUNT);
7765 	/*
7766 	 * In PREEMPT_RT spin_trylock() will call raw_spin_lock() which is
7767 	 * unsafe in NMI. If spin_trylock() is called from hard IRQ the current
7768 	 * task may be waiting for one rt_spin_lock, but rt_spin_trylock() will
7769 	 * mark the task as the owner of another rt_spin_lock which will
7770 	 * confuse PI logic, so return immediately if called from hard IRQ or
7771 	 * NMI.
7772 	 *
7773 	 * Note, irqs_disabled() case is ok. This function can be called
7774 	 * from raw_spin_lock_irqsave region.
7775 	 */
7776 	if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
7777 		return NULL;
7778 	if (!pcp_allowed_order(order))
7779 		return NULL;
7780 
7781 	/* Bailout, since _deferred_grow_zone() needs to take a lock */
7782 	if (deferred_pages_enabled())
7783 		return NULL;
7784 
7785 	if (nid == NUMA_NO_NODE)
7786 		nid = numa_node_id();
7787 
7788 	prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac,
7789 			    &alloc_gfp, &alloc_flags);
7790 
7791 	/*
7792 	 * Best effort allocation from percpu free list.
7793 	 * If it's empty attempt to spin_trylock zone->lock.
7794 	 */
7795 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
7796 
7797 	/* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
7798 
7799 	if (memcg_kmem_online() && page && (gfp_flags & __GFP_ACCOUNT) &&
7800 	    unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
7801 		__free_frozen_pages(page, order, FPI_TRYLOCK);
7802 		page = NULL;
7803 	}
7804 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
7805 	kmsan_alloc_page(page, order, alloc_gfp);
7806 	return page;
7807 }
7808 /**
7809  * alloc_pages_nolock - opportunistic reentrant allocation from any context
7810  * @gfp_flags: GFP flags. Only __GFP_ACCOUNT allowed.
7811  * @nid: node to allocate from
7812  * @order: allocation order size
7813  *
7814  * Allocates pages of a given order from the given node. This is safe to
7815  * call from any context (from atomic, NMI, and also reentrant
7816  * allocator -> tracepoint -> alloc_pages_nolock_noprof).
7817  * Allocation is best effort and expected to fail easily, so nobody should
7818  * rely on its success. Failures are not reported via warn_alloc().
7819  * See the always-fail conditions below.
7820  *
7821  * Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN.
7822  * It means ENOMEM. There is no reason to call it again and expect !NULL.
7823  */
7824 struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
7825 {
7826 	struct page *page;
7827 
7828 	page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order);
7829 	if (page)
7830 		set_page_refcounted(page);
7831 	return page;
7832 }
7833 EXPORT_SYMBOL_GPL(alloc_pages_nolock_noprof);
7834
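/*
 * Minimal usage sketch (illustrative; the calling context and the later
 * freeing path are assumptions, and an alloc_pages_nolock() wrapper
 * matching the _noprof signature above is assumed): a caller in a context
 * that may hold arbitrary locks treats NULL simply as "no memory" and moves
 * on; the zeroed, refcounted page is released later from a regular context.
 *
 *	struct page *page;
 *
 *	page = alloc_pages_nolock(0, NUMA_NO_NODE, 0);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_page(page);
 */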