1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
3  *
4  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
9 
10 #include <linux/fs.h>
11 #include <linux/khugepaged.h>
12 #include <linux/mm.h>
13 #include <linux/mm_inline.h>
14 #include <linux/pagemap.h>
15 #include <linux/pagewalk.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/swapops.h>
19 #include <linux/swap_cgroup.h>
20 #include <linux/tracepoint-defs.h>
21 
22 /* Internal core VMA manipulation functions. */
23 #include "vma.h"
24 
25 struct folio_batch;
26 
27 /*
28  * Maintains state across a page table move. The operation assumes both source
29  * and destination VMAs already exist and are specified by the user.
30  *
31  * Partial moves are permitted, but the old and new ranges must both reside
32  * within a VMA.
33  *
34  * The mmap lock must be held for write, and VMA write locks must be held on
35  * any VMA that is visible.
36  *
37  * Use the PAGETABLE_MOVE() macro to initialise this struct.
38  *
39  * The old_addr and new_addr fields are updated as the page table move is
40  * executed.
41  *
42  * NOTE: The page table move is effected by reading from [old_addr, old_end),
43  * and old_addr may be updated for better page table alignment, so len_in
44  * represents the length of the range being copied as specified by the user.
45  */
46 struct pagetable_move_control {
47 	struct vm_area_struct *old; /* Source VMA. */
48 	struct vm_area_struct *new; /* Destination VMA. */
49 	unsigned long old_addr; /* Address from which the move begins. */
50 	unsigned long old_end; /* Exclusive address at which old range ends. */
51 	unsigned long new_addr; /* Address to move page tables to. */
52 	unsigned long len_in; /* Bytes to remap specified by user. */
53 
54 	bool need_rmap_locks; /* Do rmap locks need to be taken? */
55 	bool for_stack; /* Is this an early temp stack being moved? */
56 };
57 
58 #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
59 	struct pagetable_move_control name = {				\
60 		.old = old_,						\
61 		.new = new_,						\
62 		.old_addr = old_addr_,					\
63 		.old_end = (old_addr_) + (len_),			\
64 		.new_addr = new_addr_,					\
65 		.len_in = len_,						\
66 	}
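/*
 * Illustrative sketch, not part of this header: a caller that has already
 * looked up both VMAs and holds the locks described above might drive
 * move_page_tables() (declared near the end of this file) roughly like so,
 * where old_vma/new_vma/old_addr/new_addr/len are hypothetical locals:
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *	pmc.need_rmap_locks = ...;		// as the caller requires
 *	moved = move_page_tables(&pmc);		// returns bytes moved
 */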
67 
68 /*
69  * The set of flags that only affect watermark checking and reclaim
70  * behaviour. This is used by the MM to obey the caller constraints
71  * about IO, FS and watermark checking while ignoring placement
72  * hints such as HIGHMEM usage.
73  */
74 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
75 			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
76 			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
77 			__GFP_NOLOCKDEP)
78 
79 /* The GFP flags allowed during early boot */
80 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
81 
82 /* Control allocation cpuset and node placement constraints */
83 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
84 
85 /* Do not use these with a slab allocator */
86 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
87 
88 /*
89  * Different from WARN_ON_ONCE(), no warning will be issued
90  * when we specify __GFP_NOWARN.
91  */
92 #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
93 	static bool __section(".data..once") __warned;			\
94 	int __ret_warn_once = !!(cond);					\
95 									\
96 	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
97 		__warned = true;					\
98 		WARN_ON(1);						\
99 	}								\
100 	unlikely(__ret_warn_once);					\
101 })
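/*
 * Illustrative use (a sketch of how allocation paths typically use this):
 * warn once on a bogus request unless the caller asked for silence with
 * __GFP_NOWARN:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 */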
102 
103 void page_writeback_init(void);
104 
105 /*
106  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
107  * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
108  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
109  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
110  */
111 #define ENTIRELY_MAPPED		0x800000
112 #define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)
113 
114 /*
115  * Flags passed to __show_mem() and show_free_areas() to suppress output in
116  * various contexts.
117  */
118 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
119 
120 /*
121  * How many individual pages have an elevated _mapcount.  Excludes
122  * the folio's entire_mapcount.
123  *
124  * Don't use this function outside of debugging code.
125  */
126 static inline int folio_nr_pages_mapped(const struct folio *folio)
127 {
128 	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
129 		return -1;
130 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
131 }
132 
133 /*
134  * Retrieve the first entry of a folio based on a provided entry within the
135  * folio. We cannot rely on folio->swap as there is no guarantee that it has
136  * been initialized. Used for calling arch_swap_restore()
137  */
138 static inline swp_entry_t folio_swap(swp_entry_t entry,
139 		const struct folio *folio)
140 {
141 	swp_entry_t swap = {
142 		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
143 	};
144 
145 	return swap;
146 }
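/*
 * Worked example (illustrative): for a 16-page folio, an entry whose value is
 * 0x1234 yields the folio's first entry 0x1230, since
 * ALIGN_DOWN(0x1234, 16) == 0x1230; only the low offset bits change, so the
 * swap type encoded in the high bits of the value is unaffected.
 */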
147 
148 static inline void *folio_raw_mapping(const struct folio *folio)
149 {
150 	unsigned long mapping = (unsigned long)folio->mapping;
151 
152 	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
153 }
154 
155 /*
156  * This is a file-backed mapping, and is about to be memory mapped - invoke its
157  * mmap hook and safely handle error conditions. On error, the VMA's hooks are
158  * replaced with dummy ops so that no further hooks can be invoked.
159  *
160  * @file: File which backs the mapping.
161  * @vma:  VMA which we are mapping.
162  *
163  * Returns: 0 if success, error otherwise.
164  */
165 static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
166 {
167 	int err = call_mmap(file, vma);
168 
169 	if (likely(!err))
170 		return 0;
171 
172 	/*
173 	 * OK, we tried to call the file hook for mmap(), but an error
174 	 * arose. The mapping is in an inconsistent state and we must not invoke
175 	 * any further hooks on it.
176 	 */
177 	vma->vm_ops = &vma_dummy_vm_ops;
178 
179 	return err;
180 }
181 
182 /*
183  * If the VMA has a close hook then close it, and since closing it might leave
184  * it in an inconsistent state which makes the use of any hooks suspect, clear
185  * them down by installing dummy empty hooks.
186  */
187 static inline void vma_close(struct vm_area_struct *vma)
188 {
189 	if (vma->vm_ops && vma->vm_ops->close) {
190 		vma->vm_ops->close(vma);
191 
192 		/*
193 		 * The mapping is in an inconsistent state, and no further hooks
194 		 * may be invoked upon it.
195 		 */
196 		vma->vm_ops = &vma_dummy_vm_ops;
197 	}
198 }
199 
200 #ifdef CONFIG_MMU
201 
202 /* Flags for folio_pte_batch(). */
203 typedef int __bitwise fpb_t;
204 
205 /* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
206 #define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))
207 
208 /* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
209 #define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))
210 
211 static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
212 {
213 	if (flags & FPB_IGNORE_DIRTY)
214 		pte = pte_mkclean(pte);
215 	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
216 		pte = pte_clear_soft_dirty(pte);
217 	return pte_wrprotect(pte_mkold(pte));
218 }
219 
220 /**
221  * folio_pte_batch - detect a PTE batch for a large folio
222  * @folio: The large folio to detect a PTE batch for.
223  * @addr: The user virtual address the first page is mapped at.
224  * @start_ptep: Page table pointer for the first entry.
225  * @pte: Page table entry for the first page.
226  * @max_nr: The maximum number of table entries to consider.
227  * @flags: Flags to modify the PTE batch semantics.
228  * @any_writable: Optional pointer to indicate whether any entry except the
229  *		  first one is writable.
230  * @any_young: Optional pointer to indicate whether any entry except the
231  *		  first one is young.
232  * @any_dirty: Optional pointer to indicate whether any entry except the
233  *		  first one is dirty.
234  *
235  * Detect a PTE batch: consecutive (present) PTEs that map consecutive
236  * pages of the same large folio.
237  *
238  * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
239  * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
240  * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
241  *
242  * start_ptep must map any page of the folio. max_nr must be at least one and
243  * must be limited by the caller so scanning cannot exceed a single page table.
244  *
245  * Return: the number of table entries in the batch.
246  */
247 static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
248 		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
249 		bool *any_writable, bool *any_young, bool *any_dirty)
250 {
251 	pte_t expected_pte, *ptep;
252 	bool writable, young, dirty;
253 	int nr, cur_nr;
254 
255 	if (any_writable)
256 		*any_writable = false;
257 	if (any_young)
258 		*any_young = false;
259 	if (any_dirty)
260 		*any_dirty = false;
261 
262 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
263 	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
264 	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
265 
266 	/* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
267 	max_nr = min_t(unsigned long, max_nr,
268 		       folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
269 
270 	nr = pte_batch_hint(start_ptep, pte);
271 	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
272 	ptep = start_ptep + nr;
273 
274 	while (nr < max_nr) {
275 		pte = ptep_get(ptep);
276 		if (any_writable)
277 			writable = !!pte_write(pte);
278 		if (any_young)
279 			young = !!pte_young(pte);
280 		if (any_dirty)
281 			dirty = !!pte_dirty(pte);
282 		pte = __pte_batch_clear_ignored(pte, flags);
283 
284 		if (!pte_same(pte, expected_pte))
285 			break;
286 
287 		if (any_writable)
288 			*any_writable |= writable;
289 		if (any_young)
290 			*any_young |= young;
291 		if (any_dirty)
292 			*any_dirty |= dirty;
293 
294 		cur_nr = pte_batch_hint(ptep, pte);
295 		expected_pte = pte_advance_pfn(expected_pte, cur_nr);
296 		ptep += cur_nr;
297 		nr += cur_nr;
298 	}
299 
300 	return min(nr, max_nr);
301 }
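/*
 * Illustrative caller sketch (assumptions: ptep/addr reference a present PTE
 * of the large folio and the page table lock is held): batching lets a caller
 * handle all consecutively mapped pages of the folio in one go:
 *
 *	nr = folio_pte_batch(folio, addr, ptep, pte, max_nr,
 *			     FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY,
 *			     NULL, NULL, NULL);
 *	// operate on the batch: [addr, addr + nr * PAGE_SIZE)
 */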
302 
303 /**
304  * pte_move_swp_offset - Move the swap entry offset field of a swap pte
305  *	 forward or backward by delta
306  * @pte: The initial pte state; is_swap_pte(pte) must be true and
307  *	 non_swap_entry() must be false.
308  * @delta: The direction and the offset we are moving; forward if delta
309  *	 is positive; backward if delta is negative
310  *
311  * Moves the swap offset, while maintaining all other fields, including
312  * swap type, and any swp pte bits. The resulting pte is returned.
313  */
314 static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
315 {
316 	swp_entry_t entry = pte_to_swp_entry(pte);
317 	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
318 						   (swp_offset(entry) + delta)));
319 
320 	if (pte_swp_soft_dirty(pte))
321 		new = pte_swp_mksoft_dirty(new);
322 	if (pte_swp_exclusive(pte))
323 		new = pte_swp_mkexclusive(new);
324 	if (pte_swp_uffd_wp(pte))
325 		new = pte_swp_mkuffd_wp(new);
326 
327 	return new;
328 }
329 
330 
331 /**
332  * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
333  * @pte: The initial pte state; is_swap_pte(pte) must be true and
334  *	 non_swap_entry() must be false.
335  *
336  * Increments the swap offset, while maintaining all other fields, including
337  * swap type, and any swp pte bits. The resulting pte is returned.
338  */
339 static inline pte_t pte_next_swp_offset(pte_t pte)
340 {
341 	return pte_move_swp_offset(pte, 1);
342 }
343 
344 /**
345  * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
346  * @start_ptep: Page table pointer for the first entry.
347  * @max_nr: The maximum number of table entries to consider.
348  * @pte: Page table entry for the first entry.
349  *
350  * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
351  * containing swap entries all with consecutive offsets and targeting the same
352  * swap type, all with matching swp pte bits.
353  *
354  * max_nr must be at least one and must be limited by the caller so scanning
355  * cannot exceed a single page table.
356  *
357  * Return: the number of table entries in the batch.
358  */
359 static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
360 {
361 	pte_t expected_pte = pte_next_swp_offset(pte);
362 	const pte_t *end_ptep = start_ptep + max_nr;
363 	swp_entry_t entry = pte_to_swp_entry(pte);
364 	pte_t *ptep = start_ptep + 1;
365 	unsigned short cgroup_id;
366 
367 	VM_WARN_ON(max_nr < 1);
368 	VM_WARN_ON(!is_swap_pte(pte));
369 	VM_WARN_ON(non_swap_entry(entry));
370 
371 	cgroup_id = lookup_swap_cgroup_id(entry);
372 	while (ptep < end_ptep) {
373 		pte = ptep_get(ptep);
374 
375 		if (!pte_same(pte, expected_pte))
376 			break;
377 		if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
378 			break;
379 		expected_pte = pte_next_swp_offset(expected_pte);
380 		ptep++;
381 	}
382 
383 	return ptep - start_ptep;
384 }
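/*
 * Illustrative caller sketch (assumptions: pte is a swap PTE read under the
 * page table lock and max_nr does not cross the page table): the whole run of
 * consecutive swap entries can then be handled in a single operation:
 *
 *	nr = swap_pte_batch(ptep, max_nr, pte);
 *	// ptep[0..nr-1] hold consecutive offsets of the same swap type
 */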
385 #endif /* CONFIG_MMU */
386 
387 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
388 						int nr_throttled);
389 static inline void acct_reclaim_writeback(struct folio *folio)
390 {
391 	pg_data_t *pgdat = folio_pgdat(folio);
392 	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
393 
394 	if (nr_throttled)
395 		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
396 }
397 
398 static inline void wake_throttle_isolated(pg_data_t *pgdat)
399 {
400 	wait_queue_head_t *wqh;
401 
402 	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
403 	if (waitqueue_active(wqh))
404 		wake_up(wqh);
405 }
406 
407 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
408 static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
409 {
410 	vm_fault_t ret = __vmf_anon_prepare(vmf);
411 
412 	if (unlikely(ret & VM_FAULT_RETRY))
413 		vma_end_read(vmf->vma);
414 	return ret;
415 }
416 
417 vm_fault_t do_swap_page(struct vm_fault *vmf);
418 void folio_rotate_reclaimable(struct folio *folio);
419 bool __folio_end_writeback(struct folio *folio);
420 void deactivate_file_folio(struct folio *folio);
421 void folio_activate(struct folio *folio);
422 
423 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
424 		   struct vm_area_struct *start_vma, unsigned long floor,
425 		   unsigned long ceiling, bool mm_wr_locked);
426 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
427 
428 struct zap_details;
429 void unmap_page_range(struct mmu_gather *tlb,
430 			     struct vm_area_struct *vma,
431 			     unsigned long addr, unsigned long end,
432 			     struct zap_details *details);
433 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
434 			   gfp_t gfp);
435 
436 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
437 		unsigned int order);
438 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
439 static inline void force_page_cache_readahead(struct address_space *mapping,
440 		struct file *file, pgoff_t index, unsigned long nr_to_read)
441 {
442 	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
443 	force_page_cache_ra(&ractl, nr_to_read);
444 }
445 
446 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
447 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
448 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
449 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
450 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
451 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
452 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
453 		loff_t end);
454 long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
455 unsigned long mapping_try_invalidate(struct address_space *mapping,
456 		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
457 
458 /**
459  * folio_evictable - Test whether a folio is evictable.
460  * @folio: The folio to test.
461  *
462  * Test whether @folio is evictable -- i.e., should be placed on
463  * active/inactive lists vs unevictable list.
464  *
465  * Reasons folio might not be evictable:
466  * 1. folio's mapping marked unevictable
467  * 2. One of the pages in the folio is part of an mlocked VMA
468  */
469 static inline bool folio_evictable(struct folio *folio)
470 {
471 	bool ret;
472 
473 	/* Prevent address_space of inode and swap cache from being freed */
474 	rcu_read_lock();
475 	ret = !mapping_unevictable(folio_mapping(folio)) &&
476 			!folio_test_mlocked(folio);
477 	rcu_read_unlock();
478 	return ret;
479 }
480 
481 /*
482  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
483  * a count of one.
484  */
485 static inline void set_page_refcounted(struct page *page)
486 {
487 	VM_BUG_ON_PAGE(PageTail(page), page);
488 	VM_BUG_ON_PAGE(page_ref_count(page), page);
489 	set_page_count(page, 1);
490 }
491 
492 /*
493  * Return true if a folio needs ->release_folio() calling upon it.
494  */
495 static inline bool folio_needs_release(struct folio *folio)
496 {
497 	struct address_space *mapping = folio_mapping(folio);
498 
499 	return folio_has_private(folio) ||
500 		(mapping && mapping_release_always(mapping));
501 }
502 
503 extern unsigned long highest_memmap_pfn;
504 
505 /*
506  * Maximum number of reclaim retries without progress before the OOM
507  * killer is considered the only way forward.
508  */
509 #define MAX_RECLAIM_RETRIES 16
510 
511 /*
512  * in mm/vmscan.c:
513  */
514 bool folio_isolate_lru(struct folio *folio);
515 void folio_putback_lru(struct folio *folio);
516 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
517 
518 /*
519  * in mm/rmap.c:
520  */
521 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
522 
523 /*
524  * in mm/page_alloc.c
525  */
526 #define K(x) ((x) << (PAGE_SHIFT-10))
527 
528 extern char * const zone_names[MAX_NR_ZONES];
529 
530 /* perform sanity checks on struct pages being allocated or freed */
531 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
532 
533 extern int min_free_kbytes;
534 extern int defrag_mode;
535 
536 void setup_per_zone_wmarks(void);
537 void calculate_min_free_kbytes(void);
538 int __meminit init_per_zone_wmark_min(void);
539 void page_alloc_sysctl_init(void);
540 
541 /*
542  * Structure for holding the mostly immutable allocation parameters passed
543  * between functions involved in allocations, including the alloc_pages*
544  * family of functions.
545  *
546  * nodemask, migratetype and highest_zoneidx are initialized only once in
547  * __alloc_pages() and then never change.
548  *
549  * zonelist, preferred_zone and highest_zoneidx are set first in
550  * __alloc_pages() for the fast path, and might be later changed
551  * in __alloc_pages_slowpath(). All other functions pass the whole structure
552  * by a const pointer.
553  */
554 struct alloc_context {
555 	struct zonelist *zonelist;
556 	nodemask_t *nodemask;
557 	struct zoneref *preferred_zoneref;
558 	int migratetype;
559 
560 	/*
561 	 * highest_zoneidx represents highest usable zone index of
562 	 * the allocation request. Due to the nature of the zone,
563 	 * memory on lower zone than the highest_zoneidx will be
564 	 * protected by lowmem_reserve[highest_zoneidx].
565 	 *
566 	 * highest_zoneidx is also used by reclaim/compaction to limit
567 	 * the target zone since higher zone than this index cannot be
568 	 * usable for this allocation request.
569 	 */
570 	enum zone_type highest_zoneidx;
571 	bool spread_dirty_pages;
572 };
573 
574 /*
575  * This function returns the order of a free page in the buddy system. In
576  * general, page_zone(page)->lock must be held by the caller to prevent the
577  * page from being allocated in parallel and returning garbage as the order.
578  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
579  * page cannot be allocated or merged in parallel. Alternatively, it must
580  * handle invalid values gracefully, and use buddy_order_unsafe() below.
581  */
582 static inline unsigned int buddy_order(struct page *page)
583 {
584 	/* PageBuddy() must be checked by the caller */
585 	return page_private(page);
586 }
587 
588 /*
589  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
590  * PageBuddy() should be checked first by the caller to minimize race window,
591  * and invalid values must be handled gracefully.
592  *
593  * READ_ONCE is used so that if the caller assigns the result into a local
594  * variable and e.g. tests it for valid range before using, the compiler cannot
595  * decide to remove the variable and inline the page_private(page) multiple
596  * times, potentially observing different values in the tests and the actual
597  * use of the result.
598  */
599 #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
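/*
 * Illustrative use (assuming the caller cannot take zone->lock): read the
 * order once into a local and range-check it before trusting it, accepting
 * that the value may already be stale:
 *
 *	if (PageBuddy(page)) {
 *		unsigned int order = buddy_order_unsafe(page);
 *
 *		if (order <= MAX_PAGE_ORDER)
 *			...	// use "order" as a hint only
 *	}
 */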
600 
601 /*
602  * This function checks whether a page is free && is the buddy of @page.
603  * We can coalesce a page and its buddy if
604  * (a) the buddy is not in a hole (check before calling!) &&
605  * (b) the buddy is in the buddy system &&
606  * (c) a page and its buddy have the same order &&
607  * (d) a page and its buddy are in the same zone.
608  *
609  * For recording whether a page is in the buddy system, we set PageBuddy.
610  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
611  *
612  * For recording page's order, we use page_private(page).
613  */
614 static inline bool page_is_buddy(struct page *page, struct page *buddy,
615 				 unsigned int order)
616 {
617 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
618 		return false;
619 
620 	if (buddy_order(buddy) != order)
621 		return false;
622 
623 	/*
624 	 * zone check is done late to avoid uselessly calculating
625 	 * zone/node ids for pages that could never merge.
626 	 */
627 	if (page_zone_id(page) != page_zone_id(buddy))
628 		return false;
629 
630 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
631 
632 	return true;
633 }
634 
635 /*
636  * Locate the struct page for both the matching buddy in our
637  * pair (buddy1) and the combined O(n+1) page they form (page).
638  *
639  * 1) Any buddy B1 will have an order O twin B2 which satisfies
640  * the following equation:
641  *     B2 = B1 ^ (1 << O)
642  * For example, if the starting buddy (buddy2) is #8 its order
643  * 1 buddy is #10:
644  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
645  *
646  * 2) Any buddy B will have an order O+1 parent P which
647  * satisfies the following equation:
648  *     P = B & ~(1 << O)
649  *
650  * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
651  */
652 static inline unsigned long
653 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
654 {
655 	return page_pfn ^ (1 << order);
656 }
657 
658 /*
659  * Find the buddy of @page and validate it.
660  * @page: The input page
661  * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
662  *       function is used in the performance-critical __free_one_page().
663  * @order: The order of the page
664  * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
665  *             page_to_pfn().
666  *
667  * The found buddy can be a non-PageBuddy page, out of @page's zone, or its
668  * order may differ from @page's. The validation is necessary before using it.
669  *
670  * Return: the found buddy page or NULL if not found.
671  */
672 static inline struct page *find_buddy_page_pfn(struct page *page,
673 			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
674 {
675 	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
676 	struct page *buddy;
677 
678 	buddy = page + (__buddy_pfn - pfn);
679 	if (buddy_pfn)
680 		*buddy_pfn = __buddy_pfn;
681 
682 	if (page_is_buddy(page, buddy, order))
683 		return buddy;
684 	return NULL;
685 }
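/*
 * For example (illustrative): for pfn 8 at order 1, __find_buddy_pfn() yields
 * pfn 10; find_buddy_page_pfn() then returns the struct page for pfn 10 only
 * if page_is_buddy() confirms it is free, of order 1 and in the same zone,
 * otherwise NULL.
 */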
686 
687 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
688 				unsigned long end_pfn, struct zone *zone);
689 
690 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
691 				unsigned long end_pfn, struct zone *zone)
692 {
693 	if (zone->contiguous)
694 		return pfn_to_page(start_pfn);
695 
696 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
697 }
698 
699 void set_zone_contiguous(struct zone *zone);
700 bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
701 			   unsigned long nr_pages);
702 
703 static inline void clear_zone_contiguous(struct zone *zone)
704 {
705 	zone->contiguous = false;
706 }
707 
708 extern int __isolate_free_page(struct page *page, unsigned int order);
709 extern void __putback_isolated_page(struct page *page, unsigned int order,
710 				    int mt);
711 extern void memblock_free_pages(struct page *page, unsigned long pfn,
712 					unsigned int order);
713 extern void __free_pages_core(struct page *page, unsigned int order,
714 		enum meminit_context context);
715 
716 /*
717  * This will have no effect, other than possibly generating a warning, if the
718  * caller passes in a non-large folio.
719  */
720 static inline void folio_set_order(struct folio *folio, unsigned int order)
721 {
722 	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
723 		return;
724 
725 	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
726 #ifdef NR_PAGES_IN_LARGE_FOLIO
727 	folio->_nr_pages = 1U << order;
728 #endif
729 }
730 
731 bool __folio_unqueue_deferred_split(struct folio *folio);
732 static inline bool folio_unqueue_deferred_split(struct folio *folio)
733 {
734 	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
735 		return false;
736 
737 	/*
738 	 * At this point, there is no one trying to add the folio to
739 	 * deferred_list. If folio is not in deferred_list, it's safe
740 	 * to check without acquiring the split_queue_lock.
741 	 */
742 	if (data_race(list_empty(&folio->_deferred_list)))
743 		return false;
744 
745 	return __folio_unqueue_deferred_split(folio);
746 }
747 
748 static inline struct folio *page_rmappable_folio(struct page *page)
749 {
750 	struct folio *folio = (struct folio *)page;
751 
752 	if (folio && folio_test_large(folio))
753 		folio_set_large_rmappable(folio);
754 	return folio;
755 }
756 
757 static inline void prep_compound_head(struct page *page, unsigned int order)
758 {
759 	struct folio *folio = (struct folio *)page;
760 
761 	folio_set_order(folio, order);
762 	atomic_set(&folio->_large_mapcount, -1);
763 	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
764 		atomic_set(&folio->_nr_pages_mapped, 0);
765 	if (IS_ENABLED(CONFIG_MM_ID)) {
766 		folio->_mm_ids = 0;
767 		folio->_mm_id_mapcount[0] = -1;
768 		folio->_mm_id_mapcount[1] = -1;
769 	}
770 	if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
771 		atomic_set(&folio->_pincount, 0);
772 		atomic_set(&folio->_entire_mapcount, -1);
773 	}
774 	if (order > 1)
775 		INIT_LIST_HEAD(&folio->_deferred_list);
776 }
777 
778 static inline void prep_compound_tail(struct page *head, int tail_idx)
779 {
780 	struct page *p = head + tail_idx;
781 
782 	p->mapping = TAIL_MAPPING;
783 	set_compound_head(p, head);
784 	set_page_private(p, 0);
785 }
786 
787 void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
788 extern bool free_pages_prepare(struct page *page, unsigned int order);
789 
790 extern int user_min_free_kbytes;
791 
792 struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
793 		nodemask_t *);
794 #define __alloc_frozen_pages(...) \
795 	alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
796 void free_frozen_pages(struct page *page, unsigned int order);
797 void free_unref_folios(struct folio_batch *fbatch);
798 
799 #ifdef CONFIG_NUMA
800 struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
801 #else
802 static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
803 {
804 	return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
805 }
806 #endif
807 
808 #define alloc_frozen_pages(...) \
809 	alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
810 
811 extern void zone_pcp_reset(struct zone *zone);
812 extern void zone_pcp_disable(struct zone *zone);
813 extern void zone_pcp_enable(struct zone *zone);
814 extern void zone_pcp_init(struct zone *zone);
815 
816 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
817 			  phys_addr_t min_addr,
818 			  int nid, bool exact_nid);
819 
820 void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
821 		unsigned long, enum meminit_context, struct vmem_altmap *, int);
822 
823 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
824 
825 /*
826  * in mm/compaction.c
827  */
828 /*
829  * compact_control is used to track pages being migrated and the free pages
830  * they are being migrated to during memory compaction. The free_pfn starts
831  * at the end of a zone and migrate_pfn begins at the start. Movable pages
832  * are moved to the end of a zone during a compaction run and the run
833  * completes when free_pfn <= migrate_pfn
834  */
835 struct compact_control {
836 	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
837 	struct list_head migratepages;	/* List of pages being migrated */
838 	unsigned int nr_freepages;	/* Number of isolated free pages */
839 	unsigned int nr_migratepages;	/* Number of pages to migrate */
840 	unsigned long free_pfn;		/* isolate_freepages search base */
841 	/*
842 	 * Acts as an in/out parameter to page isolation for migration.
843 	 * isolate_migratepages uses it as a search base.
844 	 * isolate_migratepages_block will update the value to the next pfn
845 	 * after the last isolated one.
846 	 */
847 	unsigned long migrate_pfn;
848 	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
849 	struct zone *zone;
850 	unsigned long total_migrate_scanned;
851 	unsigned long total_free_scanned;
852 	unsigned short fast_search_fail;/* failures to use free list searches */
853 	short search_order;		/* order to start a fast search at */
854 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
855 	int order;			/* order a direct compactor needs */
856 	int migratetype;		/* migratetype of direct compactor */
857 	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
858 	const int highest_zoneidx;	/* zone index of a direct compactor */
859 	enum migrate_mode mode;		/* Async or sync migration mode */
860 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
861 	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
862 	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
863 	bool direct_compaction;		/* False from kcompactd or /proc/... */
864 	bool proactive_compaction;	/* kcompactd proactive compaction */
865 	bool whole_zone;		/* Whole zone should/has been scanned */
866 	bool contended;			/* Signal lock contention */
867 	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
868 					 * when there are potentially transient
869 					 * isolation or migration failures to
870 					 * ensure forward progress.
871 					 */
872 	bool alloc_contig;		/* alloc_contig_range allocation */
873 };
874 
875 /*
876  * Used in direct compaction when a page should be taken from the freelists
877  * immediately when one is created during the free path.
878  */
879 struct capture_control {
880 	struct compact_control *cc;
881 	struct page *page;
882 };
883 
884 unsigned long
885 isolate_freepages_range(struct compact_control *cc,
886 			unsigned long start_pfn, unsigned long end_pfn);
887 int
888 isolate_migratepages_range(struct compact_control *cc,
889 			   unsigned long low_pfn, unsigned long end_pfn);
890 
891 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
892 void init_cma_reserved_pageblock(struct page *page);
893 
894 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
895 
896 struct cma;
897 
898 #ifdef CONFIG_CMA
899 void *cma_reserve_early(struct cma *cma, unsigned long size);
900 void init_cma_pageblock(struct page *page);
901 #else
902 static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
903 {
904 	return NULL;
905 }
906 static inline void init_cma_pageblock(struct page *page)
907 {
908 }
909 #endif
910 
911 
912 int find_suitable_fallback(struct free_area *area, unsigned int order,
913 			int migratetype, bool claim_only, bool *claim_block);
914 
915 static inline bool free_area_empty(struct free_area *area, int migratetype)
916 {
917 	return list_empty(&area->free_list[migratetype]);
918 }
919 
920 /* mm/util.c */
921 struct anon_vma *folio_anon_vma(const struct folio *folio);
922 
923 #ifdef CONFIG_MMU
924 void unmap_mapping_folio(struct folio *folio);
925 extern long populate_vma_page_range(struct vm_area_struct *vma,
926 		unsigned long start, unsigned long end, int *locked);
927 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
928 		unsigned long end, bool write, int *locked);
929 extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
930 			       unsigned long bytes);
931 
932 /*
933  * NOTE: This function can't tell whether the folio is "fully mapped" in the
934  * range.
935  * "fully mapped" means all the pages of the folio are associated with the
936  * page table of the range, while this function just checks whether the folio
937  * range is within the range [start, end). The caller needs to do the page
938  * table check itself if it cares about the page table association.
939  *
940  * Typical usage (like mlock or madvise) is:
941  * The caller knows at least one page of the folio is associated with the page
942  * table of the VMA, and the range [start, end) intersects the VMA range. The
943  * caller wants to know whether the folio is fully associated with the range,
944  * so it first calls this function to check whether the folio is in the range,
945  * and then checks the page table to know whether the folio is fully mapped.
946  */
947 static inline bool
948 folio_within_range(struct folio *folio, struct vm_area_struct *vma,
949 		unsigned long start, unsigned long end)
950 {
951 	pgoff_t pgoff, addr;
952 	unsigned long vma_pglen = vma_pages(vma);
953 
954 	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
955 	if (start > end)
956 		return false;
957 
958 	if (start < vma->vm_start)
959 		start = vma->vm_start;
960 
961 	if (end > vma->vm_end)
962 		end = vma->vm_end;
963 
964 	pgoff = folio_pgoff(folio);
965 
966 	/* if folio start address is not in vma range */
967 	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
968 		return false;
969 
970 	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
971 
972 	return !(addr < start || end - addr < folio_size(folio));
973 }
974 
975 static inline bool
976 folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
977 {
978 	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
979 }
980 
981 /*
982  * mlock_vma_folio() and munlock_vma_folio():
983  * should be called with vma's mmap_lock held for read or write,
984  * under page table lock for the pte/pmd being added or removed.
985  *
986  * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
987  * the end of folio_remove_rmap_*(); but new anon folios are managed by
988  * folio_add_lru_vma() calling mlock_new_folio().
989  */
990 void mlock_folio(struct folio *folio);
991 static inline void mlock_vma_folio(struct folio *folio,
992 				struct vm_area_struct *vma)
993 {
994 	/*
995 	 * The VM_SPECIAL check here serves two purposes.
996 	 * 1) VM_IO check prevents migration from double-counting during mlock.
997 	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
998 	 *    is never left set on a VM_SPECIAL vma, there is an interval while
999 	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
1000 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
1001 	 */
1002 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
1003 		mlock_folio(folio);
1004 }
1005 
1006 void munlock_folio(struct folio *folio);
1007 static inline void munlock_vma_folio(struct folio *folio,
1008 					struct vm_area_struct *vma)
1009 {
1010 	/*
1011 	 * Munlock whenever this function is called. Ideally, we should only
1012 	 * munlock if some page of the folio is being unmapped from the VMA,
1013 	 * leaving the folio no longer fully mapped to the VMA.
1014 	 *
1015 	 * But it's not easy to confirm that is the situation, so we always
1016 	 * munlock the folio and let page reclaim correct it if that turns
1017 	 * out to be wrong.
1018 	 */
1019 	if (unlikely(vma->vm_flags & VM_LOCKED))
1020 		munlock_folio(folio);
1021 }
1022 
1023 void mlock_new_folio(struct folio *folio);
1024 bool need_mlock_drain(int cpu);
1025 void mlock_drain_local(void);
1026 void mlock_drain_remote(int cpu);
1027 
1028 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
1029 
1030 /**
1031  * vma_address - Find the virtual address a page range is mapped at
1032  * @vma: The vma which maps this object.
1033  * @pgoff: The page offset within its object.
1034  * @nr_pages: The number of pages to consider.
1035  *
1036  * If any page in this range is mapped by this VMA, return the first address
1037  * where any of these pages appear.  Otherwise, return -EFAULT.
1038  */
1039 static inline unsigned long vma_address(const struct vm_area_struct *vma,
1040 		pgoff_t pgoff, unsigned long nr_pages)
1041 {
1042 	unsigned long address;
1043 
1044 	if (pgoff >= vma->vm_pgoff) {
1045 		address = vma->vm_start +
1046 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1047 		/* Check for address beyond vma (or wrapped through 0?) */
1048 		if (address < vma->vm_start || address >= vma->vm_end)
1049 			address = -EFAULT;
1050 	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
1051 		/* Test above avoids possibility of wrap to 0 on 32-bit */
1052 		address = vma->vm_start;
1053 	} else {
1054 		address = -EFAULT;
1055 	}
1056 	return address;
1057 }
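/*
 * Worked example (illustrative, assuming 4KiB pages): with vm_start ==
 * 0x100000, vm_pgoff == 0x10 and pgoff == 0x13, the range starts at
 * 0x100000 + (0x13 - 0x10) * 0x1000 == 0x103000, provided that address is
 * still below vm_end.
 */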
1058 
1059 /*
1060  * Then at what user virtual address will none of the range be found in vma?
1061  * Assumes that vma_address() already returned a good starting address.
1062  */
1063 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
1064 {
1065 	struct vm_area_struct *vma = pvmw->vma;
1066 	pgoff_t pgoff;
1067 	unsigned long address;
1068 
1069 	/* Common case, plus ->pgoff is invalid for KSM */
1070 	if (pvmw->nr_pages == 1)
1071 		return pvmw->address + PAGE_SIZE;
1072 
1073 	pgoff = pvmw->pgoff + pvmw->nr_pages;
1074 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1075 	/* Check for address beyond vma (or wrapped through 0?) */
1076 	if (address < vma->vm_start || address > vma->vm_end)
1077 		address = vma->vm_end;
1078 	return address;
1079 }
1080 
1081 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
1082 						    struct file *fpin)
1083 {
1084 	int flags = vmf->flags;
1085 
1086 	if (fpin)
1087 		return fpin;
1088 
1089 	/*
1090 	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
1091 	 * anything, so we only pin the file and drop the mmap_lock if only
1092 	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
1093 	 */
1094 	if (fault_flag_allow_retry_first(flags) &&
1095 	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
1096 		fpin = get_file(vmf->vma->vm_file);
1097 		release_fault_lock(vmf);
1098 	}
1099 	return fpin;
1100 }
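/*
 * Illustrative pattern (a sketch of how fault handlers that may block on IO
 * use this): pin the file and drop the fault lock before blocking, then let
 * the fault be retried:
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	...				// e.g. start readahead, lock the folio
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */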
1101 #else /* !CONFIG_MMU */
1102 static inline void unmap_mapping_folio(struct folio *folio) { }
1103 static inline void mlock_new_folio(struct folio *folio) { }
1104 static inline bool need_mlock_drain(int cpu) { return false; }
1105 static inline void mlock_drain_local(void) { }
1106 static inline void mlock_drain_remote(int cpu) { }
1107 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
1108 {
1109 }
1110 #endif /* !CONFIG_MMU */
1111 
1112 /* Memory initialisation debug and verification */
1113 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1114 DECLARE_STATIC_KEY_TRUE(deferred_pages);
1115 
1116 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
1117 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1118 
1119 enum mminit_level {
1120 	MMINIT_WARNING,
1121 	MMINIT_VERIFY,
1122 	MMINIT_TRACE
1123 };
1124 
1125 #ifdef CONFIG_DEBUG_MEMORY_INIT
1126 
1127 extern int mminit_loglevel;
1128 
1129 #define mminit_dprintk(level, prefix, fmt, arg...) \
1130 do { \
1131 	if (level < mminit_loglevel) { \
1132 		if (level <= MMINIT_WARNING) \
1133 			pr_warn("mminit::" prefix " " fmt, ##arg);	\
1134 		else \
1135 			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
1136 	} \
1137 } while (0)
1138 
1139 extern void mminit_verify_pageflags_layout(void);
1140 extern void mminit_verify_zonelist(void);
1141 #else
1142 
1143 static inline void mminit_dprintk(enum mminit_level level,
1144 				const char *prefix, const char *fmt, ...)
1145 {
1146 }
1147 
1148 static inline void mminit_verify_pageflags_layout(void)
1149 {
1150 }
1151 
1152 static inline void mminit_verify_zonelist(void)
1153 {
1154 }
1155 #endif /* CONFIG_DEBUG_MEMORY_INIT */
1156 
1157 #define NODE_RECLAIM_NOSCAN	-2
1158 #define NODE_RECLAIM_FULL	-1
1159 #define NODE_RECLAIM_SOME	0
1160 #define NODE_RECLAIM_SUCCESS	1
1161 
1162 #ifdef CONFIG_NUMA
1163 extern int node_reclaim_mode;
1164 
1165 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
1166 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
1167 #else
1168 #define node_reclaim_mode 0
1169 
1170 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
1171 				unsigned int order)
1172 {
1173 	return NODE_RECLAIM_NOSCAN;
1174 }
1175 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
1176 {
1177 	return NUMA_NO_NODE;
1178 }
1179 #endif
1180 
1181 static inline bool node_reclaim_enabled(void)
1182 {
1183 	/* Is any node_reclaim_mode bit set? */
1184 	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
1185 }
1186 
1187 /*
1188  * mm/memory-failure.c
1189  */
1190 #ifdef CONFIG_MEMORY_FAILURE
1191 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
1192 void shake_folio(struct folio *folio);
1193 extern int hwpoison_filter(struct page *p);
1194 
1195 extern u32 hwpoison_filter_dev_major;
1196 extern u32 hwpoison_filter_dev_minor;
1197 extern u64 hwpoison_filter_flags_mask;
1198 extern u64 hwpoison_filter_flags_value;
1199 extern u64 hwpoison_filter_memcg;
1200 extern u32 hwpoison_filter_enable;
1201 #define MAGIC_HWPOISON	0x48575053U	/* HWPS */
1202 void SetPageHWPoisonTakenOff(struct page *page);
1203 void ClearPageHWPoisonTakenOff(struct page *page);
1204 bool take_page_off_buddy(struct page *page);
1205 bool put_page_back_buddy(struct page *page);
1206 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
1207 void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
1208 		     struct vm_area_struct *vma, struct list_head *to_kill,
1209 		     unsigned long ksm_addr);
1210 unsigned long page_mapped_in_vma(const struct page *page,
1211 		struct vm_area_struct *vma);
1212 
1213 #else
1214 static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
1215 {
1216 	return -EBUSY;
1217 }
1218 #endif
1219 
1220 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
1221         unsigned long, unsigned long,
1222         unsigned long, unsigned long);
1223 
1224 extern void set_pageblock_order(void);
1225 struct folio *alloc_migrate_folio(struct folio *src, unsigned long private);
1226 unsigned long reclaim_pages(struct list_head *folio_list);
1227 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1228 					    struct list_head *folio_list);
1229 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1230 #define ALLOC_WMARK_MIN		WMARK_MIN
1231 #define ALLOC_WMARK_LOW		WMARK_LOW
1232 #define ALLOC_WMARK_HIGH	WMARK_HIGH
1233 #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1234 
1235 /* Mask to get the watermark bits */
1236 #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
1237 
1238 /*
1239  * Only MMU archs have async OOM victim reclaim - aka the oom_reaper - so we
1240  * cannot assume that reduced access to memory reserves is sufficient for
1241  * !MMU.
1242  */
1243 #ifdef CONFIG_MMU
1244 #define ALLOC_OOM		0x08
1245 #else
1246 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
1247 #endif
1248 
1249 #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
1250 				       * to 25% of the min watermark or
1251 				       * 62.5% if __GFP_HIGH is set.
1252 				       */
1253 #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
1254 				       * of the min watermark.
1255 				       */
1256 #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
1257 #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
1258 #ifdef CONFIG_ZONE_DMA32
1259 #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
1260 #else
1261 #define ALLOC_NOFRAGMENT	  0x0
1262 #endif
1263 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
1264 #define ALLOC_TRYLOCK		0x400 /* Only use spin_trylock in allocation path */
1265 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
1266 
1267 /* Flags that allow allocations below the min watermark. */
1268 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
1269 
1270 enum ttu_flags;
1271 struct tlbflush_unmap_batch;
1272 
1273 
1274 /*
1275  * only for MM internal work items which do not depend on
1276  * any allocations or locks which might depend on allocations
1277  */
1278 extern struct workqueue_struct *mm_percpu_wq;
1279 
1280 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1281 void try_to_unmap_flush(void);
1282 void try_to_unmap_flush_dirty(void);
1283 void flush_tlb_batched_pending(struct mm_struct *mm);
1284 #else
1285 static inline void try_to_unmap_flush(void)
1286 {
1287 }
1288 static inline void try_to_unmap_flush_dirty(void)
1289 {
1290 }
1291 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
1292 {
1293 }
1294 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
1295 
1296 extern const struct trace_print_flags pageflag_names[];
1297 extern const struct trace_print_flags vmaflag_names[];
1298 extern const struct trace_print_flags gfpflag_names[];
1299 
1300 static inline bool is_migrate_highatomic(enum migratetype migratetype)
1301 {
1302 	return migratetype == MIGRATE_HIGHATOMIC;
1303 }
1304 
1305 void setup_zone_pageset(struct zone *zone);
1306 
1307 struct migration_target_control {
1308 	int nid;		/* preferred node id */
1309 	nodemask_t *nmask;
1310 	gfp_t gfp_mask;
1311 	enum migrate_reason reason;
1312 };
1313 
1314 /*
1315  * mm/filemap.c
1316  */
1317 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
1318 			      struct folio *folio, loff_t fpos, size_t size);
1319 
1320 /*
1321  * mm/vmalloc.c
1322  */
1323 #ifdef CONFIG_MMU
1324 void __init vmalloc_init(void);
1325 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1326                 pgprot_t prot, struct page **pages, unsigned int page_shift);
1327 unsigned int get_vm_area_page_order(struct vm_struct *vm);
1328 #else
1329 static inline void vmalloc_init(void)
1330 {
1331 }
1332 
1333 static inline
1334 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1335                 pgprot_t prot, struct page **pages, unsigned int page_shift)
1336 {
1337 	return -EINVAL;
1338 }
1339 #endif
1340 
1341 int __must_check __vmap_pages_range_noflush(unsigned long addr,
1342 			       unsigned long end, pgprot_t prot,
1343 			       struct page **pages, unsigned int page_shift);
1344 
1345 void vunmap_range_noflush(unsigned long start, unsigned long end);
1346 
1347 void __vunmap_range_noflush(unsigned long start, unsigned long end);
1348 
1349 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
1350 		      unsigned long addr, int *flags, bool writable,
1351 		      int *last_cpupid);
1352 
1353 void free_zone_device_folio(struct folio *folio);
1354 int migrate_device_coherent_folio(struct folio *folio);
1355 
1356 struct vm_struct *__get_vm_area_node(unsigned long size,
1357 				     unsigned long align, unsigned long shift,
1358 				     unsigned long flags, unsigned long start,
1359 				     unsigned long end, int node, gfp_t gfp_mask,
1360 				     const void *caller);
1361 
1362 /*
1363  * mm/gup.c
1364  */
1365 int __must_check try_grab_folio(struct folio *folio, int refs,
1366 				unsigned int flags);
1367 
1368 /*
1369  * mm/huge_memory.c
1370  */
1371 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1372 	       pud_t *pud, bool write);
1373 void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1374 	       pmd_t *pmd, bool write);
1375 
1376 /*
1377  * Parses a string with mem suffixes into its order. Useful to parse kernel
1378  * parameters.
1379  */
1380 static inline int get_order_from_str(const char *size_str,
1381 				     unsigned long valid_orders)
1382 {
1383 	unsigned long size;
1384 	char *endptr;
1385 	int order;
1386 
1387 	size = memparse(size_str, &endptr);
1388 
1389 	if (!is_power_of_2(size))
1390 		return -EINVAL;
1391 	order = get_order(size);
1392 	if (BIT(order) & ~valid_orders)
1393 		return -EINVAL;
1394 
1395 	return order;
1396 }
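/*
 * For example (illustrative, assuming 4KiB pages): "2M" parses to order 9 and
 * is accepted only if BIT(9) is set in @valid_orders; "3M" is rejected because
 * it is not a power of two.
 */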
1397 
1398 enum {
1399 	/* mark page accessed */
1400 	FOLL_TOUCH = 1 << 16,
1401 	/* a retry, previous pass started an IO */
1402 	FOLL_TRIED = 1 << 17,
1403 	/* we are working on non-current tsk/mm */
1404 	FOLL_REMOTE = 1 << 18,
1405 	/* pages must be released via unpin_user_page */
1406 	FOLL_PIN = 1 << 19,
1407 	/* gup_fast: prevent fall-back to slow gup */
1408 	FOLL_FAST_ONLY = 1 << 20,
1409 	/* allow unlocking the mmap lock */
1410 	FOLL_UNLOCKABLE = 1 << 21,
1411 	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
1412 	FOLL_MADV_POPULATE = 1 << 22,
1413 };
1414 
1415 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1416 			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
1417 			    FOLL_MADV_POPULATE)
1418 
1419 /*
1420  * Indicates for which pages that are write-protected in the page table,
1421  * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
1422  * GUP pin will remain consistent with the pages mapped into the page tables
1423  * of the MM.
1424  *
1425  * Temporary unmapping of PageAnonExclusive() pages or clearing of
1426  * PageAnonExclusive() has to protect against concurrent GUP:
1427  * * Ordinary GUP: Using the PT lock
1428  * * GUP-fast and fork(): mm->write_protect_seq
1429  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1430  *    folio_try_share_anon_rmap_*()
1431  *
1432  * Must be called with the (sub)page that's actually referenced via the
1433  * page table entry, which might not necessarily be the head page for a
1434  * PTE-mapped THP.
1435  *
1436  * If the vma is NULL, we're coming from the GUP-fast path and might have
1437  * to fallback to the slow path just to lookup the vma.
1438  */
1439 static inline bool gup_must_unshare(struct vm_area_struct *vma,
1440 				    unsigned int flags, struct page *page)
1441 {
1442 	/*
1443 	 * FOLL_WRITE is implicitly handled correctly as the page table entry
1444 	 * has to be writable -- and if it references (part of) an anonymous
1445 	 * folio, that part is required to be marked exclusive.
1446 	 */
1447 	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1448 		return false;
1449 	/*
1450 	 * Note: PageAnon(page) is stable until the page is actually getting
1451 	 * freed.
1452 	 */
1453 	if (!PageAnon(page)) {
1454 		/*
1455 		 * We only care about R/O long-term pinning: R/O short-term
1456 		 * pinning does not have the semantics to observe successive
1457 		 * changes through the process page tables.
1458 		 */
1459 		if (!(flags & FOLL_LONGTERM))
1460 			return false;
1461 
1462 		/* We really need the vma ... */
1463 		if (!vma)
1464 			return true;
1465 
1466 		/*
1467 		 * ... because we only care about writable private ("COW")
1468 		 * mappings where we have to break COW early.
1469 		 */
1470 		return is_cow_mapping(vma->vm_flags);
1471 	}
1472 
1473 	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1474 	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
1475 		smp_rmb();
1476 
1477 	/*
1478 	 * Note that KSM pages cannot be exclusive, and consequently,
1479 	 * cannot get pinned.
1480 	 */
1481 	return !PageAnonExclusive(page);
1482 }
1483 
1484 extern bool mirrored_kernelcore;
1485 bool memblock_has_mirror(void);
1486 void memblock_free_all(void);
1487 
1488 static __always_inline void vma_set_range(struct vm_area_struct *vma,
1489 					  unsigned long start, unsigned long end,
1490 					  pgoff_t pgoff)
1491 {
1492 	vma->vm_start = start;
1493 	vma->vm_end = end;
1494 	vma->vm_pgoff = pgoff;
1495 }
1496 
1497 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1498 {
1499 	/*
1500 	 * NOTE: we must check this before checking VM_SOFTDIRTY on soft-dirty
1501 	 * enablement, because when soft-dirty is not compiled in, VM_SOFTDIRTY
1502 	 * is defined as 0x0, and !(vm_flags & VM_SOFTDIRTY) would then always
1503 	 * be true.
1504 	 */
1505 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
1506 		return false;
1507 
1508 	/*
1509 	 * Soft-dirty is kind of special: its tracking is enabled when the
1510 	 * VM_SOFTDIRTY vma flag is *not* set.
1511 	 */
1512 	return !(vma->vm_flags & VM_SOFTDIRTY);
1513 }
1514 
1515 static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
1516 {
1517 	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
1518 }
1519 
1520 static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
1521 {
1522 	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
1523 }
1524 
1525 void __meminit __init_single_page(struct page *page, unsigned long pfn,
1526 				unsigned long zone, int nid);
1527 void __meminit __init_page_from_nid(unsigned long pfn, int nid);
1528 
1529 /* shrinker related functions */
1530 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1531 			  int priority);
1532 
1533 #ifdef CONFIG_SHRINKER_DEBUG
1534 static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1535 			struct shrinker *shrinker, const char *fmt, va_list ap)
1536 {
1537 	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1538 
1539 	return shrinker->name ? 0 : -ENOMEM;
1540 }
1541 
1542 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1543 {
1544 	kfree_const(shrinker->name);
1545 	shrinker->name = NULL;
1546 }
1547 
1548 extern int shrinker_debugfs_add(struct shrinker *shrinker);
1549 extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1550 					      int *debugfs_id);
1551 extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1552 				    int debugfs_id);
1553 #else /* CONFIG_SHRINKER_DEBUG */
1554 static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1555 {
1556 	return 0;
1557 }
1558 static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1559 					      const char *fmt, va_list ap)
1560 {
1561 	return 0;
1562 }
1563 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1564 {
1565 }
1566 static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1567 						     int *debugfs_id)
1568 {
1569 	*debugfs_id = -1;
1570 	return NULL;
1571 }
1572 static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1573 					   int debugfs_id)
1574 {
1575 }
1576 #endif /* CONFIG_SHRINKER_DEBUG */
1577 
1578 /* Only track the nodes of mappings with shadow entries */
1579 void workingset_update_node(struct xa_node *node);
1580 extern struct list_lru shadow_nodes;
1581 #define mapping_set_update(xas, mapping) do {			\
1582 	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {	\
1583 		xas_set_update(xas, workingset_update_node);	\
1584 		xas_set_lru(xas, &shadow_nodes);		\
1585 	}							\
1586 } while (0)
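/*
 * Illustrative use (a sketch of the typical pattern in callers that modify
 * mapping->i_pages):
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *	mapping_set_update(&xas, mapping);
 */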
1587 
1588 /* mremap.c */
1589 unsigned long move_page_tables(struct pagetable_move_control *pmc);
1590 
1591 #ifdef CONFIG_UNACCEPTED_MEMORY
1592 void accept_page(struct page *page);
1593 #else /* CONFIG_UNACCEPTED_MEMORY */
1594 static inline void accept_page(struct page *page)
1595 {
1596 }
1597 #endif /* CONFIG_UNACCEPTED_MEMORY */
1598 
1599 /* pagewalk.c */
1600 int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
1601 		unsigned long end, const struct mm_walk_ops *ops,
1602 		void *private);
1603 
1604 /* pt_reclaim.c */
1605 bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval);
1606 void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb,
1607 	      pmd_t pmdval);
1608 void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
1609 		     struct mmu_gather *tlb);
1610 
1611 #ifdef CONFIG_PT_RECLAIM
1612 bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
1613 			   struct zap_details *details);
1614 #else
1615 static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
1616 					 struct zap_details *details)
1617 {
1618 	return false;
1619 }
1620 #endif /* CONFIG_PT_RECLAIM */
1621 
1622 
1623 #endif	/* __MM_INTERNAL_H */
1624