xref: /linux/include/linux/mm.h (revision 334fbe734e687404f346eba7d5d96ed2b44d35ab)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MM_H
3 #define _LINUX_MM_H
4 
5 #include <linux/args.h>
6 #include <linux/errno.h>
7 #include <linux/mmdebug.h>
8 #include <linux/gfp.h>
9 #include <linux/pgalloc_tag.h>
10 #include <linux/bug.h>
11 #include <linux/list.h>
12 #include <linux/mmzone.h>
13 #include <linux/rbtree.h>
14 #include <linux/atomic.h>
15 #include <linux/debug_locks.h>
16 #include <linux/compiler.h>
17 #include <linux/mm_types.h>
18 #include <linux/mmap_lock.h>
19 #include <linux/range.h>
20 #include <linux/pfn.h>
21 #include <linux/percpu-refcount.h>
22 #include <linux/bit_spinlock.h>
23 #include <linux/shrinker.h>
24 #include <linux/resource.h>
25 #include <linux/page_ext.h>
26 #include <linux/err.h>
27 #include <linux/page-flags.h>
28 #include <linux/page_ref.h>
29 #include <linux/overflow.h>
30 #include <linux/sched.h>
31 #include <linux/pgtable.h>
32 #include <linux/kasan.h>
33 #include <linux/memremap.h>
34 #include <linux/slab.h>
35 #include <linux/cacheinfo.h>
36 #include <linux/rcuwait.h>
37 #include <linux/bitmap.h>
38 #include <linux/bitops.h>
39 #include <linux/iommu-debug-pagealloc.h>
40 
41 struct mempolicy;
42 struct anon_vma;
43 struct anon_vma_chain;
44 struct user_struct;
45 struct pt_regs;
46 struct folio_batch;
47 
48 void arch_mm_preinit(void);
49 void mm_core_init_early(void);
50 void mm_core_init(void);
51 void init_mm_internals(void);
52 
53 extern atomic_long_t _totalram_pages;
totalram_pages(void)54 static inline unsigned long totalram_pages(void)
55 {
56 	return (unsigned long)atomic_long_read(&_totalram_pages);
57 }
58 
/* Atomically increment the global managed-RAM page count by one. */
static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}
63 
/* Atomically decrement the global managed-RAM page count by one. */
static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}
68 
/*
 * Atomically adjust the global managed-RAM page count by @count
 * (may be negative to subtract pages).
 */
static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}
73 
74 extern void * high_memory;
75 
76 /*
77  * Convert between pages and MB
78  * 20 is the shift for 1MB (2^20 = 1MB)
79  * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages)
80  * So (20 - PAGE_SHIFT) converts between pages and MB
81  */
82 #define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
83 #define MB_TO_PAGES(mb)    ((mb) << (20 - PAGE_SHIFT))
84 
85 #ifdef CONFIG_SYSCTL
86 extern int sysctl_legacy_va_layout;
87 #else
88 #define sysctl_legacy_va_layout 0
89 #endif
90 
91 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
92 extern const int mmap_rnd_bits_min;
93 extern int mmap_rnd_bits_max __ro_after_init;
94 extern int mmap_rnd_bits __read_mostly;
95 #endif
96 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
97 extern const int mmap_rnd_compat_bits_min;
98 extern const int mmap_rnd_compat_bits_max;
99 extern int mmap_rnd_compat_bits __read_mostly;
100 #endif
101 
102 #ifndef DIRECT_MAP_PHYSMEM_END
103 # ifdef MAX_PHYSMEM_BITS
104 # define DIRECT_MAP_PHYSMEM_END	((1ULL << MAX_PHYSMEM_BITS) - 1)
105 # else
106 # define DIRECT_MAP_PHYSMEM_END	(((phys_addr_t)-1)&~(1ULL<<63))
107 # endif
108 #endif
109 
110 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
111 
112 #include <asm/page.h>
113 #include <asm/processor.h>
114 
115 #ifndef __pa_symbol
116 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
117 #endif
118 
119 #ifndef page_to_virt
120 #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
121 #endif
122 
123 #ifndef lm_alias
124 #define lm_alias(x)	__va(__pa_symbol(x))
125 #endif
126 
127 /*
128  * To prevent common memory management code establishing
129  * a zero page mapping on a read fault.
130  * This macro should be defined within <asm/pgtable.h>.
131  * s390 does this to prevent multiplexing of hardware bits
132  * related to the physical page in case of virtualization.
133  */
134 #ifndef mm_forbids_zeropage
135 #define mm_forbids_zeropage(X)	(0)
136 #endif
137 
138 /*
139  * On some architectures it is expensive to call memset() for small sizes.
140  * If an architecture decides to implement their own version of
141  * mm_zero_struct_page they should wrap the defines below in a #ifndef and
142  * define their own version of this macro in <asm/pgtable.h>
143  */
#if BITS_PER_LONG == 64
/* This function must be updated when the size of struct page grows above 96
 * or reduces below 56. The idea is that the compiler optimizes out the
 * switch() statement, and only leaves move/store instructions. Also the
 * compiler can combine write statements if they are both assignments and can
 * be reordered, this can result in several of the writes here being dropped.
 */
#define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
	unsigned long *_pp = (void *)page;

	 /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
	BUILD_BUG_ON(sizeof(struct page) & 7);
	BUILD_BUG_ON(sizeof(struct page) < 56);
	BUILD_BUG_ON(sizeof(struct page) > 96);

	/*
	 * Each case zeroes one 8-byte word and falls through, so exactly
	 * sizeof(struct page) bytes end up cleared in straight-line stores.
	 */
	switch (sizeof(struct page)) {
	case 96:
		_pp[11] = 0;
		fallthrough;
	case 88:
		_pp[10] = 0;
		fallthrough;
	case 80:
		_pp[9] = 0;
		fallthrough;
	case 72:
		_pp[8] = 0;
		fallthrough;
	case 64:
		_pp[7] = 0;
		fallthrough;
	case 56:
		_pp[6] = 0;
		_pp[5] = 0;
		_pp[4] = 0;
		_pp[3] = 0;
		_pp[2] = 0;
		_pp[1] = 0;
		_pp[0] = 0;
	}
}
#else
/* On 32-bit, fall back to a plain memset() of the whole struct page. */
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif
190 
191 /*
192  * Default maximum number of active map areas, this limits the number of vmas
193  * per mm struct. Users can overwrite this number by sysctl but there is a
194  * problem.
195  *
 * When a program's coredump is generated as ELF format, a section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short.
 * This means the number of sections must be smaller than 65535 at coredump
 * time. Because the kernel adds some informative sections to the image of the
 * program when generating a coredump, we need some margin. The number of
 * extra sections is currently 1-3 and depends on the arch. We use "5" as a
 * safe margin here.
202  *
203  * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
204  * not a hard limit any more. Although some userspace tools can be surprised by
205  * that.
206  */
207 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
208 #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
209 
210 extern unsigned long sysctl_user_reserve_kbytes;
211 extern unsigned long sysctl_admin_reserve_kbytes;
212 
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
/*
 * With classic SPARSEMEM the memmap can be discontiguous across section
 * boundaries, so a real check is needed.
 */
bool page_range_contiguous(const struct page *page, unsigned long nr_pages);
#else
/* Otherwise the memmap is contiguous, so any page range trivially is too. */
static inline bool page_range_contiguous(const struct page *page,
		unsigned long nr_pages)
{
	return true;
}
#endif
222 
223 /* to align the pointer to the (next) page boundary */
224 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
225 
226 /* to align the pointer to the (prev) page boundary */
227 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
228 
229 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
230 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
231 
232 /**
233  * folio_page_idx - Return the number of a page in a folio.
234  * @folio: The folio.
235  * @page: The folio page.
236  *
237  * This function expects that the page is actually part of the folio.
238  * The returned number is relative to the start of the folio.
239  */
folio_page_idx(const struct folio * folio,const struct page * page)240 static inline unsigned long folio_page_idx(const struct folio *folio,
241 		const struct page *page)
242 {
243 	return page - &folio->page;
244 }
245 
lru_to_folio(struct list_head * head)246 static inline struct folio *lru_to_folio(struct list_head *head)
247 {
248 	return list_entry((head)->prev, struct folio, lru);
249 }
250 
251 void setup_initial_init_mm(void *start_code, void *end_code,
252 			   void *end_data, void *brk);
253 
254 /*
255  * Linux kernel virtual memory manager primitives.
256  * The idea being to have a "virtual" mm in the same way
257  * we have a virtual fs - giving a cleaner interface to the
258  * mm details, and allowing different kinds of memory mappings
259  * (from shared memory to executable loading to arbitrary
260  * mmap() functions).
261  */
262 
263 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
264 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
265 void vm_area_free(struct vm_area_struct *);
266 
267 #ifndef CONFIG_MMU
268 extern struct rb_root nommu_region_tree;
269 extern struct rw_semaphore nommu_region_sem;
270 
271 extern unsigned int kobjsize(const void *objp);
272 #endif
273 
274 /*
275  * vm_flags in vm_area_struct, see mm_types.h.
276  * When changing, update also include/trace/events/mmflags.h
277  */
278 
279 #define VM_NONE		0x00000000
280 
281 /**
282  * typedef vma_flag_t - specifies an individual VMA flag by bit number.
283  *
284  * This value is made type safe by sparse to avoid passing invalid flag values
285  * around.
286  */
287 typedef int __bitwise vma_flag_t;
288 
/* Helpers to declare VMA flag bit numbers with sparse type safety. */
#define DECLARE_VMA_BIT(name, bitnum) \
	VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
#define DECLARE_VMA_BIT_ALIAS(name, aliased) \
	VMA_ ## name ## _BIT = (VMA_ ## aliased ## _BIT)
enum {
	DECLARE_VMA_BIT(READ, 0),
	DECLARE_VMA_BIT(WRITE, 1),
	DECLARE_VMA_BIT(EXEC, 2),
	DECLARE_VMA_BIT(SHARED, 3),
	/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
	DECLARE_VMA_BIT(MAYREAD, 4),	/* limits for mprotect() etc. */
	DECLARE_VMA_BIT(MAYWRITE, 5),
	DECLARE_VMA_BIT(MAYEXEC, 6),
	DECLARE_VMA_BIT(MAYSHARE, 7),
	DECLARE_VMA_BIT(GROWSDOWN, 8),	/* general info on the segment */
#ifdef CONFIG_MMU
	DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
#else
	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
	DECLARE_VMA_BIT(MAYOVERLAY, 9),
#endif /* CONFIG_MMU */
	/* Page-ranges managed without "struct page", just pure PFN */
	DECLARE_VMA_BIT(PFNMAP, 10),
	DECLARE_VMA_BIT(MAYBE_GUARD, 11),
	DECLARE_VMA_BIT(UFFD_WP, 12),	/* wrprotect pages tracking */
	DECLARE_VMA_BIT(LOCKED, 13),
	DECLARE_VMA_BIT(IO, 14),	/* Memory mapped I/O or similar */
	DECLARE_VMA_BIT(SEQ_READ, 15),	/* App will access data sequentially */
	DECLARE_VMA_BIT(RAND_READ, 16),	/* App will not benefit from clustered reads */
	DECLARE_VMA_BIT(DONTCOPY, 17),	/* Do not copy this vma on fork */
	DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
	DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
	DECLARE_VMA_BIT(ACCOUNT, 20),	/* Is a VM accounted object */
	DECLARE_VMA_BIT(NORESERVE, 21),	/* should the VM suppress accounting */
	DECLARE_VMA_BIT(HUGETLB, 22),	/* Huge TLB Page VM */
	DECLARE_VMA_BIT(SYNC, 23),	/* Synchronous page faults */
	DECLARE_VMA_BIT(ARCH_1, 24),	/* Architecture-specific flag */
	DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
	DECLARE_VMA_BIT(DONTDUMP, 26),	/* Do not include in the core dump */
	DECLARE_VMA_BIT(SOFTDIRTY, 27),	/* NOT soft dirty clean area */
	DECLARE_VMA_BIT(MIXEDMAP, 28),	/* Can contain struct page and pure PFN pages */
	DECLARE_VMA_BIT(HUGEPAGE, 29),	/* MADV_HUGEPAGE marked this vma */
	DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
	DECLARE_VMA_BIT(MERGEABLE, 31),	/* KSM may merge identical pages */
	/* These bits are reused, we define specific uses below. */
	DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
	DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
	DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
	DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
	DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
	DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
	DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
	/*
	 * This flag is used to connect VFIO to arch specific KVM code. It
	 * indicates that the memory under this VMA is safe for use with any
	 * non-cachable memory type inside KVM. Some VFIO devices, on some
	 * platforms, are thought to be unsafe and can cause machine crashes
	 * if KVM does not lock down the memory type.
	 */
	DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
#if defined(CONFIG_PPC32)
	DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
#elif defined(CONFIG_64BIT)
	DECLARE_VMA_BIT(DROPPABLE, 40),
#endif
	DECLARE_VMA_BIT(UFFD_MINOR, 41),
	DECLARE_VMA_BIT(SEALED, 42),
	/* Flags that reuse flags above. */
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_RISCV_USER_CFI)
	/*
	 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
	 * support in core mm.
	 *
	 * These VMAs will get a single end guard page. This helps userspace
	 * protect itself from attacks. A single page is enough for current
	 * shadow stack archs (x86). See the comments near alloc_shstk() in
	 * arch/x86/kernel/shstk.c for more details on the guard size.
	 */
	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
#elif defined(CONFIG_ARM64_GCS)
	/*
	 * arm64's Guarded Control Stack implements similar functionality and
	 * has similar constraints to shadow stacks.
	 */
	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
#endif
	DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1),		/* Strong Access Ordering (powerpc) */
	DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1),		/* parisc */
	DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1),	/* sparc64 */
	DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1),	/* arm64 */
	DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1),	/* sparc64, arm64 */
	DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1),	/* !CONFIG_MMU */
	DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4),	/* arm64 */
	DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
#ifdef CONFIG_STACK_GROWSUP
	DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
	DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
#else
	DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
#endif
};
#undef DECLARE_VMA_BIT
#undef DECLARE_VMA_BIT_ALIAS
397 
398 #define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
399 #define VM_READ		INIT_VM_FLAG(READ)
400 #define VM_WRITE	INIT_VM_FLAG(WRITE)
401 #define VM_EXEC		INIT_VM_FLAG(EXEC)
402 #define VM_SHARED	INIT_VM_FLAG(SHARED)
403 #define VM_MAYREAD	INIT_VM_FLAG(MAYREAD)
404 #define VM_MAYWRITE	INIT_VM_FLAG(MAYWRITE)
405 #define VM_MAYEXEC	INIT_VM_FLAG(MAYEXEC)
406 #define VM_MAYSHARE	INIT_VM_FLAG(MAYSHARE)
407 #define VM_GROWSDOWN	INIT_VM_FLAG(GROWSDOWN)
408 #ifdef CONFIG_MMU
409 #define VM_UFFD_MISSING	INIT_VM_FLAG(UFFD_MISSING)
410 #else
411 #define VM_UFFD_MISSING	VM_NONE
412 #define VM_MAYOVERLAY	INIT_VM_FLAG(MAYOVERLAY)
413 #endif
414 #define VM_PFNMAP	INIT_VM_FLAG(PFNMAP)
415 #define VM_MAYBE_GUARD	INIT_VM_FLAG(MAYBE_GUARD)
416 #define VM_UFFD_WP	INIT_VM_FLAG(UFFD_WP)
417 #define VM_LOCKED	INIT_VM_FLAG(LOCKED)
418 #define VM_IO		INIT_VM_FLAG(IO)
419 #define VM_SEQ_READ	INIT_VM_FLAG(SEQ_READ)
420 #define VM_RAND_READ	INIT_VM_FLAG(RAND_READ)
421 #define VM_DONTCOPY	INIT_VM_FLAG(DONTCOPY)
422 #define VM_DONTEXPAND	INIT_VM_FLAG(DONTEXPAND)
423 #define VM_LOCKONFAULT	INIT_VM_FLAG(LOCKONFAULT)
424 #define VM_ACCOUNT	INIT_VM_FLAG(ACCOUNT)
425 #define VM_NORESERVE	INIT_VM_FLAG(NORESERVE)
426 #define VM_HUGETLB	INIT_VM_FLAG(HUGETLB)
427 #define VM_SYNC		INIT_VM_FLAG(SYNC)
428 #define VM_ARCH_1	INIT_VM_FLAG(ARCH_1)
429 #define VM_WIPEONFORK	INIT_VM_FLAG(WIPEONFORK)
430 #define VM_DONTDUMP	INIT_VM_FLAG(DONTDUMP)
431 #ifdef CONFIG_MEM_SOFT_DIRTY
432 #define VM_SOFTDIRTY	INIT_VM_FLAG(SOFTDIRTY)
433 #else
434 #define VM_SOFTDIRTY	VM_NONE
435 #endif
436 #define VM_MIXEDMAP	INIT_VM_FLAG(MIXEDMAP)
437 #define VM_HUGEPAGE	INIT_VM_FLAG(HUGEPAGE)
438 #define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
439 #define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
440 #define VM_STACK	INIT_VM_FLAG(STACK)
441 #ifdef CONFIG_STACK_GROWSUP
442 #define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
443 #else
444 #define VM_STACK_EARLY	VM_NONE
445 #endif
446 #ifdef CONFIG_ARCH_HAS_PKEYS
447 #define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT)
448 /* Despite the naming, these are FLAGS not bits. */
449 #define VM_PKEY_BIT0 INIT_VM_FLAG(PKEY_BIT0)
450 #define VM_PKEY_BIT1 INIT_VM_FLAG(PKEY_BIT1)
451 #define VM_PKEY_BIT2 INIT_VM_FLAG(PKEY_BIT2)
452 #if CONFIG_ARCH_PKEY_BITS > 3
453 #define VM_PKEY_BIT3 INIT_VM_FLAG(PKEY_BIT3)
454 #else
455 #define VM_PKEY_BIT3  VM_NONE
456 #endif /* CONFIG_ARCH_PKEY_BITS > 3 */
457 #if CONFIG_ARCH_PKEY_BITS > 4
458 #define VM_PKEY_BIT4 INIT_VM_FLAG(PKEY_BIT4)
459 #else
460 #define VM_PKEY_BIT4  VM_NONE
461 #endif /* CONFIG_ARCH_PKEY_BITS > 4 */
462 #endif /* CONFIG_ARCH_HAS_PKEYS */
463 #if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) || \
464 	defined(CONFIG_RISCV_USER_CFI)
465 #define VM_SHADOW_STACK	INIT_VM_FLAG(SHADOW_STACK)
466 #define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
467 #else
468 #define VM_SHADOW_STACK	VM_NONE
469 #define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
470 #endif
471 #if defined(CONFIG_PPC64)
472 #define VM_SAO		INIT_VM_FLAG(SAO)
473 #elif defined(CONFIG_PARISC)
474 #define VM_GROWSUP	INIT_VM_FLAG(GROWSUP)
475 #elif defined(CONFIG_SPARC64)
476 #define VM_SPARC_ADI	INIT_VM_FLAG(SPARC_ADI)
477 #define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
478 #elif defined(CONFIG_ARM64)
479 #define VM_ARM64_BTI	INIT_VM_FLAG(ARM64_BTI)
480 #define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
481 #elif !defined(CONFIG_MMU)
482 #define VM_MAPPED_COPY	INIT_VM_FLAG(MAPPED_COPY)
483 #endif
484 #ifndef VM_GROWSUP
485 #define VM_GROWSUP	VM_NONE
486 #endif
487 #ifdef CONFIG_ARM64_MTE
488 #define VM_MTE		INIT_VM_FLAG(MTE)
489 #define VM_MTE_ALLOWED	INIT_VM_FLAG(MTE_ALLOWED)
490 #else
491 #define VM_MTE		VM_NONE
492 #define VM_MTE_ALLOWED	VM_NONE
493 #endif
494 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
495 #define VM_UFFD_MINOR	INIT_VM_FLAG(UFFD_MINOR)
496 #else
497 #define VM_UFFD_MINOR	VM_NONE
498 #endif
499 #ifdef CONFIG_64BIT
500 #define VM_ALLOW_ANY_UNCACHED	INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
501 #define VM_SEALED		INIT_VM_FLAG(SEALED)
502 #else
503 #define VM_ALLOW_ANY_UNCACHED	VM_NONE
504 #define VM_SEALED		VM_NONE
505 #endif
506 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
507 #define VM_DROPPABLE		INIT_VM_FLAG(DROPPABLE)
508 #define VMA_DROPPABLE		mk_vma_flags(VMA_DROPPABLE_BIT)
509 #else
510 #define VM_DROPPABLE		VM_NONE
511 #define VMA_DROPPABLE		EMPTY_VMA_FLAGS
512 #endif
513 
514 /* Bits set in the VMA until the stack is in its final location */
515 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
516 
517 #define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \
518 		       VMA_EXEC_BIT : VMA_READ_BIT)
519 
520 /* Common data flag combinations */
521 #define VMA_DATA_FLAGS_TSK_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
522 		TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT,	  \
523 		VMA_MAYEXEC_BIT)
524 #define VMA_DATA_FLAGS_NON_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
525 		VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT)
526 #define VMA_DATA_FLAGS_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
527 		VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT,	  \
528 		VMA_MAYEXEC_BIT)
529 
530 #ifndef VMA_DATA_DEFAULT_FLAGS		/* arch can override this */
531 #define VMA_DATA_DEFAULT_FLAGS  VMA_DATA_FLAGS_EXEC
532 #endif
533 
534 #ifndef VMA_STACK_DEFAULT_FLAGS		/* arch can override this */
535 #define VMA_STACK_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS
536 #endif
537 
538 #define VMA_STACK_FLAGS	append_vma_flags(VMA_STACK_DEFAULT_FLAGS,	\
539 		VMA_STACK_BIT, VMA_ACCOUNT_BIT)
540 
541 /* Temporary until VMA flags conversion complete. */
542 #define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS)
543 
544 #ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
545 #define VM_SEALED_SYSMAP	VM_SEALED
546 #else
547 #define VM_SEALED_SYSMAP	VM_NONE
548 #endif
549 
550 /* VMA basic access permission flags */
551 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
552 #define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)
553 
554 /*
555  * Special vmas that are non-mergable, non-mlock()able.
556  */
557 
558 #define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
559 				       VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)
560 #define VM_SPECIAL vma_flags_to_legacy(VMA_SPECIAL_FLAGS)
561 
562 /*
563  * Physically remapped pages are special. Tell the
564  * rest of the world about it:
565  *   IO tells people not to look at these pages
566  *	(accesses can have side effects).
567  *   PFNMAP tells the core MM that the base pages are just
568  *	raw PFN mappings, and do not have a "struct page" associated
569  *	with them.
570  *   DONTEXPAND
571  *      Disable vma merging and expanding with mremap().
572  *   DONTDUMP
573  *      Omit vma from core dump, even when VM_IO turned off.
574  */
575 #define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT,	\
576 				     VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)
577 
578 /* This mask prevents VMA from being scanned with khugepaged */
579 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
580 
581 /* This mask defines which mm->def_flags a process can inherit its parent */
582 #define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
583 
584 /* This mask represents all the VMA flag bits used by mlock */
585 #define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
586 
587 #define VMA_LOCKED_MASK	mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)
588 
589 /* These flags can be updated atomically via VMA/mmap read lock. */
590 #define VM_ATOMIC_SET_ALLOWED VM_MAYBE_GUARD
591 
592 /* Arch-specific flags to clear when updating VM flags on protection change */
593 #ifndef VM_ARCH_CLEAR
594 #define VM_ARCH_CLEAR	VM_NONE
595 #endif
596 #define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
597 
598 /*
599  * Flags which should be 'sticky' on merge - that is, flags which, when one VMA
600  * possesses it but the other does not, the merged VMA should nonetheless have
601  * applied to it:
602  *
603  *   VMA_SOFTDIRTY_BIT - if a VMA is marked soft-dirty, that is has not had its
604  *                       references cleared via /proc/$pid/clear_refs, any
605  *                       merged VMA should be considered soft-dirty also as it
606  *                       operates at a VMA granularity.
607  *
608  * VMA_MAYBE_GUARD_BIT - If a VMA may have guard regions in place it implies
609  *                       that mapped page tables may contain metadata not
610  *                       described by the VMA and thus any merged VMA may also
611  *                       contain this metadata, and thus we must make this flag
612  *                       sticky.
613  */
614 #ifdef CONFIG_MEM_SOFT_DIRTY
615 #define VMA_STICKY_FLAGS mk_vma_flags(VMA_SOFTDIRTY_BIT, VMA_MAYBE_GUARD_BIT)
616 #else
617 #define VMA_STICKY_FLAGS mk_vma_flags(VMA_MAYBE_GUARD_BIT)
618 #endif
619 
620 /*
621  * VMA flags we ignore for the purposes of merge, i.e. one VMA possessing one
622  * of these flags and the other not does not preclude a merge.
623  *
624  *    VMA_STICKY_FLAGS - When merging VMAs, VMA flags must match, unless they
625  *                       are 'sticky'. If any sticky flags exist in either VMA,
626  *                       we simply set all of them on the merged VMA.
627  */
628 #define VMA_IGNORE_MERGE_FLAGS VMA_STICKY_FLAGS
629 
630 /*
631  * Flags which should result in page tables being copied on fork. These are
 * flags which indicate that the VMA maps page tables which cannot be
 * reconstituted upon page fault, so necessitate page table copying upon fork.
634  *
635  * Note that these flags should be compared with the DESTINATION VMA not the
636  * source, as VM_UFFD_WP may not be propagated to destination, while all other
637  * flags will be.
638  *
639  * VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
640  *                           reasonably reconstructed on page fault.
641  *
642  *              VM_UFFD_WP - Encodes metadata about an installed uffd
643  *                           write protect handler, which cannot be
644  *                           reconstructed on page fault.
645  *
646  *                           We always copy pgtables when dst_vma has uffd-wp
647  *                           enabled even if it's file-backed
648  *                           (e.g. shmem). Because when uffd-wp is enabled,
649  *                           pgtable contains uffd-wp protection information,
650  *                           that's something we can't retrieve from page cache,
651  *                           and skip copying will lose those info.
652  *
653  *          VM_MAYBE_GUARD - Could contain page guard region markers which
654  *                           by design are a property of the page tables
655  *                           only and thus cannot be reconstructed on page
656  *                           fault.
657  */
658 #define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)
659 
660 /*
661  * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
663  */
664 
665 /*
666  * The default fault flags that should be used by most of the
667  * arch-specific page fault handlers.
668  */
669 #define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
670 			     FAULT_FLAG_KILLABLE | \
671 			     FAULT_FLAG_INTERRUPTIBLE)
672 
673 /**
674  * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
675  * @flags: Fault flags.
676  *
677  * This is mostly used for places where we want to try to avoid taking
678  * the mmap_lock for too long a time when waiting for another condition
679  * to change, in which case we can try to be polite to release the
680  * mmap_lock in the first round to avoid potential starvation of other
681  * processes that would also want the mmap_lock.
682  *
683  * Return: true if the page fault allows retry and this is the first
684  * attempt of the fault handling; false otherwise.
685  */
fault_flag_allow_retry_first(enum fault_flag flags)686 static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
687 {
688 	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
689 	    (!(flags & FAULT_FLAG_TRIED));
690 }
691 
692 #define FAULT_FLAG_TRACE \
693 	{ FAULT_FLAG_WRITE,		"WRITE" }, \
694 	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
695 	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
696 	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
697 	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
698 	{ FAULT_FLAG_TRIED,		"TRIED" }, \
699 	{ FAULT_FLAG_USER,		"USER" }, \
700 	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
701 	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
702 	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }, \
703 	{ FAULT_FLAG_VMA_LOCK,		"VMA_LOCK" }
704 
705 /*
706  * vm_fault is filled by the pagefault handler and passed to the vma's
707  * ->fault function. The vma's ->fault is responsible for returning a bitmask
708  * of VM_FAULT_xxx flags that give details about how the fault was handled.
709  *
710  * MM layer fills up gfp_mask for page allocations but fault handler might
711  * alter it if its implementation requires a different allocation context.
712  *
713  * pgoff should be used in favour of virtual_address, if possible.
714  */
struct vm_fault {
	/*
	 * Fault identity, fixed at fault entry; the anonymous const struct
	 * keeps these fields read-only for the rest of the fault path.
	 */
	const struct {
		struct vm_area_struct *vma;	/* Target VMA */
		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
		pgoff_t pgoff;			/* Logical page offset based on vma */
		unsigned long address;		/* Faulting virtual address - masked */
		unsigned long real_address;	/* Faulting virtual address - unmasked */
	};
	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
					 * XXX: should really be 'const' */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	/* Only one of these is meaningful for a given fault. */
	union {
		pte_t orig_pte;		/* Value of PTE at the time of fault */
		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
					 * used by PMD fault only.
					 */
	};

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() sets up a page
					 * table from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};
760 
761 /*
762  * These are the virtual MM functions - opening of an area, closing and
763  * unmapping it (needed to keep files on disk up-to-date etc), pointer
764  * to the functions called when a no-page or a wp-page exception occurs.
765  */
struct vm_operations_struct {
	/**
	 * @open: Called when a VMA is remapped, split or forked. Not called
	 * upon first mapping a VMA.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*open)(struct vm_area_struct *vma);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *vma);
	/**
	 * @mapped: Called when the VMA is first mapped in the MM. Not called if
	 * the new VMA is merged with an adjacent VMA.
	 *
	 * The @vm_private_data field is an output field allowing the user to
	 * modify vma->vm_private_data as necessary.
	 *
	 * ONLY valid if set from f_op->mmap_prepare. Will result in an error if
	 * set from f_op->mmap.
	 *
	 * Returns %0 on success, or an error otherwise. On error, the VMA will
	 * be unmapped.
	 *
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	int (*mapped)(unsigned long start, unsigned long end, pgoff_t pgoff,
		      const struct file *file, void **vm_private_data);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *vma, unsigned long addr);
	/* Called when the VMA is moved by mremap(); returns 0 to allow it. */
	int (*mremap)(struct vm_area_struct *vma);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.   The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	/* Handle a page fault in this VMA; returns a VM_FAULT_* code. */
	vm_fault_t (*fault)(struct vm_fault *vmf);
	/* As @fault, but for a huge mapping of the given @order. */
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	/*
	 * Map a range of pages around the fault address in one go, covering
	 * [start_pgoff, end_pgoff] inclusive (fault-around).
	 */
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	/* Report the page size used by this VMA (see vma_kernel_pagesize()). */
	unsigned long (*pagesize)(struct vm_area_struct *vma);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
#ifdef CONFIG_FIND_NORMAL_PAGE
	/*
	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
	 * allows for returning a "normal" page from vm_normal_page() even
	 * though the PTE indicates that the "struct page" either does not exist
	 * or should not be touched: "special".
	 *
	 * Do not add new users: this really only works when a "normal" page
	 * was mapped, but then the PTE got changed to something weird (+
	 * marked special) that would not make pte_pfn() identify the originally
	 * inserted page.
	 */
	struct page *(*find_normal_page)(struct vm_area_struct *vma,
					 unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
};
869 
#ifdef CONFIG_NUMA_BALANCING
/* Reset per-VMA NUMA-balancing state on a new/forked VMA. */
static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
	vma->numab_state = NULL;
}
/* Release per-VMA NUMA-balancing state (kfree() tolerates NULL). */
static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
	kfree(vma->numab_state);
}
#else
static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
#endif /* CONFIG_NUMA_BALANCING */
883 
884 /*
885  * These must be here rather than mmap_lock.h as dependent on vm_fault type,
886  * declared in this header.
887  */
888 #ifdef CONFIG_PER_VMA_LOCK
release_fault_lock(struct vm_fault * vmf)889 static inline void release_fault_lock(struct vm_fault *vmf)
890 {
891 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
892 		vma_end_read(vmf->vma);
893 	else
894 		mmap_read_unlock(vmf->vma->vm_mm);
895 }
896 
assert_fault_locked(const struct vm_fault * vmf)897 static inline void assert_fault_locked(const struct vm_fault *vmf)
898 {
899 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
900 		vma_assert_locked(vmf->vma);
901 	else
902 		mmap_assert_locked(vmf->vma->vm_mm);
903 }
904 #else
release_fault_lock(struct vm_fault * vmf)905 static inline void release_fault_lock(struct vm_fault *vmf)
906 {
907 	mmap_read_unlock(vmf->vma->vm_mm);
908 }
909 
assert_fault_locked(const struct vm_fault * vmf)910 static inline void assert_fault_locked(const struct vm_fault *vmf)
911 {
912 	mmap_assert_locked(vmf->vma->vm_mm);
913 }
914 #endif /* CONFIG_PER_VMA_LOCK */
915 
mm_flags_test(int flag,const struct mm_struct * mm)916 static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
917 {
918 	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
919 }
920 
mm_flags_test_and_set(int flag,struct mm_struct * mm)921 static inline bool mm_flags_test_and_set(int flag, struct mm_struct *mm)
922 {
923 	return test_and_set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
924 }
925 
mm_flags_test_and_clear(int flag,struct mm_struct * mm)926 static inline bool mm_flags_test_and_clear(int flag, struct mm_struct *mm)
927 {
928 	return test_and_clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
929 }
930 
mm_flags_set(int flag,struct mm_struct * mm)931 static inline void mm_flags_set(int flag, struct mm_struct *mm)
932 {
933 	set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
934 }
935 
mm_flags_clear(int flag,struct mm_struct * mm)936 static inline void mm_flags_clear(int flag, struct mm_struct *mm)
937 {
938 	clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
939 }
940 
mm_flags_clear_all(struct mm_struct * mm)941 static inline void mm_flags_clear_all(struct mm_struct *mm)
942 {
943 	bitmap_zero(ACCESS_PRIVATE(&mm->flags, __mm_flags), NUM_MM_FLAG_BITS);
944 }
945 
946 extern const struct vm_operations_struct vma_dummy_vm_ops;
947 
vma_init(struct vm_area_struct * vma,struct mm_struct * mm)948 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
949 {
950 	memset(vma, 0, sizeof(*vma));
951 	vma->vm_mm = mm;
952 	vma->vm_ops = &vma_dummy_vm_ops;
953 	INIT_LIST_HEAD(&vma->anon_vma_chain);
954 	vma_lock_init(vma, false);
955 }
956 
/* Use when VMA is not part of the VMA tree and needs no locking */
static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	/* Soft-dirty must never be requested if the page tables can't track it. */
	VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
	vma_flags_clear_all(&vma->flags);
	vma_flags_overwrite_word(&vma->flags, flags);
}
965 
966 /*
967  * Use when VMA is part of the VMA tree and modifications need coordination
968  * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
969  * it should be locked explicitly beforehand.
970  */
vm_flags_reset(struct vm_area_struct * vma,vm_flags_t flags)971 static inline void vm_flags_reset(struct vm_area_struct *vma,
972 				  vm_flags_t flags)
973 {
974 	VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
975 	vma_assert_write_locked(vma);
976 	vm_flags_init(vma, flags);
977 }
978 
vma_flags_reset_once(struct vm_area_struct * vma,vma_flags_t * flags)979 static inline void vma_flags_reset_once(struct vm_area_struct *vma,
980 					vma_flags_t *flags)
981 {
982 	const unsigned long word = flags->__vma_flags[0];
983 
984 	/* It is assumed only the first system word must be written once. */
985 	vma_flags_overwrite_word_once(&vma->flags, word);
986 	/* The remainder can be copied normally. */
987 	if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
988 		unsigned long *dst = &vma->flags.__vma_flags[1];
989 		const unsigned long *src = &flags->__vma_flags[1];
990 
991 		bitmap_copy(dst, src, NUM_VMA_FLAG_BITS - BITS_PER_LONG);
992 	}
993 }
994 
vm_flags_set(struct vm_area_struct * vma,vm_flags_t flags)995 static inline void vm_flags_set(struct vm_area_struct *vma,
996 				vm_flags_t flags)
997 {
998 	vma_start_write(vma);
999 	vma_flags_set_word(&vma->flags, flags);
1000 }
1001 
vm_flags_clear(struct vm_area_struct * vma,vm_flags_t flags)1002 static inline void vm_flags_clear(struct vm_area_struct *vma,
1003 				  vm_flags_t flags)
1004 {
1005 	VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
1006 	vma_start_write(vma);
1007 	vma_flags_clear_word(&vma->flags, flags);
1008 }
1009 
1010 /*
1011  * Use only if VMA is not part of the VMA tree or has no other users and
1012  * therefore needs no locking.
1013  */
__vm_flags_mod(struct vm_area_struct * vma,vm_flags_t set,vm_flags_t clear)1014 static inline void __vm_flags_mod(struct vm_area_struct *vma,
1015 				  vm_flags_t set, vm_flags_t clear)
1016 {
1017 	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
1018 }
1019 
1020 /*
1021  * Use only when the order of set/clear operations is unimportant, otherwise
1022  * use vm_flags_{set|clear} explicitly.
1023  */
vm_flags_mod(struct vm_area_struct * vma,vm_flags_t set,vm_flags_t clear)1024 static inline void vm_flags_mod(struct vm_area_struct *vma,
1025 				vm_flags_t set, vm_flags_t clear)
1026 {
1027 	vma_start_write(vma);
1028 	__vm_flags_mod(vma, set, clear);
1029 }
1030 
__vma_atomic_valid_flag(struct vm_area_struct * vma,vma_flag_t bit)1031 static __always_inline bool __vma_atomic_valid_flag(struct vm_area_struct *vma,
1032 		vma_flag_t bit)
1033 {
1034 	const vm_flags_t mask = BIT((__force int)bit);
1035 
1036 	/* Only specific flags are permitted */
1037 	if (WARN_ON_ONCE(!(mask & VM_ATOMIC_SET_ALLOWED)))
1038 		return false;
1039 
1040 	return true;
1041 }
1042 
1043 /*
1044  * Set VMA flag atomically. Requires only VMA/mmap read lock. Only specific
1045  * valid flags are allowed to do this.
1046  */
vma_set_atomic_flag(struct vm_area_struct * vma,vma_flag_t bit)1047 static __always_inline void vma_set_atomic_flag(struct vm_area_struct *vma,
1048 		vma_flag_t bit)
1049 {
1050 	unsigned long *bitmap = vma->flags.__vma_flags;
1051 
1052 	vma_assert_stabilised(vma);
1053 	if (__vma_atomic_valid_flag(vma, bit))
1054 		set_bit((__force int)bit, bitmap);
1055 }
1056 
1057 /*
1058  * Test for VMA flag atomically. Requires no locks. Only specific valid flags
1059  * are allowed to do this.
1060  *
1061  * This is necessarily racey, so callers must ensure that serialisation is
1062  * achieved through some other means, or that races are permissible.
1063  */
vma_test_atomic_flag(struct vm_area_struct * vma,vma_flag_t bit)1064 static __always_inline bool vma_test_atomic_flag(struct vm_area_struct *vma,
1065 		vma_flag_t bit)
1066 {
1067 	if (__vma_atomic_valid_flag(vma, bit))
1068 		return test_bit((__force int)bit, &vma->vm_flags);
1069 
1070 	return false;
1071 }
1072 
1073 /* Set an individual VMA flag in flags, non-atomically. */
vma_flags_set_flag(vma_flags_t * flags,vma_flag_t bit)1074 static __always_inline void vma_flags_set_flag(vma_flags_t *flags,
1075 		vma_flag_t bit)
1076 {
1077 	unsigned long *bitmap = flags->__vma_flags;
1078 
1079 	__set_bit((__force int)bit, bitmap);
1080 }
1081 
__mk_vma_flags(vma_flags_t flags,size_t count,const vma_flag_t * bits)1082 static __always_inline vma_flags_t __mk_vma_flags(vma_flags_t flags,
1083 		size_t count, const vma_flag_t *bits)
1084 {
1085 	int i;
1086 
1087 	for (i = 0; i < count; i++)
1088 		vma_flags_set_flag(&flags, bits[i]);
1089 	return flags;
1090 }
1091 
1092 /*
1093  * Helper macro which bitwise-or combines the specified input flags into a
1094  * vma_flags_t bitmap value. E.g.:
1095  *
1096  * vma_flags_t flags = mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT,
1097  *              VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT);
1098  *
1099  * The compiler cleverly optimises away all of the work and this ends up being
1100  * equivalent to aggregating the values manually.
1101  */
1102 #define mk_vma_flags(...) __mk_vma_flags(EMPTY_VMA_FLAGS,			\
1103 		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
1104 
1105 /*
1106  * Helper macro which acts like mk_vma_flags, only appending to a copy of the
1107  * specified flags rather than establishing new flags. E.g.:
1108  *
1109  * vma_flags_t flags = append_vma_flags(VMA_STACK_DEFAULT_FLAGS, VMA_STACK_BIT,
1110  *              VMA_ACCOUNT_BIT);
1111  */
1112 #define append_vma_flags(flags, ...) __mk_vma_flags(flags,			\
1113 		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
1114 
1115 /* Calculates the number of set bits in the specified VMA flags. */
vma_flags_count(const vma_flags_t * flags)1116 static __always_inline int vma_flags_count(const vma_flags_t *flags)
1117 {
1118 	const unsigned long *bitmap = flags->__vma_flags;
1119 
1120 	return bitmap_weight(bitmap, NUM_VMA_FLAG_BITS);
1121 }
1122 
1123 /*
1124  * Test whether a specific VMA flag is set, e.g.:
1125  *
1126  * if (vma_flags_test(flags, VMA_READ_BIT)) { ... }
1127  */
vma_flags_test(const vma_flags_t * flags,vma_flag_t bit)1128 static __always_inline bool vma_flags_test(const vma_flags_t *flags,
1129 		vma_flag_t bit)
1130 {
1131 	const unsigned long *bitmap = flags->__vma_flags;
1132 
1133 	return test_bit((__force int)bit, bitmap);
1134 }
1135 
1136 /*
1137  * Obtain a set of VMA flags which contain the overlapping flags contained
1138  * within flags and to_and.
1139  */
vma_flags_and_mask(const vma_flags_t * flags,vma_flags_t to_and)1140 static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags,
1141 						      vma_flags_t to_and)
1142 {
1143 	vma_flags_t dst;
1144 	unsigned long *bitmap_dst = dst.__vma_flags;
1145 	const unsigned long *bitmap = flags->__vma_flags;
1146 	const unsigned long *bitmap_to_and = to_and.__vma_flags;
1147 
1148 	bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS);
1149 	return dst;
1150 }
1151 
1152 /*
1153  * Obtain a set of VMA flags which contains the specified overlapping flags,
1154  * e.g.:
1155  *
1156  * vma_flags_t read_flags = vma_flags_and(&flags, VMA_READ_BIT,
1157  *                                        VMA_MAY_READ_BIT);
1158  */
1159 #define vma_flags_and(flags, ...)				\
1160 	vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__))
1161 
/*  Test each of to_test flags in flags, non-atomically. */
static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags,
		vma_flags_t to_test)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_test = to_test.__vma_flags;

	/* True if at least one bit is set in both bitmaps. */
	return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}

/*
 * Test whether any specified VMA flag is set, e.g.:
 *
 * if (vma_flags_test_any(flags, VMA_READ_BIT, VMA_MAYREAD_BIT)) { ... }
 */
#define vma_flags_test_any(flags, ...) \
	vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__))

/* Test that ALL of the to_test flags are set, non-atomically. */
static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
		vma_flags_t to_test)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_test = to_test.__vma_flags;

	/* True if to_test is a subset of flags. */
	return bitmap_subset(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}

/*
 * Test whether ALL specified VMA flags are set, e.g.:
 *
 * if (vma_flags_test_all(flags, VMA_READ_BIT, VMA_MAYREAD_BIT)) { ... }
 */
#define vma_flags_test_all(flags, ...) \
	vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))
1197 
1198 /*
1199  * Helper to test that a flag mask of type vma_flags_t has a SINGLE flag set
1200  * (returning false if flagmask has no flags set).
1201  *
1202  * This is defined to make the semantics clearer when testing an optionally
1203  * defined VMA flags mask, e.g.:
1204  *
1205  * if (vma_flags_test_single_mask(&flags, VMA_DROPPABLE)) { ... }
1206  *
1207  * When VMA_DROPPABLE is defined if available, or set to EMPTY_VMA_FLAGS
1208  * otherwise.
1209  */
vma_flags_test_single_mask(const vma_flags_t * flags,vma_flags_t flagmask)1210 static __always_inline bool vma_flags_test_single_mask(const vma_flags_t *flags,
1211 		vma_flags_t flagmask)
1212 {
1213 	VM_WARN_ON_ONCE(vma_flags_count(&flagmask) > 1);
1214 
1215 	return vma_flags_test_any_mask(flags, flagmask);
1216 }
1217 
/* Set each of the to_set flags in flags, non-atomically. */
static __always_inline void vma_flags_set_mask(vma_flags_t *flags,
		vma_flags_t to_set)
{
	unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_set = to_set.__vma_flags;

	bitmap_or(bitmap, bitmap, bitmap_to_set, NUM_VMA_FLAG_BITS);
}

/*
 * Set all specified VMA flags, e.g.:
 *
 * vma_flags_set(&flags, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
 */
#define vma_flags_set(flags, ...) \
	vma_flags_set_mask(flags, mk_vma_flags(__VA_ARGS__))

/* Clear all of the to-clear flags in flags, non-atomically. */
static __always_inline void vma_flags_clear_mask(vma_flags_t *flags,
		vma_flags_t to_clear)
{
	unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_clear = to_clear.__vma_flags;

	bitmap_andnot(bitmap, bitmap, bitmap_to_clear, NUM_VMA_FLAG_BITS);
}

/*
 * Clear all specified individual flags, e.g.:
 *
 * vma_flags_clear(&flags, VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT);
 */
#define vma_flags_clear(flags, ...) \
	vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))
1253 
1254 /*
1255  * Obtain a VMA flags value containing those flags that are present in flags or
1256  * flags_other but not in both.
1257  */
vma_flags_diff_pair(const vma_flags_t * flags,const vma_flags_t * flags_other)1258 static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags,
1259 		const vma_flags_t *flags_other)
1260 {
1261 	vma_flags_t dst;
1262 	const unsigned long *bitmap_other = flags_other->__vma_flags;
1263 	const unsigned long *bitmap = flags->__vma_flags;
1264 	unsigned long *bitmap_dst = dst.__vma_flags;
1265 
1266 	bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
1267 	return dst;
1268 }
1269 
1270 /* Determine if flags and flags_other have precisely the same flags set. */
vma_flags_same_pair(const vma_flags_t * flags,const vma_flags_t * flags_other)1271 static __always_inline bool vma_flags_same_pair(const vma_flags_t *flags,
1272 						const vma_flags_t *flags_other)
1273 {
1274 	const unsigned long *bitmap = flags->__vma_flags;
1275 	const unsigned long *bitmap_other = flags_other->__vma_flags;
1276 
1277 	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
1278 }
1279 
1280 /* Determine if flags and flags_other have precisely the same flags set.  */
vma_flags_same_mask(const vma_flags_t * flags,vma_flags_t flags_other)1281 static __always_inline bool vma_flags_same_mask(const vma_flags_t *flags,
1282 						vma_flags_t flags_other)
1283 {
1284 	const unsigned long *bitmap = flags->__vma_flags;
1285 	const unsigned long *bitmap_other = flags_other.__vma_flags;
1286 
1287 	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
1288 }
1289 
1290 /*
1291  * Helper macro to determine if only the specific flags are set, e.g.:
1292  *
1293  * if (vma_flags_same(&flags, VMA_WRITE_BIT) { ... }
1294  */
1295 #define vma_flags_same(flags, ...) \
1296 	vma_flags_same_mask(flags, mk_vma_flags(__VA_ARGS__))
1297 
1298 /*
1299  * Test whether a specific flag in the VMA is set, e.g.:
1300  *
1301  * if (vma_test(vma, VMA_READ_BIT)) { ... }
1302  */
vma_test(const struct vm_area_struct * vma,vma_flag_t bit)1303 static __always_inline bool vma_test(const struct vm_area_struct *vma,
1304 		vma_flag_t bit)
1305 {
1306 	return vma_flags_test(&vma->flags, bit);
1307 }
1308 
1309 /* Helper to test any VMA flags in a VMA . */
vma_test_any_mask(const struct vm_area_struct * vma,vma_flags_t flags)1310 static __always_inline bool vma_test_any_mask(const struct vm_area_struct *vma,
1311 		vma_flags_t flags)
1312 {
1313 	return vma_flags_test_any_mask(&vma->flags, flags);
1314 }
1315 
1316 /*
1317  * Helper macro for testing whether any VMA flags are set in a VMA,
1318  * e.g.:
1319  *
1320  * if (vma_test_any(vma, VMA_IO_BIT, VMA_PFNMAP_BIT,
1321  *		VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)) { ... }
1322  */
1323 #define vma_test_any(vma, ...) \
1324 	vma_test_any_mask(vma, mk_vma_flags(__VA_ARGS__))
1325 
1326 /*
1327  * Helper to test that ALL specified flags are set in a VMA.
1328  *
1329  * Note: appropriate locks must be held, this function does not acquire them for
1330  * you.
1331  */
vma_test_all_mask(const struct vm_area_struct * vma,vma_flags_t flags)1332 static __always_inline bool vma_test_all_mask(const struct vm_area_struct *vma,
1333 		vma_flags_t flags)
1334 {
1335 	return vma_flags_test_all_mask(&vma->flags, flags);
1336 }
1337 
1338 /*
1339  * Helper macro for checking that ALL specified flags are set in a VMA, e.g.:
1340  *
1341  * if (vma_test_all(vma, VMA_READ_BIT, VMA_MAYREAD_BIT) { ... }
1342  */
1343 #define vma_test_all(vma, ...) \
1344 	vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__))
1345 
1346 /*
1347  * Helper to test that a flag mask of type vma_flags_t has a SINGLE flag set
1348  * (returning false if flagmask has no flags set).
1349  *
1350  * This is useful when a flag needs to be either defined or not depending upon
1351  * kernel configuration, e.g.:
1352  *
1353  * if (vma_test_single_mask(vma, VMA_DROPPABLE)) { ... }
1354  *
1355  * When VMA_DROPPABLE is defined if available, or set to EMPTY_VMA_FLAGS
1356  * otherwise.
1357  */
1358 static __always_inline bool
vma_test_single_mask(const struct vm_area_struct * vma,vma_flags_t flagmask)1359 vma_test_single_mask(const struct vm_area_struct *vma, vma_flags_t flagmask)
1360 {
1361 	return vma_flags_test_single_mask(&vma->flags, flagmask);
1362 }
1363 
1364 /*
1365  * Helper to set all VMA flags in a VMA.
1366  *
1367  * Note: appropriate locks must be held, this function does not acquire them for
1368  * you.
1369  */
vma_set_flags_mask(struct vm_area_struct * vma,vma_flags_t flags)1370 static __always_inline void vma_set_flags_mask(struct vm_area_struct *vma,
1371 		vma_flags_t flags)
1372 {
1373 	vma_flags_set_mask(&vma->flags, flags);
1374 }
1375 
1376 /*
1377  * Helper macro for specifying VMA flags in a VMA, e.g.:
1378  *
1379  * vma_set_flags(vma, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT,
1380  * 		VMA_DONTDUMP_BIT);
1381  *
1382  * Note: appropriate locks must be held, this function does not acquire them for
1383  * you.
1384  */
1385 #define vma_set_flags(vma, ...) \
1386 	vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
1387 
1388 /* Helper to clear all VMA flags in a VMA. */
vma_clear_flags_mask(struct vm_area_struct * vma,vma_flags_t flags)1389 static __always_inline void vma_clear_flags_mask(struct vm_area_struct *vma,
1390 		vma_flags_t flags)
1391 {
1392 	vma_flags_clear_mask(&vma->flags, flags);
1393 }
1394 
1395 /*
1396  * Helper macro for clearing VMA flags, e.g.:
1397  *
1398  * vma_clear_flags(vma, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT,
1399  * 		VMA_DONTDUMP_BIT);
1400  */
1401 #define vma_clear_flags(vma, ...) \
1402 	vma_clear_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
1403 
1404 /*
1405  * Test whether a specific VMA flag is set in a VMA descriptor, e.g.:
1406  *
1407  * if (vma_desc_test(desc, VMA_READ_BIT)) { ... }
1408  */
vma_desc_test(const struct vm_area_desc * desc,vma_flag_t bit)1409 static __always_inline bool vma_desc_test(const struct vm_area_desc *desc,
1410 		vma_flag_t bit)
1411 {
1412 	return vma_flags_test(&desc->vma_flags, bit);
1413 }
1414 
1415 /* Helper to test any VMA flags in a VMA descriptor. */
vma_desc_test_any_mask(const struct vm_area_desc * desc,vma_flags_t flags)1416 static __always_inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc,
1417 		vma_flags_t flags)
1418 {
1419 	return vma_flags_test_any_mask(&desc->vma_flags, flags);
1420 }
1421 
1422 /*
1423  * Helper macro for testing whether any VMA flags are set in a VMA descriptor,
1424  * e.g.:
1425  *
1426  * if (vma_desc_test_any(desc, VMA_IO_BIT, VMA_PFNMAP_BIT,
1427  *		VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)) { ... }
1428  */
1429 #define vma_desc_test_any(desc, ...) \
1430 	vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__))
1431 
1432 /* Helper to test all VMA flags in a VMA descriptor. */
vma_desc_test_all_mask(const struct vm_area_desc * desc,vma_flags_t flags)1433 static __always_inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc,
1434 		vma_flags_t flags)
1435 {
1436 	return vma_flags_test_all_mask(&desc->vma_flags, flags);
1437 }
1438 
1439 /*
1440  * Helper macro for testing whether ALL VMA flags are set in a VMA descriptor,
1441  * e.g.:
1442  *
1443  * if (vma_desc_test_all(desc, VMA_READ_BIT, VMA_MAYREAD_BIT)) { ... }
1444  */
1445 #define vma_desc_test_all(desc, ...) \
1446 	vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__))
1447 
1448 /* Helper to set all VMA flags in a VMA descriptor. */
vma_desc_set_flags_mask(struct vm_area_desc * desc,vma_flags_t flags)1449 static __always_inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
1450 		vma_flags_t flags)
1451 {
1452 	vma_flags_set_mask(&desc->vma_flags, flags);
1453 }
1454 
1455 /*
1456  * Helper macro for specifying VMA flags for an input pointer to a struct
1457  * vm_area_desc object describing a proposed VMA, e.g.:
1458  *
1459  * vma_desc_set_flags(desc, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT,
1460  * 		VMA_DONTDUMP_BIT);
1461  */
1462 #define vma_desc_set_flags(desc, ...) \
1463 	vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
1464 
1465 /* Helper to clear all VMA flags in a VMA descriptor. */
vma_desc_clear_flags_mask(struct vm_area_desc * desc,vma_flags_t flags)1466 static __always_inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
1467 		vma_flags_t flags)
1468 {
1469 	vma_flags_clear_mask(&desc->vma_flags, flags);
1470 }
1471 
1472 /*
1473  * Helper macro for clearing VMA flags for an input pointer to a struct
1474  * vm_area_desc object describing a proposed VMA, e.g.:
1475  *
1476  * vma_desc_clear_flags(desc, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT,
1477  * 		VMA_DONTDUMP_BIT);
1478  */
1479 #define vma_desc_clear_flags(desc, ...) \
1480 	vma_desc_clear_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
1481 
vma_set_anonymous(struct vm_area_struct * vma)1482 static inline void vma_set_anonymous(struct vm_area_struct *vma)
1483 {
1484 	vma->vm_ops = NULL;
1485 }
1486 
vma_is_anonymous(struct vm_area_struct * vma)1487 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1488 {
1489 	return !vma->vm_ops;
1490 }
1491 
1492 /*
1493  * Indicate if the VMA is a heap for the given task; for
1494  * /proc/PID/maps that is the heap of the main task.
1495  */
vma_is_initial_heap(const struct vm_area_struct * vma)1496 static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
1497 {
1498 	return vma->vm_start < vma->vm_mm->brk &&
1499 		vma->vm_end > vma->vm_mm->start_brk;
1500 }
1501 
1502 /*
1503  * Indicate if the VMA is a stack for the given task; for
1504  * /proc/PID/maps that is the stack of the main task.
1505  */
vma_is_initial_stack(const struct vm_area_struct * vma)1506 static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
1507 {
1508 	/*
1509 	 * We make no effort to guess what a given thread considers to be
1510 	 * its "stack".  It's not even well-defined for programs written
1511 	 * languages like Go.
1512 	 */
1513 	return vma->vm_start <= vma->vm_mm->start_stack &&
1514 		vma->vm_end >= vma->vm_mm->start_stack;
1515 }
1516 
vma_is_temporary_stack(const struct vm_area_struct * vma)1517 static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
1518 {
1519 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1520 
1521 	if (!maybe_stack)
1522 		return false;
1523 
1524 	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1525 						VM_STACK_INCOMPLETE_SETUP)
1526 		return true;
1527 
1528 	return false;
1529 }
1530 
vma_is_foreign(const struct vm_area_struct * vma)1531 static inline bool vma_is_foreign(const struct vm_area_struct *vma)
1532 {
1533 	if (!current->mm)
1534 		return true;
1535 
1536 	if (current->mm != vma->vm_mm)
1537 		return true;
1538 
1539 	return false;
1540 }
1541 
vma_is_accessible(const struct vm_area_struct * vma)1542 static inline bool vma_is_accessible(const struct vm_area_struct *vma)
1543 {
1544 	return vma->vm_flags & VM_ACCESS_FLAGS;
1545 }
1546 
is_shared_maywrite(const vma_flags_t * flags)1547 static inline bool is_shared_maywrite(const vma_flags_t *flags)
1548 {
1549 	return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
1550 }
1551 
vma_is_shared_maywrite(const struct vm_area_struct * vma)1552 static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
1553 {
1554 	return is_shared_maywrite(&vma->flags);
1555 }
1556 
1557 /**
1558  * vma_kernel_pagesize - Default page size granularity for this VMA.
1559  * @vma: The user mapping.
1560  *
1561  * The kernel page size specifies in which granularity VMA modifications
1562  * can be performed. Folios in this VMA will be aligned to, and at least
1563  * the size of the number of bytes returned by this function.
1564  *
1565  * The default kernel page size is not affected by Transparent Huge Pages
1566  * being in effect.
1567  *
1568  * Return: The default page size granularity for this VMA.
1569  */
vma_kernel_pagesize(struct vm_area_struct * vma)1570 static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1571 {
1572 	if (unlikely(vma->vm_ops && vma->vm_ops->pagesize))
1573 		return vma->vm_ops->pagesize(vma);
1574 	return PAGE_SIZE;
1575 }
1576 
1577 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
1578 
/* Find the next VMA below the exclusive end address @max, if any. */
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	/* The maple tree uses inclusive last addresses, hence max - 1. */
	return mas_find(&vmi->mas, max - 1);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

/* Advance to the next range in the tree, whether occupied by a VMA or not. */
static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}


/* Find the previous VMA, if any, searching down to address 0. */
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}
1605 
vma_iter_clear_gfp(struct vma_iterator * vmi,unsigned long start,unsigned long end,gfp_t gfp)1606 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1607 			unsigned long start, unsigned long end, gfp_t gfp)
1608 {
1609 	__mas_set_range(&vmi->mas, start, end - 1);
1610 	mas_store_gfp(&vmi->mas, NULL, gfp);
1611 	if (unlikely(mas_is_err(&vmi->mas)))
1612 		return -ENOMEM;
1613 
1614 	return 0;
1615 }
1616 
1617 /* Free any unused preallocations */
vma_iter_free(struct vma_iterator * vmi)1618 static inline void vma_iter_free(struct vma_iterator *vmi)
1619 {
1620 	mas_destroy(&vmi->mas);
1621 }
1622 
/*
 * Store @vma over its [vm_start, vm_end) range using the iterator's
 * preallocated nodes (bulk mode).
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vmi->mas.index = vma->vm_start;
	vmi->mas.last = vma->vm_end - 1;
	mas_store(&vmi->mas, vma);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	/* The VMA is now reachable through the tree; mark it attached. */
	vma_mark_attached(vma);
	return 0;
}
1635 
/* Drop the iterator's cached tree state; it revalidates on the next walk. */
static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}
1640 
/* Reposition the iterator to address @addr. */
static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}
1645 
/* Iterate over every VMA reachable from @__vmi, in ascending address order. */
#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
1652 
#ifdef CONFIG_SHMEM
/*
 * The vma_is_shmem is not inline because it is used only by slow
 * paths in userfault.
 */
bool vma_is_shmem(const struct vm_area_struct *vma);
bool vma_is_anon_shmem(const struct vm_area_struct *vma);
#else
/* Without CONFIG_SHMEM no VMA can be shmem-backed. */
static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false; }
static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
#endif
1664 
1665 int vma_is_stack_for_current(const struct vm_area_struct *vma);
1666 
1667 /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
1668 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
1669 
1670 struct mmu_gather;
1671 struct inode;
1672 
1673 extern void prep_compound_page(struct page *page, unsigned int order);
1674 
/*
 * Order of a large folio, stored in the low byte of _flags_1.
 * Only meaningful for large folios.
 */
static inline unsigned int folio_large_order(const struct folio *folio)
{
	return folio->_flags_1 & 0xff;
}
1679 
#ifdef NR_PAGES_IN_LARGE_FOLIO
/* Number of pages in a large folio, read from the dedicated field. */
static inline unsigned long folio_large_nr_pages(const struct folio *folio)
{
	return folio->_nr_pages;
}
#else
/* Without the dedicated field, derive the page count from the stored order. */
static inline unsigned long folio_large_nr_pages(const struct folio *folio)
{
	return 1L << folio_large_order(folio);
}
#endif
1691 
/*
 * compound_order() can be called without holding a reference, which means
 * that niceties like page_folio() don't work.  These callers should be
 * prepared to handle wild return values.  For example, PG_head may be
 * set before the order is initialised, or this may be a tail page.
 * See compaction.c for some good examples.
 */
static inline unsigned int compound_order(const struct page *page)
{
	const struct folio *folio = (struct folio *)page;

	/* Non-head pages (base pages and tail pages) report order 0. */
	if (!test_bit(PG_head, &folio->flags.f))
		return 0;
	return folio_large_order(folio);
}
1707 
/**
 * folio_order - The allocation order of a folio.
 * @folio: The folio.
 *
 * A folio is composed of 2^order pages.  See get_order() for the definition
 * of order.
 *
 * Return: The order of the folio.
 */
static inline unsigned int folio_order(const struct folio *folio)
{
	/* A small (non-large) folio is a single page: order 0. */
	if (!folio_test_large(folio))
		return 0;
	return folio_large_order(folio);
}
1723 
/**
 * folio_reset_order - Reset the folio order and derived _nr_pages
 * @folio: The folio.
 *
 * Reset the order and derived _nr_pages to 0. Must only be used in the
 * process of splitting large folios.
 */
static inline void folio_reset_order(struct folio *folio)
{
	if (WARN_ON_ONCE(!folio_test_large(folio)))
		return;
	/* Clear the order byte (low 8 bits of _flags_1). */
	folio->_flags_1 &= ~0xffUL;
#ifdef NR_PAGES_IN_LARGE_FOLIO
	folio->_nr_pages = 0;
#endif
}
1740 
1741 #include <linux/huge_mm.h>
1742 
1743 /*
1744  * Methods to modify the page usage count.
1745  *
1746  * What counts for a page usage:
1747  * - cache mapping   (page->mapping)
1748  * - private data    (page->private)
1749  * - page mapped in a task's page tables, each mapping
1750  *   is counted separately
1751  *
1752  * Also, many kernel routines increase the page count before a critical
1753  * routine so they can be sure the page doesn't go away from under them.
1754  */
1755 
/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	/* Dropping a ref on an already-free page is a caller bug. */
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}
1764 
/* Folio wrapper for put_page_testzero(). */
static inline int folio_put_testzero(struct folio *folio)
{
	return put_page_testzero(&folio->page);
}
1769 
/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline bool get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless_zero(page, 1);
}
1780 
/*
 * Take a reference on @page and return it as a folio, or NULL if the
 * refcount was already zero.  NOTE(review): the direct cast is only a
 * valid folio if @page is not a tail page — callers presumably ensure
 * this, as the name suggests.
 */
static inline struct folio *folio_get_nontail_page(struct page *page)
{
	if (unlikely(!get_page_unless_zero(page)))
		return NULL;
	return (struct folio *)page;
}
1787 
1788 extern int page_is_ram(unsigned long pfn);
1789 
1790 enum {
1791 	REGION_INTERSECTS,
1792 	REGION_DISJOINT,
1793 	REGION_MIXED,
1794 };
1795 
1796 int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
1797 		      unsigned long desc);
1798 
1799 /* Support for virtually mapped pages */
1800 struct page *vmalloc_to_page(const void *addr);
1801 unsigned long vmalloc_to_pfn(const void *addr);
1802 
/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
/* Without an MMU there is no separate vmalloc address range. */
static inline bool is_vmalloc_addr(const void *x)
{
	return false;
}
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif
1822 
/*
 * How many times the entire folio is mapped as a single unit (eg by a
 * PMD or PUD entry).  This is probably not what you want, except for
 * debugging purposes or implementation of other core folio_*() primitives.
 */
static inline int folio_entire_mapcount(const struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
	/* On 32-bit, order-1 folios report no entire mappings. */
	if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1))
		return 0;
	/* _entire_mapcount is stored offset by one (-1 == unmapped). */
	return atomic_read(&folio->_entire_mapcount) + 1;
}
1835 
/* Total mapcount of a large folio; _large_mapcount is stored offset by one. */
static inline int folio_large_mapcount(const struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
	return atomic_read(&folio->_large_mapcount) + 1;
}
1841 
/**
 * folio_mapcount() - Number of mappings of this folio.
 * @folio: The folio.
 *
 * The folio mapcount corresponds to the number of present user page table
 * entries that reference any part of a folio. Each such present user page
 * table entry must be paired with exactly one folio reference.
 *
 * For ordinary folios, each user page table entry (PTE/PMD/PUD/...) counts
 * exactly once.
 *
 * For hugetlb folios, each abstracted "hugetlb" user page table entry that
 * references the entire folio counts exactly once, even when such special
 * page table entries are comprised of multiple ordinary page table entries.
 *
 * Will report 0 for pages which cannot be mapped into userspace, such as
 * slab, page tables and similar.
 *
 * Return: The number of times this folio is mapped.
 */
static inline int folio_mapcount(const struct folio *folio)
{
	int mapcount;

	if (likely(!folio_test_large(folio))) {
		/* _mapcount is offset by one; type values mean "not mappable". */
		mapcount = atomic_read(&folio->_mapcount) + 1;
		if (page_mapcount_is_type(mapcount))
			mapcount = 0;
		return mapcount;
	}
	return folio_large_mapcount(folio);
}
1874 
1875 /**
1876  * folio_mapped - Is this folio mapped into userspace?
1877  * @folio: The folio.
1878  *
1879  * Return: True if any page in this folio is referenced by user page tables.
1880  */
folio_mapped(const struct folio * folio)1881 static inline bool folio_mapped(const struct folio *folio)
1882 {
1883 	return folio_mapcount(folio) >= 1;
1884 }
1885 
/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any sub-page of compound page is mapped,
 * even if this particular sub-page is not itself mapped by any PTE or PMD.
 */
static inline bool page_mapped(const struct page *page)
{
	return folio_mapped(page_folio(page));
}
1895 
/* Translate a directly-mapped virtual address to the head of its compound page. */
static inline struct page *virt_to_head_page(const void *x)
{
	return compound_head(virt_to_page(x));
}
1902 
/* Translate a directly-mapped virtual address to the folio containing it. */
static inline struct folio *virt_to_folio(const void *x)
{
	return page_folio(virt_to_page(x));
}
1909 
1910 void __folio_put(struct folio *folio);
1911 
1912 void split_page(struct page *page, unsigned int order);
1913 void folio_copy(struct folio *dst, struct folio *src);
1914 int folio_mc_copy(struct folio *dst, struct folio *src);
1915 
1916 unsigned long nr_free_buffer_pages(void);
1917 
/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(const struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}
1923 
/* Returns the number of bits needed for the number of bytes in a page */
static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}
1929 
/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	/* Must be called on the head page, never a tail. */
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return compound_order(page);
}
1939 
/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}
1950 
1951 #ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte, vma);
	return pte;
}
1964 
1965 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page);
1966 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
1967 		struct page *page, unsigned int nr, unsigned long addr);
1968 
1969 vm_fault_t finish_fault(struct vm_fault *vmf);
1970 #endif
1971 
1972 /*
1973  * Multiple processes may "see" the same page. E.g. for untouched
1974  * mappings of /dev/null, all processes see the same page full of
1975  * zeroes, and text pages of executables and shared libraries have
1976  * only one copy in memory, at most, normally.
1977  *
1978  * For the non-reserved pages, page_count(page) denotes a reference count.
1979  *   page_count() == 0 means the page is free. page->lru is then used for
1980  *   freelist management in the buddy allocator.
1981  *   page_count() > 0  means the page has been allocated.
1982  *
1983  * Pages are allocated by the slab allocator in order to provide memory
1984  * to kmalloc and kmem_cache_alloc. In this case, the management of the
1985  * page, and the fields in 'struct page' are the responsibility of mm/slab.c
1986  * unless a particular usage is carefully commented. (the responsibility of
1987  * freeing the kmalloc memory is the caller's, of course).
1988  *
1989  * A page may be used by anyone else who does a __get_free_page().
1990  * In this case, page_count still tracks the references, and should only
1991  * be used through the normal accessor functions. The top bits of page->flags
1992  * and page->virtual store page management information, but all other fields
1993  * are unused and could be used privately, carefully. The management of this
1994  * page is the responsibility of the one who allocated it, and those who have
1995  * subsequently been given references to it.
1996  *
1997  * The other pages (we may call them "pagecache pages") are completely
1998  * managed by the Linux memory manager: I/O, buffers, swapping etc.
1999  * The following discussion applies only to them.
2000  *
2001  * A pagecache page contains an opaque `private' member, which belongs to the
2002  * page's address_space. Usually, this is the address of a circular list of
2003  * the page's disk buffers. PG_private must be set to tell the VM to call
2004  * into the filesystem to release these pages.
2005  *
2006  * A folio may belong to an inode's memory mapping. In this case,
2007  * folio->mapping points to the inode, and folio->index is the file
2008  * offset of the folio, in units of PAGE_SIZE.
2009  *
2010  * If pagecache pages are not associated with an inode, they are said to be
2011  * anonymous pages. These may become associated with the swapcache, and in that
2012  * case PG_swapcache is set, and page->private is an offset into the swapcache.
2013  *
2014  * In either case (swapcache or inode backed), the pagecache itself holds one
2015  * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
2017  *
2018  * The pagecache pages are stored in a per-mapping radix tree, which is
2019  * rooted at mapping->i_pages, and indexed by offset.
2020  * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
2021  * lists, we instead now tag pages as dirty/writeback in the radix tree.
2022  *
2023  * All pagecache pages may be subject to I/O:
2024  * - inode pages may need to be read from disk,
2025  * - inode pages which have been modified and are MAP_SHARED may need
2026  *   to be written back to the inode on disk,
2027  * - anonymous pages (including MAP_PRIVATE file mappings) which have been
2028  *   modified may need to be swapped out to swap space and (later) to be read
2029  *   back into memory.
2030  */
2031 
/*
 * True when the refcount is 0 or so large that adding 127 wraps it, i.e.
 * the count is either zero or within 127 of overflowing.
 */
/* 127: arbitrary random number, small enough to assemble well */
#define folio_ref_zero_or_close_to_overflow(folio) \
	((unsigned int) folio_ref_count(folio) + 127u <= 127u)
2035 
/**
 * folio_get - Increment the reference count on a folio.
 * @folio: The folio.
 *
 * Context: May be called in any context, as long as you know that
 * you have a refcount on the folio.  If you do not already have one,
 * folio_try_get() may be the right interface for you to use.
 */
static inline void folio_get(struct folio *folio)
{
	/* Catch refcount underflow/overflow bugs before incrementing. */
	VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
	folio_ref_inc(folio);
}
2049 
/* Take a reference on @page's folio; warns and refuses for slab/kmalloc memory. */
static inline void get_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	/* Taking page refs on slab or large kmalloc memory is a caller bug. */
	if (WARN_ON_ONCE(folio_test_slab(folio)))
		return;
	if (WARN_ON_ONCE(folio_test_large_kmalloc(folio)))
		return;
	folio_get(folio);
}
2059 
/* Like get_page(), but return false (with a warning) if the refcount is not positive. */
static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}
2068 
/**
 * folio_put - Decrement the reference count on a folio.
 * @folio: The folio.
 *
 * If the folio's reference count reaches zero, the memory will be
 * released back to the page allocator and may be used by another
 * allocation immediately.  Do not access the memory or the struct folio
 * after calling folio_put() unless you can be sure that it wasn't the
 * last reference.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folio_put(struct folio *folio)
{
	/* We held the last reference: free the folio. */
	if (folio_put_testzero(folio))
		__folio_put(folio);
}
2087 
/**
 * folio_put_refs - Reduce the reference count on a folio.
 * @folio: The folio.
 * @refs: The amount to subtract from the folio's reference count.
 *
 * If the folio's reference count reaches zero, the memory will be
 * released back to the page allocator and may be used by another
 * allocation immediately.  Do not access the memory or the struct folio
 * after calling folio_put_refs() unless you can be sure that these weren't
 * the last references.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folio_put_refs(struct folio *folio, int refs)
{
	/* We held the last references: free the folio. */
	if (folio_ref_sub_and_test(folio, refs))
		__folio_put(folio);
}
2107 
2108 void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
2109 
2110 /*
2111  * union release_pages_arg - an array of pages or folios
2112  *
2113  * release_pages() releases a simple array of multiple pages, and
2114  * accepts various different forms of said page array: either
2115  * a regular old boring array of pages, an array of folios, or
2116  * an array of encoded page pointers.
2117  *
2118  * The transparent union syntax for this kind of "any of these
2119  * argument types" is all kinds of ugly, so look away.
2120  */
2121 typedef union {
2122 	struct page **pages;
2123 	struct folio **folios;
2124 	struct encoded_page **encoded_pages;
2125 } release_pages_arg __attribute__ ((__transparent_union__));
2126 
2127 void release_pages(release_pages_arg, int nr);
2128 
/**
 * folios_put - Decrement the reference count on an array of folios.
 * @folios: The folios.
 *
 * Like folio_put(), but for a batch of folios.  This is more efficient
 * than writing the loop yourself as it will optimise the locks which need
 * to be taken if the folios are freed.  The folios batch is returned
 * empty and ready to be reused for another batch; there is no need to
 * reinitialise it.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folios_put(struct folio_batch *folios)
{
	/* NULL refs: drop exactly one reference per folio. */
	folios_put_refs(folios, NULL);
}
2146 
/* Drop a reference on @page's folio; silently ignores slab/large-kmalloc folios. */
static inline void put_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	/* Do not drop references on slab or large kmalloc folios. */
	if (folio_test_slab(folio) || folio_test_large_kmalloc(folio))
		return;

	folio_put(folio);
}
2156 
2157 /*
2158  * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
2159  * the page's refcount so that two separate items are tracked: the original page
2160  * reference count, and also a new count of how many pin_user_pages() calls were
2161  * made against the page. ("gup-pinned" is another term for the latter).
2162  *
2163  * With this scheme, pin_user_pages() becomes special: such pages are marked as
2164  * distinct from normal pages. As such, the unpin_user_page() call (and its
2165  * variants) must be used in order to release gup-pinned pages.
2166  *
2167  * Choice of value:
2168  *
2169  * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
2170  * counts with respect to pin_user_pages() and unpin_user_page() becomes
2171  * simpler, due to the fact that adding an even power of two to the page
2172  * refcount has the effect of using only the upper N bits, for the code that
2173  * counts up using the bias value. This means that the lower bits are left for
2174  * the exclusive use of the original code that increments and decrements by one
2175  * (or at least, by much smaller values than the bias value).
2176  *
2177  * Of course, once the lower bits overflow into the upper bits (and this is
2178  * OK, because subtraction recovers the original values), then visual inspection
2179  * no longer suffices to directly view the separate counts. However, for normal
2180  * applications that don't have huge page reference counts, this won't be an
2181  * issue.
2182  *
2183  * Locking: the lockless algorithm described in folio_try_get_rcu()
2184  * provides safe operation for get_user_pages(), folio_mkclean() and
2185  * other calls that race to set up page table entries.
2186  */
2187 #define GUP_PIN_COUNTING_BIAS (1U << 10)
2188 
2189 void unpin_user_page(struct page *page);
2190 void unpin_folio(struct folio *folio);
2191 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
2192 				 bool make_dirty);
2193 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
2194 				      bool make_dirty);
2195 void unpin_user_pages(struct page **pages, unsigned long npages);
2196 void unpin_user_folio(struct folio *folio, unsigned long npages);
2197 void unpin_folios(struct folio **folios, unsigned long nfolios);
2198 
is_cow_mapping(vm_flags_t flags)2199 static inline bool is_cow_mapping(vm_flags_t flags)
2200 {
2201 	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2202 }
2203 
/* VMA-descriptor flavour of is_cow_mapping(): MAYWRITE set while SHARED clear. */
static inline bool vma_desc_is_cow_mapping(struct vm_area_desc *desc)
{
	const vma_flags_t *flags = &desc->vma_flags;

	return vma_flags_test(flags, VMA_MAYWRITE_BIT) &&
		!vma_flags_test(flags, VMA_SHARED_BIT);
}
2211 
#ifndef CONFIG_MMU
static inline bool is_nommu_shared_mapping(vm_flags_t flags)
{
	/*
	 * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
	 * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
	 * a file mapping. R/O MAP_PRIVATE mappings might still modify
	 * underlying memory if ptrace is active, so this is only possible if
	 * ptrace does not apply. Note that there is no mprotect() to upgrade
	 * write permissions later.
	 */
	return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
}

/* vma_flags_t flavour of is_nommu_shared_mapping(). */
static inline bool is_nommu_shared_vma_flags(const vma_flags_t *flags)
{
	return vma_flags_test_any(flags, VMA_MAYSHARE_BIT, VMA_MAYOVERLAY_BIT);
}
#endif
2231 
2232 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
2233 #define SECTION_IN_PAGE_FLAGS
2234 #endif
2235 
/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags.f >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
2248 
#ifdef NODE_NOT_IN_PAGE_FLAGS
int memdesc_nid(memdesc_flags_t mdf);
#else
/* Extract the NUMA node id packed into a memdesc flags word. */
static inline int memdesc_nid(memdesc_flags_t mdf)
{
	return (mdf.f >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
2257 
/* NUMA node id of @page; also sanity-checks that the page is not poisoned. */
static inline int page_to_nid(const struct page *page)
{
	return memdesc_nid(PF_POISONED_CHECK(page)->flags);
}
2262 
/* NUMA node id of @folio. */
static inline int folio_nid(const struct folio *folio)
{
	return memdesc_nid(folio->flags);
}
2267 
#ifdef CONFIG_NUMA_BALANCING
/* page access time bits needs to hold at least 4 seconds */
#define PAGE_ACCESS_TIME_MIN_BITS	12
#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
#define PAGE_ACCESS_TIME_BUCKETS				\
	(PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
#else
#define PAGE_ACCESS_TIME_BUCKETS	0
#endif

#define PAGE_ACCESS_TIME_MASK				\
	(LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)

/* Pack a CPU number and a (truncated) PID into a single cpupid value. */
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

/* Extract the (truncated) PID field of a cpupid value. */
static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

/* Extract the CPU field of a cpupid value. */
static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

/* NUMA node of the CPU recorded in a cpupid value. */
static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

/* True if the PID field still holds the all-ones "unset" marker. */
static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

/* True if the CPU field still holds the all-ones "unset" marker. */
static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

/* Compare a task's PID (truncated) against the PID field of a cpupid. */
static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
/* The cpupid lives in its own folio field; exchange it atomically. */
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
	return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int folio_last_cpupid(struct folio *folio)
{
	return folio->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
/* The cpupid is packed into folio->flags; extract the relevant bits. */
static inline int folio_last_cpupid(struct folio *folio)
{
	return (folio->flags.f >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

int folio_xchg_last_cpupid(struct folio *folio, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags.f |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */

/*
 * Store a bucketed access time in the cpupid field and return the previous
 * value, scaled back to the original time units.
 */
static inline int folio_xchg_access_time(struct folio *folio, int time)
{
	int last_time;

	last_time = folio_xchg_last_cpupid(folio,
					   time >> PAGE_ACCESS_TIME_BUCKETS);
	return last_time << PAGE_ACCESS_TIME_BUCKETS;
}

/* Record (via a hashed PID bit) that the current task accessed this VMA. */
static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
{
	unsigned int pid_bit;

	pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
	if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
		__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
	}
}

bool folio_use_access_time(struct folio *folio);
#else /* !CONFIG_NUMA_BALANCING */
/* Without NUMA balancing these helpers degrade to no-ops / constant stubs. */
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
	return folio_nid(folio); /* XXX */
}

static inline int folio_xchg_access_time(struct folio *folio, int time)
{
	return 0;
}

static inline int folio_last_cpupid(struct folio *folio)
{
	return folio_nid(folio); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return true;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}

static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
{
}
static inline bool folio_use_access_time(struct folio *folio)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */
2423 
2424 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
2425 
2426 /*
2427  * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
2428  * setting tags for all pages to native kernel tag value 0xff, as the default
2429  * value 0x00 maps to 0xff.
2430  */
2431 
page_kasan_tag(const struct page * page)2432 static inline u8 page_kasan_tag(const struct page *page)
2433 {
2434 	u8 tag = KASAN_TAG_KERNEL;
2435 
2436 	if (kasan_enabled()) {
2437 		tag = (page->flags.f >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
2438 		tag ^= 0xff;
2439 	}
2440 
2441 	return tag;
2442 }
2443 
page_kasan_tag_set(struct page * page,u8 tag)2444 static inline void page_kasan_tag_set(struct page *page, u8 tag)
2445 {
2446 	unsigned long old_flags, flags;
2447 
2448 	if (!kasan_enabled())
2449 		return;
2450 
2451 	tag ^= 0xff;
2452 	old_flags = READ_ONCE(page->flags.f);
2453 	do {
2454 		flags = old_flags;
2455 		flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
2456 		flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
2457 	} while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
2458 }
2459 
/* Reset the page's KASAN tag to the native kernel tag. */
static inline void page_kasan_tag_reset(struct page *page)
{
	if (kasan_enabled())
		page_kasan_tag_set(page, KASAN_TAG_KERNEL);
}
2465 
2466 #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
2467 
/* No KASAN tag-based mode: every page reports the native tag 0xff. */
static inline u8 page_kasan_tag(const struct page *page)
{
	return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
2475 
2476 #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
2477 
/* Zone of a page: node data indexed by the zone number from page flags. */
static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

/* NUMA node descriptor (pg_data_t) this page belongs to. */
static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

/* NUMA node descriptor (pg_data_t) this folio belongs to. */
static inline pg_data_t *folio_pgdat(const struct folio *folio)
{
	return NODE_DATA(folio_nid(folio));
}

/* Zone this folio belongs to. */
static inline struct zone *folio_zone(const struct folio *folio)
{
	return &folio_pgdat(folio)->node_zones[folio_zonenum(folio)];
}
2497 
2498 #ifdef SECTION_IN_PAGE_FLAGS
/* Store the sparsemem section number in the page flags. */
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags.f &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags.f |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

/* Extract the sparsemem section number from memdesc flags. */
static inline unsigned long memdesc_section(memdesc_flags_t mdf)
{
	return (mdf.f >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
2509 #else /* !SECTION_IN_PAGE_FLAGS */
/* No section number is stored in the page flags in this configuration. */
static inline unsigned long memdesc_section(memdesc_flags_t mdf)
{
	return 0;
}
2514 #endif /* SECTION_IN_PAGE_FLAGS */
2515 
2516 /**
2517  * folio_pfn - Return the Page Frame Number of a folio.
2518  * @folio: The folio.
2519  *
2520  * A folio may contain multiple pages.  The pages have consecutive
2521  * Page Frame Numbers.
2522  *
2523  * Return: The Page Frame Number of the first page in the folio.
2524  */
static inline unsigned long folio_pfn(const struct folio *folio)
{
	/* A folio's PFN is that of its head page; see kernel-doc above. */
	return page_to_pfn(&folio->page);
}

/* Convert a PFN to the folio containing that page. */
static inline struct folio *pfn_folio(unsigned long pfn)
{
	return page_folio(pfn_to_page(pfn));
}
2534 
2535 #ifdef CONFIG_MMU
/* Build a PTE mapping @page with protection bits @pgprot. */
static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
{
	return pfn_pte(page_to_pfn(page), pgprot);
}
2540 
2541 /**
2542  * folio_mk_pte - Create a PTE for this folio
2543  * @folio: The folio to create a PTE for
2544  * @pgprot: The page protection bits to use
2545  *
2546  * Create a page table entry for the first page of this folio.
2547  * This is suitable for passing to set_ptes().
2548  *
2549  * Return: A page table entry suitable for mapping this folio.
2550  */
static inline pte_t folio_mk_pte(const struct folio *folio, pgprot_t pgprot)
{
	/* Maps the folio's first page; see the kernel-doc above. */
	return pfn_pte(folio_pfn(folio), pgprot);
}
2555 
2556 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2557 /**
2558  * folio_mk_pmd - Create a PMD for this folio
2559  * @folio: The folio to create a PMD for
2560  * @pgprot: The page protection bits to use
2561  *
2562  * Create a page table entry for the first page of this folio.
2563  * This is suitable for passing to set_pmd_at().
2564  *
2565  * Return: A page table entry suitable for mapping this folio.
2566  */
static inline pmd_t folio_mk_pmd(const struct folio *folio, pgprot_t pgprot)
{
	/* pmd_mkhuge() marks the entry as a huge-page mapping. */
	return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
}
2571 
2572 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2573 /**
2574  * folio_mk_pud - Create a PUD for this folio
2575  * @folio: The folio to create a PUD for
2576  * @pgprot: The page protection bits to use
2577  *
2578  * Create a page table entry for the first page of this folio.
2579  * This is suitable for passing to set_pud_at().
2580  *
2581  * Return: A page table entry suitable for mapping this folio.
2582  */
static inline pud_t folio_mk_pud(const struct folio *folio, pgprot_t pgprot)
{
	/* pud_mkhuge() marks the entry as a huge-page mapping. */
	return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
}
2587 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2588 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2589 #endif /* CONFIG_MMU */
2590 
folio_has_pincount(const struct folio * folio)2591 static inline bool folio_has_pincount(const struct folio *folio)
2592 {
2593 	if (IS_ENABLED(CONFIG_64BIT))
2594 		return folio_test_large(folio);
2595 	return folio_order(folio) > 1;
2596 }
2597 
2598 /**
2599  * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
2600  * @folio: The folio.
2601  *
2602  * This function checks if a folio has been pinned via a call to
2603  * a function in the pin_user_pages() family.
2604  *
2605  * For small folios, the return value is partially fuzzy: false is not fuzzy,
2606  * because it means "definitely not pinned for DMA", but true means "probably
2607  * pinned for DMA, but possibly a false positive due to having at least
2608  * GUP_PIN_COUNTING_BIAS worth of normal folio references".
2609  *
2610  * False positives are OK, because: a) it's unlikely for a folio to
2611  * get that many refcounts, and b) all the callers of this routine are
2612  * expected to be able to deal gracefully with a false positive.
2613  *
2614  * For most large folios, the result will be exactly correct. That's because
2615  * we have more tracking data available: the _pincount field is used
2616  * instead of the GUP_PIN_COUNTING_BIAS scheme.
2617  *
2618  * For more information, please see Documentation/core-api/pin_user_pages.rst.
2619  *
2620  * Return: True, if it is likely that the folio has been "dma-pinned".
2621  * False, if the folio is definitely not dma-pinned.
2622  */
static inline bool folio_maybe_dma_pinned(struct folio *folio)
{
	/* Folios with a dedicated pin counter give an exact answer. */
	if (folio_has_pincount(folio))
		return atomic_read(&folio->_pincount) > 0;

	/*
	 * folio_ref_count() is signed. If that refcount overflows, then
	 * folio_ref_count() returns a negative value, and callers will avoid
	 * further incrementing the refcount.
	 *
	 * Here, for that overflow case, use the sign bit to count a little
	 * bit higher via unsigned math, and thus still get an accurate result.
	 */
	return ((unsigned int)folio_ref_count(folio)) >=
		GUP_PIN_COUNTING_BIAS;
}
2639 
2640 /*
2641  * This should most likely only be called during fork() to see whether we
2642  * should break the cow immediately for an anon page on the src mm.
2643  *
 * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
2645  */
static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
					  struct folio *folio)
{
	/* Caller must hold write_protect_seq (odd count == write held). */
	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));

	/* If this mm never pinned anything, no pinned folio can exist. */
	if (!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm))
		return false;

	return folio_maybe_dma_pinned(folio);
}
2656 
2657 /**
2658  * is_zero_page - Query if a page is a zero page
2659  * @page: The page to query
2660  *
2661  * This returns true if @page is one of the permanent zero pages.
2662  */
static inline bool is_zero_page(const struct page *page)
{
	/* Compare against the permanent zero-page PFN(s). */
	return is_zero_pfn(page_to_pfn(page));
}
2667 
2668 /**
2669  * is_zero_folio - Query if a folio is a zero page
2670  * @folio: The folio to query
2671  *
2672  * This returns true if @folio is one of the permanent zero pages.
2673  */
static inline bool is_zero_folio(const struct folio *folio)
{
	/* A folio is a zero folio iff its head page is a zero page. */
	return is_zero_page(&folio->page);
}
2678 
2679 /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
2680 #ifdef CONFIG_MIGRATION
folio_is_longterm_pinnable(struct folio * folio)2681 static inline bool folio_is_longterm_pinnable(struct folio *folio)
2682 {
2683 #ifdef CONFIG_CMA
2684 	int mt = folio_migratetype(folio);
2685 
2686 	if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
2687 		return false;
2688 #endif
2689 	/* The zero page can be "pinned" but gets special handling. */
2690 	if (is_zero_folio(folio))
2691 		return true;
2692 
2693 	/* Coherent device memory must always allow eviction. */
2694 	if (folio_is_device_coherent(folio))
2695 		return false;
2696 
2697 	/*
2698 	 * Filesystems can only tolerate transient delays to truncate and
2699 	 * hole-punch operations
2700 	 */
2701 	if (folio_is_fsdax(folio))
2702 		return false;
2703 
2704 	/* Otherwise, non-movable zone folios can be pinned. */
2705 	return !folio_is_zone_movable(folio);
2706 
2707 }
2708 #else
/* Without CONFIG_MIGRATION there is no CMA/movable handling to respect. */
static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
	return true;
}
2713 #endif
2714 
/* Encode the zone number into the page flags. */
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags.f &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

/* Encode the NUMA node number into the page flags. */
static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags.f &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT;
}

/*
 * Initialise the zone/node (and, when the section number lives in the
 * page flags, the sparsemem section) links of a struct page.
 */
static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}
2736 
2737 /**
2738  * folio_nr_pages - The number of pages in the folio.
2739  * @folio: The folio.
2740  *
2741  * Return: A positive power of two.
2742  */
static inline unsigned long folio_nr_pages(const struct folio *folio)
{
	/* Small folios are a single page; large folios store their count. */
	if (!folio_test_large(folio))
		return 1;
	return folio_large_nr_pages(folio);
}
2749 
2750 /*
2751  * compound_nr() returns the number of pages in this potentially compound
2752  * page.  compound_nr() can be called on a tail page, and is defined to
2753  * return 1 in that case.
2754  */
static inline unsigned long compound_nr(const struct page *page)
{
	const struct folio *folio = (struct folio *)page;

	/* Tail pages (and order-0 pages) do not have PG_head set. */
	if (!test_bit(PG_head, &folio->flags.f))
		return 1;
	return folio_large_nr_pages(folio);
}
2763 
2764 /**
2765  * folio_next - Move to the next physical folio.
2766  * @folio: The folio we're currently operating on.
2767  *
2768  * If you have physically contiguous memory which may span more than
2769  * one folio (eg a &struct bio_vec), use this function to move from one
2770  * folio to the next.  Do not use it if the memory is only virtually
2771  * contiguous as the folios are almost certainly not adjacent to each
2772  * other.  This is the folio equivalent to writing ``page++``.
2773  *
2774  * Context: We assume that the folios are refcounted and/or locked at a
2775  * higher level and do not adjust the reference counts.
2776  * Return: The next struct folio.
2777  */
static inline struct folio *folio_next(struct folio *folio)
{
	/* Step past all of this folio's pages to the next struct folio. */
	return (struct folio *)folio_page(folio, folio_nr_pages(folio));
}
2782 
2783 /**
2784  * folio_shift - The size of the memory described by this folio.
2785  * @folio: The folio.
2786  *
2787  * A folio represents a number of bytes which is a power-of-two in size.
2788  * This function tells you which power-of-two the folio is.  See also
2789  * folio_size() and folio_order().
2790  *
2791  * Context: The caller should have a reference on the folio to prevent
2792  * it from being split.  It is not necessary for the folio to be locked.
2793  * Return: The base-2 logarithm of the size of this folio.
2794  */
static inline unsigned int folio_shift(const struct folio *folio)
{
	/* Base-2 log of the folio size: page shift plus folio order. */
	return PAGE_SHIFT + folio_order(folio);
}
2799 
2800 /**
2801  * folio_size - The number of bytes in a folio.
2802  * @folio: The folio.
2803  *
2804  * Context: The caller should have a reference on the folio to prevent
2805  * it from being split.  It is not necessary for the folio to be locked.
2806  * Return: The number of bytes in this folio.
2807  */
static inline size_t folio_size(const struct folio *folio)
{
	/* Size in bytes: one page scaled up by the folio order. */
	return PAGE_SIZE << folio_order(folio);
}
2812 
2813 /**
2814  * folio_maybe_mapped_shared - Whether the folio is mapped into the page
2815  *			       tables of more than one MM
2816  * @folio: The folio.
2817  *
2818  * This function checks if the folio maybe currently mapped into more than one
2819  * MM ("maybe mapped shared"), or if the folio is certainly mapped into a single
2820  * MM ("mapped exclusively").
2821  *
2822  * For KSM folios, this function also returns "mapped shared" when a folio is
2823  * mapped multiple times into the same MM, because the individual page mappings
2824  * are independent.
2825  *
2826  * For small anonymous folios and anonymous hugetlb folios, the return
2827  * value will be exactly correct: non-KSM folios can only be mapped at most once
2828  * into an MM, and they cannot be partially mapped. KSM folios are
2829  * considered shared even if mapped multiple times into the same MM.
2830  *
2831  * For other folios, the result can be fuzzy:
2832  *    #. For partially-mappable large folios (THP), the return value can wrongly
2833  *       indicate "mapped shared" (false positive) if a folio was mapped by
2834  *       more than two MMs at one point in time.
2835  *    #. For pagecache folios (including hugetlb), the return value can wrongly
2836  *       indicate "mapped shared" (false positive) when two VMAs in the same MM
2837  *       cover the same file range.
2838  *
2839  * Further, this function only considers current page table mappings that
2840  * are tracked using the folio mapcount(s).
2841  *
2842  * This function does not consider:
2843  *    #. If the folio might get mapped in the (near) future (e.g., swapcache,
2844  *       pagecache, temporary unmapping for migration).
2845  *    #. If the folio is mapped differently (VM_PFNMAP).
2846  *    #. If hugetlb page table sharing applies. Callers might want to check
2847  *       hugetlb_pmd_shared().
2848  *
2849  * Return: Whether the folio is estimated to be mapped into more than one MM.
2850  */
static inline bool folio_maybe_mapped_shared(struct folio *folio)
{
	int mapcount = folio_mapcount(folio);

	/* Only partially-mappable folios require more care. */
	if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
		return mapcount > 1;

	/*
	 * vm_insert_page() without CONFIG_TRANSPARENT_HUGEPAGE ...
	 * simply assume "mapped shared", nobody should really care
	 * about this for arbitrary kernel allocations.
	 */
	if (!IS_ENABLED(CONFIG_MM_ID))
		return true;

	/*
	 * A single mapping implies "mapped exclusively", even if the
	 * folio flag says something different: it's easier to handle this
	 * case here instead of on the RMAP hot path.
	 */
	if (mapcount <= 1)
		return false;
	/* Otherwise trust the sharing bit maintained in folio->_mm_ids. */
	return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
}
2876 
2877 /**
2878  * folio_expected_ref_count - calculate the expected folio refcount
2879  * @folio: the folio
2880  *
2881  * Calculate the expected folio refcount, taking references from the pagecache,
2882  * swapcache, PG_private and page table mappings into account. Useful in
2883  * combination with folio_ref_count() to detect unexpected references (e.g.,
2884  * GUP or other temporary references).
2885  *
2886  * Does currently not consider references from the LRU cache. If the folio
2887  * was isolated from the LRU (which is the case during migration or split),
2888  * the LRU cache does not apply.
2889  *
2890  * Calling this function on an unmapped folio -- !folio_mapped() -- that is
2891  * locked will return a stable result.
2892  *
2893  * Calling this function on a mapped folio will not result in a stable result,
2894  * because nothing stops additional page table mappings from coming (e.g.,
2895  * fork()) or going (e.g., munmap()).
2896  *
2897  * Calling this function without the folio lock will also not result in a
2898  * stable result: for example, the folio might get dropped from the swapcache
2899  * concurrently.
2900  *
2901  * However, even when called without the folio lock or on a mapped folio,
2902  * this function can be used to detect unexpected references early (for example,
2903  * if it makes sense to even lock the folio and unmap it).
2904  *
2905  * The caller must add any reference (e.g., from folio_try_get()) it might be
2906  * holding itself to the result.
2907  *
2908  * Returns: the expected folio refcount.
2909  */
static inline int folio_expected_ref_count(const struct folio *folio)
{
	const int order = folio_order(folio);
	int ref_count = 0;

	/* Typed pages (other than hugetlb) are not expected here. */
	if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
		return 0;

	/* One reference per page from the swapcache. */
	ref_count += folio_test_swapcache(folio) << order;

	if (!folio_test_anon(folio)) {
		/* One reference per page from the pagecache. */
		ref_count += !!folio->mapping << order;
		/* One reference from PG_private. */
		ref_count += folio_test_private(folio);
	}

	/* One reference per page table mapping. */
	return ref_count + folio_mapcount(folio);
}
2931 
2932 #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
/* Default no-op; architectures may provide their own implementation. */
static inline int arch_make_folio_accessible(struct folio *folio)
{
	return 0;
}
2937 #endif
2938 
2939 /*
2940  * Some inline functions in vmstat.h depend on page_zone()
2941  */
2942 #include <linux/vmstat.h>
2943 
2944 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
2945 #define HASHED_PAGE_VIRTUAL
2946 #endif
2947 
2948 #if defined(WANT_PAGE_VIRTUAL)
/* With WANT_PAGE_VIRTUAL the kernel virtual address is cached in struct page. */
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}

static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
2957 #define page_address_init()  do { } while(0)
2958 #endif
2959 
2960 #if defined(HASHED_PAGE_VIRTUAL)
2961 void *page_address(const struct page *page);
2962 void set_page_address(struct page *page, void *virtual);
2963 void page_address_init(void);
2964 #endif
2965 
/* Direct-mapped (lowmem) pages: translate via page_to_virt(). */
static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}
2970 
2971 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
2972 #define page_address(page) lowmem_page_address(page)
2973 #define set_page_address(page, address)  do { } while(0)
2974 #define page_address_init()  do { } while(0)
2975 #endif
2976 
static inline void *folio_address(const struct folio *folio)
{
	/* The folio's address is that of its first (head) page. */
	return page_address(&folio->page);
}
2981 
2982 /*
2983  * Return true only if the page has been allocated with
2984  * ALLOC_NO_WATERMARKS and the low watermark was not
2985  * met implying that the system is under some pressure.
2986  */
static inline bool page_is_pfmemalloc(const struct page *page)
{
	/*
	 * lru.next has bit 1 set if the page is allocated from the
	 * pfmemalloc reserves.  Callers may simply overwrite it if
	 * they do not need to preserve that information.
	 * Matches the marker written by set_page_pfmemalloc() below.
	 */
	return (uintptr_t)page->lru.next & BIT(1);
}
2996 
2997 /*
2998  * Return true only if the folio has been allocated with
2999  * ALLOC_NO_WATERMARKS and the low watermark was not
3000  * met implying that the system is under some pressure.
3001  */
/* Folio flavour of page_is_pfmemalloc(). */
static inline bool folio_is_pfmemalloc(const struct folio *folio)
{
	/*
	 * lru.next has bit 1 set if the page is allocated from the
	 * pfmemalloc reserves.  Callers may simply overwrite it if
	 * they do not need to preserve that information.
	 */
	return (uintptr_t)folio->lru.next & BIT(1);
}
3011 
3012 /*
3013  * Only to be called by the page allocator on a freshly allocated
3014  * page.
3015  */
/* Mark the page as pfmemalloc-backed (bit 1 of lru.next). */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->lru.next = (void *)BIT(1);
}

/* Clear the pfmemalloc marker. */
static inline void clear_page_pfmemalloc(struct page *page)
{
	page->lru.next = NULL;
}
3025 
3026 /*
3027  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
3028  */
3029 extern void pagefault_out_of_memory(void);
3030 
3031 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
3032 #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
3033 
3034 /*
3035  * Parameter block passed down to zap_pte_range in exceptional cases.
3036  */
/* zap_flags takes the ZAP_FLAG_* values defined below. */
struct zap_details {
	struct folio *single_folio;	/* Locked folio to be unmapped */
	bool skip_cows;			/* Do not zap COWed private pages */
	bool reclaim_pt;		/* Need reclaim page tables? */
	bool reaping;			/* Reaping, do not block. */
	zap_flags_t zap_flags;		/* Extra flags for zapping */
};
3044 
3045 /*
3046  * Whether to drop the pte markers, for example, the uffd-wp information for
3047  * file-backed memory.  This should only be specified when we will completely
3048  * drop the page in the mm, either by truncation or unmapping of the vma.  By
3049  * default, the flag is not set.
3050  */
3051 #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
3052 /* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
3053 #define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
3054 
3055 #ifdef CONFIG_MMU
3056 extern bool can_do_mlock(void);
3057 #else
/* !CONFIG_MMU: mlock is never permitted. */
static inline bool can_do_mlock(void) { return false; }
3059 #endif
3060 extern int user_shm_lock(size_t, struct ucounts *);
3061 extern void user_shm_unlock(size_t, struct ucounts *);
3062 
3063 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
3064 			     pte_t pte);
3065 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
3066 			     pte_t pte);
3067 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
3068 				  unsigned long addr, pmd_t pmd);
3069 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
3070 				pmd_t pmd);
3071 struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
3072 		pud_t pud);
3073 
3074 void zap_special_vma_range(struct vm_area_struct *vma, unsigned long address,
3075 		  unsigned long size);
3076 void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
3077 			   unsigned long size);
3078 /**
3079  * zap_vma - zap all page table entries in a vma
3080  * @vma: The vma to zap.
3081  */
static inline void zap_vma(struct vm_area_struct *vma)
{
	/* Zap the VMA's entire [vm_start, vm_end) range. */
	zap_vma_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
3086 struct mmu_notifier_range;
3087 
3088 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
3089 		unsigned long end, unsigned long floor, unsigned long ceiling);
3090 int
3091 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
3092 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
3093 			void *buf, int len, int write);
3094 
/* Argument pack for follow_pfnmap_start()/follow_pfnmap_end() below. */
struct follow_pfnmap_args {
	/**
	 * Inputs:
	 * @vma: Pointer to @vm_area_struct struct
	 * @address: the virtual address to walk
	 */
	struct vm_area_struct *vma;
	unsigned long address;
	/**
	 * Internals:
	 *
	 * The caller shouldn't touch any of these.
	 */
	spinlock_t *lock;
	pte_t *ptep;
	/**
	 * Outputs:
	 *
	 * @pfn: the PFN of the address
	 * @addr_mask: address mask covering pfn
	 * @pgprot: the pgprot_t of the mapping
	 * @writable: whether the mapping is writable
	 * @special: whether the mapping is a special mapping (real PFN maps)
	 */
	unsigned long pfn;
	unsigned long addr_mask;
	pgprot_t pgprot;
	bool writable;
	bool special;
};
3125 int follow_pfnmap_start(struct follow_pfnmap_args *args);
3126 void follow_pfnmap_end(struct follow_pfnmap_args *args);
3127 
3128 extern void truncate_pagecache(struct inode *inode, loff_t new);
3129 extern void truncate_setsize(struct inode *inode, loff_t newsize);
3130 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
3131 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
3132 int generic_error_remove_folio(struct address_space *mapping,
3133 		struct folio *folio);
3134 
3135 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
3136 		unsigned long address, struct pt_regs *regs);
3137 
3138 #ifdef CONFIG_MMU
3139 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
3140 				  unsigned long address, unsigned int flags,
3141 				  struct pt_regs *regs);
3142 extern int fixup_user_fault(struct mm_struct *mm,
3143 			    unsigned long address, unsigned int fault_flags,
3144 			    bool *unlocked);
3145 void unmap_mapping_pages(struct address_space *mapping,
3146 		pgoff_t start, pgoff_t nr, bool even_cows);
3147 void unmap_mapping_range(struct address_space *mapping,
3148 		loff_t const holebegin, loff_t const holelen, int even_cows);
3149 #else
/*
 * !CONFIG_MMU stubs: page faults cannot be taken without an MMU, so
 * reaching the fault handlers is a kernel bug; the unmap helpers
 * degenerate to no-ops.
 */
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
					 unsigned long address, unsigned int flags,
					 struct pt_regs *regs)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
3169 #endif
3170 
static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	/* even_cows == 0: private COW copies are left in place. */
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}
3176 
3177 static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
3178 						unsigned long addr);
3179 
3180 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
3181 		void *buf, int len, unsigned int gup_flags);
3182 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
3183 		void *buf, int len, unsigned int gup_flags);
3184 
3185 #ifdef CONFIG_BPF_SYSCALL
3186 extern int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
3187 			      void *buf, int len, unsigned int gup_flags);
3188 #endif
3189 
3190 long get_user_pages_remote(struct mm_struct *mm,
3191 			   unsigned long start, unsigned long nr_pages,
3192 			   unsigned int gup_flags, struct page **pages,
3193 			   int *locked);
3194 long pin_user_pages_remote(struct mm_struct *mm,
3195 			   unsigned long start, unsigned long nr_pages,
3196 			   unsigned int gup_flags, struct page **pages,
3197 			   int *locked);
3198 
3199 /*
3200  * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
3201  */
get_user_page_vma_remote(struct mm_struct * mm,unsigned long addr,int gup_flags,struct vm_area_struct ** vmap)3202 static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
3203 						    unsigned long addr,
3204 						    int gup_flags,
3205 						    struct vm_area_struct **vmap)
3206 {
3207 	struct page *page;
3208 	struct vm_area_struct *vma;
3209 	int got;
3210 
3211 	if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
3212 		return ERR_PTR(-EINVAL);
3213 
3214 	got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
3215 
3216 	if (got < 0)
3217 		return ERR_PTR(got);
3218 
3219 	vma = vma_lookup(mm, addr);
3220 	if (WARN_ON_ONCE(!vma)) {
3221 		put_page(page);
3222 		return ERR_PTR(-EINVAL);
3223 	}
3224 
3225 	*vmap = vma;
3226 	return page;
3227 }
3228 
3229 long get_user_pages(unsigned long start, unsigned long nr_pages,
3230 		    unsigned int gup_flags, struct page **pages);
3231 long pin_user_pages(unsigned long start, unsigned long nr_pages,
3232 		    unsigned int gup_flags, struct page **pages);
3233 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3234 		    struct page **pages, unsigned int gup_flags);
3235 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3236 		    struct page **pages, unsigned int gup_flags);
3237 long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
3238 		      struct folio **folios, unsigned int max_folios,
3239 		      pgoff_t *offset);
3240 int folio_add_pins(struct folio *folio, unsigned int pins);
3241 
3242 int get_user_pages_fast(unsigned long start, int nr_pages,
3243 			unsigned int gup_flags, struct page **pages);
3244 int pin_user_pages_fast(unsigned long start, int nr_pages,
3245 			unsigned int gup_flags, struct page **pages);
3246 void folio_add_pin(struct folio *folio);
3247 
3248 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
3249 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
3250 			const struct task_struct *task, bool bypass_rlim);
3251 
3252 struct kvec;
3253 struct page *get_dump_page(unsigned long addr, int *locked);
3254 
3255 bool folio_mark_dirty(struct folio *folio);
3256 bool folio_mark_dirty_lock(struct folio *folio);
3257 bool set_page_dirty(struct page *page);
3258 int set_page_dirty_lock(struct page *page);
3259 
3260 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
3261 
3262 /*
3263  * Flags used by change_protection().  For now we make it a bitmap so
3264  * that we can pass in multiple flags just like parameters.  However
3265  * for now all the callers are only use one of the flags at the same
3266  * time.
3267  */
3268 /*
3269  * Whether we should manually check if we can map individual PTEs writable,
3270  * because something (e.g., COW, uffd-wp) blocks that from happening for all
3271  * PTEs automatically in a writable mapping.
3272  */
3273 #define  MM_CP_TRY_CHANGE_WRITABLE	   (1UL << 0)
3274 /* Whether this protection change is for NUMA hints */
3275 #define  MM_CP_PROT_NUMA                   (1UL << 1)
3276 /* Whether this change is for write protecting */
3277 #define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
3278 #define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
3279 #define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
3280 					    MM_CP_UFFD_WP_RESOLVE)
3281 
3282 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
3283 			     pte_t pte);
3284 extern long change_protection(struct mmu_gather *tlb,
3285 			      struct vm_area_struct *vma, unsigned long start,
3286 			      unsigned long end, unsigned long cp_flags);
3287 extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
3288 	  struct vm_area_struct *vma, struct vm_area_struct **pprev,
3289 	  unsigned long start, unsigned long end, vm_flags_t newflags);
3290 
3291 /*
3292  * doesn't attempt to fault and will return short.
3293  */
3294 int get_user_pages_fast_only(unsigned long start, int nr_pages,
3295 			     unsigned int gup_flags, struct page **pages);
3296 
/*
 * Pin a single user page without faulting; returns true on success.
 * Thin one-page wrapper around get_user_pages_fast_only().
 */
static inline bool get_user_page_fast_only(unsigned long addr,
			unsigned int gup_flags, struct page **pagep)
{
	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
}
3302 /*
3303  * per-process(per-mm_struct) statistics.
3304  */
/* Approximate (clamped-to-zero) read of one rss_stat counter bucket. */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	return percpu_counter_read_positive(&mm->rss_stat[member]);
}

/* Exact (but more expensive) sum of one rss_stat counter bucket. */
static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
{
	return percpu_counter_sum_positive(&mm->rss_stat[member]);
}
3314 
3315 void mm_trace_rss_stat(struct mm_struct *mm, int member);
3316 
/* Adjust an rss_stat bucket by @value and emit the rss_stat tracepoint. */
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	percpu_counter_add(&mm->rss_stat[member], value);

	mm_trace_rss_stat(mm, member);
}

/* Increment an rss_stat bucket by one and emit the rss_stat tracepoint. */
static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	percpu_counter_inc(&mm->rss_stat[member]);

	mm_trace_rss_stat(mm, member);
}

/* Decrement an rss_stat bucket by one and emit the rss_stat tracepoint. */
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	percpu_counter_dec(&mm->rss_stat[member]);

	mm_trace_rss_stat(mm, member);
}
3337 
/* Optimized variant when folio is already known not to be anon */
static inline int mm_counter_file(struct folio *folio)
{
	/* Swap-backed file folios (shmem/tmpfs) are accounted separately. */
	if (folio_test_swapbacked(folio))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

/* Map a folio to the rss_stat bucket it should be accounted in. */
static inline int mm_counter(struct folio *folio)
{
	if (folio_test_anon(folio))
		return MM_ANONPAGES;
	return mm_counter_file(folio);
}
3352 
/* Approximate total RSS: file + anon + shmem buckets (clamped reads). */
static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

/* Exact total RSS, using the more expensive per-CPU sums. */
static inline unsigned long get_mm_rss_sum(struct mm_struct *mm)
{
	return get_mm_counter_sum(mm, MM_FILEPAGES) +
		get_mm_counter_sum(mm, MM_ANONPAGES) +
		get_mm_counter_sum(mm, MM_SHMEMPAGES);
}
3366 
/* RSS high-water mark, never less than the current RSS. */
static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

/* Virtual-memory high-water mark, never less than the current total_vm. */
static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

/*
 * Raise the RSS high-water mark if the current RSS exceeds it.  The
 * unlocked check/update is deliberately racy; data_race() documents
 * that and keeps KCSAN quiet.
 */
static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if (data_race(mm->hiwater_rss) < _rss)
		data_race(mm->hiwater_rss = _rss);
}

/* Raise the VM high-water mark if total_vm exceeds it. */
static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

/* Reset the RSS high-water mark to the current RSS. */
static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}
3395 
/* Fold @mm's RSS high-water mark into the running maximum at @maxrss. */
static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long rss_peak = get_mm_hiwater_rss(mm);

	if (rss_peak > *maxrss)
		*maxrss = rss_peak;
}
3404 
#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
/* No arch support for special PTEs: report none; mkspecial is a no-op. */
static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}
#endif

#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
/* No arch support for PMD-level PFN mappings: stubs mirror the PTE case. */
static inline bool pmd_special(pmd_t pmd)
{
	return false;
}

static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return pmd;
}
#endif	/* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */

#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
/* No arch support for PUD-level PFN mappings: stubs mirror the PTE case. */
static inline bool pud_special(pud_t pud)
{
	return false;
}

static inline pud_t pud_mkspecial(pud_t pud)
{
	return pud;
}
#endif	/* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
3440 
3441 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
3442 			     spinlock_t **ptl);
3443 
#ifdef __PAGETABLE_P4D_FOLDED
/* P4D level folded into PGD: allocation trivially succeeds. */
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif
3453 
#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
/* PUD level folded (or no MMU): no allocation or accounting needed. */
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

/* Account one PUD table's worth of memory to mm->pgtables_bytes. */
static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	/* Some configs fold the level at runtime: nothing to account. */
	if (mm_pud_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

/* Undo mm_inc_nr_puds() when a PUD table is freed. */
static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
/* PMD level folded (or no MMU): no allocation or accounting needed. */
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

/* Account one PMD table's worth of memory to mm->pgtables_bytes. */
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

/* Undo mm_inc_nr_pmds() when a PMD table is freed. */
static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif
3508 
#ifdef CONFIG_MMU
/* Page-table memory accounting, kept as a byte count in the mm. */
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

/* Account one PTE table's worth of memory to mm->pgtables_bytes. */
static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

/* !CONFIG_MMU: no page tables, so accounting is a no-op. */
static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif
3540 
3541 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
3542 int __pte_alloc_kernel(pmd_t *pmd);
3543 
#if defined(CONFIG_MMU)

/*
 * Allocate-if-missing walkers: populate the next page-table level when
 * the entry is none, then return a pointer to the entry covering
 * @address.  A NULL return means the level allocation failed.
 */
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU */
3566 
/* Per-ptdesc flag bits, aliased onto otherwise-unused page flags. */
enum pt_flags {
	PT_kernel = PG_referenced,
	PT_reserved = PG_reserved,
	/* High bits are used for zone/node/section */
};

/* Convert a kernel virtual address to the ptdesc describing its page. */
static inline struct ptdesc *virt_to_ptdesc(const void *x)
{
	return page_ptdesc(virt_to_page(x));
}
3577 
/**
 * ptdesc_address - Virtual address of page table.
 * @pt: Page table descriptor.
 *
 * Return: The first byte of the page table described by @pt.
 */
static inline void *ptdesc_address(const struct ptdesc *pt)
{
	return folio_address(ptdesc_folio(pt));
}

/* Test whether the PT_reserved flag is set on a page-table descriptor. */
static inline bool pagetable_is_reserved(struct ptdesc *pt)
{
	return test_bit(PT_reserved, &pt->pt_flags.f);
}
3593 
/**
 * ptdesc_set_kernel - Mark a ptdesc used to map the kernel
 * @ptdesc: The ptdesc to be marked
 *
 * Kernel page tables often need special handling. Set a flag so that
 * the handling code knows this ptdesc will not be used for userspace.
 */
static inline void ptdesc_set_kernel(struct ptdesc *ptdesc)
{
	set_bit(PT_kernel, &ptdesc->pt_flags.f);
}

/**
 * ptdesc_clear_kernel - Mark a ptdesc as no longer used to map the kernel
 * @ptdesc: The ptdesc to be unmarked
 *
 * Use when the ptdesc is no longer used to map the kernel and no longer
 * needs special handling.
 */
static inline void ptdesc_clear_kernel(struct ptdesc *ptdesc)
{
	/*
	 * Note: the 'PG_referenced' bit does not strictly need to be
	 * cleared before freeing the page. But this is nice for
	 * symmetry.
	 */
	clear_bit(PT_kernel, &ptdesc->pt_flags.f);
}

/**
 * ptdesc_test_kernel - Check if a ptdesc is used to map the kernel
 * @ptdesc: The ptdesc being tested
 *
 * Call to tell if the ptdesc is used to map the kernel.
 */
static inline bool ptdesc_test_kernel(const struct ptdesc *ptdesc)
{
	return test_bit(PT_kernel, &ptdesc->pt_flags.f);
}
3633 
/**
 * pagetable_alloc - Allocate pagetables
 * @gfp:    GFP flags
 * @order:  desired pagetable order
 *
 * pagetable_alloc allocates memory for page tables as well as a page table
 * descriptor to describe that memory.
 *
 * Return: The ptdesc describing the allocated page tables.
 */
static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
{
	/* __GFP_COMP: multi-page tables are a single compound allocation. */
	struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);

	return page_ptdesc(page);
}
#define pagetable_alloc(...)	alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
3651 
/* Release the pages backing a page table (no destructor work). */
static inline void __pagetable_free(struct ptdesc *pt)
{
	struct page *page = ptdesc_page(pt);

	__free_pages(page, compound_order(page));
}

#ifdef CONFIG_ASYNC_KERNEL_PGTABLE_FREE
void pagetable_free_kernel(struct ptdesc *pt);
#else
/* Synchronous fallback when async kernel page-table freeing is off. */
static inline void pagetable_free_kernel(struct ptdesc *pt)
{
	__pagetable_free(pt);
}
#endif
/**
 * pagetable_free - Free pagetables
 * @pt:	The page table descriptor
 *
 * pagetable_free frees the memory of all page tables described by a page
 * table descriptor and the memory for the descriptor itself.
 */
static inline void pagetable_free(struct ptdesc *pt)
{
	if (ptdesc_test_kernel(pt)) {
		/* Kernel page tables may take a deferred freeing path. */
		ptdesc_clear_kernel(pt);
		pagetable_free_kernel(pt);
	} else {
		__pagetable_free(pt);
	}
}
3683 
#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
#if ALLOC_SPLIT_PTLOCKS
/* Split PTE locks allocated separately from the ptdesc. */
void __init ptlock_cache_init(void);
bool ptlock_alloc(struct ptdesc *ptdesc);
void ptlock_free(struct ptdesc *ptdesc);

static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
	return ptdesc->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
/* Split PTE lock embedded in the ptdesc itself: nothing to allocate. */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct ptdesc *ptdesc)
{
	return true;
}

static inline void ptlock_free(struct ptdesc *ptdesc)
{
}

static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
	return &ptdesc->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

/* Lock protecting the PTE table that @pmd points to. */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
}

/* Lock protecting the PTE table containing @pte (lowmem tables only). */
static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
	BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
	return ptlock_ptr(virt_to_ptdesc(pte));
}

/* Allocate (if needed) and initialize the split PTE lock for @ptdesc. */
static inline bool ptlock_init(struct ptdesc *ptdesc)
{
	/*
	 * prep_new_page() initialize page->private (and therefore page->ptl)
	 * with 0. Make sure nobody took it in use in between.
	 *
	 * It can happen if arch try to use slab for page table allocation:
	 * slab code uses page->slab_cache, which share storage with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
	if (!ptlock_alloc(ptdesc))
		return false;
	spin_lock_init(ptlock_ptr(ptdesc));
	return true;
}

#else	/* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
3758 
/* Common ctor: mark the folio as a page table and account it. */
static inline void __pagetable_ctor(struct ptdesc *ptdesc)
{
	struct folio *folio = ptdesc_folio(ptdesc);

	__folio_set_pgtable(folio);
	lruvec_stat_add_folio(folio, NR_PAGETABLE);
}

/* Undo __pagetable_ctor() and release the split PTE lock, if any. */
static inline void pagetable_dtor(struct ptdesc *ptdesc)
{
	struct folio *folio = ptdesc_folio(ptdesc);

	ptlock_free(ptdesc);
	__folio_clear_pgtable(folio);
	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}

/* Destruct and free a page-table descriptor in one call. */
static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
{
	pagetable_dtor(ptdesc);
	pagetable_free(ptdesc);
}

/* PTE-table ctor; returns false if the split ptlock allocation fails. */
static inline bool pagetable_pte_ctor(struct mm_struct *mm,
				      struct ptdesc *ptdesc)
{
	/* init_mm tables skip the per-table lock. */
	if (mm != &init_mm && !ptlock_init(ptdesc))
		return false;
	__pagetable_ctor(ptdesc);
	return true;
}
3790 
3791 pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
3792 
/* Map the PTE for @addr; __pte_offset_map() without a pmdval capture. */
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{
	return __pte_offset_map(pmd, addr, NULL);
}
3797 
3798 pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3799 			   unsigned long addr, spinlock_t **ptlp);
3800 
3801 pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
3802 				unsigned long addr, spinlock_t **ptlp);
3803 pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
3804 				unsigned long addr, pmd_t *pmdvalp,
3805 				spinlock_t **ptlp);
3806 
3807 #define pte_unmap_unlock(pte, ptl)	do {		\
3808 	spin_unlock(ptl);				\
3809 	pte_unmap(pte);					\
3810 } while (0)
3811 
3812 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
3813 
3814 #define pte_alloc_map(mm, pmd, address)			\
3815 	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
3816 
3817 #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
3818 	(pte_alloc(mm, pmd) ?			\
3819 		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
3820 
3821 #define pte_alloc_kernel(pmd, address)			\
3822 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
3823 		NULL: pte_offset_kernel(pmd, address))
3824 
#if defined(CONFIG_SPLIT_PMD_PTLOCKS)

/* Page containing @pmd's PMD table (mask off the in-table offset). */
static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
{
	return page_ptdesc(pmd_pgtable_page(pmd));
}

/* Split lock protecting the PMD table that contains @pmd. */
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_ptdesc(pmd));
}

static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptdesc->pmd_huge_pte = NULL;
#endif
	return ptlock_init(ptdesc);
}

#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)

#else

/* No split PMD locks: fall back to the coarse mm->page_table_lock. */
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

/* Acquire and return the spinlock guarding @pmd's table. */
static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

/* PMD-table ctor; returns false if the split ptlock allocation fails. */
static inline bool pagetable_pmd_ctor(struct mm_struct *mm,
				      struct ptdesc *ptdesc)
{
	if (mm != &init_mm && !pmd_ptlock_init(ptdesc))
		return false;
	ptdesc_pmd_pts_init(ptdesc);
	__pagetable_ctor(ptdesc);
	return true;
}
3882 
/*
 * No scalability reason to split PUD locks yet, but follow the same pattern
 * as the PMD locks to make it easier if we decide to.  The VM should not be
 * considered ready to switch to split PUD locks yet; there may be places
 * which need to be converted from page_table_lock.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

/* Acquire and return the lock guarding @pud's table. */
static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}

/* Upper-level table ctors: no per-table locks, just common accounting. */
static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
{
	__pagetable_ctor(ptdesc);
}

static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc)
{
	__pagetable_ctor(ptdesc);
}

static inline void pagetable_pgd_ctor(struct ptdesc *ptdesc)
{
	__pagetable_ctor(ptdesc);
}
3916 
3917 extern void __init pagecache_init(void);
3918 extern void free_initmem(void);
3919 
3920 /*
3921  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
3922  * into the buddy system. The freed pages will be poisoned with pattern
3923  * "poison" if it's within range [0, UCHAR_MAX].
3924  * Return pages freed into the buddy system.
3925  */
3926 extern unsigned long free_reserved_area(void *start, void *end,
3927 					int poison, const char *s);
3928 
3929 extern void adjust_managed_page_count(struct page *page, long count);
3930 
3931 extern void reserve_bootmem_region(phys_addr_t start,
3932 				   phys_addr_t end, int nid);
3933 
3934 /* Free the reserved page into the buddy system, so it gets managed. */
3935 void free_reserved_page(struct page *page);
3936 
/* Reserve a page and remove it from the managed-page count. */
static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/* Free a reserved page backing a page-table descriptor. */
static inline void free_reserved_ptdesc(struct ptdesc *pt)
{
	free_reserved_page(ptdesc_page(pt));
}
3947 
/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	/* Linker-provided bounds of the __init section. */
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel image (initmem)");
}

/* Total pages physically present across all online NUMA nodes. */
static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}
3972 
3973 /*
3974  * FIXME: Using memblock node mappings, an architecture may initialise its
3975  * zones, allocate the backing mem_map and account for memory holes in an
3976  * architecture independent manner.
3977  *
3978  * An architecture is expected to register range of page frames backed by
3979  * physical memory with memblock_add[_node]() before calling
3980  * free_area_init() passing in the PFN each zone ends at. At a basic
3981  * usage, an architecture is expected to do something like
3982  *
3983  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
3984  * 							 max_highmem_pfn};
3985  * for_each_valid_physical_page_range()
3986  *	memblock_add_node(base, size, nid, MEMBLOCK_NONE)
3987  * free_area_init(max_zone_pfns);
3988  */
3989 void arch_zone_limits_init(unsigned long *max_zone_pfn);
3990 unsigned long node_map_pfn_alignment(void);
3991 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
3992 						unsigned long end_pfn);
3993 extern void get_pfn_range_for_nid(unsigned int nid,
3994 			unsigned long *start_pfn, unsigned long *end_pfn);
3995 
#ifndef CONFIG_NUMA
/* Single-node systems: every early pfn belongs to node 0. */
static inline int early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif
4005 
4006 extern void mem_init(void);
4007 extern void __init mmap_init(void);
4008 
4009 extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
/* Dump memory state for all zones/nodes with default flags. */
static inline void show_mem(void)
{
	__show_mem(0, NULL, MAX_NR_ZONES - 1);
}
4014 extern long si_mem_available(void);
4015 extern void si_meminfo(struct sysinfo * val);
4016 extern void si_meminfo_node(struct sysinfo *val, int nid);
4017 
4018 extern __printf(3, 4)
4019 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
4020 
4021 extern void setup_per_cpu_pageset(void);
4022 
4023 /* nommu.c */
4024 extern atomic_long_t mmap_pages_allocated;
4025 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
4026 
4027 /* interval_tree.c */
4028 void vma_interval_tree_insert(struct vm_area_struct *node,
4029 			      struct rb_root_cached *root);
4030 void vma_interval_tree_insert_after(struct vm_area_struct *node,
4031 				    struct vm_area_struct *prev,
4032 				    struct rb_root_cached *root);
4033 void vma_interval_tree_remove(struct vm_area_struct *node,
4034 			      struct rb_root_cached *root);
4035 struct vm_area_struct *vma_interval_tree_subtree_search(struct vm_area_struct *node,
4036 				unsigned long start, unsigned long last);
4037 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
4038 				unsigned long start, unsigned long last);
4039 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
4040 				unsigned long start, unsigned long last);
4041 
4042 #define vma_interval_tree_foreach(vma, root, start, last)		\
4043 	for (vma = vma_interval_tree_iter_first(root, start, last);	\
4044 	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
4045 
4046 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
4047 				   struct rb_root_cached *root);
4048 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
4049 				   struct rb_root_cached *root);
4050 struct anon_vma_chain *
4051 anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
4052 				  unsigned long start, unsigned long last);
4053 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
4054 	struct anon_vma_chain *node, unsigned long start, unsigned long last);
4055 #ifdef CONFIG_DEBUG_VM_RB
4056 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
4057 #endif
4058 
4059 #define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
4060 	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
4061 	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
4062 
4063 /* mmap.c */
4064 extern int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin);
4065 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
4066 extern void exit_mmap(struct mm_struct *);
4067 bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
4068 				 unsigned long addr, bool write);
4069 
/*
 * Check a proposed data-segment growth against RLIMIT_DATA.
 * Returns 0 if the new brk/data size fits within @rlim (or the limit
 * is infinite), -ENOSPC otherwise.
 */
static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	unsigned long brk_span, data_span;

	/* An unlimited rlimit can never be exceeded. */
	if (rlim >= RLIM_INFINITY)
		return 0;

	brk_span = new - start;
	data_span = end_data - start_data;
	return (brk_span + data_span > rlim) ? -ENOSPC : 0;
}
4083 
4084 extern int mm_take_all_locks(struct mm_struct *mm);
4085 extern void mm_drop_all_locks(struct mm_struct *mm);
4086 
4087 extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
4088 extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
4089 extern struct file *get_mm_exe_file(struct mm_struct *mm);
4090 extern struct file *get_task_exe_file(struct task_struct *task);
4091 
4092 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
4093 
4094 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
4095 				   const struct vm_special_mapping *sm);
4096 struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
4097 				   unsigned long addr, unsigned long len,
4098 				   vm_flags_t vm_flags,
4099 				   const struct vm_special_mapping *spec);
4100 
4101 unsigned long randomize_stack_top(unsigned long stack_top);
4102 unsigned long randomize_page(unsigned long start, unsigned long range);
4103 
4104 unsigned long
4105 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
4106 		    unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
4107 
/* Wrapper for __get_unmapped_area() passing no extra vm_flags. */
static inline unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		  unsigned long pgoff, unsigned long flags)
{
	return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
4114 
4115 extern unsigned long do_mmap(struct file *file, unsigned long addr,
4116 	unsigned long len, unsigned long prot, unsigned long flags,
4117 	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
4118 	struct list_head *uf);
4119 extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
4120 			 unsigned long start, size_t len, struct list_head *uf,
4121 			 bool unlock);
4122 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
4123 		    struct mm_struct *mm, unsigned long start,
4124 		    unsigned long end, struct list_head *uf, bool unlock);
4125 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
4126 		     struct list_head *uf);
4127 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
4128 
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
/* Populate (fault in) the given address range, ignoring any failures. */
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
/* Without an MMU there is nothing to populate. */
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
4140 
4141 /* This takes the mm semaphore itself */
4142 int __must_check vm_brk_flags(unsigned long addr, unsigned long request, bool is_exec);
4143 int vm_munmap(unsigned long start, size_t len);
4144 unsigned long __must_check vm_mmap(struct file *file, unsigned long addr,
4145 		unsigned long len, unsigned long prot,
4146 		unsigned long flag, unsigned long offset);
4147 unsigned long __must_check vm_mmap_shadow_stack(unsigned long addr,
4148 		unsigned long len, unsigned long flags);
4149 
/* Search parameters for vm_unmapped_area(). */
struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;		/* VM_UNMAPPED_AREA_TOPDOWN or 0 */
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};
4160 
4161 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
4162 
4163 /* truncate.c */
4164 void truncate_inode_pages(struct address_space *mapping, loff_t lstart);
4165 void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart,
4166 		uoff_t lend);
4167 void truncate_inode_pages_final(struct address_space *mapping);
4168 
4169 /* generic vm_area_ops exported for stackable file systems */
4170 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
4171 extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
4172 		pgoff_t start_pgoff, pgoff_t end_pgoff);
4173 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
4174 
4175 extern unsigned long stack_guard_gap;
4176 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
4177 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
4178 struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
4179 
4180 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
4181 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
4182 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
4183 					     struct vm_area_struct **pprev);
4184 
4185 /*
4186  * Look up the first VMA which intersects the interval [start_addr, end_addr)
4187  * NULL if none.  Assume start_addr < end_addr.
4188  */
4189 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
4190 			unsigned long start_addr, unsigned long end_addr);
4191 
4192 /**
4193  * vma_lookup() - Find a VMA at a specific address
4194  * @mm: The process address space.
4195  * @addr: The user address.
4196  *
4197  * Return: The vm_area_struct at the given address, %NULL otherwise.
4198  */
4199 static inline
vma_lookup(struct mm_struct * mm,unsigned long addr)4200 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
4201 {
4202 	return mtree_load(&mm->mm_mt, addr);
4203 }
4204 
stack_guard_start_gap(const struct vm_area_struct * vma)4205 static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
4206 {
4207 	if (vma->vm_flags & VM_GROWSDOWN)
4208 		return stack_guard_gap;
4209 
4210 	/* See reasoning around the VM_SHADOW_STACK definition */
4211 	if (vma->vm_flags & VM_SHADOW_STACK)
4212 		return PAGE_SIZE;
4213 
4214 	return 0;
4215 }
4216 
/*
 * Lowest address covered by @vma including its preceding guard gap,
 * clamped to 0 if subtracting the gap would wrap below address zero.
 */
static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	/* The subtraction wrapped around: clamp to 0. */
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}
4227 
/*
 * Highest address covered by @vma including the trailing guard gap used
 * for upward-growing stacks, clamped on address-space overflow.
 */
static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		/* The addition wrapped around: clamp near the address-space top. */
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}
4239 
vma_pages(const struct vm_area_struct * vma)4240 static inline unsigned long vma_pages(const struct vm_area_struct *vma)
4241 {
4242 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
4243 }
4244 
vma_last_pgoff(struct vm_area_struct * vma)4245 static inline unsigned long vma_last_pgoff(struct vm_area_struct *vma)
4246 {
4247 	return vma->vm_pgoff + vma_pages(vma) - 1;
4248 }
4249 
vma_desc_size(const struct vm_area_desc * desc)4250 static inline unsigned long vma_desc_size(const struct vm_area_desc *desc)
4251 {
4252 	return desc->end - desc->start;
4253 }
4254 
vma_desc_pages(const struct vm_area_desc * desc)4255 static inline unsigned long vma_desc_pages(const struct vm_area_desc *desc)
4256 {
4257 	return vma_desc_size(desc) >> PAGE_SHIFT;
4258 }
4259 
4260 /**
4261  * mmap_action_remap - helper for mmap_prepare hook to specify that a pure PFN
4262  * remap is required.
4263  * @desc: The VMA descriptor for the VMA requiring remap.
4264  * @start: The virtual address to start the remap from, must be within the VMA.
4265  * @start_pfn: The first PFN in the range to remap.
4266  * @size: The size of the range to remap, in bytes, at most spanning to the end
4267  * of the VMA.
4268  */
mmap_action_remap(struct vm_area_desc * desc,unsigned long start,unsigned long start_pfn,unsigned long size)4269 static inline void mmap_action_remap(struct vm_area_desc *desc,
4270 				     unsigned long start,
4271 				     unsigned long start_pfn,
4272 				     unsigned long size)
4273 {
4274 	struct mmap_action *action = &desc->action;
4275 
4276 	/* [start, start + size) must be within the VMA. */
4277 	WARN_ON_ONCE(start < desc->start || start >= desc->end);
4278 	WARN_ON_ONCE(start + size > desc->end);
4279 
4280 	action->type = MMAP_REMAP_PFN;
4281 	action->remap.start = start;
4282 	action->remap.start_pfn = start_pfn;
4283 	action->remap.size = size;
4284 	action->remap.pgprot = desc->page_prot;
4285 }
4286 
4287 /**
4288  * mmap_action_remap_full - helper for mmap_prepare hook to specify that the
4289  * entirety of a VMA should be PFN remapped.
4290  * @desc: The VMA descriptor for the VMA requiring remap.
4291  * @start_pfn: The first PFN in the range to remap.
4292  */
mmap_action_remap_full(struct vm_area_desc * desc,unsigned long start_pfn)4293 static inline void mmap_action_remap_full(struct vm_area_desc *desc,
4294 					  unsigned long start_pfn)
4295 {
4296 	mmap_action_remap(desc, desc->start, start_pfn, vma_desc_size(desc));
4297 }
4298 
4299 /**
4300  * mmap_action_ioremap - helper for mmap_prepare hook to specify that a pure PFN
4301  * I/O remap is required.
4302  * @desc: The VMA descriptor for the VMA requiring remap.
4303  * @start: The virtual address to start the remap from, must be within the VMA.
4304  * @start_pfn: The first PFN in the range to remap.
4305  * @size: The size of the range to remap, in bytes, at most spanning to the end
4306  * of the VMA.
4307  */
mmap_action_ioremap(struct vm_area_desc * desc,unsigned long start,unsigned long start_pfn,unsigned long size)4308 static inline void mmap_action_ioremap(struct vm_area_desc *desc,
4309 				       unsigned long start,
4310 				       unsigned long start_pfn,
4311 				       unsigned long size)
4312 {
4313 	mmap_action_remap(desc, start, start_pfn, size);
4314 	desc->action.type = MMAP_IO_REMAP_PFN;
4315 }
4316 
4317 /**
4318  * mmap_action_ioremap_full - helper for mmap_prepare hook to specify that the
4319  * entirety of a VMA should be PFN I/O remapped.
4320  * @desc: The VMA descriptor for the VMA requiring remap.
4321  * @start_pfn: The first PFN in the range to remap.
4322  */
mmap_action_ioremap_full(struct vm_area_desc * desc,unsigned long start_pfn)4323 static inline void mmap_action_ioremap_full(struct vm_area_desc *desc,
4324 					    unsigned long start_pfn)
4325 {
4326 	mmap_action_ioremap(desc, desc->start, start_pfn, vma_desc_size(desc));
4327 }
4328 
4329 /**
4330  * mmap_action_simple_ioremap - helper for mmap_prepare hook to specify that the
4331  * physical range in [start_phys_addr, start_phys_addr + size) should be I/O
4332  * remapped.
4333  * @desc: The VMA descriptor for the VMA requiring remap.
4334  * @start_phys_addr: Start of the physical memory to be mapped.
4335  * @size: Size of the area to map.
4336  *
4337  * NOTE: Some drivers might want to tweak desc->page_prot for purposes of
4338  * write-combine or similar.
4339  */
static inline void mmap_action_simple_ioremap(struct vm_area_desc *desc,
					      phys_addr_t start_phys_addr,
					      unsigned long size)
{
	/* Record the physical range; the remap itself happens later. */
	desc->action.simple_ioremap.start_phys_addr = start_phys_addr;
	desc->action.simple_ioremap.size = size;
	desc->action.type = MMAP_SIMPLE_IO_REMAP;
}
4350 
/**
 * mmap_action_map_kernel_pages - helper for mmap_prepare hook to specify that
 * @nr_pages kernel pages contained in the @pages array should be mapped to
 * userland starting at virtual address @start.
 * @desc: The VMA descriptor for the VMA requiring kernel pages to be mapped.
 * @start: The virtual address from which to map them.
 * @pages: An array of struct page pointers describing the memory to map.
 * @nr_pages: The number of entries in the @pages array.
 */
static inline void mmap_action_map_kernel_pages(struct vm_area_desc *desc,
		unsigned long start, struct page **pages,
		unsigned long nr_pages)
{
	struct mmap_action *action = &desc->action;

	action->type = MMAP_MAP_KERNEL_PAGES;
	action->map_kernel.start = start;
	action->map_kernel.pages = pages;
	action->map_kernel.nr_pages = nr_pages;
	action->map_kernel.pgoff = desc->pgoff;
}
4372 
/**
 * mmap_action_map_kernel_pages_full - helper for mmap_prepare hook to specify that
 * kernel pages contained in the @pages array should be mapped to userland
 * from @desc->start to @desc->end.
 * @desc: The VMA descriptor for the VMA requiring kernel pages to be mapped.
 * @pages: An array of struct page pointers describing the memory to map.
 *
 * The caller must ensure that @pages contains sufficient entries to cover the
 * entire range described by @desc.
 */
static inline void mmap_action_map_kernel_pages_full(struct vm_area_desc *desc,
		struct page **pages)
{
	mmap_action_map_kernel_pages(desc, desc->start, pages,
				     vma_desc_pages(desc));
}
4389 
4390 int mmap_action_prepare(struct vm_area_desc *desc);
4391 int mmap_action_complete(struct vm_area_struct *vma,
4392 			 struct mmap_action *action);
4393 
4394 /* Look up the first VMA which exactly match the interval vm_start ... vm_end */
find_exact_vma(struct mm_struct * mm,unsigned long vm_start,unsigned long vm_end)4395 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
4396 				unsigned long vm_start, unsigned long vm_end)
4397 {
4398 	struct vm_area_struct *vma = vma_lookup(mm, vm_start);
4399 
4400 	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
4401 		vma = NULL;
4402 
4403 	return vma;
4404 }
4405 
4406 /**
4407  * range_is_subset - Is the specified inner range a subset of the outer range?
4408  * @outer_start: The start of the outer range.
4409  * @outer_end: The exclusive end of the outer range.
4410  * @inner_start: The start of the inner range.
4411  * @inner_end: The exclusive end of the inner range.
4412  *
4413  * Returns: %true if [inner_start, inner_end) is a subset of [outer_start,
4414  * outer_end), otherwise %false.
4415  */
static inline bool range_is_subset(unsigned long outer_start,
				   unsigned long outer_end,
				   unsigned long inner_start,
				   unsigned long inner_end)
{
	/* Both bounds of the inner range must fall within the outer range. */
	if (inner_start < outer_start)
		return false;
	return inner_end <= outer_end;
}
4423 
4424 /**
4425  * range_in_vma - is the specified [@start, @end) range a subset of the VMA?
4426  * @vma: The VMA against which we want to check [@start, @end).
4427  * @start: The start of the range we wish to check.
4428  * @end: The exclusive end of the range we wish to check.
4429  *
4430  * Returns: %true if [@start, @end) is a subset of [@vma->vm_start,
4431  * @vma->vm_end), %false otherwise.
4432  */
range_in_vma(const struct vm_area_struct * vma,unsigned long start,unsigned long end)4433 static inline bool range_in_vma(const struct vm_area_struct *vma,
4434 				unsigned long start, unsigned long end)
4435 {
4436 	if (!vma)
4437 		return false;
4438 
4439 	return range_is_subset(vma->vm_start, vma->vm_end, start, end);
4440 }
4441 
4442 /**
4443  * range_in_vma_desc - is the specified [@start, @end) range a subset of the VMA
4444  * described by @desc, a VMA descriptor?
4445  * @desc: The VMA descriptor against which we want to check [@start, @end).
4446  * @start: The start of the range we wish to check.
4447  * @end: The exclusive end of the range we wish to check.
4448  *
4449  * Returns: %true if [@start, @end) is a subset of [@desc->start, @desc->end),
4450  * %false otherwise.
4451  */
range_in_vma_desc(const struct vm_area_desc * desc,unsigned long start,unsigned long end)4452 static inline bool range_in_vma_desc(const struct vm_area_desc *desc,
4453 				     unsigned long start, unsigned long end)
4454 {
4455 	if (!desc)
4456 		return false;
4457 
4458 	return range_is_subset(desc->start, desc->end, start, end);
4459 }
4460 
4461 #ifdef CONFIG_MMU
4462 pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
4463 
vma_get_page_prot(vma_flags_t vma_flags)4464 static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
4465 {
4466 	const vm_flags_t vm_flags = vma_flags_to_legacy(vma_flags);
4467 
4468 	return vm_get_page_prot(vm_flags);
4469 }
4470 
4471 void vma_set_page_prot(struct vm_area_struct *vma);
4472 #else
/* Without an MMU there are no page protections. */
static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	return __pgprot(0);
}
static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
4485 #endif
4486 
4487 void vma_set_file(struct vm_area_struct *vma, struct file *file);
4488 
4489 #ifdef CONFIG_NUMA_BALANCING
4490 unsigned long change_prot_numa(struct vm_area_struct *vma,
4491 			unsigned long start, unsigned long end);
4492 #endif
4493 
4494 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
4495 		unsigned long addr);
4496 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
4497 		    unsigned long pfn, unsigned long size, pgprot_t pgprot);
4498 
4499 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
4500 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
4501 			struct page **pages, unsigned long *num);
4502 int map_kernel_pages_prepare(struct vm_area_desc *desc);
4503 int map_kernel_pages_complete(struct vm_area_struct *vma,
4504 			      struct mmap_action *action);
4505 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
4506 				unsigned long num);
4507 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
4508 				unsigned long num);
4509 vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
4510 			bool write);
4511 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
4512 			unsigned long pfn);
4513 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
4514 			unsigned long pfn, pgprot_t pgprot);
4515 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
4516 			unsigned long pfn);
4517 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
4518 		unsigned long addr, unsigned long pfn);
4519 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
4520 
/*
 * Insert a single page into a user VMA, translating vm_insert_page()'s
 * errno return into a VM_FAULT_* code for use from fault handlers.
 */
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -EBUSY is deliberately not treated as an error here. */
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
4533 
#ifndef io_remap_pfn_range_pfn
/*
 * Default PFN translation for I/O remaps: the identity.  Architectures
 * may override this by defining io_remap_pfn_range_pfn themselves.
 */
static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
		unsigned long size)
{
	return pfn;
}
#endif
4541 
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long addr, unsigned long orig_pfn,
				     unsigned long size, pgprot_t orig_prot)
{
	/*
	 * Let the architecture translate the PFN, and always map I/O ranges
	 * with a decrypted protection.
	 */
	return remap_pfn_range(vma, addr,
			       io_remap_pfn_range_pfn(orig_pfn, size),
			       size, pgprot_decrypted(orig_prot));
}
4551 
vmf_error(int err)4552 static inline vm_fault_t vmf_error(int err)
4553 {
4554 	if (err == -ENOMEM)
4555 		return VM_FAULT_OOM;
4556 	else if (err == -EHWPOISON)
4557 		return VM_FAULT_HWPOISON;
4558 	return VM_FAULT_SIGBUS;
4559 }
4560 
4561 /*
4562  * Convert errno to return value for ->page_mkwrite() calls.
4563  *
4564  * This should eventually be merged with vmf_error() above, but will need a
4565  * careful audit of all vmf_error() callers.
4566  */
vmf_fs_error(int err)4567 static inline vm_fault_t vmf_fs_error(int err)
4568 {
4569 	if (err == 0)
4570 		return VM_FAULT_LOCKED;
4571 	if (err == -EFAULT || err == -EAGAIN)
4572 		return VM_FAULT_NOPAGE;
4573 	if (err == -ENOMEM)
4574 		return VM_FAULT_OOM;
4575 	/* -ENOSPC, -EDQUOT, -EIO ... */
4576 	return VM_FAULT_SIGBUS;
4577 }
4578 
/*
 * Convert a VM_FAULT_* result into an errno; 0 means the fault succeeded.
 * HW-poison faults yield -EHWPOISON only when the caller passed FOLL_HWPOISON.
 */
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}
4589 
4590 /*
4591  * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
4592  * a (NUMA hinting) fault is required.
4593  */
/* Returns true if GUP may follow the page without a NUMA hinting fault. */
static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
					   unsigned int flags)
{
	/*
	 * If callers don't want to honor NUMA hinting faults, no need to
	 * determine if we would actually have to trigger a NUMA hinting fault.
	 */
	if (!(flags & FOLL_HONOR_NUMA_FAULT))
		return true;

	/*
	 * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
	 *
	 * Requiring a fault here even for inaccessible VMAs would mean that
	 * FOLL_FORCE cannot make any progress, because handle_mm_fault()
	 * refuses to process NUMA hinting faults in inaccessible VMAs.
	 */
	return !vma_is_accessible(vma);
}
4613 
4614 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
4615 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
4616 			       unsigned long size, pte_fn_t fn, void *data);
4617 extern int apply_to_existing_page_range(struct mm_struct *mm,
4618 				   unsigned long address, unsigned long size,
4619 				   pte_fn_t fn, void *data);
4620 
4621 #ifdef CONFIG_PAGE_POISONING
4622 extern void __kernel_poison_pages(struct page *page, int numpages);
4623 extern void __kernel_unpoison_pages(struct page *page, int numpages);
4624 extern bool _page_poisoning_enabled_early;
4625 DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
/* Slow-path check: reads the early boot flag rather than the static key. */
static inline bool page_poisoning_enabled(void)
{
	return _page_poisoning_enabled_early;
}
/*
 * For use in fast paths after init_mem_debugging() has run, or when a
 * false negative result is not harmful when called too early.
 */
static inline bool page_poisoning_enabled_static(void)
{
	return static_branch_unlikely(&_page_poisoning_enabled);
}
/* Apply page poison to @numpages pages, if poisoning is enabled. */
static inline void kernel_poison_pages(struct page *page, int numpages)
{
	if (page_poisoning_enabled_static())
		__kernel_poison_pages(page, numpages);
}
/* Strip page poison from @numpages pages, if poisoning is enabled. */
static inline void kernel_unpoison_pages(struct page *page, int numpages)
{
	if (page_poisoning_enabled_static())
		__kernel_unpoison_pages(page, numpages);
}
4648 #else
/* No-op stubs when CONFIG_PAGE_POISONING is disabled. */
static inline bool page_poisoning_enabled(void) { return false; }
static inline bool page_poisoning_enabled_static(void) { return false; }
/* Fixed parameter-name typo: was "nunmpages". */
static inline void __kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
4654 #endif
4655 
4656 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
/* Should freshly allocated pages be zeroed? */
static inline bool want_init_on_alloc(gfp_t flags)
{
	/* Zero-init if the static key is on, or if the caller asked for it. */
	return static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				   &init_on_alloc) ||
	       (flags & __GFP_ZERO);
}
4664 
4665 DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
/* True if the init_on_free static key is enabled. */
static inline bool want_init_on_free(void)
{
	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				   &init_on_free);
}
4671 
4672 extern bool _debug_pagealloc_enabled_early;
4673 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
4674 
/* Early-boot-safe check: reads the early flag rather than the static key. */
static inline bool debug_pagealloc_enabled(void)
{
	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
		_debug_pagealloc_enabled_early;
}

/*
 * For use in fast paths after mem_debugging_and_hardening_init() has run,
 * or when a false negative result is not harmful when called too early.
 */
static inline bool debug_pagealloc_enabled_static(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
		return false;

	return static_branch_unlikely(&_debug_pagealloc_enabled);
}
4692 
4693 /*
4694  * To support DEBUG_PAGEALLOC architecture must ensure that
4695  * __kernel_map_pages() never fails
4696  */
4697 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
4698 #ifdef CONFIG_DEBUG_PAGEALLOC
/* Map @numpages pages via __kernel_map_pages(..., enable == 1). */
static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
	iommu_debug_check_unmapped(page, numpages);

	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 1);
}

/* Unmap @numpages pages via __kernel_map_pages(..., enable == 0). */
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
	iommu_debug_check_unmapped(page, numpages);

	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 0);
}
4714 
4715 extern unsigned int _debug_guardpage_minorder;
4716 DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
4717 
/* Current guard-page minimum-order setting. */
static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

/* True if the guard-page debugging static key is enabled. */
static inline bool debug_guardpage_enabled(void)
{
	return static_branch_unlikely(&_debug_guardpage_enabled);
}
4727 
/* A page is a guard page only while guard-page debugging is enabled. */
static inline bool page_is_guard(const struct page *page)
{
	return debug_guardpage_enabled() && PageGuard(page);
}
4735 
bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
/* Mark @page as a guard page; returns false if guard debugging is off. */
static inline bool set_page_guard(struct zone *zone, struct page *page,
				  unsigned int order)
{
	if (!debug_guardpage_enabled())
		return false;
	return __set_page_guard(zone, page, order);
}

void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
/* Clear the guard-page marking; no-op if guard debugging is off. */
static inline void clear_page_guard(struct zone *zone, struct page *page,
				    unsigned int order)
{
	if (!debug_guardpage_enabled())
		return;
	__clear_page_guard(zone, page, order);
}
4753 
#else	/* CONFIG_DEBUG_PAGEALLOC */
/* No-op stubs when CONFIG_DEBUG_PAGEALLOC is disabled. */
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(const struct page *page) { return false; }
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order) {}
#endif	/* CONFIG_DEBUG_PAGEALLOC */
4765 
4766 #ifndef clear_pages
4767 /**
4768  * clear_pages() - clear a page range for kernel-internal use.
4769  * @addr: start address
4770  * @npages: number of pages
4771  *
4772  * Use clear_user_pages() instead when clearing a page range to be
4773  * mapped to user space.
4774  *
4775  * Does absolutely no exception handling.
4776  *
4777  * Note that even though the clearing operation is preemptible, clear_pages()
4778  * does not (and on architectures where it reduces to a few long-running
4779  * instructions, might not be able to) call cond_resched() to check if
4780  * rescheduling is required.
4781  *
4782  * When running under preemptible models this is not a problem. Under
4783  * cooperatively scheduled models, however, the caller is expected to
4784  * limit @npages to no more than PROCESS_PAGES_NON_PREEMPT_BATCH.
4785  */
static inline void clear_pages(void *addr, unsigned int npages)
{
	/*
	 * Generic fallback: clear one page at a time.  Note the do/while:
	 * @npages must be non-zero, or the unsigned counter wraps around.
	 */
	do {
		clear_page(addr);
		addr += PAGE_SIZE;
	} while (--npages);
}
4793 #endif
4794 
4795 #ifndef PROCESS_PAGES_NON_PREEMPT_BATCH
4796 #ifdef clear_pages
4797 /*
4798  * The architecture defines clear_pages(), and we assume that it is
4799  * generally "fast". So choose a batch size large enough to allow the processor
4800  * headroom for optimizing the operation and yet small enough that we see
4801  * reasonable preemption latency for when this optimization is not possible
4802  * (ex. slow microarchitectures, memory bandwidth saturation.)
4803  *
4804  * With a value of 32MB and assuming a memory bandwidth of ~10GBps, this should
4805  * result in worst case preemption latency of around 3ms when clearing pages.
4806  *
4807  * (See comment above clear_pages() for why preemption latency is a concern
4808  * here.)
4809  */
4810 #define PROCESS_PAGES_NON_PREEMPT_BATCH		(SZ_32M >> PAGE_SHIFT)
4811 #else /* !clear_pages */
4812 /*
4813  * The architecture does not provide a clear_pages() implementation. Assume
4814  * that clear_page() -- which clear_pages() will fallback to -- is relatively
4815  * slow and choose a small value for PROCESS_PAGES_NON_PREEMPT_BATCH.
4816  */
4817 #define PROCESS_PAGES_NON_PREEMPT_BATCH		1
4818 #endif
4819 #endif
4820 
4821 #ifdef __HAVE_ARCH_GATE_AREA
4822 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
4823 extern int in_gate_area_no_mm(unsigned long addr);
4824 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
4825 #else
/* Without an architecture gate area, no gate VMA ever exists. */
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
4835 #endif	/* __HAVE_ARCH_GATE_AREA */
4836 
4837 bool process_shares_mm(const struct task_struct *p, const struct mm_struct *mm);
4838 
4839 void drop_slab(void);
4840 
4841 #ifndef CONFIG_MMU
4842 #define randomize_va_space 0
4843 #else
4844 extern int randomize_va_space;
4845 #endif
4846 
4847 const char * arch_vma_name(struct vm_area_struct *vma);
4848 #ifdef CONFIG_MMU
4849 void print_vma_addr(char *prefix, unsigned long rip);
4850 #else
/* No-op without CONFIG_MMU. */
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
4854 #endif
4855 
4856 void *sparse_buffer_alloc(unsigned long size);
4857 unsigned long section_map_size(void);
4858 struct page * __populate_section_memmap(unsigned long pfn,
4859 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
4860 		struct dev_pagemap *pgmap);
4861 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
4862 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
4863 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
4864 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
4865 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
4866 			    struct vmem_altmap *altmap, unsigned long ptpfn,
4867 			    unsigned long flags);
4868 void *vmemmap_alloc_block(unsigned long size, int node);
4869 struct vmem_altmap;
4870 void *vmemmap_alloc_block_buf(unsigned long size, int node,
4871 			      struct vmem_altmap *altmap);
4872 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
4873 void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
4874 		     unsigned long addr, unsigned long next);
4875 int vmemmap_check_pmd(pmd_t *pmd, int node,
4876 		      unsigned long addr, unsigned long next);
4877 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
4878 			       int node, struct vmem_altmap *altmap);
4879 int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
4880 			       int node, struct vmem_altmap *altmap);
4881 int vmemmap_populate(unsigned long start, unsigned long end, int node,
4882 		struct vmem_altmap *altmap);
4883 int vmemmap_populate_hvo(unsigned long start, unsigned long end,
4884 			 unsigned int order, struct zone *zone,
4885 			 unsigned long headsize);
4886 void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node,
4887 			  unsigned long headsize);
4888 void vmemmap_populate_print_last(void);
4889 #ifdef CONFIG_MEMORY_HOTPLUG
4890 void vmemmap_free(unsigned long start, unsigned long end,
4891 		struct vmem_altmap *altmap);
4892 #endif
4893 
4894 #ifdef CONFIG_SPARSEMEM_VMEMMAP
vmem_altmap_offset(const struct vmem_altmap * altmap)4895 static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
4896 {
4897 	/* number of pfns from base where pfn_to_page() is valid */
4898 	if (altmap)
4899 		return altmap->reserve + altmap->free;
4900 	return 0;
4901 }
4902 
/* Return nr_pfns previously allocated from the altmap back to its pool. */
static inline void vmem_altmap_free(struct vmem_altmap *altmap,
				    unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
4908 #else
/* !CONFIG_SPARSEMEM_VMEMMAP: altmaps are not used, offset is always zero. */
static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
	return 0;
}
4913 
/* !CONFIG_SPARSEMEM_VMEMMAP: nothing to return to an altmap pool. */
static inline void vmem_altmap_free(struct vmem_altmap *altmap,
				    unsigned long nr_pfns)
{
}
4918 #endif
4919 
4920 #define VMEMMAP_RESERVE_NR	2
4921 #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
__vmemmap_can_optimize(struct vmem_altmap * altmap,struct dev_pagemap * pgmap)4922 static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
4923 					  struct dev_pagemap *pgmap)
4924 {
4925 	unsigned long nr_pages;
4926 	unsigned long nr_vmemmap_pages;
4927 
4928 	if (!pgmap || !is_power_of_2(sizeof(struct page)))
4929 		return false;
4930 
4931 	nr_pages = pgmap_vmemmap_nr(pgmap);
4932 	nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
4933 	/*
4934 	 * For vmemmap optimization with DAX we need minimum 2 vmemmap
4935 	 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
4936 	 */
4937 	return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
4938 }
4939 /*
4940  * If we don't have an architecture override, use the generic rule
4941  */
4942 #ifndef vmemmap_can_optimize
4943 #define vmemmap_can_optimize __vmemmap_can_optimize
4944 #endif
4945 
4946 #else
/* Without CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP the optimization is off. */
static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
					   struct dev_pagemap *pgmap)
{
	return false;
}
4952 #endif
4953 
/*
 * Behaviour-modifier flags passed to the memory-failure entry points
 * (memory_failure() and friends declared below).
 */
enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,	/* caller already took a page reference */
	MF_ACTION_REQUIRED = 1 << 1,	/* synchronous fault; action is required */
	MF_MUST_KILL = 1 << 2,		/* always kill processes mapping the page */
	MF_SOFT_OFFLINE = 1 << 3,	/* invoked from the soft-offline path */
	MF_UNPOISON = 1 << 4,		/* undo poisoning (see unpoison_memory()) */
	MF_SW_SIMULATED = 1 << 5,	/* software-injected, not real hardware */
	MF_NO_RETRY = 1 << 6,		/* do not retry the handling attempt */
	MF_MEM_PRE_REMOVE = 1 << 7,	/* called ahead of memory removal */
};
4964 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
4965 		      unsigned long count, int mf_flags);
4966 extern int memory_failure(unsigned long pfn, int flags);
4967 extern int unpoison_memory(unsigned long pfn);
4968 extern atomic_long_t num_poisoned_pages __read_mostly;
4969 extern int soft_offline_page(unsigned long pfn, int flags);
4970 #ifdef CONFIG_MEMORY_FAILURE
4971 /*
4972  * Sysfs entries for memory failure handling statistics.
4973  */
4974 extern const struct attribute_group memory_failure_attr_group;
4975 extern void memory_failure_queue(unsigned long pfn, int flags);
4976 extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
4977 					bool *migratable_cleared);
4978 void num_poisoned_pages_inc(unsigned long pfn);
4979 void num_poisoned_pages_sub(unsigned long pfn, long i);
4980 #else
/* !CONFIG_MEMORY_FAILURE: nothing to queue. */
static inline void memory_failure_queue(unsigned long pfn, int flags)
{
}
4984 
static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared)
{
	/* !CONFIG_MEMORY_FAILURE stub. */
	return 0;
}
4990 
/* !CONFIG_MEMORY_FAILURE: poisoned-page accounting is compiled out. */
static inline void num_poisoned_pages_inc(unsigned long pfn)
{
}
4994 
/* !CONFIG_MEMORY_FAILURE: poisoned-page accounting is compiled out. */
static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
{
}
4998 #endif
4999 
5000 #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
5001 extern void memblk_nr_poison_inc(unsigned long pfn);
5002 extern void memblk_nr_poison_sub(unsigned long pfn, long i);
5003 #else
/* Per-memory-block poison counting needs MEMORY_FAILURE && MEMORY_HOTPLUG. */
static inline void memblk_nr_poison_inc(unsigned long pfn)
{
}
5007 
/* Per-memory-block poison counting needs MEMORY_FAILURE && MEMORY_HOTPLUG. */
static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
{
}
5011 #endif
5012 
5013 #ifndef arch_memory_failure
/*
 * Default when the architecture provides no arch_memory_failure():
 * -ENXIO presumably signals "pfn not handled here" to the caller — the
 * override lives in arch code, outside this header.
 */
static inline int arch_memory_failure(unsigned long pfn, int flags)
{
	return -ENXIO;
}
5018 #endif
5019 
5020 #ifndef arch_is_platform_page
/* Default: no platform-reserved pages; architectures may override. */
static inline bool arch_is_platform_page(u64 paddr)
{
	return false;
}
5025 #endif
5026 
/*
 * Result codes reported by the per-page-type error handlers in the
 * memory-failure path.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};
5036 
/*
 * Page classifications used when reporting the action taken on a
 * hardware-poisoned page; the names describe the page's state at the
 * time memory failure was handled.
 */
enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_GET_HWPOISON,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_DAX,
	MF_MSG_UNSPLIT_THP,
	MF_MSG_ALREADY_POISONED,
	MF_MSG_PFN_MAP,
	MF_MSG_UNKNOWN,
};
5061 
5062 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5063 void folio_zero_user(struct folio *folio, unsigned long addr_hint);
5064 int copy_user_large_folio(struct folio *dst, struct folio *src,
5065 			  unsigned long addr_hint,
5066 			  struct vm_area_struct *vma);
5067 long copy_folio_from_user(struct folio *dst_folio,
5068 			   const void __user *usr_src,
5069 			   bool allow_pagefault);
5070 
5071 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5072 
5073 #if MAX_NUMNODES > 1
5074 void __init setup_nr_node_ids(void);
5075 #else
/* Single-node build (MAX_NUMNODES <= 1): nothing to compute. */
static inline void setup_nr_node_ids(void) {}
5077 #endif
5078 
5079 extern int memcmp_pages(struct page *page1, struct page *page2);
5080 
static inline int pages_identical(struct page *page1, struct page *page2)
{
	/* memcmp_pages() follows memcmp() convention: 0 means equal content. */
	return memcmp_pages(page1, page2) == 0;
}
5085 
5086 #ifdef CONFIG_MAPPING_DIRTY_HELPERS
5087 unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
5088 						pgoff_t first_index, pgoff_t nr,
5089 						pgoff_t bitmap_pgoff,
5090 						unsigned long *bitmap,
5091 						pgoff_t *start,
5092 						pgoff_t *end);
5093 
5094 unsigned long wp_shared_mapping_range(struct address_space *mapping,
5095 				      pgoff_t first_index, pgoff_t nr);
5096 #endif
5097 
5098 #ifdef CONFIG_ANON_VMA_NAME
5099 int set_anon_vma_name(unsigned long addr, unsigned long size,
5100 		      const char __user *uname);
5101 #else
/* !CONFIG_ANON_VMA_NAME: naming anonymous VMAs is not supported. */
static inline
int set_anon_vma_name(unsigned long addr, unsigned long size,
		      const char __user *uname)
{
	return -EINVAL;
}
5108 #endif
5109 
5110 #ifdef CONFIG_UNACCEPTED_MEMORY
5111 
5112 bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
5113 void accept_memory(phys_addr_t start, unsigned long size);
5114 
5115 #else
5116 
static inline bool range_contains_unaccepted_memory(phys_addr_t start,
						    unsigned long size)
{
	/* !CONFIG_UNACCEPTED_MEMORY: unaccepted memory cannot exist. */
	return false;
}
5122 
static inline void accept_memory(phys_addr_t start, unsigned long size)
{
	/* !CONFIG_UNACCEPTED_MEMORY: nothing to accept. */
}
5126 
5127 #endif
5128 
pfn_is_unaccepted_memory(unsigned long pfn)5129 static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
5130 {
5131 	return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
5132 }
5133 
5134 void vma_pgtable_walk_begin(struct vm_area_struct *vma);
5135 void vma_pgtable_walk_end(struct vm_area_struct *vma);
5136 
5137 int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
5138 int reserve_mem_release_by_name(const char *name);
5139 
5140 #ifdef CONFIG_64BIT
5141 int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
5142 #else
/* mseal() is implemented only on 64-bit kernels; see the CONFIG_64BIT guard. */
static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
{
	/* noop on 32 bit */
	return 0;
}
5148 #endif
5149 
5150 /*
5151  * user_alloc_needs_zeroing checks if a user folio from page allocator needs to
5152  * be zeroed or not.
5153  */
user_alloc_needs_zeroing(void)5154 static inline bool user_alloc_needs_zeroing(void)
5155 {
5156 	/*
5157 	 * for user folios, arch with cache aliasing requires cache flush and
5158 	 * arc changes folio->flags to make icache coherent with dcache, so
5159 	 * always return false to make caller use
5160 	 * clear_user_page()/clear_user_highpage().
5161 	 */
5162 	return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() ||
5163 	       !static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
5164 				   &init_on_alloc);
5165 }
5166 
5167 int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
5168 int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
5169 int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
5170 
5171 /*
5172  * DMA mapping IDs for page_pool
5173  *
5174  * When DMA-mapping a page, page_pool allocates an ID (from an xarray) and
5175  * stashes it in the upper bits of page->pp_magic. Non-PP pages can have
5176  * arbitrary kernel pointers stored in the same field as pp_magic (since
5177  * it overlaps with page->lru.next), so we must ensure that we cannot
5178  * mistake a valid kernel pointer with any of the values we write into this
5179  * field.
5180  *
5181  * On architectures that set POISON_POINTER_DELTA, this is already ensured,
5182  * since this value becomes part of PP_SIGNATURE; meaning we can just use the
5183  * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the
5184  * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is
5185  * 0, we use the lowest bit of PAGE_OFFSET as the boundary if that value is
5186  * known at compile-time.
5187  *
5188  * If the value of PAGE_OFFSET is not known at compile time, or if it is too
5189  * small to leave at least 8 bits available above PP_SIGNATURE, we define the
5190  * number of bits to be 0, which turns off the DMA index tracking altogether
5191  * (see page_pool_register_dma_index()).
5192  */
/* First bit above PP_SIGNATURE's highest set bit (ignoring the poison delta). */
#define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA))
#if POISON_POINTER_DELTA > 0
/* PP_SIGNATURE includes POISON_POINTER_DELTA, so limit the size of the DMA
 * index to not overlap with that if set
 */
#define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT)
#else
/* Use the lowest bit of PAGE_OFFSET if there's at least 8 bits available; see above */
#define PP_DMA_INDEX_MIN_OFFSET (1 << (PP_DMA_INDEX_SHIFT + 8))
#define PP_DMA_INDEX_BITS ((__builtin_constant_p(PAGE_OFFSET) && \
			    PAGE_OFFSET >= PP_DMA_INDEX_MIN_OFFSET && \
			    !(PAGE_OFFSET & (PP_DMA_INDEX_MIN_OFFSET - 1))) ? \
			      MIN(32, __ffs(PAGE_OFFSET) - PP_DMA_INDEX_SHIFT) : 0)

#endif

/* Mask covering the DMA-index bits stashed in page->pp_magic (see above). */
#define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
				  PP_DMA_INDEX_SHIFT)
5211 
/* Flags stored in struct page_snapshot::flags. */
#define PAGE_SNAPSHOT_FAITHFUL (1 << 0)	/* snapshot is faithful to the page */
#define PAGE_SNAPSHOT_PG_BUDDY (1 << 1)	/* PG_buddy was set when snapshotted */
#define PAGE_SNAPSHOT_PG_IDLE  (1 << 2)	/* PG_idle was set when snapshotted */

/*
 * Point-in-time copy of a struct page and its enclosing folio, filled in
 * by snapshot_page().
 */
struct page_snapshot {
	struct folio folio_snapshot;	/* copy of the enclosing folio */
	struct page page_snapshot;	/* copy of the page itself */
	unsigned long pfn;		/* pfn of the snapshotted page */
	unsigned long idx;		/* presumably page's index within the folio — confirm in snapshot_page() */
	unsigned long flags;		/* PAGE_SNAPSHOT_* flags */
};
5223 
snapshot_page_is_faithful(const struct page_snapshot * ps)5224 static inline bool snapshot_page_is_faithful(const struct page_snapshot *ps)
5225 {
5226 	return ps->flags & PAGE_SNAPSHOT_FAITHFUL;
5227 }
5228 
5229 void snapshot_page(struct page_snapshot *ps, const struct page *page);
5230 
5231 void map_anon_folio_pte_nopf(struct folio *folio, pte_t *pte,
5232 		struct vm_area_struct *vma, unsigned long addr,
5233 		bool uffd_wp);
5234 
5235 #endif /* _LINUX_MM_H */
5236