xref: /linux/mm/huge_memory.c (revision 57fca3a8ed8e8e42b456bef93055e8b73b1e358f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 2009  Red Hat, Inc.
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/numa_balancing.h>
12 #include <linux/highmem.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/rmap.h>
16 #include <linux/swap.h>
17 #include <linux/shrinker.h>
18 #include <linux/mm_inline.h>
19 #include <linux/swapops.h>
20 #include <linux/backing-dev.h>
21 #include <linux/dax.h>
22 #include <linux/mm_types.h>
23 #include <linux/khugepaged.h>
24 #include <linux/freezer.h>
25 #include <linux/mman.h>
26 #include <linux/memremap.h>
27 #include <linux/pagemap.h>
28 #include <linux/debugfs.h>
29 #include <linux/migrate.h>
30 #include <linux/hashtable.h>
31 #include <linux/userfaultfd_k.h>
32 #include <linux/page_idle.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/oom.h>
35 #include <linux/numa.h>
36 #include <linux/page_owner.h>
37 #include <linux/sched/sysctl.h>
38 #include <linux/memory-tiers.h>
39 #include <linux/compat.h>
40 #include <linux/pgalloc.h>
41 #include <linux/pgalloc_tag.h>
42 #include <linux/pagewalk.h>
43 
44 #include <asm/tlb.h>
45 #include "internal.h"
46 #include "swap.h"
47 
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/thp.h>
50 
51 /*
52  * By default, transparent hugepage support is disabled in order to avoid
53  * risking an increased memory footprint for applications that are not
54  * guaranteed to benefit from it. When transparent hugepage support is
55  * enabled, it is for all mappings, and khugepaged scans all mappings.
56  * Defrag is invoked by khugepaged hugepage allocations and by page faults
57  * for all hugepage allocations.
58  */
59 unsigned long transparent_hugepage_flags __read_mostly =
60 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
61 	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
62 #endif
63 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
64 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
65 #endif
66 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
67 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
68 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
69 
70 static struct shrinker *deferred_split_shrinker;
71 static unsigned long deferred_split_count(struct shrinker *shrink,
72 					  struct shrink_control *sc);
73 static unsigned long deferred_split_scan(struct shrinker *shrink,
74 					 struct shrink_control *sc);
75 static bool split_underused_thp = true;
76 
77 static atomic_t huge_zero_refcount;
78 struct folio *huge_zero_folio __read_mostly;
79 unsigned long huge_zero_pfn __read_mostly = ~0UL;
80 unsigned long huge_anon_orders_always __read_mostly;
81 unsigned long huge_anon_orders_madvise __read_mostly;
82 unsigned long huge_anon_orders_inherit __read_mostly;
83 static bool anon_orders_configured __initdata;
84 
85 static inline bool file_thp_enabled(struct vm_area_struct *vma)
86 {
87 	struct inode *inode;
88 
89 	if (!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
90 		return false;
91 
92 	if (!vma->vm_file)
93 		return false;
94 
95 	inode = file_inode(vma->vm_file);
96 
97 	if (IS_ANON_FILE(inode))
98 		return false;
99 
100 	return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
101 }
102 
103 /* Returns true if we are unable to access the VMA's folios. */
104 static bool vma_is_special_huge(const struct vm_area_struct *vma)
105 {
106 	if (vma_is_dax(vma))
107 		return false;
108 	return vma_test_any(vma, VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT);
109 }
110 
111 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
112 					 vm_flags_t vm_flags,
113 					 enum tva_type type,
114 					 unsigned long orders)
115 {
116 	const bool smaps = type == TVA_SMAPS;
117 	const bool in_pf = type == TVA_PAGEFAULT;
118 	const bool forced_collapse = type == TVA_FORCED_COLLAPSE;
119 	unsigned long supported_orders;
120 
121 	/* Check the intersection of requested and supported orders. */
122 	if (vma_is_anonymous(vma))
123 		supported_orders = THP_ORDERS_ALL_ANON;
124 	else if (vma_is_dax(vma) || vma_is_special_huge(vma))
125 		supported_orders = THP_ORDERS_ALL_SPECIAL_DAX;
126 	else
127 		supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
128 
129 	orders &= supported_orders;
130 	if (!orders)
131 		return 0;
132 
133 	if (!vma->vm_mm)		/* vdso */
134 		return 0;
135 
136 	if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags, forced_collapse))
137 		return 0;
138 
139 	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
140 	if (vma_is_dax(vma))
141 		return in_pf ? orders : 0;
142 
143 	/*
144 	 * khugepaged special VMA and hugetlb VMA.
145 	 * Must be checked after dax since some dax mappings may have
146 	 * VM_MIXEDMAP set.
147 	 */
148 	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
149 		return 0;
150 
151 	/*
152 	 * Check alignment for file vma and size for both file and anon vma by
153 	 * filtering out the unsuitable orders.
154 	 *
155 	 * Skip the check for page fault. Huge fault does the check in fault
156 	 * handlers.
157 	 */
158 	if (!in_pf) {
159 		int order = highest_order(orders);
160 		unsigned long addr;
161 
162 		while (orders) {
163 			addr = vma->vm_end - (PAGE_SIZE << order);
164 			if (thp_vma_suitable_order(vma, addr, order))
165 				break;
166 			order = next_order(&orders, order);
167 		}
168 
169 		if (!orders)
170 			return 0;
171 	}
172 
173 	/*
174 	 * Enabled via shmem mount options or sysfs settings.
175 	 * Must be done before hugepage flags check since shmem has its
176 	 * own flags.
177 	 */
178 	if (!in_pf && shmem_file(vma->vm_file))
179 		return orders & shmem_allowable_huge_orders(file_inode(vma->vm_file),
180 						   vma, vma->vm_pgoff, 0,
181 						   forced_collapse);
182 
183 	if (!vma_is_anonymous(vma)) {
184 		/*
185 		 * Enforce THP collapse requirements as necessary. Anonymous vmas
186 		 * were already handled in thp_vma_allowable_orders().
187 		 */
188 		if (!forced_collapse &&
189 		    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
190 						    !hugepage_global_always())))
191 			return 0;
192 
193 		/*
194 		 * Trust that ->huge_fault() handlers know what they are doing
195 		 * in fault path.
196 		 */
197 		if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
198 			return orders;
199 		/* Only regular file is valid in collapse path */
200 		if (((!in_pf || smaps)) && file_thp_enabled(vma))
201 			return orders;
202 		return 0;
203 	}
204 
205 	if (vma_is_temporary_stack(vma))
206 		return 0;
207 
208 	/*
209 	 * THPeligible bit of smaps should show 1 for proper VMAs even
210 	 * though anon_vma is not initialized yet.
211 	 *
212 	 * Allow page fault since anon_vma may not be initialized until
213 	 * the first page fault.
214 	 */
215 	if (!vma->anon_vma)
216 		return (smaps || in_pf) ? orders : 0;
217 
218 	return orders;
219 }
220 
221 static bool get_huge_zero_folio(void)
222 {
223 	struct folio *zero_folio;
224 retry:
225 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
226 		return true;
227 
228 	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO | __GFP_ZEROTAGS) &
229 				 ~__GFP_MOVABLE,
230 			HPAGE_PMD_ORDER);
231 	if (!zero_folio) {
232 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
233 		return false;
234 	}
235 	/* Ensure zero folio won't have large_rmappable flag set. */
236 	folio_clear_large_rmappable(zero_folio);
237 	preempt_disable();
238 	if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
239 		preempt_enable();
240 		folio_put(zero_folio);
241 		goto retry;
242 	}
243 	WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
244 
245 	/* We take an additional reference here. It will be put back by the shrinker. */
246 	atomic_set(&huge_zero_refcount, 2);
247 	preempt_enable();
248 	count_vm_event(THP_ZERO_PAGE_ALLOC);
249 	return true;
250 }
251 
252 static void put_huge_zero_folio(void)
253 {
254 	/*
255 	 * Counter should never go to zero here. Only shrinker can put
256 	 * last reference.
257 	 */
258 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
259 }
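
/*
 * Sketch of the zero-folio refcount lifecycle implied by the two helpers
 * above (illustrative only, not extra kernel code):
 *
 *	get_huge_zero_folio();	// first caller: refcount 0 -> 2 (caller + shrinker)
 *	get_huge_zero_folio();	// later callers: 2 -> 3, ...
 *	put_huge_zero_folio();	// 3 -> 2
 *	put_huge_zero_folio();	// 2 -> 1, only the shrinker reference is left
 *
 * Only shrink_huge_zero_folio_scan() may drop the last reference (1 -> 0)
 * and free the folio.
 */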
260 
261 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
262 {
263 	if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
264 		return huge_zero_folio;
265 
266 	if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm))
267 		return READ_ONCE(huge_zero_folio);
268 
269 	if (!get_huge_zero_folio())
270 		return NULL;
271 
272 	if (mm_flags_test_and_set(MMF_HUGE_ZERO_FOLIO, mm))
273 		put_huge_zero_folio();
274 
275 	return READ_ONCE(huge_zero_folio);
276 }
277 
278 void mm_put_huge_zero_folio(struct mm_struct *mm)
279 {
280 	if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
281 		return;
282 
283 	if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm))
284 		put_huge_zero_folio();
285 }
286 
287 static unsigned long shrink_huge_zero_folio_count(struct shrinker *shrink,
288 						  struct shrink_control *sc)
289 {
290 	/* we can free zero page only if last reference remains */
291 	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
292 }
293 
294 static unsigned long shrink_huge_zero_folio_scan(struct shrinker *shrink,
295 						 struct shrink_control *sc)
296 {
297 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
298 		struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
299 		BUG_ON(zero_folio == NULL);
300 		WRITE_ONCE(huge_zero_pfn, ~0UL);
301 		folio_put(zero_folio);
302 		return HPAGE_PMD_NR;
303 	}
304 
305 	return 0;
306 }
307 
308 static struct shrinker *huge_zero_folio_shrinker;
309 
310 #ifdef CONFIG_SYSFS
311 static ssize_t enabled_show(struct kobject *kobj,
312 			    struct kobj_attribute *attr, char *buf)
313 {
314 	const char *output;
315 
316 	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
317 		output = "[always] madvise never";
318 	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
319 			  &transparent_hugepage_flags))
320 		output = "always [madvise] never";
321 	else
322 		output = "always madvise [never]";
323 
324 	return sysfs_emit(buf, "%s\n", output);
325 }
326 
327 enum anon_enabled_mode {
328 	ANON_ENABLED_ALWAYS	= 0,
329 	ANON_ENABLED_INHERIT	= 1,
330 	ANON_ENABLED_MADVISE	= 2,
331 	ANON_ENABLED_NEVER	= 3,
332 };
333 
334 static const char * const anon_enabled_mode_strings[] = {
335 	[ANON_ENABLED_ALWAYS]	= "always",
336 	[ANON_ENABLED_INHERIT]	= "inherit",
337 	[ANON_ENABLED_MADVISE]	= "madvise",
338 	[ANON_ENABLED_NEVER]	= "never",
339 };
340 
341 enum global_enabled_mode {
342 	GLOBAL_ENABLED_ALWAYS	= 0,
343 	GLOBAL_ENABLED_MADVISE	= 1,
344 	GLOBAL_ENABLED_NEVER	= 2,
345 };
346 
347 static const char * const global_enabled_mode_strings[] = {
348 	[GLOBAL_ENABLED_ALWAYS]		= "always",
349 	[GLOBAL_ENABLED_MADVISE]	= "madvise",
350 	[GLOBAL_ENABLED_NEVER]		= "never",
351 };
352 
353 static bool set_global_enabled_mode(enum global_enabled_mode mode)
354 {
355 	static const unsigned long thp_flags[] = {
356 		TRANSPARENT_HUGEPAGE_FLAG,
357 		TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
358 	};
359 	enum global_enabled_mode m;
360 	bool changed = false;
361 
362 	for (m = 0; m < ARRAY_SIZE(thp_flags); m++) {
363 		if (m == mode)
364 			changed |= !test_and_set_bit(thp_flags[m],
365 						     &transparent_hugepage_flags);
366 		else
367 			changed |= test_and_clear_bit(thp_flags[m],
368 						      &transparent_hugepage_flags);
369 	}
370 
371 	return changed;
372 }
373 
374 static ssize_t enabled_store(struct kobject *kobj,
375 			     struct kobj_attribute *attr,
376 			     const char *buf, size_t count)
377 {
378 	int mode;
379 
380 	mode = sysfs_match_string(global_enabled_mode_strings, buf);
381 	if (mode < 0)
382 		return -EINVAL;
383 
384 	if (set_global_enabled_mode(mode)) {
385 		int err = start_stop_khugepaged();
386 
387 		if (err)
388 			return err;
389 	} else {
390 		/*
391 		 * Recalculate watermarks even when the mode didn't
392 		 * change, as the previous code always called
393 		 * start_stop_khugepaged() which does this internally.
394 		 */
395 		set_recommended_min_free_kbytes();
396 	}
397 	return count;
398 }
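
/*
 * Minimal userspace sketch of driving this attribute (illustrative, not
 * kernel code; error handling trimmed):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/mm/transparent_hugepage/enabled",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// any of "always", "madvise", "never" lands in enabled_store()
 *		if (write(fd, "madvise", 7) != 7)
 *			return 1;
 *		return close(fd) ? 1 : 0;
 *	}
 */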
399 
400 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
401 
402 ssize_t single_hugepage_flag_show(struct kobject *kobj,
403 				  struct kobj_attribute *attr, char *buf,
404 				  enum transparent_hugepage_flag flag)
405 {
406 	return sysfs_emit(buf, "%d\n",
407 			  !!test_bit(flag, &transparent_hugepage_flags));
408 }
409 
410 ssize_t single_hugepage_flag_store(struct kobject *kobj,
411 				 struct kobj_attribute *attr,
412 				 const char *buf, size_t count,
413 				 enum transparent_hugepage_flag flag)
414 {
415 	unsigned long value;
416 	int ret;
417 
418 	ret = kstrtoul(buf, 10, &value);
419 	if (ret < 0)
420 		return ret;
421 	if (value > 1)
422 		return -EINVAL;
423 
424 	if (value)
425 		set_bit(flag, &transparent_hugepage_flags);
426 	else
427 		clear_bit(flag, &transparent_hugepage_flags);
428 
429 	return count;
430 }
431 
432 static ssize_t defrag_show(struct kobject *kobj,
433 			   struct kobj_attribute *attr, char *buf)
434 {
435 	const char *output;
436 
437 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
438 		     &transparent_hugepage_flags))
439 		output = "[always] defer defer+madvise madvise never";
440 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
441 			  &transparent_hugepage_flags))
442 		output = "always [defer] defer+madvise madvise never";
443 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
444 			  &transparent_hugepage_flags))
445 		output = "always defer [defer+madvise] madvise never";
446 	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
447 			  &transparent_hugepage_flags))
448 		output = "always defer defer+madvise [madvise] never";
449 	else
450 		output = "always defer defer+madvise madvise [never]";
451 
452 	return sysfs_emit(buf, "%s\n", output);
453 }
454 
455 static ssize_t defrag_store(struct kobject *kobj,
456 			    struct kobj_attribute *attr,
457 			    const char *buf, size_t count)
458 {
459 	if (sysfs_streq(buf, "always")) {
460 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
461 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
462 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
463 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
464 	} else if (sysfs_streq(buf, "defer+madvise")) {
465 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
466 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
467 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
468 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
469 	} else if (sysfs_streq(buf, "defer")) {
470 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
471 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
472 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
473 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
474 	} else if (sysfs_streq(buf, "madvise")) {
475 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
476 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
477 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
478 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
479 	} else if (sysfs_streq(buf, "never")) {
480 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
481 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
482 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
483 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
484 	} else
485 		return -EINVAL;
486 
487 	return count;
488 }
489 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
490 
491 static ssize_t use_zero_page_show(struct kobject *kobj,
492 				  struct kobj_attribute *attr, char *buf)
493 {
494 	return single_hugepage_flag_show(kobj, attr, buf,
495 					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
496 }
497 static ssize_t use_zero_page_store(struct kobject *kobj,
498 		struct kobj_attribute *attr, const char *buf, size_t count)
499 {
500 	return single_hugepage_flag_store(kobj, attr, buf, count,
501 				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
502 }
503 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
504 
505 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
506 				   struct kobj_attribute *attr, char *buf)
507 {
508 	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
509 }
510 static struct kobj_attribute hpage_pmd_size_attr =
511 	__ATTR_RO(hpage_pmd_size);
512 
513 static ssize_t split_underused_thp_show(struct kobject *kobj,
514 			    struct kobj_attribute *attr, char *buf)
515 {
516 	return sysfs_emit(buf, "%d\n", split_underused_thp);
517 }
518 
519 static ssize_t split_underused_thp_store(struct kobject *kobj,
520 			     struct kobj_attribute *attr,
521 			     const char *buf, size_t count)
522 {
523 	int err = kstrtobool(buf, &split_underused_thp);
524 
525 	if (err < 0)
526 		return err;
527 
528 	return count;
529 }
530 
531 static struct kobj_attribute split_underused_thp_attr = __ATTR(
532 	shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);
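
/*
 * Exposed as /sys/kernel/mm/transparent_hugepage/shrink_underused.
 * kstrtobool() accepts the usual spellings ("1"/"0", "y"/"n", "on"/"off"),
 * so e.g. writing "0" stops the deferred-split shrinker from splitting
 * underused THPs.
 */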
533 
534 static struct attribute *hugepage_attr[] = {
535 	&enabled_attr.attr,
536 	&defrag_attr.attr,
537 	&use_zero_page_attr.attr,
538 	&hpage_pmd_size_attr.attr,
539 #ifdef CONFIG_SHMEM
540 	&shmem_enabled_attr.attr,
541 #endif
542 	&split_underused_thp_attr.attr,
543 	NULL,
544 };
545 
546 static const struct attribute_group hugepage_attr_group = {
547 	.attrs = hugepage_attr,
548 };
549 
550 static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
551 static void thpsize_release(struct kobject *kobj);
552 static DEFINE_SPINLOCK(huge_anon_orders_lock);
553 static LIST_HEAD(thpsize_list);
554 
555 static ssize_t anon_enabled_show(struct kobject *kobj,
556 				 struct kobj_attribute *attr, char *buf)
557 {
558 	int order = to_thpsize(kobj)->order;
559 	const char *output;
560 
561 	if (test_bit(order, &huge_anon_orders_always))
562 		output = "[always] inherit madvise never";
563 	else if (test_bit(order, &huge_anon_orders_inherit))
564 		output = "always [inherit] madvise never";
565 	else if (test_bit(order, &huge_anon_orders_madvise))
566 		output = "always inherit [madvise] never";
567 	else
568 		output = "always inherit madvise [never]";
569 
570 	return sysfs_emit(buf, "%s\n", output);
571 }
572 
573 static bool set_anon_enabled_mode(int order, enum anon_enabled_mode mode)
574 {
575 	static unsigned long *enabled_orders[] = {
576 		&huge_anon_orders_always,
577 		&huge_anon_orders_inherit,
578 		&huge_anon_orders_madvise,
579 	};
580 	enum anon_enabled_mode m;
581 	bool changed = false;
582 
583 	spin_lock(&huge_anon_orders_lock);
584 	for (m = 0; m < ARRAY_SIZE(enabled_orders); m++) {
585 		if (m == mode)
586 			changed |= !__test_and_set_bit(order, enabled_orders[m]);
587 		else
588 			changed |= __test_and_clear_bit(order, enabled_orders[m]);
589 	}
590 	spin_unlock(&huge_anon_orders_lock);
591 
592 	return changed;
593 }
594 
595 static ssize_t anon_enabled_store(struct kobject *kobj,
596 				  struct kobj_attribute *attr,
597 				  const char *buf, size_t count)
598 {
599 	int order = to_thpsize(kobj)->order;
600 	int mode;
601 
602 	mode = sysfs_match_string(anon_enabled_mode_strings, buf);
603 	if (mode < 0)
604 		return -EINVAL;
605 
606 	if (set_anon_enabled_mode(order, mode)) {
607 		int err = start_stop_khugepaged();
608 
609 		if (err)
610 			return err;
611 	} else {
612 		/*
613 		 * Recalculate watermarks even when the mode didn't
614 		 * change, as the previous code always called
615 		 * start_stop_khugepaged() which does this internally.
616 		 */
617 		set_recommended_min_free_kbytes();
618 	}
619 
620 	return count;
621 }
622 
623 static struct kobj_attribute anon_enabled_attr =
624 	__ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);
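
/*
 * Per-size twin of the global "enabled" knob, e.g.
 * /sys/kernel/mm/transparent_hugepage/hugepages-64kB/enabled (the path
 * depends on page size and order). The extra "inherit" value defers the
 * order to the global policy via huge_anon_orders_inherit.
 */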
625 
626 static struct attribute *anon_ctrl_attrs[] = {
627 	&anon_enabled_attr.attr,
628 	NULL,
629 };
630 
631 static const struct attribute_group anon_ctrl_attr_grp = {
632 	.attrs = anon_ctrl_attrs,
633 };
634 
635 static struct attribute *file_ctrl_attrs[] = {
636 #ifdef CONFIG_SHMEM
637 	&thpsize_shmem_enabled_attr.attr,
638 #endif
639 	NULL,
640 };
641 
642 static const struct attribute_group file_ctrl_attr_grp = {
643 	.attrs = file_ctrl_attrs,
644 };
645 
646 static struct attribute *any_ctrl_attrs[] = {
647 	NULL,
648 };
649 
650 static const struct attribute_group any_ctrl_attr_grp = {
651 	.attrs = any_ctrl_attrs,
652 };
653 
654 static const struct kobj_type thpsize_ktype = {
655 	.release = &thpsize_release,
656 	.sysfs_ops = &kobj_sysfs_ops,
657 };
658 
659 DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};
660 
661 static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
662 {
663 	unsigned long sum = 0;
664 	int cpu;
665 
666 	for_each_possible_cpu(cpu) {
667 		struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
668 
669 		sum += this->stats[order][item];
670 	}
671 
672 	return sum;
673 }
674 
675 #define DEFINE_MTHP_STAT_ATTR(_name, _index)				\
676 static ssize_t _name##_show(struct kobject *kobj,			\
677 			struct kobj_attribute *attr, char *buf)		\
678 {									\
679 	int order = to_thpsize(kobj)->order;				\
680 									\
681 	return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index));	\
682 }									\
683 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
684 
685 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
686 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
687 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
688 DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
689 DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
690 DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK);
691 DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
692 DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
693 DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
694 #ifdef CONFIG_SHMEM
695 DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
696 DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
697 DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
698 #endif
699 DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
700 DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
701 DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
702 DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
703 DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
704 
705 static struct attribute *anon_stats_attrs[] = {
706 	&anon_fault_alloc_attr.attr,
707 	&anon_fault_fallback_attr.attr,
708 	&anon_fault_fallback_charge_attr.attr,
709 #ifndef CONFIG_SHMEM
710 	&zswpout_attr.attr,
711 	&swpin_attr.attr,
712 	&swpin_fallback_attr.attr,
713 	&swpin_fallback_charge_attr.attr,
714 	&swpout_attr.attr,
715 	&swpout_fallback_attr.attr,
716 #endif
717 	&split_deferred_attr.attr,
718 	&nr_anon_attr.attr,
719 	&nr_anon_partially_mapped_attr.attr,
720 	NULL,
721 };
722 
723 static struct attribute_group anon_stats_attr_grp = {
724 	.name = "stats",
725 	.attrs = anon_stats_attrs,
726 };
727 
728 static struct attribute *file_stats_attrs[] = {
729 #ifdef CONFIG_SHMEM
730 	&shmem_alloc_attr.attr,
731 	&shmem_fallback_attr.attr,
732 	&shmem_fallback_charge_attr.attr,
733 #endif
734 	NULL,
735 };
736 
737 static struct attribute_group file_stats_attr_grp = {
738 	.name = "stats",
739 	.attrs = file_stats_attrs,
740 };
741 
742 static struct attribute *any_stats_attrs[] = {
743 #ifdef CONFIG_SHMEM
744 	&zswpout_attr.attr,
745 	&swpin_attr.attr,
746 	&swpin_fallback_attr.attr,
747 	&swpin_fallback_charge_attr.attr,
748 	&swpout_attr.attr,
749 	&swpout_fallback_attr.attr,
750 #endif
751 	&split_attr.attr,
752 	&split_failed_attr.attr,
753 	NULL,
754 };
755 
756 static struct attribute_group any_stats_attr_grp = {
757 	.name = "stats",
758 	.attrs = any_stats_attrs,
759 };
760 
761 static int sysfs_add_group(struct kobject *kobj,
762 			   const struct attribute_group *grp)
763 {
764 	int ret = -ENOENT;
765 
766 	/*
767 	 * If the group is named, try to merge first, assuming the subdirectory
768 	 * was already created. This avoids the warning emitted by
769 	 * sysfs_create_group() if the directory already exists.
770 	 */
771 	if (grp->name)
772 		ret = sysfs_merge_group(kobj, grp);
773 	if (ret)
774 		ret = sysfs_create_group(kobj, grp);
775 
776 	return ret;
777 }
778 
779 static struct thpsize *thpsize_create(int order, struct kobject *parent)
780 {
781 	unsigned long size = (PAGE_SIZE << order) / SZ_1K;
782 	struct thpsize *thpsize;
783 	int ret = -ENOMEM;
784 
785 	thpsize = kzalloc_obj(*thpsize);
786 	if (!thpsize)
787 		goto err;
788 
789 	thpsize->order = order;
790 
791 	ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
792 				   "hugepages-%lukB", size);
793 	if (ret) {
794 		kfree(thpsize);
795 		goto err;
796 	}
797 
798 
799 	ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
800 	if (ret)
801 		goto err_put;
802 
803 	ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
804 	if (ret)
805 		goto err_put;
806 
807 	if (BIT(order) & THP_ORDERS_ALL_ANON) {
808 		ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
809 		if (ret)
810 			goto err_put;
811 
812 		ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
813 		if (ret)
814 			goto err_put;
815 	}
816 
817 	if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
818 		ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
819 		if (ret)
820 			goto err_put;
821 
822 		ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
823 		if (ret)
824 			goto err_put;
825 	}
826 
827 	return thpsize;
828 err_put:
829 	kobject_put(&thpsize->kobj);
830 err:
831 	return ERR_PTR(ret);
832 }
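
/*
 * Resulting sysfs layout for one order, e.g. order 9 with 4K pages
 * (illustrative):
 *
 *	/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/
 *		enabled			(anon control)
 *		shmem_enabled		(file control, CONFIG_SHMEM)
 *		stats/
 *			anon_fault_alloc, split, split_failed, ...
 */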
833 
834 static void thpsize_release(struct kobject *kobj)
835 {
836 	kfree(to_thpsize(kobj));
837 }
838 
839 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
840 {
841 	int err;
842 	struct thpsize *thpsize;
843 	unsigned long orders;
844 	int order;
845 
846 	/*
847 	 * Default to setting PMD-sized THP to inherit the global setting and
848 	 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
849 	 * constant so we have to do this here.
850 	 */
851 	if (!anon_orders_configured)
852 		huge_anon_orders_inherit = BIT(PMD_ORDER);
853 
854 	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
855 	if (unlikely(!*hugepage_kobj)) {
856 		pr_err("failed to create transparent hugepage kobject\n");
857 		return -ENOMEM;
858 	}
859 
860 	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
861 	if (err) {
862 		pr_err("failed to register transparent hugepage group\n");
863 		goto delete_obj;
864 	}
865 
866 	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
867 	if (err) {
868 		pr_err("failed to register transparent hugepage group\n");
869 		goto remove_hp_group;
870 	}
871 
872 	orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
873 	order = highest_order(orders);
874 	while (orders) {
875 		thpsize = thpsize_create(order, *hugepage_kobj);
876 		if (IS_ERR(thpsize)) {
877 			pr_err("failed to create thpsize for order %d\n", order);
878 			err = PTR_ERR(thpsize);
879 			goto remove_all;
880 		}
881 		list_add(&thpsize->node, &thpsize_list);
882 		order = next_order(&orders, order);
883 	}
884 
885 	return 0;
886 
887 remove_all:
888 	hugepage_exit_sysfs(*hugepage_kobj);
889 	return err;
890 remove_hp_group:
891 	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
892 delete_obj:
893 	kobject_put(*hugepage_kobj);
894 	return err;
895 }
896 
897 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
898 {
899 	struct thpsize *thpsize, *tmp;
900 
901 	list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
902 		list_del(&thpsize->node);
903 		kobject_put(&thpsize->kobj);
904 	}
905 
906 	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
907 	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
908 	kobject_put(hugepage_kobj);
909 }
910 #else
911 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
912 {
913 	return 0;
914 }
915 
916 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
917 {
918 }
919 #endif /* CONFIG_SYSFS */
920 
921 static int __init thp_shrinker_init(void)
922 {
923 	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
924 						 SHRINKER_MEMCG_AWARE |
925 						 SHRINKER_NONSLAB,
926 						 "thp-deferred_split");
927 	if (!deferred_split_shrinker)
928 		return -ENOMEM;
929 
930 	deferred_split_shrinker->count_objects = deferred_split_count;
931 	deferred_split_shrinker->scan_objects = deferred_split_scan;
932 	shrinker_register(deferred_split_shrinker);
933 
934 	if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO)) {
935 		/*
936 		 * Bump the reference of the huge_zero_folio and do not
937 		 * initialize the shrinker.
938 		 *
939 		 * huge_zero_folio will always be NULL on failure. We assume
940 		 * that get_huge_zero_folio() will most likely not fail as
941 		 * thp_shrinker_init() is invoked early on during boot.
942 		 */
943 		if (!get_huge_zero_folio())
944 			pr_warn("Allocating persistent huge zero folio failed\n");
945 		return 0;
946 	}
947 
948 	huge_zero_folio_shrinker = shrinker_alloc(0, "thp-zero");
949 	if (!huge_zero_folio_shrinker) {
950 		shrinker_free(deferred_split_shrinker);
951 		return -ENOMEM;
952 	}
953 
954 	huge_zero_folio_shrinker->count_objects = shrink_huge_zero_folio_count;
955 	huge_zero_folio_shrinker->scan_objects = shrink_huge_zero_folio_scan;
956 	shrinker_register(huge_zero_folio_shrinker);
957 
958 	return 0;
959 }
960 
961 static void __init thp_shrinker_exit(void)
962 {
963 	shrinker_free(huge_zero_folio_shrinker);
964 	shrinker_free(deferred_split_shrinker);
965 }
966 
967 static int __init hugepage_init(void)
968 {
969 	int err;
970 	struct kobject *hugepage_kobj;
971 
972 	if (!has_transparent_hugepage()) {
973 		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
974 		return -EINVAL;
975 	}
976 
977 	/*
978 	 * hugepages can't be allocated by the buddy allocator
979 	 */
980 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
981 
982 	err = hugepage_init_sysfs(&hugepage_kobj);
983 	if (err)
984 		goto err_sysfs;
985 
986 	err = khugepaged_init();
987 	if (err)
988 		goto err_slab;
989 
990 	err = thp_shrinker_init();
991 	if (err)
992 		goto err_shrinker;
993 
994 	/*
995 	 * By default disable transparent hugepages on smaller systems,
996 	 * where the extra memory used could hurt more than TLB overhead
997 	 * is likely to save.  The admin can still enable it through /sys.
998 	 */
999 	if (totalram_pages() < MB_TO_PAGES(512)) {
1000 		transparent_hugepage_flags = 0;
1001 		return 0;
1002 	}
1003 
1004 	err = start_stop_khugepaged();
1005 	if (err)
1006 		goto err_khugepaged;
1007 
1008 	return 0;
1009 err_khugepaged:
1010 	thp_shrinker_exit();
1011 err_shrinker:
1012 	khugepaged_destroy();
1013 err_slab:
1014 	hugepage_exit_sysfs(hugepage_kobj);
1015 err_sysfs:
1016 	return err;
1017 }
1018 subsys_initcall(hugepage_init);
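
/*
 * For reference: with 4K pages, MB_TO_PAGES(512) is 512 * 256 = 131072
 * pages, so systems under 512MB boot with THP fully disabled until an
 * admin re-enables it through /sys.
 */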
1019 
1020 static int __init setup_transparent_hugepage(char *str)
1021 {
1022 	int ret = 0;
1023 	if (!str)
1024 		goto out;
1025 	if (!strcmp(str, "always")) {
1026 		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
1027 			&transparent_hugepage_flags);
1028 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
1029 			  &transparent_hugepage_flags);
1030 		ret = 1;
1031 	} else if (!strcmp(str, "madvise")) {
1032 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
1033 			  &transparent_hugepage_flags);
1034 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
1035 			&transparent_hugepage_flags);
1036 		ret = 1;
1037 	} else if (!strcmp(str, "never")) {
1038 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
1039 			  &transparent_hugepage_flags);
1040 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
1041 			  &transparent_hugepage_flags);
1042 		ret = 1;
1043 	}
1044 out:
1045 	if (!ret)
1046 		pr_warn("transparent_hugepage= cannot parse, ignored\n");
1047 	return ret;
1048 }
1049 __setup("transparent_hugepage=", setup_transparent_hugepage);
1050 
1051 static char str_dup[PAGE_SIZE] __initdata;
1052 static int __init setup_thp_anon(char *str)
1053 {
1054 	char *token, *range, *policy, *subtoken;
1055 	unsigned long always, inherit, madvise;
1056 	char *start_size, *end_size;
1057 	int start, end, nr;
1058 	char *p;
1059 
1060 	if (!str || strlen(str) + 1 > PAGE_SIZE)
1061 		goto err;
1062 	strscpy(str_dup, str);
1063 
1064 	always = huge_anon_orders_always;
1065 	madvise = huge_anon_orders_madvise;
1066 	inherit = huge_anon_orders_inherit;
1067 	p = str_dup;
1068 	while ((token = strsep(&p, ";")) != NULL) {
1069 		range = strsep(&token, ":");
1070 		policy = token;
1071 
1072 		if (!policy)
1073 			goto err;
1074 
1075 		while ((subtoken = strsep(&range, ",")) != NULL) {
1076 			if (strchr(subtoken, '-')) {
1077 				start_size = strsep(&subtoken, "-");
1078 				end_size = subtoken;
1079 
1080 				start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON);
1081 				end = get_order_from_str(end_size, THP_ORDERS_ALL_ANON);
1082 			} else {
1083 				start_size = end_size = subtoken;
1084 				start = end = get_order_from_str(subtoken,
1085 								 THP_ORDERS_ALL_ANON);
1086 			}
1087 
1088 			if (start == -EINVAL) {
1089 				pr_err("invalid size %s in thp_anon boot parameter\n", start_size);
1090 				goto err;
1091 			}
1092 
1093 			if (end == -EINVAL) {
1094 				pr_err("invalid size %s in thp_anon boot parameter\n", end_size);
1095 				goto err;
1096 			}
1097 
1098 			if (start < 0 || end < 0 || start > end)
1099 				goto err;
1100 
1101 			nr = end - start + 1;
1102 			if (!strcmp(policy, "always")) {
1103 				bitmap_set(&always, start, nr);
1104 				bitmap_clear(&inherit, start, nr);
1105 				bitmap_clear(&madvise, start, nr);
1106 			} else if (!strcmp(policy, "madvise")) {
1107 				bitmap_set(&madvise, start, nr);
1108 				bitmap_clear(&inherit, start, nr);
1109 				bitmap_clear(&always, start, nr);
1110 			} else if (!strcmp(policy, "inherit")) {
1111 				bitmap_set(&inherit, start, nr);
1112 				bitmap_clear(&madvise, start, nr);
1113 				bitmap_clear(&always, start, nr);
1114 			} else if (!strcmp(policy, "never")) {
1115 				bitmap_clear(&inherit, start, nr);
1116 				bitmap_clear(&madvise, start, nr);
1117 				bitmap_clear(&always, start, nr);
1118 			} else {
1119 				pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
1120 				goto err;
1121 			}
1122 		}
1123 	}
1124 
1125 	huge_anon_orders_always = always;
1126 	huge_anon_orders_madvise = madvise;
1127 	huge_anon_orders_inherit = inherit;
1128 	anon_orders_configured = true;
1129 	return 1;
1130 
1131 err:
1132 	pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
1133 	return 0;
1134 }
1135 __setup("thp_anon=", setup_thp_anon);
1136 
1137 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
1138 {
1139 	if (likely(vma->vm_flags & VM_WRITE))
1140 		pmd = pmd_mkwrite(pmd, vma);
1141 	return pmd;
1142 }
1143 
1144 static struct deferred_split *split_queue_node(int nid)
1145 {
1146 	struct pglist_data *pgdata = NODE_DATA(nid);
1147 
1148 	return &pgdata->deferred_split_queue;
1149 }
1150 
1151 #ifdef CONFIG_MEMCG
1152 static inline
1153 struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
1154 					   struct deferred_split *queue)
1155 {
1156 	if (mem_cgroup_disabled())
1157 		return NULL;
1158 	if (split_queue_node(folio_nid(folio)) == queue)
1159 		return NULL;
1160 	return container_of(queue, struct mem_cgroup, deferred_split_queue);
1161 }
1162 
1163 static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
1164 {
1165 	return memcg ? &memcg->deferred_split_queue : split_queue_node(nid);
1166 }
1167 #else
1168 static inline
1169 struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
1170 					   struct deferred_split *queue)
1171 {
1172 	return NULL;
1173 }
1174 
1175 static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
1176 {
1177 	return split_queue_node(nid);
1178 }
1179 #endif
1180 
1181 static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)
1182 {
1183 	struct deferred_split *queue;
1184 
1185 retry:
1186 	queue = memcg_split_queue(nid, memcg);
1187 	spin_lock(&queue->split_queue_lock);
1188 	/*
1189 	 * There is a period between setting memcg to dying and reparenting
1190 	 * deferred split queue, and during this period the THPs in the deferred
1191 	 * split queue will be hidden from the shrinker side.
1192 	 */
1193 	if (unlikely(memcg_is_dying(memcg))) {
1194 		spin_unlock(&queue->split_queue_lock);
1195 		memcg = parent_mem_cgroup(memcg);
1196 		goto retry;
1197 	}
1198 
1199 	return queue;
1200 }
1201 
1202 static struct deferred_split *
1203 split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags)
1204 {
1205 	struct deferred_split *queue;
1206 
1207 retry:
1208 	queue = memcg_split_queue(nid, memcg);
1209 	spin_lock_irqsave(&queue->split_queue_lock, *flags);
1210 	if (unlikely(memcg_is_dying(memcg))) {
1211 		spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
1212 		memcg = parent_mem_cgroup(memcg);
1213 		goto retry;
1214 	}
1215 
1216 	return queue;
1217 }
1218 
1219 static struct deferred_split *folio_split_queue_lock(struct folio *folio)
1220 {
1221 	return split_queue_lock(folio_nid(folio), folio_memcg(folio));
1222 }
1223 
1224 static struct deferred_split *
1225 folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
1226 {
1227 	return split_queue_lock_irqsave(folio_nid(folio), folio_memcg(folio), flags);
1228 }
1229 
1230 static inline void split_queue_unlock(struct deferred_split *queue)
1231 {
1232 	spin_unlock(&queue->split_queue_lock);
1233 }
1234 
1235 static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
1236 						 unsigned long flags)
1237 {
1238 	spin_unlock_irqrestore(&queue->split_queue_lock, flags);
1239 }
1240 
1241 static inline bool is_transparent_hugepage(const struct folio *folio)
1242 {
1243 	if (!folio_test_large(folio))
1244 		return false;
1245 
1246 	return is_huge_zero_folio(folio) ||
1247 		folio_test_large_rmappable(folio);
1248 }
1249 
1250 static unsigned long __thp_get_unmapped_area(struct file *filp,
1251 		unsigned long addr, unsigned long len,
1252 		loff_t off, unsigned long flags, unsigned long size,
1253 		vm_flags_t vm_flags)
1254 {
1255 	loff_t off_end = off + len;
1256 	loff_t off_align = round_up(off, size);
1257 	unsigned long len_pad, ret, off_sub;
1258 
1259 	if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
1260 		return 0;
1261 
1262 	if (off_end <= off_align || (off_end - off_align) < size)
1263 		return 0;
1264 
1265 	len_pad = len + size;
1266 	if (len_pad < len || (off + len_pad) < off)
1267 		return 0;
1268 
1269 	ret = mm_get_unmapped_area_vmflags(filp, addr, len_pad,
1270 					   off >> PAGE_SHIFT, flags, vm_flags);
1271 
1272 	/*
1273 	 * The failure might be due to length padding. The caller will retry
1274 	 * without the padding.
1275 	 */
1276 	if (IS_ERR_VALUE(ret))
1277 		return 0;
1278 
1279 	/*
1280 	 * Do not try to align to THP boundary if allocation at the address
1281 	 * hint succeeds.
1282 	 */
1283 	if (ret == addr)
1284 		return addr;
1285 
1286 	off_sub = (off - ret) & (size - 1);
1287 
1288 	if (mm_flags_test(MMF_TOPDOWN, current->mm) && !off_sub)
1289 		return ret + size;
1290 
1291 	ret += off_sub;
1292 	return ret;
1293 }
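
/*
 * Worked example of the padding arithmetic above (illustrative numbers),
 * with size = PMD_SIZE = 2M, off = 0x1ff000 and len = 4M:
 *
 *	off_align = round_up(0x1ff000, 2M) = 0x200000
 *	len_pad   = 4M + 2M                = 6M
 *
 * If the padded search returns ret = 0x7f0000001000, then
 *
 *	off_sub = (0x1ff000 - ret) & (2M - 1) = 0x1fe000
 *
 * and ret + off_sub = 0x7f00001ff000, which makes the virtual address and
 * the file offset congruent modulo 2M so that whole PMDs can map the file.
 */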
1294 
1295 unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
1296 		unsigned long len, unsigned long pgoff, unsigned long flags,
1297 		vm_flags_t vm_flags)
1298 {
1299 	unsigned long ret;
1300 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
1301 
1302 	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
1303 	if (ret)
1304 		return ret;
1305 
1306 	return mm_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags,
1307 					    vm_flags);
1308 }
1309 
1310 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
1311 		unsigned long len, unsigned long pgoff, unsigned long flags)
1312 {
1313 	return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
1314 }
1315 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
1316 
1317 static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
1318 		unsigned long addr)
1319 {
1320 	gfp_t gfp = vma_thp_gfp_mask(vma);
1321 	const int order = HPAGE_PMD_ORDER;
1322 	struct folio *folio;
1323 
1324 	folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK);
1325 
1326 	if (unlikely(!folio)) {
1327 		count_vm_event(THP_FAULT_FALLBACK);
1328 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
1329 		return NULL;
1330 	}
1331 
1332 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1333 	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
1334 		folio_put(folio);
1335 		count_vm_event(THP_FAULT_FALLBACK);
1336 		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
1337 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
1338 		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
1339 		return NULL;
1340 	}
1341 	folio_throttle_swaprate(folio, gfp);
1342 
1343 	/*
1344 	 * When a folio is not zeroed during allocation (__GFP_ZERO not used)
1345 	 * or user folios require special handling, folio_zero_user() is used to
1346 	 * make sure that the page corresponding to the faulting address will be
1347 	 * hot in the cache after zeroing.
1348 	 */
1349 	if (user_alloc_needs_zeroing())
1350 		folio_zero_user(folio, addr);
1351 	/*
1352 	 * The memory barrier inside __folio_mark_uptodate makes sure that
1353 	 * folio_zero_user writes become visible before the set_pmd_at()
1354 	 * write.
1355 	 */
1356 	__folio_mark_uptodate(folio);
1357 	return folio;
1358 }
1359 
1360 void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
1361 		struct vm_area_struct *vma, unsigned long haddr)
1362 {
1363 	pmd_t entry;
1364 
1365 	entry = folio_mk_pmd(folio, vma->vm_page_prot);
1366 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1367 	folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
1368 	folio_add_lru_vma(folio, vma);
1369 	set_pmd_at(vma->vm_mm, haddr, pmd, entry);
1370 	update_mmu_cache_pmd(vma, haddr, pmd);
1371 	deferred_split_folio(folio, false);
1372 }
1373 
1374 static void map_anon_folio_pmd_pf(struct folio *folio, pmd_t *pmd,
1375 		struct vm_area_struct *vma, unsigned long haddr)
1376 {
1377 	map_anon_folio_pmd_nopf(folio, pmd, vma, haddr);
1378 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1379 	count_vm_event(THP_FAULT_ALLOC);
1380 	count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
1381 	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
1382 }
1383 
1384 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1385 {
1386 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1387 	struct vm_area_struct *vma = vmf->vma;
1388 	struct folio *folio;
1389 	pgtable_t pgtable;
1390 	vm_fault_t ret = 0;
1391 
1392 	folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
1393 	if (unlikely(!folio))
1394 		return VM_FAULT_FALLBACK;
1395 
1396 	pgtable = pte_alloc_one(vma->vm_mm);
1397 	if (unlikely(!pgtable)) {
1398 		ret = VM_FAULT_OOM;
1399 		goto release;
1400 	}
1401 
1402 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1403 	if (unlikely(!pmd_none(*vmf->pmd))) {
1404 		goto unlock_release;
1405 	} else {
1406 		ret = check_stable_address_space(vma->vm_mm);
1407 		if (ret)
1408 			goto unlock_release;
1409 
1410 		/* Deliver the page fault to userland */
1411 		if (userfaultfd_missing(vma)) {
1412 			spin_unlock(vmf->ptl);
1413 			folio_put(folio);
1414 			pte_free(vma->vm_mm, pgtable);
1415 			ret = handle_userfault(vmf, VM_UFFD_MISSING);
1416 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1417 			return ret;
1418 		}
1419 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1420 		map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
1421 		mm_inc_nr_ptes(vma->vm_mm);
1422 		spin_unlock(vmf->ptl);
1423 	}
1424 
1425 	return 0;
1426 unlock_release:
1427 	spin_unlock(vmf->ptl);
1428 release:
1429 	if (pgtable)
1430 		pte_free(vma->vm_mm, pgtable);
1431 	folio_put(folio);
1432 	return ret;
1433 
1434 }
1435 
1436 vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
1437 {
1438 	struct vm_area_struct *vma = vmf->vma;
1439 	vm_fault_t ret = 0;
1440 	spinlock_t *ptl;
1441 	softleaf_t entry;
1442 	struct page *page;
1443 	struct folio *folio;
1444 
1445 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
1446 		vma_end_read(vma);
1447 		return VM_FAULT_RETRY;
1448 	}
1449 
1450 	ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1451 	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) {
1452 		spin_unlock(ptl);
1453 		return 0;
1454 	}
1455 
1456 	entry = softleaf_from_pmd(vmf->orig_pmd);
1457 	page = softleaf_to_page(entry);
1458 	folio = page_folio(page);
1459 	vmf->page = page;
1460 	vmf->pte = NULL;
1461 	if (folio_trylock(folio)) {
1462 		folio_get(folio);
1463 		spin_unlock(ptl);
1464 		ret = page_pgmap(page)->ops->migrate_to_ram(vmf);
1465 		folio_unlock(folio);
1466 		folio_put(folio);
1467 	} else {
1468 		spin_unlock(ptl);
1469 	}
1470 
1471 	return ret;
1472 }
1473 
1474 /*
1475  * always: directly stall for all thp allocations
1476  * defer: wake kswapd and fail if not immediately available
1477  * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
1478  *		  fail if not immediately available
1479  * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
1480  *	    available
1481  * never: never stall for any thp allocation
1482  */
1483 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
1484 {
1485 	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
1486 
1487 	/* Always do synchronous compaction */
1488 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
1489 		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
1490 
1491 	/* Kick kcompactd and fail quickly */
1492 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
1493 		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
1494 
1495 	/* Synchronous compaction if madvised, otherwise kick kcompactd */
1496 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
1497 		return GFP_TRANSHUGE_LIGHT |
1498 			(vma_madvised ? __GFP_DIRECT_RECLAIM :
1499 					__GFP_KSWAPD_RECLAIM);
1500 
1501 	/* Only do synchronous compaction if madvised */
1502 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
1503 		return GFP_TRANSHUGE_LIGHT |
1504 		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
1505 
1506 	return GFP_TRANSHUGE_LIGHT;
1507 }
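
/*
 * Concrete outcome (straight from the branches above): with
 * "defer+madvise" selected, a MADV_HUGEPAGE vma allocates with
 * GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM, while any other vma gets
 * GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM.
 */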
1508 
1509 /* Caller must hold page table lock. */
1510 static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
1511 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
1512 		struct folio *zero_folio)
1513 {
1514 	pmd_t entry;
1515 	entry = folio_mk_pmd(zero_folio, vma->vm_page_prot);
1516 	entry = pmd_mkspecial(entry);
1517 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1518 	set_pmd_at(mm, haddr, pmd, entry);
1519 	mm_inc_nr_ptes(mm);
1520 }
1521 
1522 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1523 {
1524 	struct vm_area_struct *vma = vmf->vma;
1525 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1526 	vm_fault_t ret;
1527 
1528 	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
1529 		return VM_FAULT_FALLBACK;
1530 	ret = vmf_anon_prepare(vmf);
1531 	if (ret)
1532 		return ret;
1533 	khugepaged_enter_vma(vma, vma->vm_flags);
1534 
1535 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
1536 			!mm_forbids_zeropage(vma->vm_mm) &&
1537 			transparent_hugepage_use_zero_page()) {
1538 		pgtable_t pgtable;
1539 		struct folio *zero_folio;
1540 		vm_fault_t ret;
1541 
1542 		pgtable = pte_alloc_one(vma->vm_mm);
1543 		if (unlikely(!pgtable))
1544 			return VM_FAULT_OOM;
1545 		zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
1546 		if (unlikely(!zero_folio)) {
1547 			pte_free(vma->vm_mm, pgtable);
1548 			count_vm_event(THP_FAULT_FALLBACK);
1549 			return VM_FAULT_FALLBACK;
1550 		}
1551 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1552 		ret = 0;
1553 		if (pmd_none(*vmf->pmd)) {
1554 			ret = check_stable_address_space(vma->vm_mm);
1555 			if (ret) {
1556 				spin_unlock(vmf->ptl);
1557 				pte_free(vma->vm_mm, pgtable);
1558 			} else if (userfaultfd_missing(vma)) {
1559 				spin_unlock(vmf->ptl);
1560 				pte_free(vma->vm_mm, pgtable);
1561 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
1562 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1563 			} else {
1564 				set_huge_zero_folio(pgtable, vma->vm_mm, vma,
1565 						   haddr, vmf->pmd, zero_folio);
1566 				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1567 				spin_unlock(vmf->ptl);
1568 			}
1569 		} else {
1570 			spin_unlock(vmf->ptl);
1571 			pte_free(vma->vm_mm, pgtable);
1572 		}
1573 		return ret;
1574 	}
1575 
1576 	return __do_huge_pmd_anonymous_page(vmf);
1577 }
1578 
1579 struct folio_or_pfn {
1580 	union {
1581 		struct folio *folio;
1582 		unsigned long pfn;
1583 	};
1584 	bool is_folio;
1585 };
1586 
1587 static vm_fault_t insert_pmd(struct vm_area_struct *vma, unsigned long addr,
1588 		pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot,
1589 		bool write)
1590 {
1591 	struct mm_struct *mm = vma->vm_mm;
1592 	pgtable_t pgtable = NULL;
1593 	spinlock_t *ptl;
1594 	pmd_t entry;
1595 
1596 	if (addr < vma->vm_start || addr >= vma->vm_end)
1597 		return VM_FAULT_SIGBUS;
1598 
1599 	if (arch_needs_pgtable_deposit()) {
1600 		pgtable = pte_alloc_one(vma->vm_mm);
1601 		if (!pgtable)
1602 			return VM_FAULT_OOM;
1603 	}
1604 
1605 	ptl = pmd_lock(mm, pmd);
1606 	if (!pmd_none(*pmd)) {
1607 		const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
1608 					  fop.pfn;
1609 
1610 		if (write) {
1611 			if (pmd_pfn(*pmd) != pfn) {
1612 				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
1613 				goto out_unlock;
1614 			}
1615 			entry = pmd_mkyoung(*pmd);
1616 			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1617 			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
1618 				update_mmu_cache_pmd(vma, addr, pmd);
1619 		}
1620 		goto out_unlock;
1621 	}
1622 
1623 	if (fop.is_folio) {
1624 		entry = folio_mk_pmd(fop.folio, vma->vm_page_prot);
1625 
1626 		if (is_huge_zero_folio(fop.folio)) {
1627 			entry = pmd_mkspecial(entry);
1628 		} else {
1629 			folio_get(fop.folio);
1630 			folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma);
1631 			add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
1632 		}
1633 	} else {
1634 		entry = pmd_mkhuge(pfn_pmd(fop.pfn, prot));
1635 		entry = pmd_mkspecial(entry);
1636 	}
1637 	if (write) {
1638 		entry = pmd_mkyoung(pmd_mkdirty(entry));
1639 		entry = maybe_pmd_mkwrite(entry, vma);
1640 	}
1641 
1642 	if (pgtable) {
1643 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
1644 		mm_inc_nr_ptes(mm);
1645 		pgtable = NULL;
1646 	}
1647 
1648 	set_pmd_at(mm, addr, pmd, entry);
1649 	update_mmu_cache_pmd(vma, addr, pmd);
1650 
1651 out_unlock:
1652 	spin_unlock(ptl);
1653 	if (pgtable)
1654 		pte_free(mm, pgtable);
1655 	return VM_FAULT_NOPAGE;
1656 }
1657 
1658 /**
1659  * vmf_insert_pfn_pmd - insert a pmd size pfn
1660  * @vmf: Structure describing the fault
1661  * @pfn: pfn to insert
1662  * @write: whether it's a write fault
1663  *
1664  * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
1665  *
1666  * Return: vm_fault_t value.
1667  */
1668 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
1669 			      bool write)
1670 {
1671 	unsigned long addr = vmf->address & PMD_MASK;
1672 	struct vm_area_struct *vma = vmf->vma;
1673 	pgprot_t pgprot = vma->vm_page_prot;
1674 	struct folio_or_pfn fop = {
1675 		.pfn = pfn,
1676 	};
1677 
1678 	/*
1679 	 * If we had pmd_special, we could avoid all these restrictions,
1680 	 * but we need to be consistent with PTEs and architectures that
1681 	 * can't support a 'special' bit.
1682 	 */
1683 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1684 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1685 						(VM_PFNMAP|VM_MIXEDMAP));
1686 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1687 
1688 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
1689 
1690 	return insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write);
1691 }
1692 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
1693 
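/**
 * vmf_insert_folio_pmd - insert a pmd size folio mapped by a pmd entry
 * @vmf: Structure describing the fault
 * @folio: folio to insert
 * @write: whether it's a write fault
 *
 * Return: vm_fault_t value.
 */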
1694 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
1695 				bool write)
1696 {
1697 	struct vm_area_struct *vma = vmf->vma;
1698 	unsigned long addr = vmf->address & PMD_MASK;
1699 	struct folio_or_pfn fop = {
1700 		.folio = folio,
1701 		.is_folio = true,
1702 	};
1703 
1704 	if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))
1705 		return VM_FAULT_SIGBUS;
1706 
1707 	return insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot, write);
1708 }
1709 EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd);
1710 
1711 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1712 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
1713 {
1714 	if (likely(vma->vm_flags & VM_WRITE))
1715 		pud = pud_mkwrite(pud);
1716 	return pud;
1717 }
1718 
1719 static vm_fault_t insert_pud(struct vm_area_struct *vma, unsigned long addr,
1720 		pud_t *pud, struct folio_or_pfn fop, pgprot_t prot, bool write)
1721 {
1722 	struct mm_struct *mm = vma->vm_mm;
1723 	spinlock_t *ptl;
1724 	pud_t entry;
1725 
1726 	if (addr < vma->vm_start || addr >= vma->vm_end)
1727 		return VM_FAULT_SIGBUS;
1728 
1729 	ptl = pud_lock(mm, pud);
1730 	if (!pud_none(*pud)) {
1731 		const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
1732 					  fop.pfn;
1733 
1734 		if (write) {
1735 			if (WARN_ON_ONCE(pud_pfn(*pud) != pfn))
1736 				goto out_unlock;
1737 			entry = pud_mkyoung(*pud);
1738 			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
1739 			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
1740 				update_mmu_cache_pud(vma, addr, pud);
1741 		}
1742 		goto out_unlock;
1743 	}
1744 
1745 	if (fop.is_folio) {
1746 		entry = folio_mk_pud(fop.folio, vma->vm_page_prot);
1747 
1748 		folio_get(fop.folio);
1749 		folio_add_file_rmap_pud(fop.folio, &fop.folio->page, vma);
1750 		add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PUD_NR);
1751 	} else {
1752 		entry = pud_mkhuge(pfn_pud(fop.pfn, prot));
1753 		entry = pud_mkspecial(entry);
1754 	}
1755 	if (write) {
1756 		entry = pud_mkyoung(pud_mkdirty(entry));
1757 		entry = maybe_pud_mkwrite(entry, vma);
1758 	}
1759 	set_pud_at(mm, addr, pud, entry);
1760 	update_mmu_cache_pud(vma, addr, pud);
1761 out_unlock:
1762 	spin_unlock(ptl);
1763 	return VM_FAULT_NOPAGE;
1764 }
1765 
1766 /**
1767  * vmf_insert_pfn_pud - insert a pud size pfn
1768  * @vmf: Structure describing the fault
1769  * @pfn: pfn to insert
1770  * @write: whether it's a write fault
1771  *
1772  * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
1773  *
1774  * Return: vm_fault_t value.
1775  */
1776 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
1777 			      bool write)
1778 {
1779 	unsigned long addr = vmf->address & PUD_MASK;
1780 	struct vm_area_struct *vma = vmf->vma;
1781 	pgprot_t pgprot = vma->vm_page_prot;
1782 	struct folio_or_pfn fop = {
1783 		.pfn = pfn,
1784 	};
1785 
1786 	/*
1787 	 * If we had pud_special, we could avoid all these restrictions,
1788 	 * but we need to be consistent with PTEs and architectures that
1789 	 * can't support a 'special' bit.
1790 	 */
1791 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1792 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1793 						(VM_PFNMAP|VM_MIXEDMAP));
1794 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1795 
1796 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
1797 
1798 	return insert_pud(vma, addr, vmf->pud, fop, pgprot, write);
1799 }
1800 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
1801 
1802 /**
1803  * vmf_insert_folio_pud - insert a pud size folio mapped by a pud entry
1804  * @vmf: Structure describing the fault
1805  * @folio: folio to insert
1806  * @write: whether it's a write fault
1807  *
1808  * Return: vm_fault_t value.
1809  */
1810 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
1811 				bool write)
1812 {
1813 	struct vm_area_struct *vma = vmf->vma;
1814 	unsigned long addr = vmf->address & PUD_MASK;
1815 	struct folio_or_pfn fop = {
1816 		.folio = folio,
1817 		.is_folio = true,
1818 	};
1819 
1820 	if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))
1821 		return VM_FAULT_SIGBUS;
1822 
1823 	return insert_pud(vma, addr, vmf->pud, fop, vma->vm_page_prot, write);
1824 }
1825 EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
1826 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1827 
1828 /**
1829  * touch_pmd - Mark page table pmd entry as accessed and dirty (for write)
1830  * @vma: The VMA covering @addr
1831  * @addr: The virtual address
1832  * @pmd: pmd pointer into the page table mapping @addr
1833  * @write: Whether it's a write access
1834  *
1835  * Return: whether the pmd entry is changed
1836  */
1837 bool touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1838 	       pmd_t *pmd, bool write)
1839 {
1840 	pmd_t entry;
1841 
1842 	entry = pmd_mkyoung(*pmd);
1843 	if (write)
1844 		entry = pmd_mkdirty(entry);
1845 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1846 				  pmd, entry, write)) {
1847 		update_mmu_cache_pmd(vma, addr, pmd);
1848 		return true;
1849 	}
1850 
1851 	return false;
1852 }
1853 
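/*
 * Copy a non-present huge pmd (a migration or device-private softleaf
 * entry) at fork time. Writable entries are first downgraded to readable
 * ones in the source, so that both processes fault before writing again.
 */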
1854 static void copy_huge_non_present_pmd(
1855 		struct mm_struct *dst_mm, struct mm_struct *src_mm,
1856 		pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1857 		struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1858 		pmd_t pmd, pgtable_t pgtable)
1859 {
1860 	softleaf_t entry = softleaf_from_pmd(pmd);
1861 	struct folio *src_folio;
1862 
1863 	VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(pmd));
1864 
1865 	if (softleaf_is_migration_write(entry) ||
1866 	    softleaf_is_migration_read_exclusive(entry)) {
1867 		entry = make_readable_migration_entry(swp_offset(entry));
1868 		pmd = swp_entry_to_pmd(entry);
1869 		if (pmd_swp_soft_dirty(*src_pmd))
1870 			pmd = pmd_swp_mksoft_dirty(pmd);
1871 		if (pmd_swp_uffd_wp(*src_pmd))
1872 			pmd = pmd_swp_mkuffd_wp(pmd);
1873 		set_pmd_at(src_mm, addr, src_pmd, pmd);
1874 	} else if (softleaf_is_device_private(entry)) {
1875 		/*
1876 		 * For device private entries, since there are no
1877 		 * For device private entries there are no read-exclusive
1878 		 * entries, so writable == !readable.
1879 		if (softleaf_is_device_private_write(entry)) {
1880 			entry = make_readable_device_private_entry(swp_offset(entry));
1881 			pmd = swp_entry_to_pmd(entry);
1882 
1883 			if (pmd_swp_soft_dirty(*src_pmd))
1884 				pmd = pmd_swp_mksoft_dirty(pmd);
1885 			if (pmd_swp_uffd_wp(*src_pmd))
1886 				pmd = pmd_swp_mkuffd_wp(pmd);
1887 			set_pmd_at(src_mm, addr, src_pmd, pmd);
1888 		}
1889 
1890 		src_folio = softleaf_to_folio(entry);
1891 		VM_WARN_ON(!folio_test_large(src_folio));
1892 
1893 		folio_get(src_folio);
1894 		/*
1895 		 * folio_try_dup_anon_rmap_pmd does not fail for
1896 		 * device private entries.
1897 		 */
1898 		folio_try_dup_anon_rmap_pmd(src_folio, &src_folio->page,
1899 					    dst_vma, src_vma);
1900 	}
1901 
1902 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1903 	mm_inc_nr_ptes(dst_mm);
1904 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1905 	if (!userfaultfd_wp(dst_vma))
1906 		pmd = pmd_swp_clear_uffd_wp(pmd);
1907 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1908 }
1909 
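/*
 * Copy a huge pmd from @src_mm to @dst_mm at fork time. Returns 0 on
 * success, -ENOMEM if the pgtable allocation fails, and -EAGAIN if the
 * caller must retry, for instance after the PMD was split because the
 * source page may be pinned.
 */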
1910 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1911 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1912 		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1913 {
1914 	spinlock_t *dst_ptl, *src_ptl;
1915 	struct page *src_page;
1916 	struct folio *src_folio;
1917 	pmd_t pmd;
1918 	pgtable_t pgtable = NULL;
1919 	int ret = -ENOMEM;
1920 
1921 	pmd = pmdp_get_lockless(src_pmd);
1922 	if (unlikely(pmd_present(pmd) && pmd_special(pmd) &&
1923 		     !is_huge_zero_pmd(pmd))) {
1924 		dst_ptl = pmd_lock(dst_mm, dst_pmd);
1925 		src_ptl = pmd_lockptr(src_mm, src_pmd);
1926 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1927 		/*
1928 		 * No need to recheck the pmd, it can't change with write
1929 		 * mmap lock held here.
1930 		 *
1931 		 * Meanwhile, make sure this is not a CoW VMA with a writable
1932 		 * mapping; otherwise either the anon page wrongly had the
1933 		 * special bit applied, or we wrongly made the PRIVATE mapping
1934 		 * able to write to the backend MMIO.
1935 		 */
1936 		VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
1937 		goto set_pmd;
1938 	}
1939 
1940 	/* Skip if it can be refilled on fault */
1941 	if (!vma_is_anonymous(dst_vma))
1942 		return 0;
1943 
1944 	pgtable = pte_alloc_one(dst_mm);
1945 	if (unlikely(!pgtable))
1946 		goto out;
1947 
1948 	dst_ptl = pmd_lock(dst_mm, dst_pmd);
1949 	src_ptl = pmd_lockptr(src_mm, src_pmd);
1950 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1951 
1952 	ret = -EAGAIN;
1953 	pmd = *src_pmd;
1954 
1955 	if (unlikely(thp_migration_supported() &&
1956 		     pmd_is_valid_softleaf(pmd))) {
1957 		copy_huge_non_present_pmd(dst_mm, src_mm, dst_pmd, src_pmd, addr,
1958 					  dst_vma, src_vma, pmd, pgtable);
1959 		ret = 0;
1960 		goto out_unlock;
1961 	}
1962 
1963 	if (unlikely(!pmd_trans_huge(pmd))) {
1964 		pte_free(dst_mm, pgtable);
1965 		goto out_unlock;
1966 	}
1967 	/*
1968 	 * While the page table lock is held, the huge zero pmd cannot be
1969 	 * under splitting, since we never split the page itself, only the
1970 	 * pmd into a page table.
1971 	 */
1972 	if (is_huge_zero_pmd(pmd)) {
1973 		/*
1974 		 * mm_get_huge_zero_folio() will never allocate a new
1975 		 * folio here, since we already have a zero page to
1976 		 * copy. It just takes a reference.
1977 		 */
1978 		mm_get_huge_zero_folio(dst_mm);
1979 		goto out_zero_page;
1980 	}
1981 
1982 	src_page = pmd_page(pmd);
1983 	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1984 	src_folio = page_folio(src_page);
1985 
1986 	folio_get(src_folio);
1987 	if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {
1988 		/* Page may be pinned: split and retry the fault on PTEs. */
1989 		folio_put(src_folio);
1990 		pte_free(dst_mm, pgtable);
1991 		spin_unlock(src_ptl);
1992 		spin_unlock(dst_ptl);
1993 		__split_huge_pmd(src_vma, src_pmd, addr, false);
1994 		return -EAGAIN;
1995 	}
1996 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1997 out_zero_page:
1998 	mm_inc_nr_ptes(dst_mm);
1999 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
2000 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
2001 	if (!userfaultfd_wp(dst_vma))
2002 		pmd = pmd_clear_uffd_wp(pmd);
2003 	pmd = pmd_wrprotect(pmd);
2004 set_pmd:
2005 	pmd = pmd_mkold(pmd);
2006 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
2007 
2008 	ret = 0;
2009 out_unlock:
2010 	spin_unlock(src_ptl);
2011 	spin_unlock(dst_ptl);
2012 out:
2013 	return ret;
2014 }
2015 
2016 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
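/*
 * The PUD counterpart of touch_pmd(): mark the pud entry as accessed
 * and, for a write access, dirty.
 */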
2017 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
2018 	       pud_t *pud, bool write)
2019 {
2020 	pud_t _pud;
2021 
2022 	_pud = pud_mkyoung(*pud);
2023 	if (write)
2024 		_pud = pud_mkdirty(_pud);
2025 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
2026 				  pud, _pud, write))
2027 		update_mmu_cache_pud(vma, addr, pud);
2028 }
2029 
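/*
 * Copy a huge pud at fork time. Only file-backed huge puds exist so far,
 * so no anon rmap duplication is needed yet (see the TODO below).
 */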
2030 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
2031 		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
2032 		  struct vm_area_struct *vma)
2033 {
2034 	spinlock_t *dst_ptl, *src_ptl;
2035 	pud_t pud;
2036 	int ret;
2037 
2038 	dst_ptl = pud_lock(dst_mm, dst_pud);
2039 	src_ptl = pud_lockptr(src_mm, src_pud);
2040 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2041 
2042 	ret = -EAGAIN;
2043 	pud = *src_pud;
2044 	if (unlikely(!pud_trans_huge(pud)))
2045 		goto out_unlock;
2046 
2047 	/*
2048 	 * TODO: once we support anonymous pages, use
2049 	 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
2050 	 */
2051 	if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
2052 		pudp_set_wrprotect(src_mm, addr, src_pud);
2053 		pud = pud_wrprotect(pud);
2054 	}
2055 	pud = pud_mkold(pud);
2056 	set_pud_at(dst_mm, addr, dst_pud, pud);
2057 
2058 	ret = 0;
2059 out_unlock:
2060 	spin_unlock(src_ptl);
2061 	spin_unlock(dst_ptl);
2062 	return ret;
2063 }
2064 
2065 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
2066 {
2067 	bool write = vmf->flags & FAULT_FLAG_WRITE;
2068 
2069 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
2070 	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
2071 		goto unlock;
2072 
2073 	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
2074 unlock:
2075 	spin_unlock(vmf->ptl);
2076 }
2077 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2078 
2079 bool huge_pmd_set_accessed(struct vm_fault *vmf)
2080 {
2081 	bool write = vmf->flags & FAULT_FLAG_WRITE;
2082 
2083 	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
2084 		return false;
2085 
2086 	return touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
2087 }
2088 
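/*
 * Write fault on the huge zero page: allocate a fresh anonymous THP and
 * map it in place of the zero pmd. Returns VM_FAULT_FALLBACK if the
 * allocation fails, so the caller can split the pmd instead.
 */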
2089 static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
2090 {
2091 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2092 	struct vm_area_struct *vma = vmf->vma;
2093 	struct mmu_notifier_range range;
2094 	struct folio *folio;
2095 	vm_fault_t ret = 0;
2096 
2097 	folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
2098 	if (unlikely(!folio))
2099 		return VM_FAULT_FALLBACK;
2100 
2101 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr,
2102 				haddr + HPAGE_PMD_SIZE);
2103 	mmu_notifier_invalidate_range_start(&range);
2104 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2105 	if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)))
2106 		goto release;
2107 	ret = check_stable_address_space(vma->vm_mm);
2108 	if (ret)
2109 		goto release;
2110 	(void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
2111 	map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
2112 	goto unlock;
2113 release:
2114 	folio_put(folio);
2115 unlock:
2116 	spin_unlock(vmf->ptl);
2117 	mmu_notifier_invalidate_range_end(&range);
2118 	return ret;
2119 }
2120 
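/*
 * Handle a write-protection fault on a huge pmd: reuse the anonymous
 * folio in place when this mapping holds the only reference, otherwise
 * split the pmd and retry the fault at PTE granularity.
 */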
2121 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
2122 {
2123 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
2124 	struct vm_area_struct *vma = vmf->vma;
2125 	struct folio *folio;
2126 	struct page *page;
2127 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2128 	pmd_t orig_pmd = vmf->orig_pmd;
2129 
2130 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
2131 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
2132 
2133 	if (is_huge_zero_pmd(orig_pmd)) {
2134 		vm_fault_t ret = do_huge_zero_wp_pmd(vmf);
2135 
2136 		if (!(ret & VM_FAULT_FALLBACK))
2137 			return ret;
2138 
2139 		/* Fall back to splitting the PMD if a THP cannot be allocated */
2140 		goto fallback;
2141 	}
2142 
2143 	spin_lock(vmf->ptl);
2144 
2145 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
2146 		spin_unlock(vmf->ptl);
2147 		return 0;
2148 	}
2149 
2150 	page = pmd_page(orig_pmd);
2151 	folio = page_folio(page);
2152 	VM_BUG_ON_PAGE(!PageHead(page), page);
2153 
2154 	/* Early check when only holding the PT lock. */
2155 	if (PageAnonExclusive(page))
2156 		goto reuse;
2157 
2158 	if (!folio_trylock(folio)) {
2159 		folio_get(folio);
2160 		spin_unlock(vmf->ptl);
2161 		folio_lock(folio);
2162 		spin_lock(vmf->ptl);
2163 		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
2164 			spin_unlock(vmf->ptl);
2165 			folio_unlock(folio);
2166 			folio_put(folio);
2167 			return 0;
2168 		}
2169 		folio_put(folio);
2170 	}
2171 
2172 	/* Recheck after temporarily dropping the PT lock. */
2173 	if (PageAnonExclusive(page)) {
2174 		folio_unlock(folio);
2175 		goto reuse;
2176 	}
2177 
2178 	/*
2179 	 * See do_wp_page(): we can only reuse the folio exclusively if
2180 	 * there are no additional references. Note that we always drain
2181 	 * the LRU cache immediately after adding a THP.
2182 	 */
2183 	if (folio_ref_count(folio) >
2184 			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
2185 		goto unlock_fallback;
2186 	if (folio_test_swapcache(folio))
2187 		folio_free_swap(folio);
2188 	if (folio_ref_count(folio) == 1) {
2189 		pmd_t entry;
2190 
2191 		folio_move_anon_rmap(folio, vma);
2192 		SetPageAnonExclusive(page);
2193 		folio_unlock(folio);
2194 reuse:
2195 		if (unlikely(unshare)) {
2196 			spin_unlock(vmf->ptl);
2197 			return 0;
2198 		}
2199 		entry = pmd_mkyoung(orig_pmd);
2200 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2201 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
2202 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
2203 		spin_unlock(vmf->ptl);
2204 		return 0;
2205 	}
2206 
2207 unlock_fallback:
2208 	folio_unlock(folio);
2209 	spin_unlock(vmf->ptl);
2210 fallback:
2211 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false);
2212 	return VM_FAULT_FALLBACK;
2213 }
2214 
2215 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
2216 					   unsigned long addr, pmd_t pmd)
2217 {
2218 	struct page *page;
2219 
2220 	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
2221 		return false;
2222 
2223 	/* Don't touch entries that are not even readable (NUMA hinting). */
2224 	if (pmd_protnone(pmd))
2225 		return false;
2226 
2227 	/* Do we need write faults for softdirty tracking? */
2228 	if (pmd_needs_soft_dirty_wp(vma, pmd))
2229 		return false;
2230 
2231 	/* Do we need write faults for uffd-wp tracking? */
2232 	if (userfaultfd_huge_pmd_wp(vma, pmd))
2233 		return false;
2234 
2235 	if (!(vma->vm_flags & VM_SHARED)) {
2236 		/* See can_change_pte_writable(). */
2237 		page = vm_normal_page_pmd(vma, addr, pmd);
2238 		return page && PageAnon(page) && PageAnonExclusive(page);
2239 	}
2240 
2241 	/* See can_change_pte_writable(). */
2242 	return pmd_dirty(pmd);
2243 }
2244 
2245 /* NUMA hinting page fault entry point for trans huge pmds */
2246 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
2247 {
2248 	struct vm_area_struct *vma = vmf->vma;
2249 	struct folio *folio;
2250 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2251 	int nid = NUMA_NO_NODE;
2252 	int target_nid, last_cpupid;
2253 	pmd_t pmd, old_pmd;
2254 	bool writable = false;
2255 	int flags = 0;
2256 
2257 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2258 	old_pmd = pmdp_get(vmf->pmd);
2259 
2260 	if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
2261 		spin_unlock(vmf->ptl);
2262 		return 0;
2263 	}
2264 
2265 	pmd = pmd_modify(old_pmd, vma->vm_page_prot);
2266 
2267 	/*
2268 	 * Detect now whether the PMD could be writable; this information
2269 	 * is only valid while holding the PT lock.
2270 	 */
2271 	writable = pmd_write(pmd);
2272 	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
2273 	    can_change_pmd_writable(vma, vmf->address, pmd))
2274 		writable = true;
2275 
2276 	folio = vm_normal_folio_pmd(vma, haddr, pmd);
2277 	if (!folio)
2278 		goto out_map;
2279 
2280 	nid = folio_nid(folio);
2281 
2282 	target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
2283 					&last_cpupid);
2284 	if (target_nid == NUMA_NO_NODE)
2285 		goto out_map;
2286 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
2287 		flags |= TNF_MIGRATE_FAIL;
2288 		goto out_map;
2289 	}
2290 	/* The folio is isolated and the isolation code holds a folio reference. */
2291 	spin_unlock(vmf->ptl);
2292 	writable = false;
2293 
2294 	if (!migrate_misplaced_folio(folio, target_nid)) {
2295 		flags |= TNF_MIGRATED;
2296 		nid = target_nid;
2297 		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
2298 		return 0;
2299 	}
2300 
2301 	flags |= TNF_MIGRATE_FAIL;
2302 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2303 	if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
2304 		spin_unlock(vmf->ptl);
2305 		return 0;
2306 	}
2307 out_map:
2308 	/* Restore the PMD */
2309 	pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
2310 	pmd = pmd_mkyoung(pmd);
2311 	if (writable)
2312 		pmd = pmd_mkwrite(pmd, vma);
2313 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
2314 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
2315 	spin_unlock(vmf->ptl);
2316 
2317 	if (nid != NUMA_NO_NODE)
2318 		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
2319 	return 0;
2320 }
2321 
2322 /*
2323  * Return true if we do MADV_FREE successfully on the entire pmd page.
2324  * Otherwise, return false.
2325  */
2326 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2327 		pmd_t *pmd, unsigned long addr, unsigned long next)
2328 {
2329 	spinlock_t *ptl;
2330 	pmd_t orig_pmd;
2331 	struct folio *folio;
2332 	struct mm_struct *mm = tlb->mm;
2333 	bool ret = false;
2334 
2335 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2336 
2337 	ptl = pmd_trans_huge_lock(pmd, vma);
2338 	if (!ptl)
2339 		goto out_unlocked;
2340 
2341 	orig_pmd = *pmd;
2342 	if (is_huge_zero_pmd(orig_pmd))
2343 		goto out;
2344 
2345 	if (unlikely(!pmd_present(orig_pmd))) {
2346 		VM_BUG_ON(thp_migration_supported() &&
2347 				  !pmd_is_migration_entry(orig_pmd));
2348 		goto out;
2349 	}
2350 
2351 	folio = pmd_folio(orig_pmd);
2352 	/*
2353 	 * If other processes are mapping this folio, we cannot discard
2354 	 * it unless they all do MADV_FREE, so skip the folio.
2355 	 */
2356 	if (folio_maybe_mapped_shared(folio))
2357 		goto out;
2358 
2359 	if (!folio_trylock(folio))
2360 		goto out;
2361 
2362 	/*
2363 	 * If the user wants to discard only part of the THP, split it so
2364 	 * MADV_FREE deactivates only those pages.
2365 	 */
2366 	if (next - addr != HPAGE_PMD_SIZE) {
2367 		folio_get(folio);
2368 		spin_unlock(ptl);
2369 		split_folio(folio);
2370 		folio_unlock(folio);
2371 		folio_put(folio);
2372 		goto out_unlocked;
2373 	}
2374 
2375 	if (folio_test_dirty(folio))
2376 		folio_clear_dirty(folio);
2377 	folio_unlock(folio);
2378 
2379 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
2380 		pmdp_invalidate(vma, addr, pmd);
2381 		orig_pmd = pmd_mkold(orig_pmd);
2382 		orig_pmd = pmd_mkclean(orig_pmd);
2383 
2384 		set_pmd_at(mm, addr, pmd, orig_pmd);
2385 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2386 	}
2387 
2388 	folio_mark_lazyfree(folio);
2389 	ret = true;
2390 out:
2391 	spin_unlock(ptl);
2392 out_unlocked:
2393 	return ret;
2394 }
2395 
2396 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
2397 {
2398 	pgtable_t pgtable;
2399 
2400 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2401 	pte_free(mm, pgtable);
2402 	mm_dec_nr_ptes(mm);
2403 }
2404 
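/*
 * Tear down the rmap and memory counters for the folio mapped (or, for
 * device-private entries, referenced) by a huge pmd that is being zapped.
 */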
2405 static void zap_huge_pmd_folio(struct mm_struct *mm, struct vm_area_struct *vma,
2406 		pmd_t pmdval, struct folio *folio, bool is_present)
2407 {
2408 	const bool is_device_private = folio_is_device_private(folio);
2409 
2410 	/* Present and device private folios are rmappable. */
2411 	if (is_present || is_device_private)
2412 		folio_remove_rmap_pmd(folio, &folio->page, vma);
2413 
2414 	if (folio_test_anon(folio)) {
2415 		add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
2416 	} else {
2417 		add_mm_counter(mm, mm_counter_file(folio),
2418 			       -HPAGE_PMD_NR);
2419 
2420 		if (is_present && pmd_young(pmdval) &&
2421 		    likely(vma_has_recency(vma)))
2422 			folio_mark_accessed(folio);
2423 	}
2424 
2425 	/* Device private folios are pinned. */
2426 	if (is_device_private)
2427 		folio_put(folio);
2428 }
2429 
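/*
 * Look up the folio behind a huge pmd value: via vm_normal_folio_pmd()
 * if the entry is present, via its softleaf entry otherwise.
 */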
2430 static struct folio *normal_or_softleaf_folio_pmd(struct vm_area_struct *vma,
2431 		unsigned long addr, pmd_t pmdval, bool is_present)
2432 {
2433 	if (is_present)
2434 		return vm_normal_folio_pmd(vma, addr, pmdval);
2435 
2436 	if (!thp_migration_supported())
2437 		WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
2438 	return pmd_to_softleaf_folio(pmdval);
2439 }
2440 
2441 static bool has_deposited_pgtable(struct vm_area_struct *vma, pmd_t pmdval,
2442 		struct folio *folio)
2443 {
2444 	/* Some architectures require unconditional depositing. */
2445 	if (arch_needs_pgtable_deposit())
2446 		return true;
2447 
2448 	/*
2449 	 * The huge zero pmd always has a deposited pgtable, except for DAX
2450 	 * which handles it itself; see set_huge_zero_folio().
2451 	 */
2452 	if (is_huge_zero_pmd(pmdval))
2453 		return !vma_is_dax(vma);
2454 
2455 	/*
2456 	 * Otherwise, only anonymous folios are deposited, see
2457 	 * __do_huge_pmd_anonymous_page().
2458 	 */
2459 	return folio && folio_test_anon(folio);
2460 }
2461 
2462 /**
2463  * zap_huge_pmd - Zap a huge THP which is of PMD size.
2464  * @tlb: The MMU gather TLB state associated with the operation.
2465  * @vma: The VMA containing the range to zap.
2466  * @pmd: A pointer to the leaf PMD entry.
2467  * @addr: The virtual address for the range to zap.
2468  *
2469  * Returns: %true on success, %false otherwise.
2470  */
2471 bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2472 		 pmd_t *pmd, unsigned long addr)
2473 {
2474 	struct mm_struct *mm = tlb->mm;
2475 	struct folio *folio = NULL;
2476 	bool is_present = false;
2477 	bool has_deposit;
2478 	spinlock_t *ptl;
2479 	pmd_t orig_pmd;
2480 
2481 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2482 
2483 	ptl = __pmd_trans_huge_lock(pmd, vma);
2484 	if (!ptl)
2485 		return false;
2486 	/*
2487 	 * Architectures like ppc64 look at the deposited pgtable when
2488 	 * calling pmdp_huge_get_and_clear, so do the
2489 	 * pgtable_trans_huge_withdraw only after finishing the pmdp
2490 	 * related operations.
2491 	 */
2492 	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
2493 						tlb->fullmm);
2494 	arch_check_zapped_pmd(vma, orig_pmd);
2495 	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2496 
2497 	is_present = pmd_present(orig_pmd);
2498 	folio = normal_or_softleaf_folio_pmd(vma, addr, orig_pmd, is_present);
2499 	has_deposit = has_deposited_pgtable(vma, orig_pmd, folio);
2500 	if (folio)
2501 		zap_huge_pmd_folio(mm, vma, orig_pmd, folio, is_present);
2502 	if (has_deposit)
2503 		zap_deposited_table(mm, pmd);
2504 
2505 	spin_unlock(ptl);
2506 	if (is_present && folio)
2507 		tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
2508 	return true;
2509 }
2510 
2511 #ifndef pmd_move_must_withdraw
2512 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
2513 					 spinlock_t *old_pmd_ptl,
2514 					 struct vm_area_struct *vma)
2515 {
2516 	/*
2517 	 * With the split pmd lock we also need to move the preallocated
2518 	 * PTE page table if new_pmd is on a different PMD page table.
2519 	 *
2520 	 * We also don't deposit and withdraw tables for file pages.
2521 	 */
2522 	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
2523 }
2524 #endif
2525 
2526 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
2527 {
2528 	if (pgtable_supports_soft_dirty()) {
2529 		if (unlikely(pmd_is_migration_entry(pmd)))
2530 			pmd = pmd_swp_mksoft_dirty(pmd);
2531 		else if (pmd_present(pmd))
2532 			pmd = pmd_mksoft_dirty(pmd);
2533 	}
2534 
2535 	return pmd;
2536 }
2537 
2538 static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
2539 {
2540 	if (pmd_none(pmd))
2541 		return pmd;
2542 	if (pmd_present(pmd))
2543 		pmd = pmd_clear_uffd_wp(pmd);
2544 	else
2545 		pmd = pmd_swp_clear_uffd_wp(pmd);
2546 
2547 	return pmd;
2548 }
2549 
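/*
 * Move a huge pmd from @old_pmd to @new_pmd for mremap(). Returns true
 * if the entry was moved at pmd granularity, false if the caller has to
 * fall back to moving individual ptes.
 */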
2550 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
2551 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
2552 {
2553 	spinlock_t *old_ptl, *new_ptl;
2554 	pmd_t pmd;
2555 	struct mm_struct *mm = vma->vm_mm;
2556 	bool force_flush = false;
2557 
2558 	/*
2559 	 * The destination pmd shouldn't be established, free_pgtables()
2560 	 * should have released it; but move_page_tables() might have already
2561 	 * inserted a page table, if racing against shmem/file collapse.
2562 	 */
2563 	if (!pmd_none(*new_pmd)) {
2564 		VM_BUG_ON(pmd_trans_huge(*new_pmd));
2565 		return false;
2566 	}
2567 
2568 	/*
2569 	 * We don't have to worry about the ordering of src and dst
2570 	 * ptlocks because exclusive mmap_lock prevents deadlock.
2571 	 */
2572 	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
2573 	if (old_ptl) {
2574 		new_ptl = pmd_lockptr(mm, new_pmd);
2575 		if (new_ptl != old_ptl)
2576 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
2577 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
2578 		if (pmd_present(pmd))
2579 			force_flush = true;
2580 		VM_BUG_ON(!pmd_none(*new_pmd));
2581 
2582 		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
2583 			pgtable_t pgtable;
2584 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
2585 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
2586 		}
2587 		pmd = move_soft_dirty_pmd(pmd);
2588 		if (vma_has_uffd_without_event_remap(vma))
2589 			pmd = clear_uffd_wp_pmd(pmd);
2590 		set_pmd_at(mm, new_addr, new_pmd, pmd);
2591 		if (force_flush)
2592 			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
2593 		if (new_ptl != old_ptl)
2594 			spin_unlock(new_ptl);
2595 		spin_unlock(old_ptl);
2596 		return true;
2597 	}
2598 	return false;
2599 }
2600 
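/*
 * mprotect() handling for a non-present huge pmd: writable migration and
 * device-private entries are downgraded to readable ones, and the
 * uffd-wp bit is set or cleared as requested.
 */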
2601 static void change_non_present_huge_pmd(struct mm_struct *mm,
2602 		unsigned long addr, pmd_t *pmd, bool uffd_wp,
2603 		bool uffd_wp_resolve)
2604 {
2605 	softleaf_t entry = softleaf_from_pmd(*pmd);
2606 	const struct folio *folio = softleaf_to_folio(entry);
2607 	pmd_t newpmd;
2608 
2609 	VM_WARN_ON(!pmd_is_valid_softleaf(*pmd));
2610 	if (softleaf_is_migration_write(entry)) {
2611 		/*
2612 		 * A protection check is difficult, so
2613 		 * just be safe and disable write.
2614 		 */
2615 		if (folio_test_anon(folio))
2616 			entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2617 		else
2618 			entry = make_readable_migration_entry(swp_offset(entry));
2619 		newpmd = swp_entry_to_pmd(entry);
2620 		if (pmd_swp_soft_dirty(*pmd))
2621 			newpmd = pmd_swp_mksoft_dirty(newpmd);
2622 	} else if (softleaf_is_device_private_write(entry)) {
2623 		entry = make_readable_device_private_entry(swp_offset(entry));
2624 		newpmd = swp_entry_to_pmd(entry);
2625 	} else {
2626 		newpmd = *pmd;
2627 	}
2628 
2629 	if (uffd_wp)
2630 		newpmd = pmd_swp_mkuffd_wp(newpmd);
2631 	else if (uffd_wp_resolve)
2632 		newpmd = pmd_swp_clear_uffd_wp(newpmd);
2633 	if (!pmd_same(*pmd, newpmd))
2634 		set_pmd_at(mm, addr, pmd, newpmd);
2635 }
2636 
2637 /*
2638  * Returns
2639  *  - 0 if PMD could not be locked
2640  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
2641  *      or if prot_numa but THP migration is not supported
2642  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
2643  */
2644 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2645 		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2646 		    unsigned long cp_flags)
2647 {
2648 	struct mm_struct *mm = vma->vm_mm;
2649 	spinlock_t *ptl;
2650 	pmd_t oldpmd, entry;
2651 	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
2652 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2653 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
2654 	int ret = 1;
2655 
2656 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2657 
2658 	if (prot_numa && !thp_migration_supported())
2659 		return 1;
2660 
2661 	ptl = __pmd_trans_huge_lock(pmd, vma);
2662 	if (!ptl)
2663 		return 0;
2664 
2665 	if (thp_migration_supported() && pmd_is_valid_softleaf(*pmd)) {
2666 		change_non_present_huge_pmd(mm, addr, pmd, uffd_wp,
2667 					    uffd_wp_resolve);
2668 		goto unlock;
2669 	}
2670 
2671 	if (prot_numa) {
2672 
2673 		/*
2674 		 * Avoid trapping faults against the zero page. The read-only
2675 		 * data is likely to be read-cached on the local CPU and
2676 		 * local/remote hits to the zero page are not interesting.
2677 		 */
2678 		if (is_huge_zero_pmd(*pmd))
2679 			goto unlock;
2680 
2681 		if (pmd_protnone(*pmd))
2682 			goto unlock;
2683 
2684 		if (!folio_can_map_prot_numa(pmd_folio(*pmd), vma,
2685 					     vma_is_single_threaded_private(vma)))
2686 			goto unlock;
2687 	}
2688 	/*
2689 	 * In case of prot_numa, we are under mmap_read_lock(mm). It's critical
2690 	 * to not clear the pmd intermittently, to avoid racing with
2691 	 * MADV_DONTNEED which is also under mmap_read_lock(mm):
2692 	 *
2693 	 *	CPU0:				CPU1:
2694 	 *				change_huge_pmd(prot_numa=1)
2695 	 *				 pmdp_huge_get_and_clear_notify()
2696 	 * madvise_dontneed()
2697 	 *  zap_pmd_range()
2698 	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
2699 	 *   // skip the pmd
2700 	 *				 set_pmd_at();
2701 	 *				 // pmd is re-established
2702 	 *
2703 	 * The race makes MADV_DONTNEED miss the huge pmd and fail to clear
2704 	 * it, which may break userspace.
2705 	 *
2706 	 * pmdp_invalidate_ad() is required to make sure we don't miss
2707 	 * dirty/young flags set by hardware.
2708 	 */
2709 	oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
2710 
2711 	entry = pmd_modify(oldpmd, newprot);
2712 	if (uffd_wp)
2713 		entry = pmd_mkuffd_wp(entry);
2714 	else if (uffd_wp_resolve)
2715 		/*
2716 		 * Leave the write bit to be handled by the page fault
2717 		 * handler, so that things like COW can be handled
2718 		 * properly.
2719 		 */
2720 		entry = pmd_clear_uffd_wp(entry);
2721 
2722 	/* See change_pte_range(). */
2723 	if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2724 	    can_change_pmd_writable(vma, addr, entry))
2725 		entry = pmd_mkwrite(entry, vma);
2726 
2727 	ret = HPAGE_PMD_NR;
2728 	set_pmd_at(mm, addr, pmd, entry);
2729 
2730 	if (huge_pmd_needs_flush(oldpmd, entry))
2731 		tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
2732 unlock:
2733 	spin_unlock(ptl);
2734 	return ret;
2735 }
2736 
2737 /*
2738  * Returns:
2739  *
2740  * - 0: if pud leaf changed from under us
2741  * - 1: if pud can be skipped
2742  * - HPAGE_PUD_NR: if pud was successfully processed
2743  */
2744 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2745 int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2746 		    pud_t *pudp, unsigned long addr, pgprot_t newprot,
2747 		    unsigned long cp_flags)
2748 {
2749 	struct mm_struct *mm = vma->vm_mm;
2750 	pud_t oldpud, entry;
2751 	spinlock_t *ptl;
2752 
2753 	tlb_change_page_size(tlb, HPAGE_PUD_SIZE);
2754 
2755 	/* NUMA balancing doesn't apply to dax */
2756 	if (cp_flags & MM_CP_PROT_NUMA)
2757 		return 1;
2758 
2759 	/*
2760 	 * Huge entries with userfault-wp only work with anonymous memory,
2761 	 * and we don't have anonymous PUDs yet.
2762 	 */
2763 	if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL))
2764 		return 1;
2765 
2766 	ptl = __pud_trans_huge_lock(pudp, vma);
2767 	if (!ptl)
2768 		return 0;
2769 
2770 	/*
2771 	 * Can't clear PUD or it can race with concurrent zapping.  See
2772 	 * change_huge_pmd().
2773 	 */
2774 	oldpud = pudp_invalidate(vma, addr, pudp);
2775 	entry = pud_modify(oldpud, newprot);
2776 	set_pud_at(mm, addr, pudp, entry);
2777 	tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE);
2778 
2779 	spin_unlock(ptl);
2780 	return HPAGE_PUD_NR;
2781 }
2782 #endif
2783 
2784 #ifdef CONFIG_USERFAULTFD
2785 /*
2786  * The PT lock for src_pmd and the locks on dst_vma/src_vma (for reading)
2787  * are taken by the caller, which must return only after releasing the
2788  * page table lock. Just move the page from src_pmd to dst_pmd if possible.
2789  * Returns zero if the page was moved, -EAGAIN if the operation needs to be
2790  * repeated by the caller, or another error in case of failure.
2791  */
2792 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2793 			struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2794 			unsigned long dst_addr, unsigned long src_addr)
2795 {
2796 	pmd_t _dst_pmd, src_pmdval;
2797 	struct page *src_page;
2798 	struct folio *src_folio;
2799 	spinlock_t *src_ptl, *dst_ptl;
2800 	pgtable_t src_pgtable;
2801 	struct mmu_notifier_range range;
2802 	int err = 0;
2803 
2804 	src_pmdval = *src_pmd;
2805 	src_ptl = pmd_lockptr(mm, src_pmd);
2806 
2807 	lockdep_assert_held(src_ptl);
2808 	vma_assert_locked(src_vma);
2809 	vma_assert_locked(dst_vma);
2810 
2811 	/* Sanity checks before the operation */
2812 	if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2813 	    WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2814 		spin_unlock(src_ptl);
2815 		return -EINVAL;
2816 	}
2817 
2818 	if (!pmd_trans_huge(src_pmdval)) {
2819 		spin_unlock(src_ptl);
2820 		if (pmd_is_migration_entry(src_pmdval)) {
2821 			pmd_migration_entry_wait(mm, &src_pmdval);
2822 			return -EAGAIN;
2823 		}
2824 		return -ENOENT;
2825 	}
2826 
2827 	src_page = pmd_page(src_pmdval);
2828 
2829 	if (!is_huge_zero_pmd(src_pmdval)) {
2830 		if (unlikely(!PageAnonExclusive(src_page))) {
2831 			spin_unlock(src_ptl);
2832 			return -EBUSY;
2833 		}
2834 
2835 		src_folio = page_folio(src_page);
2836 		folio_get(src_folio);
2837 	} else
2838 		src_folio = NULL;
2839 
2840 	spin_unlock(src_ptl);
2841 
2842 	flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2843 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2844 				src_addr + HPAGE_PMD_SIZE);
2845 	mmu_notifier_invalidate_range_start(&range);
2846 
2847 	if (src_folio)
2848 		folio_lock(src_folio);
2849 
2850 	dst_ptl = pmd_lockptr(mm, dst_pmd);
2851 	double_pt_lock(src_ptl, dst_ptl);
2852 	if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2853 		     !pmd_same(*dst_pmd, dst_pmdval))) {
2854 		err = -EAGAIN;
2855 		goto unlock_ptls;
2856 	}
2857 	if (src_folio) {
2858 		if (folio_maybe_dma_pinned(src_folio) ||
2859 		    !PageAnonExclusive(&src_folio->page)) {
2860 			err = -EBUSY;
2861 			goto unlock_ptls;
2862 		}
2863 
2864 		if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2865 		    WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2866 			err = -EBUSY;
2867 			goto unlock_ptls;
2868 		}
2869 
2870 		src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2871 		/* Folio got pinned from under us. Put it back and fail the move. */
2872 		if (folio_maybe_dma_pinned(src_folio)) {
2873 			set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2874 			err = -EBUSY;
2875 			goto unlock_ptls;
2876 		}
2877 
2878 		folio_move_anon_rmap(src_folio, dst_vma);
2879 		src_folio->index = linear_page_index(dst_vma, dst_addr);
2880 
2881 		_dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
2882 		/* Follow mremap() behavior and treat the entry as dirty after the move */
2883 		_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2884 	} else {
2885 		src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2886 		_dst_pmd = move_soft_dirty_pmd(src_pmdval);
2887 		_dst_pmd = clear_uffd_wp_pmd(_dst_pmd);
2888 	}
2889 	set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2890 
2891 	src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2892 	pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2893 unlock_ptls:
2894 	double_pt_unlock(src_ptl, dst_ptl);
2895 	/* unblock rmap walks */
2896 	if (src_folio)
2897 		folio_unlock(src_folio);
2898 	mmu_notifier_invalidate_range_end(&range);
2899 	if (src_folio)
2900 		folio_put(src_folio);
2901 	return err;
2902 }
2903 #endif /* CONFIG_USERFAULTFD */
2904 
2905 /*
2906  * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
2907  *
2908  * Note that if it returns the page table lock pointer, this routine returns
2909  * without unlocking the page table lock, so the caller must unlock it.
2910  */
2911 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2912 {
2913 	spinlock_t *ptl;
2914 
2915 	ptl = pmd_lock(vma->vm_mm, pmd);
2916 	if (likely(pmd_is_huge(*pmd)))
2917 		return ptl;
2918 	spin_unlock(ptl);
2919 	return NULL;
2920 }
2921 
2922 /*
2923  * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
2924  *
2925  * Note that if it returns the page table lock pointer, this routine returns
2926  * without unlocking the page table lock, so the caller must unlock it.
2927  */
2928 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2929 {
2930 	spinlock_t *ptl;
2931 
2932 	ptl = pud_lock(vma->vm_mm, pud);
2933 	if (likely(pud_trans_huge(*pud)))
2934 		return ptl;
2935 	spin_unlock(ptl);
2936 	return NULL;
2937 }
2938 
2939 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
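/*
 * Zap a huge pud entry. Only file-backed puds are supported here; there
 * is no zero-page, anonymous or migration handling yet.
 */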
2940 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2941 		 pud_t *pud, unsigned long addr)
2942 {
2943 	spinlock_t *ptl;
2944 	pud_t orig_pud;
2945 
2946 	ptl = __pud_trans_huge_lock(pud, vma);
2947 	if (!ptl)
2948 		return 0;
2949 
2950 	orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2951 	arch_check_zapped_pud(vma, orig_pud);
2952 	tlb_remove_pud_tlb_entry(tlb, pud, addr);
2953 	if (vma_is_special_huge(vma)) {
2954 		spin_unlock(ptl);
2955 		/* No zero page support yet */
2956 	} else {
2957 		struct page *page = NULL;
2958 		struct folio *folio;
2959 
2960 		/* No support for anonymous PUD pages or migration yet */
2961 		VM_WARN_ON_ONCE(vma_is_anonymous(vma) ||
2962 				!pud_present(orig_pud));
2963 
2964 		page = pud_page(orig_pud);
2965 		folio = page_folio(page);
2966 		folio_remove_rmap_pud(folio, page, vma);
2967 		add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR);
2968 
2969 		spin_unlock(ptl);
2970 		tlb_remove_page_size(tlb, page, HPAGE_PUD_SIZE);
2971 	}
2972 	return 1;
2973 }
2974 
2975 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2976 		unsigned long haddr)
2977 {
2978 	struct folio *folio;
2979 	struct page *page;
2980 	pud_t old_pud;
2981 
2982 	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2983 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2984 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2985 	VM_BUG_ON(!pud_trans_huge(*pud));
2986 
2987 	count_vm_event(THP_SPLIT_PUD);
2988 
2989 	old_pud = pudp_huge_clear_flush(vma, haddr, pud);
2990 
2991 	if (!vma_is_dax(vma))
2992 		return;
2993 
2994 	page = pud_page(old_pud);
2995 	folio = page_folio(page);
2996 
2997 	if (!folio_test_dirty(folio) && pud_dirty(old_pud))
2998 		folio_mark_dirty(folio);
2999 	if (!folio_test_referenced(folio) && pud_young(old_pud))
3000 		folio_set_referenced(folio);
3001 	folio_remove_rmap_pud(folio, page, vma);
3002 	folio_put(folio);
3003 	add_mm_counter(vma->vm_mm, mm_counter_file(folio),
3004 		-HPAGE_PUD_NR);
3005 }
3006 
3007 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
3008 		unsigned long address)
3009 {
3010 	spinlock_t *ptl;
3011 	struct mmu_notifier_range range;
3012 
3013 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
3014 				address & HPAGE_PUD_MASK,
3015 				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
3016 	mmu_notifier_invalidate_range_start(&range);
3017 	ptl = pud_lock(vma->vm_mm, pud);
3018 	if (unlikely(!pud_trans_huge(*pud)))
3019 		goto out;
3020 	__split_huge_pud_locked(vma, pud, range.start);
3021 
3022 out:
3023 	spin_unlock(ptl);
3024 	mmu_notifier_invalidate_range_end(&range);
3025 }
3026 #else
3027 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
3028 		unsigned long address)
3029 {
3030 }
3031 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
3032 
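/*
 * Split a huge zero pmd into a page table full of zero-page ptes, each
 * one write-protected and special like the pmd entry it replaces.
 */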
3033 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
3034 		unsigned long haddr, pmd_t *pmd)
3035 {
3036 	struct mm_struct *mm = vma->vm_mm;
3037 	pgtable_t pgtable;
3038 	pmd_t _pmd, old_pmd;
3039 	unsigned long addr;
3040 	pte_t *pte;
3041 	int i;
3042 
3043 	/*
3044 	 * Leave the pmd empty until the ptes are filled. Note that it is
3045 	 * fine to delay notification until mmu_notifier_invalidate_range_end()
3046 	 * as we are replacing a write-protected zero pmd page with
3047 	 * write-protected zero pte pages.
3048 	 *
3049 	 * See Documentation/mm/mmu_notifier.rst
3050 	 */
3051 	old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
3052 
3053 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
3054 	pmd_populate(mm, &_pmd, pgtable);
3055 
3056 	pte = pte_offset_map(&_pmd, haddr);
3057 	VM_BUG_ON(!pte);
3058 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
3059 		pte_t entry;
3060 
3061 		entry = pfn_pte(zero_pfn(addr), vma->vm_page_prot);
3062 		entry = pte_mkspecial(entry);
3063 		if (pmd_uffd_wp(old_pmd))
3064 			entry = pte_mkuffd_wp(entry);
3065 		VM_BUG_ON(!pte_none(ptep_get(pte)));
3066 		set_pte_at(mm, addr, pte, entry);
3067 		pte++;
3068 	}
3069 	pte_unmap(pte - 1);
3070 	smp_wmb(); /* make pte visible before pmd */
3071 	pmd_populate(mm, pmd, pgtable);
3072 }
3073 
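/*
 * Split a huge pmd in place into a page table of ptes, with the pmd lock
 * held by the caller. With @freeze, the anonymous pages are replaced by
 * migration entries right away where possible (see the comments on
 * anon_exclusive handling below).
 */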
3074 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
3075 		unsigned long haddr, bool freeze)
3076 {
3077 	struct mm_struct *mm = vma->vm_mm;
3078 	struct folio *folio;
3079 	struct page *page;
3080 	pgtable_t pgtable;
3081 	pmd_t old_pmd, _pmd;
3082 	bool soft_dirty, uffd_wp = false, young = false, write = false;
3083 	bool anon_exclusive = false, dirty = false;
3084 	unsigned long addr;
3085 	pte_t *pte;
3086 	int i;
3087 
3088 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
3089 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
3090 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
3091 
3092 	VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(*pmd) && !pmd_trans_huge(*pmd));
3093 
3094 	count_vm_event(THP_SPLIT_PMD);
3095 
3096 	if (!vma_is_anonymous(vma)) {
3097 		old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
3098 		/*
3099 		 * We are going to unmap this huge page, so
3100 		 * just go ahead and zap it.
3101 		 */
3102 		if (arch_needs_pgtable_deposit())
3103 			zap_deposited_table(mm, pmd);
3104 		if (vma_is_special_huge(vma))
3105 			return;
3106 		if (unlikely(pmd_is_migration_entry(old_pmd))) {
3107 			const softleaf_t old_entry = softleaf_from_pmd(old_pmd);
3108 
3109 			folio = softleaf_to_folio(old_entry);
3110 		} else if (is_huge_zero_pmd(old_pmd)) {
3111 			return;
3112 		} else {
3113 			page = pmd_page(old_pmd);
3114 			folio = page_folio(page);
3115 			if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
3116 				folio_mark_dirty(folio);
3117 			if (!folio_test_referenced(folio) && pmd_young(old_pmd))
3118 				folio_set_referenced(folio);
3119 			folio_remove_rmap_pmd(folio, page, vma);
3120 			folio_put(folio);
3121 		}
3122 		add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
3123 		return;
3124 	}
3125 
3126 	if (is_huge_zero_pmd(*pmd)) {
3127 		/*
3128 		 * FIXME: Do we want to invalidate the secondary mmu by calling
3129 		 * mmu_notifier_arch_invalidate_secondary_tlbs()? See the comments
3130 		 * inside __split_huge_pmd() below.
3131 		 *
3132 		 * We are going from a write-protected huge zero page to
3133 		 * write-protected small zero pages, so it does not seem useful
3134 		 * to invalidate the secondary mmu at this time.
3135 		 */
3136 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
3137 	}
3138 
3139 	if (pmd_is_migration_entry(*pmd)) {
3140 		softleaf_t entry;
3141 
3142 		old_pmd = *pmd;
3143 		entry = softleaf_from_pmd(old_pmd);
3144 		page = softleaf_to_page(entry);
3145 		folio = page_folio(page);
3146 
3147 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
3148 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
3149 
3150 		write = softleaf_is_migration_write(entry);
3151 		if (PageAnon(page))
3152 			anon_exclusive = softleaf_is_migration_read_exclusive(entry);
3153 		young = softleaf_is_migration_young(entry);
3154 		dirty = softleaf_is_migration_dirty(entry);
3155 	} else if (pmd_is_device_private_entry(*pmd)) {
3156 		softleaf_t entry;
3157 
3158 		old_pmd = *pmd;
3159 		entry = softleaf_from_pmd(old_pmd);
3160 		page = softleaf_to_page(entry);
3161 		folio = page_folio(page);
3162 
3163 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
3164 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
3165 
3166 		write = softleaf_is_device_private_write(entry);
3167 		anon_exclusive = PageAnonExclusive(page);
3168 
3169 		/*
3170 		 * Device private THP should be treated the same as regular
3171 		 * folios w.r.t anon exclusive handling. See the comments for
3172 		 * folio handling and anon_exclusive below.
3173 		 */
3174 		if (freeze && anon_exclusive &&
3175 		    folio_try_share_anon_rmap_pmd(folio, page))
3176 			freeze = false;
3177 		if (!freeze) {
3178 			rmap_t rmap_flags = RMAP_NONE;
3179 
3180 			folio_ref_add(folio, HPAGE_PMD_NR - 1);
3181 			if (anon_exclusive)
3182 				rmap_flags |= RMAP_EXCLUSIVE;
3183 
3184 			folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
3185 						 vma, haddr, rmap_flags);
3186 		}
3187 	} else {
3188 		/*
3189 		 * Up to this point the pmd is present and huge and userland has
3190 		 * the whole access to the hugepage during the split (which
3191 		 * happens in place). If we overwrite the pmd with the not-huge
3192 		 * version pointing to the pte here (which of course we could if
3193 		 * all CPUs were bug free), userland could trigger a small page
3194 		 * size TLB miss on the small sized TLB while the hugepage TLB
3195 		 * entry is still established in the huge TLB. Some CPU doesn't
3196 		 * entry is still established in the huge TLB. Some CPUs don't
3197 		 * like that. See
3198 		 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
3199 		 * 383 on page 105. Intel should be safe but also warns that
3200 		 * it's only safe if the permission and cache attributes of the
3201 		 * two entries loaded in the two TLBs are identical (which should
3202 		 * small and huge TLB entries for the same virtual address to be
3203 		 * loaded simultaneously. So instead of doing "pmd_populate();
3204 		 * flush_pmd_tlb_range();" we first mark the current pmd
3205 		 * notpresent (atomically because here the pmd_trans_huge must
3206 		 * remain set at all times on the pmd until the split is
3207 		 * complete for this pmd), then we flush the SMP TLB and finally
3208 		 * we write the non-huge version of the pmd entry with
3209 		 * pmd_populate.
3210 		 */
3211 		old_pmd = pmdp_invalidate(vma, haddr, pmd);
3212 		page = pmd_page(old_pmd);
3213 		folio = page_folio(page);
3214 		if (pmd_dirty(old_pmd)) {
3215 			dirty = true;
3216 			folio_set_dirty(folio);
3217 		}
3218 		write = pmd_write(old_pmd);
3219 		young = pmd_young(old_pmd);
3220 		soft_dirty = pmd_soft_dirty(old_pmd);
3221 		uffd_wp = pmd_uffd_wp(old_pmd);
3222 
3223 		VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
3224 		VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
3225 
3226 		/*
3227 		 * Without "freeze", we'll simply split the PMD, propagating the
3228 		 * PageAnonExclusive() flag for each PTE by setting it for
3229 		 * each subpage -- no need to (temporarily) clear.
3230 		 *
3231 		 * With "freeze" we want to replace mapped pages by
3232 		 * migration entries right away. This is only possible if we
3233 		 * managed to clear PageAnonExclusive() -- see
3234 		 * set_pmd_migration_entry().
3235 		 *
3236 		 * In case we cannot clear PageAnonExclusive(), split the PMD
3237 		 * only and let try_to_migrate_one() fail later.
3238 		 *
3239 		 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
3240 		 */
3241 		anon_exclusive = PageAnonExclusive(page);
3242 		if (freeze && anon_exclusive &&
3243 		    folio_try_share_anon_rmap_pmd(folio, page))
3244 			freeze = false;
3245 		if (!freeze) {
3246 			rmap_t rmap_flags = RMAP_NONE;
3247 
3248 			folio_ref_add(folio, HPAGE_PMD_NR - 1);
3249 			if (anon_exclusive)
3250 				rmap_flags |= RMAP_EXCLUSIVE;
3251 			folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
3252 						 vma, haddr, rmap_flags);
3253 		}
3254 	}
3255 
3256 	/*
3257 	 * Withdraw the table only after we mark the pmd entry invalid.
3258 	 * This is critical for some architectures (Power).
3259 	 */
3260 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
3261 	pmd_populate(mm, &_pmd, pgtable);
3262 
3263 	pte = pte_offset_map(&_pmd, haddr);
3264 	VM_BUG_ON(!pte);
3265 
3266 	/*
3267 	 * Note that NUMA hinting access restrictions are not transferred to
3268 	 * avoid any possibility of altering permissions across VMAs.
3269 	 */
3270 	if (freeze || pmd_is_migration_entry(old_pmd)) {
3271 		pte_t entry;
3272 		swp_entry_t swp_entry;
3273 
3274 		for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
3275 			if (write)
3276 				swp_entry = make_writable_migration_entry(
3277 							page_to_pfn(page + i));
3278 			else if (anon_exclusive)
3279 				swp_entry = make_readable_exclusive_migration_entry(
3280 							page_to_pfn(page + i));
3281 			else
3282 				swp_entry = make_readable_migration_entry(
3283 							page_to_pfn(page + i));
3284 			if (young)
3285 				swp_entry = make_migration_entry_young(swp_entry);
3286 			if (dirty)
3287 				swp_entry = make_migration_entry_dirty(swp_entry);
3288 			entry = swp_entry_to_pte(swp_entry);
3289 			if (soft_dirty)
3290 				entry = pte_swp_mksoft_dirty(entry);
3291 			if (uffd_wp)
3292 				entry = pte_swp_mkuffd_wp(entry);
3293 			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3294 			set_pte_at(mm, addr, pte + i, entry);
3295 		}
3296 	} else if (pmd_is_device_private_entry(old_pmd)) {
3297 		pte_t entry;
3298 		swp_entry_t swp_entry;
3299 
3300 		for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
3301 			/*
3302 			 * anon_exclusive was already propagated to the relevant
3303 			 * pages corresponding to the pte entries when freeze
3304 			 * is false.
3305 			 */
3306 			if (write)
3307 				swp_entry = make_writable_device_private_entry(
3308 							page_to_pfn(page + i));
3309 			else
3310 				swp_entry = make_readable_device_private_entry(
3311 							page_to_pfn(page + i));
3312 			/*
3313 			 * Young and dirty bits are not propagated via the swp_entry.
3314 			 */
3315 			entry = swp_entry_to_pte(swp_entry);
3316 			if (soft_dirty)
3317 				entry = pte_swp_mksoft_dirty(entry);
3318 			if (uffd_wp)
3319 				entry = pte_swp_mkuffd_wp(entry);
3320 			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3321 			set_pte_at(mm, addr, pte + i, entry);
3322 		}
3323 	} else {
3324 		pte_t entry;
3325 
3326 		entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
3327 		if (write)
3328 			entry = pte_mkwrite(entry, vma);
3329 		if (!young)
3330 			entry = pte_mkold(entry);
3331 		/* NOTE: this may set soft-dirty too on some archs */
3332 		if (dirty)
3333 			entry = pte_mkdirty(entry);
3334 		if (soft_dirty)
3335 			entry = pte_mksoft_dirty(entry);
3336 		if (uffd_wp)
3337 			entry = pte_mkuffd_wp(entry);
3338 
3339 		for (i = 0; i < HPAGE_PMD_NR; i++)
3340 			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3341 
3342 		set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
3343 	}
3344 	pte_unmap(pte);
3345 
3346 	if (!pmd_is_migration_entry(*pmd))
3347 		folio_remove_rmap_pmd(folio, page, vma);
3348 	if (freeze)
3349 		put_page(page);
3350 
3351 	smp_wmb(); /* make pte visible before pmd */
3352 	pmd_populate(mm, pmd, pgtable);
3353 }
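
/*
 * Worked example (editor's note): with freeze == true, a writable, young,
 * dirty PMD mapping pfn P is replaced by HPAGE_PMD_NR swap PTEs, where
 * entry i encodes make_writable_migration_entry(P + i) plus the young and
 * dirty bits, so the migration code can later restore equivalent PTEs via
 * remove_migration_ptes().
 */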
3354 
3355 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
3356 			   pmd_t *pmd, bool freeze)
3357 {
3358 	VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
3359 	if (pmd_trans_huge(*pmd) || pmd_is_valid_softleaf(*pmd))
3360 		__split_huge_pmd_locked(vma, pmd, address, freeze);
3361 }
3362 
3363 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
3364 		unsigned long address, bool freeze)
3365 {
3366 	spinlock_t *ptl;
3367 	struct mmu_notifier_range range;
3368 
3369 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
3370 				address & HPAGE_PMD_MASK,
3371 				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
3372 	mmu_notifier_invalidate_range_start(&range);
3373 	ptl = pmd_lock(vma->vm_mm, pmd);
3374 	split_huge_pmd_locked(vma, range.start, pmd, freeze);
3375 	spin_unlock(ptl);
3376 	mmu_notifier_invalidate_range_end(&range);
3377 }
3378 
3379 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
3380 		bool freeze)
3381 {
3382 	pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
3383 
3384 	if (!pmd)
3385 		return;
3386 
3387 	__split_huge_pmd(vma, pmd, address, freeze);
3388 }
3389 
3390 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
3391 {
3392 	/*
3393 	 * If the new address isn't hpage-aligned and could previously
3394 	 * contain a hugepage: check if we need to split a huge pmd.
3395 	 */
3396 	if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
3397 	    range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
3398 			 ALIGN(address, HPAGE_PMD_SIZE)))
3399 		split_huge_pmd_address(vma, address, false);
3400 }
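
/*
 * Worked example (editor's note): with HPAGE_PMD_SIZE == 2M, a new VMA
 * boundary at 0x200100000 is not PMD-aligned, so the enclosing range
 * [0x200000000, 0x200200000) is checked and a huge PMD straddling the
 * boundary is split; a boundary at 0x200200000 is already aligned and
 * nothing needs to be done.
 */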
3401 
3402 void vma_adjust_trans_huge(struct vm_area_struct *vma,
3403 			   unsigned long start,
3404 			   unsigned long end,
3405 			   struct vm_area_struct *next)
3406 {
3407 	/* Check if we need to split start first. */
3408 	split_huge_pmd_if_needed(vma, start);
3409 
3410 	/* Check if we need to split end next. */
3411 	split_huge_pmd_if_needed(vma, end);
3412 
3413 	/* If we're incrementing next->vm_start, we might need to split it. */
3414 	if (next)
3415 		split_huge_pmd_if_needed(next, end);
3416 }
3417 
3418 static void unmap_folio(struct folio *folio)
3419 {
3420 	enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
3421 		TTU_BATCH_FLUSH;
3422 
3423 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3424 
3425 	if (folio_test_pmd_mappable(folio))
3426 		ttu_flags |= TTU_SPLIT_HUGE_PMD;
3427 
3428 	/*
3429 	 * Anon pages need migration entries to preserve them, but file
3430 	 * pages can simply be left unmapped, then faulted back on demand.
3431 	 * If that is ever changed (perhaps for mlock), update remap_page().
3432 	 */
3433 	if (folio_test_anon(folio))
3434 		try_to_migrate(folio, ttu_flags);
3435 	else
3436 		try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
3437 
3438 	try_to_unmap_flush();
3439 }
3440 
3441 static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
3442 					    unsigned long addr, pmd_t *pmdp,
3443 					    struct folio *folio)
3444 {
3445 	struct mm_struct *mm = vma->vm_mm;
3446 	int ref_count, map_count;
3447 	pmd_t orig_pmd = *pmdp;
3448 
3449 	if (pmd_dirty(orig_pmd))
3450 		folio_set_dirty(folio);
3451 	if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
3452 		folio_set_swapbacked(folio);
3453 		return false;
3454 	}
3455 
3456 	orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
3457 
3458 	/*
3459 	 * Syncing against concurrent GUP-fast:
3460 	 * - clear PMD; barrier; read refcount
3461 	 * - inc refcount; barrier; read PMD
3462 	 */
3463 	smp_mb();
3464 
3465 	ref_count = folio_ref_count(folio);
3466 	map_count = folio_mapcount(folio);
3467 
3468 	/*
3469 	 * Order reads for folio refcount and dirty flag
3470 	 * (see comments in __remove_mapping()).
3471 	 */
3472 	smp_rmb();
3473 
3474 	/*
3475 	 * If the folio or its PMD is redirtied at this point, or if there
3476 	 * are unexpected references, we give up on discarding this folio
3477 	 * and remap it.
3478 	 *
3479 	 * The only folio refs must be the one from isolation plus the rmap(s).
3480 	 */
3481 	if (pmd_dirty(orig_pmd))
3482 		folio_set_dirty(folio);
3483 	if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
3484 		folio_set_swapbacked(folio);
3485 		set_pmd_at(mm, addr, pmdp, orig_pmd);
3486 		return false;
3487 	}
3488 
3489 	if (ref_count != map_count + 1) {
3490 		set_pmd_at(mm, addr, pmdp, orig_pmd);
3491 		return false;
3492 	}
3493 
3494 	folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma);
3495 	zap_deposited_table(mm, pmdp);
3496 	add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
3497 	if (vma->vm_flags & VM_LOCKED)
3498 		mlock_drain_local();
3499 	folio_put(folio);
3500 
3501 	return true;
3502 }
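
/*
 * Worked example (editor's note): for an anon THP mapped by exactly one
 * PMD and isolated by the caller, folio_mapcount() == 1 and
 * folio_ref_count() == 2 (one reference from the mapping plus one from
 * isolation), so the ref_count != map_count + 1 test above passes. Any
 * extra reference, e.g. a concurrent GUP pin, makes the discard back off
 * and restore the PMD.
 */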
3503 
3504 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
3505 			   pmd_t *pmdp, struct folio *folio)
3506 {
3507 	VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
3508 	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
3509 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
3510 	VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
3511 	VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
3512 
3513 	return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
3514 }
3515 
3516 static void remap_page(struct folio *folio, unsigned long nr, int flags)
3517 {
3518 	int i = 0;
3519 
3520 	/* If unmap_folio() uses try_to_migrate() on file, remove this check */
3521 	if (!folio_test_anon(folio))
3522 		return;
3523 	for (;;) {
3524 		remove_migration_ptes(folio, folio, TTU_RMAP_LOCKED | flags);
3525 		i += folio_nr_pages(folio);
3526 		if (i >= nr)
3527 			break;
3528 		folio = folio_next(folio);
3529 	}
3530 }
3531 
3532 static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
3533 		struct lruvec *lruvec, struct list_head *list)
3534 {
3535 	VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);
3536 	lockdep_assert_held(&lruvec->lru_lock);
3537 
3538 	if (folio_is_device_private(folio))
3539 		return;
3540 
3541 	if (list) {
3542 		/* page reclaim is reclaiming a huge page */
3543 		VM_WARN_ON(folio_test_lru(folio));
3544 		folio_get(new_folio);
3545 		list_add_tail(&new_folio->lru, list);
3546 	} else {
3547 		/* head is still on lru (and we have it frozen) */
3548 		VM_WARN_ON(!folio_test_lru(folio));
3549 		if (folio_test_unevictable(folio))
3550 			new_folio->mlock_count = 0;
3551 		else
3552 			list_add_tail(&new_folio->lru, &folio->lru);
3553 		folio_set_lru(new_folio);
3554 	}
3555 }
3556 
3557 static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
3558 {
3559 	for (; nr_pages; page++, nr_pages--)
3560 		if (PageHWPoison(page))
3561 			return true;
3562 	return false;
3563 }
3564 
3565 /*
3566  * It splits @folio into @new_order folios and copies the @folio metadata to
3567  * all the resulting folios.
3568  */
3569 static void __split_folio_to_order(struct folio *folio, int old_order,
3570 		int new_order)
3571 {
3572 	/* Scan for poisoned pages when splitting a poisoned folio into large folios */
3573 	const bool handle_hwpoison = folio_test_has_hwpoisoned(folio) && new_order;
3574 	long new_nr_pages = 1 << new_order;
3575 	long nr_pages = 1 << old_order;
3576 	long i;
3577 
3578 	folio_clear_has_hwpoisoned(folio);
3579 
3580 	/* Check the first new_nr_pages pages, since the loop below skips them */
3581 	if (handle_hwpoison &&
3582 	    page_range_has_hwpoisoned(folio_page(folio, 0), new_nr_pages))
3583 		folio_set_has_hwpoisoned(folio);
3584 	/*
3585 	 * Skip the first new_nr_pages, since the new folio made from them
3586 	 * already has all the flags of the original folio.
3587 	 */
3588 	for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) {
3589 		struct page *new_head = &folio->page + i;
3590 		/*
3591 		 * Careful: new_folio is not a "real" folio until we clear PageTail.
3592 		 * Don't pass it around before clear_compound_head().
3593 		 */
3594 		struct folio *new_folio = (struct folio *)new_head;
3595 
3596 		VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head);
3597 
3598 		/*
3599 		 * Clone page flags before unfreezing refcount.
3600 		 *
3601 		 * A successful get_page_unless_zero() might be followed by flag
3602 		 * changes, for example lock_page() setting PG_waiters.
3603 		 *
3604 		 * Note that for mapped sub-pages of an anonymous THP,
3605 		 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
3606 		 * the migration entry instead from where remap_page() will restore it.
3607 		 * We can still have PG_anon_exclusive set on effectively unmapped and
3608 		 * unreferenced sub-pages of an anonymous THP: we can simply drop
3609 		 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
3610 		 */
3611 		new_folio->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
3612 		new_folio->flags.f |= (folio->flags.f &
3613 				((1L << PG_referenced) |
3614 				 (1L << PG_swapbacked) |
3615 				 (1L << PG_swapcache) |
3616 				 (1L << PG_mlocked) |
3617 				 (1L << PG_uptodate) |
3618 				 (1L << PG_active) |
3619 				 (1L << PG_workingset) |
3620 				 (1L << PG_locked) |
3621 				 (1L << PG_unevictable) |
3622 #ifdef CONFIG_ARCH_USES_PG_ARCH_2
3623 				 (1L << PG_arch_2) |
3624 #endif
3625 #ifdef CONFIG_ARCH_USES_PG_ARCH_3
3626 				 (1L << PG_arch_3) |
3627 #endif
3628 				 (1L << PG_dirty) |
3629 				 LRU_GEN_MASK | LRU_REFS_MASK));
3630 
3631 		if (handle_hwpoison &&
3632 		    page_range_has_hwpoisoned(new_head, new_nr_pages))
3633 			folio_set_has_hwpoisoned(new_folio);
3634 
3635 		new_folio->mapping = folio->mapping;
3636 		new_folio->index = folio->index + i;
3637 
3638 		if (folio_test_swapcache(folio))
3639 			new_folio->swap.val = folio->swap.val + i;
3640 
3641 		/* Page flags must be visible before we make the page non-compound. */
3642 		smp_wmb();
3643 
3644 		/*
3645 		 * Clear PageTail before unfreezing page refcount.
3646 		 *
3647 		 * A successful get_page_unless_zero() might be followed by
3648 		 * put_page(), which needs a correct compound_head().
3649 		 */
3650 		clear_compound_head(new_head);
3651 		if (new_order) {
3652 			prep_compound_page(new_head, new_order);
3653 			folio_set_large_rmappable(new_folio);
3654 		}
3655 
3656 		if (folio_test_young(folio))
3657 			folio_set_young(new_folio);
3658 		if (folio_test_idle(folio))
3659 			folio_set_idle(new_folio);
3660 #ifdef CONFIG_MEMCG
3661 		new_folio->memcg_data = folio->memcg_data;
3662 #endif
3663 
3664 		folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
3665 	}
3666 
3667 	if (new_order)
3668 		folio_set_order(folio, new_order);
3669 	else
3670 		ClearPageCompound(&folio->page);
3671 }
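
/*
 * Worked example (editor's note): splitting an order-4 pagecache folio at
 * index 32 into order-2 folios creates new folios at indices 36, 40 and
 * 44, while the original struct folio keeps index 32 at the new order;
 * for a swapcache folio, swap.val advances by the same per-page offsets.
 */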
3672 
3673 /**
3674  * __split_unmapped_folio() - splits an unmapped @folio to lower order folios in
3675  * two ways: uniform split or non-uniform split.
3676  * @folio: the to-be-split folio
3677  * @new_order: the smallest order of the after split folios (since buddy
3678  *             allocator like split generates folios with orders from @folio's
3679  *             order - 1 to new_order).
3680  * @split_at: in buddy allocator like split, the folio containing @split_at
3681  *            will be split until its order becomes @new_order.
3682  * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller
3683  * @mapping: @folio->mapping
3684  * @split_type: if the split is uniform or not (buddy allocator like split)
3685  * @split_type: whether the split is uniform or not (buddy allocator like split)
3686  *
3687  * 1. uniform split: the given @folio is split into multiple @new_order small folios,
3689  *    split_type is SPLIT_TYPE_UNIFORM.
3690  * 2. buddy allocator like (non-uniform) split: the given @folio is split into
3691  *    half and one of the half (containing the given page) is split into half
3692  *    until the given @folio's order becomes @new_order. This is done when
3693  *    split_type is SPLIT_TYPE_NON_UNIFORM.
3694  *
3695  * The high level flow for these two methods are:
3696  *
3697  * 1. uniform split: @xas is split with no expectation of failure and a single
3698  *    __split_folio_to_order() is called to split the @folio into @new_order
3699  *    along with stats update.
3700  * 2. non-uniform split: folio_order - @new_order calls to
3701  *    __split_folio_to_order() are expected to be made in a for loop to split
3702  *    the @folio to one lower order at a time. The folio containing @split_at
3703  *    is split in each iteration. @xas is split into half in each iteration and
3704  *    can fail. A failed @xas split leaves split folios as is without merging
3705  *    them back.
3706  *
3707  * After splitting, the caller's folio reference will be transferred to the
3708  * folio containing @split_at. The caller needs to unlock and/or free
3709  * after-split folios if necessary.
3710  *
3711  * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
3712  * split but not to @new_order, the caller needs to check)
3713  */
3714 static int __split_unmapped_folio(struct folio *folio, int new_order,
3715 		struct page *split_at, struct xa_state *xas,
3716 		struct address_space *mapping, enum split_type split_type)
3717 {
3718 	const bool is_anon = folio_test_anon(folio);
3719 	int old_order = folio_order(folio);
3720 	int start_order = split_type == SPLIT_TYPE_UNIFORM ? new_order : old_order - 1;
3721 	struct folio *old_folio = folio;
3722 	int split_order;
3723 
3724 	/*
3725 	 * Split to @new_order one order at a time (non-uniform split). For a
3726 	 * uniform split, the folio is split to @new_order directly.
3727 	 */
3728 	for (split_order = start_order;
3729 	     split_order >= new_order;
3730 	     split_order--) {
3731 		int nr_new_folios = 1UL << (old_order - split_order);
3732 
3733 		/* order-1 anonymous folio is not supported */
3734 		if (is_anon && split_order == 1)
3735 			continue;
3736 
3737 		if (mapping) {
3738 			/*
3739 			 * uniform split has xas_split_alloc() called before
3740 			 * irq is disabled to allocate enough memory, whereas
3741 			 * non-uniform split can handle ENOMEM.
3742 			 * Use the to-be-split folio, so that a parallel
3743 			 * folio_try_get() waits on it until xarray is updated
3744 			 * with after-split folios and the original one is
3745 			 * unfrozen.
3746 			 */
3747 			if (split_type == SPLIT_TYPE_UNIFORM) {
3748 				xas_split(xas, old_folio, old_order);
3749 			} else {
3750 				xas_set_order(xas, folio->index, split_order);
3751 				xas_try_split(xas, old_folio, old_order);
3752 				if (xas_error(xas))
3753 					return xas_error(xas);
3754 			}
3755 		}
3756 
3757 		folio_split_memcg_refs(folio, old_order, split_order);
3758 		split_page_owner(&folio->page, old_order, split_order);
3759 		pgalloc_tag_split(folio, old_order, split_order);
3760 		__split_folio_to_order(folio, old_order, split_order);
3761 
3762 		if (is_anon) {
3763 			mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
3764 			mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, nr_new_folios);
3765 		}
3766 		/*
3767 		 * If uniform split, the process is complete.
3768 		 * If non-uniform, continue splitting the folio at @split_at
3769 		 * as long as the next @split_order is >= @new_order.
3770 		 */
3771 		folio = page_folio(split_at);
3772 		old_order = split_order;
3773 	}
3774 
3775 	return 0;
3776 }
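
/*
 * Worked example (editor's note): a non-uniform split of an order-9 anon
 * folio to new_order 0 iterates split_order = 8, 7, ..., 2, 0 (order 1 is
 * skipped for anon), halving the folio that contains @split_at on each
 * pass. The result is one folio of each order 8..2 plus four order-0
 * folios, since the last order-2 step goes straight to order 0.
 */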
3777 
3778 /**
3779  * folio_check_splittable() - check if a folio can be split to a given order
3780  * @folio: folio to be split
3781  * @new_order: the smallest order of the after split folios (since buddy
3782  *             allocator like split generates folios with orders from @folio's
3783  *             order - 1 to new_order).
3784  * @split_type: uniform or non-uniform split
3785  *
3786  * folio_check_splittable() checks if @folio can be split to @new_order using
3787  * @split_type method. The truncated folio check must come first.
3788  *
3789  * Context: folio must be locked.
3790  *
3791  * Return: 0 - @folio can be split to @new_order, otherwise an error number is
3792  * returned.
3793  */
3794 int folio_check_splittable(struct folio *folio, unsigned int new_order,
3795 			   enum split_type split_type)
3796 {
3797 	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
3798 	/*
3799 	 * Folios that just got truncated cannot get split. Signal to the
3800 	 * caller that there was a race.
3801 	 *
3802 	 * TODO: this will also currently refuse folios without a mapping in the
3803 	 * swapcache (shmem or to-be-anon folios).
3804 	 */
3805 	if (!folio->mapping && !folio_test_anon(folio))
3806 		return -EBUSY;
3807 
3808 	if (folio_test_anon(folio)) {
3809 		/* order-1 is not supported for anonymous THP. */
3810 		if (new_order == 1)
3811 			return -EINVAL;
3812 	} else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
3813 		if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3814 		    !mapping_large_folio_support(folio->mapping)) {
3815 			/*
3816 			 * We can always split a folio down to a single page
3817 			 * (new_order == 0) uniformly.
3818 			 *
3819 			 * For any other scenario
3820 			 *   a) uniform split targeting a large folio
3821 			 *      (new_order > 0)
3822 			 *   b) any non-uniform split
3823 			 * we must confirm that the file system supports large
3824 			 * folios.
3825 			 *
3826 			 * Note that we might still have THPs in such
3827 			 * mappings, which is created from khugepaged when
3828 			 * CONFIG_READ_ONLY_THP_FOR_FS is enabled. But in that
3829 			 * case, the mapping does not actually support large
3830 			 * folios properly.
3831 			 */
3832 			return -EINVAL;
3833 		}
3834 	}
3835 
3836 	/*
3837 	 * A swapcache folio can only be split to order 0.
3838 	 *
3839 	 * non-uniform split creates after-split folios with orders from
3840 	 * folio_order(folio) - 1 to new_order, making it not suitable for any
3841 	 * swapcache folio split. Only uniform split to order-0 can be used
3842 	 * here.
3843 	 */
3844 	if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) &&
3845 	    folio_test_swapcache(folio))
3846 		return -EINVAL;
3847 
3848 	if (is_huge_zero_folio(folio))
3849 		return -EINVAL;
3850 
3851 	if (folio_test_writeback(folio))
3852 		return -EBUSY;
3853 
3854 	return 0;
3855 }
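
/*
 * Usage sketch (editor's example; can_split_to_order_zero() is
 * hypothetical): a caller can probe splittability up front instead of
 * paying for unmap_folio() first. The folio must be locked, as the
 * function itself asserts.
 */
static inline bool can_split_to_order_zero(struct folio *folio)
{
	return folio_check_splittable(folio, 0, SPLIT_TYPE_UNIFORM) == 0;
}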
3856 
3857 /* Number of folio references from the pagecache or the swapcache. */
3858 static unsigned int folio_cache_ref_count(const struct folio *folio)
3859 {
3860 	if (folio_test_anon(folio) && !folio_test_swapcache(folio))
3861 		return 0;
3862 	return folio_nr_pages(folio);
3863 }
3864 
3865 static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
3866 					     struct page *split_at, struct xa_state *xas,
3867 					     struct address_space *mapping, bool do_lru,
3868 					     struct list_head *list, enum split_type split_type,
3869 					     pgoff_t end, int *nr_shmem_dropped)
3870 {
3871 	struct folio *end_folio = folio_next(folio);
3872 	struct folio *new_folio, *next;
3873 	int old_order = folio_order(folio);
3874 	int ret = 0;
3875 	struct deferred_split *ds_queue;
3876 
3877 	VM_WARN_ON_ONCE(!mapping && end);
3878 	/* Prevent deferred_split_scan() touching ->_refcount */
3879 	ds_queue = folio_split_queue_lock(folio);
3880 	if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
3881 		struct swap_cluster_info *ci = NULL;
3882 		struct lruvec *lruvec;
3883 
3884 		if (old_order > 1) {
3885 			if (!list_empty(&folio->_deferred_list)) {
3886 				ds_queue->split_queue_len--;
3887 				/*
3888 				 * Reinitialize page_deferred_list after removing the
3889 				 * page from the split_queue, otherwise a subsequent
3890 				 * split will see list corruption when checking the
3891 				 * page_deferred_list.
3892 				 */
3893 				list_del_init(&folio->_deferred_list);
3894 			}
3895 			if (folio_test_partially_mapped(folio)) {
3896 				folio_clear_partially_mapped(folio);
3897 				mod_mthp_stat(old_order,
3898 					MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3899 			}
3900 		}
3901 		split_queue_unlock(ds_queue);
3902 		if (mapping) {
3903 			int nr = folio_nr_pages(folio);
3904 
3905 			if (folio_test_pmd_mappable(folio) &&
3906 			    new_order < HPAGE_PMD_ORDER) {
3907 				if (folio_test_swapbacked(folio)) {
3908 					lruvec_stat_mod_folio(folio,
3909 							NR_SHMEM_THPS, -nr);
3910 				} else {
3911 					lruvec_stat_mod_folio(folio,
3912 							NR_FILE_THPS, -nr);
3913 					filemap_nr_thps_dec(mapping);
3914 				}
3915 			}
3916 		}
3917 
3918 		if (folio_test_swapcache(folio)) {
3919 			if (mapping) {
3920 				VM_WARN_ON_ONCE_FOLIO(mapping, folio);
3921 				return -EINVAL;
3922 			}
3923 
3924 			ci = swap_cluster_get_and_lock(folio);
3925 		}
3926 
3927 		/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
3928 		if (do_lru)
3929 			lruvec = folio_lruvec_lock(folio);
3930 
3931 		ret = __split_unmapped_folio(folio, new_order, split_at, xas,
3932 					     mapping, split_type);
3933 
3934 		/*
3935 		 * Unfreeze after-split folios and put them back to the right
3936 		 * list. @folio should be kept frozen until page cache
3937 		 * entries are updated with all the other after-split folios
3938 		 * to prevent others seeing stale page cache entries.
3939 		 * As a result, new_folio starts from the next folio of
3940 		 * @folio.
3941 		 */
3942 		for (new_folio = folio_next(folio); new_folio != end_folio;
3943 		     new_folio = next) {
3944 			unsigned long nr_pages = folio_nr_pages(new_folio);
3945 
3946 			next = folio_next(new_folio);
3947 
3948 			zone_device_private_split_cb(folio, new_folio);
3949 
3950 			folio_ref_unfreeze(new_folio,
3951 					   folio_cache_ref_count(new_folio) + 1);
3952 
3953 			if (do_lru)
3954 				lru_add_split_folio(folio, new_folio, lruvec, list);
3955 
3956 			/*
3957 			 * Anonymous folio with swap cache.
3958 			 * NOTE: shmem in swap cache is not supported yet.
3959 			 */
3960 			if (ci) {
3961 				__swap_cache_replace_folio(ci, folio, new_folio);
3962 				continue;
3963 			}
3964 
3965 			/* Anonymous folio without swap cache */
3966 			if (!mapping)
3967 				continue;
3968 
3969 			/* Add the new folio to the page cache. */
3970 			if (new_folio->index < end) {
3971 				__xa_store(&mapping->i_pages, new_folio->index,
3972 					   new_folio, 0);
3973 				continue;
3974 			}
3975 
3976 			VM_WARN_ON_ONCE(!nr_shmem_dropped);
3977 			/* Drop folio beyond EOF: ->index >= end */
3978 			if (shmem_mapping(mapping) && nr_shmem_dropped)
3979 				*nr_shmem_dropped += nr_pages;
3980 			else if (folio_test_clear_dirty(new_folio))
3981 				folio_account_cleaned(
3982 					new_folio, inode_to_wb(mapping->host));
3983 			__filemap_remove_folio(new_folio, NULL);
3984 			folio_put_refs(new_folio, nr_pages);
3985 		}
3986 
3987 		zone_device_private_split_cb(folio, NULL);
3988 		/*
3989 		 * Unfreeze @folio only after all page cache entries, which
3990 		 * used to point to it, have been updated with new folios.
3991 		 * Otherwise, a parallel folio_try_get() can grab @folio
3992 		 * and its caller can see stale page cache entries.
3993 		 */
3994 		folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
3995 
3996 		if (do_lru)
3997 			unlock_page_lruvec(lruvec);
3998 
3999 		if (ci)
4000 			swap_cluster_unlock(ci);
4001 	} else {
4002 		split_queue_unlock(ds_queue);
4003 		return -EAGAIN;
4004 	}
4005 
4006 	return ret;
4007 }
4008 
4009 /**
4010  * __folio_split() - split a folio at @split_at to a @new_order folio
4011  * @folio: folio to split
4012  * @new_order: the order of the new folio
4013  * @split_at: a page within the new folio
4014  * @lock_at: a page within @folio to be left locked to caller
4015  * @list: after-split folios will be put on it if non NULL
4016  * @split_type: perform uniform split or not (non-uniform split)
4017  *
4018  * It calls __split_unmapped_folio() to perform uniform and non-uniform split.
4019  * It is in charge of checking whether the split is supported or not and
4020  * preparing @folio for __split_unmapped_folio().
4021  *
4022  * After splitting, the after-split folio containing @lock_at remains locked
4023  * and others are unlocked:
4024  * 1. for uniform split, @lock_at points to one of @folio's subpages;
4025  * 2. for buddy allocator like (non-uniform) split, @lock_at points to @folio.
4026  *
4027  * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
4028  * split but not to @new_order, the caller needs to check)
4029  */
4030 static int __folio_split(struct folio *folio, unsigned int new_order,
4031 		struct page *split_at, struct page *lock_at,
4032 		struct list_head *list, enum split_type split_type)
4033 {
4034 	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
4035 	struct folio *end_folio = folio_next(folio);
4036 	bool is_anon = folio_test_anon(folio);
4037 	struct address_space *mapping = NULL;
4038 	struct anon_vma *anon_vma = NULL;
4039 	int old_order = folio_order(folio);
4040 	struct folio *new_folio, *next;
4041 	int nr_shmem_dropped = 0;
4042 	enum ttu_flags ttu_flags = 0;
4043 	int ret;
4044 	pgoff_t end = 0;
4045 
4046 	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
4047 	VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
4048 
4049 	if (folio != page_folio(split_at) || folio != page_folio(lock_at)) {
4050 		ret = -EINVAL;
4051 		goto out;
4052 	}
4053 
4054 	if (new_order >= old_order) {
4055 		ret = -EINVAL;
4056 		goto out;
4057 	}
4058 
4059 	ret = folio_check_splittable(folio, new_order, split_type);
4060 	if (ret) {
4061 		VM_WARN_ONCE(ret == -EINVAL, "Tried to split an unsplittable folio");
4062 		goto out;
4063 	}
4064 
4065 	if (is_anon) {
4066 		/*
4067 		 * The caller does not necessarily hold an mmap_lock that would
4068 		 * prevent the anon_vma disappearing, so we first take a
4069 		 * reference to it and then lock the anon_vma for write. This
4070 		 * is similar to folio_lock_anon_vma_read except the write lock
4071 		 * is taken to serialise against parallel split or collapse
4072 		 * operations.
4073 		 */
4074 		anon_vma = folio_get_anon_vma(folio);
4075 		if (!anon_vma) {
4076 			ret = -EBUSY;
4077 			goto out;
4078 		}
4079 		anon_vma_lock_write(anon_vma);
4080 		mapping = NULL;
4081 	} else {
4082 		unsigned int min_order;
4083 		gfp_t gfp;
4084 
4085 		mapping = folio->mapping;
4086 		min_order = mapping_min_folio_order(folio->mapping);
4087 		if (new_order < min_order) {
4088 			ret = -EINVAL;
4089 			goto out;
4090 		}
4091 
4092 		gfp = current_gfp_context(mapping_gfp_mask(mapping) &
4093 							GFP_RECLAIM_MASK);
4094 
4095 		if (!filemap_release_folio(folio, gfp)) {
4096 			ret = -EBUSY;
4097 			goto out;
4098 		}
4099 
4100 		if (split_type == SPLIT_TYPE_UNIFORM) {
4101 			xas_set_order(&xas, folio->index, new_order);
4102 			xas_split_alloc(&xas, folio, old_order, gfp);
4103 			if (xas_error(&xas)) {
4104 				ret = xas_error(&xas);
4105 				goto out;
4106 			}
4107 		}
4108 
4109 		anon_vma = NULL;
4110 		i_mmap_lock_read(mapping);
4111 
4112 		/*
4113 		 * __split_unmapped_folio() may need to trim off pages beyond
4114 		 * EOF: but on 32-bit, i_size_read() takes an irq-unsafe
4115 		 * seqlock, which cannot be nested inside the page tree lock.
4116 		 * So note end now: i_size itself may be changed at any moment,
4117 		 * but folio lock is good enough to serialize the trimming.
4118 		 */
4119 		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
4120 		if (shmem_mapping(mapping))
4121 			end = shmem_fallocend(mapping->host, end);
4122 	}
4123 
4124 	/*
4125 	 * Racy check whether we can split the folio, before unmap_folio()
4126 	 * splits the PMDs.
4127 	 */
4128 	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1) {
4129 		ret = -EAGAIN;
4130 		goto out_unlock;
4131 	}
4132 
4133 	unmap_folio(folio);
4134 
4135 	/* block interrupt reentry in xa_lock and spinlock */
4136 	local_irq_disable();
4137 	if (mapping) {
4138 		/*
4139 		 * Check if the folio is present in page cache.
4140 		 * We assume all tail pages are present too, if the folio is there.
4141 		 */
4142 		xas_lock(&xas);
4143 		xas_reset(&xas);
4144 		if (xas_load(&xas) != folio) {
4145 			ret = -EAGAIN;
4146 			goto fail;
4147 		}
4148 	}
4149 
4150 	ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping,
4151 						true, list, split_type, end, &nr_shmem_dropped);
4152 fail:
4153 	if (mapping)
4154 		xas_unlock(&xas);
4155 
4156 	local_irq_enable();
4157 
4158 	if (nr_shmem_dropped)
4159 		shmem_uncharge(mapping->host, nr_shmem_dropped);
4160 
4161 	if (!ret && is_anon && !folio_is_device_private(folio))
4162 		ttu_flags = TTU_USE_SHARED_ZEROPAGE;
4163 
4164 	remap_page(folio, 1 << old_order, ttu_flags);
4165 
4166 	/*
4167 	 * Unlock all after-split folios except the one containing
4168 	 * @lock_at page. If @folio is not split, it will be kept locked.
4169 	 */
4170 	for (new_folio = folio; new_folio != end_folio; new_folio = next) {
4171 		next = folio_next(new_folio);
4172 		if (new_folio == page_folio(lock_at))
4173 			continue;
4174 
4175 		folio_unlock(new_folio);
4176 		/*
4177 		 * Subpages may be freed if there wasn't any mapping, e.g.
4178 		 * if add_to_swap() is running on an LRU page whose mapping
4179 		 * was zapped. Freeing these pages requires taking the
4180 		 * lru_lock, so we do the put_page of the tail pages after
4181 		 * the split is complete.
4182 		 */
4183 		free_folio_and_swap_cache(new_folio);
4184 	}
4185 
4186 out_unlock:
4187 	if (anon_vma) {
4188 		anon_vma_unlock_write(anon_vma);
4189 		put_anon_vma(anon_vma);
4190 	}
4191 	if (mapping)
4192 		i_mmap_unlock_read(mapping);
4193 out:
4194 	xas_destroy(&xas);
4195 	if (is_pmd_order(old_order))
4196 		count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
4197 	count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
4198 	return ret;
4199 }
4200 
4201 /**
4202  * folio_split_unmapped() - split a large anon folio that is already unmapped
4203  * @folio: folio to split
4204  * @new_order: the order of folios after split
4205  *
4206  * This function is a helper for splitting folios that have already been
4207  * unmapped. The use case is that the device or the CPU can refuse to migrate
4208  * THP pages in the middle of migration, due to allocation issues on either
4209  * side.
4210  *
4211  * anon_vma_lock need not be held, but mmap_read_lock() or
4212  * mmap_write_lock() should be. @folio is expected to be locked by the
4213  * caller. Device-private and non-device-private folios are supported, along
4214  * with folios that are in the swapcache. @folio should also be unmapped and
4215  * isolated from the LRU (if applicable).
4216  *
4217  * Upon return, the folio is not remapped, split folios are not added to LRU,
4218  * free_folio_and_swap_cache() is not called, and new folios remain locked.
4219  *
4220  * Return: 0 on success, -EAGAIN if the folio cannot be split (e.g., due to
4221  *         insufficient reference count or extra pins).
4222  */
4223 int folio_split_unmapped(struct folio *folio, unsigned int new_order)
4224 {
4225 	int ret = 0;
4226 
4227 	VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
4228 	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
4229 	VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
4230 	VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(folio), folio);
4231 
4232 	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1)
4233 		return -EAGAIN;
4234 
4235 	local_irq_disable();
4236 	ret = __folio_freeze_and_split_unmapped(folio, new_order, &folio->page, NULL,
4237 						NULL, false, NULL, SPLIT_TYPE_UNIFORM,
4238 						0, NULL);
4239 	local_irq_enable();
4240 	return ret;
4241 }
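
/*
 * Usage sketch (editor's example; thp_migrate_fallback() is hypothetical):
 * a migration path that cannot allocate a THP-sized destination can fall
 * back to splitting the already-unmapped, locked and isolated source and
 * then migrating the resulting order-0 folios individually.
 */
static int thp_migrate_fallback(struct folio *folio)
{
	int ret = folio_split_unmapped(folio, 0);

	/* On success, the after-split folios remain locked and off the LRU. */
	return ret;
}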
4242 
4243 /*
4244  * This function splits a large folio into smaller folios of order @new_order.
4245  * @page can point to any page of the large folio to split. The split operation
4246  * does not change the position of @page.
4247  *
4248  * Prerequisites:
4249  *
4250  * 1) The caller must hold a reference on the @page's owning folio, also known
4251  *    as the large folio.
4252  *
4253  * 2) The large folio must be locked.
4254  *
4255  * 3) The folio must not be pinned. Any unexpected folio references, including
4256  *    GUP pins, will result in the folio not getting split; instead, the caller
4257  *    will receive an -EAGAIN.
4258  *
4259  * 4) @new_order > 1, usually. Splitting to order-1 is not supported for
4260  *    anonymous folios, because folio->_deferred_list, which
4261  *    is used by partially mapped folios, is stored in subpage 2, but an order-1
4262  *    folio only has subpages 0 and 1. File-backed order-1 folios are supported,
4263  *    since they do not use _deferred_list.
4264  *
4265  * After splitting, the caller's folio reference will be transferred to @page,
4266  * resulting in a raised refcount of @page after this call. The other pages may
4267  * be freed if they are not mapped.
4268  *
4269  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
4270  *
4271  * Pages in @new_order will inherit the mapping, flags, and so on from the
4272  * huge page.
4273  *
4274  * Returns 0 if the huge page was split successfully.
4275  *
4276  * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
4277  * the folio was concurrently removed from the page cache.
4278  *
4279  * Returns -EBUSY when trying to split the huge zeropage, if the folio is
4280  * under writeback, if fs-specific folio metadata cannot currently be
4281  * released, or if some unexpected race happened (e.g., anon VMA disappeared,
4282  * truncation).
4283  *
4284  * Callers should ensure that the order respects the address space mapping
4285  * min-order if one is set for non-anonymous folios.
4286  *
4287  * Returns -EINVAL when trying to split to an order that is incompatible
4288  * with the folio. Splitting to order 0 is compatible with all folios.
4289  */
4290 int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
4291 				     unsigned int new_order)
4292 {
4293 	struct folio *folio = page_folio(page);
4294 
4295 	return __folio_split(folio, new_order, &folio->page, page, list,
4296 			     SPLIT_TYPE_UNIFORM);
4297 }
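
/*
 * Usage sketch (editor's example; try_split_one() is hypothetical): the
 * standard caller protocol is hold a reference, lock the folio, split,
 * unlock, drop the reference. split_huge_page() is the new_order == 0,
 * list == NULL wrapper around the function above.
 */
static int try_split_one(struct folio *folio)
{
	int ret = -EAGAIN;

	if (!folio_try_get(folio))
		return ret;
	if (folio_trylock(folio)) {
		ret = split_huge_page(&folio->page);
		folio_unlock(folio);
	}
	folio_put(folio);
	return ret;
}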
4298 
4299 /**
4300  * folio_split() - split a folio at @split_at to a @new_order folio
4301  * @folio: folio to split
4302  * @new_order: the order of the new folio
4303  * @split_at: a page within the new folio
4304  * @list: after-split folios are added to @list if not null, otherwise to LRU
4305  *        list
4306  *
4307  * It has the same prerequisites and returns as
4308  * split_huge_page_to_list_to_order().
4309  *
4310  * Split a folio at @split_at to a new_order folio, leave the
4311  * remaining subpages of the original folio as large as possible. For example,
4312  * in the case of splitting an order-9 folio at its third order-3 subpages to
4313  * an order-3 folio, there are 2^(9-3)=64 order-3 subpages in the order-9 folio.
4314  * After the split, there will be a group of folios with different orders and
4315  * the new folio containing @split_at is marked in bracket:
4316  * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8].
4317  *
4318  * After split, folio is left locked for caller.
4319  *
4320  * Return: 0 - successful, <0 - failed (if -ENOMEM is returned, @folio might be
4321  * split but not to @new_order, the caller needs to check)
4322  */
4323 int folio_split(struct folio *folio, unsigned int new_order,
4324 		struct page *split_at, struct list_head *list)
4325 {
4326 	return __folio_split(folio, new_order, split_at, &folio->page, list,
4327 			     SPLIT_TYPE_NON_UNIFORM);
4328 }
4329 
4330 /**
4331  * min_order_for_split() - get the minimum order @folio can be split to
4332  * @folio: folio to split
4333  *
4334  * min_order_for_split() tells the minimum order @folio can be split to.
4335  * If a file-backed folio is truncated, 0 will be returned. Any subsequent
4336  * split attempt will then get -EBUSY from the split checking code.
4337  *
4338  * Return: @folio's minimum order for split
4339  */
4340 unsigned int min_order_for_split(struct folio *folio)
4341 {
4342 	if (folio_test_anon(folio))
4343 		return 0;
4344 
4345 	/*
4346 	 * If the folio got truncated, we don't know the previous mapping and
4347 	 * consequently the old min order. But it doesn't matter, as any split
4348 	 * attempt will immediately fail with -EBUSY as the folio cannot get
4349 	 * split until freed.
4350 	 */
4351 	if (!folio->mapping)
4352 		return 0;
4353 
4354 	return mapping_min_folio_order(folio->mapping);
4355 }
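
/*
 * Usage sketch (editor's example; split_respecting_min_order() is
 * hypothetical): callers clamp their target order against the mapping's
 * minimum before splitting, as the prerequisites above require.
 */
static int split_respecting_min_order(struct folio *folio, unsigned int order)
{
	unsigned int min_order = min_order_for_split(folio);

	return split_huge_page_to_list_to_order(&folio->page, NULL,
						max(order, min_order));
}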
4356 
4357 int split_folio_to_list(struct folio *folio, struct list_head *list)
4358 {
4359 	return split_huge_page_to_list_to_order(&folio->page, list, 0);
4360 }
4361 
4362 /*
4363  * __folio_unqueue_deferred_split() is not to be called directly:
4364  * the folio_unqueue_deferred_split() inline wrapper in mm/internal.h
4365  * limits its calls to those folios which may have a _deferred_list for
4366  * queueing THP splits, and that list is (racily observed to be) non-empty.
4367  *
4368  * It is unsafe to call folio_unqueue_deferred_split() until folio refcount is
4369  * zero: because even when split_queue_lock is held, a non-empty _deferred_list
4370  * might be in use on deferred_split_scan()'s unlocked on-stack list.
4371  *
4372  * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is
4373  * therefore important to unqueue deferred split before changing folio memcg.
4374  */
4375 bool __folio_unqueue_deferred_split(struct folio *folio)
4376 {
4377 	struct deferred_split *ds_queue;
4378 	unsigned long flags;
4379 	bool unqueued = false;
4380 
4381 	WARN_ON_ONCE(folio_ref_count(folio));
4382 	WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg_charged(folio));
4383 
4384 	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
4385 	if (!list_empty(&folio->_deferred_list)) {
4386 		ds_queue->split_queue_len--;
4387 		if (folio_test_partially_mapped(folio)) {
4388 			folio_clear_partially_mapped(folio);
4389 			mod_mthp_stat(folio_order(folio),
4390 				      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
4391 		}
4392 		list_del_init(&folio->_deferred_list);
4393 		unqueued = true;
4394 	}
4395 	split_queue_unlock_irqrestore(ds_queue, flags);
4396 
4397 	return unqueued;	/* useful for debug warnings */
4398 }
4399 
4400 /* partially_mapped=false won't clear PG_partially_mapped folio flag */
4401 void deferred_split_folio(struct folio *folio, bool partially_mapped)
4402 {
4403 	struct deferred_split *ds_queue;
4404 	unsigned long flags;
4405 
4406 	/*
4407 	 * Order 1 folios have no space for a deferred list, but we also
4408 	 * won't waste much memory by not adding them to the deferred list.
4409 	 */
4410 	if (folio_order(folio) <= 1)
4411 		return;
4412 
4413 	if (!partially_mapped && !split_underused_thp)
4414 		return;
4415 
4416 	/*
4417 	 * Exclude swapcache: originally to avoid a corrupt deferred split
4418 	 * queue. Nowadays that is fully prevented by memcg1_swapout();
4419 	 * but if page reclaim is already handling the same folio, it is
4420 	 * unnecessary to handle it again in the shrinker, so excluding
4421 	 * swapcache here may still be a useful optimization.
4422 	 */
4423 	if (folio_test_swapcache(folio))
4424 		return;
4425 
4426 	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
4427 	if (partially_mapped) {
4428 		if (!folio_test_partially_mapped(folio)) {
4429 			folio_set_partially_mapped(folio);
4430 			if (folio_test_pmd_mappable(folio))
4431 				count_vm_event(THP_DEFERRED_SPLIT_PAGE);
4432 			count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
4433 			mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
4434 
4435 		}
4436 	} else {
4437 		/* partially mapped folios cannot become non-partially mapped */
4438 		VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
4439 	}
4440 	if (list_empty(&folio->_deferred_list)) {
4441 		struct mem_cgroup *memcg;
4442 
4443 		memcg = folio_split_queue_memcg(folio, ds_queue);
4444 		list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
4445 		ds_queue->split_queue_len++;
4446 		if (memcg)
4447 			set_shrinker_bit(memcg, folio_nid(folio),
4448 					 shrinker_id(deferred_split_shrinker));
4449 	}
4450 	split_queue_unlock_irqrestore(ds_queue, flags);
4451 }
4452 
4453 static unsigned long deferred_split_count(struct shrinker *shrink,
4454 		struct shrink_control *sc)
4455 {
4456 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
4457 	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
4458 
4459 #ifdef CONFIG_MEMCG
4460 	if (sc->memcg)
4461 		ds_queue = &sc->memcg->deferred_split_queue;
4462 #endif
4463 	return READ_ONCE(ds_queue->split_queue_len);
4464 }
4465 
4466 static bool thp_underused(struct folio *folio)
4467 {
4468 	int num_zero_pages = 0, num_filled_pages = 0;
4469 	int i;
4470 
4471 	if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
4472 		return false;
4473 
4474 	if (folio_contain_hwpoisoned_page(folio))
4475 		return false;
4476 
4477 	for (i = 0; i < folio_nr_pages(folio); i++) {
4478 		if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
4479 			if (++num_zero_pages > khugepaged_max_ptes_none)
4480 				return true;
4481 		} else {
4482 			/*
4483 			 * Another path for early exit once the number
4484 			 * of non-zero filled pages exceeds the threshold.
4485 			 */
4486 			if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none)
4487 				return false;
4488 		}
4489 	}
4490 	return false;
4491 }
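
/*
 * Worked example (editor's note): with HPAGE_PMD_NR == 512 and
 * khugepaged_max_ptes_none == 255, the scan reports a folio as underused
 * once 256 zero-filled pages have been seen, and bails out early as soon
 * as 257 (512 - 255) non-zero pages have been seen. At max_ptes_none ==
 * HPAGE_PMD_NR - 1 (the default), the function always returns false and
 * underused splitting is effectively disabled.
 */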
4492 
4493 static unsigned long deferred_split_scan(struct shrinker *shrink,
4494 		struct shrink_control *sc)
4495 {
4496 	struct deferred_split *ds_queue;
4497 	unsigned long flags;
4498 	struct folio *folio, *next;
4499 	int split = 0, i;
4500 	struct folio_batch fbatch;
4501 
4502 	folio_batch_init(&fbatch);
4503 
4504 retry:
4505 	ds_queue = split_queue_lock_irqsave(sc->nid, sc->memcg, &flags);
4506 	/* Take pin on all head pages to avoid freeing them under us */
4507 	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
4508 							_deferred_list) {
4509 		if (folio_try_get(folio)) {
4510 			folio_batch_add(&fbatch, folio);
4511 		} else if (folio_test_partially_mapped(folio)) {
4512 			/* We lost race with folio_put() */
4513 			folio_clear_partially_mapped(folio);
4514 			mod_mthp_stat(folio_order(folio),
4515 				      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
4516 		}
4517 		list_del_init(&folio->_deferred_list);
4518 		ds_queue->split_queue_len--;
4519 		if (!--sc->nr_to_scan)
4520 			break;
4521 		if (!folio_batch_space(&fbatch))
4522 			break;
4523 	}
4524 	split_queue_unlock_irqrestore(ds_queue, flags);
4525 
4526 	for (i = 0; i < folio_batch_count(&fbatch); i++) {
4527 		bool did_split = false;
4528 		bool underused = false;
4529 		struct deferred_split *fqueue;
4530 
4531 		folio = fbatch.folios[i];
4532 		if (!folio_test_partially_mapped(folio)) {
4533 			/*
4534 			 * See try_to_map_unused_to_zeropage(): we cannot
4535 			 * optimize zero-filled pages after splitting an
4536 			 * mlocked folio.
4537 			 */
4538 			if (folio_test_mlocked(folio))
4539 				goto next;
4540 			underused = thp_underused(folio);
4541 			if (!underused)
4542 				goto next;
4543 		}
4544 		if (!folio_trylock(folio))
4545 			goto requeue;
4546 		if (!split_folio(folio)) {
4547 			did_split = true;
4548 			if (underused)
4549 				count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
4550 			split++;
4551 		}
4552 		folio_unlock(folio);
4553 next:
4554 		/*
4555 		 * If split_folio() succeeded, or if the folio is not
4556 		 * partially mapped (it was picked as underused, whether or
4557 		 * not the split then failed), consider it used and don't add
4558 		 * it back to the split_queue.
4559 		 */
4560 		if (did_split || !folio_test_partially_mapped(folio))
4561 			continue;
4562 requeue:
4563 		/*
4564 		 * Add back partially mapped folios, or underused folios that
4565 		 * we could not lock this round.
4566 		 */
4567 		fqueue = folio_split_queue_lock_irqsave(folio, &flags);
4568 		if (list_empty(&folio->_deferred_list)) {
4569 			list_add_tail(&folio->_deferred_list, &fqueue->split_queue);
4570 			fqueue->split_queue_len++;
4571 		}
4572 		split_queue_unlock_irqrestore(fqueue, flags);
4573 	}
4574 	folios_put(&fbatch);
4575 
4576 	if (sc->nr_to_scan && !list_empty(&ds_queue->split_queue)) {
4577 		cond_resched();
4578 		goto retry;
4579 	}
4580 
4581 	/*
4582 	 * Stop the shrinker if we didn't split any page and the queue is empty.
4583 	 * This can happen if pages were freed under us.
4584 	 */
4585 	if (!split && list_empty(&ds_queue->split_queue))
4586 		return SHRINK_STOP;
4587 	return split;
4588 }
4589 
4590 #ifdef CONFIG_MEMCG
4591 void reparent_deferred_split_queue(struct mem_cgroup *memcg)
4592 {
4593 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4594 	struct deferred_split *ds_queue = &memcg->deferred_split_queue;
4595 	struct deferred_split *parent_ds_queue = &parent->deferred_split_queue;
4596 	int nid;
4597 
4598 	spin_lock_irq(&ds_queue->split_queue_lock);
4599 	spin_lock_nested(&parent_ds_queue->split_queue_lock, SINGLE_DEPTH_NESTING);
4600 
4601 	if (!ds_queue->split_queue_len)
4602 		goto unlock;
4603 
4604 	list_splice_tail_init(&ds_queue->split_queue, &parent_ds_queue->split_queue);
4605 	parent_ds_queue->split_queue_len += ds_queue->split_queue_len;
4606 	ds_queue->split_queue_len = 0;
4607 
4608 	for_each_node(nid)
4609 		set_shrinker_bit(parent, nid, shrinker_id(deferred_split_shrinker));
4610 
4611 unlock:
4612 	spin_unlock(&parent_ds_queue->split_queue_lock);
4613 	spin_unlock_irq(&ds_queue->split_queue_lock);
4614 }
4615 #endif
4616 
4617 #ifdef CONFIG_DEBUG_FS
4618 static void split_huge_pages_all(void)
4619 {
4620 	struct zone *zone;
4621 	struct page *page;
4622 	struct folio *folio;
4623 	unsigned long pfn, max_zone_pfn;
4624 	unsigned long total = 0, split = 0;
4625 
4626 	pr_debug("Split all THPs\n");
4627 	for_each_zone(zone) {
4628 		if (!managed_zone(zone))
4629 			continue;
4630 		max_zone_pfn = zone_end_pfn(zone);
4631 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
4632 			int nr_pages;
4633 
4634 			page = pfn_to_online_page(pfn);
4635 			if (!page || PageTail(page))
4636 				continue;
4637 			folio = page_folio(page);
4638 			if (!folio_try_get(folio))
4639 				continue;
4640 
4641 			if (unlikely(page_folio(page) != folio))
4642 				goto next;
4643 
4644 			if (zone != folio_zone(folio))
4645 				goto next;
4646 
4647 			if (!folio_test_large(folio)
4648 				|| folio_test_hugetlb(folio)
4649 				|| !folio_test_lru(folio))
4650 				goto next;
4651 
4652 			total++;
4653 			folio_lock(folio);
4654 			nr_pages = folio_nr_pages(folio);
4655 			if (!split_folio(folio))
4656 				split++;
4657 			pfn += nr_pages - 1;
4658 			folio_unlock(folio);
4659 next:
4660 			folio_put(folio);
4661 			cond_resched();
4662 		}
4663 	}
4664 
4665 	pr_debug("%lu of %lu THP split\n", split, total);
4666 }
4667 
4668 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
4669 {
4670 	if (vma_is_dax(vma))
4671 		return true;
4672 	if (vma_is_special_huge(vma))
4673 		return true;
4674 	if (vma_test(vma, VMA_IO_BIT))
4675 		return true;
4676 	if (is_vm_hugetlb_page(vma))
4677 		return true;
4678 
4679 	return false;
4680 }
4681 
4682 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
4683 				unsigned long vaddr_end, unsigned int new_order,
4684 				long in_folio_offset)
4685 {
4686 	int ret = 0;
4687 	struct task_struct *task;
4688 	struct mm_struct *mm;
4689 	unsigned long total = 0, split = 0;
4690 	unsigned long addr;
4691 
4692 	vaddr_start &= PAGE_MASK;
4693 	vaddr_end &= PAGE_MASK;
4694 
4695 	task = find_get_task_by_vpid(pid);
4696 	if (!task) {
4697 		ret = -ESRCH;
4698 		goto out;
4699 	}
4700 
4701 	/* Find the mm_struct */
4702 	mm = get_task_mm(task);
4703 	put_task_struct(task);
4704 
4705 	if (!mm) {
4706 		ret = -EINVAL;
4707 		goto out;
4708 	}
4709 
4710 	pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n",
4711 		 pid, vaddr_start, vaddr_end, new_order, in_folio_offset);
4712 
4713 	mmap_read_lock(mm);
4714 	/*
4715 	 * always increase addr by PAGE_SIZE, since we could have a PTE page
4716 	 * table filled with PTE-mapped THPs, each of which is distinct.
4717 	 */
4718 	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
4719 		struct vm_area_struct *vma = vma_lookup(mm, addr);
4720 		struct folio_walk fw;
4721 		struct folio *folio;
4722 		struct address_space *mapping;
4723 		unsigned int target_order = new_order;
4724 
4725 		if (!vma)
4726 			break;
4727 
4728 		/* skip special VMA and hugetlb VMA */
4729 		if (vma_not_suitable_for_thp_split(vma)) {
4730 			addr = vma->vm_end;
4731 			continue;
4732 		}
4733 
4734 		folio = folio_walk_start(&fw, vma, addr, 0);
4735 		if (!folio)
4736 			continue;
4737 
4738 		if (!is_transparent_hugepage(folio))
4739 			goto next;
4740 
4741 		if (!folio_test_anon(folio)) {
4742 			mapping = folio->mapping;
4743 			target_order = max(new_order,
4744 					   mapping_min_folio_order(mapping));
4745 		}
4746 
4747 		if (target_order >= folio_order(folio))
4748 			goto next;
4749 
4750 		total++;
4751 		/*
4752 		 * For folios with private data, split_huge_page_to_list_to_order()
4753 		 * will try to drop it before the split and then check whether the
4754 		 * folio can be split. So skip the check here.
4755 		 */
4756 		if (!folio_test_private(folio) &&
4757 		    folio_expected_ref_count(folio) != folio_ref_count(folio))
4758 			goto next;
4759 
4760 		if (!folio_trylock(folio))
4761 			goto next;
4762 		folio_get(folio);
4763 		folio_walk_end(&fw, vma);
4764 
4765 		if (!folio_test_anon(folio) && folio->mapping != mapping)
4766 			goto unlock;
4767 
4768 		if (in_folio_offset < 0 ||
4769 		    in_folio_offset >= folio_nr_pages(folio)) {
4770 			if (!split_folio_to_order(folio, target_order))
4771 				split++;
4772 		} else {
4773 			struct page *split_at = folio_page(folio,
4774 							   in_folio_offset);
4775 			if (!folio_split(folio, target_order, split_at, NULL))
4776 				split++;
4777 		}
4778 
4779 unlock:
4781 		folio_unlock(folio);
4782 		folio_put(folio);
4783 
4784 		cond_resched();
4785 		continue;
4786 next:
4787 		folio_walk_end(&fw, vma);
4788 		cond_resched();
4789 	}
4790 	mmap_read_unlock(mm);
4791 	mmput(mm);
4792 
4793 	pr_debug("%lu of %lu THP split\n", split, total);
4794 
4795 out:
4796 	return ret;
4797 }
4798 
4799 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
4800 				pgoff_t off_end, unsigned int new_order,
4801 				long in_folio_offset)
4802 {
4803 	struct file *candidate;
4804 	struct address_space *mapping;
4805 	pgoff_t index;
4806 	int nr_pages = 1;
4807 	unsigned long total = 0, split = 0;
4808 	unsigned int min_order;
4809 	unsigned int target_order;
4810 
4811 	CLASS(filename_kernel, file)(file_path);
4812 	candidate = file_open_name(file, O_RDONLY, 0);
4813 	if (IS_ERR(candidate))
4814 		return -EINVAL;
4815 
4816 	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n",
4817 		 file_path, off_start, off_end, new_order, in_folio_offset);
4818 
4819 	mapping = candidate->f_mapping;
4820 	min_order = mapping_min_folio_order(mapping);
4821 	target_order = max(new_order, min_order);
4822 
4823 	for (index = off_start; index < off_end; index += nr_pages) {
4824 		struct folio *folio = filemap_get_folio(mapping, index);
4825 
4826 		nr_pages = 1;
4827 		if (IS_ERR(folio))
4828 			continue;
4829 
4830 		if (!folio_test_large(folio))
4831 			goto next;
4832 
4833 		total++;
4834 		nr_pages = folio_nr_pages(folio);
4835 
4836 		if (target_order >= folio_order(folio))
4837 			goto next;
4838 
4839 		if (!folio_trylock(folio))
4840 			goto next;
4841 
4842 		if (folio->mapping != mapping)
4843 			goto unlock;
4844 
4845 		if (in_folio_offset < 0 || in_folio_offset >= nr_pages) {
4846 			if (!split_folio_to_order(folio, target_order))
4847 				split++;
4848 		} else {
4849 			struct page *split_at = folio_page(folio,
4850 							   in_folio_offset);
4851 			if (!folio_split(folio, target_order, split_at, NULL))
4852 				split++;
4853 		}
4854 
4855 unlock:
4856 		folio_unlock(folio);
4857 next:
4858 		folio_put(folio);
4859 		cond_resched();
4860 	}
4861 
4862 	filp_close(candidate, NULL);
4863 	pr_debug("%lu of %lu file-backed THP split\n", split, total);
4864 	return 0;
4865 }
4866 
4867 #define MAX_INPUT_BUF_SZ 255
4868 
4869 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
4870 				size_t count, loff_t *ppops)
4871 {
4872 	static DEFINE_MUTEX(split_debug_mutex);
4873 	ssize_t ret;
4874 	/*
4875 	 * Holds "pid,start_vaddr,end_vaddr,new_order[,in_folio_offset]"
4876 	 * or "file_path,off_start,off_end,new_order[,in_folio_offset]".
4877 	 */
4878 	char input_buf[MAX_INPUT_BUF_SZ];
4879 	int pid;
4880 	unsigned long vaddr_start, vaddr_end;
4881 	unsigned int new_order = 0;
4882 	long in_folio_offset = -1;
4883 
4884 	ret = mutex_lock_interruptible(&split_debug_mutex);
4885 	if (ret)
4886 		return ret;
4887 
4888 	ret = -EFAULT;
4889 
4890 	memset(input_buf, 0, MAX_INPUT_BUF_SZ);
4891 	if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
4892 		goto out;
4893 
4894 	input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
4895 
4896 	if (input_buf[0] == '/') {
4897 		char *tok;
4898 		char *tok_buf = input_buf;
4899 		char file_path[MAX_INPUT_BUF_SZ];
4900 		pgoff_t off_start = 0, off_end = 0;
4901 		size_t input_len = strlen(input_buf);
4902 
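		/*
		 * strsep() splits at the first ',', so a file path that
		 * itself contains a comma cannot be expressed here.
		 */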
4903 		tok = strsep(&tok_buf, ",");
4904 		if (tok && tok_buf) {
4905 			strscpy(file_path, tok);
4906 		} else {
4907 			ret = -EINVAL;
4908 			goto out;
4909 		}
4910 
4911 		ret = sscanf(tok_buf, "0x%lx,0x%lx,%d,%ld", &off_start, &off_end,
4912 				&new_order, &in_folio_offset);
4913 		if (ret != 2 && ret != 3 && ret != 4) {
4914 			ret = -EINVAL;
4915 			goto out;
4916 		}
4917 		ret = split_huge_pages_in_file(file_path, off_start, off_end,
4918 				new_order, in_folio_offset);
4919 		if (!ret)
4920 			ret = input_len;
4921 
4922 		goto out;
4923 	}
4924 
4925 	ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d,%ld", &pid, &vaddr_start,
4926 			&vaddr_end, &new_order, &in_folio_offset);
4927 	if (ret == 1 && pid == 1) {
4928 		split_huge_pages_all();
4929 		ret = strlen(input_buf);
4930 		goto out;
4931 	} else if (ret != 3 && ret != 4 && ret != 5) {
4932 		ret = -EINVAL;
4933 		goto out;
4934 	}
4935 
4936 	ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order,
4937 			in_folio_offset);
4938 	if (!ret)
4939 		ret = strlen(input_buf);
4940 out:
4941 	mutex_unlock(&split_debug_mutex);
4942 	return ret;
4944 }
4945 
4946 static const struct file_operations split_huge_pages_fops = {
4947 	.owner	 = THIS_MODULE,
4948 	.write	 = split_huge_pages_write,
4949 };
4950 
4951 static int __init split_huge_pages_debugfs(void)
4952 {
4953 	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
4954 			    &split_huge_pages_fops);
4955 	return 0;
4956 }
4957 late_initcall(split_huge_pages_debugfs);
4958 #endif
4959 
4960 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
4961 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
4962 		struct page *page)
4963 {
4964 	struct folio *folio = page_folio(page);
4965 	struct vm_area_struct *vma = pvmw->vma;
4966 	struct mm_struct *mm = vma->vm_mm;
4967 	unsigned long address = pvmw->address;
4968 	bool anon_exclusive;
4969 	pmd_t pmdval;
4970 	swp_entry_t entry;
4971 	pmd_t pmdswp;
4972 
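	/* Bail out unless the walk found a PMD mapping, not a PTE-mapped THP. */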
4973 	if (!(pvmw->pmd && !pvmw->pte))
4974 		return 0;
4975 
4976 	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
4977 	if (unlikely(!pmd_present(*pvmw->pmd)))
4978 		pmdval = pmdp_huge_get_and_clear(vma->vm_mm, address, pvmw->pmd);
4979 	else
4980 		pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
4981 
4982 	/* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
4983 	anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
4984 	if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
4985 		set_pmd_at(mm, address, pvmw->pmd, pmdval);
4986 		return -EBUSY;
4987 	}
4988 
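	/*
	 * Encode write permission and anon exclusivity in the migration
	 * entry so remove_migration_pmd() can restore an equivalent
	 * mapping later.
	 */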
4989 	if (pmd_dirty(pmdval))
4990 		folio_mark_dirty(folio);
4991 	if (pmd_write(pmdval))
4992 		entry = make_writable_migration_entry(page_to_pfn(page));
4993 	else if (anon_exclusive)
4994 		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
4995 	else
4996 		entry = make_readable_migration_entry(page_to_pfn(page));
4997 	if (pmd_young(pmdval))
4998 		entry = make_migration_entry_young(entry);
4999 	if (pmd_dirty(pmdval))
5000 		entry = make_migration_entry_dirty(entry);
5001 	pmdswp = swp_entry_to_pmd(entry);
5002 	if (pmd_soft_dirty(pmdval))
5003 		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
5004 	if (pmd_uffd_wp(pmdval))
5005 		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
5006 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
5007 	folio_remove_rmap_pmd(folio, page, vma);
5008 	folio_put(folio);
5009 	trace_set_migration_pmd(address, pmd_val(pmdswp));
5010 
5011 	return 0;
5012 }
5013 
5014 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
5015 {
5016 	struct folio *folio = page_folio(new);
5017 	struct vm_area_struct *vma = pvmw->vma;
5018 	struct mm_struct *mm = vma->vm_mm;
5019 	unsigned long address = pvmw->address;
5020 	unsigned long haddr = address & HPAGE_PMD_MASK;
5021 	pmd_t pmde;
5022 	softleaf_t entry;
5023 
5024 	if (!(pvmw->pmd && !pvmw->pte))
5025 		return;
5026 
5027 	entry = softleaf_from_pmd(*pvmw->pmd);
5028 	folio_get(folio);
5029 	pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
5030 
5031 	if (pmd_swp_soft_dirty(*pvmw->pmd))
5032 		pmde = pmd_mksoft_dirty(pmde);
5033 	if (softleaf_is_migration_write(entry))
5034 		pmde = pmd_mkwrite(pmde, vma);
5035 	if (pmd_swp_uffd_wp(*pvmw->pmd))
5036 		pmde = pmd_mkuffd_wp(pmde);
5037 	if (!softleaf_is_migration_young(entry))
5038 		pmde = pmd_mkold(pmde);
5039 	/* NOTE: pmd_mkdirty() may also set soft-dirty on some architectures. */
5040 	if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
5041 		pmde = pmd_mkdirty(pmde);
5042 
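	/*
	 * Device private folios cannot be mapped by a present PMD; map
	 * them via a device private swap entry instead.
	 */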
5043 	if (folio_is_device_private(folio)) {
5044 		swp_entry_t entry;
5045 
5046 		if (pmd_write(pmde))
5047 			entry = make_writable_device_private_entry(
5048 							page_to_pfn(new));
5049 		else
5050 			entry = make_readable_device_private_entry(
5051 							page_to_pfn(new));
5052 		pmde = swp_entry_to_pmd(entry);
5053 
5054 		if (pmd_swp_soft_dirty(*pvmw->pmd))
5055 			pmde = pmd_swp_mksoft_dirty(pmde);
5056 		if (pmd_swp_uffd_wp(*pvmw->pmd))
5057 			pmde = pmd_swp_mkuffd_wp(pmde);
5058 	}
5059 
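	/*
	 * A writable or readable-exclusive migration entry implies the
	 * page was anon-exclusive, so restore that via the rmap flags.
	 */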
5060 	if (folio_test_anon(folio)) {
5061 		rmap_t rmap_flags = RMAP_NONE;
5062 
5063 		if (!softleaf_is_migration_read(entry))
5064 			rmap_flags |= RMAP_EXCLUSIVE;
5065 
5066 		folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
5067 	} else {
5068 		folio_add_file_rmap_pmd(folio, new, vma);
5069 	}
5070 	VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
5071 	set_pmd_at(mm, haddr, pvmw->pmd, pmde);
5072 
5073 	/* No need to invalidate - it was non-present before */
5074 	update_mmu_cache_pmd(vma, address, pvmw->pmd);
5075 	trace_remove_migration_pmd(address, pmd_val(pmde));
5076 }
5077 #endif
5078