1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/mmu_notifier.h>
8 #include <linux/rmap.h>
9 #include <linux/swap.h>
10 #include <linux/mm_inline.h>
11 #include <linux/kthread.h>
12 #include <linux/khugepaged.h>
13 #include <linux/freezer.h>
14 #include <linux/mman.h>
15 #include <linux/hashtable.h>
16 #include <linux/userfaultfd_k.h>
17 #include <linux/page_idle.h>
18 #include <linux/page_table_check.h>
19 #include <linux/rcupdate_wait.h>
20 #include <linux/swapops.h>
21 #include <linux/shmem_fs.h>
22 #include <linux/dax.h>
23 #include <linux/ksm.h>
24
25 #include <asm/tlb.h>
26 #include <asm/pgalloc.h>
27 #include "internal.h"
28 #include "mm_slot.h"
29
30 enum scan_result {
31 SCAN_FAIL,
32 SCAN_SUCCEED,
33 SCAN_PMD_NULL,
34 SCAN_PMD_NONE,
35 SCAN_PMD_MAPPED,
36 SCAN_EXCEED_NONE_PTE,
37 SCAN_EXCEED_SWAP_PTE,
38 SCAN_EXCEED_SHARED_PTE,
39 SCAN_PTE_NON_PRESENT,
40 SCAN_PTE_UFFD_WP,
41 SCAN_PTE_MAPPED_HUGEPAGE,
42 SCAN_PAGE_RO,
43 SCAN_LACK_REFERENCED_PAGE,
44 SCAN_PAGE_NULL,
45 SCAN_SCAN_ABORT,
46 SCAN_PAGE_COUNT,
47 SCAN_PAGE_LRU,
48 SCAN_PAGE_LOCK,
49 SCAN_PAGE_ANON,
50 SCAN_PAGE_COMPOUND,
51 SCAN_ANY_PROCESS,
52 SCAN_VMA_NULL,
53 SCAN_VMA_CHECK,
54 SCAN_ADDRESS_RANGE,
55 SCAN_DEL_PAGE_LRU,
56 SCAN_ALLOC_HUGE_PAGE_FAIL,
57 SCAN_CGROUP_CHARGE_FAIL,
58 SCAN_TRUNCATED,
59 SCAN_PAGE_HAS_PRIVATE,
60 SCAN_STORE_FAILED,
61 SCAN_COPY_MC,
62 SCAN_PAGE_FILLED,
63 };
64
65 #define CREATE_TRACE_POINTS
66 #include <trace/events/huge_memory.h>
67
68 static struct task_struct *khugepaged_thread __read_mostly;
69 static DEFINE_MUTEX(khugepaged_mutex);
70
71 /* default scan 8*512 ptes (or vmas) every 10 seconds */
72 static unsigned int khugepaged_pages_to_scan __read_mostly;
73 static unsigned int khugepaged_pages_collapsed;
74 static unsigned int khugepaged_full_scans;
75 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
76 /* during fragmentation poll the hugepage allocator once every minute */
77 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
78 static unsigned long khugepaged_sleep_expire;
79 static DEFINE_SPINLOCK(khugepaged_mm_lock);
80 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
81 /*
82 * By default, collapse hugepages if there is at least one pte mapped
83 * the way it would have been had the vma been large enough during the
84 * page fault.
85 *
86 * Note that these are only respected if collapse was initiated by khugepaged.
87 */
88 unsigned int khugepaged_max_ptes_none __read_mostly;
89 static unsigned int khugepaged_max_ptes_swap __read_mostly;
90 static unsigned int khugepaged_max_ptes_shared __read_mostly;
91
92 #define MM_SLOTS_HASH_BITS 10
93 static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
94
95 static struct kmem_cache *mm_slot_cache __ro_after_init;
96
97 struct collapse_control {
98 bool is_khugepaged;
99
100 /* Num pages scanned per node */
101 u32 node_load[MAX_NUMNODES];
102
103 /* nodemask for allocation fallback */
104 nodemask_t alloc_nmask;
105 };
106
107 /**
108 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
109 * @slot: hash lookup from mm to mm_slot
110 */
111 struct khugepaged_mm_slot {
112 struct mm_slot slot;
113 };
114
115 /**
116 * struct khugepaged_scan - cursor for scanning
117 * @mm_head: the head of the mm list to scan
118 * @mm_slot: the current mm_slot we are scanning
119 * @address: the next address inside that to be scanned
120 *
121 * There is only one khugepaged_scan instance of this cursor structure.
122 */
123 struct khugepaged_scan {
124 struct list_head mm_head;
125 struct khugepaged_mm_slot *mm_slot;
126 unsigned long address;
127 };
128
129 static struct khugepaged_scan khugepaged_scan = {
130 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
131 };
132
133 #ifdef CONFIG_SYSFS
134 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
135 struct kobj_attribute *attr,
136 char *buf)
137 {
138 return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
139 }
140
141 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
142 struct kobj_attribute *attr,
143 const char *buf, size_t count)
144 {
145 unsigned int msecs;
146 int err;
147
148 err = kstrtouint(buf, 10, &msecs);
149 if (err)
150 return -EINVAL;
151
152 khugepaged_scan_sleep_millisecs = msecs;
153 khugepaged_sleep_expire = 0;
154 wake_up_interruptible(&khugepaged_wait);
155
156 return count;
157 }
158 static struct kobj_attribute scan_sleep_millisecs_attr =
159 __ATTR_RW(scan_sleep_millisecs);
160
161 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
162 struct kobj_attribute *attr,
163 char *buf)
164 {
165 return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
166 }
167
168 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
169 struct kobj_attribute *attr,
170 const char *buf, size_t count)
171 {
172 unsigned int msecs;
173 int err;
174
175 err = kstrtouint(buf, 10, &msecs);
176 if (err)
177 return -EINVAL;
178
179 khugepaged_alloc_sleep_millisecs = msecs;
180 khugepaged_sleep_expire = 0;
181 wake_up_interruptible(&khugepaged_wait);
182
183 return count;
184 }
185 static struct kobj_attribute alloc_sleep_millisecs_attr =
186 __ATTR_RW(alloc_sleep_millisecs);
187
188 static ssize_t pages_to_scan_show(struct kobject *kobj,
189 struct kobj_attribute *attr,
190 char *buf)
191 {
192 return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
193 }
194 static ssize_t pages_to_scan_store(struct kobject *kobj,
195 struct kobj_attribute *attr,
196 const char *buf, size_t count)
197 {
198 unsigned int pages;
199 int err;
200
201 err = kstrtouint(buf, 10, &pages);
202 if (err || !pages)
203 return -EINVAL;
204
205 khugepaged_pages_to_scan = pages;
206
207 return count;
208 }
209 static struct kobj_attribute pages_to_scan_attr =
210 __ATTR_RW(pages_to_scan);
211
212 static ssize_t pages_collapsed_show(struct kobject *kobj,
213 struct kobj_attribute *attr,
214 char *buf)
215 {
216 return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
217 }
218 static struct kobj_attribute pages_collapsed_attr =
219 __ATTR_RO(pages_collapsed);
220
221 static ssize_t full_scans_show(struct kobject *kobj,
222 struct kobj_attribute *attr,
223 char *buf)
224 {
225 return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
226 }
227 static struct kobj_attribute full_scans_attr =
228 __ATTR_RO(full_scans);
229
230 static ssize_t defrag_show(struct kobject *kobj,
231 struct kobj_attribute *attr, char *buf)
232 {
233 return single_hugepage_flag_show(kobj, attr, buf,
234 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
235 }
236 static ssize_t defrag_store(struct kobject *kobj,
237 struct kobj_attribute *attr,
238 const char *buf, size_t count)
239 {
240 return single_hugepage_flag_store(kobj, attr, buf, count,
241 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
242 }
243 static struct kobj_attribute khugepaged_defrag_attr =
244 __ATTR_RW(defrag);
245
246 /*
247 * max_ptes_none controls whether khugepaged should collapse hugepages over
248 * any unmapped ptes, in turn potentially increasing the memory
249 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
250 * reduce the available free memory in the system as it
251 * runs. Increasing max_ptes_none will instead potentially reduce the
252 * free memory in the system during the khugepaged scan.
253 */
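/*
 * Example (assuming the "khugepaged" attribute group below is registered
 * under /sys/kernel/mm/transparent_hugepage/, as in the standard THP
 * sysfs layout):
 *   echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 */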
254 static ssize_t max_ptes_none_show(struct kobject *kobj,
255 struct kobj_attribute *attr,
256 char *buf)
257 {
258 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
259 }
260 static ssize_t max_ptes_none_store(struct kobject *kobj,
261 struct kobj_attribute *attr,
262 const char *buf, size_t count)
263 {
264 int err;
265 unsigned long max_ptes_none;
266
267 err = kstrtoul(buf, 10, &max_ptes_none);
268 if (err || max_ptes_none > HPAGE_PMD_NR - 1)
269 return -EINVAL;
270
271 khugepaged_max_ptes_none = max_ptes_none;
272
273 return count;
274 }
275 static struct kobj_attribute khugepaged_max_ptes_none_attr =
276 __ATTR_RW(max_ptes_none);
277
278 static ssize_t max_ptes_swap_show(struct kobject *kobj,
279 struct kobj_attribute *attr,
280 char *buf)
281 {
282 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
283 }
284
285 static ssize_t max_ptes_swap_store(struct kobject *kobj,
286 struct kobj_attribute *attr,
287 const char *buf, size_t count)
288 {
289 int err;
290 unsigned long max_ptes_swap;
291
292 err = kstrtoul(buf, 10, &max_ptes_swap);
293 if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
294 return -EINVAL;
295
296 khugepaged_max_ptes_swap = max_ptes_swap;
297
298 return count;
299 }
300
301 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
302 __ATTR_RW(max_ptes_swap);
303
304 static ssize_t max_ptes_shared_show(struct kobject *kobj,
305 struct kobj_attribute *attr,
306 char *buf)
307 {
308 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
309 }
310
311 static ssize_t max_ptes_shared_store(struct kobject *kobj,
312 struct kobj_attribute *attr,
313 const char *buf, size_t count)
314 {
315 int err;
316 unsigned long max_ptes_shared;
317
318 err = kstrtoul(buf, 10, &max_ptes_shared);
319 if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
320 return -EINVAL;
321
322 khugepaged_max_ptes_shared = max_ptes_shared;
323
324 return count;
325 }
326
327 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
328 __ATTR_RW(max_ptes_shared);
329
330 static struct attribute *khugepaged_attr[] = {
331 &khugepaged_defrag_attr.attr,
332 &khugepaged_max_ptes_none_attr.attr,
333 &khugepaged_max_ptes_swap_attr.attr,
334 &khugepaged_max_ptes_shared_attr.attr,
335 &pages_to_scan_attr.attr,
336 &pages_collapsed_attr.attr,
337 &full_scans_attr.attr,
338 &scan_sleep_millisecs_attr.attr,
339 &alloc_sleep_millisecs_attr.attr,
340 NULL,
341 };
342
343 struct attribute_group khugepaged_attr_group = {
344 .attrs = khugepaged_attr,
345 .name = "khugepaged",
346 };
347 #endif /* CONFIG_SYSFS */
348
349 int hugepage_madvise(struct vm_area_struct *vma,
350 unsigned long *vm_flags, int advice)
351 {
352 switch (advice) {
353 case MADV_HUGEPAGE:
354 #ifdef CONFIG_S390
355 /*
356 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
357 * can't handle this properly after s390_enable_sie, so we simply
358 * ignore the madvise to prevent qemu from causing a SIGSEGV.
359 */
360 if (mm_has_pgste(vma->vm_mm))
361 return 0;
362 #endif
363 *vm_flags &= ~VM_NOHUGEPAGE;
364 *vm_flags |= VM_HUGEPAGE;
365 /*
366 * If the vma becomes good for khugepaged to scan,
367 * register it here without waiting for a page fault that
368 * may not happen any time soon.
369 */
370 khugepaged_enter_vma(vma, *vm_flags);
371 break;
372 case MADV_NOHUGEPAGE:
373 *vm_flags &= ~VM_HUGEPAGE;
374 *vm_flags |= VM_NOHUGEPAGE;
375 /*
376 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
377 * this vma even if we leave the mm registered in khugepaged if
378 * it got registered before VM_NOHUGEPAGE was set.
379 */
380 break;
381 }
382
383 return 0;
384 }
385
386 int __init khugepaged_init(void)
387 {
388 mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
389 if (!mm_slot_cache)
390 return -ENOMEM;
391
392 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
393 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
394 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
395 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
396
397 return 0;
398 }
399
400 void __init khugepaged_destroy(void)
401 {
402 kmem_cache_destroy(mm_slot_cache);
403 }
404
405 static inline int hpage_collapse_test_exit(struct mm_struct *mm)
406 {
407 return atomic_read(&mm->mm_users) == 0;
408 }
409
410 static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
411 {
412 return hpage_collapse_test_exit(mm) ||
413 test_bit(MMF_DISABLE_THP, &mm->flags);
414 }
415
416 static bool hugepage_pmd_enabled(void)
417 {
418 /*
419 * We cover the anon, shmem and the file-backed case here; file-backed
420 * hugepages, when configured in, are determined by the global control.
421 * Anon pmd-sized hugepages are determined by the pmd-size control.
422 * Shmem pmd-sized hugepages are also determined by its pmd-size control,
423 * except when the global shmem_huge is set to SHMEM_HUGE_DENY.
424 */
425 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
426 hugepage_global_enabled())
427 return true;
428 if (test_bit(PMD_ORDER, &huge_anon_orders_always))
429 return true;
430 if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
431 return true;
432 if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
433 hugepage_global_enabled())
434 return true;
435 if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
436 return true;
437 return false;
438 }
439
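/*
 * Register @mm with khugepaged: allocate an mm_slot, hash it and add it
 * to the tail of the scan list, take an mmgrab() reference, and wake
 * khugepaged if the scan list was previously empty.
 */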
440 void __khugepaged_enter(struct mm_struct *mm)
441 {
442 struct khugepaged_mm_slot *mm_slot;
443 struct mm_slot *slot;
444 int wakeup;
445
446 /* __khugepaged_exit() must not run from under us */
447 VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
448 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
449 return;
450
451 mm_slot = mm_slot_alloc(mm_slot_cache);
452 if (!mm_slot)
453 return;
454
455 slot = &mm_slot->slot;
456
457 spin_lock(&khugepaged_mm_lock);
458 mm_slot_insert(mm_slots_hash, mm, slot);
459 /*
460 * Insert just behind the scanning cursor, to let the area settle
461 * down a little.
462 */
463 wakeup = list_empty(&khugepaged_scan.mm_head);
464 list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
465 spin_unlock(&khugepaged_mm_lock);
466
467 mmgrab(mm);
468 if (wakeup)
469 wake_up_interruptible(&khugepaged_wait);
470 }
471
472 void khugepaged_enter_vma(struct vm_area_struct *vma,
473 unsigned long vm_flags)
474 {
475 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
476 hugepage_pmd_enabled()) {
477 if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
478 PMD_ORDER))
479 __khugepaged_enter(vma->vm_mm);
480 }
481 }
482
483 void __khugepaged_exit(struct mm_struct *mm)
484 {
485 struct khugepaged_mm_slot *mm_slot;
486 struct mm_slot *slot;
487 int free = 0;
488
489 spin_lock(&khugepaged_mm_lock);
490 slot = mm_slot_lookup(mm_slots_hash, mm);
491 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
492 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
493 hash_del(&slot->hash);
494 list_del(&slot->mm_node);
495 free = 1;
496 }
497 spin_unlock(&khugepaged_mm_lock);
498
499 if (free) {
500 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
501 mm_slot_free(mm_slot_cache, mm_slot);
502 mmdrop(mm);
503 } else if (mm_slot) {
504 /*
505 * This is required to serialize against
506 * hpage_collapse_test_exit() (which is guaranteed to run
507 * under mmap_lock read mode). Stop here (after we return, all
508 * pagetables will be destroyed) until khugepaged has finished
509 * working on the pagetables under the mmap_lock.
510 */
511 mmap_write_lock(mm);
512 mmap_write_unlock(mm);
513 }
514 }
515
516 static void release_pte_folio(struct folio *folio)
517 {
518 node_stat_mod_folio(folio,
519 NR_ISOLATED_ANON + folio_is_file_lru(folio),
520 -folio_nr_pages(folio));
521 folio_unlock(folio);
522 folio_putback_lru(folio);
523 }
524
525 static void release_pte_pages(pte_t *pte, pte_t *_pte,
526 struct list_head *compound_pagelist)
527 {
528 struct folio *folio, *tmp;
529
530 while (--_pte >= pte) {
531 pte_t pteval = ptep_get(_pte);
532 unsigned long pfn;
533
534 if (pte_none(pteval))
535 continue;
536 pfn = pte_pfn(pteval);
537 if (is_zero_pfn(pfn))
538 continue;
539 folio = pfn_folio(pfn);
540 if (folio_test_large(folio))
541 continue;
542 release_pte_folio(folio);
543 }
544
545 list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
546 list_del(&folio->lru);
547 release_pte_folio(folio);
548 }
549 }
550
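/*
 * A folio's refcount should equal its mapcount, plus one reference per
 * page for the swap cache (or the page cache for non-anon folios), plus
 * one for private data. Any extra reference indicates a GUP or other
 * external pin, which makes the folio unsuitable for collapse.
 */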
551 static bool is_refcount_suitable(struct folio *folio)
552 {
553 int expected_refcount = folio_mapcount(folio);
554
555 if (!folio_test_anon(folio) || folio_test_swapcache(folio))
556 expected_refcount += folio_nr_pages(folio);
557
558 if (folio_test_private(folio))
559 expected_refcount++;
560
561 return folio_ref_count(folio) == expected_refcount;
562 }
563
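/*
 * Walk the HPAGE_PMD_NR ptes starting at @address, enforcing the
 * none/zero and shared limits, and trylock + isolate each mapped folio
 * from the LRU so it cannot be freed or split while its contents are
 * copied. On failure, every folio isolated so far is released again.
 */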
564 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
565 unsigned long address,
566 pte_t *pte,
567 struct collapse_control *cc,
568 struct list_head *compound_pagelist)
569 {
570 struct page *page = NULL;
571 struct folio *folio = NULL;
572 pte_t *_pte;
573 int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
574 bool writable = false;
575
576 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
577 _pte++, address += PAGE_SIZE) {
578 pte_t pteval = ptep_get(_pte);
579 if (pte_none(pteval) || (pte_present(pteval) &&
580 is_zero_pfn(pte_pfn(pteval)))) {
581 ++none_or_zero;
582 if (!userfaultfd_armed(vma) &&
583 (!cc->is_khugepaged ||
584 none_or_zero <= khugepaged_max_ptes_none)) {
585 continue;
586 } else {
587 result = SCAN_EXCEED_NONE_PTE;
588 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
589 goto out;
590 }
591 }
592 if (!pte_present(pteval)) {
593 result = SCAN_PTE_NON_PRESENT;
594 goto out;
595 }
596 if (pte_uffd_wp(pteval)) {
597 result = SCAN_PTE_UFFD_WP;
598 goto out;
599 }
600 page = vm_normal_page(vma, address, pteval);
601 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
602 result = SCAN_PAGE_NULL;
603 goto out;
604 }
605
606 folio = page_folio(page);
607 VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
608
609 /* See hpage_collapse_scan_pmd(). */
610 if (folio_maybe_mapped_shared(folio)) {
611 ++shared;
612 if (cc->is_khugepaged &&
613 shared > khugepaged_max_ptes_shared) {
614 result = SCAN_EXCEED_SHARED_PTE;
615 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
616 goto out;
617 }
618 }
619
620 if (folio_test_large(folio)) {
621 struct folio *f;
622
623 /*
624 * Check if we have dealt with the compound page
625 * already
626 */
627 list_for_each_entry(f, compound_pagelist, lru) {
628 if (folio == f)
629 goto next;
630 }
631 }
632
633 /*
634 * We can do it before folio_isolate_lru because the
635 * folio can't be freed from under us. NOTE: PG_lock
636 * is needed to serialize against split_huge_page
637 * when invoked from the VM.
638 */
639 if (!folio_trylock(folio)) {
640 result = SCAN_PAGE_LOCK;
641 goto out;
642 }
643
644 /*
645 * Check if the page has any GUP (or other external) pins.
646 *
647 * The page table that maps the page has already been unlinked
648 * from the page table tree and this process cannot get
649 * an additional pin on the page.
650 *
651 * New pins can come later if the page is shared across fork,
652 * but not from this process. The other process cannot write to
653 * the page, only trigger CoW.
654 */
655 if (!is_refcount_suitable(folio)) {
656 folio_unlock(folio);
657 result = SCAN_PAGE_COUNT;
658 goto out;
659 }
660
661 /*
662 * Isolate the page to avoid collapsing a hugepage
663 * currently in use by the VM.
664 */
665 if (!folio_isolate_lru(folio)) {
666 folio_unlock(folio);
667 result = SCAN_DEL_PAGE_LRU;
668 goto out;
669 }
670 node_stat_mod_folio(folio,
671 NR_ISOLATED_ANON + folio_is_file_lru(folio),
672 folio_nr_pages(folio));
673 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
674 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
675
676 if (folio_test_large(folio))
677 list_add_tail(&folio->lru, compound_pagelist);
678 next:
679 /*
680 * If collapse was initiated by khugepaged, check that there is
681 * enough young ptes to justify collapsing the page.
682 */
683 if (cc->is_khugepaged &&
684 (pte_young(pteval) || folio_test_young(folio) ||
685 folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
686 address)))
687 referenced++;
688
689 if (pte_write(pteval))
690 writable = true;
691 }
692
693 if (unlikely(!writable)) {
694 result = SCAN_PAGE_RO;
695 } else if (unlikely(cc->is_khugepaged && !referenced)) {
696 result = SCAN_LACK_REFERENCED_PAGE;
697 } else {
698 result = SCAN_SUCCEED;
699 trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
700 referenced, writable, result);
701 return result;
702 }
703 out:
704 release_pte_pages(pte, _pte, compound_pagelist);
705 trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
706 referenced, writable, result);
707 return result;
708 }
709
710 static void __collapse_huge_page_copy_succeeded(pte_t *pte,
711 struct vm_area_struct *vma,
712 unsigned long address,
713 spinlock_t *ptl,
714 struct list_head *compound_pagelist)
715 {
716 struct folio *src, *tmp;
717 pte_t *_pte;
718 pte_t pteval;
719
720 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
721 _pte++, address += PAGE_SIZE) {
722 pteval = ptep_get(_pte);
723 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
724 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
725 if (is_zero_pfn(pte_pfn(pteval))) {
726 /*
727 * ptl mostly unnecessary.
728 */
729 spin_lock(ptl);
730 ptep_clear(vma->vm_mm, address, _pte);
731 spin_unlock(ptl);
732 ksm_might_unmap_zero_page(vma->vm_mm, pteval);
733 }
734 } else {
735 struct page *src_page = pte_page(pteval);
736
737 src = page_folio(src_page);
738 if (!folio_test_large(src))
739 release_pte_folio(src);
740 /*
741 * ptl mostly unnecessary, but preempt has to
742 * be disabled to update the per-cpu stats
743 * inside folio_remove_rmap_pte().
744 */
745 spin_lock(ptl);
746 ptep_clear(vma->vm_mm, address, _pte);
747 folio_remove_rmap_pte(src, src_page, vma);
748 spin_unlock(ptl);
749 free_page_and_swap_cache(src_page);
750 }
751 }
752
753 list_for_each_entry_safe(src, tmp, compound_pagelist, lru) {
754 list_del(&src->lru);
755 node_stat_sub_folio(src, NR_ISOLATED_ANON +
756 folio_is_file_lru(src));
757 folio_unlock(src);
758 free_swap_cache(src);
759 folio_putback_lru(src);
760 }
761 }
762
763 static void __collapse_huge_page_copy_failed(pte_t *pte,
764 pmd_t *pmd,
765 pmd_t orig_pmd,
766 struct vm_area_struct *vma,
767 struct list_head *compound_pagelist)
768 {
769 spinlock_t *pmd_ptl;
770
771 /*
772 * Re-establish the PMD to point to the original page table
773 * entry. Restoring PMD needs to be done prior to releasing
774 * pages. Since pages are still isolated and locked here,
775 * acquiring anon_vma_lock_write is unnecessary.
776 */
777 pmd_ptl = pmd_lock(vma->vm_mm, pmd);
778 pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
779 spin_unlock(pmd_ptl);
780 /*
781 * Release both raw and compound pages isolated
782 * in __collapse_huge_page_isolate.
783 */
784 release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
785 }
786
787 /*
788 * __collapse_huge_page_copy - attempts to copy memory contents from raw
789 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
790 * otherwise restores the original page table and releases isolated raw pages.
791 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
792 *
793 * @pte: starting of the PTEs to copy from
794 * @folio: the new hugepage to copy contents to
795 * @pmd: pointer to the new hugepage's PMD
796 * @orig_pmd: the original raw pages' PMD
797 * @vma: the original raw pages' virtual memory area
798 * @address: starting address to copy
799 * @ptl: lock on raw pages' PTEs
800 * @compound_pagelist: list that stores compound pages
801 */
802 static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
803 pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
804 unsigned long address, spinlock_t *ptl,
805 struct list_head *compound_pagelist)
806 {
807 unsigned int i;
808 int result = SCAN_SUCCEED;
809
810 /*
811 * Copying pages' contents is subject to memory poison at any iteration.
812 */
813 for (i = 0; i < HPAGE_PMD_NR; i++) {
814 pte_t pteval = ptep_get(pte + i);
815 struct page *page = folio_page(folio, i);
816 unsigned long src_addr = address + i * PAGE_SIZE;
817 struct page *src_page;
818
819 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
820 clear_user_highpage(page, src_addr);
821 continue;
822 }
823 src_page = pte_page(pteval);
824 if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) {
825 result = SCAN_COPY_MC;
826 break;
827 }
828 }
829
830 if (likely(result == SCAN_SUCCEED))
831 __collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
832 compound_pagelist);
833 else
834 __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
835 compound_pagelist);
836
837 return result;
838 }
839
840 static void khugepaged_alloc_sleep(void)
841 {
842 DEFINE_WAIT(wait);
843
844 add_wait_queue(&khugepaged_wait, &wait);
845 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
846 schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
847 remove_wait_queue(&khugepaged_wait, &wait);
848 }
849
850 struct collapse_control khugepaged_collapse_control = {
851 .is_khugepaged = true,
852 };
853
854 static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
855 {
856 int i;
857
858 /*
859 * If node_reclaim_mode is disabled, then no extra effort is made to
860 * allocate memory locally.
861 */
862 if (!node_reclaim_enabled())
863 return false;
864
865 /* If there is a count for this node already, it must be acceptable */
866 if (cc->node_load[nid])
867 return false;
868
869 for (i = 0; i < MAX_NUMNODES; i++) {
870 if (!cc->node_load[i])
871 continue;
872 if (node_distance(nid, i) > node_reclaim_distance)
873 return true;
874 }
875 return false;
876 }
877
878 #define khugepaged_defrag() \
879 (transparent_hugepage_flags & \
880 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
881
882 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
883 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
884 {
885 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
886 }
887
888 #ifdef CONFIG_NUMA
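/*
 * Pick the first node that accumulated the most hits in cc->node_load[];
 * every online node tied for that maximum is also added to
 * cc->alloc_nmask as an allocation fallback.
 */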
889 static int hpage_collapse_find_target_node(struct collapse_control *cc)
890 {
891 int nid, target_node = 0, max_value = 0;
892
893 /* find first node with max normal pages hit */
894 for (nid = 0; nid < MAX_NUMNODES; nid++)
895 if (cc->node_load[nid] > max_value) {
896 max_value = cc->node_load[nid];
897 target_node = nid;
898 }
899
900 for_each_online_node(nid) {
901 if (max_value == cc->node_load[nid])
902 node_set(nid, cc->alloc_nmask);
903 }
904
905 return target_node;
906 }
907 #else
908 static int hpage_collapse_find_target_node(struct collapse_control *cc)
909 {
910 return 0;
911 }
912 #endif
913
914 /*
915 * If the mmap_lock was temporarily dropped, revalidate the vma
916 * once the mmap_lock has been re-taken.
917 * Returns an enum scan_result value.
918 */
919
920 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
921 bool expect_anon,
922 struct vm_area_struct **vmap,
923 struct collapse_control *cc)
924 {
925 struct vm_area_struct *vma;
926 unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
927
928 if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
929 return SCAN_ANY_PROCESS;
930
931 *vmap = vma = find_vma(mm, address);
932 if (!vma)
933 return SCAN_VMA_NULL;
934
935 if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
936 return SCAN_ADDRESS_RANGE;
937 if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
938 return SCAN_VMA_CHECK;
939 /*
940 * Anon VMA expected, the address may be unmapped then
941 * remapped to file after khugepaged re-acquired the mmap_lock.
942 *
943 * thp_vma_allowable_order may return true for qualified file
944 * vmas.
945 */
946 if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
947 return SCAN_PAGE_ANON;
948 return SCAN_SUCCEED;
949 }
950
951 static inline int check_pmd_state(pmd_t *pmd)
952 {
953 pmd_t pmde = pmdp_get_lockless(pmd);
954
955 if (pmd_none(pmde))
956 return SCAN_PMD_NONE;
957 if (!pmd_present(pmde))
958 return SCAN_PMD_NULL;
959 if (pmd_trans_huge(pmde))
960 return SCAN_PMD_MAPPED;
961 if (pmd_devmap(pmde))
962 return SCAN_PMD_NULL;
963 if (pmd_bad(pmde))
964 return SCAN_PMD_NULL;
965 return SCAN_SUCCEED;
966 }
967
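/*
 * Look up the PMD for @address and classify it: SCAN_SUCCEED for a
 * present pte-table PMD, SCAN_PMD_MAPPED if it is already a huge PMD,
 * SCAN_PMD_NONE or SCAN_PMD_NULL otherwise.
 */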
968 static int find_pmd_or_thp_or_none(struct mm_struct *mm,
969 unsigned long address,
970 pmd_t **pmd)
971 {
972 *pmd = mm_find_pmd(mm, address);
973 if (!*pmd)
974 return SCAN_PMD_NULL;
975
976 return check_pmd_state(*pmd);
977 }
978
979 static int check_pmd_still_valid(struct mm_struct *mm,
980 unsigned long address,
981 pmd_t *pmd)
982 {
983 pmd_t *new_pmd;
984 int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
985
986 if (result != SCAN_SUCCEED)
987 return result;
988 if (new_pmd != pmd)
989 return SCAN_FAIL;
990 return SCAN_SUCCEED;
991 }
992
993 /*
994 * Bring missing pages in from swap, to complete THP collapse.
995 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
996 *
997 * Called and returns without pte mapped or spinlocks held.
998 * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
999 */
1000 static int __collapse_huge_page_swapin(struct mm_struct *mm,
1001 struct vm_area_struct *vma,
1002 unsigned long haddr, pmd_t *pmd,
1003 int referenced)
1004 {
1005 int swapped_in = 0;
1006 vm_fault_t ret = 0;
1007 unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
1008 int result;
1009 pte_t *pte = NULL;
1010 spinlock_t *ptl;
1011
1012 for (address = haddr; address < end; address += PAGE_SIZE) {
1013 struct vm_fault vmf = {
1014 .vma = vma,
1015 .address = address,
1016 .pgoff = linear_page_index(vma, address),
1017 .flags = FAULT_FLAG_ALLOW_RETRY,
1018 .pmd = pmd,
1019 };
1020
1021 if (!pte++) {
1022 /*
1023 * Here the ptl is only used to check pte_same() in
1024 * do_swap_page(), so readonly version is enough.
1025 */
1026 pte = pte_offset_map_ro_nolock(mm, pmd, address, &ptl);
1027 if (!pte) {
1028 mmap_read_unlock(mm);
1029 result = SCAN_PMD_NULL;
1030 goto out;
1031 }
1032 }
1033
1034 vmf.orig_pte = ptep_get_lockless(pte);
1035 if (!is_swap_pte(vmf.orig_pte))
1036 continue;
1037
1038 vmf.pte = pte;
1039 vmf.ptl = ptl;
1040 ret = do_swap_page(&vmf);
1041 /* Which unmaps pte (after perhaps re-checking the entry) */
1042 pte = NULL;
1043
1044 /*
1045 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
1046 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
1047 * we do not retry here and the swap entry will remain in the
1048 * pagetable, resulting in later failure.
1049 */
1050 if (ret & VM_FAULT_RETRY) {
1051 /* Likely, but not guaranteed, that page lock failed */
1052 result = SCAN_PAGE_LOCK;
1053 goto out;
1054 }
1055 if (ret & VM_FAULT_ERROR) {
1056 mmap_read_unlock(mm);
1057 result = SCAN_FAIL;
1058 goto out;
1059 }
1060 swapped_in++;
1061 }
1062
1063 if (pte)
1064 pte_unmap(pte);
1065
1066 /* Drain LRU cache to remove extra pin on the swapped in pages */
1067 if (swapped_in)
1068 lru_add_drain();
1069
1070 result = SCAN_SUCCEED;
1071 out:
1072 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
1073 return result;
1074 }
1075
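/*
 * Allocate the PMD-order destination folio on the preferred node (with
 * cc->alloc_nmask as fallback) and charge it to the memcg, counting the
 * THP_COLLAPSE_ALLOC / THP_COLLAPSE_ALLOC_FAILED events accordingly.
 */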
1076 static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
1077 struct collapse_control *cc)
1078 {
1079 gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
1080 GFP_TRANSHUGE);
1081 int node = hpage_collapse_find_target_node(cc);
1082 struct folio *folio;
1083
1084 folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
1085 if (!folio) {
1086 *foliop = NULL;
1087 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1088 return SCAN_ALLOC_HUGE_PAGE_FAIL;
1089 }
1090
1091 count_vm_event(THP_COLLAPSE_ALLOC);
1092 if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
1093 folio_put(folio);
1094 *foliop = NULL;
1095 return SCAN_CGROUP_CHARGE_FAIL;
1096 }
1097
1098 count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
1099
1100 *foliop = folio;
1101 return SCAN_SUCCEED;
1102 }
1103
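/*
 * Collapse the PMD-sized range at @address of an anonymous VMA into one
 * huge page: allocate and charge the destination folio, swap in any
 * missing pages if @unmapped, then under the mmap write lock unlink the
 * page table, isolate and copy the small pages, and install the huge PMD.
 * Returns with the mmap_lock released in all cases.
 */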
1104 static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
1105 int referenced, int unmapped,
1106 struct collapse_control *cc)
1107 {
1108 LIST_HEAD(compound_pagelist);
1109 pmd_t *pmd, _pmd;
1110 pte_t *pte;
1111 pgtable_t pgtable;
1112 struct folio *folio;
1113 spinlock_t *pmd_ptl, *pte_ptl;
1114 int result = SCAN_FAIL;
1115 struct vm_area_struct *vma;
1116 struct mmu_notifier_range range;
1117
1118 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1119
1120 /*
1121 * Before allocating the hugepage, release the mmap_lock read lock.
1122 * The allocation can take potentially a long time if it involves
1123 * sync compaction, and we do not need to hold the mmap_lock during
1124 * that. We will recheck the vma after taking it again in write mode.
1125 */
1126 mmap_read_unlock(mm);
1127
1128 result = alloc_charge_folio(&folio, mm, cc);
1129 if (result != SCAN_SUCCEED)
1130 goto out_nolock;
1131
1132 mmap_read_lock(mm);
1133 result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1134 if (result != SCAN_SUCCEED) {
1135 mmap_read_unlock(mm);
1136 goto out_nolock;
1137 }
1138
1139 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1140 if (result != SCAN_SUCCEED) {
1141 mmap_read_unlock(mm);
1142 goto out_nolock;
1143 }
1144
1145 if (unmapped) {
1146 /*
1147 * __collapse_huge_page_swapin will return with mmap_lock
1148 * released when it fails. So we jump to out_nolock directly in
1149 * that case. Continuing to collapse causes inconsistency.
1150 */
1151 result = __collapse_huge_page_swapin(mm, vma, address, pmd,
1152 referenced);
1153 if (result != SCAN_SUCCEED)
1154 goto out_nolock;
1155 }
1156
1157 mmap_read_unlock(mm);
1158 /*
1159 * Prevent all access to pagetables with the exception of
1160 * gup_fast later handled by the ptep_clear_flush and the VM
1161 * handled by the anon_vma lock + PG_lock.
1162 *
1163 * UFFDIO_MOVE is prevented from racing as well thanks to the
1164 * mmap_lock.
1165 */
1166 mmap_write_lock(mm);
1167 result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1168 if (result != SCAN_SUCCEED)
1169 goto out_up_write;
1170 /* check if the pmd is still valid */
1171 result = check_pmd_still_valid(mm, address, pmd);
1172 if (result != SCAN_SUCCEED)
1173 goto out_up_write;
1174
1175 vma_start_write(vma);
1176 anon_vma_lock_write(vma->anon_vma);
1177
1178 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
1179 address + HPAGE_PMD_SIZE);
1180 mmu_notifier_invalidate_range_start(&range);
1181
1182 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1183 /*
1184 * This removes any huge TLB entry from the CPU so we won't allow
1185 * huge and small TLB entries for the same virtual address to
1186 * avoid the risk of CPU bugs in that area.
1187 *
1188 * Parallel GUP-fast is fine since GUP-fast will back off when
1189 * it detects PMD is changed.
1190 */
1191 _pmd = pmdp_collapse_flush(vma, address, pmd);
1192 spin_unlock(pmd_ptl);
1193 mmu_notifier_invalidate_range_end(&range);
1194 tlb_remove_table_sync_one();
1195
1196 pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
1197 if (pte) {
1198 result = __collapse_huge_page_isolate(vma, address, pte, cc,
1199 &compound_pagelist);
1200 spin_unlock(pte_ptl);
1201 } else {
1202 result = SCAN_PMD_NULL;
1203 }
1204
1205 if (unlikely(result != SCAN_SUCCEED)) {
1206 if (pte)
1207 pte_unmap(pte);
1208 spin_lock(pmd_ptl);
1209 BUG_ON(!pmd_none(*pmd));
1210 /*
1211 * We can only use set_pmd_at when establishing
1212 * hugepmds and never for establishing regular pmds that
1213 * point to regular pagetables. Use pmd_populate for that.
1214 */
1215 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1216 spin_unlock(pmd_ptl);
1217 anon_vma_unlock_write(vma->anon_vma);
1218 goto out_up_write;
1219 }
1220
1221 /*
1222 * All pages are isolated and locked so anon_vma rmap
1223 * can't run anymore.
1224 */
1225 anon_vma_unlock_write(vma->anon_vma);
1226
1227 result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
1228 vma, address, pte_ptl,
1229 &compound_pagelist);
1230 pte_unmap(pte);
1231 if (unlikely(result != SCAN_SUCCEED))
1232 goto out_up_write;
1233
1234 /*
1235 * The smp_wmb() inside __folio_mark_uptodate() ensures the
1236 * copy_huge_page writes become visible before the set_pmd_at()
1237 * write.
1238 */
1239 __folio_mark_uptodate(folio);
1240 pgtable = pmd_pgtable(_pmd);
1241
1242 _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
1243 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1244
1245 spin_lock(pmd_ptl);
1246 BUG_ON(!pmd_none(*pmd));
1247 folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
1248 folio_add_lru_vma(folio, vma);
1249 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1250 set_pmd_at(mm, address, pmd, _pmd);
1251 update_mmu_cache_pmd(vma, address, pmd);
1252 deferred_split_folio(folio, false);
1253 spin_unlock(pmd_ptl);
1254
1255 folio = NULL;
1256
1257 result = SCAN_SUCCEED;
1258 out_up_write:
1259 mmap_write_unlock(mm);
1260 out_nolock:
1261 if (folio)
1262 folio_put(folio);
1263 trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
1264 return result;
1265 }
1266
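/*
 * Scan the ptes under one PMD and decide whether the range is a collapse
 * candidate, enforcing the swap/none/shared limits and the LRU, lock and
 * refcount checks. On success this calls collapse_huge_page(), which
 * drops the mmap_lock, so *mmap_locked is cleared for the caller.
 */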
1267 static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1268 struct vm_area_struct *vma,
1269 unsigned long address, bool *mmap_locked,
1270 struct collapse_control *cc)
1271 {
1272 pmd_t *pmd;
1273 pte_t *pte, *_pte;
1274 int result = SCAN_FAIL, referenced = 0;
1275 int none_or_zero = 0, shared = 0;
1276 struct page *page = NULL;
1277 struct folio *folio = NULL;
1278 unsigned long _address;
1279 spinlock_t *ptl;
1280 int node = NUMA_NO_NODE, unmapped = 0;
1281 bool writable = false;
1282
1283 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1284
1285 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1286 if (result != SCAN_SUCCEED)
1287 goto out;
1288
1289 memset(cc->node_load, 0, sizeof(cc->node_load));
1290 nodes_clear(cc->alloc_nmask);
1291 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1292 if (!pte) {
1293 result = SCAN_PMD_NULL;
1294 goto out;
1295 }
1296
1297 for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1298 _pte++, _address += PAGE_SIZE) {
1299 pte_t pteval = ptep_get(_pte);
1300 if (is_swap_pte(pteval)) {
1301 ++unmapped;
1302 if (!cc->is_khugepaged ||
1303 unmapped <= khugepaged_max_ptes_swap) {
1304 /*
1305 * Always be strict with uffd-wp
1306 * enabled swap entries. Please see
1307 * comment below for pte_uffd_wp().
1308 */
1309 if (pte_swp_uffd_wp_any(pteval)) {
1310 result = SCAN_PTE_UFFD_WP;
1311 goto out_unmap;
1312 }
1313 continue;
1314 } else {
1315 result = SCAN_EXCEED_SWAP_PTE;
1316 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1317 goto out_unmap;
1318 }
1319 }
1320 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1321 ++none_or_zero;
1322 if (!userfaultfd_armed(vma) &&
1323 (!cc->is_khugepaged ||
1324 none_or_zero <= khugepaged_max_ptes_none)) {
1325 continue;
1326 } else {
1327 result = SCAN_EXCEED_NONE_PTE;
1328 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1329 goto out_unmap;
1330 }
1331 }
1332 if (pte_uffd_wp(pteval)) {
1333 /*
1334 * Don't collapse the page if any of the small
1335 * PTEs are armed with uffd write protection.
1336 * Here we can also mark the new huge pmd as
1337 * write protected if any of the small ones is
1338 * marked, but that could bring unknown
1339 * userfault messages that fall outside of
1340 * the registered range. So, just be simple.
1341 */
1342 result = SCAN_PTE_UFFD_WP;
1343 goto out_unmap;
1344 }
1345 if (pte_write(pteval))
1346 writable = true;
1347
1348 page = vm_normal_page(vma, _address, pteval);
1349 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1350 result = SCAN_PAGE_NULL;
1351 goto out_unmap;
1352 }
1353 folio = page_folio(page);
1354
1355 if (!folio_test_anon(folio)) {
1356 result = SCAN_PAGE_ANON;
1357 goto out_unmap;
1358 }
1359
1360 /*
1361 * We treat a single page as shared if any part of the THP
1362 * is shared.
1363 */
1364 if (folio_maybe_mapped_shared(folio)) {
1365 ++shared;
1366 if (cc->is_khugepaged &&
1367 shared > khugepaged_max_ptes_shared) {
1368 result = SCAN_EXCEED_SHARED_PTE;
1369 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1370 goto out_unmap;
1371 }
1372 }
1373
1374 /*
1375 * Record which node the original page is from and save this
1376 * information to cc->node_load[].
1377 * Khugepaged will allocate the hugepage from the node that has
1378 * the max hit record.
1379 */
1380 node = folio_nid(folio);
1381 if (hpage_collapse_scan_abort(node, cc)) {
1382 result = SCAN_SCAN_ABORT;
1383 goto out_unmap;
1384 }
1385 cc->node_load[node]++;
1386 if (!folio_test_lru(folio)) {
1387 result = SCAN_PAGE_LRU;
1388 goto out_unmap;
1389 }
1390 if (folio_test_locked(folio)) {
1391 result = SCAN_PAGE_LOCK;
1392 goto out_unmap;
1393 }
1394
1395 /*
1396 * Check if the page has any GUP (or other external) pins.
1397 *
1398 * Here the check may be racy:
1399 * it may see folio_mapcount() > folio_ref_count().
1400 * But such a case is ephemeral, so we could always retry the
1401 * collapse later. However it may report a false positive if the
1402 * page has excessive GUP pins (i.e. 512). Anyway the same check
1403 * will be done again later, so the risk seems low.
1404 */
1405 if (!is_refcount_suitable(folio)) {
1406 result = SCAN_PAGE_COUNT;
1407 goto out_unmap;
1408 }
1409
1410 /*
1411 * If collapse was initiated by khugepaged, check that there is
1412 * enough young pte to justify collapsing the page
1413 */
1414 if (cc->is_khugepaged &&
1415 (pte_young(pteval) || folio_test_young(folio) ||
1416 folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
1417 address)))
1418 referenced++;
1419 }
1420 if (!writable) {
1421 result = SCAN_PAGE_RO;
1422 } else if (cc->is_khugepaged &&
1423 (!referenced ||
1424 (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1425 result = SCAN_LACK_REFERENCED_PAGE;
1426 } else {
1427 result = SCAN_SUCCEED;
1428 }
1429 out_unmap:
1430 pte_unmap_unlock(pte, ptl);
1431 if (result == SCAN_SUCCEED) {
1432 result = collapse_huge_page(mm, address, referenced,
1433 unmapped, cc);
1434 /* collapse_huge_page will return with the mmap_lock released */
1435 *mmap_locked = false;
1436 }
1437 out:
1438 trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
1439 none_or_zero, result, unmapped);
1440 return result;
1441 }
1442
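/*
 * Drop @mm_slot from the scan list once its mm has exited: unhash and
 * free the slot and release the mm reference taken in __khugepaged_enter().
 */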
1443 static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1444 {
1445 struct mm_slot *slot = &mm_slot->slot;
1446 struct mm_struct *mm = slot->mm;
1447
1448 lockdep_assert_held(&khugepaged_mm_lock);
1449
1450 if (hpage_collapse_test_exit(mm)) {
1451 /* free mm_slot */
1452 hash_del(&slot->hash);
1453 list_del(&slot->mm_node);
1454
1455 /*
1456 * Not strictly needed because the mm exited already.
1457 *
1458 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1459 */
1460
1461 /* khugepaged_mm_lock actually not necessary for the below */
1462 mm_slot_free(mm_slot_cache, mm_slot);
1463 mmdrop(mm);
1464 }
1465 }
1466
1467 #ifdef CONFIG_SHMEM
1468 /* hpage must be locked, and mmap_lock must be held */
1469 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1470 pmd_t *pmdp, struct page *hpage)
1471 {
1472 struct vm_fault vmf = {
1473 .vma = vma,
1474 .address = addr,
1475 .flags = 0,
1476 .pmd = pmdp,
1477 };
1478
1479 VM_BUG_ON(!PageTransHuge(hpage));
1480 mmap_assert_locked(vma->vm_mm);
1481
1482 if (do_set_pmd(&vmf, hpage))
1483 return SCAN_FAIL;
1484
1485 get_page(hpage);
1486 return SCAN_SUCCEED;
1487 }
1488
1489 /**
1490 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1491 * address haddr.
1492 *
1493 * @mm: process address space where collapse happens
1494 * @addr: THP collapse address
1495 * @install_pmd: If a huge PMD should be installed
1496 *
1497 * This function checks whether all the PTEs in the PMD are pointing to the
1498 * right THP. If so, retract the page table so the THP can refault in
1499 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
1500 */
1501 int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1502 bool install_pmd)
1503 {
1504 struct mmu_notifier_range range;
1505 bool notified = false;
1506 unsigned long haddr = addr & HPAGE_PMD_MASK;
1507 struct vm_area_struct *vma = vma_lookup(mm, haddr);
1508 struct folio *folio;
1509 pte_t *start_pte, *pte;
1510 pmd_t *pmd, pgt_pmd;
1511 spinlock_t *pml = NULL, *ptl;
1512 int nr_ptes = 0, result = SCAN_FAIL;
1513 int i;
1514
1515 mmap_assert_locked(mm);
1516
1517 /* First check VMA found, in case page tables are being torn down */
1518 if (!vma || !vma->vm_file ||
1519 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1520 return SCAN_VMA_CHECK;
1521
1522 /* Fast check before locking page if already PMD-mapped */
1523 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1524 if (result == SCAN_PMD_MAPPED)
1525 return result;
1526
1527 /*
1528 * If we are here, we've succeeded in replacing all the native pages
1529 * in the page cache with a single hugepage. If a mm were to fault-in
1530 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1531 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1532 * analogously elide sysfs THP settings here.
1533 */
1534 if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
1535 return SCAN_VMA_CHECK;
1536
1537 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1538 if (userfaultfd_wp(vma))
1539 return SCAN_PTE_UFFD_WP;
1540
1541 folio = filemap_lock_folio(vma->vm_file->f_mapping,
1542 linear_page_index(vma, haddr));
1543 if (IS_ERR(folio))
1544 return SCAN_PAGE_NULL;
1545
1546 if (folio_order(folio) != HPAGE_PMD_ORDER) {
1547 result = SCAN_PAGE_COMPOUND;
1548 goto drop_folio;
1549 }
1550
1551 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1552 switch (result) {
1553 case SCAN_SUCCEED:
1554 break;
1555 case SCAN_PMD_NONE:
1556 /*
1557 * All pte entries have been removed and pmd cleared.
1558 * Skip all the pte checks and just update the pmd mapping.
1559 */
1560 goto maybe_install_pmd;
1561 default:
1562 goto drop_folio;
1563 }
1564
1565 result = SCAN_FAIL;
1566 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1567 if (!start_pte) /* mmap_lock + page lock should prevent this */
1568 goto drop_folio;
1569
1570 /* step 1: check all mapped PTEs are to the right huge page */
1571 for (i = 0, addr = haddr, pte = start_pte;
1572 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1573 struct page *page;
1574 pte_t ptent = ptep_get(pte);
1575
1576 /* empty pte, skip */
1577 if (pte_none(ptent))
1578 continue;
1579
1580 /* page swapped out, abort */
1581 if (!pte_present(ptent)) {
1582 result = SCAN_PTE_NON_PRESENT;
1583 goto abort;
1584 }
1585
1586 page = vm_normal_page(vma, addr, ptent);
1587 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1588 page = NULL;
1589 /*
1590 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1591 * page table, but the new page will not be a subpage of hpage.
1592 */
1593 if (folio_page(folio, i) != page)
1594 goto abort;
1595 }
1596
1597 pte_unmap_unlock(start_pte, ptl);
1598 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1599 haddr, haddr + HPAGE_PMD_SIZE);
1600 mmu_notifier_invalidate_range_start(&range);
1601 notified = true;
1602
1603 /*
1604 * pmd_lock covers a wider range than ptl, and (if split from mm's
1605 * page_table_lock) ptl nests inside pml. The less time we hold pml,
1606 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
1607 * inserts a valid as-if-COWed PTE without even looking up page cache.
1608 * So the page lock of the folio does not protect from it; we must not
1609 * drop ptl before pgt_pmd is removed, so uffd private needs pml taken now.
1610 */
1611 if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
1612 pml = pmd_lock(mm, pmd);
1613
1614 start_pte = pte_offset_map_rw_nolock(mm, pmd, haddr, &pgt_pmd, &ptl);
1615 if (!start_pte) /* mmap_lock + page lock should prevent this */
1616 goto abort;
1617 if (!pml)
1618 spin_lock(ptl);
1619 else if (ptl != pml)
1620 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1621
1622 if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd))))
1623 goto abort;
1624
1625 /* step 2: clear page table and adjust rmap */
1626 for (i = 0, addr = haddr, pte = start_pte;
1627 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1628 struct page *page;
1629 pte_t ptent = ptep_get(pte);
1630
1631 if (pte_none(ptent))
1632 continue;
1633 /*
1634 * We dropped ptl after the first scan, to do the mmu_notifier:
1635 * page lock stops more PTEs of the folio being faulted in, but
1636 * does not stop write faults COWing anon copies from existing
1637 * PTEs; and does not stop those being swapped out or migrated.
1638 */
1639 if (!pte_present(ptent)) {
1640 result = SCAN_PTE_NON_PRESENT;
1641 goto abort;
1642 }
1643 page = vm_normal_page(vma, addr, ptent);
1644 if (folio_page(folio, i) != page)
1645 goto abort;
1646
1647 /*
1648 * Must clear entry, or a racing truncate may re-remove it.
1649 * TLB flush can be left until pmdp_collapse_flush() does it.
1650 * PTE dirty? Shmem page is already dirty; file is read-only.
1651 */
1652 ptep_clear(mm, addr, pte);
1653 folio_remove_rmap_pte(folio, page, vma);
1654 nr_ptes++;
1655 }
1656
1657 if (!pml)
1658 spin_unlock(ptl);
1659
1660 /* step 3: set proper refcount and mm_counters. */
1661 if (nr_ptes) {
1662 folio_ref_sub(folio, nr_ptes);
1663 add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
1664 }
1665
1666 /* step 4: remove empty page table */
1667 if (!pml) {
1668 pml = pmd_lock(mm, pmd);
1669 if (ptl != pml) {
1670 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1671 if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd)))) {
1672 flush_tlb_mm(mm);
1673 goto unlock;
1674 }
1675 }
1676 }
1677 pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1678 pmdp_get_lockless_sync();
1679 pte_unmap_unlock(start_pte, ptl);
1680 if (ptl != pml)
1681 spin_unlock(pml);
1682
1683 mmu_notifier_invalidate_range_end(&range);
1684
1685 mm_dec_nr_ptes(mm);
1686 page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
1687 pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1688
1689 maybe_install_pmd:
1690 /* step 5: install pmd entry */
1691 result = install_pmd
1692 ? set_huge_pmd(vma, haddr, pmd, &folio->page)
1693 : SCAN_SUCCEED;
1694 goto drop_folio;
1695 abort:
1696 if (nr_ptes) {
1697 flush_tlb_mm(mm);
1698 folio_ref_sub(folio, nr_ptes);
1699 add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
1700 }
1701 unlock:
1702 if (start_pte)
1703 pte_unmap_unlock(start_pte, ptl);
1704 if (pml && pml != ptl)
1705 spin_unlock(pml);
1706 if (notified)
1707 mmu_notifier_invalidate_range_end(&range);
1708 drop_folio:
1709 folio_unlock(folio);
1710 folio_put(folio);
1711 return result;
1712 }
1713
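/*
 * After a file hugepage has been installed in the page cache, walk every
 * VMA mapping @pgoff and, for suitably aligned ranges with no anon pages
 * and no uffd-wp, remove the now-empty page table so the range can later
 * refault as a huge PMD.
 */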
1714 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1715 {
1716 struct vm_area_struct *vma;
1717
1718 i_mmap_lock_read(mapping);
1719 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1720 struct mmu_notifier_range range;
1721 struct mm_struct *mm;
1722 unsigned long addr;
1723 pmd_t *pmd, pgt_pmd;
1724 spinlock_t *pml;
1725 spinlock_t *ptl;
1726 bool success = false;
1727
1728 /*
1729 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1730 * got written to. These VMAs are likely not worth removing
1731 * page tables from, as PMD-mapping is likely to be split later.
1732 */
1733 if (READ_ONCE(vma->anon_vma))
1734 continue;
1735
1736 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1737 if (addr & ~HPAGE_PMD_MASK ||
1738 vma->vm_end < addr + HPAGE_PMD_SIZE)
1739 continue;
1740
1741 mm = vma->vm_mm;
1742 if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
1743 continue;
1744
1745 if (hpage_collapse_test_exit(mm))
1746 continue;
1747 /*
1748 * When a vma is registered with uffd-wp, we cannot recycle
1749 * the page table because there may be pte markers installed.
1750 * Other vmas can still have the same file mapped hugely, but
1751 * skip this one: it will always be mapped in small page size
1752 * for uffd-wp registered ranges.
1753 */
1754 if (userfaultfd_wp(vma))
1755 continue;
1756
1757 /* PTEs were notified when unmapped; but now for the PMD? */
1758 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1759 addr, addr + HPAGE_PMD_SIZE);
1760 mmu_notifier_invalidate_range_start(&range);
1761
1762 pml = pmd_lock(mm, pmd);
1763 /*
1764 * The lock of new_folio is still held, so we will be blocked in
1765 * the page fault path, which prevents the pte entries from
1766 * being set again. So even though the old empty PTE page may be
1767 * concurrently freed and a new PTE page is filled into the pmd
1768 * entry, it is still empty and can be removed.
1769 *
1770 * So here we only need to recheck if the state of pmd entry
1771 * still meets our requirements, rather than checking pmd_same()
1772 * like elsewhere.
1773 */
1774 if (check_pmd_state(pmd) != SCAN_SUCCEED)
1775 goto drop_pml;
1776 ptl = pte_lockptr(mm, pmd);
1777 if (ptl != pml)
1778 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1779
1780 /*
1781 * Huge page lock is still held, so normally the page table
1782 * must remain empty; and we have already skipped anon_vma
1783 * and userfaultfd_wp() vmas. But since the mmap_lock is not
1784 * held, it is still possible for a racing userfaultfd_ioctl()
1785 * to have inserted ptes or markers. Now that we hold ptlock,
1786 * repeating the anon_vma check protects from one category,
1787 * and repeating the userfaultfd_wp() check from another.
1788 */
1789 if (likely(!vma->anon_vma && !userfaultfd_wp(vma))) {
1790 pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
1791 pmdp_get_lockless_sync();
1792 success = true;
1793 }
1794
1795 if (ptl != pml)
1796 spin_unlock(ptl);
1797 drop_pml:
1798 spin_unlock(pml);
1799
1800 mmu_notifier_invalidate_range_end(&range);
1801
1802 if (success) {
1803 mm_dec_nr_ptes(mm);
1804 page_table_check_pte_clear_range(mm, addr, pgt_pmd);
1805 pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1806 }
1807 }
1808 i_mmap_unlock_read(mapping);
1809 }
1810
1811 /**
1812 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1813 *
1814 * @mm: process address space where collapse happens
1815 * @addr: virtual collapse start address
1816 * @file: file that the collapse happens on
1817 * @start: collapse start address
1818 * @cc: collapse context and scratchpad
1819 *
1820 * Basic scheme is simple, details are more complex:
1821 * - allocate and lock a new huge page;
1822 * - scan page cache, locking old pages
1823 * + swap/gup in pages if necessary;
1824 * - copy data to new page
1825 * - handle shmem holes
1826 * + re-validate that holes weren't filled by someone else
1827 * + check for userfaultfd
1828 * - finalize updates to the page cache;
1829 * - if replacing succeeds:
1830 * + unlock huge page;
1831 * + free old pages;
1832 * - if replacing failed:
1833 * + unlock old pages
1834 * + unlock and free huge page;
1835 */
1836 static int collapse_file(struct mm_struct *mm, unsigned long addr,
1837 struct file *file, pgoff_t start,
1838 struct collapse_control *cc)
1839 {
1840 struct address_space *mapping = file->f_mapping;
1841 struct page *dst;
1842 struct folio *folio, *tmp, *new_folio;
1843 pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1844 LIST_HEAD(pagelist);
1845 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1846 int nr_none = 0, result = SCAN_SUCCEED;
1847 bool is_shmem = shmem_file(file);
1848
1849 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1850 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1851
1852 result = alloc_charge_folio(&new_folio, mm, cc);
1853 if (result != SCAN_SUCCEED)
1854 goto out;
1855
1856 mapping_set_update(&xas, mapping);
1857
1858 __folio_set_locked(new_folio);
1859 if (is_shmem)
1860 __folio_set_swapbacked(new_folio);
1861 new_folio->index = start;
1862 new_folio->mapping = mapping;
1863
1864 /*
1865 * Ensure we have slots for all the pages in the range. This is
1866 * almost certainly a no-op because most of the pages must be present.
1867 */
1868 do {
1869 xas_lock_irq(&xas);
1870 xas_create_range(&xas);
1871 if (!xas_error(&xas))
1872 break;
1873 xas_unlock_irq(&xas);
1874 if (!xas_nomem(&xas, GFP_KERNEL)) {
1875 result = SCAN_FAIL;
1876 goto rollback;
1877 }
1878 } while (1);
1879
1880 for (index = start; index < end;) {
1881 xas_set(&xas, index);
1882 folio = xas_load(&xas);
1883
1884 VM_BUG_ON(index != xas.xa_index);
1885 if (is_shmem) {
1886 if (!folio) {
1887 /*
1888 * Stop if extent has been truncated or
1889 * hole-punched, and is now completely
1890 * empty.
1891 */
1892 if (index == start) {
1893 if (!xas_next_entry(&xas, end - 1)) {
1894 result = SCAN_TRUNCATED;
1895 goto xa_locked;
1896 }
1897 }
1898 nr_none++;
1899 index++;
1900 continue;
1901 }
1902
1903 if (xa_is_value(folio) || !folio_test_uptodate(folio)) {
1904 xas_unlock_irq(&xas);
1905 /* swap in or instantiate fallocated page */
1906 if (shmem_get_folio(mapping->host, index, 0,
1907 &folio, SGP_NOALLOC)) {
1908 result = SCAN_FAIL;
1909 goto xa_unlocked;
1910 }
1911 /* drain lru cache to help folio_isolate_lru() */
1912 lru_add_drain();
1913 } else if (folio_trylock(folio)) {
1914 folio_get(folio);
1915 xas_unlock_irq(&xas);
1916 } else {
1917 result = SCAN_PAGE_LOCK;
1918 goto xa_locked;
1919 }
1920 } else { /* !is_shmem */
1921 if (!folio || xa_is_value(folio)) {
1922 xas_unlock_irq(&xas);
1923 page_cache_sync_readahead(mapping, &file->f_ra,
1924 file, index,
1925 end - index);
1926 /* drain lru cache to help folio_isolate_lru() */
1927 lru_add_drain();
1928 folio = filemap_lock_folio(mapping, index);
1929 if (IS_ERR(folio)) {
1930 result = SCAN_FAIL;
1931 goto xa_unlocked;
1932 }
1933 } else if (folio_test_dirty(folio)) {
1934 /*
1935 * khugepaged only works on a read-only fd,
1936 * so this page is dirty because it hasn't
1937 * been flushed since first write. There
1938 * won't be new dirty pages.
1939 *
1940 * Trigger async flush here and hope the
1941 * writeback is done when khugepaged
1942 * revisits this page.
1943 *
1944 * This is a one-off situation. We are not
1945 * forcing writeback in a loop.
1946 */
1947 xas_unlock_irq(&xas);
1948 filemap_flush(mapping);
1949 result = SCAN_FAIL;
1950 goto xa_unlocked;
1951 } else if (folio_test_writeback(folio)) {
1952 xas_unlock_irq(&xas);
1953 result = SCAN_FAIL;
1954 goto xa_unlocked;
1955 } else if (folio_trylock(folio)) {
1956 folio_get(folio);
1957 xas_unlock_irq(&xas);
1958 } else {
1959 result = SCAN_PAGE_LOCK;
1960 goto xa_locked;
1961 }
1962 }
1963
1964 /*
1965 * The folio must be locked, so we can drop the i_pages lock
1966 * without racing with truncate.
1967 */
1968 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1969
1970 /* make sure the folio is up to date */
1971 if (unlikely(!folio_test_uptodate(folio))) {
1972 result = SCAN_FAIL;
1973 goto out_unlock;
1974 }
1975
1976 /*
1977 * If file was truncated then extended, or hole-punched, before
1978 * we locked the first folio, then a THP might be there already.
1979 * This will be discovered on the first iteration.
1980 */
1981 if (folio_order(folio) == HPAGE_PMD_ORDER &&
1982 folio->index == start) {
1983 /* Maybe PMD-mapped */
1984 result = SCAN_PTE_MAPPED_HUGEPAGE;
1985 goto out_unlock;
1986 }
1987
1988 if (folio_mapping(folio) != mapping) {
1989 result = SCAN_TRUNCATED;
1990 goto out_unlock;
1991 }
1992
1993 if (!is_shmem && (folio_test_dirty(folio) ||
1994 folio_test_writeback(folio))) {
1995 /*
1996 * khugepaged only works on a read-only fd, so this
1997 * folio is dirty because it hasn't been flushed
1998 * since first write.
1999 */
2000 result = SCAN_FAIL;
2001 goto out_unlock;
2002 }
2003
2004 if (!folio_isolate_lru(folio)) {
2005 result = SCAN_DEL_PAGE_LRU;
2006 goto out_unlock;
2007 }
2008
2009 if (!filemap_release_folio(folio, GFP_KERNEL)) {
2010 result = SCAN_PAGE_HAS_PRIVATE;
2011 folio_putback_lru(folio);
2012 goto out_unlock;
2013 }
2014
2015 if (folio_mapped(folio))
2016 try_to_unmap(folio,
2017 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
2018
2019 xas_lock_irq(&xas);
2020
2021 VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio);
2022
2023 /*
2024 * We control 2 + nr_pages references to the folio:
2025 * - we hold a pin on it;
2026 * - nr_pages reference from page cache;
2027 * - one from folio_isolate_lru();
2028 * If those are the only references, then any new usage
2029 * of the folio will have to fetch it from the page
2030 * cache. That requires locking the folio to handle
2031 * truncate, so any new usage will be blocked until we
2032 * unlock the folio after collapse/during rollback.
2033 */
2034 if (folio_ref_count(folio) != 2 + folio_nr_pages(folio)) {
2035 result = SCAN_PAGE_COUNT;
2036 xas_unlock_irq(&xas);
2037 folio_putback_lru(folio);
2038 goto out_unlock;
2039 }
2040
2041 /*
2042 * Accumulate the folios that are being collapsed.
2043 */
2044 list_add_tail(&folio->lru, &pagelist);
2045 index += folio_nr_pages(folio);
2046 continue;
2047 out_unlock:
2048 folio_unlock(folio);
2049 folio_put(folio);
2050 goto xa_unlocked;
2051 }
2052
2053 if (!is_shmem) {
2054 filemap_nr_thps_inc(mapping);
2055 /*
2056 * Paired with the fence in do_dentry_open() -> get_write_access()
2057 * to ensure i_writecount is up to date and the update to nr_thps
2058 * is visible. Ensures the page cache will be truncated if the
2059 * file is opened writable.
2060 */
2061 smp_mb();
2062 if (inode_is_open_for_write(mapping->host)) {
2063 result = SCAN_FAIL;
2064 filemap_nr_thps_dec(mapping);
2065 }
2066 }
2067
2068 xa_locked:
2069 xas_unlock_irq(&xas);
2070 xa_unlocked:
2071
2072 /*
2073 * If collapse is successful, flush must be done now before copying.
2074 * If collapse is unsuccessful, does flush actually need to be done?
2075 * Do it anyway, to clear the state.
2076 */
2077 try_to_unmap_flush();
2078
2079 if (result == SCAN_SUCCEED && nr_none &&
2080 !shmem_charge(mapping->host, nr_none))
2081 result = SCAN_FAIL;
2082 if (result != SCAN_SUCCEED) {
2083 nr_none = 0;
2084 goto rollback;
2085 }
2086
2087 /*
2088 * The old folios are locked, so they won't change anymore.
2089 */
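/*
 * Copy each old folio's contents into the new huge folio, zero-filling
 * any holes in the range; a machine check during the copy aborts the
 * collapse with SCAN_COPY_MC.
 */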
2090 index = start;
2091 dst = folio_page(new_folio, 0);
2092 list_for_each_entry(folio, &pagelist, lru) {
2093 int i, nr_pages = folio_nr_pages(folio);
2094
2095 while (index < folio->index) {
2096 clear_highpage(dst);
2097 index++;
2098 dst++;
2099 }
2100
2101 for (i = 0; i < nr_pages; i++) {
2102 if (copy_mc_highpage(dst, folio_page(folio, i)) > 0) {
2103 result = SCAN_COPY_MC;
2104 goto rollback;
2105 }
2106 index++;
2107 dst++;
2108 }
2109 }
2110 while (index < end) {
2111 clear_highpage(dst);
2112 index++;
2113 dst++;
2114 }
2115
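/*
 * If the range contained holes, revalidate them under the xarray lock:
 * mark each still-missing index with a retry entry, and bail out if a
 * hole has been filled meanwhile or a MODE_MISSING userfaultfd could
 * have observed it.
 */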
2116 if (nr_none) {
2117 struct vm_area_struct *vma;
2118 int nr_none_check = 0;
2119
2120 i_mmap_lock_read(mapping);
2121 xas_lock_irq(&xas);
2122
2123 xas_set(&xas, start);
2124 for (index = start; index < end; index++) {
2125 if (!xas_next(&xas)) {
2126 xas_store(&xas, XA_RETRY_ENTRY);
2127 if (xas_error(&xas)) {
2128 result = SCAN_STORE_FAILED;
2129 goto immap_locked;
2130 }
2131 nr_none_check++;
2132 }
2133 }
2134
2135 if (nr_none != nr_none_check) {
2136 result = SCAN_PAGE_FILLED;
2137 goto immap_locked;
2138 }
2139
2140 /*
2141 * If userspace observed a missing page in a VMA with
2142 * a MODE_MISSING userfaultfd, then it might expect a
2143 * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
2144 * roll back to avoid suppressing such an event. Because
2145 * wp/minor userfaultfds don't give userspace any
2146 * guarantees that the kernel doesn't fill a missing
2147 * page with a zero page, they don't matter here.
2148 *
2149 * Any userfaultfds registered after this point will
2150 * not be able to observe any missing pages due to the
2151 * previously inserted retry entries.
2152 */
2153 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2154 if (userfaultfd_missing(vma)) {
2155 result = SCAN_EXCEED_NONE_PTE;
2156 goto immap_locked;
2157 }
2158 }
2159
2160 immap_locked:
2161 i_mmap_unlock_read(mapping);
2162 if (result != SCAN_SUCCEED) {
2163 xas_set(&xas, start);
2164 for (index = start; index < end; index++) {
2165 if (xas_next(&xas) == XA_RETRY_ENTRY)
2166 xas_store(&xas, NULL);
2167 }
2168
2169 xas_unlock_irq(&xas);
2170 goto rollback;
2171 }
2172 } else {
2173 xas_lock_irq(&xas);
2174 }
2175
2176 if (is_shmem)
2177 __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
2178 else
2179 __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
2180
2181 if (nr_none) {
2182 __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
2183 /* nr_none is always 0 for non-shmem. */
2184 __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
2185 }
2186
2187 /*
2188 * Mark new_folio as uptodate before inserting it into the
2189 * page cache so that it isn't mistaken for a fallocated but
2190 * unwritten page.
2191 */
2192 folio_mark_uptodate(new_folio);
2193 folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
2194
2195 if (is_shmem)
2196 folio_mark_dirty(new_folio);
2197 folio_add_lru(new_folio);
2198
2199 /* Join all the small entries into a single multi-index entry. */
2200 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2201 xas_store(&xas, new_folio);
2202 WARN_ON_ONCE(xas_error(&xas));
2203 xas_unlock_irq(&xas);
2204
2205 /*
2206 * Remove pte page tables, so we can re-fault the page as huge.
2207 * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
2208 */
2209 retract_page_tables(mapping, start);
2210 if (cc && !cc->is_khugepaged)
2211 result = SCAN_PTE_MAPPED_HUGEPAGE;
2212 folio_unlock(new_folio);
2213
2214 /*
2215 * The collapse has succeeded, so free the old folios.
2216 */
2217 list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
2218 list_del(&folio->lru);
2219 folio->mapping = NULL;
2220 folio_clear_active(folio);
2221 folio_clear_unevictable(folio);
2222 folio_unlock(folio);
2223 folio_put_refs(folio, 2 + folio_nr_pages(folio));
2224 }
2225
2226 goto out;
2227
2228 rollback:
2229 /* Something went wrong: roll back page cache changes */
2230 if (nr_none) {
2231 xas_lock_irq(&xas);
2232 mapping->nrpages -= nr_none;
2233 xas_unlock_irq(&xas);
2234 shmem_uncharge(mapping->host, nr_none);
2235 }
2236
2237 list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
2238 list_del(&folio->lru);
2239 folio_unlock(folio);
2240 folio_putback_lru(folio);
2241 folio_put(folio);
2242 }
2243 /*
2244 * Undo the update of filemap_nr_thps_inc for non-SHMEM
2245 * files only. This undo is not needed unless failure is
2246 * due to SCAN_COPY_MC.
2247 */
2248 if (!is_shmem && result == SCAN_COPY_MC) {
2249 filemap_nr_thps_dec(mapping);
2250 /*
2251 * Paired with the fence in do_dentry_open() -> get_write_access()
2252 * to ensure the update to nr_thps is visible.
2253 */
2254 smp_mb();
2255 }
2256
2257 new_folio->mapping = NULL;
2258
2259 folio_unlock(new_folio);
2260 folio_put(new_folio);
2261 out:
2262 VM_BUG_ON(!list_empty(&pagelist));
2263 trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
2264 return result;
2265 }
2266
2267 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2268 struct file *file, pgoff_t start,
2269 struct collapse_control *cc)
2270 {
2271 struct folio *folio = NULL;
2272 struct address_space *mapping = file->f_mapping;
2273 XA_STATE(xas, &mapping->i_pages, start);
2274 int present, swap;
2275 int node = NUMA_NO_NODE;
2276 int result = SCAN_SUCCEED;
2277
2278 present = 0;
2279 swap = 0;
2280 memset(cc->node_load, 0, sizeof(cc->node_load));
2281 nodes_clear(cc->alloc_nmask);
2282 rcu_read_lock();
2283 xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) {
2284 if (xas_retry(&xas, folio))
2285 continue;
2286
2287 if (xa_is_value(folio)) {
2288 swap += 1 << xas_get_order(&xas);
2289 if (cc->is_khugepaged &&
2290 swap > khugepaged_max_ptes_swap) {
2291 result = SCAN_EXCEED_SWAP_PTE;
2292 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2293 break;
2294 }
2295 continue;
2296 }
2297
2298 if (folio_order(folio) == HPAGE_PMD_ORDER &&
2299 folio->index == start) {
2300 /* Maybe PMD-mapped */
2301 result = SCAN_PTE_MAPPED_HUGEPAGE;
2302 /*
2303 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2304 * by the caller won't touch the page cache, and so
2305 * it's safe to skip LRU and refcount checks before
2306 * returning.
2307 */
2308 break;
2309 }
2310
2311 node = folio_nid(folio);
2312 if (hpage_collapse_scan_abort(node, cc)) {
2313 result = SCAN_SCAN_ABORT;
2314 break;
2315 }
2316 cc->node_load[node]++;
2317
2318 if (!folio_test_lru(folio)) {
2319 result = SCAN_PAGE_LRU;
2320 break;
2321 }
2322
2323 if (!is_refcount_suitable(folio)) {
2324 result = SCAN_PAGE_COUNT;
2325 break;
2326 }
2327
2328 /*
2329 * We probably should check if the folio is referenced
2330 * here, but nobody would transfer pte_young() to
2331 * folio_test_referenced() for us. And rmap walk here
2332 * is just too costly...
2333 */
2334
2335 present += folio_nr_pages(folio);
2336
2337 if (need_resched()) {
2338 xas_pause(&xas);
2339 cond_resched_rcu();
2340 }
2341 }
2342 rcu_read_unlock();
2343
2344 if (result == SCAN_SUCCEED) {
2345 if (cc->is_khugepaged &&
2346 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2347 result = SCAN_EXCEED_NONE_PTE;
2348 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2349 } else {
2350 result = collapse_file(mm, addr, file, start, cc);
2351 }
2352 }
2353
2354 trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
2355 return result;
2356 }
2357 #else
2358 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2359 struct file *file, pgoff_t start,
2360 struct collapse_control *cc)
2361 {
2362 BUILD_BUG();
2363 }
2364 #endif
2365
2366 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2367 struct collapse_control *cc)
2368 __releases(&khugepaged_mm_lock)
2369 __acquires(&khugepaged_mm_lock)
2370 {
2371 struct vma_iterator vmi;
2372 struct khugepaged_mm_slot *mm_slot;
2373 struct mm_slot *slot;
2374 struct mm_struct *mm;
2375 struct vm_area_struct *vma;
2376 int progress = 0;
2377
2378 VM_BUG_ON(!pages);
2379 lockdep_assert_held(&khugepaged_mm_lock);
2380 *result = SCAN_FAIL;
2381
2382 if (khugepaged_scan.mm_slot) {
2383 mm_slot = khugepaged_scan.mm_slot;
2384 slot = &mm_slot->slot;
2385 } else {
2386 slot = list_entry(khugepaged_scan.mm_head.next,
2387 struct mm_slot, mm_node);
2388 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2389 khugepaged_scan.address = 0;
2390 khugepaged_scan.mm_slot = mm_slot;
2391 }
2392 spin_unlock(&khugepaged_mm_lock);
2393
2394 mm = slot->mm;
2395 /*
2396 * Don't wait for the semaphore (to avoid long wait times). Just move to
2397 * the next mm on the list.
2398 */
2399 vma = NULL;
2400 if (unlikely(!mmap_read_trylock(mm)))
2401 goto breakouterloop_mmap_lock;
2402
2403 progress++;
2404 if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2405 goto breakouterloop;
2406
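/*
 * Resume from the saved scan cursor and walk this mm's VMAs, stopping
 * once roughly 'pages' worth of PTEs have been scanned.
 */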
2407 vma_iter_init(&vmi, mm, khugepaged_scan.address);
2408 for_each_vma(vmi, vma) {
2409 unsigned long hstart, hend;
2410
2411 cond_resched();
2412 if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
2413 progress++;
2414 break;
2415 }
2416 if (!thp_vma_allowable_order(vma, vma->vm_flags,
2417 TVA_ENFORCE_SYSFS, PMD_ORDER)) {
2418 skip:
2419 progress++;
2420 continue;
2421 }
2422 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2423 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2424 if (khugepaged_scan.address > hend)
2425 goto skip;
2426 if (khugepaged_scan.address < hstart)
2427 khugepaged_scan.address = hstart;
2428 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2429
2430 while (khugepaged_scan.address < hend) {
2431 bool mmap_locked = true;
2432
2433 cond_resched();
2434 if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2435 goto breakouterloop;
2436
2437 VM_BUG_ON(khugepaged_scan.address < hstart ||
2438 khugepaged_scan.address + HPAGE_PMD_SIZE >
2439 hend);
2440 if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
2441 struct file *file = get_file(vma->vm_file);
2442 pgoff_t pgoff = linear_page_index(vma,
2443 khugepaged_scan.address);
2444
2445 mmap_read_unlock(mm);
2446 mmap_locked = false;
2447 *result = hpage_collapse_scan_file(mm,
2448 khugepaged_scan.address, file, pgoff, cc);
2449 fput(file);
2450 if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
2451 mmap_read_lock(mm);
2452 if (hpage_collapse_test_exit_or_disable(mm))
2453 goto breakouterloop;
2454 *result = collapse_pte_mapped_thp(mm,
2455 khugepaged_scan.address, false);
2456 if (*result == SCAN_PMD_MAPPED)
2457 *result = SCAN_SUCCEED;
2458 mmap_read_unlock(mm);
2459 }
2460 } else {
2461 *result = hpage_collapse_scan_pmd(mm, vma,
2462 khugepaged_scan.address, &mmap_locked, cc);
2463 }
2464
2465 if (*result == SCAN_SUCCEED)
2466 ++khugepaged_pages_collapsed;
2467
2468 /* move to next address */
2469 khugepaged_scan.address += HPAGE_PMD_SIZE;
2470 progress += HPAGE_PMD_NR;
2471 if (!mmap_locked)
2472 /*
2473 * We released mmap_lock, so break out of the loop. Note
2474 * that we drop mmap_lock before all hugepage
2475 * allocations, so if allocation fails, we are
2476 * guaranteed to break here and report the
2477 * correct result back to the caller.
2478 */
2479 goto breakouterloop_mmap_lock;
2480 if (progress >= pages)
2481 goto breakouterloop;
2482 }
2483 }
2484 breakouterloop:
2485 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2486 breakouterloop_mmap_lock:
2487
2488 spin_lock(&khugepaged_mm_lock);
2489 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2490 /*
2491 * Release the current mm_slot if this mm is about to die, or
2492 * if we scanned all vmas of this mm.
2493 */
2494 if (hpage_collapse_test_exit(mm) || !vma) {
2495 /*
2496 * Make sure that if mm_users is reaching zero while
2497 * khugepaged runs here, khugepaged_exit will find
2498 * mm_slot not pointing to the exiting mm.
2499 */
2500 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2501 slot = list_entry(slot->mm_node.next,
2502 struct mm_slot, mm_node);
2503 khugepaged_scan.mm_slot =
2504 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2505 khugepaged_scan.address = 0;
2506 } else {
2507 khugepaged_scan.mm_slot = NULL;
2508 khugepaged_full_scans++;
2509 }
2510
2511 collect_mm_slot(mm_slot);
2512 }
2513
2514 return progress;
2515 }
2516
2517 static int khugepaged_has_work(void)
2518 {
2519 return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
2520 }
2521
2522 static int khugepaged_wait_event(void)
2523 {
2524 return !list_empty(&khugepaged_scan.mm_head) ||
2525 kthread_should_stop();
2526 }
2527
2528 static void khugepaged_do_scan(struct collapse_control *cc)
2529 {
2530 unsigned int progress = 0, pass_through_head = 0;
2531 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2532 bool wait = true;
2533 int result = SCAN_SUCCEED;
2534
2535 lru_add_drain_all();
2536
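/*
 * Scan up to 'pages' PTEs across the registered mms, starting over from
 * the head of the mm list at most twice per invocation.
 */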
2537 while (true) {
2538 cond_resched();
2539
2540 if (unlikely(kthread_should_stop()))
2541 break;
2542
2543 spin_lock(&khugepaged_mm_lock);
2544 if (!khugepaged_scan.mm_slot)
2545 pass_through_head++;
2546 if (khugepaged_has_work() &&
2547 pass_through_head < 2)
2548 progress += khugepaged_scan_mm_slot(pages - progress,
2549 &result, cc);
2550 else
2551 progress = pages;
2552 spin_unlock(&khugepaged_mm_lock);
2553
2554 if (progress >= pages)
2555 break;
2556
2557 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2558 /*
2559 * If we fail to allocate the first time, try to sleep for
2560 * a while. When it fails again, cancel the scan.
2561 */
2562 if (!wait)
2563 break;
2564 wait = false;
2565 khugepaged_alloc_sleep();
2566 }
2567 }
2568 }
2569
2570 static bool khugepaged_should_wakeup(void)
2571 {
2572 return kthread_should_stop() ||
2573 time_after_eq(jiffies, khugepaged_sleep_expire);
2574 }
2575
2576 static void khugepaged_wait_work(void)
2577 {
2578 if (khugepaged_has_work()) {
2579 const unsigned long scan_sleep_jiffies =
2580 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2581
2582 if (!scan_sleep_jiffies)
2583 return;
2584
2585 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2586 wait_event_freezable_timeout(khugepaged_wait,
2587 khugepaged_should_wakeup(),
2588 scan_sleep_jiffies);
2589 return;
2590 }
2591
2592 if (hugepage_pmd_enabled())
2593 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2594 }
2595
2596 static int khugepaged(void *none)
2597 {
2598 struct khugepaged_mm_slot *mm_slot;
2599
2600 set_freezable();
2601 set_user_nice(current, MAX_NICE);
2602
2603 while (!kthread_should_stop()) {
2604 khugepaged_do_scan(&khugepaged_collapse_control);
2605 khugepaged_wait_work();
2606 }
2607
2608 spin_lock(&khugepaged_mm_lock);
2609 mm_slot = khugepaged_scan.mm_slot;
2610 khugepaged_scan.mm_slot = NULL;
2611 if (mm_slot)
2612 collect_mm_slot(mm_slot);
2613 spin_unlock(&khugepaged_mm_lock);
2614 return 0;
2615 }
2616
2617 static void set_recommended_min_free_kbytes(void)
2618 {
2619 struct zone *zone;
2620 int nr_zones = 0;
2621 unsigned long recommended_min;
2622
2623 if (!hugepage_pmd_enabled()) {
2624 calculate_min_free_kbytes();
2625 goto update_wmarks;
2626 }
2627
2628 for_each_populated_zone(zone) {
2629 /*
2630 * We don't need to worry about fragmentation of
2631 * ZONE_MOVABLE since it only has movable pages.
2632 */
2633 if (zone_idx(zone) > gfp_zone(GFP_USER))
2634 continue;
2635
2636 nr_zones++;
2637 }
2638
2639 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2640 recommended_min = pageblock_nr_pages * nr_zones * 2;
2641
2642 /*
2643 * Make sure that on average at least two pageblocks are almost free
2644 * of another type, one for a migratetype to fall back to and a
2645 * second to avoid subsequent fallbacks of other types. There are 3
2646 * MIGRATE_TYPES we care about.
2647 */
2648 recommended_min += pageblock_nr_pages * nr_zones *
2649 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2650
2651 /* never allow reserving more than 5% of the lowmem */
2652 recommended_min = min(recommended_min,
2653 (unsigned long) nr_free_buffer_pages() / 20);
2654 recommended_min <<= (PAGE_SHIFT-10);
2655
2656 if (recommended_min > min_free_kbytes) {
2657 if (user_min_free_kbytes >= 0)
2658 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2659 min_free_kbytes, recommended_min);
2660
2661 min_free_kbytes = recommended_min;
2662 }
2663
2664 update_wmarks:
2665 setup_per_zone_wmarks();
2666 }
2667
2668 int start_stop_khugepaged(void)
2669 {
2670 int err = 0;
2671
2672 mutex_lock(&khugepaged_mutex);
2673 if (hugepage_pmd_enabled()) {
2674 if (!khugepaged_thread)
2675 khugepaged_thread = kthread_run(khugepaged, NULL,
2676 "khugepaged");
2677 if (IS_ERR(khugepaged_thread)) {
2678 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2679 err = PTR_ERR(khugepaged_thread);
2680 khugepaged_thread = NULL;
2681 goto fail;
2682 }
2683
2684 if (!list_empty(&khugepaged_scan.mm_head))
2685 wake_up_interruptible(&khugepaged_wait);
2686 } else if (khugepaged_thread) {
2687 kthread_stop(khugepaged_thread);
2688 khugepaged_thread = NULL;
2689 }
2690 set_recommended_min_free_kbytes();
2691 fail:
2692 mutex_unlock(&khugepaged_mutex);
2693 return err;
2694 }
2695
2696 void khugepaged_min_free_kbytes_update(void)
2697 {
2698 mutex_lock(&khugepaged_mutex);
2699 if (hugepage_pmd_enabled() && khugepaged_thread)
2700 set_recommended_min_free_kbytes();
2701 mutex_unlock(&khugepaged_mutex);
2702 }
2703
2704 bool current_is_khugepaged(void)
2705 {
2706 return kthread_func(current) == khugepaged;
2707 }
2708
2709 static int madvise_collapse_errno(enum scan_result r)
2710 {
2711 /*
2712 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2713 * actionable feedback to the caller, so they may take an appropriate
2714 * fallback measure depending on the nature of the failure.
2715 */
2716 switch (r) {
2717 case SCAN_ALLOC_HUGE_PAGE_FAIL:
2718 return -ENOMEM;
2719 case SCAN_CGROUP_CHARGE_FAIL:
2720 case SCAN_EXCEED_NONE_PTE:
2721 return -EBUSY;
2722 /* Resource temporarily unavailable - trying again might succeed */
2723 case SCAN_PAGE_COUNT:
2724 case SCAN_PAGE_LOCK:
2725 case SCAN_PAGE_LRU:
2726 case SCAN_DEL_PAGE_LRU:
2727 case SCAN_PAGE_FILLED:
2728 return -EAGAIN;
2729 /*
2730 * Other: trying again is unlikely to succeed / the error is intrinsic
2731 * to the specified memory range. khugepaged likely won't be able to
2732 * collapse it either.
2733 */
2734 default:
2735 return -EINVAL;
2736 }
2737 }
2738
2739 int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2740 unsigned long start, unsigned long end)
2741 {
2742 struct collapse_control *cc;
2743 struct mm_struct *mm = vma->vm_mm;
2744 unsigned long hstart, hend, addr;
2745 int thps = 0, last_fail = SCAN_FAIL;
2746 bool mmap_locked = true;
2747
2748 BUG_ON(vma->vm_start > start);
2749 BUG_ON(vma->vm_end < end);
2750
2751 *prev = vma;
2752
2753 if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
2754 return -EINVAL;
2755
2756 cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2757 if (!cc)
2758 return -ENOMEM;
2759 cc->is_khugepaged = false;
2760
2761 mmgrab(mm);
2762 lru_add_drain_all();
2763
2764 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2765 hend = end & HPAGE_PMD_MASK;
2766
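/* Attempt collapse on each PMD-aligned, PMD-sized chunk of [start, end). */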
2767 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2768 int result = SCAN_FAIL;
2769
2770 if (!mmap_locked) {
2771 cond_resched();
2772 mmap_read_lock(mm);
2773 mmap_locked = true;
2774 result = hugepage_vma_revalidate(mm, addr, false, &vma,
2775 cc);
2776 if (result != SCAN_SUCCEED) {
2777 last_fail = result;
2778 goto out_nolock;
2779 }
2780
2781 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2782 }
2783 mmap_assert_locked(mm);
2784 memset(cc->node_load, 0, sizeof(cc->node_load));
2785 nodes_clear(cc->alloc_nmask);
2786 if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
2787 struct file *file = get_file(vma->vm_file);
2788 pgoff_t pgoff = linear_page_index(vma, addr);
2789
2790 mmap_read_unlock(mm);
2791 mmap_locked = false;
2792 result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2793 cc);
2794 fput(file);
2795 } else {
2796 result = hpage_collapse_scan_pmd(mm, vma, addr,
2797 &mmap_locked, cc);
2798 }
2799 if (!mmap_locked)
2800 *prev = NULL; /* Tell caller we dropped mmap_lock */
2801
2802 handle_result:
2803 switch (result) {
2804 case SCAN_SUCCEED:
2805 case SCAN_PMD_MAPPED:
2806 ++thps;
2807 break;
2808 case SCAN_PTE_MAPPED_HUGEPAGE:
2809 BUG_ON(mmap_locked);
2810 BUG_ON(*prev);
2811 mmap_read_lock(mm);
2812 result = collapse_pte_mapped_thp(mm, addr, true);
2813 mmap_read_unlock(mm);
2814 goto handle_result;
2815 /* Whitelisted set of results where continuing is OK */
2816 case SCAN_PMD_NULL:
2817 case SCAN_PTE_NON_PRESENT:
2818 case SCAN_PTE_UFFD_WP:
2819 case SCAN_PAGE_RO:
2820 case SCAN_LACK_REFERENCED_PAGE:
2821 case SCAN_PAGE_NULL:
2822 case SCAN_PAGE_COUNT:
2823 case SCAN_PAGE_LOCK:
2824 case SCAN_PAGE_COMPOUND:
2825 case SCAN_PAGE_LRU:
2826 case SCAN_DEL_PAGE_LRU:
2827 last_fail = result;
2828 break;
2829 default:
2830 last_fail = result;
2831 /* Other error, exit */
2832 goto out_maybelock;
2833 }
2834 }
2835
2836 out_maybelock:
2837 /* Caller expects us to hold mmap_lock on return */
2838 if (!mmap_locked)
2839 mmap_read_lock(mm);
2840 out_nolock:
2841 mmap_assert_locked(mm);
2842 mmdrop(mm);
2843 kfree(cc);
2844
2845 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2846 : madvise_collapse_errno(last_fail);
2847 }
2848