// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/mm_types.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/compat.h>
#include <linux/pgalloc_tag.h>
#include <linux/pagewalk.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
59 */ 60 unsigned long transparent_hugepage_flags __read_mostly = 61 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS 62 (1<<TRANSPARENT_HUGEPAGE_FLAG)| 63 #endif 64 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE 65 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| 66 #endif 67 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)| 68 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)| 69 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 70 71 static struct shrinker *deferred_split_shrinker; 72 static unsigned long deferred_split_count(struct shrinker *shrink, 73 struct shrink_control *sc); 74 static unsigned long deferred_split_scan(struct shrinker *shrink, 75 struct shrink_control *sc); 76 static bool split_underused_thp = true; 77 78 static atomic_t huge_zero_refcount; 79 struct folio *huge_zero_folio __read_mostly; 80 unsigned long huge_zero_pfn __read_mostly = ~0UL; 81 unsigned long huge_anon_orders_always __read_mostly; 82 unsigned long huge_anon_orders_madvise __read_mostly; 83 unsigned long huge_anon_orders_inherit __read_mostly; 84 static bool anon_orders_configured __initdata; 85 86 static inline bool file_thp_enabled(struct vm_area_struct *vma) 87 { 88 struct inode *inode; 89 90 if (!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) 91 return false; 92 93 if (!vma->vm_file) 94 return false; 95 96 inode = file_inode(vma->vm_file); 97 98 return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); 99 } 100 101 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, 102 unsigned long vm_flags, 103 unsigned long tva_flags, 104 unsigned long orders) 105 { 106 bool smaps = tva_flags & TVA_SMAPS; 107 bool in_pf = tva_flags & TVA_IN_PF; 108 bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS; 109 unsigned long supported_orders; 110 111 /* Check the intersection of requested and supported orders. */ 112 if (vma_is_anonymous(vma)) 113 supported_orders = THP_ORDERS_ALL_ANON; 114 else if (vma_is_special_huge(vma)) 115 supported_orders = THP_ORDERS_ALL_SPECIAL; 116 else 117 supported_orders = THP_ORDERS_ALL_FILE_DEFAULT; 118 119 orders &= supported_orders; 120 if (!orders) 121 return 0; 122 123 if (!vma->vm_mm) /* vdso */ 124 return 0; 125 126 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags)) 127 return 0; 128 129 /* khugepaged doesn't collapse DAX vma, but page fault is fine. */ 130 if (vma_is_dax(vma)) 131 return in_pf ? orders : 0; 132 133 /* 134 * khugepaged special VMA and hugetlb VMA. 135 * Must be checked after dax since some dax mappings may have 136 * VM_MIXEDMAP set. 137 */ 138 if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED)) 139 return 0; 140 141 /* 142 * Check alignment for file vma and size for both file and anon vma by 143 * filtering out the unsuitable orders. 144 * 145 * Skip the check for page fault. Huge fault does the check in fault 146 * handlers. 147 */ 148 if (!in_pf) { 149 int order = highest_order(orders); 150 unsigned long addr; 151 152 while (orders) { 153 addr = vma->vm_end - (PAGE_SIZE << order); 154 if (thp_vma_suitable_order(vma, addr, order)) 155 break; 156 order = next_order(&orders, order); 157 } 158 159 if (!orders) 160 return 0; 161 } 162 163 /* 164 * Enabled via shmem mount options or sysfs settings. 165 * Must be done before hugepage flags check since shmem has its 166 * own flags. 167 */ 168 if (!in_pf && shmem_file(vma->vm_file)) 169 return shmem_allowable_huge_orders(file_inode(vma->vm_file), 170 vma, vma->vm_pgoff, 0, 171 !enforce_sysfs); 172 173 if (!vma_is_anonymous(vma)) { 174 /* 175 * Enforce sysfs THP requirements as necessary. 
Anonymous vmas 176 * were already handled in thp_vma_allowable_orders(). 177 */ 178 if (enforce_sysfs && 179 (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) && 180 !hugepage_global_always()))) 181 return 0; 182 183 /* 184 * Trust that ->huge_fault() handlers know what they are doing 185 * in fault path. 186 */ 187 if (((in_pf || smaps)) && vma->vm_ops->huge_fault) 188 return orders; 189 /* Only regular file is valid in collapse path */ 190 if (((!in_pf || smaps)) && file_thp_enabled(vma)) 191 return orders; 192 return 0; 193 } 194 195 if (vma_is_temporary_stack(vma)) 196 return 0; 197 198 /* 199 * THPeligible bit of smaps should show 1 for proper VMAs even 200 * though anon_vma is not initialized yet. 201 * 202 * Allow page fault since anon_vma may be not initialized until 203 * the first page fault. 204 */ 205 if (!vma->anon_vma) 206 return (smaps || in_pf) ? orders : 0; 207 208 return orders; 209 } 210 211 static bool get_huge_zero_page(void) 212 { 213 struct folio *zero_folio; 214 retry: 215 if (likely(atomic_inc_not_zero(&huge_zero_refcount))) 216 return true; 217 218 zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, 219 HPAGE_PMD_ORDER); 220 if (!zero_folio) { 221 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); 222 return false; 223 } 224 /* Ensure zero folio won't have large_rmappable flag set. */ 225 folio_clear_large_rmappable(zero_folio); 226 preempt_disable(); 227 if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) { 228 preempt_enable(); 229 folio_put(zero_folio); 230 goto retry; 231 } 232 WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio)); 233 234 /* We take additional reference here. It will be put back by shrinker */ 235 atomic_set(&huge_zero_refcount, 2); 236 preempt_enable(); 237 count_vm_event(THP_ZERO_PAGE_ALLOC); 238 return true; 239 } 240 241 static void put_huge_zero_page(void) 242 { 243 /* 244 * Counter should never go to zero here. Only shrinker can put 245 * last reference. 246 */ 247 BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); 248 } 249 250 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm) 251 { 252 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 253 return READ_ONCE(huge_zero_folio); 254 255 if (!get_huge_zero_page()) 256 return NULL; 257 258 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 259 put_huge_zero_page(); 260 261 return READ_ONCE(huge_zero_folio); 262 } 263 264 void mm_put_huge_zero_folio(struct mm_struct *mm) 265 { 266 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) 267 put_huge_zero_page(); 268 } 269 270 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink, 271 struct shrink_control *sc) 272 { 273 /* we can free zero page only if last reference remains */ 274 return atomic_read(&huge_zero_refcount) == 1 ? 
HPAGE_PMD_NR : 0; 275 } 276 277 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, 278 struct shrink_control *sc) 279 { 280 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { 281 struct folio *zero_folio = xchg(&huge_zero_folio, NULL); 282 BUG_ON(zero_folio == NULL); 283 WRITE_ONCE(huge_zero_pfn, ~0UL); 284 folio_put(zero_folio); 285 return HPAGE_PMD_NR; 286 } 287 288 return 0; 289 } 290 291 static struct shrinker *huge_zero_page_shrinker; 292 293 #ifdef CONFIG_SYSFS 294 static ssize_t enabled_show(struct kobject *kobj, 295 struct kobj_attribute *attr, char *buf) 296 { 297 const char *output; 298 299 if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags)) 300 output = "[always] madvise never"; 301 else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 302 &transparent_hugepage_flags)) 303 output = "always [madvise] never"; 304 else 305 output = "always madvise [never]"; 306 307 return sysfs_emit(buf, "%s\n", output); 308 } 309 310 static ssize_t enabled_store(struct kobject *kobj, 311 struct kobj_attribute *attr, 312 const char *buf, size_t count) 313 { 314 ssize_t ret = count; 315 316 if (sysfs_streq(buf, "always")) { 317 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 318 set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 319 } else if (sysfs_streq(buf, "madvise")) { 320 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 321 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 322 } else if (sysfs_streq(buf, "never")) { 323 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); 324 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); 325 } else 326 ret = -EINVAL; 327 328 if (ret > 0) { 329 int err = start_stop_khugepaged(); 330 if (err) 331 ret = err; 332 } 333 return ret; 334 } 335 336 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled); 337 338 ssize_t single_hugepage_flag_show(struct kobject *kobj, 339 struct kobj_attribute *attr, char *buf, 340 enum transparent_hugepage_flag flag) 341 { 342 return sysfs_emit(buf, "%d\n", 343 !!test_bit(flag, &transparent_hugepage_flags)); 344 } 345 346 ssize_t single_hugepage_flag_store(struct kobject *kobj, 347 struct kobj_attribute *attr, 348 const char *buf, size_t count, 349 enum transparent_hugepage_flag flag) 350 { 351 unsigned long value; 352 int ret; 353 354 ret = kstrtoul(buf, 10, &value); 355 if (ret < 0) 356 return ret; 357 if (value > 1) 358 return -EINVAL; 359 360 if (value) 361 set_bit(flag, &transparent_hugepage_flags); 362 else 363 clear_bit(flag, &transparent_hugepage_flags); 364 365 return count; 366 } 367 368 static ssize_t defrag_show(struct kobject *kobj, 369 struct kobj_attribute *attr, char *buf) 370 { 371 const char *output; 372 373 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, 374 &transparent_hugepage_flags)) 375 output = "[always] defer defer+madvise madvise never"; 376 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, 377 &transparent_hugepage_flags)) 378 output = "always [defer] defer+madvise madvise never"; 379 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, 380 &transparent_hugepage_flags)) 381 output = "always defer [defer+madvise] madvise never"; 382 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, 383 &transparent_hugepage_flags)) 384 output = "always defer defer+madvise [madvise] never"; 385 else 386 output = "always defer defer+madvise madvise [never]"; 387 388 return sysfs_emit(buf, "%s\n", output); 389 } 
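
/*
 * Illustrative note (not part of the original source): the sysfs handlers
 * in this block (enabled_show()/enabled_store() and the defrag pair) back
 * the knobs under /sys/kernel/mm/transparent_hugepage/.  A minimal admin
 * session, assuming a default sysfs mount, might look like:
 *
 *   $ cat /sys/kernel/mm/transparent_hugepage/enabled
 *   [always] madvise never
 *   # echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *   # echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 *
 * Any string not recognised by the *_store() helpers is rejected with
 * -EINVAL, and a successful write to "enabled" also calls
 * start_stop_khugepaged() to start or stop the khugepaged thread.
 */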
390 391 static ssize_t defrag_store(struct kobject *kobj, 392 struct kobj_attribute *attr, 393 const char *buf, size_t count) 394 { 395 if (sysfs_streq(buf, "always")) { 396 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 397 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 398 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 399 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 400 } else if (sysfs_streq(buf, "defer+madvise")) { 401 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 402 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 403 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 404 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 405 } else if (sysfs_streq(buf, "defer")) { 406 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 407 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 408 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 409 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 410 } else if (sysfs_streq(buf, "madvise")) { 411 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 412 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 413 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 414 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 415 } else if (sysfs_streq(buf, "never")) { 416 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 417 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 418 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 419 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 420 } else 421 return -EINVAL; 422 423 return count; 424 } 425 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag); 426 427 static ssize_t use_zero_page_show(struct kobject *kobj, 428 struct kobj_attribute *attr, char *buf) 429 { 430 return single_hugepage_flag_show(kobj, attr, buf, 431 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 432 } 433 static ssize_t use_zero_page_store(struct kobject *kobj, 434 struct kobj_attribute *attr, const char *buf, size_t count) 435 { 436 return single_hugepage_flag_store(kobj, attr, buf, count, 437 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 438 } 439 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page); 440 441 static ssize_t hpage_pmd_size_show(struct kobject *kobj, 442 struct kobj_attribute *attr, char *buf) 443 { 444 return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE); 445 } 446 static struct kobj_attribute hpage_pmd_size_attr = 447 __ATTR_RO(hpage_pmd_size); 448 449 static ssize_t split_underused_thp_show(struct kobject *kobj, 450 struct kobj_attribute *attr, char *buf) 451 { 452 return sysfs_emit(buf, "%d\n", split_underused_thp); 453 } 454 455 static ssize_t split_underused_thp_store(struct kobject *kobj, 456 struct kobj_attribute *attr, 457 const char *buf, size_t count) 458 { 459 int err = kstrtobool(buf, &split_underused_thp); 460 461 if (err < 0) 462 return err; 463 464 return count; 465 } 466 467 static struct kobj_attribute split_underused_thp_attr = __ATTR( 468 
shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store); 469 470 static struct attribute *hugepage_attr[] = { 471 &enabled_attr.attr, 472 &defrag_attr.attr, 473 &use_zero_page_attr.attr, 474 &hpage_pmd_size_attr.attr, 475 #ifdef CONFIG_SHMEM 476 &shmem_enabled_attr.attr, 477 #endif 478 &split_underused_thp_attr.attr, 479 NULL, 480 }; 481 482 static const struct attribute_group hugepage_attr_group = { 483 .attrs = hugepage_attr, 484 }; 485 486 static void hugepage_exit_sysfs(struct kobject *hugepage_kobj); 487 static void thpsize_release(struct kobject *kobj); 488 static DEFINE_SPINLOCK(huge_anon_orders_lock); 489 static LIST_HEAD(thpsize_list); 490 491 static ssize_t anon_enabled_show(struct kobject *kobj, 492 struct kobj_attribute *attr, char *buf) 493 { 494 int order = to_thpsize(kobj)->order; 495 const char *output; 496 497 if (test_bit(order, &huge_anon_orders_always)) 498 output = "[always] inherit madvise never"; 499 else if (test_bit(order, &huge_anon_orders_inherit)) 500 output = "always [inherit] madvise never"; 501 else if (test_bit(order, &huge_anon_orders_madvise)) 502 output = "always inherit [madvise] never"; 503 else 504 output = "always inherit madvise [never]"; 505 506 return sysfs_emit(buf, "%s\n", output); 507 } 508 509 static ssize_t anon_enabled_store(struct kobject *kobj, 510 struct kobj_attribute *attr, 511 const char *buf, size_t count) 512 { 513 int order = to_thpsize(kobj)->order; 514 ssize_t ret = count; 515 516 if (sysfs_streq(buf, "always")) { 517 spin_lock(&huge_anon_orders_lock); 518 clear_bit(order, &huge_anon_orders_inherit); 519 clear_bit(order, &huge_anon_orders_madvise); 520 set_bit(order, &huge_anon_orders_always); 521 spin_unlock(&huge_anon_orders_lock); 522 } else if (sysfs_streq(buf, "inherit")) { 523 spin_lock(&huge_anon_orders_lock); 524 clear_bit(order, &huge_anon_orders_always); 525 clear_bit(order, &huge_anon_orders_madvise); 526 set_bit(order, &huge_anon_orders_inherit); 527 spin_unlock(&huge_anon_orders_lock); 528 } else if (sysfs_streq(buf, "madvise")) { 529 spin_lock(&huge_anon_orders_lock); 530 clear_bit(order, &huge_anon_orders_always); 531 clear_bit(order, &huge_anon_orders_inherit); 532 set_bit(order, &huge_anon_orders_madvise); 533 spin_unlock(&huge_anon_orders_lock); 534 } else if (sysfs_streq(buf, "never")) { 535 spin_lock(&huge_anon_orders_lock); 536 clear_bit(order, &huge_anon_orders_always); 537 clear_bit(order, &huge_anon_orders_inherit); 538 clear_bit(order, &huge_anon_orders_madvise); 539 spin_unlock(&huge_anon_orders_lock); 540 } else 541 ret = -EINVAL; 542 543 if (ret > 0) { 544 int err; 545 546 err = start_stop_khugepaged(); 547 if (err) 548 ret = err; 549 } 550 return ret; 551 } 552 553 static struct kobj_attribute anon_enabled_attr = 554 __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store); 555 556 static struct attribute *anon_ctrl_attrs[] = { 557 &anon_enabled_attr.attr, 558 NULL, 559 }; 560 561 static const struct attribute_group anon_ctrl_attr_grp = { 562 .attrs = anon_ctrl_attrs, 563 }; 564 565 static struct attribute *file_ctrl_attrs[] = { 566 #ifdef CONFIG_SHMEM 567 &thpsize_shmem_enabled_attr.attr, 568 #endif 569 NULL, 570 }; 571 572 static const struct attribute_group file_ctrl_attr_grp = { 573 .attrs = file_ctrl_attrs, 574 }; 575 576 static struct attribute *any_ctrl_attrs[] = { 577 NULL, 578 }; 579 580 static const struct attribute_group any_ctrl_attr_grp = { 581 .attrs = any_ctrl_attrs, 582 }; 583 584 static const struct kobj_type thpsize_ktype = { 585 .release = &thpsize_release, 
586 .sysfs_ops = &kobj_sysfs_ops, 587 }; 588 589 DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}}; 590 591 static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item) 592 { 593 unsigned long sum = 0; 594 int cpu; 595 596 for_each_possible_cpu(cpu) { 597 struct mthp_stat *this = &per_cpu(mthp_stats, cpu); 598 599 sum += this->stats[order][item]; 600 } 601 602 return sum; 603 } 604 605 #define DEFINE_MTHP_STAT_ATTR(_name, _index) \ 606 static ssize_t _name##_show(struct kobject *kobj, \ 607 struct kobj_attribute *attr, char *buf) \ 608 { \ 609 int order = to_thpsize(kobj)->order; \ 610 \ 611 return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index)); \ 612 } \ 613 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 614 615 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC); 616 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); 617 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); 618 DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT); 619 DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN); 620 DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK); 621 DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE); 622 DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT); 623 DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK); 624 #ifdef CONFIG_SHMEM 625 DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC); 626 DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK); 627 DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE); 628 #endif 629 DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT); 630 DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED); 631 DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED); 632 DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON); 633 DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED); 634 635 static struct attribute *anon_stats_attrs[] = { 636 &anon_fault_alloc_attr.attr, 637 &anon_fault_fallback_attr.attr, 638 &anon_fault_fallback_charge_attr.attr, 639 #ifndef CONFIG_SHMEM 640 &zswpout_attr.attr, 641 &swpin_attr.attr, 642 &swpin_fallback_attr.attr, 643 &swpin_fallback_charge_attr.attr, 644 &swpout_attr.attr, 645 &swpout_fallback_attr.attr, 646 #endif 647 &split_deferred_attr.attr, 648 &nr_anon_attr.attr, 649 &nr_anon_partially_mapped_attr.attr, 650 NULL, 651 }; 652 653 static struct attribute_group anon_stats_attr_grp = { 654 .name = "stats", 655 .attrs = anon_stats_attrs, 656 }; 657 658 static struct attribute *file_stats_attrs[] = { 659 #ifdef CONFIG_SHMEM 660 &shmem_alloc_attr.attr, 661 &shmem_fallback_attr.attr, 662 &shmem_fallback_charge_attr.attr, 663 #endif 664 NULL, 665 }; 666 667 static struct attribute_group file_stats_attr_grp = { 668 .name = "stats", 669 .attrs = file_stats_attrs, 670 }; 671 672 static struct attribute *any_stats_attrs[] = { 673 #ifdef CONFIG_SHMEM 674 &zswpout_attr.attr, 675 &swpin_attr.attr, 676 &swpin_fallback_attr.attr, 677 &swpin_fallback_charge_attr.attr, 678 &swpout_attr.attr, 679 &swpout_fallback_attr.attr, 680 #endif 681 &split_attr.attr, 682 &split_failed_attr.attr, 683 NULL, 684 }; 685 686 static struct attribute_group any_stats_attr_grp = { 687 .name = "stats", 688 .attrs = any_stats_attrs, 689 }; 690 691 static int sysfs_add_group(struct kobject *kobj, 692 const struct attribute_group *grp) 693 { 694 int ret = -ENOENT; 695 696 /* 697 * If the group is named, try to merge first, assuming the subdirectory 
698 * was already created. This avoids the warning emitted by 699 * sysfs_create_group() if the directory already exists. 700 */ 701 if (grp->name) 702 ret = sysfs_merge_group(kobj, grp); 703 if (ret) 704 ret = sysfs_create_group(kobj, grp); 705 706 return ret; 707 } 708 709 static struct thpsize *thpsize_create(int order, struct kobject *parent) 710 { 711 unsigned long size = (PAGE_SIZE << order) / SZ_1K; 712 struct thpsize *thpsize; 713 int ret = -ENOMEM; 714 715 thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL); 716 if (!thpsize) 717 goto err; 718 719 thpsize->order = order; 720 721 ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent, 722 "hugepages-%lukB", size); 723 if (ret) { 724 kfree(thpsize); 725 goto err; 726 } 727 728 729 ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp); 730 if (ret) 731 goto err_put; 732 733 ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp); 734 if (ret) 735 goto err_put; 736 737 if (BIT(order) & THP_ORDERS_ALL_ANON) { 738 ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp); 739 if (ret) 740 goto err_put; 741 742 ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp); 743 if (ret) 744 goto err_put; 745 } 746 747 if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) { 748 ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp); 749 if (ret) 750 goto err_put; 751 752 ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp); 753 if (ret) 754 goto err_put; 755 } 756 757 return thpsize; 758 err_put: 759 kobject_put(&thpsize->kobj); 760 err: 761 return ERR_PTR(ret); 762 } 763 764 static void thpsize_release(struct kobject *kobj) 765 { 766 kfree(to_thpsize(kobj)); 767 } 768 769 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) 770 { 771 int err; 772 struct thpsize *thpsize; 773 unsigned long orders; 774 int order; 775 776 /* 777 * Default to setting PMD-sized THP to inherit the global setting and 778 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time 779 * constant so we have to do this here. 
780 */ 781 if (!anon_orders_configured) 782 huge_anon_orders_inherit = BIT(PMD_ORDER); 783 784 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); 785 if (unlikely(!*hugepage_kobj)) { 786 pr_err("failed to create transparent hugepage kobject\n"); 787 return -ENOMEM; 788 } 789 790 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); 791 if (err) { 792 pr_err("failed to register transparent hugepage group\n"); 793 goto delete_obj; 794 } 795 796 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); 797 if (err) { 798 pr_err("failed to register transparent hugepage group\n"); 799 goto remove_hp_group; 800 } 801 802 orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT; 803 order = highest_order(orders); 804 while (orders) { 805 thpsize = thpsize_create(order, *hugepage_kobj); 806 if (IS_ERR(thpsize)) { 807 pr_err("failed to create thpsize for order %d\n", order); 808 err = PTR_ERR(thpsize); 809 goto remove_all; 810 } 811 list_add(&thpsize->node, &thpsize_list); 812 order = next_order(&orders, order); 813 } 814 815 return 0; 816 817 remove_all: 818 hugepage_exit_sysfs(*hugepage_kobj); 819 return err; 820 remove_hp_group: 821 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); 822 delete_obj: 823 kobject_put(*hugepage_kobj); 824 return err; 825 } 826 827 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) 828 { 829 struct thpsize *thpsize, *tmp; 830 831 list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) { 832 list_del(&thpsize->node); 833 kobject_put(&thpsize->kobj); 834 } 835 836 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); 837 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); 838 kobject_put(hugepage_kobj); 839 } 840 #else 841 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) 842 { 843 return 0; 844 } 845 846 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) 847 { 848 } 849 #endif /* CONFIG_SYSFS */ 850 851 static int __init thp_shrinker_init(void) 852 { 853 huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero"); 854 if (!huge_zero_page_shrinker) 855 return -ENOMEM; 856 857 deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE | 858 SHRINKER_MEMCG_AWARE | 859 SHRINKER_NONSLAB, 860 "thp-deferred_split"); 861 if (!deferred_split_shrinker) { 862 shrinker_free(huge_zero_page_shrinker); 863 return -ENOMEM; 864 } 865 866 huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count; 867 huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan; 868 shrinker_register(huge_zero_page_shrinker); 869 870 deferred_split_shrinker->count_objects = deferred_split_count; 871 deferred_split_shrinker->scan_objects = deferred_split_scan; 872 shrinker_register(deferred_split_shrinker); 873 874 return 0; 875 } 876 877 static void __init thp_shrinker_exit(void) 878 { 879 shrinker_free(huge_zero_page_shrinker); 880 shrinker_free(deferred_split_shrinker); 881 } 882 883 static int __init hugepage_init(void) 884 { 885 int err; 886 struct kobject *hugepage_kobj; 887 888 if (!has_transparent_hugepage()) { 889 transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED; 890 return -EINVAL; 891 } 892 893 /* 894 * hugepages can't be allocated by the buddy allocator 895 */ 896 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER); 897 898 err = hugepage_init_sysfs(&hugepage_kobj); 899 if (err) 900 goto err_sysfs; 901 902 err = khugepaged_init(); 903 if (err) 904 goto err_slab; 905 906 err = thp_shrinker_init(); 907 if (err) 908 goto err_shrinker; 909 910 
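	/*
	 * Note added for clarity: 512 << (20 - PAGE_SHIFT) below is simply
	 * 512MB expressed in pages, i.e. (512 << 20) bytes divided by the
	 * PAGE_SIZE of (1 << PAGE_SHIFT) bytes.
	 */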
/* 911 * By default disable transparent hugepages on smaller systems, 912 * where the extra memory used could hurt more than TLB overhead 913 * is likely to save. The admin can still enable it through /sys. 914 */ 915 if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) { 916 transparent_hugepage_flags = 0; 917 return 0; 918 } 919 920 err = start_stop_khugepaged(); 921 if (err) 922 goto err_khugepaged; 923 924 return 0; 925 err_khugepaged: 926 thp_shrinker_exit(); 927 err_shrinker: 928 khugepaged_destroy(); 929 err_slab: 930 hugepage_exit_sysfs(hugepage_kobj); 931 err_sysfs: 932 return err; 933 } 934 subsys_initcall(hugepage_init); 935 936 static int __init setup_transparent_hugepage(char *str) 937 { 938 int ret = 0; 939 if (!str) 940 goto out; 941 if (!strcmp(str, "always")) { 942 set_bit(TRANSPARENT_HUGEPAGE_FLAG, 943 &transparent_hugepage_flags); 944 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 945 &transparent_hugepage_flags); 946 ret = 1; 947 } else if (!strcmp(str, "madvise")) { 948 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 949 &transparent_hugepage_flags); 950 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 951 &transparent_hugepage_flags); 952 ret = 1; 953 } else if (!strcmp(str, "never")) { 954 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 955 &transparent_hugepage_flags); 956 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 957 &transparent_hugepage_flags); 958 ret = 1; 959 } 960 out: 961 if (!ret) 962 pr_warn("transparent_hugepage= cannot parse, ignored\n"); 963 return ret; 964 } 965 __setup("transparent_hugepage=", setup_transparent_hugepage); 966 967 static char str_dup[PAGE_SIZE] __initdata; 968 static int __init setup_thp_anon(char *str) 969 { 970 char *token, *range, *policy, *subtoken; 971 unsigned long always, inherit, madvise; 972 char *start_size, *end_size; 973 int start, end, nr; 974 char *p; 975 976 if (!str || strlen(str) + 1 > PAGE_SIZE) 977 goto err; 978 strscpy(str_dup, str); 979 980 always = huge_anon_orders_always; 981 madvise = huge_anon_orders_madvise; 982 inherit = huge_anon_orders_inherit; 983 p = str_dup; 984 while ((token = strsep(&p, ";")) != NULL) { 985 range = strsep(&token, ":"); 986 policy = token; 987 988 if (!policy) 989 goto err; 990 991 while ((subtoken = strsep(&range, ",")) != NULL) { 992 if (strchr(subtoken, '-')) { 993 start_size = strsep(&subtoken, "-"); 994 end_size = subtoken; 995 996 start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON); 997 end = get_order_from_str(end_size, THP_ORDERS_ALL_ANON); 998 } else { 999 start_size = end_size = subtoken; 1000 start = end = get_order_from_str(subtoken, 1001 THP_ORDERS_ALL_ANON); 1002 } 1003 1004 if (start == -EINVAL) { 1005 pr_err("invalid size %s in thp_anon boot parameter\n", start_size); 1006 goto err; 1007 } 1008 1009 if (end == -EINVAL) { 1010 pr_err("invalid size %s in thp_anon boot parameter\n", end_size); 1011 goto err; 1012 } 1013 1014 if (start < 0 || end < 0 || start > end) 1015 goto err; 1016 1017 nr = end - start + 1; 1018 if (!strcmp(policy, "always")) { 1019 bitmap_set(&always, start, nr); 1020 bitmap_clear(&inherit, start, nr); 1021 bitmap_clear(&madvise, start, nr); 1022 } else if (!strcmp(policy, "madvise")) { 1023 bitmap_set(&madvise, start, nr); 1024 bitmap_clear(&inherit, start, nr); 1025 bitmap_clear(&always, start, nr); 1026 } else if (!strcmp(policy, "inherit")) { 1027 bitmap_set(&inherit, start, nr); 1028 bitmap_clear(&madvise, start, nr); 1029 bitmap_clear(&always, start, nr); 1030 } else if (!strcmp(policy, "never")) { 1031 bitmap_clear(&inherit, start, nr); 1032 bitmap_clear(&madvise, 
start, nr); 1033 bitmap_clear(&always, start, nr); 1034 } else { 1035 pr_err("invalid policy %s in thp_anon boot parameter\n", policy); 1036 goto err; 1037 } 1038 } 1039 } 1040 1041 huge_anon_orders_always = always; 1042 huge_anon_orders_madvise = madvise; 1043 huge_anon_orders_inherit = inherit; 1044 anon_orders_configured = true; 1045 return 1; 1046 1047 err: 1048 pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str); 1049 return 0; 1050 } 1051 __setup("thp_anon=", setup_thp_anon); 1052 1053 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) 1054 { 1055 if (likely(vma->vm_flags & VM_WRITE)) 1056 pmd = pmd_mkwrite(pmd, vma); 1057 return pmd; 1058 } 1059 1060 #ifdef CONFIG_MEMCG 1061 static inline 1062 struct deferred_split *get_deferred_split_queue(struct folio *folio) 1063 { 1064 struct mem_cgroup *memcg = folio_memcg(folio); 1065 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio)); 1066 1067 if (memcg) 1068 return &memcg->deferred_split_queue; 1069 else 1070 return &pgdat->deferred_split_queue; 1071 } 1072 #else 1073 static inline 1074 struct deferred_split *get_deferred_split_queue(struct folio *folio) 1075 { 1076 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio)); 1077 1078 return &pgdat->deferred_split_queue; 1079 } 1080 #endif 1081 1082 static inline bool is_transparent_hugepage(const struct folio *folio) 1083 { 1084 if (!folio_test_large(folio)) 1085 return false; 1086 1087 return is_huge_zero_folio(folio) || 1088 folio_test_large_rmappable(folio); 1089 } 1090 1091 static unsigned long __thp_get_unmapped_area(struct file *filp, 1092 unsigned long addr, unsigned long len, 1093 loff_t off, unsigned long flags, unsigned long size, 1094 vm_flags_t vm_flags) 1095 { 1096 loff_t off_end = off + len; 1097 loff_t off_align = round_up(off, size); 1098 unsigned long len_pad, ret, off_sub; 1099 1100 if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()) 1101 return 0; 1102 1103 if (off_end <= off_align || (off_end - off_align) < size) 1104 return 0; 1105 1106 len_pad = len + size; 1107 if (len_pad < len || (off + len_pad) < off) 1108 return 0; 1109 1110 ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad, 1111 off >> PAGE_SHIFT, flags, vm_flags); 1112 1113 /* 1114 * The failure might be due to length padding. The caller will retry 1115 * without the padding. 1116 */ 1117 if (IS_ERR_VALUE(ret)) 1118 return 0; 1119 1120 /* 1121 * Do not try to align to THP boundary if allocation at the address 1122 * hint succeeds. 
 */
	if (ret == addr)
		return addr;

	off_sub = (off - ret) & (size - 1);

	if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub)
		return ret + size;

	ret += off_sub;
	return ret;
}

unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
	if (ret)
		return ret;

	return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags,
					    vm_flags);
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
		unsigned long addr)
{
	gfp_t gfp = vma_thp_gfp_mask(vma);
	const int order = HPAGE_PMD_ORDER;
	struct folio *folio;

	folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK);

	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
		return NULL;
	}

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
		folio_put(folio);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
		return NULL;
	}
	folio_throttle_swaprate(folio, gfp);

	/*
	 * When a folio is not zeroed during allocation (__GFP_ZERO not used)
	 * or user folios require special handling, folio_zero_user() is used to
	 * make sure that the page corresponding to the faulting address will be
	 * hot in the cache after zeroing.
	 */
	if (user_alloc_needs_zeroing())
		folio_zero_user(folio, addr);
	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * folio_zero_user writes become visible before the set_pmd_at()
	 * write.
1196 */ 1197 __folio_mark_uptodate(folio); 1198 return folio; 1199 } 1200 1201 static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd, 1202 struct vm_area_struct *vma, unsigned long haddr) 1203 { 1204 pmd_t entry; 1205 1206 entry = mk_huge_pmd(&folio->page, vma->vm_page_prot); 1207 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1208 folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE); 1209 folio_add_lru_vma(folio, vma); 1210 set_pmd_at(vma->vm_mm, haddr, pmd, entry); 1211 update_mmu_cache_pmd(vma, haddr, pmd); 1212 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1213 count_vm_event(THP_FAULT_ALLOC); 1214 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC); 1215 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); 1216 } 1217 1218 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf) 1219 { 1220 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1221 struct vm_area_struct *vma = vmf->vma; 1222 struct folio *folio; 1223 pgtable_t pgtable; 1224 vm_fault_t ret = 0; 1225 1226 folio = vma_alloc_anon_folio_pmd(vma, vmf->address); 1227 if (unlikely(!folio)) 1228 return VM_FAULT_FALLBACK; 1229 1230 pgtable = pte_alloc_one(vma->vm_mm); 1231 if (unlikely(!pgtable)) { 1232 ret = VM_FAULT_OOM; 1233 goto release; 1234 } 1235 1236 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1237 if (unlikely(!pmd_none(*vmf->pmd))) { 1238 goto unlock_release; 1239 } else { 1240 ret = check_stable_address_space(vma->vm_mm); 1241 if (ret) 1242 goto unlock_release; 1243 1244 /* Deliver the page fault to userland */ 1245 if (userfaultfd_missing(vma)) { 1246 spin_unlock(vmf->ptl); 1247 folio_put(folio); 1248 pte_free(vma->vm_mm, pgtable); 1249 ret = handle_userfault(vmf, VM_UFFD_MISSING); 1250 VM_BUG_ON(ret & VM_FAULT_FALLBACK); 1251 return ret; 1252 } 1253 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); 1254 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); 1255 mm_inc_nr_ptes(vma->vm_mm); 1256 deferred_split_folio(folio, false); 1257 spin_unlock(vmf->ptl); 1258 } 1259 1260 return 0; 1261 unlock_release: 1262 spin_unlock(vmf->ptl); 1263 release: 1264 if (pgtable) 1265 pte_free(vma->vm_mm, pgtable); 1266 folio_put(folio); 1267 return ret; 1268 1269 } 1270 1271 /* 1272 * always: directly stall for all thp allocations 1273 * defer: wake kswapd and fail if not immediately available 1274 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise 1275 * fail if not immediately available 1276 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately 1277 * available 1278 * never: never stall for any thp allocation 1279 */ 1280 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma) 1281 { 1282 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE); 1283 1284 /* Always do synchronous compaction */ 1285 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 1286 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 1287 1288 /* Kick kcompactd and fail quickly */ 1289 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 1290 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; 1291 1292 /* Synchronous compaction if madvised, otherwise kick kcompactd */ 1293 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 1294 return GFP_TRANSHUGE_LIGHT | 1295 (vma_madvised ? 
__GFP_DIRECT_RECLAIM : 1296 __GFP_KSWAPD_RECLAIM); 1297 1298 /* Only do synchronous compaction if madvised */ 1299 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 1300 return GFP_TRANSHUGE_LIGHT | 1301 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); 1302 1303 return GFP_TRANSHUGE_LIGHT; 1304 } 1305 1306 /* Caller must hold page table lock. */ 1307 static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm, 1308 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, 1309 struct folio *zero_folio) 1310 { 1311 pmd_t entry; 1312 entry = mk_pmd(&zero_folio->page, vma->vm_page_prot); 1313 entry = pmd_mkhuge(entry); 1314 pgtable_trans_huge_deposit(mm, pmd, pgtable); 1315 set_pmd_at(mm, haddr, pmd, entry); 1316 mm_inc_nr_ptes(mm); 1317 } 1318 1319 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) 1320 { 1321 struct vm_area_struct *vma = vmf->vma; 1322 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1323 vm_fault_t ret; 1324 1325 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) 1326 return VM_FAULT_FALLBACK; 1327 ret = vmf_anon_prepare(vmf); 1328 if (ret) 1329 return ret; 1330 khugepaged_enter_vma(vma, vma->vm_flags); 1331 1332 if (!(vmf->flags & FAULT_FLAG_WRITE) && 1333 !mm_forbids_zeropage(vma->vm_mm) && 1334 transparent_hugepage_use_zero_page()) { 1335 pgtable_t pgtable; 1336 struct folio *zero_folio; 1337 vm_fault_t ret; 1338 1339 pgtable = pte_alloc_one(vma->vm_mm); 1340 if (unlikely(!pgtable)) 1341 return VM_FAULT_OOM; 1342 zero_folio = mm_get_huge_zero_folio(vma->vm_mm); 1343 if (unlikely(!zero_folio)) { 1344 pte_free(vma->vm_mm, pgtable); 1345 count_vm_event(THP_FAULT_FALLBACK); 1346 return VM_FAULT_FALLBACK; 1347 } 1348 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1349 ret = 0; 1350 if (pmd_none(*vmf->pmd)) { 1351 ret = check_stable_address_space(vma->vm_mm); 1352 if (ret) { 1353 spin_unlock(vmf->ptl); 1354 pte_free(vma->vm_mm, pgtable); 1355 } else if (userfaultfd_missing(vma)) { 1356 spin_unlock(vmf->ptl); 1357 pte_free(vma->vm_mm, pgtable); 1358 ret = handle_userfault(vmf, VM_UFFD_MISSING); 1359 VM_BUG_ON(ret & VM_FAULT_FALLBACK); 1360 } else { 1361 set_huge_zero_folio(pgtable, vma->vm_mm, vma, 1362 haddr, vmf->pmd, zero_folio); 1363 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 1364 spin_unlock(vmf->ptl); 1365 } 1366 } else { 1367 spin_unlock(vmf->ptl); 1368 pte_free(vma->vm_mm, pgtable); 1369 } 1370 return ret; 1371 } 1372 1373 return __do_huge_pmd_anonymous_page(vmf); 1374 } 1375 1376 static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 1377 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, 1378 pgtable_t pgtable) 1379 { 1380 struct mm_struct *mm = vma->vm_mm; 1381 pmd_t entry; 1382 1383 lockdep_assert_held(pmd_lockptr(mm, pmd)); 1384 1385 if (!pmd_none(*pmd)) { 1386 if (write) { 1387 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { 1388 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); 1389 return -EEXIST; 1390 } 1391 entry = pmd_mkyoung(*pmd); 1392 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1393 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) 1394 update_mmu_cache_pmd(vma, addr, pmd); 1395 } 1396 1397 return -EEXIST; 1398 } 1399 1400 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); 1401 if (pfn_t_devmap(pfn)) 1402 entry = pmd_mkdevmap(entry); 1403 else 1404 entry = pmd_mkspecial(entry); 1405 if (write) { 1406 entry = pmd_mkyoung(pmd_mkdirty(entry)); 1407 entry = maybe_pmd_mkwrite(entry, vma); 1408 } 1409 1410 if (pgtable) { 1411 pgtable_trans_huge_deposit(mm, pmd, pgtable); 1412 mm_inc_nr_ptes(mm); 
1413 } 1414 1415 set_pmd_at(mm, addr, pmd, entry); 1416 update_mmu_cache_pmd(vma, addr, pmd); 1417 return 0; 1418 } 1419 1420 /** 1421 * vmf_insert_pfn_pmd - insert a pmd size pfn 1422 * @vmf: Structure describing the fault 1423 * @pfn: pfn to insert 1424 * @write: whether it's a write fault 1425 * 1426 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info. 1427 * 1428 * Return: vm_fault_t value. 1429 */ 1430 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) 1431 { 1432 unsigned long addr = vmf->address & PMD_MASK; 1433 struct vm_area_struct *vma = vmf->vma; 1434 pgprot_t pgprot = vma->vm_page_prot; 1435 pgtable_t pgtable = NULL; 1436 spinlock_t *ptl; 1437 int error; 1438 1439 /* 1440 * If we had pmd_special, we could avoid all these restrictions, 1441 * but we need to be consistent with PTEs and architectures that 1442 * can't support a 'special' bit. 1443 */ 1444 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && 1445 !pfn_t_devmap(pfn)); 1446 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 1447 (VM_PFNMAP|VM_MIXEDMAP)); 1448 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 1449 1450 if (addr < vma->vm_start || addr >= vma->vm_end) 1451 return VM_FAULT_SIGBUS; 1452 1453 if (arch_needs_pgtable_deposit()) { 1454 pgtable = pte_alloc_one(vma->vm_mm); 1455 if (!pgtable) 1456 return VM_FAULT_OOM; 1457 } 1458 1459 track_pfn_insert(vma, &pgprot, pfn); 1460 ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1461 error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, 1462 pgtable); 1463 spin_unlock(ptl); 1464 if (error && pgtable) 1465 pte_free(vma->vm_mm, pgtable); 1466 1467 return VM_FAULT_NOPAGE; 1468 } 1469 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); 1470 1471 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio, 1472 bool write) 1473 { 1474 struct vm_area_struct *vma = vmf->vma; 1475 unsigned long addr = vmf->address & PMD_MASK; 1476 struct mm_struct *mm = vma->vm_mm; 1477 spinlock_t *ptl; 1478 pgtable_t pgtable = NULL; 1479 int error; 1480 1481 if (addr < vma->vm_start || addr >= vma->vm_end) 1482 return VM_FAULT_SIGBUS; 1483 1484 if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER)) 1485 return VM_FAULT_SIGBUS; 1486 1487 if (arch_needs_pgtable_deposit()) { 1488 pgtable = pte_alloc_one(vma->vm_mm); 1489 if (!pgtable) 1490 return VM_FAULT_OOM; 1491 } 1492 1493 ptl = pmd_lock(mm, vmf->pmd); 1494 if (pmd_none(*vmf->pmd)) { 1495 folio_get(folio); 1496 folio_add_file_rmap_pmd(folio, &folio->page, vma); 1497 add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR); 1498 } 1499 error = insert_pfn_pmd(vma, addr, vmf->pmd, 1500 pfn_to_pfn_t(folio_pfn(folio)), vma->vm_page_prot, 1501 write, pgtable); 1502 spin_unlock(ptl); 1503 if (error && pgtable) 1504 pte_free(mm, pgtable); 1505 1506 return VM_FAULT_NOPAGE; 1507 } 1508 EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd); 1509 1510 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1511 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) 1512 { 1513 if (likely(vma->vm_flags & VM_WRITE)) 1514 pud = pud_mkwrite(pud); 1515 return pud; 1516 } 1517 1518 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, 1519 pud_t *pud, pfn_t pfn, bool write) 1520 { 1521 struct mm_struct *mm = vma->vm_mm; 1522 pgprot_t prot = vma->vm_page_prot; 1523 pud_t entry; 1524 1525 if (!pud_none(*pud)) { 1526 if (write) { 1527 if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn))) 1528 return; 1529 entry = pud_mkyoung(*pud); 1530 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); 
1531 if (pudp_set_access_flags(vma, addr, pud, entry, 1)) 1532 update_mmu_cache_pud(vma, addr, pud); 1533 } 1534 return; 1535 } 1536 1537 entry = pud_mkhuge(pfn_t_pud(pfn, prot)); 1538 if (pfn_t_devmap(pfn)) 1539 entry = pud_mkdevmap(entry); 1540 else 1541 entry = pud_mkspecial(entry); 1542 if (write) { 1543 entry = pud_mkyoung(pud_mkdirty(entry)); 1544 entry = maybe_pud_mkwrite(entry, vma); 1545 } 1546 set_pud_at(mm, addr, pud, entry); 1547 update_mmu_cache_pud(vma, addr, pud); 1548 } 1549 1550 /** 1551 * vmf_insert_pfn_pud - insert a pud size pfn 1552 * @vmf: Structure describing the fault 1553 * @pfn: pfn to insert 1554 * @write: whether it's a write fault 1555 * 1556 * Insert a pud size pfn. See vmf_insert_pfn() for additional info. 1557 * 1558 * Return: vm_fault_t value. 1559 */ 1560 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) 1561 { 1562 unsigned long addr = vmf->address & PUD_MASK; 1563 struct vm_area_struct *vma = vmf->vma; 1564 pgprot_t pgprot = vma->vm_page_prot; 1565 spinlock_t *ptl; 1566 1567 /* 1568 * If we had pud_special, we could avoid all these restrictions, 1569 * but we need to be consistent with PTEs and architectures that 1570 * can't support a 'special' bit. 1571 */ 1572 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && 1573 !pfn_t_devmap(pfn)); 1574 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 1575 (VM_PFNMAP|VM_MIXEDMAP)); 1576 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 1577 1578 if (addr < vma->vm_start || addr >= vma->vm_end) 1579 return VM_FAULT_SIGBUS; 1580 1581 track_pfn_insert(vma, &pgprot, pfn); 1582 1583 ptl = pud_lock(vma->vm_mm, vmf->pud); 1584 insert_pfn_pud(vma, addr, vmf->pud, pfn, write); 1585 spin_unlock(ptl); 1586 1587 return VM_FAULT_NOPAGE; 1588 } 1589 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); 1590 1591 /** 1592 * vmf_insert_folio_pud - insert a pud size folio mapped by a pud entry 1593 * @vmf: Structure describing the fault 1594 * @folio: folio to insert 1595 * @write: whether it's a write fault 1596 * 1597 * Return: vm_fault_t value. 1598 */ 1599 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio, 1600 bool write) 1601 { 1602 struct vm_area_struct *vma = vmf->vma; 1603 unsigned long addr = vmf->address & PUD_MASK; 1604 pud_t *pud = vmf->pud; 1605 struct mm_struct *mm = vma->vm_mm; 1606 spinlock_t *ptl; 1607 1608 if (addr < vma->vm_start || addr >= vma->vm_end) 1609 return VM_FAULT_SIGBUS; 1610 1611 if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER)) 1612 return VM_FAULT_SIGBUS; 1613 1614 ptl = pud_lock(mm, pud); 1615 1616 /* 1617 * If there is already an entry present we assume the folio is 1618 * already mapped, hence no need to take another reference. We 1619 * still call insert_pfn_pud() though in case the mapping needs 1620 * upgrading to writeable. 
1621 */ 1622 if (pud_none(*vmf->pud)) { 1623 folio_get(folio); 1624 folio_add_file_rmap_pud(folio, &folio->page, vma); 1625 add_mm_counter(mm, mm_counter_file(folio), HPAGE_PUD_NR); 1626 } 1627 insert_pfn_pud(vma, addr, vmf->pud, pfn_to_pfn_t(folio_pfn(folio)), 1628 write); 1629 spin_unlock(ptl); 1630 1631 return VM_FAULT_NOPAGE; 1632 } 1633 EXPORT_SYMBOL_GPL(vmf_insert_folio_pud); 1634 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1635 1636 void touch_pmd(struct vm_area_struct *vma, unsigned long addr, 1637 pmd_t *pmd, bool write) 1638 { 1639 pmd_t _pmd; 1640 1641 _pmd = pmd_mkyoung(*pmd); 1642 if (write) 1643 _pmd = pmd_mkdirty(_pmd); 1644 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, 1645 pmd, _pmd, write)) 1646 update_mmu_cache_pmd(vma, addr, pmd); 1647 } 1648 1649 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, 1650 pmd_t *pmd, int flags, struct dev_pagemap **pgmap) 1651 { 1652 unsigned long pfn = pmd_pfn(*pmd); 1653 struct mm_struct *mm = vma->vm_mm; 1654 struct page *page; 1655 int ret; 1656 1657 assert_spin_locked(pmd_lockptr(mm, pmd)); 1658 1659 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 1660 return NULL; 1661 1662 if (pmd_present(*pmd) && pmd_devmap(*pmd)) 1663 /* pass */; 1664 else 1665 return NULL; 1666 1667 if (flags & FOLL_TOUCH) 1668 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); 1669 1670 /* 1671 * device mapped pages can only be returned if the 1672 * caller will manage the page reference count. 1673 */ 1674 if (!(flags & (FOLL_GET | FOLL_PIN))) 1675 return ERR_PTR(-EEXIST); 1676 1677 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; 1678 *pgmap = get_dev_pagemap(pfn, *pgmap); 1679 if (!*pgmap) 1680 return ERR_PTR(-EFAULT); 1681 page = pfn_to_page(pfn); 1682 ret = try_grab_folio(page_folio(page), 1, flags); 1683 if (ret) 1684 page = ERR_PTR(ret); 1685 1686 return page; 1687 } 1688 1689 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1690 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 1691 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) 1692 { 1693 spinlock_t *dst_ptl, *src_ptl; 1694 struct page *src_page; 1695 struct folio *src_folio; 1696 pmd_t pmd; 1697 pgtable_t pgtable = NULL; 1698 int ret = -ENOMEM; 1699 1700 pmd = pmdp_get_lockless(src_pmd); 1701 if (unlikely(pmd_present(pmd) && pmd_special(pmd))) { 1702 dst_ptl = pmd_lock(dst_mm, dst_pmd); 1703 src_ptl = pmd_lockptr(src_mm, src_pmd); 1704 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1705 /* 1706 * No need to recheck the pmd, it can't change with write 1707 * mmap lock held here. 1708 * 1709 * Meanwhile, making sure it's not a CoW VMA with writable 1710 * mapping, otherwise it means either the anon page wrongly 1711 * applied special bit, or we made the PRIVATE mapping be 1712 * able to wrongly write to the backend MMIO. 
1713 */ 1714 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd)); 1715 goto set_pmd; 1716 } 1717 1718 /* Skip if can be re-fill on fault */ 1719 if (!vma_is_anonymous(dst_vma)) 1720 return 0; 1721 1722 pgtable = pte_alloc_one(dst_mm); 1723 if (unlikely(!pgtable)) 1724 goto out; 1725 1726 dst_ptl = pmd_lock(dst_mm, dst_pmd); 1727 src_ptl = pmd_lockptr(src_mm, src_pmd); 1728 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1729 1730 ret = -EAGAIN; 1731 pmd = *src_pmd; 1732 1733 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1734 if (unlikely(is_swap_pmd(pmd))) { 1735 swp_entry_t entry = pmd_to_swp_entry(pmd); 1736 1737 VM_BUG_ON(!is_pmd_migration_entry(pmd)); 1738 if (!is_readable_migration_entry(entry)) { 1739 entry = make_readable_migration_entry( 1740 swp_offset(entry)); 1741 pmd = swp_entry_to_pmd(entry); 1742 if (pmd_swp_soft_dirty(*src_pmd)) 1743 pmd = pmd_swp_mksoft_dirty(pmd); 1744 if (pmd_swp_uffd_wp(*src_pmd)) 1745 pmd = pmd_swp_mkuffd_wp(pmd); 1746 set_pmd_at(src_mm, addr, src_pmd, pmd); 1747 } 1748 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1749 mm_inc_nr_ptes(dst_mm); 1750 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 1751 if (!userfaultfd_wp(dst_vma)) 1752 pmd = pmd_swp_clear_uffd_wp(pmd); 1753 set_pmd_at(dst_mm, addr, dst_pmd, pmd); 1754 ret = 0; 1755 goto out_unlock; 1756 } 1757 #endif 1758 1759 if (unlikely(!pmd_trans_huge(pmd))) { 1760 pte_free(dst_mm, pgtable); 1761 goto out_unlock; 1762 } 1763 /* 1764 * When page table lock is held, the huge zero pmd should not be 1765 * under splitting since we don't split the page itself, only pmd to 1766 * a page table. 1767 */ 1768 if (is_huge_zero_pmd(pmd)) { 1769 /* 1770 * mm_get_huge_zero_folio() will never allocate a new 1771 * folio here, since we already have a zero page to 1772 * copy. It just takes a reference. 1773 */ 1774 mm_get_huge_zero_folio(dst_mm); 1775 goto out_zero_page; 1776 } 1777 1778 src_page = pmd_page(pmd); 1779 VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 1780 src_folio = page_folio(src_page); 1781 1782 folio_get(src_folio); 1783 if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) { 1784 /* Page maybe pinned: split and retry the fault on PTEs. 
*/ 1785 folio_put(src_folio); 1786 pte_free(dst_mm, pgtable); 1787 spin_unlock(src_ptl); 1788 spin_unlock(dst_ptl); 1789 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); 1790 return -EAGAIN; 1791 } 1792 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 1793 out_zero_page: 1794 mm_inc_nr_ptes(dst_mm); 1795 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 1796 pmdp_set_wrprotect(src_mm, addr, src_pmd); 1797 if (!userfaultfd_wp(dst_vma)) 1798 pmd = pmd_clear_uffd_wp(pmd); 1799 pmd = pmd_wrprotect(pmd); 1800 set_pmd: 1801 pmd = pmd_mkold(pmd); 1802 set_pmd_at(dst_mm, addr, dst_pmd, pmd); 1803 1804 ret = 0; 1805 out_unlock: 1806 spin_unlock(src_ptl); 1807 spin_unlock(dst_ptl); 1808 out: 1809 return ret; 1810 } 1811 1812 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1813 void touch_pud(struct vm_area_struct *vma, unsigned long addr, 1814 pud_t *pud, bool write) 1815 { 1816 pud_t _pud; 1817 1818 _pud = pud_mkyoung(*pud); 1819 if (write) 1820 _pud = pud_mkdirty(_pud); 1821 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, 1822 pud, _pud, write)) 1823 update_mmu_cache_pud(vma, addr, pud); 1824 } 1825 1826 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 1827 pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1828 struct vm_area_struct *vma) 1829 { 1830 spinlock_t *dst_ptl, *src_ptl; 1831 pud_t pud; 1832 int ret; 1833 1834 dst_ptl = pud_lock(dst_mm, dst_pud); 1835 src_ptl = pud_lockptr(src_mm, src_pud); 1836 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1837 1838 ret = -EAGAIN; 1839 pud = *src_pud; 1840 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) 1841 goto out_unlock; 1842 1843 /* 1844 * TODO: once we support anonymous pages, use 1845 * folio_try_dup_anon_rmap_*() and split if duplicating fails. 1846 */ 1847 if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) { 1848 pudp_set_wrprotect(src_mm, addr, src_pud); 1849 pud = pud_wrprotect(pud); 1850 } 1851 pud = pud_mkold(pud); 1852 set_pud_at(dst_mm, addr, dst_pud, pud); 1853 1854 ret = 0; 1855 out_unlock: 1856 spin_unlock(src_ptl); 1857 spin_unlock(dst_ptl); 1858 return ret; 1859 } 1860 1861 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 1862 { 1863 bool write = vmf->flags & FAULT_FLAG_WRITE; 1864 1865 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); 1866 if (unlikely(!pud_same(*vmf->pud, orig_pud))) 1867 goto unlock; 1868 1869 touch_pud(vmf->vma, vmf->address, vmf->pud, write); 1870 unlock: 1871 spin_unlock(vmf->ptl); 1872 } 1873 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1874 1875 void huge_pmd_set_accessed(struct vm_fault *vmf) 1876 { 1877 bool write = vmf->flags & FAULT_FLAG_WRITE; 1878 1879 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1880 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) 1881 goto unlock; 1882 1883 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); 1884 1885 unlock: 1886 spin_unlock(vmf->ptl); 1887 } 1888 1889 static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf) 1890 { 1891 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1892 struct vm_area_struct *vma = vmf->vma; 1893 struct mmu_notifier_range range; 1894 struct folio *folio; 1895 vm_fault_t ret = 0; 1896 1897 folio = vma_alloc_anon_folio_pmd(vma, vmf->address); 1898 if (unlikely(!folio)) 1899 return VM_FAULT_FALLBACK; 1900 1901 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr, 1902 haddr + HPAGE_PMD_SIZE); 1903 mmu_notifier_invalidate_range_start(&range); 1904 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 1905 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), 
vmf->orig_pmd))) 1906 goto release; 1907 ret = check_stable_address_space(vma->vm_mm); 1908 if (ret) 1909 goto release; 1910 (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd); 1911 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); 1912 goto unlock; 1913 release: 1914 folio_put(folio); 1915 unlock: 1916 spin_unlock(vmf->ptl); 1917 mmu_notifier_invalidate_range_end(&range); 1918 return ret; 1919 } 1920 1921 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) 1922 { 1923 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 1924 struct vm_area_struct *vma = vmf->vma; 1925 struct folio *folio; 1926 struct page *page; 1927 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1928 pmd_t orig_pmd = vmf->orig_pmd; 1929 1930 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); 1931 VM_BUG_ON_VMA(!vma->anon_vma, vma); 1932 1933 if (is_huge_zero_pmd(orig_pmd)) { 1934 vm_fault_t ret = do_huge_zero_wp_pmd(vmf); 1935 1936 if (!(ret & VM_FAULT_FALLBACK)) 1937 return ret; 1938 1939 /* Fallback to splitting PMD if THP cannot be allocated */ 1940 goto fallback; 1941 } 1942 1943 spin_lock(vmf->ptl); 1944 1945 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 1946 spin_unlock(vmf->ptl); 1947 return 0; 1948 } 1949 1950 page = pmd_page(orig_pmd); 1951 folio = page_folio(page); 1952 VM_BUG_ON_PAGE(!PageHead(page), page); 1953 1954 /* Early check when only holding the PT lock. */ 1955 if (PageAnonExclusive(page)) 1956 goto reuse; 1957 1958 if (!folio_trylock(folio)) { 1959 folio_get(folio); 1960 spin_unlock(vmf->ptl); 1961 folio_lock(folio); 1962 spin_lock(vmf->ptl); 1963 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { 1964 spin_unlock(vmf->ptl); 1965 folio_unlock(folio); 1966 folio_put(folio); 1967 return 0; 1968 } 1969 folio_put(folio); 1970 } 1971 1972 /* Recheck after temporarily dropping the PT lock. */ 1973 if (PageAnonExclusive(page)) { 1974 folio_unlock(folio); 1975 goto reuse; 1976 } 1977 1978 /* 1979 * See do_wp_page(): we can only reuse the folio exclusively if 1980 * there are no additional references. Note that we always drain 1981 * the LRU cache immediately after adding a THP. 1982 */ 1983 if (folio_ref_count(folio) > 1984 1 + folio_test_swapcache(folio) * folio_nr_pages(folio)) 1985 goto unlock_fallback; 1986 if (folio_test_swapcache(folio)) 1987 folio_free_swap(folio); 1988 if (folio_ref_count(folio) == 1) { 1989 pmd_t entry; 1990 1991 folio_move_anon_rmap(folio, vma); 1992 SetPageAnonExclusive(page); 1993 folio_unlock(folio); 1994 reuse: 1995 if (unlikely(unshare)) { 1996 spin_unlock(vmf->ptl); 1997 return 0; 1998 } 1999 entry = pmd_mkyoung(orig_pmd); 2000 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 2001 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) 2002 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 2003 spin_unlock(vmf->ptl); 2004 return 0; 2005 } 2006 2007 unlock_fallback: 2008 folio_unlock(folio); 2009 spin_unlock(vmf->ptl); 2010 fallback: 2011 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); 2012 return VM_FAULT_FALLBACK; 2013 } 2014 2015 static inline bool can_change_pmd_writable(struct vm_area_struct *vma, 2016 unsigned long addr, pmd_t pmd) 2017 { 2018 struct page *page; 2019 2020 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) 2021 return false; 2022 2023 /* Don't touch entries that are not even readable (NUMA hinting). */ 2024 if (pmd_protnone(pmd)) 2025 return false; 2026 2027 /* Do we need write faults for softdirty tracking? */ 2028 if (pmd_needs_soft_dirty_wp(vma, pmd)) 2029 return false; 2030 2031 /* Do we need write faults for uffd-wp tracking? 
*/ 2032 if (userfaultfd_huge_pmd_wp(vma, pmd)) 2033 return false; 2034 2035 if (!(vma->vm_flags & VM_SHARED)) { 2036 /* See can_change_pte_writable(). */ 2037 page = vm_normal_page_pmd(vma, addr, pmd); 2038 return page && PageAnon(page) && PageAnonExclusive(page); 2039 } 2040 2041 /* See can_change_pte_writable(). */ 2042 return pmd_dirty(pmd); 2043 } 2044 2045 /* NUMA hinting page fault entry point for trans huge pmds */ 2046 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) 2047 { 2048 struct vm_area_struct *vma = vmf->vma; 2049 struct folio *folio; 2050 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 2051 int nid = NUMA_NO_NODE; 2052 int target_nid, last_cpupid; 2053 pmd_t pmd, old_pmd; 2054 bool writable = false; 2055 int flags = 0; 2056 2057 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 2058 old_pmd = pmdp_get(vmf->pmd); 2059 2060 if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) { 2061 spin_unlock(vmf->ptl); 2062 return 0; 2063 } 2064 2065 pmd = pmd_modify(old_pmd, vma->vm_page_prot); 2066 2067 /* 2068 * Detect now whether the PMD could be writable; this information 2069 * is only valid while holding the PT lock. 2070 */ 2071 writable = pmd_write(pmd); 2072 if (!writable && vma_wants_manual_pte_write_upgrade(vma) && 2073 can_change_pmd_writable(vma, vmf->address, pmd)) 2074 writable = true; 2075 2076 folio = vm_normal_folio_pmd(vma, haddr, pmd); 2077 if (!folio) 2078 goto out_map; 2079 2080 nid = folio_nid(folio); 2081 2082 target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable, 2083 &last_cpupid); 2084 if (target_nid == NUMA_NO_NODE) 2085 goto out_map; 2086 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { 2087 flags |= TNF_MIGRATE_FAIL; 2088 goto out_map; 2089 } 2090 /* The folio is isolated and isolation code holds a folio reference. */ 2091 spin_unlock(vmf->ptl); 2092 writable = false; 2093 2094 if (!migrate_misplaced_folio(folio, target_nid)) { 2095 flags |= TNF_MIGRATED; 2096 nid = target_nid; 2097 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); 2098 return 0; 2099 } 2100 2101 flags |= TNF_MIGRATE_FAIL; 2102 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 2103 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) { 2104 spin_unlock(vmf->ptl); 2105 return 0; 2106 } 2107 out_map: 2108 /* Restore the PMD */ 2109 pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot); 2110 pmd = pmd_mkyoung(pmd); 2111 if (writable) 2112 pmd = pmd_mkwrite(pmd, vma); 2113 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); 2114 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); 2115 spin_unlock(vmf->ptl); 2116 2117 if (nid != NUMA_NO_NODE) 2118 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); 2119 return 0; 2120 } 2121 2122 /* 2123 * Return true if we do MADV_FREE successfully on entire pmd page. 2124 * Otherwise, return false. 
2125 */ 2126 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 2127 pmd_t *pmd, unsigned long addr, unsigned long next) 2128 { 2129 spinlock_t *ptl; 2130 pmd_t orig_pmd; 2131 struct folio *folio; 2132 struct mm_struct *mm = tlb->mm; 2133 bool ret = false; 2134 2135 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 2136 2137 ptl = pmd_trans_huge_lock(pmd, vma); 2138 if (!ptl) 2139 goto out_unlocked; 2140 2141 orig_pmd = *pmd; 2142 if (is_huge_zero_pmd(orig_pmd)) 2143 goto out; 2144 2145 if (unlikely(!pmd_present(orig_pmd))) { 2146 VM_BUG_ON(thp_migration_supported() && 2147 !is_pmd_migration_entry(orig_pmd)); 2148 goto out; 2149 } 2150 2151 folio = pmd_folio(orig_pmd); 2152 /* 2153 * If other processes are mapping this folio, we couldn't discard 2154 * the folio unless they all do MADV_FREE so let's skip the folio. 2155 */ 2156 if (folio_maybe_mapped_shared(folio)) 2157 goto out; 2158 2159 if (!folio_trylock(folio)) 2160 goto out; 2161 2162 /* 2163 * If user want to discard part-pages of THP, split it so MADV_FREE 2164 * will deactivate only them. 2165 */ 2166 if (next - addr != HPAGE_PMD_SIZE) { 2167 folio_get(folio); 2168 spin_unlock(ptl); 2169 split_folio(folio); 2170 folio_unlock(folio); 2171 folio_put(folio); 2172 goto out_unlocked; 2173 } 2174 2175 if (folio_test_dirty(folio)) 2176 folio_clear_dirty(folio); 2177 folio_unlock(folio); 2178 2179 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { 2180 pmdp_invalidate(vma, addr, pmd); 2181 orig_pmd = pmd_mkold(orig_pmd); 2182 orig_pmd = pmd_mkclean(orig_pmd); 2183 2184 set_pmd_at(mm, addr, pmd, orig_pmd); 2185 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 2186 } 2187 2188 folio_mark_lazyfree(folio); 2189 ret = true; 2190 out: 2191 spin_unlock(ptl); 2192 out_unlocked: 2193 return ret; 2194 } 2195 2196 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) 2197 { 2198 pgtable_t pgtable; 2199 2200 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2201 pte_free(mm, pgtable); 2202 mm_dec_nr_ptes(mm); 2203 } 2204 2205 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 2206 pmd_t *pmd, unsigned long addr) 2207 { 2208 pmd_t orig_pmd; 2209 spinlock_t *ptl; 2210 2211 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 2212 2213 ptl = __pmd_trans_huge_lock(pmd, vma); 2214 if (!ptl) 2215 return 0; 2216 /* 2217 * For architectures like ppc64 we look at deposited pgtable 2218 * when calling pmdp_huge_get_and_clear. So do the 2219 * pgtable_trans_huge_withdraw after finishing pmdp related 2220 * operations. 
2221 */ 2222 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, 2223 tlb->fullmm); 2224 arch_check_zapped_pmd(vma, orig_pmd); 2225 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 2226 if (!vma_is_dax(vma) && vma_is_special_huge(vma)) { 2227 if (arch_needs_pgtable_deposit()) 2228 zap_deposited_table(tlb->mm, pmd); 2229 spin_unlock(ptl); 2230 } else if (is_huge_zero_pmd(orig_pmd)) { 2231 if (!vma_is_dax(vma) || arch_needs_pgtable_deposit()) 2232 zap_deposited_table(tlb->mm, pmd); 2233 spin_unlock(ptl); 2234 } else { 2235 struct folio *folio = NULL; 2236 int flush_needed = 1; 2237 2238 if (pmd_present(orig_pmd)) { 2239 struct page *page = pmd_page(orig_pmd); 2240 2241 folio = page_folio(page); 2242 folio_remove_rmap_pmd(folio, page, vma); 2243 WARN_ON_ONCE(folio_mapcount(folio) < 0); 2244 VM_BUG_ON_PAGE(!PageHead(page), page); 2245 } else if (thp_migration_supported()) { 2246 swp_entry_t entry; 2247 2248 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); 2249 entry = pmd_to_swp_entry(orig_pmd); 2250 folio = pfn_swap_entry_folio(entry); 2251 flush_needed = 0; 2252 } else 2253 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); 2254 2255 if (folio_test_anon(folio)) { 2256 zap_deposited_table(tlb->mm, pmd); 2257 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 2258 } else { 2259 if (arch_needs_pgtable_deposit()) 2260 zap_deposited_table(tlb->mm, pmd); 2261 add_mm_counter(tlb->mm, mm_counter_file(folio), 2262 -HPAGE_PMD_NR); 2263 } 2264 2265 spin_unlock(ptl); 2266 if (flush_needed) 2267 tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE); 2268 } 2269 return 1; 2270 } 2271 2272 #ifndef pmd_move_must_withdraw 2273 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, 2274 spinlock_t *old_pmd_ptl, 2275 struct vm_area_struct *vma) 2276 { 2277 /* 2278 * With split pmd lock we also need to move preallocated 2279 * PTE page table if new_pmd is on different PMD page table. 2280 * 2281 * We also don't deposit and withdraw tables for file pages. 2282 */ 2283 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); 2284 } 2285 #endif 2286 2287 static pmd_t move_soft_dirty_pmd(pmd_t pmd) 2288 { 2289 #ifdef CONFIG_MEM_SOFT_DIRTY 2290 if (unlikely(is_pmd_migration_entry(pmd))) 2291 pmd = pmd_swp_mksoft_dirty(pmd); 2292 else if (pmd_present(pmd)) 2293 pmd = pmd_mksoft_dirty(pmd); 2294 #endif 2295 return pmd; 2296 } 2297 2298 static pmd_t clear_uffd_wp_pmd(pmd_t pmd) 2299 { 2300 if (pmd_present(pmd)) 2301 pmd = pmd_clear_uffd_wp(pmd); 2302 else if (is_swap_pmd(pmd)) 2303 pmd = pmd_swp_clear_uffd_wp(pmd); 2304 2305 return pmd; 2306 } 2307 2308 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 2309 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) 2310 { 2311 spinlock_t *old_ptl, *new_ptl; 2312 pmd_t pmd; 2313 struct mm_struct *mm = vma->vm_mm; 2314 bool force_flush = false; 2315 2316 /* 2317 * The destination pmd shouldn't be established, free_pgtables() 2318 * should have released it; but move_page_tables() might have already 2319 * inserted a page table, if racing against shmem/file collapse. 2320 */ 2321 if (!pmd_none(*new_pmd)) { 2322 VM_BUG_ON(pmd_trans_huge(*new_pmd)); 2323 return false; 2324 } 2325 2326 /* 2327 * We don't have to worry about the ordering of src and dst 2328 * ptlocks because exclusive mmap_lock prevents deadlock. 
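	 * (A competing mover of these two ptlocks would itself have to hold
	 * mmap_lock, which we hold exclusively here, so the locks cannot be
	 * taken in the opposite order concurrently and no ordering rule is
	 * needed for the nested locking below.)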
2329 */ 2330 old_ptl = __pmd_trans_huge_lock(old_pmd, vma); 2331 if (old_ptl) { 2332 new_ptl = pmd_lockptr(mm, new_pmd); 2333 if (new_ptl != old_ptl) 2334 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 2335 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 2336 if (pmd_present(pmd)) 2337 force_flush = true; 2338 VM_BUG_ON(!pmd_none(*new_pmd)); 2339 2340 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { 2341 pgtable_t pgtable; 2342 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 2343 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 2344 } 2345 pmd = move_soft_dirty_pmd(pmd); 2346 if (vma_has_uffd_without_event_remap(vma)) 2347 pmd = clear_uffd_wp_pmd(pmd); 2348 set_pmd_at(mm, new_addr, new_pmd, pmd); 2349 if (force_flush) 2350 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); 2351 if (new_ptl != old_ptl) 2352 spin_unlock(new_ptl); 2353 spin_unlock(old_ptl); 2354 return true; 2355 } 2356 return false; 2357 } 2358 2359 /* 2360 * Returns 2361 * - 0 if PMD could not be locked 2362 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary 2363 * or if prot_numa but THP migration is not supported 2364 * - HPAGE_PMD_NR if protections changed and TLB flush necessary 2365 */ 2366 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 2367 pmd_t *pmd, unsigned long addr, pgprot_t newprot, 2368 unsigned long cp_flags) 2369 { 2370 struct mm_struct *mm = vma->vm_mm; 2371 spinlock_t *ptl; 2372 pmd_t oldpmd, entry; 2373 bool prot_numa = cp_flags & MM_CP_PROT_NUMA; 2374 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 2375 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 2376 int ret = 1; 2377 2378 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 2379 2380 if (prot_numa && !thp_migration_supported()) 2381 return 1; 2382 2383 ptl = __pmd_trans_huge_lock(pmd, vma); 2384 if (!ptl) 2385 return 0; 2386 2387 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 2388 if (is_swap_pmd(*pmd)) { 2389 swp_entry_t entry = pmd_to_swp_entry(*pmd); 2390 struct folio *folio = pfn_swap_entry_folio(entry); 2391 pmd_t newpmd; 2392 2393 VM_BUG_ON(!is_pmd_migration_entry(*pmd)); 2394 if (is_writable_migration_entry(entry)) { 2395 /* 2396 * A protection check is difficult so 2397 * just be safe and disable write 2398 */ 2399 if (folio_test_anon(folio)) 2400 entry = make_readable_exclusive_migration_entry(swp_offset(entry)); 2401 else 2402 entry = make_readable_migration_entry(swp_offset(entry)); 2403 newpmd = swp_entry_to_pmd(entry); 2404 if (pmd_swp_soft_dirty(*pmd)) 2405 newpmd = pmd_swp_mksoft_dirty(newpmd); 2406 } else { 2407 newpmd = *pmd; 2408 } 2409 2410 if (uffd_wp) 2411 newpmd = pmd_swp_mkuffd_wp(newpmd); 2412 else if (uffd_wp_resolve) 2413 newpmd = pmd_swp_clear_uffd_wp(newpmd); 2414 if (!pmd_same(*pmd, newpmd)) 2415 set_pmd_at(mm, addr, pmd, newpmd); 2416 goto unlock; 2417 } 2418 #endif 2419 2420 if (prot_numa) { 2421 struct folio *folio; 2422 bool toptier; 2423 /* 2424 * Avoid trapping faults against the zero page. The read-only 2425 * data is likely to be read-cached on the local CPU and 2426 * local/remote hits to the zero page are not interesting. 
2427 */ 2428 if (is_huge_zero_pmd(*pmd)) 2429 goto unlock; 2430 2431 if (pmd_protnone(*pmd)) 2432 goto unlock; 2433 2434 folio = pmd_folio(*pmd); 2435 toptier = node_is_toptier(folio_nid(folio)); 2436 /* 2437 * Skip scanning top tier node if normal numa 2438 * balancing is disabled 2439 */ 2440 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && 2441 toptier) 2442 goto unlock; 2443 2444 if (folio_use_access_time(folio)) 2445 folio_xchg_access_time(folio, 2446 jiffies_to_msecs(jiffies)); 2447 } 2448 /* 2449 * In case prot_numa, we are under mmap_read_lock(mm). It's critical 2450 * to not clear pmd intermittently to avoid race with MADV_DONTNEED 2451 * which is also under mmap_read_lock(mm): 2452 * 2453 * CPU0: CPU1: 2454 * change_huge_pmd(prot_numa=1) 2455 * pmdp_huge_get_and_clear_notify() 2456 * madvise_dontneed() 2457 * zap_pmd_range() 2458 * pmd_trans_huge(*pmd) == 0 (without ptl) 2459 * // skip the pmd 2460 * set_pmd_at(); 2461 * // pmd is re-established 2462 * 2463 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it 2464 * which may break userspace. 2465 * 2466 * pmdp_invalidate_ad() is required to make sure we don't miss 2467 * dirty/young flags set by hardware. 2468 */ 2469 oldpmd = pmdp_invalidate_ad(vma, addr, pmd); 2470 2471 entry = pmd_modify(oldpmd, newprot); 2472 if (uffd_wp) 2473 entry = pmd_mkuffd_wp(entry); 2474 else if (uffd_wp_resolve) 2475 /* 2476 * Leave the write bit to be handled by PF interrupt 2477 * handler, then things like COW could be properly 2478 * handled. 2479 */ 2480 entry = pmd_clear_uffd_wp(entry); 2481 2482 /* See change_pte_range(). */ 2483 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) && 2484 can_change_pmd_writable(vma, addr, entry)) 2485 entry = pmd_mkwrite(entry, vma); 2486 2487 ret = HPAGE_PMD_NR; 2488 set_pmd_at(mm, addr, pmd, entry); 2489 2490 if (huge_pmd_needs_flush(oldpmd, entry)) 2491 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE); 2492 unlock: 2493 spin_unlock(ptl); 2494 return ret; 2495 } 2496 2497 /* 2498 * Returns: 2499 * 2500 * - 0: if pud leaf changed from under us 2501 * - 1: if pud can be skipped 2502 * - HPAGE_PUD_NR: if pud was successfully processed 2503 */ 2504 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 2505 int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 2506 pud_t *pudp, unsigned long addr, pgprot_t newprot, 2507 unsigned long cp_flags) 2508 { 2509 struct mm_struct *mm = vma->vm_mm; 2510 pud_t oldpud, entry; 2511 spinlock_t *ptl; 2512 2513 tlb_change_page_size(tlb, HPAGE_PUD_SIZE); 2514 2515 /* NUMA balancing doesn't apply to dax */ 2516 if (cp_flags & MM_CP_PROT_NUMA) 2517 return 1; 2518 2519 /* 2520 * Huge entries on userfault-wp only works with anonymous, while we 2521 * don't have anonymous PUDs yet. 2522 */ 2523 if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL)) 2524 return 1; 2525 2526 ptl = __pud_trans_huge_lock(pudp, vma); 2527 if (!ptl) 2528 return 0; 2529 2530 /* 2531 * Can't clear PUD or it can race with concurrent zapping. See 2532 * change_huge_pmd(). 2533 */ 2534 oldpud = pudp_invalidate(vma, addr, pudp); 2535 entry = pud_modify(oldpud, newprot); 2536 set_pud_at(mm, addr, pudp, entry); 2537 tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE); 2538 2539 spin_unlock(ptl); 2540 return HPAGE_PUD_NR; 2541 } 2542 #endif 2543 2544 #ifdef CONFIG_USERFAULTFD 2545 /* 2546 * The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by 2547 * the caller, but it must return after releasing the page_table_lock. 
2548 * Just move the page from src_pmd to dst_pmd if possible. 2549 * Return zero if succeeded in moving the page, -EAGAIN if it needs to be 2550 * repeated by the caller, or other errors in case of failure. 2551 */ 2552 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval, 2553 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, 2554 unsigned long dst_addr, unsigned long src_addr) 2555 { 2556 pmd_t _dst_pmd, src_pmdval; 2557 struct page *src_page; 2558 struct folio *src_folio; 2559 struct anon_vma *src_anon_vma; 2560 spinlock_t *src_ptl, *dst_ptl; 2561 pgtable_t src_pgtable; 2562 struct mmu_notifier_range range; 2563 int err = 0; 2564 2565 src_pmdval = *src_pmd; 2566 src_ptl = pmd_lockptr(mm, src_pmd); 2567 2568 lockdep_assert_held(src_ptl); 2569 vma_assert_locked(src_vma); 2570 vma_assert_locked(dst_vma); 2571 2572 /* Sanity checks before the operation */ 2573 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) || 2574 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) { 2575 spin_unlock(src_ptl); 2576 return -EINVAL; 2577 } 2578 2579 if (!pmd_trans_huge(src_pmdval)) { 2580 spin_unlock(src_ptl); 2581 if (is_pmd_migration_entry(src_pmdval)) { 2582 pmd_migration_entry_wait(mm, &src_pmdval); 2583 return -EAGAIN; 2584 } 2585 return -ENOENT; 2586 } 2587 2588 src_page = pmd_page(src_pmdval); 2589 2590 if (!is_huge_zero_pmd(src_pmdval)) { 2591 if (unlikely(!PageAnonExclusive(src_page))) { 2592 spin_unlock(src_ptl); 2593 return -EBUSY; 2594 } 2595 2596 src_folio = page_folio(src_page); 2597 folio_get(src_folio); 2598 } else 2599 src_folio = NULL; 2600 2601 spin_unlock(src_ptl); 2602 2603 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE); 2604 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr, 2605 src_addr + HPAGE_PMD_SIZE); 2606 mmu_notifier_invalidate_range_start(&range); 2607 2608 if (src_folio) { 2609 folio_lock(src_folio); 2610 2611 /* 2612 * split_huge_page walks the anon_vma chain without the page 2613 * lock. Serialize against it with the anon_vma lock, the page 2614 * lock is not enough. 2615 */ 2616 src_anon_vma = folio_get_anon_vma(src_folio); 2617 if (!src_anon_vma) { 2618 err = -EAGAIN; 2619 goto unlock_folio; 2620 } 2621 anon_vma_lock_write(src_anon_vma); 2622 } else 2623 src_anon_vma = NULL; 2624 2625 dst_ptl = pmd_lockptr(mm, dst_pmd); 2626 double_pt_lock(src_ptl, dst_ptl); 2627 if (unlikely(!pmd_same(*src_pmd, src_pmdval) || 2628 !pmd_same(*dst_pmd, dst_pmdval))) { 2629 err = -EAGAIN; 2630 goto unlock_ptls; 2631 } 2632 if (src_folio) { 2633 if (folio_maybe_dma_pinned(src_folio) || 2634 !PageAnonExclusive(&src_folio->page)) { 2635 err = -EBUSY; 2636 goto unlock_ptls; 2637 } 2638 2639 if (WARN_ON_ONCE(!folio_test_head(src_folio)) || 2640 WARN_ON_ONCE(!folio_test_anon(src_folio))) { 2641 err = -EBUSY; 2642 goto unlock_ptls; 2643 } 2644 2645 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); 2646 /* Folio got pinned from under us. Put it back and fail the move. 
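		 * (GUP-fast runs without the PT lock, so a pin may have been
		 * taken after the earlier folio_maybe_dma_pinned() check. Once
		 * the PMD is cleared and flushed, GUP-fast can no longer
		 * succeed through this mapping, so rechecking here catches any
		 * pin that raced in.)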
*/ 2647 if (folio_maybe_dma_pinned(src_folio)) { 2648 set_pmd_at(mm, src_addr, src_pmd, src_pmdval); 2649 err = -EBUSY; 2650 goto unlock_ptls; 2651 } 2652 2653 folio_move_anon_rmap(src_folio, dst_vma); 2654 src_folio->index = linear_page_index(dst_vma, dst_addr); 2655 2656 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot); 2657 /* Follow mremap() behavior and treat the entry dirty after the move */ 2658 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma); 2659 } else { 2660 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); 2661 _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot); 2662 } 2663 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd); 2664 2665 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd); 2666 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable); 2667 unlock_ptls: 2668 double_pt_unlock(src_ptl, dst_ptl); 2669 if (src_anon_vma) { 2670 anon_vma_unlock_write(src_anon_vma); 2671 put_anon_vma(src_anon_vma); 2672 } 2673 unlock_folio: 2674 /* unblock rmap walks */ 2675 if (src_folio) 2676 folio_unlock(src_folio); 2677 mmu_notifier_invalidate_range_end(&range); 2678 if (src_folio) 2679 folio_put(src_folio); 2680 return err; 2681 } 2682 #endif /* CONFIG_USERFAULTFD */ 2683 2684 /* 2685 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 2686 * 2687 * Note that if it returns page table lock pointer, this routine returns without 2688 * unlocking page table lock. So callers must unlock it. 2689 */ 2690 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 2691 { 2692 spinlock_t *ptl; 2693 ptl = pmd_lock(vma->vm_mm, pmd); 2694 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || 2695 pmd_devmap(*pmd))) 2696 return ptl; 2697 spin_unlock(ptl); 2698 return NULL; 2699 } 2700 2701 /* 2702 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. 2703 * 2704 * Note that if it returns page table lock pointer, this routine returns without 2705 * unlocking page table lock. So callers must unlock it. 
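 *
 * A minimal usage sketch (illustrative; zap_huge_pud() below follows the
 * same pattern):
 *
 *	ptl = __pud_trans_huge_lock(pud, vma);
 *	if (!ptl)
 *		return 0;
 *	... read or modify *pud while holding ptl ...
 *	spin_unlock(ptl);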
2706 */ 2707 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 2708 { 2709 spinlock_t *ptl; 2710 2711 ptl = pud_lock(vma->vm_mm, pud); 2712 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 2713 return ptl; 2714 spin_unlock(ptl); 2715 return NULL; 2716 } 2717 2718 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 2719 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 2720 pud_t *pud, unsigned long addr) 2721 { 2722 spinlock_t *ptl; 2723 pud_t orig_pud; 2724 2725 ptl = __pud_trans_huge_lock(pud, vma); 2726 if (!ptl) 2727 return 0; 2728 2729 orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm); 2730 arch_check_zapped_pud(vma, orig_pud); 2731 tlb_remove_pud_tlb_entry(tlb, pud, addr); 2732 if (!vma_is_dax(vma) && vma_is_special_huge(vma)) { 2733 spin_unlock(ptl); 2734 /* No zero page support yet */ 2735 } else { 2736 struct page *page = NULL; 2737 struct folio *folio; 2738 2739 /* No support for anonymous PUD pages or migration yet */ 2740 VM_WARN_ON_ONCE(vma_is_anonymous(vma) || 2741 !pud_present(orig_pud)); 2742 2743 page = pud_page(orig_pud); 2744 folio = page_folio(page); 2745 folio_remove_rmap_pud(folio, page, vma); 2746 add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR); 2747 2748 spin_unlock(ptl); 2749 tlb_remove_page_size(tlb, page, HPAGE_PUD_SIZE); 2750 } 2751 return 1; 2752 } 2753 2754 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 2755 unsigned long haddr) 2756 { 2757 struct folio *folio; 2758 struct page *page; 2759 pud_t old_pud; 2760 2761 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 2762 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2763 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 2764 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); 2765 2766 count_vm_event(THP_SPLIT_PUD); 2767 2768 old_pud = pudp_huge_clear_flush(vma, haddr, pud); 2769 2770 if (!vma_is_dax(vma)) 2771 return; 2772 2773 page = pud_page(old_pud); 2774 folio = page_folio(page); 2775 2776 if (!folio_test_dirty(folio) && pud_dirty(old_pud)) 2777 folio_mark_dirty(folio); 2778 if (!folio_test_referenced(folio) && pud_young(old_pud)) 2779 folio_set_referenced(folio); 2780 folio_remove_rmap_pud(folio, page, vma); 2781 folio_put(folio); 2782 add_mm_counter(vma->vm_mm, mm_counter_file(folio), 2783 -HPAGE_PUD_NR); 2784 } 2785 2786 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 2787 unsigned long address) 2788 { 2789 spinlock_t *ptl; 2790 struct mmu_notifier_range range; 2791 2792 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 2793 address & HPAGE_PUD_MASK, 2794 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); 2795 mmu_notifier_invalidate_range_start(&range); 2796 ptl = pud_lock(vma->vm_mm, pud); 2797 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 2798 goto out; 2799 __split_huge_pud_locked(vma, pud, range.start); 2800 2801 out: 2802 spin_unlock(ptl); 2803 mmu_notifier_invalidate_range_end(&range); 2804 } 2805 #else 2806 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 2807 unsigned long address) 2808 { 2809 } 2810 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 2811 2812 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2813 unsigned long haddr, pmd_t *pmd) 2814 { 2815 struct mm_struct *mm = vma->vm_mm; 2816 pgtable_t pgtable; 2817 pmd_t _pmd, old_pmd; 2818 unsigned long addr; 2819 pte_t *pte; 2820 int i; 2821 2822 /* 2823 * Leave pmd empty until pte is filled note that it is fine to delay 2824 * notification until 
mmu_notifier_invalidate_range_end() as we are 2825 * replacing a zero pmd write protected page with a zero pte write 2826 * protected page. 2827 * 2828 * See Documentation/mm/mmu_notifier.rst 2829 */ 2830 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); 2831 2832 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2833 pmd_populate(mm, &_pmd, pgtable); 2834 2835 pte = pte_offset_map(&_pmd, haddr); 2836 VM_BUG_ON(!pte); 2837 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 2838 pte_t entry; 2839 2840 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot); 2841 entry = pte_mkspecial(entry); 2842 if (pmd_uffd_wp(old_pmd)) 2843 entry = pte_mkuffd_wp(entry); 2844 VM_BUG_ON(!pte_none(ptep_get(pte))); 2845 set_pte_at(mm, addr, pte, entry); 2846 pte++; 2847 } 2848 pte_unmap(pte - 1); 2849 smp_wmb(); /* make pte visible before pmd */ 2850 pmd_populate(mm, pmd, pgtable); 2851 } 2852 2853 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 2854 unsigned long haddr, bool freeze) 2855 { 2856 struct mm_struct *mm = vma->vm_mm; 2857 struct folio *folio; 2858 struct page *page; 2859 pgtable_t pgtable; 2860 pmd_t old_pmd, _pmd; 2861 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; 2862 bool anon_exclusive = false, dirty = false; 2863 unsigned long addr; 2864 pte_t *pte; 2865 int i; 2866 2867 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2868 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2869 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 2870 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) 2871 && !pmd_devmap(*pmd)); 2872 2873 count_vm_event(THP_SPLIT_PMD); 2874 2875 if (!vma_is_anonymous(vma)) { 2876 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); 2877 /* 2878 * We are going to unmap this huge page. So 2879 * just go ahead and zap it 2880 */ 2881 if (arch_needs_pgtable_deposit()) 2882 zap_deposited_table(mm, pmd); 2883 if (!vma_is_dax(vma) && vma_is_special_huge(vma)) 2884 return; 2885 if (unlikely(is_pmd_migration_entry(old_pmd))) { 2886 swp_entry_t entry; 2887 2888 entry = pmd_to_swp_entry(old_pmd); 2889 folio = pfn_swap_entry_folio(entry); 2890 } else if (is_huge_zero_pmd(old_pmd)) { 2891 return; 2892 } else { 2893 page = pmd_page(old_pmd); 2894 folio = page_folio(page); 2895 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd)) 2896 folio_mark_dirty(folio); 2897 if (!folio_test_referenced(folio) && pmd_young(old_pmd)) 2898 folio_set_referenced(folio); 2899 folio_remove_rmap_pmd(folio, page, vma); 2900 folio_put(folio); 2901 } 2902 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR); 2903 return; 2904 } 2905 2906 if (is_huge_zero_pmd(*pmd)) { 2907 /* 2908 * FIXME: Do we want to invalidate secondary mmu by calling 2909 * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below 2910 * inside __split_huge_pmd() ? 2911 * 2912 * We are going from a zero huge page write protected to zero 2913 * small page also write protected so it does not seems useful 2914 * to invalidate secondary mmu at this time. 
2915 */ 2916 return __split_huge_zero_page_pmd(vma, haddr, pmd); 2917 } 2918 2919 pmd_migration = is_pmd_migration_entry(*pmd); 2920 if (unlikely(pmd_migration)) { 2921 swp_entry_t entry; 2922 2923 old_pmd = *pmd; 2924 entry = pmd_to_swp_entry(old_pmd); 2925 page = pfn_swap_entry_to_page(entry); 2926 write = is_writable_migration_entry(entry); 2927 if (PageAnon(page)) 2928 anon_exclusive = is_readable_exclusive_migration_entry(entry); 2929 young = is_migration_entry_young(entry); 2930 dirty = is_migration_entry_dirty(entry); 2931 soft_dirty = pmd_swp_soft_dirty(old_pmd); 2932 uffd_wp = pmd_swp_uffd_wp(old_pmd); 2933 } else { 2934 /* 2935 * Up to this point the pmd is present and huge and userland has 2936 * the whole access to the hugepage during the split (which 2937 * happens in place). If we overwrite the pmd with the not-huge 2938 * version pointing to the pte here (which of course we could if 2939 * all CPUs were bug free), userland could trigger a small page 2940 * size TLB miss on the small sized TLB while the hugepage TLB 2941 * entry is still established in the huge TLB. Some CPU doesn't 2942 * like that. See 2943 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum 2944 * 383 on page 105. Intel should be safe but is also warns that 2945 * it's only safe if the permission and cache attributes of the 2946 * two entries loaded in the two TLB is identical (which should 2947 * be the case here). But it is generally safer to never allow 2948 * small and huge TLB entries for the same virtual address to be 2949 * loaded simultaneously. So instead of doing "pmd_populate(); 2950 * flush_pmd_tlb_range();" we first mark the current pmd 2951 * notpresent (atomically because here the pmd_trans_huge must 2952 * remain set at all times on the pmd until the split is 2953 * complete for this pmd), then we flush the SMP TLB and finally 2954 * we write the non-huge version of the pmd entry with 2955 * pmd_populate. 2956 */ 2957 old_pmd = pmdp_invalidate(vma, haddr, pmd); 2958 page = pmd_page(old_pmd); 2959 folio = page_folio(page); 2960 if (pmd_dirty(old_pmd)) { 2961 dirty = true; 2962 folio_set_dirty(folio); 2963 } 2964 write = pmd_write(old_pmd); 2965 young = pmd_young(old_pmd); 2966 soft_dirty = pmd_soft_dirty(old_pmd); 2967 uffd_wp = pmd_uffd_wp(old_pmd); 2968 2969 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio); 2970 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); 2971 2972 /* 2973 * Without "freeze", we'll simply split the PMD, propagating the 2974 * PageAnonExclusive() flag for each PTE by setting it for 2975 * each subpage -- no need to (temporarily) clear. 2976 * 2977 * With "freeze" we want to replace mapped pages by 2978 * migration entries right away. This is only possible if we 2979 * managed to clear PageAnonExclusive() -- see 2980 * set_pmd_migration_entry(). 2981 * 2982 * In case we cannot clear PageAnonExclusive(), split the PMD 2983 * only and let try_to_migrate_one() fail later. 2984 * 2985 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first. 2986 */ 2987 anon_exclusive = PageAnonExclusive(page); 2988 if (freeze && anon_exclusive && 2989 folio_try_share_anon_rmap_pmd(folio, page)) 2990 freeze = false; 2991 if (!freeze) { 2992 rmap_t rmap_flags = RMAP_NONE; 2993 2994 folio_ref_add(folio, HPAGE_PMD_NR - 1); 2995 if (anon_exclusive) 2996 rmap_flags |= RMAP_EXCLUSIVE; 2997 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR, 2998 vma, haddr, rmap_flags); 2999 } 3000 } 3001 3002 /* 3003 * Withdraw the table only after we mark the pmd entry invalid. 
3004 * This's critical for some architectures (Power). 3005 */ 3006 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 3007 pmd_populate(mm, &_pmd, pgtable); 3008 3009 pte = pte_offset_map(&_pmd, haddr); 3010 VM_BUG_ON(!pte); 3011 3012 /* 3013 * Note that NUMA hinting access restrictions are not transferred to 3014 * avoid any possibility of altering permissions across VMAs. 3015 */ 3016 if (freeze || pmd_migration) { 3017 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { 3018 pte_t entry; 3019 swp_entry_t swp_entry; 3020 3021 if (write) 3022 swp_entry = make_writable_migration_entry( 3023 page_to_pfn(page + i)); 3024 else if (anon_exclusive) 3025 swp_entry = make_readable_exclusive_migration_entry( 3026 page_to_pfn(page + i)); 3027 else 3028 swp_entry = make_readable_migration_entry( 3029 page_to_pfn(page + i)); 3030 if (young) 3031 swp_entry = make_migration_entry_young(swp_entry); 3032 if (dirty) 3033 swp_entry = make_migration_entry_dirty(swp_entry); 3034 entry = swp_entry_to_pte(swp_entry); 3035 if (soft_dirty) 3036 entry = pte_swp_mksoft_dirty(entry); 3037 if (uffd_wp) 3038 entry = pte_swp_mkuffd_wp(entry); 3039 3040 VM_WARN_ON(!pte_none(ptep_get(pte + i))); 3041 set_pte_at(mm, addr, pte + i, entry); 3042 } 3043 } else { 3044 pte_t entry; 3045 3046 entry = mk_pte(page, READ_ONCE(vma->vm_page_prot)); 3047 if (write) 3048 entry = pte_mkwrite(entry, vma); 3049 if (!young) 3050 entry = pte_mkold(entry); 3051 /* NOTE: this may set soft-dirty too on some archs */ 3052 if (dirty) 3053 entry = pte_mkdirty(entry); 3054 if (soft_dirty) 3055 entry = pte_mksoft_dirty(entry); 3056 if (uffd_wp) 3057 entry = pte_mkuffd_wp(entry); 3058 3059 for (i = 0; i < HPAGE_PMD_NR; i++) 3060 VM_WARN_ON(!pte_none(ptep_get(pte + i))); 3061 3062 set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR); 3063 } 3064 pte_unmap(pte); 3065 3066 if (!pmd_migration) 3067 folio_remove_rmap_pmd(folio, page, vma); 3068 if (freeze) 3069 put_page(page); 3070 3071 smp_wmb(); /* make pte visible before pmd */ 3072 pmd_populate(mm, pmd, pgtable); 3073 } 3074 3075 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address, 3076 pmd_t *pmd, bool freeze, struct folio *folio) 3077 { 3078 bool pmd_migration = is_pmd_migration_entry(*pmd); 3079 3080 VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio)); 3081 VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE)); 3082 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio)); 3083 VM_BUG_ON(freeze && !folio); 3084 3085 /* 3086 * When the caller requests to set up a migration entry, we 3087 * require a folio to check the PMD against. Otherwise, there 3088 * is a risk of replacing the wrong folio. 3089 */ 3090 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || pmd_migration) { 3091 /* 3092 * Do not apply pmd_folio() to a migration entry; and folio lock 3093 * guarantees that it must be of the wrong folio anyway. 
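		 * (While we hold the folio lock the folio cannot be under
		 * migration, so a migration entry seen here cannot map the
		 * folio we were given.)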
3094 */ 3095 if (folio && (pmd_migration || folio != pmd_folio(*pmd))) 3096 return; 3097 __split_huge_pmd_locked(vma, pmd, address, freeze); 3098 } 3099 } 3100 3101 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 3102 unsigned long address, bool freeze, struct folio *folio) 3103 { 3104 spinlock_t *ptl; 3105 struct mmu_notifier_range range; 3106 3107 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 3108 address & HPAGE_PMD_MASK, 3109 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); 3110 mmu_notifier_invalidate_range_start(&range); 3111 ptl = pmd_lock(vma->vm_mm, pmd); 3112 split_huge_pmd_locked(vma, range.start, pmd, freeze, folio); 3113 spin_unlock(ptl); 3114 mmu_notifier_invalidate_range_end(&range); 3115 } 3116 3117 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 3118 bool freeze, struct folio *folio) 3119 { 3120 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); 3121 3122 if (!pmd) 3123 return; 3124 3125 __split_huge_pmd(vma, pmd, address, freeze, folio); 3126 } 3127 3128 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) 3129 { 3130 /* 3131 * If the new address isn't hpage aligned and it could previously 3132 * contain an hugepage: check if we need to split an huge pmd. 3133 */ 3134 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) && 3135 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), 3136 ALIGN(address, HPAGE_PMD_SIZE))) 3137 split_huge_pmd_address(vma, address, false, NULL); 3138 } 3139 3140 void vma_adjust_trans_huge(struct vm_area_struct *vma, 3141 unsigned long start, 3142 unsigned long end, 3143 struct vm_area_struct *next) 3144 { 3145 /* Check if we need to split start first. */ 3146 split_huge_pmd_if_needed(vma, start); 3147 3148 /* Check if we need to split end next. */ 3149 split_huge_pmd_if_needed(vma, end); 3150 3151 /* If we're incrementing next->vm_start, we might need to split it. */ 3152 if (next) 3153 split_huge_pmd_if_needed(next, end); 3154 } 3155 3156 static void unmap_folio(struct folio *folio) 3157 { 3158 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC | 3159 TTU_BATCH_FLUSH; 3160 3161 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); 3162 3163 if (folio_test_pmd_mappable(folio)) 3164 ttu_flags |= TTU_SPLIT_HUGE_PMD; 3165 3166 /* 3167 * Anon pages need migration entries to preserve them, but file 3168 * pages can simply be left unmapped, then faulted back on demand. 3169 * If that is ever changed (perhaps for mlock), update remap_page(). 
3170 */ 3171 if (folio_test_anon(folio)) 3172 try_to_migrate(folio, ttu_flags); 3173 else 3174 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); 3175 3176 try_to_unmap_flush(); 3177 } 3178 3179 static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma, 3180 unsigned long addr, pmd_t *pmdp, 3181 struct folio *folio) 3182 { 3183 struct mm_struct *mm = vma->vm_mm; 3184 int ref_count, map_count; 3185 pmd_t orig_pmd = *pmdp; 3186 3187 if (pmd_dirty(orig_pmd)) 3188 folio_set_dirty(folio); 3189 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { 3190 folio_set_swapbacked(folio); 3191 return false; 3192 } 3193 3194 orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp); 3195 3196 /* 3197 * Syncing against concurrent GUP-fast: 3198 * - clear PMD; barrier; read refcount 3199 * - inc refcount; barrier; read PMD 3200 */ 3201 smp_mb(); 3202 3203 ref_count = folio_ref_count(folio); 3204 map_count = folio_mapcount(folio); 3205 3206 /* 3207 * Order reads for folio refcount and dirty flag 3208 * (see comments in __remove_mapping()). 3209 */ 3210 smp_rmb(); 3211 3212 /* 3213 * If the folio or its PMD is redirtied at this point, or if there 3214 * are unexpected references, we will give up to discard this folio 3215 * and remap it. 3216 * 3217 * The only folio refs must be one from isolation plus the rmap(s). 3218 */ 3219 if (pmd_dirty(orig_pmd)) 3220 folio_set_dirty(folio); 3221 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { 3222 folio_set_swapbacked(folio); 3223 set_pmd_at(mm, addr, pmdp, orig_pmd); 3224 return false; 3225 } 3226 3227 if (ref_count != map_count + 1) { 3228 set_pmd_at(mm, addr, pmdp, orig_pmd); 3229 return false; 3230 } 3231 3232 folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma); 3233 zap_deposited_table(mm, pmdp); 3234 add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR); 3235 if (vma->vm_flags & VM_LOCKED) 3236 mlock_drain_local(); 3237 folio_put(folio); 3238 3239 return true; 3240 } 3241 3242 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr, 3243 pmd_t *pmdp, struct folio *folio) 3244 { 3245 VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio); 3246 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 3247 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); 3248 VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio); 3249 VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE)); 3250 3251 return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio); 3252 } 3253 3254 static void remap_page(struct folio *folio, unsigned long nr, int flags) 3255 { 3256 int i = 0; 3257 3258 /* If unmap_folio() uses try_to_migrate() on file, remove this check */ 3259 if (!folio_test_anon(folio)) 3260 return; 3261 for (;;) { 3262 remove_migration_ptes(folio, folio, RMP_LOCKED | flags); 3263 i += folio_nr_pages(folio); 3264 if (i >= nr) 3265 break; 3266 folio = folio_next(folio); 3267 } 3268 } 3269 3270 static void lru_add_split_folio(struct folio *folio, struct folio *new_folio, 3271 struct lruvec *lruvec, struct list_head *list) 3272 { 3273 VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio); 3274 lockdep_assert_held(&lruvec->lru_lock); 3275 3276 if (list) { 3277 /* page reclaim is reclaiming a huge page */ 3278 VM_WARN_ON(folio_test_lru(folio)); 3279 folio_get(new_folio); 3280 list_add_tail(&new_folio->lru, list); 3281 } else { 3282 /* head is still on lru (and we have it frozen) */ 3283 VM_WARN_ON(!folio_test_lru(folio)); 3284 if (folio_test_unevictable(folio)) 3285 new_folio->mlock_count = 0; 3286 else 3287 list_add_tail(&new_folio->lru, 
&folio->lru); 3288 folio_set_lru(new_folio); 3289 } 3290 } 3291 3292 /* Racy check whether the huge page can be split */ 3293 bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins) 3294 { 3295 int extra_pins; 3296 3297 /* Additional pins from page cache */ 3298 if (folio_test_anon(folio)) 3299 extra_pins = folio_test_swapcache(folio) ? 3300 folio_nr_pages(folio) : 0; 3301 else 3302 extra_pins = folio_nr_pages(folio); 3303 if (pextra_pins) 3304 *pextra_pins = extra_pins; 3305 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 3306 caller_pins; 3307 } 3308 3309 /* 3310 * It splits @folio into @new_order folios and copies the @folio metadata to 3311 * all the resulting folios. 3312 */ 3313 static void __split_folio_to_order(struct folio *folio, int old_order, 3314 int new_order) 3315 { 3316 long new_nr_pages = 1 << new_order; 3317 long nr_pages = 1 << old_order; 3318 long i; 3319 3320 /* 3321 * Skip the first new_nr_pages, since the new folio from them have all 3322 * the flags from the original folio. 3323 */ 3324 for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) { 3325 struct page *new_head = &folio->page + i; 3326 3327 /* 3328 * Careful: new_folio is not a "real" folio before we cleared PageTail. 3329 * Don't pass it around before clear_compound_head(). 3330 */ 3331 struct folio *new_folio = (struct folio *)new_head; 3332 3333 VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head); 3334 3335 /* 3336 * Clone page flags before unfreezing refcount. 3337 * 3338 * After successful get_page_unless_zero() might follow flags change, 3339 * for example lock_page() which set PG_waiters. 3340 * 3341 * Note that for mapped sub-pages of an anonymous THP, 3342 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in 3343 * the migration entry instead from where remap_page() will restore it. 3344 * We can still have PG_anon_exclusive set on effectively unmapped and 3345 * unreferenced sub-pages of an anonymous THP: we can simply drop 3346 * PG_anon_exclusive (-> PG_mappedtodisk) for these here. 3347 */ 3348 new_folio->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 3349 new_folio->flags |= (folio->flags & 3350 ((1L << PG_referenced) | 3351 (1L << PG_swapbacked) | 3352 (1L << PG_swapcache) | 3353 (1L << PG_mlocked) | 3354 (1L << PG_uptodate) | 3355 (1L << PG_active) | 3356 (1L << PG_workingset) | 3357 (1L << PG_locked) | 3358 (1L << PG_unevictable) | 3359 #ifdef CONFIG_ARCH_USES_PG_ARCH_2 3360 (1L << PG_arch_2) | 3361 #endif 3362 #ifdef CONFIG_ARCH_USES_PG_ARCH_3 3363 (1L << PG_arch_3) | 3364 #endif 3365 (1L << PG_dirty) | 3366 LRU_GEN_MASK | LRU_REFS_MASK)); 3367 3368 new_folio->mapping = folio->mapping; 3369 new_folio->index = folio->index + i; 3370 3371 /* 3372 * page->private should not be set in tail pages. Fix up and warn once 3373 * if private is unexpectedly set. 3374 */ 3375 if (unlikely(new_folio->private)) { 3376 VM_WARN_ON_ONCE_PAGE(true, new_head); 3377 new_folio->private = NULL; 3378 } 3379 3380 if (folio_test_swapcache(folio)) 3381 new_folio->swap.val = folio->swap.val + i; 3382 3383 /* Page flags must be visible before we make the page non-compound. */ 3384 smp_wmb(); 3385 3386 /* 3387 * Clear PageTail before unfreezing page refcount. 3388 * 3389 * After successful get_page_unless_zero() might follow put_page() 3390 * which needs correct compound_head(). 
		 */
		clear_compound_head(new_head);
		if (new_order) {
			prep_compound_page(new_head, new_order);
			folio_set_large_rmappable(new_folio);
		}

		if (folio_test_young(folio))
			folio_set_young(new_folio);
		if (folio_test_idle(folio))
			folio_set_idle(new_folio);
#ifdef CONFIG_MEMCG
		new_folio->memcg_data = folio->memcg_data;
#endif

		folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
	}

	if (new_order)
		folio_set_order(folio, new_order);
	else
		ClearPageCompound(&folio->page);
}

/*
 * It splits an unmapped @folio to lower-order smaller folios in two ways.
 * @folio: the to-be-split folio
 * @new_order: the smallest order of the after-split folios (since buddy
 *             allocator like split generates folios with orders from @folio's
 *             order - 1 to new_order).
 * @split_at: in buddy allocator like split, the folio containing @split_at
 *            will be split until its order becomes @new_order.
 * @lock_at: the folio containing @lock_at is left locked for caller.
 * @list: the after-split folios will be added to @list if it is not NULL,
 *        otherwise to LRU lists.
 * @end: the end of the file @folio maps to. -1 if @folio is anonymous memory.
 * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller
 * @mapping: @folio->mapping
 * @uniform_split: if the split is uniform or not (buddy allocator like split)
 *
 * 1. uniform split: the given @folio is split into multiple @new_order small
 *    folios, where all small folios have the same order. This is done when
 *    uniform_split is true.
 * 2. buddy allocator like (non-uniform) split: the given @folio is split into
 *    half, and the half containing the given page is split into half again
 *    until the given @page's order becomes @new_order. This is done when
 *    uniform_split is false.
 *
 * The high level flow for these two methods is:
 * 1. uniform split: a single __split_folio_to_order() is called to split the
 *    @folio into @new_order, then we traverse all the resulting folios one by
 *    one in PFN ascending order and perform stats, unfreeze, adding to list,
 *    and file mapping index operations.
 * 2. non-uniform split: in general, folio_order - @new_order calls to
 *    __split_folio_to_order() are made in a for loop to split the @folio
 *    to one lower order at a time. The resulting small folios are processed
 *    as in the traversal of 1, except the one containing @page, which is
 *    split again in the next iteration of the loop.
 *
 * After splitting, the caller's folio reference will be transferred to the
 * folio containing @page. The other folios may be freed if they are not mapped.
 *
 * In terms of locking, after splitting,
 * 1. uniform split leaves @page (or the folio containing it) locked;
 * 2. buddy allocator like (non-uniform) split leaves @folio locked.
 *
 * For !uniform_split, when -ENOMEM is returned, the original folio might be
 * split. The caller needs to check the input folio.
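 *
 * For example, splitting an order-9 folio to new_order = 3:
 * - uniform split produces 64 order-3 folios in a single
 *   __split_folio_to_order() call;
 * - buddy allocator like split halves the folio containing @split_at one
 *   order at a time, leaving one folio each of order 8, 7, 6, 5 and 4 plus
 *   two folios of order 3.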
3461 */ 3462 static int __split_unmapped_folio(struct folio *folio, int new_order, 3463 struct page *split_at, struct page *lock_at, 3464 struct list_head *list, pgoff_t end, 3465 struct xa_state *xas, struct address_space *mapping, 3466 bool uniform_split) 3467 { 3468 struct lruvec *lruvec; 3469 struct address_space *swap_cache = NULL; 3470 struct folio *origin_folio = folio; 3471 struct folio *next_folio = folio_next(folio); 3472 struct folio *new_folio; 3473 struct folio *next; 3474 int order = folio_order(folio); 3475 int split_order; 3476 int start_order = uniform_split ? new_order : order - 1; 3477 int nr_dropped = 0; 3478 int ret = 0; 3479 bool stop_split = false; 3480 3481 if (folio_test_swapcache(folio)) { 3482 VM_BUG_ON(mapping); 3483 3484 /* a swapcache folio can only be uniformly split to order-0 */ 3485 if (!uniform_split || new_order != 0) 3486 return -EINVAL; 3487 3488 swap_cache = swap_address_space(folio->swap); 3489 xa_lock(&swap_cache->i_pages); 3490 } 3491 3492 if (folio_test_anon(folio)) 3493 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); 3494 3495 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ 3496 lruvec = folio_lruvec_lock(folio); 3497 3498 folio_clear_has_hwpoisoned(folio); 3499 3500 /* 3501 * split to new_order one order at a time. For uniform split, 3502 * folio is split to new_order directly. 3503 */ 3504 for (split_order = start_order; 3505 split_order >= new_order && !stop_split; 3506 split_order--) { 3507 int old_order = folio_order(folio); 3508 struct folio *release; 3509 struct folio *end_folio = folio_next(folio); 3510 3511 /* order-1 anonymous folio is not supported */ 3512 if (folio_test_anon(folio) && split_order == 1) 3513 continue; 3514 if (uniform_split && split_order != new_order) 3515 continue; 3516 3517 if (mapping) { 3518 /* 3519 * uniform split has xas_split_alloc() called before 3520 * irq is disabled to allocate enough memory, whereas 3521 * non-uniform split can handle ENOMEM. 3522 */ 3523 if (uniform_split) 3524 xas_split(xas, folio, old_order); 3525 else { 3526 xas_set_order(xas, folio->index, split_order); 3527 xas_try_split(xas, folio, old_order); 3528 if (xas_error(xas)) { 3529 ret = xas_error(xas); 3530 stop_split = true; 3531 goto after_split; 3532 } 3533 } 3534 } 3535 3536 folio_split_memcg_refs(folio, old_order, split_order); 3537 split_page_owner(&folio->page, old_order, split_order); 3538 pgalloc_tag_split(folio, old_order, split_order); 3539 3540 __split_folio_to_order(folio, old_order, split_order); 3541 3542 after_split: 3543 /* 3544 * Iterate through after-split folios and perform related 3545 * operations. But in buddy allocator like split, the folio 3546 * containing the specified page is skipped until its order 3547 * is new_order, since the folio will be worked on in next 3548 * iteration. 3549 */ 3550 for (release = folio; release != end_folio; release = next) { 3551 next = folio_next(release); 3552 /* 3553 * for buddy allocator like split, the folio containing 3554 * page will be split next and should not be released, 3555 * until the folio's order is new_order or stop_split 3556 * is set to true by the above xas_split() failure. 
3557 */ 3558 if (release == page_folio(split_at)) { 3559 folio = release; 3560 if (split_order != new_order && !stop_split) 3561 continue; 3562 } 3563 if (folio_test_anon(release)) { 3564 mod_mthp_stat(folio_order(release), 3565 MTHP_STAT_NR_ANON, 1); 3566 } 3567 3568 /* 3569 * origin_folio should be kept frozon until page cache 3570 * entries are updated with all the other after-split 3571 * folios to prevent others seeing stale page cache 3572 * entries. 3573 */ 3574 if (release == origin_folio) 3575 continue; 3576 3577 folio_ref_unfreeze(release, 1 + 3578 ((mapping || swap_cache) ? 3579 folio_nr_pages(release) : 0)); 3580 3581 lru_add_split_folio(origin_folio, release, lruvec, 3582 list); 3583 3584 /* Some pages can be beyond EOF: drop them from cache */ 3585 if (release->index >= end) { 3586 if (shmem_mapping(mapping)) 3587 nr_dropped += folio_nr_pages(release); 3588 else if (folio_test_clear_dirty(release)) 3589 folio_account_cleaned(release, 3590 inode_to_wb(mapping->host)); 3591 __filemap_remove_folio(release, NULL); 3592 folio_put_refs(release, folio_nr_pages(release)); 3593 } else if (mapping) { 3594 __xa_store(&mapping->i_pages, 3595 release->index, release, 0); 3596 } else if (swap_cache) { 3597 __xa_store(&swap_cache->i_pages, 3598 swap_cache_index(release->swap), 3599 release, 0); 3600 } 3601 } 3602 } 3603 3604 /* 3605 * Unfreeze origin_folio only after all page cache entries, which used 3606 * to point to it, have been updated with new folios. Otherwise, 3607 * a parallel folio_try_get() can grab origin_folio and its caller can 3608 * see stale page cache entries. 3609 */ 3610 folio_ref_unfreeze(origin_folio, 1 + 3611 ((mapping || swap_cache) ? folio_nr_pages(origin_folio) : 0)); 3612 3613 unlock_page_lruvec(lruvec); 3614 3615 if (swap_cache) 3616 xa_unlock(&swap_cache->i_pages); 3617 if (mapping) 3618 xa_unlock(&mapping->i_pages); 3619 3620 /* Caller disabled irqs, so they are still disabled here */ 3621 local_irq_enable(); 3622 3623 if (nr_dropped) 3624 shmem_uncharge(mapping->host, nr_dropped); 3625 3626 remap_page(origin_folio, 1 << order, 3627 folio_test_anon(origin_folio) ? 3628 RMP_USE_SHARED_ZEROPAGE : 0); 3629 3630 /* 3631 * At this point, folio should contain the specified page. 3632 * For uniform split, it is left for caller to unlock. 3633 * For buddy allocator like split, the first after-split folio is left 3634 * for caller to unlock. 3635 */ 3636 for (new_folio = origin_folio; new_folio != next_folio; new_folio = next) { 3637 next = folio_next(new_folio); 3638 if (new_folio == page_folio(lock_at)) 3639 continue; 3640 3641 folio_unlock(new_folio); 3642 /* 3643 * Subpages may be freed if there wasn't any mapping 3644 * like if add_to_swap() is running on a lru page that 3645 * had its mapping zapped. And freeing these pages 3646 * requires taking the lru_lock so we do the put_page 3647 * of the tail pages after the split is complete. 3648 */ 3649 free_page_and_swap_cache(&new_folio->page); 3650 } 3651 return ret; 3652 } 3653 3654 bool non_uniform_split_supported(struct folio *folio, unsigned int new_order, 3655 bool warns) 3656 { 3657 if (folio_test_anon(folio)) { 3658 /* order-1 is not supported for anonymous THP. */ 3659 VM_WARN_ONCE(warns && new_order == 1, 3660 "Cannot split to order-1 folio"); 3661 return new_order != 1; 3662 } else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && 3663 !mapping_large_folio_support(folio->mapping)) { 3664 /* 3665 * No split if the file system does not support large folio. 
3666 * Note that we might still have THPs in such mappings due to 3667 * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping 3668 * does not actually support large folios properly. 3669 */ 3670 VM_WARN_ONCE(warns, 3671 "Cannot split file folio to non-0 order"); 3672 return false; 3673 } 3674 3675 /* Only swapping a whole PMD-mapped folio is supported */ 3676 if (folio_test_swapcache(folio)) { 3677 VM_WARN_ONCE(warns, 3678 "Cannot split swapcache folio to non-0 order"); 3679 return false; 3680 } 3681 3682 return true; 3683 } 3684 3685 /* See comments in non_uniform_split_supported() */ 3686 bool uniform_split_supported(struct folio *folio, unsigned int new_order, 3687 bool warns) 3688 { 3689 if (folio_test_anon(folio)) { 3690 VM_WARN_ONCE(warns && new_order == 1, 3691 "Cannot split to order-1 folio"); 3692 return new_order != 1; 3693 } else if (new_order) { 3694 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && 3695 !mapping_large_folio_support(folio->mapping)) { 3696 VM_WARN_ONCE(warns, 3697 "Cannot split file folio to non-0 order"); 3698 return false; 3699 } 3700 } 3701 3702 if (new_order && folio_test_swapcache(folio)) { 3703 VM_WARN_ONCE(warns, 3704 "Cannot split swapcache folio to non-0 order"); 3705 return false; 3706 } 3707 3708 return true; 3709 } 3710 3711 /* 3712 * __folio_split: split a folio at @split_at to a @new_order folio 3713 * @folio: folio to split 3714 * @new_order: the order of the new folio 3715 * @split_at: a page within the new folio 3716 * @lock_at: a page within @folio to be left locked to caller 3717 * @list: after-split folios will be put on it if non NULL 3718 * @uniform_split: perform uniform split or not (non-uniform split) 3719 * 3720 * It calls __split_unmapped_folio() to perform uniform and non-uniform split. 3721 * It is in charge of checking whether the split is supported or not and 3722 * preparing @folio for __split_unmapped_folio(). 3723 * 3724 * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be 3725 * split but not to @new_order, the caller needs to check) 3726 */ 3727 static int __folio_split(struct folio *folio, unsigned int new_order, 3728 struct page *split_at, struct page *lock_at, 3729 struct list_head *list, bool uniform_split) 3730 { 3731 struct deferred_split *ds_queue = get_deferred_split_queue(folio); 3732 XA_STATE(xas, &folio->mapping->i_pages, folio->index); 3733 bool is_anon = folio_test_anon(folio); 3734 struct address_space *mapping = NULL; 3735 struct anon_vma *anon_vma = NULL; 3736 int order = folio_order(folio); 3737 int extra_pins, ret; 3738 pgoff_t end; 3739 bool is_hzp; 3740 3741 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 3742 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); 3743 3744 if (folio != page_folio(split_at) || folio != page_folio(lock_at)) 3745 return -EINVAL; 3746 3747 if (new_order >= folio_order(folio)) 3748 return -EINVAL; 3749 3750 if (uniform_split && !uniform_split_supported(folio, new_order, true)) 3751 return -EINVAL; 3752 3753 if (!uniform_split && 3754 !non_uniform_split_supported(folio, new_order, true)) 3755 return -EINVAL; 3756 3757 is_hzp = is_huge_zero_folio(folio); 3758 if (is_hzp) { 3759 pr_warn_ratelimited("Called split_huge_page for huge zero page\n"); 3760 return -EBUSY; 3761 } 3762 3763 if (folio_test_writeback(folio)) 3764 return -EBUSY; 3765 3766 if (is_anon) { 3767 /* 3768 * The caller does not necessarily hold an mmap_lock that would 3769 * prevent the anon_vma disappearing so we first we take a 3770 * reference to it and then lock the anon_vma for write. 
This 3771 * is similar to folio_lock_anon_vma_read except the write lock 3772 * is taken to serialise against parallel split or collapse 3773 * operations. 3774 */ 3775 anon_vma = folio_get_anon_vma(folio); 3776 if (!anon_vma) { 3777 ret = -EBUSY; 3778 goto out; 3779 } 3780 end = -1; 3781 mapping = NULL; 3782 anon_vma_lock_write(anon_vma); 3783 } else { 3784 unsigned int min_order; 3785 gfp_t gfp; 3786 3787 mapping = folio->mapping; 3788 3789 /* Truncated ? */ 3790 /* 3791 * TODO: add support for large shmem folio in swap cache. 3792 * When shmem is in swap cache, mapping is NULL and 3793 * folio_test_swapcache() is true. 3794 */ 3795 if (!mapping) { 3796 ret = -EBUSY; 3797 goto out; 3798 } 3799 3800 min_order = mapping_min_folio_order(folio->mapping); 3801 if (new_order < min_order) { 3802 VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u", 3803 min_order); 3804 ret = -EINVAL; 3805 goto out; 3806 } 3807 3808 gfp = current_gfp_context(mapping_gfp_mask(mapping) & 3809 GFP_RECLAIM_MASK); 3810 3811 if (!filemap_release_folio(folio, gfp)) { 3812 ret = -EBUSY; 3813 goto out; 3814 } 3815 3816 if (uniform_split) { 3817 xas_set_order(&xas, folio->index, new_order); 3818 xas_split_alloc(&xas, folio, folio_order(folio), gfp); 3819 if (xas_error(&xas)) { 3820 ret = xas_error(&xas); 3821 goto out; 3822 } 3823 } 3824 3825 anon_vma = NULL; 3826 i_mmap_lock_read(mapping); 3827 3828 /* 3829 *__split_unmapped_folio() may need to trim off pages beyond 3830 * EOF: but on 32-bit, i_size_read() takes an irq-unsafe 3831 * seqlock, which cannot be nested inside the page tree lock. 3832 * So note end now: i_size itself may be changed at any moment, 3833 * but folio lock is good enough to serialize the trimming. 3834 */ 3835 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 3836 if (shmem_mapping(mapping)) 3837 end = shmem_fallocend(mapping->host, end); 3838 } 3839 3840 /* 3841 * Racy check if we can split the page, before unmap_folio() will 3842 * split PMDs 3843 */ 3844 if (!can_split_folio(folio, 1, &extra_pins)) { 3845 ret = -EAGAIN; 3846 goto out_unlock; 3847 } 3848 3849 unmap_folio(folio); 3850 3851 /* block interrupt reentry in xa_lock and spinlock */ 3852 local_irq_disable(); 3853 if (mapping) { 3854 /* 3855 * Check if the folio is present in page cache. 3856 * We assume all tail are present too, if folio is there. 3857 */ 3858 xas_lock(&xas); 3859 xas_reset(&xas); 3860 if (xas_load(&xas) != folio) 3861 goto fail; 3862 } 3863 3864 /* Prevent deferred_split_scan() touching ->_refcount */ 3865 spin_lock(&ds_queue->split_queue_lock); 3866 if (folio_ref_freeze(folio, 1 + extra_pins)) { 3867 if (folio_order(folio) > 1 && 3868 !list_empty(&folio->_deferred_list)) { 3869 ds_queue->split_queue_len--; 3870 if (folio_test_partially_mapped(folio)) { 3871 folio_clear_partially_mapped(folio); 3872 mod_mthp_stat(folio_order(folio), 3873 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); 3874 } 3875 /* 3876 * Reinitialize page_deferred_list after removing the 3877 * page from the split_queue, otherwise a subsequent 3878 * split will see list corruption when checking the 3879 * page_deferred_list. 
3880 */ 3881 list_del_init(&folio->_deferred_list); 3882 } 3883 spin_unlock(&ds_queue->split_queue_lock); 3884 if (mapping) { 3885 int nr = folio_nr_pages(folio); 3886 3887 if (folio_test_pmd_mappable(folio) && 3888 new_order < HPAGE_PMD_ORDER) { 3889 if (folio_test_swapbacked(folio)) { 3890 __lruvec_stat_mod_folio(folio, 3891 NR_SHMEM_THPS, -nr); 3892 } else { 3893 __lruvec_stat_mod_folio(folio, 3894 NR_FILE_THPS, -nr); 3895 filemap_nr_thps_dec(mapping); 3896 } 3897 } 3898 } 3899 3900 ret = __split_unmapped_folio(folio, new_order, 3901 split_at, lock_at, list, end, &xas, mapping, 3902 uniform_split); 3903 } else { 3904 spin_unlock(&ds_queue->split_queue_lock); 3905 fail: 3906 if (mapping) 3907 xas_unlock(&xas); 3908 local_irq_enable(); 3909 remap_page(folio, folio_nr_pages(folio), 0); 3910 ret = -EAGAIN; 3911 } 3912 3913 out_unlock: 3914 if (anon_vma) { 3915 anon_vma_unlock_write(anon_vma); 3916 put_anon_vma(anon_vma); 3917 } 3918 if (mapping) 3919 i_mmap_unlock_read(mapping); 3920 out: 3921 xas_destroy(&xas); 3922 if (order == HPAGE_PMD_ORDER) 3923 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 3924 count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED); 3925 return ret; 3926 } 3927 3928 /* 3929 * This function splits a large folio into smaller folios of order @new_order. 3930 * @page can point to any page of the large folio to split. The split operation 3931 * does not change the position of @page. 3932 * 3933 * Prerequisites: 3934 * 3935 * 1) The caller must hold a reference on the @page's owning folio, also known 3936 * as the large folio. 3937 * 3938 * 2) The large folio must be locked. 3939 * 3940 * 3) The folio must not be pinned. Any unexpected folio references, including 3941 * GUP pins, will result in the folio not getting split; instead, the caller 3942 * will receive an -EAGAIN. 3943 * 3944 * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not 3945 * supported for non-file-backed folios, because folio->_deferred_list, which 3946 * is used by partially mapped folios, is stored in subpage 2, but an order-1 3947 * folio only has subpages 0 and 1. File-backed order-1 folios are supported, 3948 * since they do not use _deferred_list. 3949 * 3950 * After splitting, the caller's folio reference will be transferred to @page, 3951 * resulting in a raised refcount of @page after this call. The other pages may 3952 * be freed if they are not mapped. 3953 * 3954 * If @list is null, tail pages will be added to LRU list, otherwise, to @list. 3955 * 3956 * Pages in @new_order will inherit the mapping, flags, and so on from the 3957 * huge page. 3958 * 3959 * Returns 0 if the huge page was split successfully. 3960 * 3961 * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if 3962 * the folio was concurrently removed from the page cache. 3963 * 3964 * Returns -EBUSY when trying to split the huge zeropage, if the folio is 3965 * under writeback, if fs-specific folio metadata cannot currently be 3966 * released, or if some unexpected race happened (e.g., anon VMA disappeared, 3967 * truncation). 3968 * 3969 * Callers should ensure that the order respects the address space mapping 3970 * min-order if one is set for non-anonymous folios. 3971 * 3972 * Returns -EINVAL when trying to split to an order that is incompatible 3973 * with the folio. Splitting to order 0 is compatible with all folios. 
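 *
 * Minimal usage sketch (illustrative only, not an actual call site in this
 * file), for a caller that already holds the folio lock and a folio
 * reference as required above and wants to split down to some new_order:
 *
 *	int min_order = min_order_for_split(folio);
 *
 *	if (min_order < 0)
 *		return min_order;	(folio has been truncated)
 *	err = split_huge_page_to_list_to_order(&folio->page, NULL,
 *					       max(new_order, min_order));
 *
 * where -EAGAIN from the split means the folio had unexpected references
 * (e.g. GUP pins) and the caller may simply retry later.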
3974 */ 3975 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, 3976 unsigned int new_order) 3977 { 3978 struct folio *folio = page_folio(page); 3979 3980 return __folio_split(folio, new_order, &folio->page, page, list, true); 3981 } 3982 3983 /* 3984 * folio_split: split a folio at @split_at to a @new_order folio 3985 * @folio: folio to split 3986 * @new_order: the order of the new folio 3987 * @split_at: a page within the new folio 3988 * 3989 * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be 3990 * split but not to @new_order, the caller needs to check) 3991 * 3992 * It has the same prerequisites and returns as 3993 * split_huge_page_to_list_to_order(). 3994 * 3995 * Split a folio at @split_at to a new_order folio, leave the 3996 * remaining subpages of the original folio as large as possible. For example, 3997 * in the case of splitting an order-9 folio at its third order-3 subpages to 3998 * an order-3 folio, there are 2^(9-3)=64 order-3 subpages in the order-9 folio. 3999 * After the split, there will be a group of folios with different orders and 4000 * the new folio containing @split_at is marked in bracket: 4001 * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8]. 4002 * 4003 * After split, folio is left locked for caller. 4004 */ 4005 int folio_split(struct folio *folio, unsigned int new_order, 4006 struct page *split_at, struct list_head *list) 4007 { 4008 return __folio_split(folio, new_order, split_at, &folio->page, list, 4009 false); 4010 } 4011 4012 int min_order_for_split(struct folio *folio) 4013 { 4014 if (folio_test_anon(folio)) 4015 return 0; 4016 4017 if (!folio->mapping) { 4018 if (folio_test_pmd_mappable(folio)) 4019 count_vm_event(THP_SPLIT_PAGE_FAILED); 4020 return -EBUSY; 4021 } 4022 4023 return mapping_min_folio_order(folio->mapping); 4024 } 4025 4026 int split_folio_to_list(struct folio *folio, struct list_head *list) 4027 { 4028 int ret = min_order_for_split(folio); 4029 4030 if (ret < 0) 4031 return ret; 4032 4033 return split_huge_page_to_list_to_order(&folio->page, list, ret); 4034 } 4035 4036 /* 4037 * __folio_unqueue_deferred_split() is not to be called directly: 4038 * the folio_unqueue_deferred_split() inline wrapper in mm/internal.h 4039 * limits its calls to those folios which may have a _deferred_list for 4040 * queueing THP splits, and that list is (racily observed to be) non-empty. 4041 * 4042 * It is unsafe to call folio_unqueue_deferred_split() until folio refcount is 4043 * zero: because even when split_queue_lock is held, a non-empty _deferred_list 4044 * might be in use on deferred_split_scan()'s unlocked on-stack list. 4045 * 4046 * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is 4047 * therefore important to unqueue deferred split before changing folio memcg. 
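 *
 * A minimal ordering sketch (illustrative only, not an actual call site):
 * code that re-associates a large folio with a different memcg should do
 *
 *	folio_unqueue_deferred_split(folio);
 *		(while the old memcg, and therefore the old split_queue_lock,
 *		 still applies to the folio)
 *	<only then change the folio's memcg association>
 *
 * so the folio is never left on a split queue whose lock no longer matches
 * its memcg.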
4048 */ 4049 bool __folio_unqueue_deferred_split(struct folio *folio) 4050 { 4051 struct deferred_split *ds_queue; 4052 unsigned long flags; 4053 bool unqueued = false; 4054 4055 WARN_ON_ONCE(folio_ref_count(folio)); 4056 WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg(folio)); 4057 4058 ds_queue = get_deferred_split_queue(folio); 4059 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 4060 if (!list_empty(&folio->_deferred_list)) { 4061 ds_queue->split_queue_len--; 4062 if (folio_test_partially_mapped(folio)) { 4063 folio_clear_partially_mapped(folio); 4064 mod_mthp_stat(folio_order(folio), 4065 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); 4066 } 4067 list_del_init(&folio->_deferred_list); 4068 unqueued = true; 4069 } 4070 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 4071 4072 return unqueued; /* useful for debug warnings */ 4073 } 4074 4075 /* partially_mapped=false won't clear PG_partially_mapped folio flag */ 4076 void deferred_split_folio(struct folio *folio, bool partially_mapped) 4077 { 4078 struct deferred_split *ds_queue = get_deferred_split_queue(folio); 4079 #ifdef CONFIG_MEMCG 4080 struct mem_cgroup *memcg = folio_memcg(folio); 4081 #endif 4082 unsigned long flags; 4083 4084 /* 4085 * Order 1 folios have no space for a deferred list, but we also 4086 * won't waste much memory by not adding them to the deferred list. 4087 */ 4088 if (folio_order(folio) <= 1) 4089 return; 4090 4091 if (!partially_mapped && !split_underused_thp) 4092 return; 4093 4094 /* 4095 * Exclude swapcache: originally to avoid a corrupt deferred split 4096 * queue. Nowadays that is fully prevented by memcg1_swapout(); 4097 * but if page reclaim is already handling the same folio, it is 4098 * unnecessary to handle it again in the shrinker, so excluding 4099 * swapcache here may still be a useful optimization. 
4100 */ 4101 if (folio_test_swapcache(folio)) 4102 return; 4103 4104 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 4105 if (partially_mapped) { 4106 if (!folio_test_partially_mapped(folio)) { 4107 folio_set_partially_mapped(folio); 4108 if (folio_test_pmd_mappable(folio)) 4109 count_vm_event(THP_DEFERRED_SPLIT_PAGE); 4110 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED); 4111 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1); 4112 4113 } 4114 } else { 4115 /* partially mapped folios cannot become non-partially mapped */ 4116 VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio); 4117 } 4118 if (list_empty(&folio->_deferred_list)) { 4119 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); 4120 ds_queue->split_queue_len++; 4121 #ifdef CONFIG_MEMCG 4122 if (memcg) 4123 set_shrinker_bit(memcg, folio_nid(folio), 4124 deferred_split_shrinker->id); 4125 #endif 4126 } 4127 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 4128 } 4129 4130 static unsigned long deferred_split_count(struct shrinker *shrink, 4131 struct shrink_control *sc) 4132 { 4133 struct pglist_data *pgdata = NODE_DATA(sc->nid); 4134 struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 4135 4136 #ifdef CONFIG_MEMCG 4137 if (sc->memcg) 4138 ds_queue = &sc->memcg->deferred_split_queue; 4139 #endif 4140 return READ_ONCE(ds_queue->split_queue_len); 4141 } 4142 4143 static bool thp_underused(struct folio *folio) 4144 { 4145 int num_zero_pages = 0, num_filled_pages = 0; 4146 void *kaddr; 4147 int i; 4148 4149 if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1) 4150 return false; 4151 4152 for (i = 0; i < folio_nr_pages(folio); i++) { 4153 kaddr = kmap_local_folio(folio, i * PAGE_SIZE); 4154 if (!memchr_inv(kaddr, 0, PAGE_SIZE)) { 4155 num_zero_pages++; 4156 if (num_zero_pages > khugepaged_max_ptes_none) { 4157 kunmap_local(kaddr); 4158 return true; 4159 } 4160 } else { 4161 /* 4162 * Another path for early exit once the number 4163 * of non-zero filled pages exceeds threshold. 
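			 * For example (illustrative numbers only): with
			 * HPAGE_PMD_NR == 512 and khugepaged_max_ptes_none
			 * == 256, the scan declares the folio "used" as soon
			 * as 256 non-zero pages have been seen, and
			 * "underused" as soon as 257 zero-filled pages have
			 * been seen, without reading the remaining pages.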
4164 */ 4165 num_filled_pages++; 4166 if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) { 4167 kunmap_local(kaddr); 4168 return false; 4169 } 4170 } 4171 kunmap_local(kaddr); 4172 } 4173 return false; 4174 } 4175 4176 static unsigned long deferred_split_scan(struct shrinker *shrink, 4177 struct shrink_control *sc) 4178 { 4179 struct pglist_data *pgdata = NODE_DATA(sc->nid); 4180 struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 4181 unsigned long flags; 4182 LIST_HEAD(list); 4183 struct folio *folio, *next, *prev = NULL; 4184 int split = 0, removed = 0; 4185 4186 #ifdef CONFIG_MEMCG 4187 if (sc->memcg) 4188 ds_queue = &sc->memcg->deferred_split_queue; 4189 #endif 4190 4191 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 4192 /* Take pin on all head pages to avoid freeing them under us */ 4193 list_for_each_entry_safe(folio, next, &ds_queue->split_queue, 4194 _deferred_list) { 4195 if (folio_try_get(folio)) { 4196 list_move(&folio->_deferred_list, &list); 4197 } else { 4198 /* We lost race with folio_put() */ 4199 if (folio_test_partially_mapped(folio)) { 4200 folio_clear_partially_mapped(folio); 4201 mod_mthp_stat(folio_order(folio), 4202 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); 4203 } 4204 list_del_init(&folio->_deferred_list); 4205 ds_queue->split_queue_len--; 4206 } 4207 if (!--sc->nr_to_scan) 4208 break; 4209 } 4210 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 4211 4212 list_for_each_entry_safe(folio, next, &list, _deferred_list) { 4213 bool did_split = false; 4214 bool underused = false; 4215 4216 if (!folio_test_partially_mapped(folio)) { 4217 underused = thp_underused(folio); 4218 if (!underused) 4219 goto next; 4220 } 4221 if (!folio_trylock(folio)) 4222 goto next; 4223 if (!split_folio(folio)) { 4224 did_split = true; 4225 if (underused) 4226 count_vm_event(THP_UNDERUSED_SPLIT_PAGE); 4227 split++; 4228 } 4229 folio_unlock(folio); 4230 next: 4231 /* 4232 * split_folio() removes folio from list on success. 4233 * Only add back to the queue if folio is partially mapped. 4234 * If thp_underused returns false, or if split_folio fails 4235 * in the case it was underused, then consider it used and 4236 * don't add it back to split_queue. 4237 */ 4238 if (did_split) { 4239 ; /* folio already removed from list */ 4240 } else if (!folio_test_partially_mapped(folio)) { 4241 list_del_init(&folio->_deferred_list); 4242 removed++; 4243 } else { 4244 /* 4245 * That unlocked list_del_init() above would be unsafe, 4246 * unless its folio is separated from any earlier folios 4247 * left on the list (which may be concurrently unqueued) 4248 * by one safe folio with refcount still raised. 4249 */ 4250 swap(folio, prev); 4251 } 4252 if (folio) 4253 folio_put(folio); 4254 } 4255 4256 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 4257 list_splice_tail(&list, &ds_queue->split_queue); 4258 ds_queue->split_queue_len -= removed; 4259 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 4260 4261 if (prev) 4262 folio_put(prev); 4263 4264 /* 4265 * Stop shrinker if we didn't split any page, but the queue is empty. 4266 * This can happen if pages were freed under us. 
4267 */ 4268 if (!split && list_empty(&ds_queue->split_queue)) 4269 return SHRINK_STOP; 4270 return split; 4271 } 4272 4273 #ifdef CONFIG_DEBUG_FS 4274 static void split_huge_pages_all(void) 4275 { 4276 struct zone *zone; 4277 struct page *page; 4278 struct folio *folio; 4279 unsigned long pfn, max_zone_pfn; 4280 unsigned long total = 0, split = 0; 4281 4282 pr_debug("Split all THPs\n"); 4283 for_each_zone(zone) { 4284 if (!managed_zone(zone)) 4285 continue; 4286 max_zone_pfn = zone_end_pfn(zone); 4287 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 4288 int nr_pages; 4289 4290 page = pfn_to_online_page(pfn); 4291 if (!page || PageTail(page)) 4292 continue; 4293 folio = page_folio(page); 4294 if (!folio_try_get(folio)) 4295 continue; 4296 4297 if (unlikely(page_folio(page) != folio)) 4298 goto next; 4299 4300 if (zone != folio_zone(folio)) 4301 goto next; 4302 4303 if (!folio_test_large(folio) 4304 || folio_test_hugetlb(folio) 4305 || !folio_test_lru(folio)) 4306 goto next; 4307 4308 total++; 4309 folio_lock(folio); 4310 nr_pages = folio_nr_pages(folio); 4311 if (!split_folio(folio)) 4312 split++; 4313 pfn += nr_pages - 1; 4314 folio_unlock(folio); 4315 next: 4316 folio_put(folio); 4317 cond_resched(); 4318 } 4319 } 4320 4321 pr_debug("%lu of %lu THP split\n", split, total); 4322 } 4323 4324 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) 4325 { 4326 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || 4327 is_vm_hugetlb_page(vma); 4328 } 4329 4330 static int split_huge_pages_pid(int pid, unsigned long vaddr_start, 4331 unsigned long vaddr_end, unsigned int new_order, 4332 long in_folio_offset) 4333 { 4334 int ret = 0; 4335 struct task_struct *task; 4336 struct mm_struct *mm; 4337 unsigned long total = 0, split = 0; 4338 unsigned long addr; 4339 4340 vaddr_start &= PAGE_MASK; 4341 vaddr_end &= PAGE_MASK; 4342 4343 task = find_get_task_by_vpid(pid); 4344 if (!task) { 4345 ret = -ESRCH; 4346 goto out; 4347 } 4348 4349 /* Find the mm_struct */ 4350 mm = get_task_mm(task); 4351 put_task_struct(task); 4352 4353 if (!mm) { 4354 ret = -EINVAL; 4355 goto out; 4356 } 4357 4358 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", 4359 pid, vaddr_start, vaddr_end); 4360 4361 mmap_read_lock(mm); 4362 /* 4363 * always increase addr by PAGE_SIZE, since we could have a PTE page 4364 * table filled with PTE-mapped THPs, each of which is distinct. 4365 */ 4366 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { 4367 struct vm_area_struct *vma = vma_lookup(mm, addr); 4368 struct folio_walk fw; 4369 struct folio *folio; 4370 struct address_space *mapping; 4371 unsigned int target_order = new_order; 4372 4373 if (!vma) 4374 break; 4375 4376 /* skip special VMA and hugetlb VMA */ 4377 if (vma_not_suitable_for_thp_split(vma)) { 4378 addr = vma->vm_end; 4379 continue; 4380 } 4381 4382 folio = folio_walk_start(&fw, vma, addr, 0); 4383 if (!folio) 4384 continue; 4385 4386 if (!is_transparent_hugepage(folio)) 4387 goto next; 4388 4389 if (!folio_test_anon(folio)) { 4390 mapping = folio->mapping; 4391 target_order = max(new_order, 4392 mapping_min_folio_order(mapping)); 4393 } 4394 4395 if (target_order >= folio_order(folio)) 4396 goto next; 4397 4398 total++; 4399 /* 4400 * For folios with private, split_huge_page_to_list_to_order() 4401 * will try to drop it before split and then check if the folio 4402 * can be split or not. So skip the check here. 
4403 */ 4404 if (!folio_test_private(folio) && 4405 !can_split_folio(folio, 0, NULL)) 4406 goto next; 4407 4408 if (!folio_trylock(folio)) 4409 goto next; 4410 folio_get(folio); 4411 folio_walk_end(&fw, vma); 4412 4413 if (!folio_test_anon(folio) && folio->mapping != mapping) 4414 goto unlock; 4415 4416 if (in_folio_offset < 0 || 4417 in_folio_offset >= folio_nr_pages(folio)) { 4418 if (!split_folio_to_order(folio, target_order)) 4419 split++; 4420 } else { 4421 struct page *split_at = folio_page(folio, 4422 in_folio_offset); 4423 if (!folio_split(folio, target_order, split_at, NULL)) 4424 split++; 4425 } 4426 4427 unlock: 4428 4429 folio_unlock(folio); 4430 folio_put(folio); 4431 4432 cond_resched(); 4433 continue; 4434 next: 4435 folio_walk_end(&fw, vma); 4436 cond_resched(); 4437 } 4438 mmap_read_unlock(mm); 4439 mmput(mm); 4440 4441 pr_debug("%lu of %lu THP split\n", split, total); 4442 4443 out: 4444 return ret; 4445 } 4446 4447 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, 4448 pgoff_t off_end, unsigned int new_order, 4449 long in_folio_offset) 4450 { 4451 struct filename *file; 4452 struct file *candidate; 4453 struct address_space *mapping; 4454 int ret = -EINVAL; 4455 pgoff_t index; 4456 int nr_pages = 1; 4457 unsigned long total = 0, split = 0; 4458 unsigned int min_order; 4459 unsigned int target_order; 4460 4461 file = getname_kernel(file_path); 4462 if (IS_ERR(file)) 4463 return ret; 4464 4465 candidate = file_open_name(file, O_RDONLY, 0); 4466 if (IS_ERR(candidate)) 4467 goto out; 4468 4469 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", 4470 file_path, off_start, off_end); 4471 4472 mapping = candidate->f_mapping; 4473 min_order = mapping_min_folio_order(mapping); 4474 target_order = max(new_order, min_order); 4475 4476 for (index = off_start; index < off_end; index += nr_pages) { 4477 struct folio *folio = filemap_get_folio(mapping, index); 4478 4479 nr_pages = 1; 4480 if (IS_ERR(folio)) 4481 continue; 4482 4483 if (!folio_test_large(folio)) 4484 goto next; 4485 4486 total++; 4487 nr_pages = folio_nr_pages(folio); 4488 4489 if (target_order >= folio_order(folio)) 4490 goto next; 4491 4492 if (!folio_trylock(folio)) 4493 goto next; 4494 4495 if (folio->mapping != mapping) 4496 goto unlock; 4497 4498 if (in_folio_offset < 0 || in_folio_offset >= nr_pages) { 4499 if (!split_folio_to_order(folio, target_order)) 4500 split++; 4501 } else { 4502 struct page *split_at = folio_page(folio, 4503 in_folio_offset); 4504 if (!folio_split(folio, target_order, split_at, NULL)) 4505 split++; 4506 } 4507 4508 unlock: 4509 folio_unlock(folio); 4510 next: 4511 folio_put(folio); 4512 cond_resched(); 4513 } 4514 4515 filp_close(candidate, NULL); 4516 ret = 0; 4517 4518 pr_debug("%lu of %lu file-backed THP split\n", split, total); 4519 out: 4520 putname(file); 4521 return ret; 4522 } 4523 4524 #define MAX_INPUT_BUF_SZ 255 4525 4526 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, 4527 size_t count, loff_t *ppops) 4528 { 4529 static DEFINE_MUTEX(split_debug_mutex); 4530 ssize_t ret; 4531 /* 4532 * hold pid, start_vaddr, end_vaddr, new_order or 4533 * file_path, off_start, off_end, new_order 4534 */ 4535 char input_buf[MAX_INPUT_BUF_SZ]; 4536 int pid; 4537 unsigned long vaddr_start, vaddr_end; 4538 unsigned int new_order = 0; 4539 long in_folio_offset = -1; 4540 4541 ret = mutex_lock_interruptible(&split_debug_mutex); 4542 if (ret) 4543 return ret; 4544 4545 ret = -EFAULT; 4546 4547 memset(input_buf, 0, 
MAX_INPUT_BUF_SZ); 4548 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ))) 4549 goto out; 4550 4551 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; 4552 4553 if (input_buf[0] == '/') { 4554 char *tok; 4555 char *tok_buf = input_buf; 4556 char file_path[MAX_INPUT_BUF_SZ]; 4557 pgoff_t off_start = 0, off_end = 0; 4558 size_t input_len = strlen(input_buf); 4559 4560 tok = strsep(&tok_buf, ","); 4561 if (tok && tok_buf) { 4562 strscpy(file_path, tok); 4563 } else { 4564 ret = -EINVAL; 4565 goto out; 4566 } 4567 4568 ret = sscanf(tok_buf, "0x%lx,0x%lx,%d,%ld", &off_start, &off_end, 4569 &new_order, &in_folio_offset); 4570 if (ret != 2 && ret != 3 && ret != 4) { 4571 ret = -EINVAL; 4572 goto out; 4573 } 4574 ret = split_huge_pages_in_file(file_path, off_start, off_end, 4575 new_order, in_folio_offset); 4576 if (!ret) 4577 ret = input_len; 4578 4579 goto out; 4580 } 4581 4582 ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d,%ld", &pid, &vaddr_start, 4583 &vaddr_end, &new_order, &in_folio_offset); 4584 if (ret == 1 && pid == 1) { 4585 split_huge_pages_all(); 4586 ret = strlen(input_buf); 4587 goto out; 4588 } else if (ret != 3 && ret != 4 && ret != 5) { 4589 ret = -EINVAL; 4590 goto out; 4591 } 4592 4593 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order, 4594 in_folio_offset); 4595 if (!ret) 4596 ret = strlen(input_buf); 4597 out: 4598 mutex_unlock(&split_debug_mutex); 4599 return ret; 4600 4601 } 4602 4603 static const struct file_operations split_huge_pages_fops = { 4604 .owner = THIS_MODULE, 4605 .write = split_huge_pages_write, 4606 }; 4607 4608 static int __init split_huge_pages_debugfs(void) 4609 { 4610 debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 4611 &split_huge_pages_fops); 4612 return 0; 4613 } 4614 late_initcall(split_huge_pages_debugfs); 4615 #endif 4616 4617 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 4618 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, 4619 struct page *page) 4620 { 4621 struct folio *folio = page_folio(page); 4622 struct vm_area_struct *vma = pvmw->vma; 4623 struct mm_struct *mm = vma->vm_mm; 4624 unsigned long address = pvmw->address; 4625 bool anon_exclusive; 4626 pmd_t pmdval; 4627 swp_entry_t entry; 4628 pmd_t pmdswp; 4629 4630 if (!(pvmw->pmd && !pvmw->pte)) 4631 return 0; 4632 4633 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); 4634 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); 4635 4636 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. 
*/ 4637 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page); 4638 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) { 4639 set_pmd_at(mm, address, pvmw->pmd, pmdval); 4640 return -EBUSY; 4641 } 4642 4643 if (pmd_dirty(pmdval)) 4644 folio_mark_dirty(folio); 4645 if (pmd_write(pmdval)) 4646 entry = make_writable_migration_entry(page_to_pfn(page)); 4647 else if (anon_exclusive) 4648 entry = make_readable_exclusive_migration_entry(page_to_pfn(page)); 4649 else 4650 entry = make_readable_migration_entry(page_to_pfn(page)); 4651 if (pmd_young(pmdval)) 4652 entry = make_migration_entry_young(entry); 4653 if (pmd_dirty(pmdval)) 4654 entry = make_migration_entry_dirty(entry); 4655 pmdswp = swp_entry_to_pmd(entry); 4656 if (pmd_soft_dirty(pmdval)) 4657 pmdswp = pmd_swp_mksoft_dirty(pmdswp); 4658 if (pmd_uffd_wp(pmdval)) 4659 pmdswp = pmd_swp_mkuffd_wp(pmdswp); 4660 set_pmd_at(mm, address, pvmw->pmd, pmdswp); 4661 folio_remove_rmap_pmd(folio, page, vma); 4662 folio_put(folio); 4663 trace_set_migration_pmd(address, pmd_val(pmdswp)); 4664 4665 return 0; 4666 } 4667 4668 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) 4669 { 4670 struct folio *folio = page_folio(new); 4671 struct vm_area_struct *vma = pvmw->vma; 4672 struct mm_struct *mm = vma->vm_mm; 4673 unsigned long address = pvmw->address; 4674 unsigned long haddr = address & HPAGE_PMD_MASK; 4675 pmd_t pmde; 4676 swp_entry_t entry; 4677 4678 if (!(pvmw->pmd && !pvmw->pte)) 4679 return; 4680 4681 entry = pmd_to_swp_entry(*pvmw->pmd); 4682 folio_get(folio); 4683 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); 4684 if (pmd_swp_soft_dirty(*pvmw->pmd)) 4685 pmde = pmd_mksoft_dirty(pmde); 4686 if (is_writable_migration_entry(entry)) 4687 pmde = pmd_mkwrite(pmde, vma); 4688 if (pmd_swp_uffd_wp(*pvmw->pmd)) 4689 pmde = pmd_mkuffd_wp(pmde); 4690 if (!is_migration_entry_young(entry)) 4691 pmde = pmd_mkold(pmde); 4692 /* NOTE: this may contain setting soft-dirty on some archs */ 4693 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) 4694 pmde = pmd_mkdirty(pmde); 4695 4696 if (folio_test_anon(folio)) { 4697 rmap_t rmap_flags = RMAP_NONE; 4698 4699 if (!is_readable_migration_entry(entry)) 4700 rmap_flags |= RMAP_EXCLUSIVE; 4701 4702 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags); 4703 } else { 4704 folio_add_file_rmap_pmd(folio, new, vma); 4705 } 4706 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new)); 4707 set_pmd_at(mm, haddr, pvmw->pmd, pmde); 4708 4709 /* No need to invalidate - it was non-present before */ 4710 update_mmu_cache_pmd(vma, address, pvmw->pmd); 4711 trace_remove_migration_pmd(address, pmd_val(pmde)); 4712 } 4713 #endif 4714
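
/*
 * Illustrative examples for the split_huge_pages debugfs interface defined
 * above (CONFIG_DEBUG_FS only). The pid, path and ranges below are made up,
 * and debugfs is assumed to be mounted at /sys/kernel/debug; the accepted
 * formats are those parsed by split_huge_pages_write():
 *
 *	# split all THPs in the system
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 *
 *	# split THPs mapped by pid 1234 in [0x7f0000000000, 0x7f0000200000)
 *	# down to order 0
 *	echo "1234,0x7f0000000000,0x7f0000200000,0" > \
 *		/sys/kernel/debug/split_huge_pages
 *
 *	# split file-backed THPs of /mnt/file at page offsets [0x0, 0x200)
 *	# down to order 2
 *	echo "/mnt/file,0x0,0x200,2" > /sys/kernel/debug/split_huge_pages
 *
 * An optional trailing field selects an in-folio offset, in which case a
 * non-uniform folio_split() at that page is attempted instead of a uniform
 * split, e.g. "1234,<start>,<end>,0,32".
 */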