// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or cache
 * failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping the
 * use of not-yet-corrupted-but-suspicious pages without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/mm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity in the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */

#define pr_fmt(fmt) "Memory failure: " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/dax.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include <linux/sysctl.h>
#include "swap.h"
#include "internal.h"
#include "ras/ras_event.h"

static int sysctl_memory_failure_early_kill __read_mostly;

static int sysctl_memory_failure_recovery __read_mostly = 1;

static int sysctl_enable_soft_offline __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

static bool hw_memory_failure __read_mostly = false;

static DEFINE_MUTEX(mf_mutex);

void num_poisoned_pages_inc(unsigned long pfn)
{
	atomic_long_inc(&num_poisoned_pages);
	memblk_nr_poison_inc(pfn);
}

void num_poisoned_pages_sub(unsigned long pfn, long i)
{
	atomic_long_sub(i, &num_poisoned_pages);
	if (pfn != -1UL)
		memblk_nr_poison_sub(pfn, i);
}

/**
 * MF_ATTR_RO - Create a sysfs entry for each memory failure statistic.
 * @_name: name of the file in the per NUMA sysfs directory.
 */
#define MF_ATTR_RO(_name)					\
static ssize_t _name##_show(struct device *dev,		\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct memory_failure_stats *mf_stats =		\
		&NODE_DATA(dev->id)->mf_stats;			\
	return sysfs_emit(buf, "%lu\n", mf_stats->_name);	\
}								\
static DEVICE_ATTR_RO(_name)
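
/*
 * For example, MF_ATTR_RO(total) expands (roughly) to:
 *
 *	static ssize_t total_show(struct device *dev,
 *				  struct device_attribute *attr, char *buf)
 *	{
 *		struct memory_failure_stats *mf_stats =
 *			&NODE_DATA(dev->id)->mf_stats;
 *		return sysfs_emit(buf, "%lu\n", mf_stats->total);
 *	}
 *	static DEVICE_ATTR_RO(total);
 *
 * DEVICE_ATTR_RO() then provides the dev_attr_total object referenced in
 * the attribute array below.
 */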

MF_ATTR_RO(total);
MF_ATTR_RO(ignored);
MF_ATTR_RO(failed);
MF_ATTR_RO(delayed);
MF_ATTR_RO(recovered);

static struct attribute *memory_failure_attr[] = {
	&dev_attr_total.attr,
	&dev_attr_ignored.attr,
	&dev_attr_failed.attr,
	&dev_attr_delayed.attr,
	&dev_attr_recovered.attr,
	NULL,
};

const struct attribute_group memory_failure_attr_group = {
	.name = "memory_failure",
	.attrs = memory_failure_attr,
};
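
/*
 * When this group is registered for a NUMA node device (done outside this
 * file), the counters are expected to surface as read-only files such as:
 *
 *	/sys/devices/system/node/node0/memory_failure/total
 *	/sys/devices/system/node/node0/memory_failure/recovered
 *
 * one file per counter, per node.
 */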

static const struct ctl_table memory_failure_table[] = {
	{
		.procname	= "memory_failure_early_kill",
		.data		= &sysctl_memory_failure_early_kill,
		.maxlen		= sizeof(sysctl_memory_failure_early_kill),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "memory_failure_recovery",
		.data		= &sysctl_memory_failure_recovery,
		.maxlen		= sizeof(sysctl_memory_failure_recovery),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "enable_soft_offline",
		.data		= &sysctl_enable_soft_offline,
		.maxlen		= sizeof(sysctl_enable_soft_offline),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	}
};
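
/*
 * This table is registered under the "vm" sysctl namespace (registration
 * happens later in this file), so the knobs are reachable as, for example:
 *
 *	echo 1 > /proc/sys/vm/memory_failure_early_kill
 *	echo 0 > /proc/sys/vm/enable_soft_offline
 *
 * with values clamped to the 0..1 range by proc_dointvec_minmax().
 */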

/*
 * Return values:
 *   1: the page is dissolved (if needed) and taken off from buddy,
 *   0: the page is dissolved (if needed) and not taken off from buddy,
 * < 0: failed to dissolve.
 */
static int __page_handle_poison(struct page *page)
{
	int ret;

	/*
	 * zone_pcp_disable() can't be used here. It will
	 * hold pcp_batch_high_lock and dissolve_free_hugetlb_folio() might hold
	 * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
	 * optimization is enabled. This will break the current lock dependency
	 * chain and lead to deadlock.
	 * Disabling pcp before dissolving the page was a deterministic
	 * approach because we made sure that those pages cannot end up in any
	 * PCP list. Draining PCP lists expels those pages to the buddy system,
	 * but nothing guarantees that those pages do not get back to a PCP
	 * queue if we need to refill those.
	 */
	ret = dissolve_free_hugetlb_folio(page_folio(page));
	if (!ret) {
		drain_all_pages(page_zone(page));
		ret = take_page_off_buddy(page);
	}

	return ret;
}

static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * Doing this check for free pages is also fine since
		 * dissolve_free_hugetlb_folio() returns 0 for non-hugetlb folios as well.
		 */
		if (__page_handle_poison(page) <= 0)
			/*
			 * We could fail to take off the target page from buddy
			 * for example due to racy page allocation, but that's
			 * acceptable because soft-offlined page is not broken
			 * and if someone really wants to use it, they should
			 * take it.
			 */
			return false;
	}

	SetPageHWPoison(page);
	if (release)
		put_page(page);
	page_ref_inc(page);
	num_poisoned_pages_inc(page_to_pfn(page));

	return true;
}

#if IS_ENABLED(CONFIG_HWPOISON_INJECT)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

static int hwpoison_filter_dev(struct page *p)
{
	struct folio *folio = page_folio(p);
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	mapping = folio_mapping(folio);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

/*
 * This allows stress tests to limit the test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Lastly, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	if (!hwpoison_filter_memcg)
		return 0;

	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(hwpoison_filter);
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif
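
/*
 * The filter knobs above exist for the hwpoison injector
 * (mm/hwpoison-inject.c), which exposes them via debugfs. A stress-test
 * session might look roughly like this (file names as provided by that
 * injector, assuming CONFIG_HWPOISON_INJECT):
 *
 *	echo 1    > /sys/kernel/debug/hwpoison/corrupt-filter-enable
 *	echo 0x10 > /sys/kernel/debug/hwpoison/corrupt-filter-flags-mask
 *	echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn
 *
 * so that only pages matching the filter are actually poisoned.
 */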

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handle it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	short size_shift;
};

/*
 * Send all the processes who have the page mapped a signal.
 * ``action optional'' if they are not immediately affected by the error
 * ``action required'' if error happened in current execution context
 */
static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
{
	struct task_struct *t = tk->tsk;
	short addr_lsb = tk->size_shift;
	int ret = 0;

	pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
		pfn, t->comm, task_pid_nr(t));

	if ((flags & MF_ACTION_REQUIRED) && (t == current))
		ret = force_sig_mceerr(BUS_MCEERR_AR,
				       (void __user *)tk->addr, addr_lsb);
	else
		/*
		 * Signal other processes sharing the page if they have
		 * PF_MCE_EARLY set.
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 */
		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
				      addr_lsb, t);
	if (ret < 0)
		pr_info("Error sending signal to %s:%d: %d\n",
			t->comm, task_pid_nr(t), ret);
	return ret;
}

/*
 * Unknown page type encountered. Try to check whether the page can turn
 * into PageLRU by draining the LRU caches via lru_add_drain_all().
 */
void shake_folio(struct folio *folio)
{
	if (folio_test_hugetlb(folio))
		return;
	/*
	 * TODO: Could shrink slab caches here if a lightweight range-based
	 * shrinker will be available.
	 */
	if (folio_test_slab(folio))
		return;

	lru_add_drain_all();
}
EXPORT_SYMBOL_GPL(shake_folio);

static void shake_page(struct page *page)
{
	shake_folio(page_folio(page));
}

static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
					       unsigned long address)
{
	unsigned long ret = 0;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t ptent;

	VM_BUG_ON_VMA(address == -EFAULT, vma);
	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return 0;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;
	if (pud_trans_huge(*pud))
		return PUD_SHIFT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;
	if (pmd_trans_huge(*pmd))
		return PMD_SHIFT;
	pte = pte_offset_map(pmd, address);
	if (!pte)
		return 0;
	ptent = ptep_get(pte);
	if (pte_present(ptent))
		ret = PAGE_SHIFT;
	pte_unmap(pte);
	return ret;
}
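
/*
 * The shift returned above ends up in siginfo's si_addr_lsb via kill_proc().
 * For example, on x86-64 with 4 KiB base pages, a PTE-mapped page yields
 * PAGE_SHIFT (12) and a PMD-mapped huge page PMD_SHIFT (21), telling
 * userspace how much of the mapping around si_addr is affected.
 */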

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 */
static void __add_to_kill(struct task_struct *tsk, const struct page *p,
			  struct vm_area_struct *vma, struct list_head *to_kill,
			  unsigned long addr)
{
	struct to_kill *tk;

	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
	if (!tk) {
		pr_err("Out of memory while machine check handling\n");
		return;
	}

	tk->addr = addr;
	if (is_zone_device_page(p))
		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
	else
		tk->size_shift = folio_shift(page_folio(p));

	/*
	 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
	 * "tk->size_shift == 0" effectively checks for no mapping on
	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
	 * to a process' address space, it's possible that not all N VMAs
	 * contain mappings for the page, but at least one VMA does.
	 * Only deliver SIGBUS with payload derived from the VMA that
	 * has a mapping for the page.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
	} else if (tk->size_shift == 0) {
		kfree(tk);
		return;
	}

	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

static void add_to_kill_anon_file(struct task_struct *tsk, const struct page *p,
				  struct vm_area_struct *vma, struct list_head *to_kill,
				  unsigned long addr)
{
	if (addr == -EFAULT)
		return;
	__add_to_kill(tsk, p, vma, to_kill, addr);
}

#ifdef CONFIG_KSM
static bool task_in_to_kill_list(struct list_head *to_kill,
				 struct task_struct *tsk)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe(tk, next, to_kill, nd) {
		if (tk->tsk == tsk)
			return true;
	}

	return false;
}

void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long addr)
{
	if (!task_in_to_kill_list(to_kill, tsk))
		__add_to_kill(tsk, p, vma, to_kill, addr);
}
#endif
/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the
 * list (this is used for clean pages which do not need killing)
 */
static void kill_procs(struct list_head *to_kill, int forcekill,
		       unsigned long pfn, int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe(tk, next, to_kill, nd) {
		if (forcekill) {
			if (tk->addr == -EFAULT) {
				pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, task_pid_nr(tk->tsk));
				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
						 tk->tsk, PIDTYPE_PID);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc(tk, pfn, flags) < 0)
				pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, task_pid_nr(tk->tsk));
		}
		list_del(&tk->nd);
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold the rcu lock in the caller, so we don't have to call
 * rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t) {
		if (t->flags & PF_MCE_PROCESS) {
			if (t->flags & PF_MCE_EARLY)
				return t;
		} else {
			if (sysctl_memory_failure_early_kill)
				return t;
		}
	}
	return NULL;
}

/*
 * Determine whether a given process is an "early kill" process which expects
 * to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill" and otherwise return NULL.
 *
 * Note that the above is true for the Action Optional case. For the Action
 * Required case, it's only meaningful to the current thread, which needs to
 * be signaled with SIGBUS; the error is Action Optional for other non-current
 * processes sharing the same error page. If such a process is "early kill",
 * the task_struct of its dedicated thread will also be returned.
 */
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
{
	if (!tsk->mm)
		return NULL;
	/*
	 * Comparing ->mm here because current task might represent
	 * a subthread, while tsk always points to the main thread.
	 */
	if (force_early && tsk->mm == current->mm)
		return current;

	return find_early_kill_thread(tsk);
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(const struct folio *folio,
		const struct page *page, struct list_head *to_kill,
		int force_early)
{
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = folio_lock_anon_vma_read(folio, NULL);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_pgoff(folio, page);
	rcu_read_lock();
	for_each_process(tsk) {
		struct vm_area_struct *vma;
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);
		unsigned long addr;

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (vma->vm_mm != t->mm)
				continue;
			addr = page_mapped_in_vma(page, vma);
			add_to_kill_anon_file(t, page, vma, to_kill, addr);
		}
	}
	rcu_read_unlock();
	anon_vma_unlock_read(av);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(const struct folio *folio,
		const struct page *page, struct list_head *to_kill,
		int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = folio->mapping;
	pgoff_t pgoff;

	i_mmap_lock_read(mapping);
	rcu_read_lock();
	pgoff = page_pgoff(folio, page);
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, force_early);
		unsigned long addr;

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
					  pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm != t->mm)
				continue;
			addr = page_address_in_vma(folio, page, vma);
			add_to_kill_anon_file(t, page, vma, to_kill, addr);
		}
	}
	rcu_read_unlock();
	i_mmap_unlock_read(mapping);
}

#ifdef CONFIG_FS_DAX
static void add_to_kill_fsdax(struct task_struct *tsk, const struct page *p,
			      struct vm_area_struct *vma,
			      struct list_head *to_kill, pgoff_t pgoff)
{
	unsigned long addr = vma_address(vma, pgoff, 1);
	__add_to_kill(tsk, p, vma, to_kill, addr);
}

/*
 * Collect processes when the error hit a fsdax page.
 */
static void collect_procs_fsdax(const struct page *page,
		struct address_space *mapping, pgoff_t pgoff,
		struct list_head *to_kill, bool pre_remove)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;

	i_mmap_lock_read(mapping);
	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *t = tsk;

		/*
		 * Search for all tasks while MF_MEM_PRE_REMOVE is set, because
		 * the current may not be the one accessing the fsdax page.
		 * Otherwise, search for the current task.
		 */
		if (!pre_remove)
			t = task_early_kill(tsk, true);
		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
			if (vma->vm_mm == t->mm)
				add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
		}
	}
	rcu_read_unlock();
	i_mmap_unlock_read(mapping);
}
#endif /* CONFIG_FS_DAX */

/*
 * Collect the processes who have the corrupted page mapped to kill.
 */
static void collect_procs(const struct folio *folio, const struct page *page,
		struct list_head *tokill, int force_early)
{
	if (!folio->mapping)
		return;
	if (unlikely(folio_test_ksm(folio)))
		collect_procs_ksm(folio, page, tokill, force_early);
	else if (folio_test_anon(folio))
		collect_procs_anon(folio, page, tokill, force_early);
	else
		collect_procs_file(folio, page, tokill, force_early);
}

struct hwpoison_walk {
	struct to_kill tk;
	unsigned long pfn;
	int flags;
};

static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
{
	tk->addr = addr;
	tk->size_shift = shift;
}

static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
				  unsigned long poisoned_pfn, struct to_kill *tk)
{
	unsigned long pfn = 0;

	if (pte_present(pte)) {
		pfn = pte_pfn(pte);
	} else {
		swp_entry_t swp = pte_to_swp_entry(pte);

		if (is_hwpoison_entry(swp))
			pfn = swp_offset_pfn(swp);
	}

	if (!pfn || pfn != poisoned_pfn)
		return 0;

	set_to_kill(tk, addr, shift);
	return 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwpoison_walk *hwp)
{
	pmd_t pmd = *pmdp;
	unsigned long pfn;
	unsigned long hwpoison_vaddr;

	if (!pmd_present(pmd))
		return 0;
	pfn = pmd_pfn(pmd);
	if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
		hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
		set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
		return 1;
	}
	return 0;
}
#else
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwpoison_walk *hwp)
{
	return 0;
}
#endif

static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
{
	struct hwpoison_walk *hwp = walk->private;
	int ret = 0;
	pte_t *ptep, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmdp, walk->vma);
	if (ptl) {
		ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
		spin_unlock(ptl);
		goto out;
	}

	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
						addr, &ptl);
	if (!ptep)
		goto out;

	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT,
					     hwp->pfn, &hwp->tk);
		if (ret == 1)
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
out:
	cond_resched();
	return ret;
}

#ifdef CONFIG_HUGETLB_PAGE
static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
				  unsigned long addr, unsigned long end,
				  struct mm_walk *walk)
{
	struct hwpoison_walk *hwp = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t pte;
	int ret;

	ptl = huge_pte_lock(h, walk->mm, ptep);
	pte = huge_ptep_get(walk->mm, addr, ptep);
	ret = check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
				     hwp->pfn, &hwp->tk);
	spin_unlock(ptl);
	return ret;
}
#else
#define hwpoison_hugetlb_range	NULL
#endif

static const struct mm_walk_ops hwpoison_walk_ops = {
	.pmd_entry = hwpoison_pte_range,
	.hugetlb_entry = hwpoison_hugetlb_range,
	.walk_lock = PGWALK_RDLOCK,
};

/*
 * Sends SIGBUS to the current process with error info.
 *
 * This function is intended to handle "Action Required" MCEs on already
 * hardware poisoned pages. They could happen, for example, when
 * memory_failure() failed to unmap the error page at the first call, or
 * when multiple local machine checks happened on different CPUs.
 *
 * The MCE handler currently has no easy access to the error virtual address,
 * so this function walks the page tables to find it. The returned virtual
 * address is proper in most cases, but it could be wrong when the application
 * process has multiple entries mapping the error page.
 */
static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
				  int flags)
{
	int ret;
	struct hwpoison_walk priv = {
		.pfn = pfn,
	};
	priv.tk.tsk = p;

	if (!p->mm)
		return -EFAULT;

	mmap_read_lock(p->mm);
	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
			      (void *)&priv);
	/*
	 * ret = 1 when CMCI wins, regardless of whether try_to_unmap()
	 * succeeds or fails, then kill the process with SIGBUS.
	 * ret = 0 when poison page is a clean page and it's dropped, no
	 * SIGBUS is needed.
	 */
	if (ret == 1 && priv.tk.addr)
		kill_proc(&priv.tk, pfn, flags);
	mmap_read_unlock(p->mm);

	return ret > 0 ? -EHWPOISON : 0;
}

/*
 * MF_IGNORED - The m-f() handler marks the page as PG_hwpoison.
 * But it could not do more to isolate the page from being accessed again,
 * nor does it kill the process. This is extremely rare and one of the
 * potential causes is that the page state has been changed due to an
 * underlying race condition. This is the most severe outcome.
 *
 * MF_FAILED - The m-f() handler marks the page as PG_hwpoison.
 * It should have killed the process, but it can't isolate the page,
 * due to conditions such as extra pin, unmap failure, etc. Accessing
 * the page again may trigger another MCE and the process will be killed
 * by the m-f() handler immediately.
 *
 * MF_DELAYED - The m-f() handler marks the page as PG_hwpoison.
 * The page is unmapped, and is removed from the LRU or file mapping.
 * An attempt to access the page again will trigger a page fault and the
 * PF handler will kill the process.
 *
 * MF_RECOVERED - The m-f() handler marks the page as PG_hwpoison.
 * The page has been completely isolated, that is, unmapped, taken out of
 * the buddy system, or hole-punched out of the file mapping.
 */
static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};

static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_GET_HWPOISON]		= "get hwpoison page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_DAX]			= "dax page",
	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
	[MF_MSG_ALREADY_POISONED]	= "already poisoned",
	[MF_MSG_UNKNOWN]		= "unknown page",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct folio *folio)
{
	if (folio_isolate_lru(folio)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the folio is unpoison-and-freed.
		 */
		folio_clear_active(folio);
		folio_clear_unevictable(folio);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(folio);

		/*
		 * drop the refcount elevated by folio_isolate_lru()
		 */
		folio_put(folio);
		return 0;
	}
	return -EIO;
}

static int truncate_error_folio(struct folio *folio, unsigned long pfn,
				struct address_space *mapping)
{
	int ret = MF_FAILED;

	if (mapping->a_ops->error_remove_folio) {
		int err = mapping->a_ops->error_remove_folio(mapping, folio);

		if (err != 0)
			pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
		else if (!filemap_release_folio(folio, GFP_NOIO))
			pr_info("%#lx: failed to release buffers\n", pfn);
		else
			ret = MF_RECOVERED;
	} else {
		/*
		 * If the file system doesn't support it just invalidate
		 * This fails on dirty or anything with private pages
		 */
		if (mapping_evict_folio(mapping, folio))
			ret = MF_RECOVERED;
		else
			pr_info("%#lx: Failed to invalidate\n", pfn);
	}

	return ret;
}

struct page_state {
	unsigned long mask;
	unsigned long res;
	enum mf_action_page_type type;

	/* Callback ->action() has to unlock the relevant page inside it. */
	int (*action)(struct page_state *ps, struct page *p);
};

/*
 * Return true if page is still referenced by others, otherwise return
 * false.
 *
 * The extra_pins is true when one extra refcount is expected.
 */
static bool has_extra_refcount(struct page_state *ps, struct page *p,
			       bool extra_pins)
{
	int count = page_count(p) - 1;

	if (extra_pins)
		count -= folio_nr_pages(page_folio(p));

	if (count > 0) {
		pr_err("%#lx: %s still referenced by %d users\n",
		       page_to_pfn(p), action_page_types[ps->type], count);
		return true;
	}

	return false;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page_state *ps, struct page *p)
{
	unlock_page(p);
	return MF_IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 * This is a catch-all in case we fail to make sense of the page state.
 */
static int me_unknown(struct page_state *ps, struct page *p)
{
	pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
	unlock_page(p);
	return MF_IGNORED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int ret;
	struct address_space *mapping;
	bool extra_pins;

	delete_from_lru_cache(folio);

	/*
	 * For anonymous folios the only reference left
	 * should be the one m_f() holds.
	 */
	if (folio_test_anon(folio)) {
		ret = MF_RECOVERED;
		goto out;
	}

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = folio_mapping(folio);
	if (!mapping) {
		/* Folio has been torn down in the meantime */
		ret = MF_FAILED;
		goto out;
	}

	/*
	 * The shmem page is kept in page cache instead of being truncated,
	 * so it is expected to have an extra refcount after error-handling.
	 */
	extra_pins = shmem_mapping(mapping);

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_rwsem or not for this? Right now we don't.
	 */
	ret = truncate_error_folio(folio, page_to_pfn(p), mapping);
	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

out:
	folio_unlock(folio);

	return ret;
}

/*
 * Dirty pagecache page
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	struct address_space *mapping = folio_mapping(folio);

	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 */
		mapping_set_error(mapping, -EIO);
	}

	return me_pagecache_clean(ps, p);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * table and swap cache (i.e. the page was freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear dirty bit to prevent IO
 *      - remove from LRU
 *      - but keep in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int ret;
	bool extra_pins = false;

	folio_clear_dirty(folio);
	/* Trigger EIO in shmem: */
	folio_clear_uptodate(folio);

	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED;
	folio_unlock(folio);

	if (ret == MF_DELAYED)
		extra_pins = true;

	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

	return ret;
}

static int me_swapcache_clean(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int ret;

	delete_from_swap_cache(folio);

	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
	folio_unlock(folio);

	if (has_extra_refcount(ps, p, false))
		ret = MF_FAILED;

	return ret;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
 *   To narrow down kill region to one page, we need to break up pmd.
 */
static int me_huge_page(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int res;
	struct address_space *mapping;
	bool extra_pins = false;

	mapping = folio_mapping(folio);
	if (mapping) {
		res = truncate_error_folio(folio, page_to_pfn(p), mapping);
		/* The page is kept in page cache. */
		extra_pins = true;
		folio_unlock(folio);
	} else {
		folio_unlock(folio);
		/*
		 * migration entry prevents later access on error hugepage,
		 * so we can free and dissolve it into buddy to save healthy
		 * subpages.
		 */
		folio_put(folio);
		if (__page_handle_poison(p) > 0) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		} else {
			res = MF_FAILED;
		}
	}

	if (has_extra_refcount(ps, p, extra_pins))
		res = MF_FAILED;

	return res;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access page at any time
 * in its live cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define lru		(1UL << PG_lru)
#define head		(1UL << PG_head)
#define reserved	(1UL << PG_reserved)

static struct page_state error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	{ head,		head,		MF_MSG_HUGE,		me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef lru
#undef head
#undef reserved
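
/*
 * The table above is matched first-fit: identify_page_state() walks it and
 * picks the first entry where (page_flags & mask) == res. For example, a
 * dirty page on the LRU (PG_lru | PG_dirty set, no swapcache/mlock/
 * unevictable bits) falls through the earlier rows and matches
 * { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty }, while the
 * same page without PG_dirty matches the "clean LRU" row because that row's
 * mask still tests PG_dirty. The { 0, 0, ... } catchall matches anything.
 */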

static void update_per_node_mf_stats(unsigned long pfn,
				     enum mf_result result)
{
	int nid = MAX_NUMNODES;
	struct memory_failure_stats *mf_stats = NULL;

	nid = pfn_to_nid(pfn);
	if (unlikely(nid < 0 || nid >= MAX_NUMNODES)) {
		WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid);
		return;
	}

	mf_stats = &NODE_DATA(nid)->mf_stats;
	switch (result) {
	case MF_IGNORED:
		++mf_stats->ignored;
		break;
	case MF_FAILED:
		++mf_stats->failed;
		break;
	case MF_DELAYED:
		++mf_stats->delayed;
		break;
	case MF_RECOVERED:
		++mf_stats->recovered;
		break;
	default:
		WARN_ONCE(1, "Memory failure: mf_result=%d is not properly handled", result);
		break;
	}
	++mf_stats->total;
}

/*
 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
 */
static int action_result(unsigned long pfn, enum mf_action_page_type type,
			 enum mf_result result)
{
	trace_memory_failure_event(pfn, type, result);

	num_poisoned_pages_inc(pfn);

	update_per_node_mf_stats(pfn, result);

	pr_err("%#lx: recovery action for %s: %s\n",
		pfn, action_page_types[type], action_name[result]);

	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}

static int page_action(struct page_state *ps, struct page *p,
		       unsigned long pfn)
{
	int result;

	/* page p should be unlocked after returning from ps->action(). */
	result = ps->action(ps, p);

	/* Could do more checks here if page looks ok */
	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return action_result(pfn, ps->type, result);
}

static inline bool PageHWPoisonTakenOff(struct page *page)
{
	return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
}

void SetPageHWPoisonTakenOff(struct page *page)
{
	set_page_private(page, MAGIC_HWPOISON);
}

void ClearPageHWPoisonTakenOff(struct page *page)
{
	if (PageHWPoison(page))
		set_page_private(page, 0);
}

/*
 * Return true if a page type of a given page is supported by hwpoison
 * mechanism (while handling could fail), otherwise false. This function
 * does not return true for hugetlb or device memory pages, so it's assumed
 * to be called only in the context where we never have such pages.
 */
static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
{
	if (PageSlab(page))
		return false;

	/* Soft offline could migrate movable_ops pages */
	if ((flags & MF_SOFT_OFFLINE) && page_has_movable_ops(page))
		return true;

	return PageLRU(page) || is_free_buddy_page(page);
}

static int __get_hwpoison_page(struct page *page, unsigned long flags)
{
	struct folio *folio = page_folio(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false);
	if (hugetlb) {
		/* Make sure hugetlb demotion did not happen from under us. */
		if (folio == page_folio(page))
			return ret;
		if (ret > 0) {
			folio_put(folio);
			folio = page_folio(page);
		}
	}

	/*
	 * This check prevents from calling folio_try_get() for any
	 * unsupported type of folio in order to reduce the risk of unexpected
	 * races caused by taking a folio refcount.
	 */
	if (!HWPoisonHandlable(&folio->page, flags))
		return -EBUSY;

	if (folio_try_get(folio)) {
		if (folio == page_folio(page))
			return 1;

		pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
		folio_put(folio);
	}

	return 0;
}

#define GET_PAGE_MAX_RETRY_NUM 3

static int get_any_page(struct page *p, unsigned long flags)
{
	int ret = 0, pass = 0;
	bool count_increased = false;

	if (flags & MF_COUNT_INCREASED)
		count_increased = true;

try_again:
	if (!count_increased) {
		ret = __get_hwpoison_page(p, flags);
		if (!ret) {
			if (page_count(p)) {
				/* We raced with an allocation, retry. */
				if (pass++ < GET_PAGE_MAX_RETRY_NUM)
					goto try_again;
				ret = -EBUSY;
			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
				/* We raced with put_page, retry. */
				if (pass++ < GET_PAGE_MAX_RETRY_NUM)
					goto try_again;
				ret = -EIO;
			}
			goto out;
		} else if (ret == -EBUSY) {
			/*
			 * We raced with (possibly temporary) unhandlable
			 * page, retry.
			 */
			if (pass++ < GET_PAGE_MAX_RETRY_NUM) {
				shake_page(p);
				goto try_again;
			}
			ret = -EIO;
			goto out;
		}
	}

	if (PageHuge(p) || HWPoisonHandlable(p, flags)) {
		ret = 1;
	} else {
		/*
		 * A page we cannot handle. Check whether we can turn
		 * it into something we can handle.
		 */
		if (pass++ < GET_PAGE_MAX_RETRY_NUM) {
			put_page(p);
			shake_page(p);
			count_increased = false;
			goto try_again;
		}
		put_page(p);
		ret = -EIO;
	}
out:
	if (ret == -EIO)
		pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));

	return ret;
}

static int __get_unpoison_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true);
	if (hugetlb) {
		/* Make sure hugetlb demotion did not happen from under us. */
		if (folio == page_folio(page))
			return ret;
		if (ret > 0)
			folio_put(folio);
	}

	/*
	 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
	 * but also isolated from buddy freelist, so need to identify the
	 * state and have to cancel both operations to unpoison.
	 */
	if (PageHWPoisonTakenOff(page))
		return -EHWPOISON;

	return get_page_unless_zero(page) ? 1 : 0;
}

/**
 * get_hwpoison_page() - Get refcount for memory error handling
 * @p:		Raw error page (hit by memory error)
 * @flags:	Flags controlling behavior of error handling
 *
 * get_hwpoison_page() takes a page refcount of an error page to handle memory
 * error on it, after checking that the error page is in a well-defined state
 * (defined as a page type on which we can successfully handle the memory
 * error, such as LRU page and hugetlb page).
 *
 * Memory error handling could be triggered at any time on any type of page,
 * so it's prone to race with typical memory management lifecycle (like
 * allocation and free). So to avoid such races, get_hwpoison_page() takes
 * extra care for the error page's state (as done in __get_hwpoison_page()),
 * and has some retry logic in get_any_page().
 *
 * When called from unpoison_memory(), the caller should already ensure that
 * the given page has PG_hwpoison. So it's never reused for other page
 * allocations, and __get_unpoison_page() never races with them.
 *
 * Return: 0 on failure or free buddy (hugetlb) page,
 *         1 on success for in-use pages in a well-defined state,
 *         -EIO for pages on which we can not handle memory errors,
 *         -EBUSY when get_hwpoison_page() has raced with page lifecycle
 *         operations like allocation and free,
 *         -EHWPOISON when the page is hwpoisoned and taken off from buddy.
 */
static int get_hwpoison_page(struct page *p, unsigned long flags)
{
	int ret;

	zone_pcp_disable(page_zone(p));
	if (flags & MF_UNPOISON)
		ret = __get_unpoison_page(p);
	else
		ret = get_any_page(p, flags);
	zone_pcp_enable(page_zone(p));

	return ret;
}
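
/*
 * A sketch of the calling convention, as a hypothetical caller might use it
 * (memory_failure() does essentially this before acting on the page):
 *
 *	res = get_hwpoison_page(p, flags);
 *	if (res == 1)
 *		// we hold a ref; proceed to unmap/kill/isolate the page
 *	else if (!res)
 *		// free (buddy/hugetlb) page; handle via the free-page path
 *	else
 *		// -EIO/-EBUSY/-EHWPOISON; bail out with that error
 */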

/*
 * The caller must guarantee the folio isn't a large folio, except hugetlb.
 * try_to_unmap() can't handle it.
 */
int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
{
	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
	struct address_space *mapping;

	if (folio_test_swapcache(folio)) {
		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
		ttu &= ~TTU_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = folio_mapping(folio);
	if (!must_kill && !folio_test_dirty(folio) && mapping &&
	    mapping_can_writeback(mapping)) {
		if (folio_mkclean(folio)) {
			folio_set_dirty(folio);
		} else {
			ttu &= ~TTU_HWPOISON;
			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
		/*
		 * For hugetlb folios in shared mappings, try_to_unmap
		 * could potentially call huge_pmd_unshare. Because of
		 * this, take semaphore in write mode here and set
		 * TTU_RMAP_LOCKED to indicate we have taken the lock
		 * at this higher level.
		 */
		mapping = hugetlb_folio_mapping_lock_write(folio);
		if (!mapping) {
			pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n",
				folio_pfn(folio));
			return -EBUSY;
		}

		try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
		i_mmap_unlock_write(mapping);
	} else {
		try_to_unmap(folio, ttu);
	}

	return folio_mapped(folio) ? -EBUSY : 0;
}

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
				   unsigned long pfn, int flags)
{
	LIST_HEAD(tokill);
	bool unmap_success;
	int forcekill;
	bool mlocked = folio_test_mlocked(folio);

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (folio_test_reserved(folio) || folio_test_slab(folio) ||
	    folio_test_pgtable(folio) || folio_test_offline(folio))
		return true;
	if (!(folio_test_lru(folio) || folio_test_hugetlb(folio)))
		return true;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!folio_mapped(folio))
		return true;

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 */
	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);

	unmap_success = !unmap_poisoned_folio(folio, pfn, flags & MF_MUST_KILL);
	if (!unmap_success)
		pr_err("%#lx: failed to unmap page (folio mapcount=%d)\n",
		       pfn, folio_mapcount(folio));

	/*
	 * try_to_unmap() might put mlocked page in lru cache, so call
	 * shake_page() again to ensure that it's flushed.
	 */
	if (mlocked)
		shake_folio(folio);

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps are done we can decide if
	 * killing is needed or not. Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = folio_test_dirty(folio) || (flags & MF_MUST_KILL) ||
		    !unmap_success;
	kill_procs(&tokill, forcekill, pfn, flags);

	return unmap_success;
}
1690
identify_page_state(unsigned long pfn,struct page * p,unsigned long page_flags)1691 static int identify_page_state(unsigned long pfn, struct page *p,
1692 unsigned long page_flags)
1693 {
1694 struct page_state *ps;
1695
1696 /*
1697 * The first check uses the current page flags which may not have any
1698 * relevant information. The second check with the saved page flags is
1699 * carried out only if the first check can't determine the page status.
1700 */
1701 for (ps = error_states;; ps++)
1702 if ((p->flags & ps->mask) == ps->res)
1703 break;
1704
1705 page_flags |= (p->flags & (1UL << PG_dirty));
1706
1707 if (!ps->mask)
1708 for (ps = error_states;; ps++)
1709 if ((page_flags & ps->mask) == ps->res)
1710 break;
1711 return page_action(ps, p, pfn);
1712 }
1713
1714 /*
1715 * When 'release' is 'false', it means that if thp split has failed,
1716 * there is still more to do, hence the page refcount we took earlier
1717 * is still needed.
1718 */
try_to_split_thp_page(struct page * page,bool release)1719 static int try_to_split_thp_page(struct page *page, bool release)
1720 {
1721 int ret;
1722
1723 lock_page(page);
1724 ret = split_huge_page(page);
1725 unlock_page(page);
1726
1727 if (ret && release)
1728 put_page(page);
1729
1730 return ret;
1731 }
1732
unmap_and_kill(struct list_head * to_kill,unsigned long pfn,struct address_space * mapping,pgoff_t index,int flags)1733 static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
1734 struct address_space *mapping, pgoff_t index, int flags)
1735 {
1736 struct to_kill *tk;
1737 unsigned long size = 0;
1738
1739 list_for_each_entry(tk, to_kill, nd)
1740 if (tk->size_shift)
1741 size = max(size, 1UL << tk->size_shift);
1742
1743 if (size) {
1744 /*
1745 * Unmap the largest mapping to avoid breaking up device-dax
1746 * mappings which are constant size. The actual size of the
1747 * mapping being torn down is communicated in siginfo, see
1748 * kill_proc()
1749 */
1750 loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);
1751
1752 unmap_mapping_range(mapping, start, size, 0);
1753 }
1754
1755 kill_procs(to_kill, flags & MF_MUST_KILL, pfn, flags);
1756 }
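/*
 * Worked example (illustrative only; the values are hypothetical and
 * assume 4KiB pages, i.e. PAGE_SHIFT == 12): with a 2MiB device-dax
 * mapping and a poisoned page at file index 0x201, the rounding above
 * tears down the whole constant-size mapping:
 *
 *	size  = 1UL << 21;				// 2MiB
 *	start = ((loff_t)0x201 << PAGE_SHIFT) & ~(size - 1);
 *	// start == 0x200000; the entire 2MiB mapping containing the
 *	// poisoned page is unmapped, and the actual extent is reported
 *	// to user space via siginfo in kill_proc().
 */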
1757
1758 /*
1759 * Only dev_pagemap pages get here, such as fsdax when the filesystem
1760 * either does not claim or fails to claim a hwpoison event, or devdax.
1761 * The fsdax pages are initialized per base page, and the devdax pages
1762 * could be initialized either as base pages, or as compound pages with
1763 * vmemmap optimization enabled. Devdax is simplistic in its dealing with
1764 * hwpoison, such that, if a subpage of a compound page is poisoned,
1765 * simply marking the compound head page is sufficient.
1766 */
1767 static int mf_generic_kill_procs(unsigned long long pfn, int flags,
1768 struct dev_pagemap *pgmap)
1769 {
1770 struct folio *folio = pfn_folio(pfn);
1771 LIST_HEAD(to_kill);
1772 dax_entry_t cookie;
1773 int rc = 0;
1774
1775 /*
1776 * Prevent the inode from being freed while we are interrogating
1777 * the address_space, typically this would be handled by
1778 * lock_page(), but dax pages do not use the page lock. This
1779 * also prevents changes to the mapping of this pfn until
1780 * poison signaling is complete.
1781 */
1782 cookie = dax_lock_folio(folio);
1783 if (!cookie)
1784 return -EBUSY;
1785
1786 if (hwpoison_filter(&folio->page)) {
1787 rc = -EOPNOTSUPP;
1788 goto unlock;
1789 }
1790
1791 switch (pgmap->type) {
1792 case MEMORY_DEVICE_PRIVATE:
1793 case MEMORY_DEVICE_COHERENT:
1794 /*
1795 * TODO: Handle device pages which may need coordination
1796 * with device-side memory.
1797 */
1798 rc = -ENXIO;
1799 goto unlock;
1800 default:
1801 break;
1802 }
1803
1804 /*
1805 * Use this flag as an indication that the dax page has been
1806 * remapped UC to prevent speculative consumption of poison.
1807 */
1808 SetPageHWPoison(&folio->page);
1809
1810 /*
1811 * Unlike System-RAM there is no possibility to swap in a
1812 * different physical page at a given virtual address, so all
1813 * userspace consumption of ZONE_DEVICE memory necessitates
1814 * SIGBUS (i.e. MF_MUST_KILL)
1815 */
1816 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1817 collect_procs(folio, &folio->page, &to_kill, true);
1818
1819 unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
1820 unlock:
1821 dax_unlock_folio(folio, cookie);
1822 return rc;
1823 }
1824
1825 #ifdef CONFIG_FS_DAX
1826 /**
1827 * mf_dax_kill_procs - Collect and kill processes who are using this file range
1828 * @mapping: address_space of the file in use
1829 * @index: start pgoff of the range within the file
1830 * @count: length of the range, in unit of PAGE_SIZE
1831 * @mf_flags: memory failure flags
1832 */
1833 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
1834 unsigned long count, int mf_flags)
1835 {
1836 LIST_HEAD(to_kill);
1837 dax_entry_t cookie;
1838 struct page *page;
1839 size_t end = index + count;
1840 bool pre_remove = mf_flags & MF_MEM_PRE_REMOVE;
1841
1842 mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1843
1844 for (; index < end; index++) {
1845 page = NULL;
1846 cookie = dax_lock_mapping_entry(mapping, index, &page);
1847 if (!cookie)
1848 return -EBUSY;
1849 if (!page)
1850 goto unlock;
1851
1852 if (!pre_remove)
1853 SetPageHWPoison(page);
1854
1855 /*
1856 * The pre_remove case is revoking access; the memory is still
1857 * good and could theoretically be put back into service.
1858 */
1859 collect_procs_fsdax(page, mapping, index, &to_kill, pre_remove);
1860 unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
1861 index, mf_flags);
1862 unlock:
1863 dax_unlock_mapping_entry(mapping, index, cookie);
1864 }
1865 return 0;
1866 }
1867 EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
1868 #endif /* CONFIG_FS_DAX */
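/*
 * Usage sketch (hypothetical, not a complete driver): a filesystem that
 * claims dax hwpoison events would typically resolve the failed device
 * range to a file range and then call mf_dax_kill_procs() from its
 * ->notify_failure() path, roughly as follows:
 *
 *	static int example_notify_failure(struct address_space *mapping,
 *					  pgoff_t pgoff, unsigned long nr_pages,
 *					  int mf_flags)
 *	{
 *		// 'mapping', 'pgoff' and 'nr_pages' are assumed to have
 *		// been derived from the failed device offset and length
 *		return mf_dax_kill_procs(mapping, pgoff, nr_pages, mf_flags);
 *	}
 */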
1869
1870 #ifdef CONFIG_HUGETLB_PAGE
1871
1872 /*
1873 * Struct raw_hwp_page represents information about a "raw error page",
1874 * forming a singly linked list via the ->_hugetlb_hwpoison field of the folio.
1875 */
1876 struct raw_hwp_page {
1877 struct llist_node node;
1878 struct page *page;
1879 };
1880
1881 static inline struct llist_head *raw_hwp_list_head(struct folio *folio)
1882 {
1883 return (struct llist_head *)&folio->_hugetlb_hwpoison;
1884 }
1885
1886 bool is_raw_hwpoison_page_in_hugepage(struct page *page)
1887 {
1888 struct llist_head *raw_hwp_head;
1889 struct raw_hwp_page *p;
1890 struct folio *folio = page_folio(page);
1891 bool ret = false;
1892
1893 if (!folio_test_hwpoison(folio))
1894 return false;
1895
1896 if (!folio_test_hugetlb(folio))
1897 return PageHWPoison(page);
1898
1899 /*
1900 * When RawHwpUnreliable is set, the kernel has lost track of which
1901 * subpages are HWPOISON. So return as if ALL subpages are HWPOISONed.
1902 */
1903 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1904 return true;
1905
1906 mutex_lock(&mf_mutex);
1907
1908 raw_hwp_head = raw_hwp_list_head(folio);
1909 llist_for_each_entry(p, raw_hwp_head->first, node) {
1910 if (page == p->page) {
1911 ret = true;
1912 break;
1913 }
1914 }
1915
1916 mutex_unlock(&mf_mutex);
1917
1918 return ret;
1919 }
1920
1921 static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag)
1922 {
1923 struct llist_node *head;
1924 struct raw_hwp_page *p, *next;
1925 unsigned long count = 0;
1926
1927 head = llist_del_all(raw_hwp_list_head(folio));
1928 llist_for_each_entry_safe(p, next, head, node) {
1929 if (move_flag)
1930 SetPageHWPoison(p->page);
1931 else
1932 num_poisoned_pages_sub(page_to_pfn(p->page), 1);
1933 kfree(p);
1934 count++;
1935 }
1936 return count;
1937 }
1938
1939 static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page)
1940 {
1941 struct llist_head *head;
1942 struct raw_hwp_page *raw_hwp;
1943 struct raw_hwp_page *p;
1944 int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0;
1945
1946 /*
1947 * Once the hwpoison hugepage has lost reliable raw error info,
1948 * there is little point in keeping additional error info precisely,
1949 * so skip adding further raw error info.
1950 */
1951 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1952 return -EHWPOISON;
1953 head = raw_hwp_list_head(folio);
1954 llist_for_each_entry(p, head->first, node) {
1955 if (p->page == page)
1956 return -EHWPOISON;
1957 }
1958
1959 raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
1960 if (raw_hwp) {
1961 raw_hwp->page = page;
1962 llist_add(&raw_hwp->node, head);
1963 /* the first error event will be counted in action_result(). */
1964 if (ret)
1965 num_poisoned_pages_inc(page_to_pfn(page));
1966 } else {
1967 /*
1968 * Failed to save raw error info. We no longer trace all
1969 * hwpoisoned subpages, and we must refuse to free/dissolve
1970 * this hwpoisoned hugepage.
1971 */
1972 folio_set_hugetlb_raw_hwp_unreliable(folio);
1973 /*
1974 * Once hugetlb_raw_hwp_unreliable is set, raw_hwp_page is not
1975 * used any more, so free it.
1976 */
1977 __folio_free_raw_hwp(folio, false);
1978 }
1979 return ret;
1980 }
1981
1982 static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag)
1983 {
1984 /*
1985 * hugetlb_vmemmap_optimized hugepages can't be freed because struct
1986 * pages for tail pages are required but they don't exist.
1987 */
1988 if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio))
1989 return 0;
1990
1991 /*
1992 * hugetlb_raw_hwp_unreliable hugepages shouldn't be unpoisoned by
1993 * definition.
1994 */
1995 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1996 return 0;
1997
1998 return __folio_free_raw_hwp(folio, move_flag);
1999 }
2000
2001 void folio_clear_hugetlb_hwpoison(struct folio *folio)
2002 {
2003 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
2004 return;
2005 if (folio_test_hugetlb_vmemmap_optimized(folio))
2006 return;
2007 folio_clear_hwpoison(folio);
2008 folio_free_raw_hwp(folio, true);
2009 }
2010
2011 /*
2012 * Called from hugetlb code with hugetlb_lock held.
2013 *
2014 * Return values:
2015 * 0 - free hugepage
2016 * 1 - in-use hugepage
2017 * 2 - not a hugepage
2018 * -EBUSY - the hugepage is busy (try to retry)
2019 * -EHWPOISON - the hugepage is already hwpoisoned
2020 */
2021 int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
2022 bool *migratable_cleared)
2023 {
2024 struct page *page = pfn_to_page(pfn);
2025 struct folio *folio = page_folio(page);
2026 int ret = 2; /* fallback to normal page handling */
2027 bool count_increased = false;
2028
2029 if (!folio_test_hugetlb(folio))
2030 goto out;
2031
2032 if (flags & MF_COUNT_INCREASED) {
2033 ret = 1;
2034 count_increased = true;
2035 } else if (folio_test_hugetlb_freed(folio)) {
2036 ret = 0;
2037 } else if (folio_test_hugetlb_migratable(folio)) {
2038 ret = folio_try_get(folio);
2039 if (ret)
2040 count_increased = true;
2041 } else {
2042 ret = -EBUSY;
2043 if (!(flags & MF_NO_RETRY))
2044 goto out;
2045 }
2046
2047 if (folio_set_hugetlb_hwpoison(folio, page)) {
2048 ret = -EHWPOISON;
2049 goto out;
2050 }
2051
2052 /*
2053 * Clearing hugetlb_migratable for hwpoisoned hugepages to prevent them
2054 * from being migrated by memory hotremove.
2055 */
2056 if (count_increased && folio_test_hugetlb_migratable(folio)) {
2057 folio_clear_hugetlb_migratable(folio);
2058 *migratable_cleared = true;
2059 }
2060
2061 return ret;
2062 out:
2063 if (count_increased)
2064 folio_put(folio);
2065 return ret;
2066 }
2067
2068 /*
2069 * Taking a refcount on hugetlb pages needs extra care about race conditions
2070 * with basic operations like hugepage allocation/free/demotion.
2071 * So some of the prechecks for hwpoison (pinning, and testing/setting
2072 * PageHWPoison) should be done within a single hugetlb_lock section.
2073 */
2074 static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
2075 {
2076 int res;
2077 struct page *p = pfn_to_page(pfn);
2078 struct folio *folio;
2079 unsigned long page_flags;
2080 bool migratable_cleared = false;
2081
2082 *hugetlb = 1;
2083 retry:
2084 res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
2085 if (res == 2) { /* fallback to normal page handling */
2086 *hugetlb = 0;
2087 return 0;
2088 } else if (res == -EHWPOISON) {
2089 pr_err("%#lx: already hardware poisoned\n", pfn);
2090 if (flags & MF_ACTION_REQUIRED) {
2091 folio = page_folio(p);
2092 res = kill_accessing_process(current, folio_pfn(folio), flags);
2093 action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
2094 }
2095 return res;
2096 } else if (res == -EBUSY) {
2097 if (!(flags & MF_NO_RETRY)) {
2098 flags |= MF_NO_RETRY;
2099 goto retry;
2100 }
2101 return action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
2102 }
2103
2104 folio = page_folio(p);
2105 folio_lock(folio);
2106
2107 if (hwpoison_filter(p)) {
2108 folio_clear_hugetlb_hwpoison(folio);
2109 if (migratable_cleared)
2110 folio_set_hugetlb_migratable(folio);
2111 folio_unlock(folio);
2112 if (res == 1)
2113 folio_put(folio);
2114 return -EOPNOTSUPP;
2115 }
2116
2117 /*
2118 * Handling a free hugepage. The possible race with hugepage allocation
2119 * or demotion can be prevented by the PageHWPoison flag.
2120 */
2121 if (res == 0) {
2122 folio_unlock(folio);
2123 if (__page_handle_poison(p) > 0) {
2124 page_ref_inc(p);
2125 res = MF_RECOVERED;
2126 } else {
2127 res = MF_FAILED;
2128 }
2129 return action_result(pfn, MF_MSG_FREE_HUGE, res);
2130 }
2131
2132 page_flags = folio->flags;
2133
2134 if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
2135 folio_unlock(folio);
2136 return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED);
2137 }
2138
2139 return identify_page_state(pfn, p, page_flags);
2140 }
2141
2142 #else
2143 static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
2144 {
2145 return 0;
2146 }
2147
2148 static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag)
2149 {
2150 return 0;
2151 }
2152 #endif /* CONFIG_HUGETLB_PAGE */
2153
2154 /* Drop the extra refcount in case we come from madvise() */
2155 static void put_ref_page(unsigned long pfn, int flags)
2156 {
2157 if (!(flags & MF_COUNT_INCREASED))
2158 return;
2159
2160 put_page(pfn_to_page(pfn));
2161 }
2162
2163 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
2164 struct dev_pagemap *pgmap)
2165 {
2166 int rc = -ENXIO;
2167
2168 /* device metadata space is not recoverable */
2169 if (!pgmap_pfn_valid(pgmap, pfn))
2170 goto out;
2171
2172 /*
2173 * Call driver's implementation to handle the memory failure, otherwise
2174 * fall back to generic handler.
2175 */
2176 if (pgmap_has_memory_failure(pgmap)) {
2177 rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
2178 /*
2179 * Fall back to generic handler too if operation is not
2180 * supported inside the driver/device/filesystem.
2181 */
2182 if (rc != -EOPNOTSUPP)
2183 goto out;
2184 }
2185
2186 rc = mf_generic_kill_procs(pfn, flags, pgmap);
2187 out:
2188 /* drop pgmap ref acquired in caller */
2189 put_dev_pagemap(pgmap);
2190 if (rc != -EOPNOTSUPP)
2191 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
2192 return rc;
2193 }
2194
2195 /*
2196 * The calling condition is as such: thp split failed, the page might
2197 * have been RDMA pinned, and not much can be done for recovery.
2198 * But a SIGBUS should be delivered with vaddr provided so that the user
2199 * application has a chance to recover. Also, application processes'
2200 * election for MCE early kill will be honored.
2201 */
2202 static void kill_procs_now(struct page *p, unsigned long pfn, int flags,
2203 struct folio *folio)
2204 {
2205 LIST_HEAD(tokill);
2206
2207 collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
2208 kill_procs(&tokill, true, pfn, flags);
2209 }
2210
2211 /**
2212 * memory_failure - Handle memory failure of a page.
2213 * @pfn: Page Number of the corrupted page
2214 * @flags: fine tune action taken
2215 *
2216 * This function is called by the low level machine check code
2217 * of an architecture when it detects hardware memory corruption
2218 * of a page. It tries its best to recover, which includes
2219 * dropping pages, killing processes etc.
2220 *
2221 * The function is primarily of use for corruptions that
2222 * happen outside the current execution context (e.g. when
2223 * detected by a background scrubber)
2224 *
2225 * Must run in process context (e.g. a work queue) with interrupts
2226 * enabled and no spinlocks held.
2227 *
2228 * Return:
2229 * 0 - success,
2230 * -ENXIO - memory not managed by the kernel
2231 * -EOPNOTSUPP - hwpoison_filter() filtered the error event,
2232 * -EHWPOISON - the page was already poisoned, and the accessing
2233 *              process may have been killed,
2234 * other negative values - failure.
2235 */
2236 int memory_failure(unsigned long pfn, int flags)
2237 {
2238 struct page *p;
2239 struct folio *folio;
2240 struct dev_pagemap *pgmap;
2241 int res = 0;
2242 unsigned long page_flags;
2243 bool retry = true;
2244 int hugetlb = 0;
2245
2246 if (!sysctl_memory_failure_recovery)
2247 panic("Memory failure on page %lx", pfn);
2248
2249 mutex_lock(&mf_mutex);
2250
2251 if (!(flags & MF_SW_SIMULATED))
2252 hw_memory_failure = true;
2253
2254 p = pfn_to_online_page(pfn);
2255 if (!p) {
2256 res = arch_memory_failure(pfn, flags);
2257 if (res == 0)
2258 goto unlock_mutex;
2259
2260 if (pfn_valid(pfn)) {
2261 pgmap = get_dev_pagemap(pfn, NULL);
2262 put_ref_page(pfn, flags);
2263 if (pgmap) {
2264 res = memory_failure_dev_pagemap(pfn, flags,
2265 pgmap);
2266 goto unlock_mutex;
2267 }
2268 }
2269 pr_err("%#lx: memory outside kernel control\n", pfn);
2270 res = -ENXIO;
2271 goto unlock_mutex;
2272 }
2273
2274 try_again:
2275 res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
2276 if (hugetlb)
2277 goto unlock_mutex;
2278
2279 if (TestSetPageHWPoison(p)) {
2280 pr_err("%#lx: already hardware poisoned\n", pfn);
2281 res = -EHWPOISON;
2282 if (flags & MF_ACTION_REQUIRED)
2283 res = kill_accessing_process(current, pfn, flags);
2284 if (flags & MF_COUNT_INCREASED)
2285 put_page(p);
2286 action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
2287 goto unlock_mutex;
2288 }
2289
2290 /*
2291 * There is nothing we can or need to do about count=0 pages.
2292 * 1) it's a free page, and therefore in safe hands:
2293 * check_new_page() will be the gate keeper.
2294 * 2) it's part of a non-compound high order page.
2295 * Implies some kernel user: cannot stop them from
2296 * R/W the page; let's pray that the page has been
2297 * used and will be freed some time later.
2298 * In fact it's dangerous to directly bump up page count from 0,
2299 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
2300 */
2301 if (!(flags & MF_COUNT_INCREASED)) {
2302 res = get_hwpoison_page(p, flags);
2303 if (!res) {
2304 if (is_free_buddy_page(p)) {
2305 if (take_page_off_buddy(p)) {
2306 page_ref_inc(p);
2307 res = MF_RECOVERED;
2308 } else {
2309 /* We lost the race, try again */
2310 if (retry) {
2311 ClearPageHWPoison(p);
2312 retry = false;
2313 goto try_again;
2314 }
2315 res = MF_FAILED;
2316 }
2317 res = action_result(pfn, MF_MSG_BUDDY, res);
2318 } else {
2319 res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
2320 }
2321 goto unlock_mutex;
2322 } else if (res < 0) {
2323 res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
2324 goto unlock_mutex;
2325 }
2326 }
2327
2328 folio = page_folio(p);
2329
2330 /* filter pages that are protected from hwpoison test by users */
2331 folio_lock(folio);
2332 if (hwpoison_filter(p)) {
2333 ClearPageHWPoison(p);
2334 folio_unlock(folio);
2335 folio_put(folio);
2336 res = -EOPNOTSUPP;
2337 goto unlock_mutex;
2338 }
2339 folio_unlock(folio);
2340
2341 if (folio_test_large(folio)) {
2342 /*
2343 * The flag must be set after the refcount is bumped
2344 * otherwise it may race with THP split.
2345 * And the flag can't be set in get_hwpoison_page() since
2346 * it is called by soft offline too and it is just called
2347 * for !MF_COUNT_INCREASED. So here seems to be the best
2348 * place.
2349 *
2350 		 * No need to worry about the above error handling paths for
2351 		 * get_hwpoison_page() since they handle either free pages
2352 		 * or unhandlable pages. The refcount is bumped iff the
2353 		 * page is a valid handlable page.
2354 */
2355 folio_set_has_hwpoisoned(folio);
2356 if (try_to_split_thp_page(p, false) < 0) {
2357 res = -EHWPOISON;
2358 kill_procs_now(p, pfn, flags, folio);
2359 put_page(p);
2360 action_result(pfn, MF_MSG_UNSPLIT_THP, MF_FAILED);
2361 goto unlock_mutex;
2362 }
2363 VM_BUG_ON_PAGE(!page_count(p), p);
2364 folio = page_folio(p);
2365 }
2366
2367 /*
2368 * We ignore non-LRU pages for good reasons.
2369 * - PG_locked is only well defined for LRU pages and a few others
2370 * - to avoid races with __SetPageLocked()
2371 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
2372 * The check (unnecessarily) ignores LRU pages being isolated and
2373 * walked by the page reclaim code, however that's not a big loss.
2374 */
2375 shake_folio(folio);
2376
2377 folio_lock(folio);
2378
2379 /*
2380 	 * We only intend to deal with non-compound pages here.
2381 	 * The page cannot become a compound page again as the folio has
2382 	 * been split and an extra refcount is held.
2383 */
2384 WARN_ON(folio_test_large(folio));
2385
2386 /*
2387 * We use page flags to determine what action should be taken, but
2388 * the flags can be modified by the error containment action. One
2389 * example is an mlocked page, where PG_mlocked is cleared by
2390 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
2391 * status correctly, we save a copy of the page flags at this time.
2392 */
2393 page_flags = folio->flags;
2394
2395 /*
2396 * __munlock_folio() may clear a writeback folio's LRU flag without
2397 * the folio lock. We need to wait for writeback completion for this
2398 	 * folio or it may trigger a vfs BUG while evicting the inode.
2399 */
2400 if (!folio_test_lru(folio) && !folio_test_writeback(folio))
2401 goto identify_page_state;
2402
2403 /*
2404 * It's very difficult to mess with pages currently under IO
2405 * and in many cases impossible, so we just avoid it here.
2406 */
2407 folio_wait_writeback(folio);
2408
2409 /*
2410 * Now take care of user space mappings.
2411 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
2412 */
2413 if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
2414 res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED);
2415 goto unlock_page;
2416 }
2417
2418 /*
2419 * Torn down by someone else?
2420 */
2421 if (folio_test_lru(folio) && !folio_test_swapcache(folio) &&
2422 folio->mapping == NULL) {
2423 res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
2424 goto unlock_page;
2425 }
2426
2427 identify_page_state:
2428 res = identify_page_state(pfn, p, page_flags);
2429 mutex_unlock(&mf_mutex);
2430 return res;
2431 unlock_page:
2432 folio_unlock(folio);
2433 unlock_mutex:
2434 mutex_unlock(&mf_mutex);
2435 return res;
2436 }
2437 EXPORT_SYMBOL_GPL(memory_failure);
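/*
 * Illustrative call site (a sketch, not code from this file): an
 * architecture's machine check handler, once back in process context,
 * would report a corrupted page roughly like this:
 *
 *	// 'paddr' is a hypothetical physical address from the MCE record
 *	ret = memory_failure(PHYS_PFN(paddr), MF_ACTION_REQUIRED);
 *	if (ret == -EHWPOISON || ret == -EOPNOTSUPP)
 *		return;		// already poisoned or filtered
 */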
2438
2439 #define MEMORY_FAILURE_FIFO_ORDER 4
2440 #define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
2441
2442 struct memory_failure_entry {
2443 unsigned long pfn;
2444 int flags;
2445 };
2446
2447 struct memory_failure_cpu {
2448 DECLARE_KFIFO(fifo, struct memory_failure_entry,
2449 MEMORY_FAILURE_FIFO_SIZE);
2450 raw_spinlock_t lock;
2451 struct work_struct work;
2452 };
2453
2454 static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
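/*
 * Sizing note (illustrative): with MEMORY_FAILURE_FIFO_ORDER == 4, each
 * CPU buffers up to 1 << 4 == 16 pending memory_failure_entry records.
 * A failing kfifo_put() in memory_failure_queue() below means this
 * per-cpu buffer overflowed and the event had to be dropped.
 */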
2455
2456 /**
2457 * memory_failure_queue - Schedule handling memory failure of a page.
2458 * @pfn: Page Number of the corrupted page
2459 * @flags: Flags for memory failure handling
2460 *
2461 * This function is called by the low level hardware error handler
2462 * when it detects hardware memory corruption of a page. It schedules
2463 * the recovery of the error page, including dropping pages, killing
2464 * processes etc.
2465 *
2466 * The function is primarily of use for corruptions that
2467 * happen outside the current execution context (e.g. when
2468 * detected by a background scrubber)
2469 *
2470 * Can run in IRQ context.
2471 */
2472 void memory_failure_queue(unsigned long pfn, int flags)
2473 {
2474 struct memory_failure_cpu *mf_cpu;
2475 unsigned long proc_flags;
2476 bool buffer_overflow;
2477 struct memory_failure_entry entry = {
2478 .pfn = pfn,
2479 .flags = flags,
2480 };
2481
2482 mf_cpu = &get_cpu_var(memory_failure_cpu);
2483 raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2484 buffer_overflow = !kfifo_put(&mf_cpu->fifo, entry);
2485 if (!buffer_overflow)
2486 schedule_work_on(smp_processor_id(), &mf_cpu->work);
2487 raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
2488 put_cpu_var(memory_failure_cpu);
2489 if (buffer_overflow)
2490 pr_err("buffer overflow when queuing memory failure at %#lx\n",
2491 pfn);
2492 }
2493 EXPORT_SYMBOL_GPL(memory_failure_queue);
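/*
 * Illustrative sketch: unlike memory_failure(), this entry point may be
 * used from IRQ context, so a hardware error consumer (for example a
 * hypothetical firmware-first error handler) can simply queue the pfn
 * and return:
 *
 *	memory_failure_queue(PHYS_PFN(err_addr), 0);
 *
 * The actual recovery then runs from memory_failure_work_func() below.
 */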
2494
2495 static void memory_failure_work_func(struct work_struct *work)
2496 {
2497 struct memory_failure_cpu *mf_cpu;
2498 struct memory_failure_entry entry = { 0, };
2499 unsigned long proc_flags;
2500 int gotten;
2501
2502 mf_cpu = container_of(work, struct memory_failure_cpu, work);
2503 for (;;) {
2504 raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2505 gotten = kfifo_get(&mf_cpu->fifo, &entry);
2506 raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
2507 if (!gotten)
2508 break;
2509 if (entry.flags & MF_SOFT_OFFLINE)
2510 soft_offline_page(entry.pfn, entry.flags);
2511 else
2512 memory_failure(entry.pfn, entry.flags);
2513 }
2514 }
2515
2516 static int __init memory_failure_init(void)
2517 {
2518 struct memory_failure_cpu *mf_cpu;
2519 int cpu;
2520
2521 for_each_possible_cpu(cpu) {
2522 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
2523 raw_spin_lock_init(&mf_cpu->lock);
2524 INIT_KFIFO(mf_cpu->fifo);
2525 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
2526 }
2527
2528 register_sysctl_init("vm", memory_failure_table);
2529
2530 return 0;
2531 }
2532 core_initcall(memory_failure_init);
2533
2534 #undef pr_fmt
2535 #define pr_fmt(fmt) "Unpoison: " fmt
2536 #define unpoison_pr_info(fmt, pfn, rs) \
2537 ({ \
2538 if (__ratelimit(rs)) \
2539 pr_info(fmt, pfn); \
2540 })
2541
2542 /**
2543 * unpoison_memory - Unpoison a previously poisoned page
2544 * @pfn: Page number of the to be unpoisoned page
2545 *
2546 * Software-unpoison a page that has been poisoned by
2547 * memory_failure() earlier.
2548 *
2549 * This is only done on the software level, so it only works
2550 * for Linux-injected failures, not real hardware failures.
2551 *
2552 * Returns 0 for success, otherwise -errno.
2553 */
2554 int unpoison_memory(unsigned long pfn)
2555 {
2556 struct folio *folio;
2557 struct page *p;
2558 int ret = -EBUSY, ghp;
2559 unsigned long count;
2560 bool huge = false;
2561 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
2562 DEFAULT_RATELIMIT_BURST);
2563
2564 if (!pfn_valid(pfn))
2565 return -ENXIO;
2566
2567 p = pfn_to_page(pfn);
2568 folio = page_folio(p);
2569
2570 mutex_lock(&mf_mutex);
2571
2572 if (hw_memory_failure) {
2573 unpoison_pr_info("%#lx: disabled after HW memory failure\n",
2574 pfn, &unpoison_rs);
2575 ret = -EOPNOTSUPP;
2576 goto unlock_mutex;
2577 }
2578
2579 if (is_huge_zero_folio(folio)) {
2580 unpoison_pr_info("%#lx: huge zero page is not supported\n",
2581 pfn, &unpoison_rs);
2582 ret = -EOPNOTSUPP;
2583 goto unlock_mutex;
2584 }
2585
2586 if (!PageHWPoison(p)) {
2587 unpoison_pr_info("%#lx: page was already unpoisoned\n",
2588 pfn, &unpoison_rs);
2589 goto unlock_mutex;
2590 }
2591
2592 if (folio_ref_count(folio) > 1) {
2593 unpoison_pr_info("%#lx: someone grabs the hwpoison page\n",
2594 pfn, &unpoison_rs);
2595 goto unlock_mutex;
2596 }
2597
2598 if (folio_test_slab(folio) || folio_test_pgtable(folio) ||
2599 folio_test_reserved(folio) || folio_test_offline(folio))
2600 goto unlock_mutex;
2601
2602 if (folio_mapped(folio)) {
2603 unpoison_pr_info("%#lx: someone maps the hwpoison page\n",
2604 pfn, &unpoison_rs);
2605 goto unlock_mutex;
2606 }
2607
2608 if (folio_mapping(folio)) {
2609 unpoison_pr_info("%#lx: the hwpoison page has non-NULL mapping\n",
2610 pfn, &unpoison_rs);
2611 goto unlock_mutex;
2612 }
2613
2614 ghp = get_hwpoison_page(p, MF_UNPOISON);
2615 if (!ghp) {
2616 if (folio_test_hugetlb(folio)) {
2617 huge = true;
2618 count = folio_free_raw_hwp(folio, false);
2619 if (count == 0)
2620 goto unlock_mutex;
2621 }
2622 ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY;
2623 } else if (ghp < 0) {
2624 if (ghp == -EHWPOISON) {
2625 ret = put_page_back_buddy(p) ? 0 : -EBUSY;
2626 } else {
2627 ret = ghp;
2628 unpoison_pr_info("%#lx: failed to grab page\n",
2629 pfn, &unpoison_rs);
2630 }
2631 } else {
2632 if (folio_test_hugetlb(folio)) {
2633 huge = true;
2634 count = folio_free_raw_hwp(folio, false);
2635 if (count == 0) {
2636 folio_put(folio);
2637 goto unlock_mutex;
2638 }
2639 }
2640
2641 folio_put(folio);
2642 if (TestClearPageHWPoison(p)) {
2643 folio_put(folio);
2644 ret = 0;
2645 }
2646 }
2647
2648 unlock_mutex:
2649 mutex_unlock(&mf_mutex);
2650 if (!ret) {
2651 if (!huge)
2652 num_poisoned_pages_sub(pfn, 1);
2653 unpoison_pr_info("%#lx: software-unpoisoned page\n",
2654 page_to_pfn(p), &unpoison_rs);
2655 }
2656 return ret;
2657 }
2658 EXPORT_SYMBOL(unpoison_memory);
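/*
 * Usage note (sketch): unpoison_memory() is normally exercised through
 * the hwpoison test machinery rather than called directly, e.g. via the
 * debugfs interface provided by the hwpoison-inject module:
 *
 *	echo $pfn > /sys/kernel/debug/hwpoison/unpoison-pfn
 */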
2659
2660 #undef pr_fmt
2661 #define pr_fmt(fmt) "Soft offline: " fmt
2662
2663 /*
2664 * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
2665 * If the page is a non-dirty unmapped page-cache page, it is simply invalidated.
2666 * If the page is mapped, the contents are migrated over.
2667 */
2668 static int soft_offline_in_use_page(struct page *page)
2669 {
2670 long ret = 0;
2671 unsigned long pfn = page_to_pfn(page);
2672 struct folio *folio = page_folio(page);
2673 char const *msg_page[] = {"page", "hugepage"};
2674 bool huge = folio_test_hugetlb(folio);
2675 bool isolated;
2676 LIST_HEAD(pagelist);
2677 struct migration_target_control mtc = {
2678 .nid = NUMA_NO_NODE,
2679 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
2680 .reason = MR_MEMORY_FAILURE,
2681 };
2682
2683 if (!huge && folio_test_large(folio)) {
2684 if (try_to_split_thp_page(page, true)) {
2685 pr_info("%#lx: thp split failed\n", pfn);
2686 return -EBUSY;
2687 }
2688 folio = page_folio(page);
2689 }
2690
2691 folio_lock(folio);
2692 if (!huge)
2693 folio_wait_writeback(folio);
2694 if (PageHWPoison(page)) {
2695 folio_unlock(folio);
2696 folio_put(folio);
2697 pr_info("%#lx: page already poisoned\n", pfn);
2698 return 0;
2699 }
2700
2701 if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
2702 /*
2703 * Try to invalidate first. This should work for
2704 		 * non-dirty unmapped page-cache pages.
2705 */
2706 ret = mapping_evict_folio(folio_mapping(folio), folio);
2707 folio_unlock(folio);
2708
2709 if (ret) {
2710 pr_info("%#lx: invalidated\n", pfn);
2711 page_handle_poison(page, false, true);
2712 return 0;
2713 }
2714
2715 isolated = isolate_folio_to_list(folio, &pagelist);
2716
2717 /*
2718 	 * If we succeeded in isolating the folio, we grabbed another refcount on
2719 * the folio, so we can safely drop the one we got from get_any_page().
2720 * If we failed to isolate the folio, it means that we cannot go further
2721 * and we will return an error, so drop the reference we got from
2722 * get_any_page() as well.
2723 */
2724 folio_put(folio);
2725
2726 if (isolated) {
2727 ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
2728 (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
2729 if (!ret) {
2730 bool release = !huge;
2731
2732 if (!page_handle_poison(page, huge, release))
2733 ret = -EBUSY;
2734 } else {
2735 if (!list_empty(&pagelist))
2736 putback_movable_pages(&pagelist);
2737
2738 pr_info("%#lx: %s migration failed %ld, type %pGp\n",
2739 pfn, msg_page[huge], ret, &page->flags);
2740 if (ret > 0)
2741 ret = -EBUSY;
2742 }
2743 } else {
2744 pr_info("%#lx: %s isolation failed, page count %d, type %pGp\n",
2745 pfn, msg_page[huge], page_count(page), &page->flags);
2746 ret = -EBUSY;
2747 }
2748 return ret;
2749 }
2750
2751 /**
2752 * soft_offline_page - Soft offline a page.
2753 * @pfn: pfn to soft-offline
2754 * @flags: flags. Same as memory_failure().
2755 *
2756 * Returns 0 on success,
2757 * -EOPNOTSUPP if hwpoison_filter() filtered the error event or
2758 * soft offline is disabled by /proc/sys/vm/enable_soft_offline,
2759 * < 0 otherwise: negated errno.
2760 *
2761 * Soft offline a page, by migration or invalidation,
2762 * without killing anything. This is for the case when
2763 * a page is not corrupted yet (so it's still valid to access),
2764 * but has had a number of corrected errors and is better taken
2765 * out.
2766 *
2767 * The actual policy on when to do that is maintained by
2768 * user space.
2769 *
2770 * This should never impact any application or cause data loss,
2771 * however it might take some time.
2772 *
2773 * This is not a 100% solution for all memory, but tries to be
2774 * ``good enough'' for the majority of memory.
2775 */
2776 int soft_offline_page(unsigned long pfn, int flags)
2777 {
2778 int ret;
2779 bool try_again = true;
2780 struct page *page;
2781
2782 if (!pfn_valid(pfn)) {
2783 WARN_ON_ONCE(flags & MF_COUNT_INCREASED);
2784 return -ENXIO;
2785 }
2786
2787 /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
2788 page = pfn_to_online_page(pfn);
2789 if (!page) {
2790 put_ref_page(pfn, flags);
2791 return -EIO;
2792 }
2793
2794 if (!sysctl_enable_soft_offline) {
2795 pr_info_once("disabled by /proc/sys/vm/enable_soft_offline\n");
2796 put_ref_page(pfn, flags);
2797 return -EOPNOTSUPP;
2798 }
2799
2800 mutex_lock(&mf_mutex);
2801
2802 if (PageHWPoison(page)) {
2803 pr_info("%#lx: page already poisoned\n", pfn);
2804 put_ref_page(pfn, flags);
2805 mutex_unlock(&mf_mutex);
2806 return 0;
2807 }
2808
2809 retry:
2810 get_online_mems();
2811 ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
2812 put_online_mems();
2813
2814 if (hwpoison_filter(page)) {
2815 if (ret > 0)
2816 put_page(page);
2817
2818 mutex_unlock(&mf_mutex);
2819 return -EOPNOTSUPP;
2820 }
2821
2822 if (ret > 0) {
2823 ret = soft_offline_in_use_page(page);
2824 } else if (ret == 0) {
2825 if (!page_handle_poison(page, true, false)) {
2826 if (try_again) {
2827 try_again = false;
2828 flags &= ~MF_COUNT_INCREASED;
2829 goto retry;
2830 }
2831 ret = -EBUSY;
2832 }
2833 }
2834
2835 mutex_unlock(&mf_mutex);
2836
2837 return ret;
2838 }
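/*
 * Usage note (sketch): user space policy agents typically reach
 * soft_offline_page() through sysfs by writing the physical address of
 * the page to offline, e.g.:
 *
 *	echo $paddr > /sys/devices/system/memory/soft_offline_page
 */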
2839