1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * mm/rmap.c - physical to virtual reverse mappings
4 *
5 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
6 *
7 * Simple, low overhead reverse mapping scheme.
8 * Please try to keep this thing as modular as possible.
9 *
10 * Provides methods for unmapping each kind of mapped page:
11 * the anon methods track anonymous pages, and
12 * the file methods track pages belonging to an inode.
13 *
14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
17 * Contributions by Hugh Dickins 2003, 2004
18 */
19
20 /*
21 * Lock ordering in mm:
22 *
23 * inode->i_rwsem (while writing or truncating, not reading or faulting)
24 * mm->mmap_lock
25 * mapping->invalidate_lock (in filemap_fault)
26 * folio_lock
27 * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
28 * vma_start_write
29 * mapping->i_mmap_rwsem
30 * anon_vma->rwsem
31 * mm->page_table_lock or pte_lock
32 * swap_lock (in swap_duplicate, swap_info_get)
33 * mmlist_lock (in mmput, drain_mmlist and others)
34 * mapping->private_lock (in block_dirty_folio)
35 * i_pages lock (widely used)
36 * lruvec->lru_lock (in folio_lruvec_lock_irq)
37 * inode->i_lock (in set_page_dirty's __mark_inode_dirty)
38 * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
39 * sb_lock (within inode_lock in fs/fs-writeback.c)
40 * i_pages lock (widely used, in set_page_dirty,
41 * in arch-dependent flush_dcache_mmap_lock,
42 * within bdi.wb->list_lock in __sync_single_inode)
43 *
44 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
45 * ->tasklist_lock
46 * pte map lock
47 *
48 * hugetlbfs PageHuge() takes locks in this order:
49 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
50 * vma_lock (hugetlb specific lock for pmd_sharing)
51 * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
52 * folio_lock
53 */
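/*
 * Illustrative sketch of honouring the ordering above (an assumption distilled
 * from the list, not lifted from any one caller): an rmap walk of a
 * file-backed folio nests roughly as
 *
 *	folio_lock(folio);
 *	i_mmap_lock_read(mapping);		<- mapping->i_mmap_rwsem
 *		page_vma_mapped_walk(&pvmw);	<- takes the pte lock internally
 *	i_mmap_unlock_read(mapping);
 *	folio_unlock(folio);
 *
 * i.e. each lock is taken only after the locks above it in the list.
 */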
54
55 #include <linux/mm.h>
56 #include <linux/sched/mm.h>
57 #include <linux/sched/task.h>
58 #include <linux/pagemap.h>
59 #include <linux/swap.h>
60 #include <linux/leafops.h>
61 #include <linux/slab.h>
62 #include <linux/init.h>
63 #include <linux/ksm.h>
64 #include <linux/rmap.h>
65 #include <linux/rcupdate.h>
66 #include <linux/export.h>
67 #include <linux/memcontrol.h>
68 #include <linux/mmu_notifier.h>
69 #include <linux/migrate.h>
70 #include <linux/hugetlb.h>
71 #include <linux/huge_mm.h>
72 #include <linux/backing-dev.h>
73 #include <linux/page_idle.h>
74 #include <linux/memremap.h>
75 #include <linux/userfaultfd_k.h>
76 #include <linux/mm_inline.h>
77 #include <linux/oom.h>
78
79 #include <asm/tlb.h>
80
81 #define CREATE_TRACE_POINTS
82 #include <trace/events/migrate.h>
83
84 #include "internal.h"
85 #include "swap.h"
86
87 static struct kmem_cache *anon_vma_cachep;
88 static struct kmem_cache *anon_vma_chain_cachep;
89
90 static inline struct anon_vma *anon_vma_alloc(void)
91 {
92 struct anon_vma *anon_vma;
93
94 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
95 if (anon_vma) {
96 atomic_set(&anon_vma->refcount, 1);
97 anon_vma->num_children = 0;
98 anon_vma->num_active_vmas = 0;
99 anon_vma->parent = anon_vma;
100 /*
101 * Initialise the anon_vma root to point to itself. If called
102 * from fork, the root will be reset to the parent's anon_vma.
103 */
104 anon_vma->root = anon_vma;
105 }
106
107 return anon_vma;
108 }
109
110 static inline void anon_vma_free(struct anon_vma *anon_vma)
111 {
112 VM_BUG_ON(atomic_read(&anon_vma->refcount));
113
114 /*
115 * Synchronize against folio_lock_anon_vma_read() such that
116 * we can safely hold the lock without the anon_vma getting
117 * freed.
118 *
119 * Relies on the full mb implied by the atomic_dec_and_test() from
120 * put_anon_vma() against the acquire barrier implied by
121 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
122 *
123 * folio_lock_anon_vma_read() VS put_anon_vma()
124 * down_read_trylock() atomic_dec_and_test()
125 * LOCK MB
126 * atomic_read() rwsem_is_locked()
127 *
128 * LOCK should suffice since the actual taking of the lock must
129 * happen _before_ what follows.
130 */
131 might_sleep();
132 if (rwsem_is_locked(&anon_vma->root->rwsem)) {
133 anon_vma_lock_write(anon_vma);
134 anon_vma_unlock_write(anon_vma);
135 }
136
137 kmem_cache_free(anon_vma_cachep, anon_vma);
138 }
139
140 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
141 {
142 return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
143 }
144
145 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
146 {
147 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
148 }
149
150 static void anon_vma_chain_assign(struct vm_area_struct *vma,
151 struct anon_vma_chain *avc,
152 struct anon_vma *anon_vma)
153 {
154 avc->vma = vma;
155 avc->anon_vma = anon_vma;
156 list_add(&avc->same_vma, &vma->anon_vma_chain);
157 }
158
159 /**
160 * __anon_vma_prepare - attach an anon_vma to a memory region
161 * @vma: the memory region in question
162 *
163 * This makes sure the memory mapping described by 'vma' has
164 * an 'anon_vma' attached to it, so that we can associate the
165 * anonymous pages mapped into it with that anon_vma.
166 *
167 * The common case will be that we already have one, which
168 * is handled inline by anon_vma_prepare(). But if
169 * not we either need to find an adjacent mapping that we
170 * can re-use the anon_vma from (very common when the only
171 * reason for splitting a vma has been mprotect()), or we
172 * allocate a new one.
173 *
174 * Anon-vma allocations are very subtle, because we may have
175 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
176 * and that may actually touch the rwsem even in the newly
177 * allocated vma (it depends on RCU to make sure that the
178 * anon_vma isn't actually destroyed).
179 *
180 * As a result, we need to do proper anon_vma locking even
181 * for the new allocation. At the same time, we do not want
182 * to do any locking for the common case of already having
183 * an anon_vma.
184 */
185 int __anon_vma_prepare(struct vm_area_struct *vma)
186 {
187 struct mm_struct *mm = vma->vm_mm;
188 struct anon_vma *anon_vma, *allocated;
189 struct anon_vma_chain *avc;
190
191 mmap_assert_locked(mm);
192 might_sleep();
193
194 avc = anon_vma_chain_alloc(GFP_KERNEL);
195 if (!avc)
196 goto out_enomem;
197
198 anon_vma = find_mergeable_anon_vma(vma);
199 allocated = NULL;
200 if (!anon_vma) {
201 anon_vma = anon_vma_alloc();
202 if (unlikely(!anon_vma))
203 goto out_enomem_free_avc;
204 anon_vma->num_children++; /* self-parent link for new root */
205 allocated = anon_vma;
206 }
207
208 anon_vma_lock_write(anon_vma);
209 /* page_table_lock to protect against threads */
210 spin_lock(&mm->page_table_lock);
211 if (likely(!vma->anon_vma)) {
212 vma->anon_vma = anon_vma;
213 anon_vma_chain_assign(vma, avc, anon_vma);
214 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
215 anon_vma->num_active_vmas++;
216 allocated = NULL;
217 avc = NULL;
218 }
219 spin_unlock(&mm->page_table_lock);
220 anon_vma_unlock_write(anon_vma);
221
222 if (unlikely(allocated))
223 put_anon_vma(allocated);
224 if (unlikely(avc))
225 anon_vma_chain_free(avc);
226
227 return 0;
228
229 out_enomem_free_avc:
230 anon_vma_chain_free(avc);
231 out_enomem:
232 return -ENOMEM;
233 }
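/*
 * Illustrative sketch of the usual caller pattern (a simplified assumption,
 * not a copy of any specific fault handler): an anonymous fault ensures the
 * VMA has an anon_vma before the new folio is mapped.
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	folio = vma_alloc_zeroed_movable_folio(vma, addr);
 *	...
 *	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
 *	set_pte_at(vma->vm_mm, addr, pte, mk_pte(&folio->page, vma->vm_page_prot));
 */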
234
235 static void check_anon_vma_clone(struct vm_area_struct *dst,
236 struct vm_area_struct *src,
237 enum vma_operation operation)
238 {
239 /* The write lock must be held. */
240 mmap_assert_write_locked(src->vm_mm);
241 /* If not a fork then must be on same mm. */
242 VM_WARN_ON_ONCE(operation != VMA_OP_FORK && dst->vm_mm != src->vm_mm);
243
244 /* If we have anything to do src->anon_vma must be provided. */
245 VM_WARN_ON_ONCE(!src->anon_vma && !list_empty(&src->anon_vma_chain));
246 VM_WARN_ON_ONCE(!src->anon_vma && dst->anon_vma);
247 /* We are establishing a new anon_vma_chain. */
248 VM_WARN_ON_ONCE(!list_empty(&dst->anon_vma_chain));
249 /*
250 * On fork, dst->anon_vma is set to NULL (temporarily). Otherwise, anon_vma
251 * must be the same across dst and src.
252 */
253 VM_WARN_ON_ONCE(dst->anon_vma && dst->anon_vma != src->anon_vma);
254 /*
255 * Essentially equivalent to above - if not a no-op, we should expect
256 * dst->anon_vma to be set for everything except a fork.
257 */
258 VM_WARN_ON_ONCE(operation != VMA_OP_FORK && src->anon_vma &&
259 !dst->anon_vma);
260 /* For the anon_vma to be compatible, it can only be singular. */
261 VM_WARN_ON_ONCE(operation == VMA_OP_MERGE_UNFAULTED &&
262 !list_is_singular(&src->anon_vma_chain));
263 #ifdef CONFIG_PER_VMA_LOCK
264 /* Only merging an unfaulted VMA leaves the destination attached. */
265 VM_WARN_ON_ONCE(operation != VMA_OP_MERGE_UNFAULTED &&
266 vma_is_attached(dst));
267 #endif
268 }
269
270 static void maybe_reuse_anon_vma(struct vm_area_struct *dst,
271 struct anon_vma *anon_vma)
272 {
273 /* If already populated, nothing to do. */
274 if (dst->anon_vma)
275 return;
276
277 /*
278 * We reuse an anon_vma if any linking VMAs were unmapped and it has
279 * only a single child at most.
280 */
281 if (anon_vma->num_active_vmas > 0)
282 return;
283 if (anon_vma->num_children > 1)
284 return;
285
286 dst->anon_vma = anon_vma;
287 anon_vma->num_active_vmas++;
288 }
289
290 static void cleanup_partial_anon_vmas(struct vm_area_struct *vma);
291
292 /**
293 * anon_vma_clone - Establishes new anon_vma_chain objects in @dst linking to
294 * all of the anon_vma objects contained within @src anon_vma_chain's.
295 * @dst: The destination VMA with an empty anon_vma_chain.
296 * @src: The source VMA we wish to duplicate.
297 * @operation: The type of operation which resulted in the clone.
298 *
299 * This is the heart of the VMA side of the anon_vma implementation - we invoke
300 * this function whenever we need to set up a new VMA's anon_vma state.
301 *
302 * This is invoked for:
303 *
304 * - VMA Merge, but only when @dst is unfaulted and @src is faulted - meaning we
305 * clone @src into @dst.
306 * - VMA split.
307 * - VMA (m)remap.
308 * - Fork of faulted VMA.
309 *
310 * In all cases other than fork this is simply a duplication. Fork additionally
311 * adds a new active anon_vma.
312 *
313 * ONLY in the case of fork do we try to 'reuse' existing anon_vma's in an
314 * anon_vma hierarchy, reusing anon_vma's which have no VMA associated with them
315 * but have at most a single child. This is to avoid wasting memory when repeatedly
316 * forking.
317 *
318 * Returns: 0 on success, -ENOMEM on failure.
319 */
320 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src,
321 enum vma_operation operation)
322 {
323 struct anon_vma_chain *avc, *pavc;
324 struct anon_vma *active_anon_vma = src->anon_vma;
325
326 check_anon_vma_clone(dst, src, operation);
327
328 if (!active_anon_vma)
329 return 0;
330
331 /*
332 * Allocate AVCs. We don't need an anon_vma lock for this as we
333 * are not updating the anon_vma rbtree nor are we changing
334 * anon_vma statistics.
335 *
336 * Either src, dst have the same mm for which we hold an exclusive mmap
337 * write lock, or we are forking and we hold it on src->vm_mm and dst is
338 * not yet accessible to other threads so there's no possibility of the
339 * unlinked AVC's being observed yet.
340 */
341 list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) {
342 avc = anon_vma_chain_alloc(GFP_KERNEL);
343 if (!avc)
344 goto enomem_failure;
345
346 anon_vma_chain_assign(dst, avc, pavc->anon_vma);
347 }
348
349 /*
350 * Now link the anon_vma's back to the newly inserted AVCs.
351 * Note that all anon_vma's share the same root.
352 */
353 anon_vma_lock_write(src->anon_vma);
354 list_for_each_entry_reverse(avc, &dst->anon_vma_chain, same_vma) {
355 struct anon_vma *anon_vma = avc->anon_vma;
356
357 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
358 if (operation == VMA_OP_FORK)
359 maybe_reuse_anon_vma(dst, anon_vma);
360 }
361
362 if (operation != VMA_OP_FORK)
363 dst->anon_vma->num_active_vmas++;
364
365 anon_vma_unlock_write(active_anon_vma);
366 return 0;
367
368 enomem_failure:
369 cleanup_partial_anon_vmas(dst);
370 return -ENOMEM;
371 }
372
373 /*
374 * Attach vma to its own anon_vma, as well as to the anon_vmas that
375 * the corresponding VMA in the parent process is attached to.
376 * Returns 0 on success, non-zero on failure.
377 */
378 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
379 {
380 struct anon_vma_chain *avc;
381 struct anon_vma *anon_vma;
382 int rc;
383
384 /* Don't bother if the parent process has no anon_vma here. */
385 if (!pvma->anon_vma)
386 return 0;
387
388 /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
389 vma->anon_vma = NULL;
390
391 anon_vma = anon_vma_alloc();
392 if (!anon_vma)
393 return -ENOMEM;
394 avc = anon_vma_chain_alloc(GFP_KERNEL);
395 if (!avc) {
396 put_anon_vma(anon_vma);
397 return -ENOMEM;
398 }
399
400 /*
401 * First, attach the new VMA to the parent VMA's anon_vmas,
402 * so rmap can find non-COWed pages in child processes.
403 */
404 rc = anon_vma_clone(vma, pvma, VMA_OP_FORK);
405 /* An error arose or an existing anon_vma was reused, all done then. */
406 if (rc || vma->anon_vma) {
407 put_anon_vma(anon_vma);
408 anon_vma_chain_free(avc);
409 return rc;
410 }
411
412 /*
413 * OK no reuse, so add our own anon_vma.
414 *
415 * Since it is not linked anywhere we can safely manipulate anon_vma
416 * fields without a lock.
417 */
418
419 anon_vma->num_active_vmas = 1;
420 /*
421 * The root anon_vma's rwsem is the lock actually used when we
422 * lock any of the anon_vmas in this anon_vma tree.
423 */
424 anon_vma->root = pvma->anon_vma->root;
425 anon_vma->parent = pvma->anon_vma;
426 /*
427 * With refcounts, an anon_vma can stay around longer than the
428 * process it belongs to. The root anon_vma needs to be pinned until
429 * this anon_vma is freed, because the lock lives in the root.
430 */
431 get_anon_vma(anon_vma->root);
432 /* Mark this anon_vma as the one where our new (COWed) pages go. */
433 vma->anon_vma = anon_vma;
434 anon_vma_chain_assign(vma, avc, anon_vma);
435 /* Now let rmap see it. */
436 anon_vma_lock_write(anon_vma);
437 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
438 anon_vma->parent->num_children++;
439 anon_vma_unlock_write(anon_vma);
440
441 return 0;
442 }
443
444 /*
445 * In the unfortunate case of anon_vma_clone() failing to allocate memory we
446 * have to clean things up.
447 *
448 * Since we allocate anon_vma_chain's before we insert them into the interval
449 * trees, we simply have to free up the AVC's and remove the entries from the
450 * VMA's anon_vma_chain.
451 */
452 static void cleanup_partial_anon_vmas(struct vm_area_struct *vma)
453 {
454 struct anon_vma_chain *avc, *next;
455
456 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
457 list_del(&avc->same_vma);
458 anon_vma_chain_free(avc);
459 }
460
461 /*
462 * The anon_vma assigned to this VMA is no longer valid, as we were not
463 * able to correctly clone AVC state. Avoid inconsistent anon_vma tree
464 * state by resetting.
465 */
466 vma->anon_vma = NULL;
467 }
468
469 /**
470 * unlink_anon_vmas() - remove all links between a VMA and anon_vma's, freeing
471 * anon_vma_chain objects.
472 * @vma: The VMA whose links to anon_vma objects is to be severed.
473 *
474 * As part of the process anon_vma_chain's are freed,
475 * anon_vma->num_children,num_active_vmas is updated as required and, if the
476 * relevant anon_vma references no further VMAs, its reference count is
477 * decremented.
478 */
479 void unlink_anon_vmas(struct vm_area_struct *vma)
480 {
481 struct anon_vma_chain *avc, *next;
482 struct anon_vma *active_anon_vma = vma->anon_vma;
483
484 /* The mmap lock is always held, possibly only for read when unmapping. */
485 mmap_assert_locked(vma->vm_mm);
486
487 /* Unfaulted is a no-op. */
488 if (!active_anon_vma) {
489 VM_WARN_ON_ONCE(!list_empty(&vma->anon_vma_chain));
490 return;
491 }
492
493 anon_vma_lock_write(active_anon_vma);
494
495 /*
496 * Unlink each anon_vma chained to the VMA. This list is ordered
497 * from newest to oldest, ensuring the root anon_vma gets freed last.
498 */
499 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
500 struct anon_vma *anon_vma = avc->anon_vma;
501
502 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
503
504 /*
505 * Leave empty anon_vmas on the list - we'll need
506 * to free them outside the lock.
507 */
508 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
509 anon_vma->parent->num_children--;
510 continue;
511 }
512
513 list_del(&avc->same_vma);
514 anon_vma_chain_free(avc);
515 }
516
517 active_anon_vma->num_active_vmas--;
518 /*
519 * The vma may still be used after the unlink, and a fresh anon_vma will
520 * be prepared when a fault is handled.
521 */
522 vma->anon_vma = NULL;
523 anon_vma_unlock_write(active_anon_vma);
524
525
526 /*
527 * Iterate the list once more; it now only contains empty and unlinked
528 * anon_vmas, so destroy them. We could not do this earlier because
529 * __put_anon_vma() needs to write-acquire the anon_vma->root->rwsem.
530 */
531 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
532 struct anon_vma *anon_vma = avc->anon_vma;
533
534 VM_WARN_ON(anon_vma->num_children);
535 VM_WARN_ON(anon_vma->num_active_vmas);
536 put_anon_vma(anon_vma);
537
538 list_del(&avc->same_vma);
539 anon_vma_chain_free(avc);
540 }
541 }
542
543 static void anon_vma_ctor(void *data)
544 {
545 struct anon_vma *anon_vma = data;
546
547 init_rwsem(&anon_vma->rwsem);
548 atomic_set(&anon_vma->refcount, 0);
549 anon_vma->rb_root = RB_ROOT_CACHED;
550 }
551
552 void __init anon_vma_init(void)
553 {
554 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
555 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
556 anon_vma_ctor);
557 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
558 SLAB_PANIC|SLAB_ACCOUNT);
559 }
560
561 /*
562 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
563 *
564 * Since there is no serialization whatsoever against folio_remove_rmap_*(),
565 * the best this function can do is return a refcount-increased anon_vma
566 * that might have been relevant to this page.
567 *
568 * The page might have been remapped to a different anon_vma or the anon_vma
569 * returned may already be freed (and even reused).
570 *
571 * In case it was remapped to a different anon_vma, the new anon_vma will be a
572 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
573 * ensure that any anon_vma obtained from the page will still be valid for as
574 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
575 *
576 * All users of this function must be very careful when walking the anon_vma
577 * chain and verify that the page in question is indeed mapped in it
578 * [ something equivalent to page_mapped_in_vma() ].
579 *
580 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
581 * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
582 * if there is a mapcount, we can dereference the anon_vma after observing
583 * those.
584 *
585 * NOTE: the caller should hold folio lock when calling this.
586 */
587 struct anon_vma *folio_get_anon_vma(const struct folio *folio)
588 {
589 struct anon_vma *anon_vma = NULL;
590 unsigned long anon_mapping;
591
592 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
593
594 rcu_read_lock();
595 anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
596 if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
597 goto out;
598 if (!folio_mapped(folio))
599 goto out;
600
601 anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
602 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
603 anon_vma = NULL;
604 goto out;
605 }
606
607 /*
608 * If this folio is still mapped, then its anon_vma cannot have been
609 * freed. But if it has been unmapped, we have no security against the
610 * anon_vma structure being freed and reused (for another anon_vma:
611 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
612 * above cannot corrupt).
613 */
614 if (!folio_mapped(folio)) {
615 rcu_read_unlock();
616 put_anon_vma(anon_vma);
617 return NULL;
618 }
619 out:
620 rcu_read_unlock();
621
622 return anon_vma;
623 }
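/*
 * Illustrative sketch of the expected calling pattern (an assumption condensed
 * from the rules above, not taken from one specific call site):
 *
 *	anon_vma = folio_get_anon_vma(folio);
 *	if (anon_vma) {
 *		anon_vma_lock_read(anon_vma);
 *		... walk anon_vma->rb_root, re-checking that the folio is
 *		    really mapped in each VMA found ...
 *		anon_vma_unlock_read(anon_vma);
 *		put_anon_vma(anon_vma);
 *	}
 */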
624
625 /*
626 * Similar to folio_get_anon_vma() except it locks the anon_vma.
627 *
628 * It's a little more complex as it tries to keep the fast path to a single
629 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
630 * reference like with folio_get_anon_vma() and then block on the mutex
631 * in the !rwc->try_lock case.
632 */
633 struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
634 struct rmap_walk_control *rwc)
635 {
636 struct anon_vma *anon_vma = NULL;
637 struct anon_vma *root_anon_vma;
638 unsigned long anon_mapping;
639
640 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
641
642 rcu_read_lock();
643 anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
644 if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
645 goto out;
646 if (!folio_mapped(folio))
647 goto out;
648
649 anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
650 root_anon_vma = READ_ONCE(anon_vma->root);
651 if (down_read_trylock(&root_anon_vma->rwsem)) {
652 /*
653 * If the folio is still mapped, then this anon_vma is still
654 * its anon_vma, and holding the mutex ensures that it will
655 * not go away, see anon_vma_free().
656 */
657 if (!folio_mapped(folio)) {
658 up_read(&root_anon_vma->rwsem);
659 anon_vma = NULL;
660 }
661 goto out;
662 }
663
664 if (rwc && rwc->try_lock) {
665 anon_vma = NULL;
666 rwc->contended = true;
667 goto out;
668 }
669
670 /* trylock failed, we got to sleep */
671 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
672 anon_vma = NULL;
673 goto out;
674 }
675
676 if (!folio_mapped(folio)) {
677 rcu_read_unlock();
678 put_anon_vma(anon_vma);
679 return NULL;
680 }
681
682 /* we pinned the anon_vma, it's safe to sleep */
683 rcu_read_unlock();
684 anon_vma_lock_read(anon_vma);
685
686 if (atomic_dec_and_test(&anon_vma->refcount)) {
687 /*
688 * Oops, we held the last refcount, release the lock
689 * and bail -- can't simply use put_anon_vma() because
690 * we'll deadlock on the anon_vma_lock_write() recursion.
691 */
692 anon_vma_unlock_read(anon_vma);
693 __put_anon_vma(anon_vma);
694 anon_vma = NULL;
695 }
696
697 return anon_vma;
698
699 out:
700 rcu_read_unlock();
701 return anon_vma;
702 }
703
704 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
705 /*
706 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
707 * important if a PTE was dirty when it was unmapped that it's flushed
708 * before any IO is initiated on the page to prevent lost writes. Similarly,
709 * it must be flushed before freeing to prevent data leakage.
710 */
711 void try_to_unmap_flush(void)
712 {
713 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
714
715 if (!tlb_ubc->flush_required)
716 return;
717
718 arch_tlbbatch_flush(&tlb_ubc->arch);
719 tlb_ubc->flush_required = false;
720 tlb_ubc->writable = false;
721 }
722
723 /* Flush iff there are potentially writable TLB entries that can race with IO */
724 void try_to_unmap_flush_dirty(void)
725 {
726 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
727
728 if (tlb_ubc->writable)
729 try_to_unmap_flush();
730 }
731
732 /*
733 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
734 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
735 */
736 #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16
737 #define TLB_FLUSH_BATCH_PENDING_MASK \
738 ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
739 #define TLB_FLUSH_BATCH_PENDING_LARGE \
740 (TLB_FLUSH_BATCH_PENDING_MASK / 2)
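/*
 * Worked example of the encoding above (illustrative numbers only): with
 * TLB_FLUSH_BATCH_FLUSHED_SHIFT == 16, a tlb_flush_batched value of
 * 0x00030005 decodes as
 *
 *	pending = 0x00030005 & TLB_FLUSH_BATCH_PENDING_MASK   = 5
 *	flushed = 0x00030005 >> TLB_FLUSH_BATCH_FLUSHED_SHIFT = 3
 *
 * so two batched unmaps (generations 4 and 5) have not been flushed yet, and
 * flush_tlb_batched_pending() would issue a flush_tlb_mm().
 */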
741
742 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
743 unsigned long start, unsigned long end)
744 {
745 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
746 int batch;
747 bool writable = pte_dirty(pteval);
748
749 if (!pte_accessible(mm, pteval))
750 return;
751
752 arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end);
753 tlb_ubc->flush_required = true;
754
755 /*
756 * Ensure compiler does not re-order the setting of tlb_flush_batched
757 * before the PTE is cleared.
758 */
759 barrier();
760 batch = atomic_read(&mm->tlb_flush_batched);
761 retry:
762 if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
763 /*
764 * Prevent `pending' from catching up with `flushed' because of
765 * overflow. Reset `pending' and `flushed' to be 1 and 0 if
766 * `pending' becomes large.
767 */
768 if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
769 goto retry;
770 } else {
771 atomic_inc(&mm->tlb_flush_batched);
772 }
773
774 /*
775 * If the PTE was dirty then it's best to assume it's writable. The
776 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
777 * before the page is queued for IO.
778 */
779 if (writable)
780 tlb_ubc->writable = true;
781 }
782
783 /*
784 * Returns true if the TLB flush should be deferred to the end of a batch of
785 * unmap operations to reduce IPIs.
786 */
787 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
788 {
789 if (!(flags & TTU_BATCH_FLUSH))
790 return false;
791
792 return arch_tlbbatch_should_defer(mm);
793 }
794
795 /*
796 * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
797 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
798 * operation such as mprotect or munmap to race between reclaim unmapping
799 * the page and flushing the page. If this race occurs, it potentially allows
800 * access to data via a stale TLB entry. Tracking all mm's that have TLB
801 * batching in flight would be expensive during reclaim so instead track
802 * whether TLB batching occurred in the past and if so then do a flush here
803 * if required. This will cost one additional flush per reclaim cycle paid
804 * by the first operation at risk such as mprotect and munmap.
805 *
806 * This must be called under the PTL so that an access to tlb_flush_batched
807 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
808 * via the PTL.
809 */
810 void flush_tlb_batched_pending(struct mm_struct *mm)
811 {
812 int batch = atomic_read(&mm->tlb_flush_batched);
813 int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
814 int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
815
816 if (pending != flushed) {
817 flush_tlb_mm(mm);
818 /*
819 * If a new TLB flush became pending while we were flushing, leave
820 * mm->tlb_flush_batched as is, to avoid losing that flush.
821 */
822 atomic_cmpxchg(&mm->tlb_flush_batched, batch,
823 pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
824 }
825 }
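/*
 * Illustrative sketch (an assumption about the shape of such callers, not a
 * verbatim excerpt): an operation that rewrites PTEs guards against a stale
 * reclaim-batched TLB entry by flushing pending batches under the same PTL.
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	flush_tlb_batched_pending(mm);
 *	... modify or clear the PTEs ...
 *	pte_unmap_unlock(pte, ptl);
 */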
826 #else
827 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
828 unsigned long start, unsigned long end)
829 {
830 }
831
832 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
833 {
834 return false;
835 }
836 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
837
838 /**
839 * page_address_in_vma - The virtual address of a page in this VMA.
840 * @folio: The folio containing the page.
841 * @page: The page within the folio.
842 * @vma: The VMA we need to know the address in.
843 *
844 * Calculates the user virtual address of this page in the specified VMA.
845 * It is the caller's responsibility to check the page is actually
846 * within the VMA. There may not currently be a PTE pointing at this
847 * page, but if a page fault occurs at this address, this is the page
848 * which will be accessed.
849 *
850 * Context: Caller should hold a reference to the folio. Caller should
851 * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the
852 * VMA from being altered.
853 *
854 * Return: The virtual address corresponding to this page in the VMA.
855 */
856 unsigned long page_address_in_vma(const struct folio *folio,
857 const struct page *page, const struct vm_area_struct *vma)
858 {
859 if (folio_test_anon(folio)) {
860 struct anon_vma *anon_vma = folio_anon_vma(folio);
861 /*
862 * Note: swapoff's unuse_vma() is more efficient with this
863 * check, and needs it to match anon_vma when KSM is active.
864 */
865 if (!vma->anon_vma || !anon_vma ||
866 vma->anon_vma->root != anon_vma->root)
867 return -EFAULT;
868 } else if (!vma->vm_file) {
869 return -EFAULT;
870 } else if (vma->vm_file->f_mapping != folio->mapping) {
871 return -EFAULT;
872 }
873
874 /* KSM folios don't reach here because of the !anon_vma check */
875 return vma_address(vma, page_pgoff(folio, page), 1);
876 }
877
878 /*
879 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
880 * NULL if it doesn't exist. No guarantees / checks on what the pmd_t*
881 * represents.
882 */
883 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
884 {
885 pgd_t *pgd;
886 p4d_t *p4d;
887 pud_t *pud;
888 pmd_t *pmd = NULL;
889
890 pgd = pgd_offset(mm, address);
891 if (!pgd_present(*pgd))
892 goto out;
893
894 p4d = p4d_offset(pgd, address);
895 if (!p4d_present(*p4d))
896 goto out;
897
898 pud = pud_offset(p4d, address);
899 if (!pud_present(*pud))
900 goto out;
901
902 pmd = pmd_offset(pud, address);
903 out:
904 return pmd;
905 }
906
907 struct folio_referenced_arg {
908 int mapcount;
909 int referenced;
910 vm_flags_t vm_flags;
911 struct mem_cgroup *memcg;
912 };
913
914 /*
915 * arg: folio_referenced_arg will be passed
916 */
917 static bool folio_referenced_one(struct folio *folio,
918 struct vm_area_struct *vma, unsigned long address, void *arg)
919 {
920 struct folio_referenced_arg *pra = arg;
921 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
922 int ptes = 0, referenced = 0;
923 unsigned int nr;
924
925 while (page_vma_mapped_walk(&pvmw)) {
926 address = pvmw.address;
927 nr = 1;
928
929 if (vma->vm_flags & VM_LOCKED) {
930 ptes++;
931 pra->mapcount--;
932
933 /* Only mlock fully mapped pages */
934 if (pvmw.pte && ptes != pvmw.nr_pages)
935 continue;
936
937 /*
938 * All PTEs must be protected by page table lock in
939 * order to mlock the page.
940 *
941 * If a page table boundary has been crossed, the current ptl
942 * only protects part of the ptes.
943 */
944 if (pvmw.flags & PVMW_PGTABLE_CROSSED)
945 continue;
946
947 /* Restore the mlock which got missed */
948 mlock_vma_folio(folio, vma);
949 page_vma_mapped_walk_done(&pvmw);
950 pra->vm_flags |= VM_LOCKED;
951 return false; /* To break the loop */
952 }
953
954 /*
955 * Skip the non-shared swapbacked folio mapped solely by
956 * the exiting or OOM-reaped process. This avoids redundant
957 * swap-out followed by an immediate unmap.
958 */
959 if ((!atomic_read(&vma->vm_mm->mm_users) ||
960 check_stable_address_space(vma->vm_mm)) &&
961 folio_test_anon(folio) && folio_test_swapbacked(folio) &&
962 !folio_maybe_mapped_shared(folio)) {
963 pra->referenced = -1;
964 page_vma_mapped_walk_done(&pvmw);
965 return false;
966 }
967
968 if (pvmw.pte && folio_test_large(folio)) {
969 const unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
970 const unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
971 pte_t pteval = ptep_get(pvmw.pte);
972
973 nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
974 }
975
976 /*
977 * When the LRU is switching, we don't know where the surrounding folios
978 * are: they could be on the active/inactive lists or on MGLRU. So the
979 * simplest approach is to disable this look-around optimization.
980 */
981 if (lru_gen_enabled() && !lru_gen_switching() && pvmw.pte) {
982 if (lru_gen_look_around(&pvmw, nr))
983 referenced++;
984 } else if (pvmw.pte) {
985 if (clear_flush_young_ptes_notify(vma, address, pvmw.pte, nr))
986 referenced++;
987 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
988 if (pmdp_clear_flush_young_notify(vma, address,
989 pvmw.pmd))
990 referenced++;
991 } else {
992 /* unexpected pmd-mapped folio? */
993 WARN_ON_ONCE(1);
994 }
995
996 ptes += nr;
997 pra->mapcount -= nr;
998 /*
999 * If we are sure that we batched the entire folio,
1000 * we can just optimize and stop right here.
1001 */
1002 if (ptes == pvmw.nr_pages) {
1003 page_vma_mapped_walk_done(&pvmw);
1004 break;
1005 }
1006
1007 /* Skip the batched PTEs */
1008 pvmw.pte += nr - 1;
1009 pvmw.address += (nr - 1) * PAGE_SIZE;
1010 }
1011
1012 if (referenced)
1013 folio_clear_idle(folio);
1014 if (folio_test_clear_young(folio))
1015 referenced++;
1016
1017 if (referenced) {
1018 pra->referenced++;
1019 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
1020 }
1021
1022 if (!pra->mapcount)
1023 return false; /* To break the loop */
1024
1025 return true;
1026 }
1027
1028 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
1029 {
1030 struct folio_referenced_arg *pra = arg;
1031 struct mem_cgroup *memcg = pra->memcg;
1032
1033 /*
1034 * Ignore references from this mapping if it has no recency. If the
1035 * folio has been used in another mapping, we will catch it; if this
1036 * other mapping is already gone, the unmap path will have set the
1037 * referenced flag or activated the folio in zap_pte_range().
1038 */
1039 if (!vma_has_recency(vma))
1040 return true;
1041
1042 /*
1043 * If we are reclaiming on behalf of a cgroup, skip counting on behalf
1044 * of references from different cgroups.
1045 */
1046 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
1047 return true;
1048
1049 return false;
1050 }
1051
1052 /**
1053 * folio_referenced() - Test if the folio was referenced.
1054 * @folio: The folio to test.
1055 * @is_locked: Caller holds lock on the folio.
1056 * @memcg: target memory cgroup
1057 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
1058 *
1059 * Quick test_and_clear_referenced for all mappings of a folio.
1060 *
1061 * Return: The number of mappings which referenced the folio. Return -1 if
1062 * the function bailed out due to rmap lock contention.
1063 */
1064 int folio_referenced(struct folio *folio, int is_locked,
1065 struct mem_cgroup *memcg, vm_flags_t *vm_flags)
1066 {
1067 bool we_locked = false;
1068 struct folio_referenced_arg pra = {
1069 .mapcount = folio_mapcount(folio),
1070 .memcg = memcg,
1071 };
1072 struct rmap_walk_control rwc = {
1073 .rmap_one = folio_referenced_one,
1074 .arg = (void *)&pra,
1075 .anon_lock = folio_lock_anon_vma_read,
1076 .try_lock = true,
1077 .invalid_vma = invalid_folio_referenced_vma,
1078 };
1079
1080 VM_WARN_ON_ONCE_FOLIO(folio_is_zone_device(folio), folio);
1081 *vm_flags = 0;
1082 if (!pra.mapcount)
1083 return 0;
1084
1085 if (!folio_raw_mapping(folio))
1086 return 0;
1087
1088 if (!is_locked) {
1089 we_locked = folio_trylock(folio);
1090 if (!we_locked)
1091 return 1;
1092 }
1093
1094 rmap_walk(folio, &rwc);
1095 *vm_flags = pra.vm_flags;
1096
1097 if (we_locked)
1098 folio_unlock(folio);
1099
1100 return rwc.contended ? -1 : pra.referenced;
1101 }
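/*
 * Illustrative sketch of how a reclaim-style caller is expected to consume
 * this (a simplified assumption, not the exact shrink_folio_list() logic;
 * "memcg" stands for whatever cgroup, if any, reclaim acts on behalf of):
 *
 *	vm_flags_t vm_flags;
 *	int refs = folio_referenced(folio, 1, memcg, &vm_flags);
 *
 *	if (vm_flags & VM_LOCKED)
 *		keep the folio; mlock handling will take care of it
 *	else if (refs == -1)
 *		the rmap lock was contended, skip the folio for now
 *	else if (refs)
 *		the folio was recently used, consider keeping or activating it
 */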
1102
1103 static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
1104 {
1105 int cleaned = 0;
1106 struct vm_area_struct *vma = pvmw->vma;
1107 struct mmu_notifier_range range;
1108 unsigned long address = pvmw->address;
1109
1110 /*
1111 * We have to assume the worst case, i.e. pmd, for invalidation. Note that
1112 * the folio cannot be freed from this function.
1113 */
1114 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
1115 vma->vm_mm, address, vma_address_end(pvmw));
1116 mmu_notifier_invalidate_range_start(&range);
1117
1118 while (page_vma_mapped_walk(pvmw)) {
1119 int ret = 0;
1120
1121 address = pvmw->address;
1122 if (pvmw->pte) {
1123 pte_t *pte = pvmw->pte;
1124 pte_t entry = ptep_get(pte);
1125
1126 /*
1127 * PFN swap PTEs, such as device-exclusive ones, that
1128 * actually map pages are clean and not writable from a
1129 * CPU perspective. The MMU notifier takes care of any
1130 * device aspects.
1131 */
1132 if (!pte_present(entry))
1133 continue;
1134 if (!pte_dirty(entry) && !pte_write(entry))
1135 continue;
1136
1137 flush_cache_page(vma, address, pte_pfn(entry));
1138 entry = ptep_clear_flush(vma, address, pte);
1139 entry = pte_wrprotect(entry);
1140 entry = pte_mkclean(entry);
1141 set_pte_at(vma->vm_mm, address, pte, entry);
1142 ret = 1;
1143 } else {
1144 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1145 pmd_t *pmd = pvmw->pmd;
1146 pmd_t entry = pmdp_get(pmd);
1147
1148 /*
1149 * Please see the comment above (!pte_present).
1150 * A non present PMD is not writable from a CPU
1151 * perspective.
1152 */
1153 if (!pmd_present(entry))
1154 continue;
1155 if (!pmd_dirty(entry) && !pmd_write(entry))
1156 continue;
1157
1158 flush_cache_range(vma, address,
1159 address + HPAGE_PMD_SIZE);
1160 entry = pmdp_invalidate(vma, address, pmd);
1161 entry = pmd_wrprotect(entry);
1162 entry = pmd_mkclean(entry);
1163 set_pmd_at(vma->vm_mm, address, pmd, entry);
1164 ret = 1;
1165 #else
1166 /* unexpected pmd-mapped folio? */
1167 WARN_ON_ONCE(1);
1168 #endif
1169 }
1170
1171 if (ret)
1172 cleaned++;
1173 }
1174
1175 mmu_notifier_invalidate_range_end(&range);
1176
1177 return cleaned;
1178 }
1179
1180 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
1181 unsigned long address, void *arg)
1182 {
1183 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
1184 int *cleaned = arg;
1185
1186 *cleaned += page_vma_mkclean_one(&pvmw);
1187
1188 return true;
1189 }
1190
1191 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1192 {
1193 if (vma->vm_flags & VM_SHARED)
1194 return false;
1195
1196 return true;
1197 }
1198
1199 int folio_mkclean(struct folio *folio)
1200 {
1201 int cleaned = 0;
1202 struct address_space *mapping;
1203 struct rmap_walk_control rwc = {
1204 .arg = (void *)&cleaned,
1205 .rmap_one = page_mkclean_one,
1206 .invalid_vma = invalid_mkclean_vma,
1207 };
1208
1209 BUG_ON(!folio_test_locked(folio));
1210
1211 if (!folio_mapped(folio))
1212 return 0;
1213
1214 mapping = folio_mapping(folio);
1215 if (!mapping)
1216 return 0;
1217
1218 rmap_walk(folio, &rwc);
1219
1220 return cleaned;
1221 }
1222 EXPORT_SYMBOL_GPL(folio_mkclean);
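/*
 * Illustrative sketch (an assumption condensed from the write-protection rules
 * above, not a specific call site): a writeback path can transfer dirty state
 * from the page tables back to the folio before starting IO.
 *
 *	folio_lock(folio);
 *	if (folio_mkclean(folio))
 *		folio_mark_dirty(folio);
 *	folio_unlock(folio);
 *	... later writes now fault and re-dirty the folio ...
 */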
1223
1224 struct wrprotect_file_state {
1225 int cleaned;
1226 pgoff_t pgoff;
1227 unsigned long pfn;
1228 unsigned long nr_pages;
1229 };
1230
1231 static bool mapping_wrprotect_range_one(struct folio *folio,
1232 struct vm_area_struct *vma, unsigned long address, void *arg)
1233 {
1234 struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg;
1235 struct page_vma_mapped_walk pvmw = {
1236 .pfn = state->pfn,
1237 .nr_pages = state->nr_pages,
1238 .pgoff = state->pgoff,
1239 .vma = vma,
1240 .address = address,
1241 .flags = PVMW_SYNC,
1242 };
1243
1244 state->cleaned += page_vma_mkclean_one(&pvmw);
1245
1246 return true;
1247 }
1248
1249 static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
1250 pgoff_t pgoff_start, unsigned long nr_pages,
1251 struct rmap_walk_control *rwc, bool locked);
1252
1253 /**
1254 * mapping_wrprotect_range() - Write-protect all mappings in a specified range.
1255 *
1256 * @mapping: The mapping whose reverse mapping should be traversed.
1257 * @pgoff: The page offset at which @pfn is mapped within @mapping.
1258 * @pfn: The PFN of the page mapped in @mapping at @pgoff.
1259 * @nr_pages: The number of physically contiguous base pages spanned.
1260 *
1261 * Traverses the reverse mapping, finding all VMAs which contain a shared
1262 * mapping of the pages in the specified range in @mapping, and write-protects
1263 * them (that is, updates the page tables to mark the mappings read-only such
1264 * that a write protection fault arises when the mappings are written to).
1265 *
1266 * The @pfn value need not refer to a folio, but rather can reference a kernel
1267 * allocation which is mapped into userland. We therefore do not require that
1268 * the page maps to a folio with a valid mapping or index field, rather the
1269 * caller specifies these in @mapping and @pgoff.
1270 *
1271 * Return: the number of write-protected PTEs, or an error.
1272 */
1273 int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
1274 unsigned long pfn, unsigned long nr_pages)
1275 {
1276 struct wrprotect_file_state state = {
1277 .cleaned = 0,
1278 .pgoff = pgoff,
1279 .pfn = pfn,
1280 .nr_pages = nr_pages,
1281 };
1282 struct rmap_walk_control rwc = {
1283 .arg = (void *)&state,
1284 .rmap_one = mapping_wrprotect_range_one,
1285 .invalid_vma = invalid_mkclean_vma,
1286 };
1287
1288 if (!mapping)
1289 return 0;
1290
1291 __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc,
1292 /* locked = */false);
1293
1294 return state.cleaned;
1295 }
1296 EXPORT_SYMBOL_GPL(mapping_wrprotect_range);
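/*
 * Illustrative sketch (an assumption built on the kerneldoc above; buf_mapping,
 * buf_pgoff and buf_page are hypothetical names): a driver exposing a kernel
 * buffer to userspace can re-arm write-fault tracking after a flush.
 *
 *	cleaned = mapping_wrprotect_range(buf_mapping, buf_pgoff,
 *					  page_to_pfn(buf_page), 1);
 *	if (cleaned)
 *		... at least one user mapping was writable and is now
 *		    read-only, so the next write will fault ...
 */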
1297
1298 /**
1299 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
1300 * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
1301 * within the @vma of shared mappings. And since clean PTEs
1302 * should also be readonly, write protects them too.
1303 * @pfn: start pfn.
1304 * @nr_pages: number of physically contiguous pages starting with @pfn.
1305 * @pgoff: page offset that the @pfn mapped with.
1306 * @vma: vma that @pfn mapped within.
1307 *
1308 * Returns the number of cleaned PTEs (including PMDs).
1309 */
1310 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
1311 struct vm_area_struct *vma)
1312 {
1313 struct page_vma_mapped_walk pvmw = {
1314 .pfn = pfn,
1315 .nr_pages = nr_pages,
1316 .pgoff = pgoff,
1317 .vma = vma,
1318 .flags = PVMW_SYNC,
1319 };
1320
1321 if (invalid_mkclean_vma(vma, NULL))
1322 return 0;
1323
1324 pvmw.address = vma_address(vma, pgoff, nr_pages);
1325 VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
1326
1327 return page_vma_mkclean_one(&pvmw);
1328 }
1329
1330 static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
1331 {
1332 int idx;
1333
1334 if (nr) {
1335 idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
1336 lruvec_stat_mod_folio(folio, idx, nr);
1337 }
1338 if (nr_pmdmapped) {
1339 if (folio_test_anon(folio)) {
1340 idx = NR_ANON_THPS;
1341 lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
1342 } else {
1343 /* NR_*_PMDMAPPED are not maintained per-memcg */
1344 idx = folio_test_swapbacked(folio) ?
1345 NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED;
1346 __mod_node_page_state(folio_pgdat(folio), idx,
1347 nr_pmdmapped);
1348 }
1349 }
1350 }
1351
1352 static __always_inline void __folio_add_rmap(struct folio *folio,
1353 struct page *page, int nr_pages, struct vm_area_struct *vma,
1354 enum pgtable_level level)
1355 {
1356 atomic_t *mapped = &folio->_nr_pages_mapped;
1357 const int orig_nr_pages = nr_pages;
1358 int first = 0, nr = 0, nr_pmdmapped = 0;
1359
1360 __folio_rmap_sanity_checks(folio, page, nr_pages, level);
1361
1362 switch (level) {
1363 case PGTABLE_LEVEL_PTE:
1364 if (!folio_test_large(folio)) {
1365 nr = atomic_inc_and_test(&folio->_mapcount);
1366 break;
1367 }
1368
1369 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
1370 nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
1371 if (nr == orig_nr_pages)
1372 /* Was completely unmapped. */
1373 nr = folio_large_nr_pages(folio);
1374 else
1375 nr = 0;
1376 break;
1377 }
1378
1379 do {
1380 first += atomic_inc_and_test(&page->_mapcount);
1381 } while (page++, --nr_pages > 0);
1382
1383 if (first &&
1384 atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED)
1385 nr = first;
1386
1387 folio_add_large_mapcount(folio, orig_nr_pages, vma);
1388 break;
1389 case PGTABLE_LEVEL_PMD:
1390 case PGTABLE_LEVEL_PUD:
1391 first = atomic_inc_and_test(&folio->_entire_mapcount);
1392 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
1393 if (level == PGTABLE_LEVEL_PMD && first)
1394 nr_pmdmapped = folio_large_nr_pages(folio);
1395 nr = folio_inc_return_large_mapcount(folio, vma);
1396 if (nr == 1)
1397 /* Was completely unmapped. */
1398 nr = folio_large_nr_pages(folio);
1399 else
1400 nr = 0;
1401 break;
1402 }
1403
1404 if (first) {
1405 nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
1406 if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
1407 nr_pages = folio_large_nr_pages(folio);
1408 /*
1409 * We only track PMD mappings of PMD-sized
1410 * folios separately.
1411 */
1412 if (level == PGTABLE_LEVEL_PMD)
1413 nr_pmdmapped = nr_pages;
1414 nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
1415 /* Raced ahead of a remove and another add? */
1416 if (unlikely(nr < 0))
1417 nr = 0;
1418 } else {
1419 /* Raced ahead of a remove of ENTIRELY_MAPPED */
1420 nr = 0;
1421 }
1422 }
1423 folio_inc_large_mapcount(folio, vma);
1424 break;
1425 default:
1426 BUILD_BUG();
1427 }
1428 __folio_mod_stat(folio, nr, nr_pmdmapped);
1429 }
1430
1431 /**
1432 * folio_move_anon_rmap - move a folio to our anon_vma
1433 * @folio: The folio to move to our anon_vma
1434 * @vma: The vma the folio belongs to
1435 *
1436 * When a folio belongs exclusively to one process after a COW event,
1437 * that folio can be moved into the anon_vma that belongs to just that
1438 * process, so the rmap code will not search the parent or sibling processes.
1439 */
1440 void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
1441 {
1442 void *anon_vma = vma->anon_vma;
1443
1444 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1445 VM_BUG_ON_VMA(!anon_vma, vma);
1446
1447 anon_vma += FOLIO_MAPPING_ANON;
1448 /*
1449 * Ensure that anon_vma and the FOLIO_MAPPING_ANON bit are written
1450 * simultaneously, so a concurrent reader (eg folio_referenced()'s
1451 * folio_test_anon()) will not see one without the other.
1452 */
1453 WRITE_ONCE(folio->mapping, anon_vma);
1454 }
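/*
 * Illustrative sketch of a typical caller (a simplified assumption about the
 * COW-reuse path, not its exact logic): when a write fault finds the anon
 * folio is owned by this process alone, ownership is moved before the mapping
 * is made writable again.
 *
 *	if (folio_trylock(folio)) {
 *		if (folio_ref_count(folio) == 1 &&
 *		    !folio_maybe_mapped_shared(folio))
 *			folio_move_anon_rmap(folio, vma);
 *		folio_unlock(folio);
 *	}
 */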
1455
1456 /**
1457 * __folio_set_anon - set up a new anonymous rmap for a folio
1458 * @folio: The folio to set up the new anonymous rmap for.
1459 * @vma: VM area to add the folio to.
1460 * @address: User virtual address of the mapping
1461 * @exclusive: Whether the folio is exclusive to the process.
1462 */
1463 static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
1464 unsigned long address, bool exclusive)
1465 {
1466 struct anon_vma *anon_vma = vma->anon_vma;
1467
1468 BUG_ON(!anon_vma);
1469
1470 /*
1471 * If the folio isn't exclusive to this vma, we must use the _oldest_
1472 * possible anon_vma for the folio mapping!
1473 */
1474 if (!exclusive)
1475 anon_vma = anon_vma->root;
1476
1477 /*
1478 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
1479 * Make sure the compiler doesn't split the stores of anon_vma and
1480 * the FOLIO_MAPPING_ANON type identifier, otherwise the rmap code
1481 * could mistake the mapping for a struct address_space and crash.
1482 */
1483 anon_vma = (void *) anon_vma + FOLIO_MAPPING_ANON;
1484 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
1485 folio->index = linear_page_index(vma, address);
1486 }
1487
1488 /**
1489 * __page_check_anon_rmap - sanity check anonymous rmap addition
1490 * @folio: The folio containing @page.
1491 * @page: the page to check the mapping of
1492 * @vma: the vm area in which the mapping is added
1493 * @address: the user virtual address mapped
1494 */
1495 static void __page_check_anon_rmap(const struct folio *folio,
1496 const struct page *page, struct vm_area_struct *vma,
1497 unsigned long address)
1498 {
1499 /*
1500 * The page's anon-rmap details (mapping and index) are guaranteed to
1501 * be set up correctly at this point.
1502 *
1503 * We have exclusion against folio_add_anon_rmap_*() because the caller
1504 * always holds the page locked.
1505 *
1506 * We have exclusion against folio_add_new_anon_rmap because those pages
1507 * are initially only visible via the pagetables, and the pte is locked
1508 * over the call to folio_add_new_anon_rmap.
1509 */
1510 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1511 folio);
1512 VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address),
1513 page);
1514 }
1515
1516 static __always_inline void __folio_add_anon_rmap(struct folio *folio,
1517 struct page *page, int nr_pages, struct vm_area_struct *vma,
1518 unsigned long address, rmap_t flags, enum pgtable_level level)
1519 {
1520 int i;
1521
1522 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
1523
1524 __folio_add_rmap(folio, page, nr_pages, vma, level);
1525
1526 if (likely(!folio_test_ksm(folio)))
1527 __page_check_anon_rmap(folio, page, vma, address);
1528
1529 if (flags & RMAP_EXCLUSIVE) {
1530 switch (level) {
1531 case PGTABLE_LEVEL_PTE:
1532 for (i = 0; i < nr_pages; i++)
1533 SetPageAnonExclusive(page + i);
1534 break;
1535 case PGTABLE_LEVEL_PMD:
1536 SetPageAnonExclusive(page);
1537 break;
1538 case PGTABLE_LEVEL_PUD:
1539 /*
1540 * Keep the compiler happy, we don't support anonymous
1541 * PUD mappings.
1542 */
1543 WARN_ON_ONCE(1);
1544 break;
1545 default:
1546 BUILD_BUG();
1547 }
1548 }
1549
1550 VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) &&
1551 atomic_read(&folio->_mapcount) > 0, folio);
1552 for (i = 0; i < nr_pages; i++) {
1553 struct page *cur_page = page + i;
1554
1555 VM_WARN_ON_FOLIO(folio_test_large(folio) &&
1556 folio_entire_mapcount(folio) > 1 &&
1557 PageAnonExclusive(cur_page), folio);
1558 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
1559 continue;
1560
1561 /*
1562 * While PTE-mapping a THP we have a PMD and a PTE
1563 * mapping.
1564 */
1565 VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 &&
1566 PageAnonExclusive(cur_page), folio);
1567 }
1568
1569 /*
1570 * Only mlock it if the folio is fully mapped to the VMA.
1571 *
1572 * Partially mapped folios can be split on reclaim, and the part outside
1573 * of the mlocked VMA can be evicted or freed.
1574 */
1575 if (folio_nr_pages(folio) == nr_pages)
1576 mlock_vma_folio(folio, vma);
1577 }
1578
1579 /**
1580 * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
1581 * @folio: The folio to add the mappings to
1582 * @page: The first page to add
1583 * @nr_pages: The number of pages which will be mapped
1584 * @vma: The vm area in which the mappings are added
1585 * @address: The user virtual address of the first page to map
1586 * @flags: The rmap flags
1587 *
1588 * The page range of folio is defined by [first_page, first_page + nr_pages)
1589 *
1590 * The caller needs to hold the page table lock, and the page must be locked in
1591 * the anon_vma case: to serialize mapping,index checking after setting,
1592 * and to ensure that an anon folio is not being upgraded racily to a KSM folio
1593 * (but KSM folios are never downgraded).
1594 */
1595 void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
1596 int nr_pages, struct vm_area_struct *vma, unsigned long address,
1597 rmap_t flags)
1598 {
1599 __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
1600 PGTABLE_LEVEL_PTE);
1601 }
1602
1603 /**
1604 * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
1605 * @folio: The folio to add the mapping to
1606 * @page: The first page to add
1607 * @vma: The vm area in which the mapping is added
1608 * @address: The user virtual address of the first page to map
1609 * @flags: The rmap flags
1610 *
1611 * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
1612 *
1613 * The caller needs to hold the page table lock, and the page must be locked in
1614 * the anon_vma case: to serialize mapping,index checking after setting.
1615 */
1616 void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
1617 struct vm_area_struct *vma, unsigned long address, rmap_t flags)
1618 {
1619 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1620 __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
1621 PGTABLE_LEVEL_PMD);
1622 #else
1623 WARN_ON_ONCE(true);
1624 #endif
1625 }
1626
1627 /**
1628 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
1629 * @folio: The folio to add the mapping to.
1630 * @vma: the vm area in which the mapping is added
1631 * @address: the user virtual address mapped
1632 * @flags: The rmap flags
1633 *
1634 * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
1635 * This means the inc-and-test can be bypassed.
1636 * The folio doesn't necessarily need to be locked while it's exclusive
1637 * unless two threads map it concurrently. However, the folio must be
1638 * locked if it's shared.
1639 *
1640 * If the folio is pmd-mappable, it is accounted as a THP.
1641 */
1642 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
1643 unsigned long address, rmap_t flags)
1644 {
1645 const bool exclusive = flags & RMAP_EXCLUSIVE;
1646 int nr = 1, nr_pmdmapped = 0;
1647
1648 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
1649 VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);
1650
1651 /*
1652 * VM_DROPPABLE mappings don't swap; instead they're just dropped when
1653 * under memory pressure.
1654 */
1655 if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE))
1656 __folio_set_swapbacked(folio);
1657 __folio_set_anon(folio, vma, address, exclusive);
1658
1659 if (likely(!folio_test_large(folio))) {
1660 /* increment count (starts at -1) */
1661 atomic_set(&folio->_mapcount, 0);
1662 if (exclusive)
1663 SetPageAnonExclusive(&folio->page);
1664 } else if (!folio_test_pmd_mappable(folio)) {
1665 int i;
1666
1667 nr = folio_large_nr_pages(folio);
1668 for (i = 0; i < nr; i++) {
1669 struct page *page = folio_page(folio, i);
1670
1671 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
1672 /* increment count (starts at -1) */
1673 atomic_set(&page->_mapcount, 0);
1674 if (exclusive)
1675 SetPageAnonExclusive(page);
1676 }
1677
1678 folio_set_large_mapcount(folio, nr, vma);
1679 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
1680 atomic_set(&folio->_nr_pages_mapped, nr);
1681 } else {
1682 nr = folio_large_nr_pages(folio);
1683 /* increment count (starts at -1) */
1684 atomic_set(&folio->_entire_mapcount, 0);
1685 folio_set_large_mapcount(folio, 1, vma);
1686 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
1687 atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
1688 if (exclusive)
1689 SetPageAnonExclusive(&folio->page);
1690 nr_pmdmapped = nr;
1691 }
1692
1693 VM_WARN_ON_ONCE(address < vma->vm_start ||
1694 address + (nr << PAGE_SHIFT) > vma->vm_end);
1695
1696 __folio_mod_stat(folio, nr, nr_pmdmapped);
1697 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
1698 }
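
/*
 * Illustrative sketch (not part of the kernel sources): the typical shape of
 * an anonymous-fault caller. A freshly allocated, charged folio is added to
 * the rmap with RMAP_EXCLUSIVE under the page table lock before its PTEs are
 * installed; alloc_and_charge_folio() and install_ptes() are made-up
 * placeholder names, only the rmap/LRU calls reflect real APIs:
 *
 *	folio = alloc_and_charge_folio(vma, addr);		// hypothetical
 *	__folio_mark_uptodate(folio);
 *	spin_lock(ptl);
 *	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
 *	folio_add_lru_vma(folio, vma);
 *	install_ptes(vma, addr, folio);				// hypothetical
 *	spin_unlock(ptl);
 */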
1699
1700 static __always_inline void __folio_add_file_rmap(struct folio *folio,
1701 struct page *page, int nr_pages, struct vm_area_struct *vma,
1702 enum pgtable_level level)
1703 {
1704 VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
1705
1706 __folio_add_rmap(folio, page, nr_pages, vma, level);
1707
1708 /*
1709 * Only mlock it if the folio is fully mapped to the VMA.
1710 *
1711 * Partially mapped folios can be split on reclaim and part outside
1712 * of mlocked VMA can be evicted or freed.
1713 */
1714 if (folio_nr_pages(folio) == nr_pages)
1715 mlock_vma_folio(folio, vma);
1716 }
1717
1718 /**
1719 * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio
1720 * @folio: The folio to add the mappings to
1721 * @page: The first page to add
1722 * @nr_pages: The number of pages that will be mapped using PTEs
1723 * @vma: The vm area in which the mappings are added
1724 *
1725 * The page range of the folio is defined by [page, page + nr_pages)
1726 *
1727 * The caller needs to hold the page table lock.
1728 */
1729 void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
1730 int nr_pages, struct vm_area_struct *vma)
1731 {
1732 __folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
1733 }
1734
1735 /**
1736 * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
1737 * @folio: The folio to add the mapping to
1738 * @page: The first page to add
1739 * @vma: The vm area in which the mapping is added
1740 *
1741 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
1742 *
1743 * The caller needs to hold the page table lock.
1744 */
1745 void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
1746 struct vm_area_struct *vma)
1747 {
1748 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1749 __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
1750 #else
1751 WARN_ON_ONCE(true);
1752 #endif
1753 }
1754
1755 /**
1756 * folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio
1757 * @folio: The folio to add the mapping to
1758 * @page: The first page to add
1759 * @vma: The vm area in which the mapping is added
1760 *
1761 * The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
1762 *
1763 * The caller needs to hold the page table lock.
1764 */
1765 void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
1766 struct vm_area_struct *vma)
1767 {
1768 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
1769 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1770 __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
1771 #else
1772 WARN_ON_ONCE(true);
1773 #endif
1774 }
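
/*
 * Illustrative sketch (not part of the kernel sources): a file-backed fault
 * path mapping nr pages of a pagecache folio in one go. The PTE value
 * "entry" and the page table lock are assumed to be prepared by the caller;
 * only the rmap call matches the API above:
 *
 *	first = folio_page(folio, start);
 *	spin_lock(ptl);
 *	folio_add_file_rmap_ptes(folio, first, nr, vma);
 *	set_ptes(mm, addr, ptep, entry, nr);
 *	spin_unlock(ptl);
 */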
1775
1776 static __always_inline void __folio_remove_rmap(struct folio *folio,
1777 struct page *page, int nr_pages, struct vm_area_struct *vma,
1778 enum pgtable_level level)
1779 {
1780 atomic_t *mapped = &folio->_nr_pages_mapped;
1781 int last = 0, nr = 0, nr_pmdmapped = 0;
1782 bool partially_mapped = false;
1783
1784 __folio_rmap_sanity_checks(folio, page, nr_pages, level);
1785
1786 switch (level) {
1787 case PGTABLE_LEVEL_PTE:
1788 if (!folio_test_large(folio)) {
1789 nr = atomic_add_negative(-1, &folio->_mapcount);
1790 break;
1791 }
1792
1793 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
1794 nr = folio_sub_return_large_mapcount(folio, nr_pages, vma);
1795 if (!nr) {
1796 /* Now completely unmapped. */
1797 nr = folio_large_nr_pages(folio);
1798 } else {
1799 partially_mapped = nr < folio_large_nr_pages(folio) &&
1800 !folio_entire_mapcount(folio);
1801 nr = 0;
1802 }
1803 break;
1804 }
1805
1806 folio_sub_large_mapcount(folio, nr_pages, vma);
1807 do {
1808 last += atomic_add_negative(-1, &page->_mapcount);
1809 } while (page++, --nr_pages > 0);
1810
1811 if (last &&
1812 atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED)
1813 nr = last;
1814
1815 partially_mapped = nr && atomic_read(mapped);
1816 break;
1817 case PGTABLE_LEVEL_PMD:
1818 case PGTABLE_LEVEL_PUD:
1819 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
1820 last = atomic_add_negative(-1, &folio->_entire_mapcount);
1821 if (level == PGTABLE_LEVEL_PMD && last)
1822 nr_pmdmapped = folio_large_nr_pages(folio);
1823 nr = folio_dec_return_large_mapcount(folio, vma);
1824 if (!nr) {
1825 /* Now completely unmapped. */
1826 nr = folio_large_nr_pages(folio);
1827 } else {
1828 partially_mapped = last &&
1829 nr < folio_large_nr_pages(folio);
1830 nr = 0;
1831 }
1832 break;
1833 }
1834
1835 folio_dec_large_mapcount(folio, vma);
1836 last = atomic_add_negative(-1, &folio->_entire_mapcount);
1837 if (last) {
1838 nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
1839 if (likely(nr < ENTIRELY_MAPPED)) {
1840 nr_pages = folio_large_nr_pages(folio);
1841 if (level == PGTABLE_LEVEL_PMD)
1842 nr_pmdmapped = nr_pages;
1843 nr = nr_pages - nr;
1844 /* Raced ahead of another remove and an add? */
1845 if (unlikely(nr < 0))
1846 nr = 0;
1847 } else {
1848 /* An add of ENTIRELY_MAPPED raced ahead */
1849 nr = 0;
1850 }
1851 }
1852
1853 partially_mapped = nr && nr < nr_pmdmapped;
1854 break;
1855 default:
1856 BUILD_BUG();
1857 }
1858
1859 /*
1860 * Queue anon large folio for deferred split if at least one page of
1861 * the folio is unmapped and at least one page is still mapped.
1862 *
1863 * Check partially_mapped first to ensure it is a large folio.
1864 *
1865 * Device private folios do not support deferred splitting and
1866 * shrinker based scanning of the folios to free.
1867 */
1868 if (partially_mapped && folio_test_anon(folio) &&
1869 !folio_test_partially_mapped(folio) &&
1870 !folio_is_device_private(folio))
1871 deferred_split_folio(folio, true);
1872
1873 __folio_mod_stat(folio, -nr, -nr_pmdmapped);
1874
1875 /*
1876 * It would be tidy to reset folio_test_anon mapping when fully
1877 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
1878 * which increments mapcount after us but sets mapping before us:
1879 * so leave the reset to free_pages_prepare, and remember that
1880 * it's only reliable while mapped.
1881 */
1882
1883 munlock_vma_folio(folio, vma);
1884 }
1885
1886 /**
1887 * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio
1888 * @folio: The folio to remove the mappings from
1889 * @page: The first page to remove
1890 * @nr_pages: The number of pages that will be removed from the mapping
1891 * @vma: The vm area from which the mappings are removed
1892 *
1893 * The page range of the folio is defined by [page, page + nr_pages)
1894 *
1895 * The caller needs to hold the page table lock.
1896 */
1897 void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
1898 int nr_pages, struct vm_area_struct *vma)
1899 {
1900 __folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
1901 }
1902
1903 /**
1904 * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
1905 * @folio: The folio to remove the mapping from
1906 * @page: The first page to remove
1907 * @vma: The vm area from which the mapping is removed
1908 *
1909 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
1910 *
1911 * The caller needs to hold the page table lock.
1912 */
1913 void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
1914 struct vm_area_struct *vma)
1915 {
1916 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1917 __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
1918 #else
1919 WARN_ON_ONCE(true);
1920 #endif
1921 }
1922
1923 /**
1924 * folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio
1925 * @folio: The folio to remove the mapping from
1926 * @page: The first page to remove
1927 * @vma: The vm area from which the mapping is removed
1928 *
1929 * The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
1930 *
1931 * The caller needs to hold the page table lock.
1932 */
1933 void folio_remove_rmap_pud(struct folio *folio, struct page *page,
1934 struct vm_area_struct *vma)
1935 {
1936 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
1937 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1938 __folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
1939 #else
1940 WARN_ON_ONCE(true);
1941 #endif
1942 }
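
/*
 * Illustrative sketch (not part of the kernel sources): tearing down a
 * PTE-mapped range mirrors the add path. The PTEs are cleared first and only
 * then is the rmap accounting dropped, all under the page table lock; the
 * reference handling below is simplified:
 *
 *	spin_lock(ptl);
 *	pteval = get_and_clear_ptes(mm, addr, ptep, nr);
 *	if (pte_dirty(pteval))
 *		folio_mark_dirty(folio);
 *	folio_remove_rmap_ptes(folio, page, nr, vma);
 *	spin_unlock(ptl);
 *	folio_put_refs(folio, nr);
 */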
1943
1944 static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
1945 struct page_vma_mapped_walk *pvmw,
1946 enum ttu_flags flags, pte_t pte)
1947 {
1948 unsigned long end_addr, addr = pvmw->address;
1949 struct vm_area_struct *vma = pvmw->vma;
1950 unsigned int max_nr;
1951
1952 if (flags & TTU_HWPOISON)
1953 return 1;
1954 if (!folio_test_large(folio))
1955 return 1;
1956
1957 /* We may only batch within a single VMA and a single page table. */
1958 end_addr = pmd_addr_end(addr, vma->vm_end);
1959 max_nr = (end_addr - addr) >> PAGE_SHIFT;
1960
1961 /* We only support batching of lazyfree or file folios for now ... */
1962 if (folio_test_anon(folio) && folio_test_swapbacked(folio))
1963 return 1;
1964
1965 if (pte_unused(pte))
1966 return 1;
1967
1968 if (userfaultfd_wp(vma))
1969 return 1;
1970
1971 /*
1972 * If unmap fails, we need to restore the ptes. To avoid accidentally
1973 * upgrading write permissions for ptes that were not originally
1974 * writable, and to avoid losing the soft-dirty bit, use the
1975 * appropriate FPB flags.
1976 */
1977 return folio_pte_batch_flags(folio, vma, pvmw->pte, &pte, max_nr,
1978 FPB_RESPECT_WRITE | FPB_RESPECT_SOFT_DIRTY);
1979 }
1980
1981 /*
1982 * @arg: enum ttu_flags will be passed to this argument
1983 */
1984 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1985 unsigned long address, void *arg)
1986 {
1987 struct mm_struct *mm = vma->vm_mm;
1988 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1989 bool anon_exclusive, ret = true;
1990 pte_t pteval;
1991 struct page *subpage;
1992 struct mmu_notifier_range range;
1993 enum ttu_flags flags = (enum ttu_flags)(long)arg;
1994 unsigned long nr_pages = 1, end_addr;
1995 unsigned long pfn;
1996 unsigned long hsz = 0;
1997 int ptes = 0;
1998
1999 /*
2000 * When racing against e.g. zap_pte_range() on another cpu,
2001 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
2002 * try_to_unmap() may return before page_mapped() has become false,
2003 * if page table locking is skipped: use TTU_SYNC to wait for that.
2004 */
2005 if (flags & TTU_SYNC)
2006 pvmw.flags = PVMW_SYNC;
2007
2008 /*
2009 * For THP, we have to assume the worst case, i.e. a PMD, for invalidation.
2010 * For hugetlb, it could be much worse if we need to do pud
2011 * invalidation in the case of pmd sharing.
2012 *
2013 * Note that the folio cannot be freed in this function, as the caller
2014 * of try_to_unmap() must hold a reference on the folio.
2015 */
2016 range.end = vma_address_end(&pvmw);
2017 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2018 address, range.end);
2019 if (folio_test_hugetlb(folio)) {
2020 /*
2021 * If sharing is possible, start and end will be adjusted
2022 * accordingly.
2023 */
2024 adjust_range_if_pmd_sharing_possible(vma, &range.start,
2025 &range.end);
2026
2027 /* We need the huge page size for set_huge_pte_at() */
2028 hsz = huge_page_size(hstate_vma(vma));
2029 }
2030 mmu_notifier_invalidate_range_start(&range);
2031
2032 while (page_vma_mapped_walk(&pvmw)) {
2033 /*
2034 * If the folio is in an mlock()d vma, we must not swap it out.
2035 */
2036 if (!(flags & TTU_IGNORE_MLOCK) &&
2037 (vma->vm_flags & VM_LOCKED)) {
2038 ptes++;
2039
2040 /*
2041 * Set 'ret' to indicate the page cannot be unmapped.
2042 *
2043 * Do not jump to walk_abort immediately, as additional
2044 * iterations might be required to detect a fully mapped
2045 * folio and mlock it.
2046 */
2047 ret = false;
2048
2049 /* Only mlock fully mapped pages */
2050 if (pvmw.pte && ptes != pvmw.nr_pages)
2051 continue;
2052
2053 /*
2054 * All PTEs must be protected by page table lock in
2055 * order to mlock the page.
2056 *
2057 * If a page table boundary has been crossed, the current
2058 * ptl only protects part of the ptes.
2059 */
2060 if (pvmw.flags & PVMW_PGTABLE_CROSSED)
2061 goto walk_done;
2062
2063 /* Restore the mlock which got missed */
2064 mlock_vma_folio(folio, vma);
2065 goto walk_done;
2066 }
2067
2068 if (!pvmw.pte) {
2069 if (folio_test_lazyfree(folio)) {
2070 if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
2071 goto walk_done;
2072 /*
2073 * unmap_huge_pmd_locked has either already marked
2074 * the folio as swap-backed or decided to retain it
2075 * due to GUP or speculative references.
2076 */
2077 goto walk_abort;
2078 }
2079
2080 if (flags & TTU_SPLIT_HUGE_PMD) {
2081 /*
2082 * We temporarily have to drop the PTL and
2083 * restart so we can process the PTE-mapped THP.
2084 */
2085 split_huge_pmd_locked(vma, pvmw.address,
2086 pvmw.pmd, false);
2087 flags &= ~TTU_SPLIT_HUGE_PMD;
2088 page_vma_mapped_walk_restart(&pvmw);
2089 continue;
2090 }
2091 }
2092
2093 /* Unexpected PMD-mapped THP? */
2094 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2095
2096 /*
2097 * Handle PFN swap PTEs, such as device-exclusive ones, that
2098 * actually map pages.
2099 */
2100 pteval = ptep_get(pvmw.pte);
2101 if (likely(pte_present(pteval))) {
2102 pfn = pte_pfn(pteval);
2103 } else {
2104 const softleaf_t entry = softleaf_from_pte(pteval);
2105
2106 pfn = softleaf_to_pfn(entry);
2107 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
2108 }
2109
2110 subpage = folio_page(folio, pfn - folio_pfn(folio));
2111 address = pvmw.address;
2112 anon_exclusive = folio_test_anon(folio) &&
2113 PageAnonExclusive(subpage);
2114
2115 if (folio_test_hugetlb(folio)) {
2116 bool anon = folio_test_anon(folio);
2117
2118 /*
2119 * try_to_unmap() is only passed a hugetlb page
2120 * when that hugetlb page is poisoned.
2121 */
2122 VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
2123 /*
2124 * huge_pmd_unshare may unmap an entire PMD page.
2125 * There is no way of knowing exactly which PMDs may
2126 * be cached for this mm, so we must flush them all.
2127 * start/end were already adjusted above to cover this
2128 * range.
2129 */
2130 flush_cache_range(vma, range.start, range.end);
2131
2132 /*
2133 * To call huge_pmd_unshare, i_mmap_rwsem must be
2134 * held in write mode. Caller needs to explicitly
2135 * do this outside rmap routines.
2136 *
2137 * We also must hold hugetlb vma_lock in write mode.
2138 * Lock order dictates acquiring vma_lock BEFORE
2139 * i_mmap_rwsem. We can only try lock here and fail
2140 * if unsuccessful.
2141 */
2142 if (!anon) {
2143 struct mmu_gather tlb;
2144
2145 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
2146 if (!hugetlb_vma_trylock_write(vma))
2147 goto walk_abort;
2148
2149 tlb_gather_mmu_vma(&tlb, vma);
2150 if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) {
2151 hugetlb_vma_unlock_write(vma);
2152 huge_pmd_unshare_flush(&tlb, vma);
2153 tlb_finish_mmu(&tlb);
2154 /*
2155 * The PMD table was unmapped,
2156 * consequently unmapping the folio.
2157 */
2158 goto walk_done;
2159 }
2160 hugetlb_vma_unlock_write(vma);
2161 tlb_finish_mmu(&tlb);
2162 }
2163 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
2164 if (pte_dirty(pteval))
2165 folio_mark_dirty(folio);
2166 } else if (likely(pte_present(pteval))) {
2167 nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval);
2168 end_addr = address + nr_pages * PAGE_SIZE;
2169 flush_cache_range(vma, address, end_addr);
2170
2171 /* Nuke the page table entry. */
2172 pteval = get_and_clear_ptes(mm, address, pvmw.pte, nr_pages);
2173 /*
2174 * We clear the PTE but do not flush so potentially
2175 * a remote CPU could still be writing to the folio.
2176 * If the entry was previously clean then the
2177 * architecture must guarantee that a clear->dirty
2178 * transition on a cached TLB entry is written through
2179 * and traps if the PTE is unmapped.
2180 */
2181 if (should_defer_flush(mm, flags))
2182 set_tlb_ubc_flush_pending(mm, pteval, address, end_addr);
2183 else
2184 flush_tlb_range(vma, address, end_addr);
2185 if (pte_dirty(pteval))
2186 folio_mark_dirty(folio);
2187 } else {
2188 pte_clear(mm, address, pvmw.pte);
2189 }
2190
2191 /*
2192 * Now the pte is cleared. If this pte was uffd-wp armed,
2193 * we may want to replace a none pte with a marker pte if
2194 * it's file-backed, so we don't lose the tracking info.
2195 */
2196 pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
2197
2198 /* Update high watermark before we lower rss */
2199 update_hiwater_rss(mm);
2200
2201 if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
2202 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2203 if (folio_test_hugetlb(folio)) {
2204 hugetlb_count_sub(folio_nr_pages(folio), mm);
2205 set_huge_pte_at(mm, address, pvmw.pte, pteval,
2206 hsz);
2207 } else {
2208 dec_mm_counter(mm, mm_counter(folio));
2209 set_pte_at(mm, address, pvmw.pte, pteval);
2210 }
2211 } else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
2212 !userfaultfd_armed(vma)) {
2213 /*
2214 * The guest indicated that the page content is of no
2215 * interest anymore. Simply discard the pte, vmscan
2216 * will take care of the rest.
2217 * A future reference will then fault in a new zero
2218 * page. When userfaultfd is active, we must not drop
2219 * this page though, as its main user (postcopy
2220 * migration) will not expect userfaults on already
2221 * copied pages.
2222 */
2223 dec_mm_counter(mm, mm_counter(folio));
2224 } else if (folio_test_anon(folio)) {
2225 swp_entry_t entry = page_swap_entry(subpage);
2226 pte_t swp_pte;
2227 /*
2228 * Store the swap location in the pte.
2229 * See handle_pte_fault() ...
2230 */
2231 if (unlikely(folio_test_swapbacked(folio) !=
2232 folio_test_swapcache(folio))) {
2233 WARN_ON_ONCE(1);
2234 goto walk_abort;
2235 }
2236
2237 /* MADV_FREE page check */
2238 if (!folio_test_swapbacked(folio)) {
2239 int ref_count, map_count;
2240
2241 /*
2242 * Synchronize with gup_pte_range():
2243 * - clear PTE; barrier; read refcount
2244 * - inc refcount; barrier; read PTE
2245 */
2246 smp_mb();
2247
2248 ref_count = folio_ref_count(folio);
2249 map_count = folio_mapcount(folio);
2250
2251 /*
2252 * Order reads for page refcount and dirty flag
2253 * (see comments in __remove_mapping()).
2254 */
2255 smp_rmb();
2256
2257 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
2258 /*
2259 * redirtied either using the page table or a previously
2260 * obtained GUP reference.
2261 */
2262 set_ptes(mm, address, pvmw.pte, pteval, nr_pages);
2263 folio_set_swapbacked(folio);
2264 goto walk_abort;
2265 } else if (ref_count != 1 + map_count) {
2266 /*
2267 * Additional reference. Could be a GUP reference or any
2268 * speculative reference. GUP users must mark the folio
2269 * dirty if there was a modification. This folio cannot be
2270 * reclaimed right now either way, so act just like nothing
2271 * happened.
2272 * We'll come back here later and detect if the folio was
2273 * dirtied when the additional reference is gone.
2274 */
2275 set_ptes(mm, address, pvmw.pte, pteval, nr_pages);
2276 goto walk_abort;
2277 }
2278 add_mm_counter(mm, MM_ANONPAGES, -nr_pages);
2279 goto discard;
2280 }
2281
2282 if (folio_dup_swap(folio, subpage) < 0) {
2283 set_pte_at(mm, address, pvmw.pte, pteval);
2284 goto walk_abort;
2285 }
2286
2287 /*
2288 * arch_unmap_one() is expected to be a NOP on
2289 * architectures where we could have PFN swap PTEs,
2290 * so we'll not check/care.
2291 */
2292 if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2293 folio_put_swap(folio, subpage);
2294 set_pte_at(mm, address, pvmw.pte, pteval);
2295 goto walk_abort;
2296 }
2297
2298 /* See folio_try_share_anon_rmap(): clear PTE first. */
2299 if (anon_exclusive &&
2300 folio_try_share_anon_rmap_pte(folio, subpage)) {
2301 folio_put_swap(folio, subpage);
2302 set_pte_at(mm, address, pvmw.pte, pteval);
2303 goto walk_abort;
2304 }
2305 if (list_empty(&mm->mmlist)) {
2306 spin_lock(&mmlist_lock);
2307 if (list_empty(&mm->mmlist))
2308 list_add(&mm->mmlist, &init_mm.mmlist);
2309 spin_unlock(&mmlist_lock);
2310 }
2311 dec_mm_counter(mm, MM_ANONPAGES);
2312 inc_mm_counter(mm, MM_SWAPENTS);
2313 swp_pte = swp_entry_to_pte(entry);
2314 if (anon_exclusive)
2315 swp_pte = pte_swp_mkexclusive(swp_pte);
2316 if (likely(pte_present(pteval))) {
2317 if (pte_soft_dirty(pteval))
2318 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2319 if (pte_uffd_wp(pteval))
2320 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2321 } else {
2322 if (pte_swp_soft_dirty(pteval))
2323 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2324 if (pte_swp_uffd_wp(pteval))
2325 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2326 }
2327 set_pte_at(mm, address, pvmw.pte, swp_pte);
2328 } else {
2329 /*
2330 * This is a locked file-backed folio,
2331 * so it cannot be removed from the page
2332 * cache and replaced by a new folio before
2333 * mmu_notifier_invalidate_range_end, so no
2334 * concurrent thread might update its page table
2335 * to point at a new folio while a device is
2336 * still using this folio.
2337 *
2338 * See Documentation/mm/mmu_notifier.rst
2339 */
2340 add_mm_counter(mm, mm_counter_file(folio), -nr_pages);
2341 }
2342 discard:
2343 if (unlikely(folio_test_hugetlb(folio))) {
2344 hugetlb_remove_rmap(folio);
2345 } else {
2346 folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
2347 }
2348 if (vma->vm_flags & VM_LOCKED)
2349 mlock_drain_local();
2350 folio_put_refs(folio, nr_pages);
2351
2352 /*
2353 * If we are sure that we batched the entire folio and cleared
2354 * all PTEs, we can just optimize and stop right here.
2355 */
2356 if (nr_pages == folio_nr_pages(folio))
2357 goto walk_done;
2358 continue;
2359 walk_abort:
2360 ret = false;
2361 walk_done:
2362 page_vma_mapped_walk_done(&pvmw);
2363 break;
2364 }
2365
2366 mmu_notifier_invalidate_range_end(&range);
2367
2368 return ret;
2369 }
2370
2371 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
2372 {
2373 return vma_is_temporary_stack(vma);
2374 }
2375
2376 static int folio_not_mapped(struct folio *folio)
2377 {
2378 return !folio_mapped(folio);
2379 }
2380
2381 /**
2382 * try_to_unmap - Try to remove all page table mappings to a folio.
2383 * @folio: The folio to unmap.
2384 * @flags: action and flags
2385 *
2386 * Tries to remove all the page table entries which are mapping this
2387 * folio. It is the caller's responsibility to check if the folio is
2388 * still mapped if needed (use TTU_SYNC to prevent accounting races).
2389 *
2390 * Context: Caller must hold the folio lock.
2391 */
2392 void try_to_unmap(struct folio *folio, enum ttu_flags flags)
2393 {
2394 struct rmap_walk_control rwc = {
2395 .rmap_one = try_to_unmap_one,
2396 .arg = (void *)flags,
2397 .done = folio_not_mapped,
2398 .anon_lock = folio_lock_anon_vma_read,
2399 };
2400
2401 if (flags & TTU_RMAP_LOCKED)
2402 rmap_walk_locked(folio, &rwc);
2403 else
2404 rmap_walk(folio, &rwc);
2405 }
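
/*
 * Illustrative sketch (not part of the kernel sources): how a reclaim-style
 * caller is expected to drive try_to_unmap(). TTU_SYNC makes the subsequent
 * folio_mapped() check reliable, and TTU_BATCH_FLUSH defers the TLB flush,
 * which the caller must then issue itself; the surrounding shrink loop is
 * omitted:
 *
 *	folio_lock(folio);
 *	try_to_unmap(folio, TTU_BATCH_FLUSH | TTU_SYNC);
 *	if (folio_mapped(folio))
 *		goto keep_locked;	// some mappings could not be removed
 *	...
 *	try_to_unmap_flush();		// flush the batched TLB entries
 */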
2406
2407 /*
2408 * @arg: enum ttu_flags will be passed to this argument.
2409 *
2410 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
2411 * containing migration entries.
2412 */
2413 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
2414 unsigned long address, void *arg)
2415 {
2416 struct mm_struct *mm = vma->vm_mm;
2417 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2418 bool anon_exclusive, writable, ret = true;
2419 pte_t pteval;
2420 struct page *subpage;
2421 struct mmu_notifier_range range;
2422 enum ttu_flags flags = (enum ttu_flags)(long)arg;
2423 unsigned long pfn;
2424 unsigned long hsz = 0;
2425
2426 /*
2427 * When racing against e.g. zap_pte_range() on another cpu,
2428 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
2429 * try_to_migrate() may return before page_mapped() has become false,
2430 * if page table locking is skipped: use TTU_SYNC to wait for that.
2431 */
2432 if (flags & TTU_SYNC)
2433 pvmw.flags = PVMW_SYNC;
2434
2435 /*
2436 * For THP, we have to assume the worst case, i.e. a PMD, for invalidation.
2437 * For hugetlb, it could be much worse if we need to do pud
2438 * invalidation in the case of pmd sharing.
2439 *
2440 * Note that the folio cannot be freed in this function, as the caller
2441 * of try_to_migrate() must hold a reference on the folio.
2442 */
2443 range.end = vma_address_end(&pvmw);
2444 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2445 address, range.end);
2446 if (folio_test_hugetlb(folio)) {
2447 /*
2448 * If sharing is possible, start and end will be adjusted
2449 * accordingly.
2450 */
2451 adjust_range_if_pmd_sharing_possible(vma, &range.start,
2452 &range.end);
2453
2454 /* We need the huge page size for set_huge_pte_at() */
2455 hsz = huge_page_size(hstate_vma(vma));
2456 }
2457 mmu_notifier_invalidate_range_start(&range);
2458
2459 while (page_vma_mapped_walk(&pvmw)) {
2460 /* PMD-mapped THP migration entry */
2461 if (!pvmw.pte) {
2462 __maybe_unused unsigned long pfn;
2463 __maybe_unused pmd_t pmdval;
2464
2465 if (flags & TTU_SPLIT_HUGE_PMD) {
2466 /*
2467 * split_huge_pmd_locked() might leave the
2468 * folio mapped through PTEs. Retry the walk
2469 * so we can detect this scenario and properly
2470 * abort the walk.
2471 */
2472 split_huge_pmd_locked(vma, pvmw.address,
2473 pvmw.pmd, true);
2474 flags &= ~TTU_SPLIT_HUGE_PMD;
2475 page_vma_mapped_walk_restart(&pvmw);
2476 continue;
2477 }
2478 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2479 pmdval = pmdp_get(pvmw.pmd);
2480 if (likely(pmd_present(pmdval)))
2481 pfn = pmd_pfn(pmdval);
2482 else
2483 pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));
2484
2485 subpage = folio_page(folio, pfn - folio_pfn(folio));
2486
2487 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
2488 !folio_test_pmd_mappable(folio), folio);
2489
2490 if (set_pmd_migration_entry(&pvmw, subpage)) {
2491 ret = false;
2492 page_vma_mapped_walk_done(&pvmw);
2493 break;
2494 }
2495 continue;
2496 #endif
2497 }
2498
2499 /* Unexpected PMD-mapped THP? */
2500 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2501
2502 /*
2503 * Handle PFN swap PTEs, such as device-exclusive ones, that
2504 * actually map pages.
2505 */
2506 pteval = ptep_get(pvmw.pte);
2507 if (likely(pte_present(pteval))) {
2508 pfn = pte_pfn(pteval);
2509 } else {
2510 const softleaf_t entry = softleaf_from_pte(pteval);
2511
2512 pfn = softleaf_to_pfn(entry);
2513 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
2514 }
2515
2516 subpage = folio_page(folio, pfn - folio_pfn(folio));
2517 address = pvmw.address;
2518 anon_exclusive = folio_test_anon(folio) &&
2519 PageAnonExclusive(subpage);
2520
2521 if (folio_test_hugetlb(folio)) {
2522 bool anon = folio_test_anon(folio);
2523
2524 /*
2525 * huge_pmd_unshare may unmap an entire PMD page.
2526 * There is no way of knowing exactly which PMDs may
2527 * be cached for this mm, so we must flush them all.
2528 * start/end were already adjusted above to cover this
2529 * range.
2530 */
2531 flush_cache_range(vma, range.start, range.end);
2532
2533 /*
2534 * To call huge_pmd_unshare, i_mmap_rwsem must be
2535 * held in write mode. Caller needs to explicitly
2536 * do this outside rmap routines.
2537 *
2538 * We also must hold hugetlb vma_lock in write mode.
2539 * Lock order dictates acquiring vma_lock BEFORE
2540 * i_mmap_rwsem. We can only try lock here and
2541 * fail if unsuccessful.
2542 */
2543 if (!anon) {
2544 struct mmu_gather tlb;
2545
2546 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
2547 if (!hugetlb_vma_trylock_write(vma)) {
2548 page_vma_mapped_walk_done(&pvmw);
2549 ret = false;
2550 break;
2551 }
2552
2553 tlb_gather_mmu_vma(&tlb, vma);
2554 if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) {
2555 hugetlb_vma_unlock_write(vma);
2556 huge_pmd_unshare_flush(&tlb, vma);
2557 tlb_finish_mmu(&tlb);
2558 /*
2559 * The PMD table was unmapped,
2560 * consequently unmapping the folio.
2561 */
2562 page_vma_mapped_walk_done(&pvmw);
2563 break;
2564 }
2565 hugetlb_vma_unlock_write(vma);
2566 tlb_finish_mmu(&tlb);
2567 }
2568 /* Nuke the hugetlb page table entry */
2569 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
2570 if (pte_dirty(pteval))
2571 folio_mark_dirty(folio);
2572 writable = pte_write(pteval);
2573 } else if (likely(pte_present(pteval))) {
2574 flush_cache_page(vma, address, pfn);
2575 /* Nuke the page table entry. */
2576 if (should_defer_flush(mm, flags)) {
2577 /*
2578 * We clear the PTE but do not flush so potentially
2579 * a remote CPU could still be writing to the folio.
2580 * If the entry was previously clean then the
2581 * architecture must guarantee that a clear->dirty
2582 * transition on a cached TLB entry is written through
2583 * and traps if the PTE is unmapped.
2584 */
2585 pteval = ptep_get_and_clear(mm, address, pvmw.pte);
2586
2587 set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE);
2588 } else {
2589 pteval = ptep_clear_flush(vma, address, pvmw.pte);
2590 }
2591 if (pte_dirty(pteval))
2592 folio_mark_dirty(folio);
2593 writable = pte_write(pteval);
2594 } else {
2595 const softleaf_t entry = softleaf_from_pte(pteval);
2596
2597 pte_clear(mm, address, pvmw.pte);
2598
2599 writable = softleaf_is_device_private_write(entry);
2600 }
2601
2602 VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) &&
2603 !anon_exclusive, folio);
2604
2605 /* Update high watermark before we lower rss */
2606 update_hiwater_rss(mm);
2607
2608 if (PageHWPoison(subpage)) {
2609 VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio);
2610
2611 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2612 if (folio_test_hugetlb(folio)) {
2613 hugetlb_count_sub(folio_nr_pages(folio), mm);
2614 set_huge_pte_at(mm, address, pvmw.pte, pteval,
2615 hsz);
2616 } else {
2617 dec_mm_counter(mm, mm_counter(folio));
2618 set_pte_at(mm, address, pvmw.pte, pteval);
2619 }
2620 } else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
2621 !userfaultfd_armed(vma)) {
2622 /*
2623 * The guest indicated that the page content is of no
2624 * interest anymore. Simply discard the pte, vmscan
2625 * will take care of the rest.
2626 * A future reference will then fault in a new zero
2627 * page. When userfaultfd is active, we must not drop
2628 * this page though, as its main user (postcopy
2629 * migration) will not expect userfaults on already
2630 * copied pages.
2631 */
2632 dec_mm_counter(mm, mm_counter(folio));
2633 } else {
2634 swp_entry_t entry;
2635 pte_t swp_pte;
2636
2637 /*
2638 * arch_unmap_one() is expected to be a NOP on
2639 * architectures where we could have PFN swap PTEs,
2640 * so we'll not check/care.
2641 */
2642 if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2643 if (folio_test_hugetlb(folio))
2644 set_huge_pte_at(mm, address, pvmw.pte,
2645 pteval, hsz);
2646 else
2647 set_pte_at(mm, address, pvmw.pte, pteval);
2648 ret = false;
2649 page_vma_mapped_walk_done(&pvmw);
2650 break;
2651 }
2652
2653 /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
2654 if (folio_test_hugetlb(folio)) {
2655 if (anon_exclusive &&
2656 hugetlb_try_share_anon_rmap(folio)) {
2657 set_huge_pte_at(mm, address, pvmw.pte,
2658 pteval, hsz);
2659 ret = false;
2660 page_vma_mapped_walk_done(&pvmw);
2661 break;
2662 }
2663 } else if (anon_exclusive &&
2664 folio_try_share_anon_rmap_pte(folio, subpage)) {
2665 set_pte_at(mm, address, pvmw.pte, pteval);
2666 ret = false;
2667 page_vma_mapped_walk_done(&pvmw);
2668 break;
2669 }
2670
2671 /*
2672 * Store the pfn of the page in a special migration
2673 * pte. do_swap_page() will wait until the migration
2674 * pte is removed and then restart fault handling.
2675 */
2676 if (writable)
2677 entry = make_writable_migration_entry(
2678 page_to_pfn(subpage));
2679 else if (anon_exclusive)
2680 entry = make_readable_exclusive_migration_entry(
2681 page_to_pfn(subpage));
2682 else
2683 entry = make_readable_migration_entry(
2684 page_to_pfn(subpage));
2685 if (likely(pte_present(pteval))) {
2686 if (pte_young(pteval))
2687 entry = make_migration_entry_young(entry);
2688 if (pte_dirty(pteval))
2689 entry = make_migration_entry_dirty(entry);
2690 swp_pte = swp_entry_to_pte(entry);
2691 if (pte_soft_dirty(pteval))
2692 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2693 if (pte_uffd_wp(pteval))
2694 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2695 } else {
2696 swp_pte = swp_entry_to_pte(entry);
2697 if (pte_swp_soft_dirty(pteval))
2698 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2699 if (pte_swp_uffd_wp(pteval))
2700 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2701 }
2702 if (folio_test_hugetlb(folio))
2703 set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
2704 hsz);
2705 else
2706 set_pte_at(mm, address, pvmw.pte, swp_pte);
2707 trace_set_migration_pte(address, pte_val(swp_pte),
2708 folio_order(folio));
2709 /*
2710 * No need to invalidate here; it will synchronize
2711 * against the special swap migration pte.
2712 */
2713 }
2714
2715 if (unlikely(folio_test_hugetlb(folio)))
2716 hugetlb_remove_rmap(folio);
2717 else
2718 folio_remove_rmap_pte(folio, subpage, vma);
2719 if (vma->vm_flags & VM_LOCKED)
2720 mlock_drain_local();
2721 folio_put(folio);
2722 }
2723
2724 mmu_notifier_invalidate_range_end(&range);
2725
2726 return ret;
2727 }
2728
2729 /**
2730 * try_to_migrate - try to replace all page table mappings with swap entries
2731 * @folio: the folio to replace page table entries for
2732 * @flags: action and flags
2733 *
2734 * Tries to remove all the page table entries which are mapping this folio and
2735 * replace them with special swap entries. Caller must hold the folio lock.
2736 */
2737 void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2738 {
2739 struct rmap_walk_control rwc = {
2740 .rmap_one = try_to_migrate_one,
2741 .arg = (void *)flags,
2742 .done = folio_not_mapped,
2743 .anon_lock = folio_lock_anon_vma_read,
2744 };
2745
2746 /*
2747 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
2748 * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
2749 */
2750 if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2751 TTU_SYNC | TTU_BATCH_FLUSH)))
2752 return;
2753
2754 if (folio_is_zone_device(folio) &&
2755 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2756 return;
2757
2758 /*
2759 * During exec, a temporary VMA is set up and later moved.
2760 * The VMA is moved under the anon_vma lock but not the
2761 * page tables leading to a race where migration cannot
2762 * find the migration ptes. Rather than increasing the
2763 * locking requirements of exec(), migration skips
2764 * temporary VMAs until after exec() completes.
2765 */
2766 if (!folio_test_ksm(folio) && folio_test_anon(folio))
2767 rwc.invalid_vma = invalid_migration_vma;
2768
2769 if (flags & TTU_RMAP_LOCKED)
2770 rmap_walk_locked(folio, &rwc);
2771 else
2772 rmap_walk(folio, &rwc);
2773 }
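
/*
 * Illustrative sketch (not part of the kernel sources): the usual migration
 * sequence built around try_to_migrate(). Copying the contents and switching
 * the mapping to the destination folio are reduced to a made-up helper;
 * remove_migration_ptes() is the real counterpart that turns the migration
 * entries installed here back into regular PTEs:
 *
 *	folio_lock(src);
 *	try_to_migrate(src, 0);			// install migration entries
 *	if (!folio_mapped(src)) {
 *		copy_and_switch_mapping(src, dst);	// hypothetical
 *		remove_migration_ptes(src, dst, 0);
 *	}
 *	folio_unlock(src);
 */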
2774
2775 #ifdef CONFIG_DEVICE_PRIVATE
2776 /**
2777 * make_device_exclusive() - Mark a page for exclusive use by a device
2778 * @mm: mm_struct of associated target process
2779 * @addr: the virtual address to mark for exclusive device access
2780 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2781 * @foliop: folio pointer will be stored here on success.
2782 *
2783 * This function looks up the page mapped at the given address, grabs a
2784 * folio reference, locks the folio and replaces the PTE with special
2785 * device-exclusive PFN swap entry, preventing access through the process
2786 * page tables. The function will return with the folio locked and referenced.
2787 *
2788 * On fault, the device-exclusive entries are replaced with the original PTE
2789 * under folio lock, after calling MMU notifiers.
2790 *
2791 * Only anonymous non-hugetlb folios are supported and the VMA must have
2792 * write permissions such that we can fault in the anonymous page writable
2793 * in order to mark it exclusive. The caller must hold the mmap_lock in read
2794 * mode.
2795 *
2796 * A driver using this to program access from a device must use a mmu notifier
2797 * critical section to hold a device specific lock during programming. Once
2798 * programming is complete it should drop the folio lock and reference after
2799 * which point CPU access to the page will revoke the exclusive access.
2800 *
2801 * Notes:
2802 * #. This function always operates on individual PTEs mapping individual
2803 * pages. PMD-sized THPs are first remapped to be mapped by PTEs before
2804 * the conversion happens on a single PTE corresponding to @addr.
2805 * #. While concurrent access through the process page tables is prevented,
2806 * concurrent access through other page references (e.g., earlier GUP
2807 * invocation) is not handled and not supported.
2808 * #. device-exclusive entries are considered "clean" and "old" by core-mm.
2809 * Device drivers must update the folio state when informed by MMU
2810 * notifiers.
2811 *
2812 * Returns: pointer to mapped page on success, otherwise a negative error.
2813 */
2814 struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
2815 void *owner, struct folio **foliop)
2816 {
2817 struct mmu_notifier_range range;
2818 struct folio *folio, *fw_folio;
2819 struct vm_area_struct *vma;
2820 struct folio_walk fw;
2821 struct page *page;
2822 swp_entry_t entry;
2823 pte_t swp_pte;
2824 int ret;
2825
2826 mmap_assert_locked(mm);
2827 addr = PAGE_ALIGN_DOWN(addr);
2828
2829 /*
2830 * Fault in the page writable and try to lock it; note that if the
2831 * address would already be marked for exclusive use by a device,
2832 * the GUP call would undo that first by triggering a fault.
2833 *
2834 * If any other device would already map this page exclusively, the
2835 * fault will trigger a conversion to an ordinary
2836 * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE.
2837 */
2838 retry:
2839 page = get_user_page_vma_remote(mm, addr,
2840 FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
2841 &vma);
2842 if (IS_ERR(page))
2843 return page;
2844 folio = page_folio(page);
2845
2846 if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) {
2847 folio_put(folio);
2848 return ERR_PTR(-EOPNOTSUPP);
2849 }
2850
2851 ret = folio_lock_killable(folio);
2852 if (ret) {
2853 folio_put(folio);
2854 return ERR_PTR(ret);
2855 }
2856
2857 /*
2858 * Inform secondary MMUs that we are going to convert this PTE to
2859 * device-exclusive, such that they unmap it now. Note that the
2860 * caller must filter this event out to prevent livelocks.
2861 */
2862 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
2863 mm, addr, addr + PAGE_SIZE, owner);
2864 mmu_notifier_invalidate_range_start(&range);
2865
2866 /*
2867 * Let's do a second walk and make sure we still find the same page
2868 * mapped writable. Note that any page of an anonymous folio can
2869 * only be mapped writable using exactly one PTE ("exclusive"), so
2870 * there cannot be other mappings.
2871 */
2872 fw_folio = folio_walk_start(&fw, vma, addr, 0);
2873 if (fw_folio != folio || fw.page != page ||
2874 fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) {
2875 if (fw_folio)
2876 folio_walk_end(&fw, vma);
2877 mmu_notifier_invalidate_range_end(&range);
2878 folio_unlock(folio);
2879 folio_put(folio);
2880 goto retry;
2881 }
2882
2883 /* Nuke the page table entry so we get the uptodate dirty bit. */
2884 flush_cache_page(vma, addr, page_to_pfn(page));
2885 fw.pte = ptep_clear_flush(vma, addr, fw.ptep);
2886
2887 /* Set the dirty flag on the folio now the PTE is gone. */
2888 if (pte_dirty(fw.pte))
2889 folio_mark_dirty(folio);
2890
2891 /*
2892 * Store the pfn of the page in a special device-exclusive PFN swap PTE.
2893 * do_swap_page() will trigger the conversion back while holding the
2894 * folio lock.
2895 */
2896 entry = make_device_exclusive_entry(page_to_pfn(page));
2897 swp_pte = swp_entry_to_pte(entry);
2898 if (pte_soft_dirty(fw.pte))
2899 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2900 /* The pte is writable, uffd-wp does not apply. */
2901 set_pte_at(mm, addr, fw.ptep, swp_pte);
2902
2903 folio_walk_end(&fw, vma);
2904 mmu_notifier_invalidate_range_end(&range);
2905 *foliop = folio;
2906 return page;
2907 }
2908 EXPORT_SYMBOL_GPL(make_device_exclusive);
2909 #endif
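
/*
 * Illustrative sketch (not part of the kernel sources): the pattern a driver
 * is expected to follow around make_device_exclusive(). The device-side
 * programming helper and the MMU-notifier filtering on @owner are
 * placeholders; the locking and reference ordering follow the description
 * above:
 *
 *	mmap_read_lock(mm);
 *	page = make_device_exclusive(mm, addr, &my_driver_tag, &folio);
 *	if (!IS_ERR(page)) {
 *		program_device_pte(dev, addr, page_to_pfn(page)); // hypothetical
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 *	mmap_read_unlock(mm);
 */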
2910
2911 void __put_anon_vma(struct anon_vma *anon_vma)
2912 {
2913 struct anon_vma *root = anon_vma->root;
2914
2915 anon_vma_free(anon_vma);
2916 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
2917 anon_vma_free(root);
2918 }
2919
2920 static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
2921 struct rmap_walk_control *rwc)
2922 {
2923 struct anon_vma *anon_vma;
2924
2925 if (rwc->anon_lock)
2926 return rwc->anon_lock(folio, rwc);
2927
2928 /*
2929 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
2930 * because that depends on page_mapped(); but not all its usages
2931 * are holding mmap_lock. Users without mmap_lock are required to
2932 * take a reference count to prevent the anon_vma from disappearing.
2933 */
2934 anon_vma = folio_anon_vma(folio);
2935 if (!anon_vma)
2936 return NULL;
2937
2938 if (anon_vma_trylock_read(anon_vma))
2939 goto out;
2940
2941 if (rwc->try_lock) {
2942 anon_vma = NULL;
2943 rwc->contended = true;
2944 goto out;
2945 }
2946
2947 anon_vma_lock_read(anon_vma);
2948 out:
2949 return anon_vma;
2950 }
2951
2952 /*
2953 * rmap_walk_anon - do something to anonymous page using the object-based
2954 * rmap method
2955 * @folio: the folio to be handled
2956 * @rwc: control variable according to each walk type
2957 * @locked: caller holds relevant rmap lock
2958 *
2959 * Find all the mappings of a folio using the mapping pointer and the vma
2960 * chains contained in the anon_vma struct it points to.
2961 */
2962 static void rmap_walk_anon(struct folio *folio,
2963 struct rmap_walk_control *rwc, bool locked)
2964 {
2965 struct anon_vma *anon_vma;
2966 pgoff_t pgoff_start, pgoff_end;
2967 struct anon_vma_chain *avc;
2968
2969 /*
2970 * The folio lock ensures that folio->mapping can't be changed under us
2971 * to an anon_vma with different root.
2972 */
2973 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
2974
2975 if (locked) {
2976 anon_vma = folio_anon_vma(folio);
2977 /* anon_vma disappear under us? */
2978 VM_BUG_ON_FOLIO(!anon_vma, folio);
2979 } else {
2980 anon_vma = rmap_walk_anon_lock(folio, rwc);
2981 }
2982 if (!anon_vma)
2983 return;
2984
2985 pgoff_start = folio_pgoff(folio);
2986 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2987 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2988 pgoff_start, pgoff_end) {
2989 struct vm_area_struct *vma = avc->vma;
2990 unsigned long address = vma_address(vma, pgoff_start,
2991 folio_nr_pages(folio));
2992
2993 VM_BUG_ON_VMA(address == -EFAULT, vma);
2994 cond_resched();
2995
2996 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2997 continue;
2998
2999 if (!rwc->rmap_one(folio, vma, address, rwc->arg))
3000 break;
3001 if (rwc->done && rwc->done(folio))
3002 break;
3003 }
3004
3005 if (!locked)
3006 anon_vma_unlock_read(anon_vma);
3007 }
3008
3009 /**
3010 * __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping
3011 * of a page mapped within a specified page cache object at a specified offset.
3012 *
3013 * @folio: Either the folio whose mappings to traverse, or if NULL,
3014 * the callbacks specified in @rwc will be configured such
3015 * as to be able to look up mappings correctly.
3016 * @mapping: The page cache object whose mapping VMAs we intend to
3017 * traverse. If @folio is non-NULL, this should be equal to
3018 * folio_mapping(folio).
3019 * @pgoff_start: The offset within @mapping of the page which we are
3020 * looking up. If @folio is non-NULL, this should be equal
3021 * to folio_pgoff(folio).
3022 * @nr_pages: The number of pages mapped by the mapping. If @folio is
3023 * non-NULL, this should be equal to folio_nr_pages(folio).
3024 * @rwc: The reverse mapping walk control object describing how
3025 * the traversal should proceed.
3026 * @locked: Is the @mapping already locked? If not, we acquire the
3027 * lock.
3028 */
3029 static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
3030 pgoff_t pgoff_start, unsigned long nr_pages,
3031 struct rmap_walk_control *rwc, bool locked)
3032 {
3033 pgoff_t pgoff_end = pgoff_start + nr_pages - 1;
3034 struct vm_area_struct *vma;
3035
3036 VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio);
3037 VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio);
3038 VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio);
3039
3040 if (!locked) {
3041 if (i_mmap_trylock_read(mapping))
3042 goto lookup;
3043
3044 if (rwc->try_lock) {
3045 rwc->contended = true;
3046 return;
3047 }
3048
3049 i_mmap_lock_read(mapping);
3050 }
3051 lookup:
3052 vma_interval_tree_foreach(vma, &mapping->i_mmap,
3053 pgoff_start, pgoff_end) {
3054 unsigned long address = vma_address(vma, pgoff_start, nr_pages);
3055
3056 VM_BUG_ON_VMA(address == -EFAULT, vma);
3057 cond_resched();
3058
3059 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
3060 continue;
3061
3062 if (!rwc->rmap_one(folio, vma, address, rwc->arg))
3063 goto done;
3064 if (rwc->done && rwc->done(folio))
3065 goto done;
3066 }
3067 done:
3068 if (!locked)
3069 i_mmap_unlock_read(mapping);
3070 }
3071
3072 /*
3073 * rmap_walk_file - do something to file page using the object-based rmap method
3074 * @folio: the folio to be handled
3075 * @rwc: control variable according to each walk type
3076 * @locked: caller holds relevant rmap lock
3077 *
3078 * Find all the mappings of a folio using the mapping pointer and the vma chains
3079 * contained in the address_space struct it points to.
3080 */
3081 static void rmap_walk_file(struct folio *folio,
3082 struct rmap_walk_control *rwc, bool locked)
3083 {
3084 /*
3085 * The folio lock not only makes sure that folio->mapping cannot
3086 * suddenly be NULLified by truncation, it makes sure that the structure
3087 * at mapping cannot be freed and reused yet, so we can safely take
3088 * mapping->i_mmap_rwsem.
3089 */
3090 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3091
3092 if (!folio->mapping)
3093 return;
3094
3095 __rmap_walk_file(folio, folio->mapping, folio->index,
3096 folio_nr_pages(folio), rwc, locked);
3097 }
3098
3099 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
3100 {
3101 if (unlikely(folio_test_ksm(folio)))
3102 rmap_walk_ksm(folio, rwc);
3103 else if (folio_test_anon(folio))
3104 rmap_walk_anon(folio, rwc, false);
3105 else
3106 rmap_walk_file(folio, rwc, false);
3107 }
3108
3109 /* Like rmap_walk, but caller holds relevant rmap lock */
3110 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
3111 {
3112 /* no ksm support for now */
3113 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
3114 if (folio_test_anon(folio))
3115 rmap_walk_anon(folio, rwc, true);
3116 else
3117 rmap_walk_file(folio, rwc, true);
3118 }
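
/*
 * Illustrative sketch (not part of the kernel sources): how the walkers
 * above are typically driven. A caller fills in an rmap_walk_control with a
 * per-VMA callback plus optional termination and locking hooks, then hands
 * the locked folio to rmap_walk(); my_per_vma_callback and my_state are
 * made-up names:
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one	= my_per_vma_callback,	// hypothetical
 *		.arg		= &my_state,
 *		.done		= folio_not_mapped,
 *		.anon_lock	= folio_lock_anon_vma_read,
 *	};
 *
 *	rmap_walk(folio, &rwc);
 */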
3119
3120 #ifdef CONFIG_HUGETLB_PAGE
3121 /*
3122 * The following two functions are for anonymous (private mapped) hugepages.
3123 * Unlike common anonymous pages, anonymous hugepages have no accounting code
3124 * and no lru code, because we handle hugepages differently from common pages.
3125 */
3126 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
3127 unsigned long address, rmap_t flags)
3128 {
3129 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
3130 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
3131
3132 atomic_inc(&folio->_entire_mapcount);
3133 atomic_inc(&folio->_large_mapcount);
3134 if (flags & RMAP_EXCLUSIVE)
3135 SetPageAnonExclusive(&folio->page);
3136 VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
3137 PageAnonExclusive(&folio->page), folio);
3138 }
3139
3140 void hugetlb_add_new_anon_rmap(struct folio *folio,
3141 struct vm_area_struct *vma, unsigned long address)
3142 {
3143 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
3144
3145 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
3146 /* increment count (starts at -1) */
3147 atomic_set(&folio->_entire_mapcount, 0);
3148 atomic_set(&folio->_large_mapcount, 0);
3149 folio_clear_hugetlb_restore_reserve(folio);
3150 __folio_set_anon(folio, vma, address, true);
3151 SetPageAnonExclusive(&folio->page);
3152 }
3153 #endif /* CONFIG_HUGETLB_PAGE */
3154