1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * mm/mremap.c
4 *
5 * (C) Copyright 1996 Linus Torvalds
6 *
7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
8 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
9 */
10
11 #include <linux/mm.h>
12 #include <linux/mm_inline.h>
13 #include <linux/hugetlb.h>
14 #include <linux/shm.h>
15 #include <linux/ksm.h>
16 #include <linux/mman.h>
17 #include <linux/swap.h>
18 #include <linux/capability.h>
19 #include <linux/fs.h>
20 #include <linux/swapops.h>
21 #include <linux/highmem.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/mmu_notifier.h>
25 #include <linux/uaccess.h>
26 #include <linux/userfaultfd_k.h>
27 #include <linux/mempolicy.h>
28
29 #include <asm/cacheflush.h>
30 #include <asm/tlb.h>
31 #include <asm/pgalloc.h>
32
33 #include "internal.h"
34
35 /* Classify the kind of remap operation being performed. */
36 enum mremap_type {
37 MREMAP_INVALID, /* Initial state. */
38 MREMAP_NO_RESIZE, /* old_len == new_len, if not moved, do nothing. */
39 MREMAP_SHRINK, /* old_len > new_len. */
40 MREMAP_EXPAND, /* old_len < new_len. */
41 };
42
43 /*
44 * Describes a VMA mremap() operation and is threaded throughout it.
45 *
46 * Any of the fields may be mutated by the operation; however, these values will
47 * always accurately reflect the remap (for instance, we may adjust lengths and
48 * delta to account for hugetlb alignment).
49 */
50 struct vma_remap_struct {
51 /* User-provided state. */
52 unsigned long addr; /* User-specified address from which we remap. */
53 unsigned long old_len; /* Length of range being remapped. */
54 unsigned long new_len; /* Desired new length of mapping. */
55 unsigned long flags; /* user-specified MREMAP_* flags. */
56 unsigned long new_addr; /* Optionally, desired new address. */
57
58 /* uffd state. */
59 struct vm_userfaultfd_ctx *uf;
60 struct list_head *uf_unmap_early;
61 struct list_head *uf_unmap;
62
63 /* VMA state, determined in do_mremap(). */
64 struct vm_area_struct *vma;
65
66 /* Internal state, determined in do_mremap(). */
67 unsigned long delta; /* Absolute delta of old_len,new_len. */
68 bool mlocked; /* Was the VMA mlock()'d? */
69 enum mremap_type remap_type; /* expand, shrink, etc. */
70 bool mmap_locked; /* Is mm currently write-locked? */
71 unsigned long charged; /* If VM_ACCOUNT, # pages to account. */
72 };
73
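/*
 * Walk the page tables of @mm and return the PUD entry mapping @addr, or
 * NULL if any intermediate level is missing or bad. No allocation is done.
 */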
74 static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
75 {
76 pgd_t *pgd;
77 p4d_t *p4d;
78 pud_t *pud;
79
80 pgd = pgd_offset(mm, addr);
81 if (pgd_none_or_clear_bad(pgd))
82 return NULL;
83
84 p4d = p4d_offset(pgd, addr);
85 if (p4d_none_or_clear_bad(p4d))
86 return NULL;
87
88 pud = pud_offset(p4d, addr);
89 if (pud_none_or_clear_bad(pud))
90 return NULL;
91
92 return pud;
93 }
94
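/*
 * Return the PMD entry mapping @addr in @mm, or NULL if the PUD walk fails
 * or the PMD is empty. Like get_old_pud(), this never allocates.
 */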
95 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
96 {
97 pud_t *pud;
98 pmd_t *pmd;
99
100 pud = get_old_pud(mm, addr);
101 if (!pud)
102 return NULL;
103
104 pmd = pmd_offset(pud, addr);
105 if (pmd_none(*pmd))
106 return NULL;
107
108 return pmd;
109 }
110
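/*
 * Allocate (if necessary) the intermediate levels and return the PUD for
 * @addr at the destination, or NULL on allocation failure.
 */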
111 static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr)
112 {
113 pgd_t *pgd;
114 p4d_t *p4d;
115
116 pgd = pgd_offset(mm, addr);
117 p4d = p4d_alloc(mm, pgd, addr);
118 if (!p4d)
119 return NULL;
120
121 return pud_alloc(mm, p4d, addr);
122 }
123
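/*
 * Allocate (if necessary) and return the destination PMD for @addr, or NULL
 * if an intermediate page table could not be allocated.
 */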
124 static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
125 {
126 pud_t *pud;
127 pmd_t *pmd;
128
129 pud = alloc_new_pud(mm, addr);
130 if (!pud)
131 return NULL;
132
133 pmd = pmd_alloc(mm, pud, addr);
134 if (!pmd)
135 return NULL;
136
137 VM_BUG_ON(pmd_trans_huge(*pmd));
138
139 return pmd;
140 }
141
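/*
 * Take the rmap locks (the i_mmap lock of the mapped file, if any, and the
 * anon_vma lock) so that rmap walkers see a consistent view of @vma while
 * its page table entries are being moved. Paired with drop_rmap_locks().
 */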
142 static void take_rmap_locks(struct vm_area_struct *vma)
143 {
144 if (vma->vm_file)
145 i_mmap_lock_write(vma->vm_file->f_mapping);
146 if (vma->anon_vma)
147 anon_vma_lock_write(vma->anon_vma);
148 }
149
150 static void drop_rmap_locks(struct vm_area_struct *vma)
151 {
152 if (vma->anon_vma)
153 anon_vma_unlock_write(vma->anon_vma);
154 if (vma->vm_file)
155 i_mmap_unlock_write(vma->vm_file->f_mapping);
156 }
157
158 static pte_t move_soft_dirty_pte(pte_t pte)
159 {
160 /*
161 * Set soft dirty bit so we can notice
162 * in userspace the ptes were moved.
163 */
164 #ifdef CONFIG_MEM_SOFT_DIRTY
165 if (pte_present(pte))
166 pte = pte_mksoft_dirty(pte);
167 else if (is_swap_pte(pte))
168 pte = pte_swp_mksoft_dirty(pte);
169 #endif
170 return pte;
171 }
172
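/*
 * Move the PTEs spanning [old_addr, old_addr + extent) from @old_pmd to
 * @new_pmd, taking the rmap locks if the PMC requires it. Returns 0 on
 * success, or -EAGAIN if a page table could not be mapped and the caller
 * should retry.
 */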
173 static int move_ptes(struct pagetable_move_control *pmc,
174 unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
175 {
176 struct vm_area_struct *vma = pmc->old;
177 bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
178 struct mm_struct *mm = vma->vm_mm;
179 pte_t *old_pte, *new_pte, pte;
180 pmd_t dummy_pmdval;
181 spinlock_t *old_ptl, *new_ptl;
182 bool force_flush = false;
183 unsigned long old_addr = pmc->old_addr;
184 unsigned long new_addr = pmc->new_addr;
185 unsigned long old_end = old_addr + extent;
186 unsigned long len = old_end - old_addr;
187 int err = 0;
188
189 /*
190 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
191 * locks to ensure that rmap will always observe either the old or the
192 * new ptes. This is the easiest way to avoid races with
193 * truncate_pagecache(), page migration, etc...
194 *
195 * When need_rmap_locks is false, we use other ways to avoid
196 * such races:
197 *
198 * - During exec() shift_arg_pages(), we use a specially tagged vma
199 * which rmap call sites look for using vma_is_temporary_stack().
200 *
201 * - During mremap(), new_vma is often known to be placed after vma
202 * in rmap traversal order. This ensures rmap will always observe
203 * either the old pte, or the new pte, or both (the page table locks
204 * serialize access to individual ptes, but only rmap traversal
205 * order guarantees that we won't miss both the old and new ptes).
206 */
207 if (pmc->need_rmap_locks)
208 take_rmap_locks(vma);
209
210 /*
211 * We don't have to worry about the ordering of src and dst
212 * pte locks because exclusive mmap_lock prevents deadlock.
213 */
214 old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
215 if (!old_pte) {
216 err = -EAGAIN;
217 goto out;
218 }
219 /*
220 * Now new_pte is none, so hpage_collapse_scan_file() path can not find
221 * this by traversing file->f_mapping, so there is no concurrency with
222 * retract_page_tables(). In addition, we already hold the exclusive
223 * mmap_lock, so this new_pte page is stable, so there is no need to get
224 * pmdval and do pmd_same() check.
225 */
226 new_pte = pte_offset_map_rw_nolock(mm, new_pmd, new_addr, &dummy_pmdval,
227 &new_ptl);
228 if (!new_pte) {
229 pte_unmap_unlock(old_pte, old_ptl);
230 err = -EAGAIN;
231 goto out;
232 }
233 if (new_ptl != old_ptl)
234 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
235 flush_tlb_batched_pending(vma->vm_mm);
236 arch_enter_lazy_mmu_mode();
237
238 for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
239 new_pte++, new_addr += PAGE_SIZE) {
240 if (pte_none(ptep_get(old_pte)))
241 continue;
242
243 pte = ptep_get_and_clear(mm, old_addr, old_pte);
244 /*
245 * If we are remapping a valid PTE, make sure
246 * to flush TLB before we drop the PTL for the
247 * PTE.
248 *
249 * NOTE! Both old and new PTL matter: the old one
250 * for racing with folio_mkclean(), the new one to
251 * make sure the physical page stays valid until
252 * the TLB entry for the old mapping has been
253 * flushed.
254 */
255 if (pte_present(pte))
256 force_flush = true;
257 pte = move_pte(pte, old_addr, new_addr);
258 pte = move_soft_dirty_pte(pte);
259
260 if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
261 pte_clear(mm, new_addr, new_pte);
262 else {
263 if (need_clear_uffd_wp) {
264 if (pte_present(pte))
265 pte = pte_clear_uffd_wp(pte);
266 else if (is_swap_pte(pte))
267 pte = pte_swp_clear_uffd_wp(pte);
268 }
269 set_pte_at(mm, new_addr, new_pte, pte);
270 }
271 }
272
273 arch_leave_lazy_mmu_mode();
274 if (force_flush)
275 flush_tlb_range(vma, old_end - len, old_end);
276 if (new_ptl != old_ptl)
277 spin_unlock(new_ptl);
278 pte_unmap(new_pte - 1);
279 pte_unmap_unlock(old_pte - 1, old_ptl);
280 out:
281 if (pmc->need_rmap_locks)
282 drop_rmap_locks(vma);
283 return err;
284 }
285
286 #ifndef arch_supports_page_table_move
287 #define arch_supports_page_table_move arch_supports_page_table_move
288 static inline bool arch_supports_page_table_move(void)
289 {
290 return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
291 IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
292 }
293 #endif
294
295 #ifdef CONFIG_HAVE_MOVE_PMD
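/*
 * Move an entire PMD's worth of page table entries by detaching the PMD
 * table from its old slot and installing it in the new one, avoiding a
 * PTE-by-PTE copy. Returns true if the PMD was moved, false if the caller
 * must fall back to moving individual PTEs.
 */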
296 static bool move_normal_pmd(struct pagetable_move_control *pmc,
297 pmd_t *old_pmd, pmd_t *new_pmd)
298 {
299 spinlock_t *old_ptl, *new_ptl;
300 struct vm_area_struct *vma = pmc->old;
301 struct mm_struct *mm = vma->vm_mm;
302 bool res = false;
303 pmd_t pmd;
304
305 if (!arch_supports_page_table_move())
306 return false;
307 /*
308 * The destination pmd shouldn't be established, free_pgtables()
309 * should have released it.
310 *
311 * However, there's a case during execve() where we use mremap
312 * to move the initial stack, and in that case the target area
313 * may overlap the source area (always moving down).
314 *
315 * If everything is PMD-aligned, that works fine, as moving
316 * each pmd down will clear the source pmd. But if we first
317 * have a few 4kB-only pages that get moved down, and then
318 * hit the "now the rest is PMD-aligned, let's do everything
319 * one pmd at a time", we will still have the old (now empty
320 * of any 4kB pages, but still there) PMD in the page table
321 * tree.
322 *
323 * Warn on it once - because we really should try to figure
324 * out how to do this better - but then say "I won't move
325 * this pmd".
326 *
327 * One alternative might be to just unmap the target pmd at
328 * this point, and verify that it really is empty. We'll see.
329 */
330 if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
331 return false;
332
333 /* If this pmd belongs to a uffd vma with remap events disabled, we need
334 * to ensure that the uffd-wp state is cleared from all pgtables. This
335 * means recursing into lower page tables in move_page_tables(), and we
336 * can reuse the existing code if we simply treat the entry as "not
337 * moved".
338 */
339 if (vma_has_uffd_without_event_remap(vma))
340 return false;
341
342 /*
343 * We don't have to worry about the ordering of src and dst
344 * ptlocks because exclusive mmap_lock prevents deadlock.
345 */
346 old_ptl = pmd_lock(mm, old_pmd);
347 new_ptl = pmd_lockptr(mm, new_pmd);
348 if (new_ptl != old_ptl)
349 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
350
351 pmd = *old_pmd;
352
353 /* Racing with collapse? */
354 if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
355 goto out_unlock;
356 /* Clear the pmd */
357 pmd_clear(old_pmd);
358 res = true;
359
360 VM_BUG_ON(!pmd_none(*new_pmd));
361
362 pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
363 flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE);
364 out_unlock:
365 if (new_ptl != old_ptl)
366 spin_unlock(new_ptl);
367 spin_unlock(old_ptl);
368
369 return res;
370 }
371 #else
372 static inline bool move_normal_pmd(struct pagetable_move_control *pmc,
373 pmd_t *old_pmd, pmd_t *new_pmd)
374 {
375 return false;
376 }
377 #endif
378
379 #if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
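/*
 * As move_normal_pmd(), but at PUD level: re-point the destination PUD entry
 * at the existing lower page table rather than copying entries one by one.
 */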
380 static bool move_normal_pud(struct pagetable_move_control *pmc,
381 pud_t *old_pud, pud_t *new_pud)
382 {
383 spinlock_t *old_ptl, *new_ptl;
384 struct vm_area_struct *vma = pmc->old;
385 struct mm_struct *mm = vma->vm_mm;
386 pud_t pud;
387
388 if (!arch_supports_page_table_move())
389 return false;
390 /*
391 * The destination pud shouldn't be established, free_pgtables()
392 * should have released it.
393 */
394 if (WARN_ON_ONCE(!pud_none(*new_pud)))
395 return false;
396
397 /* If this pud belongs to a uffd vma with remap events disabled, we need
398 * to ensure that the uffd-wp state is cleared from all pgtables. This
399 * means recursing into lower page tables in move_page_tables(), and we
400 * can reuse the existing code if we simply treat the entry as "not
401 * moved".
402 */
403 if (vma_has_uffd_without_event_remap(vma))
404 return false;
405
406 /*
407 * We don't have to worry about the ordering of src and dst
408 * ptlocks because exclusive mmap_lock prevents deadlock.
409 */
410 old_ptl = pud_lock(mm, old_pud);
411 new_ptl = pud_lockptr(mm, new_pud);
412 if (new_ptl != old_ptl)
413 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
414
415 /* Clear the pud */
416 pud = *old_pud;
417 pud_clear(old_pud);
418
419 VM_BUG_ON(!pud_none(*new_pud));
420
421 pud_populate(mm, new_pud, pud_pgtable(pud));
422 flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE);
423 if (new_ptl != old_ptl)
424 spin_unlock(new_ptl);
425 spin_unlock(old_ptl);
426
427 return true;
428 }
429 #else
430 static inline bool move_normal_pud(struct pagetable_move_control *pmc,
431 pud_t *old_pud, pud_t *new_pud)
432 {
433 return false;
434 }
435 #endif
436
437 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
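/*
 * Move a huge (leaf) PUD entry from the old slot to the new one, holding
 * both page table locks while doing so.
 */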
438 static bool move_huge_pud(struct pagetable_move_control *pmc,
439 pud_t *old_pud, pud_t *new_pud)
440 {
441 spinlock_t *old_ptl, *new_ptl;
442 struct vm_area_struct *vma = pmc->old;
443 struct mm_struct *mm = vma->vm_mm;
444 pud_t pud;
445
446 /*
447 * The destination pud shouldn't be established, free_pgtables()
448 * should have released it.
449 */
450 if (WARN_ON_ONCE(!pud_none(*new_pud)))
451 return false;
452
453 /*
454 * We don't have to worry about the ordering of src and dst
455 * ptlocks because exclusive mmap_lock prevents deadlock.
456 */
457 old_ptl = pud_lock(mm, old_pud);
458 new_ptl = pud_lockptr(mm, new_pud);
459 if (new_ptl != old_ptl)
460 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
461
462 /* Clear the pud */
463 pud = *old_pud;
464 pud_clear(old_pud);
465
466 VM_BUG_ON(!pud_none(*new_pud));
467
468 /* Set the new pud */
469 /* mark soft_dirty when we add pud level soft dirty support */
470 set_pud_at(mm, pmc->new_addr, new_pud, pud);
471 flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE);
472 if (new_ptl != old_ptl)
473 spin_unlock(new_ptl);
474 spin_unlock(old_ptl);
475
476 return true;
477 }
478 #else
479 static bool move_huge_pud(struct pagetable_move_control *pmc,
480 pud_t *old_pud, pud_t *new_pud)
481
482 {
483 WARN_ON_ONCE(1);
484 return false;
485
486 }
487 #endif
488
489 enum pgt_entry {
490 NORMAL_PMD,
491 HPAGE_PMD,
492 NORMAL_PUD,
493 HPAGE_PUD,
494 };
495
496 /*
497 * Returns an extent of the corresponding size for the pgt_entry specified if
498 * valid. Else returns a smaller extent bounded by the end of the source and
499 * destination pgt_entry.
500 */
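/*
 * Illustrative example (assuming 2 MiB PMDs): with old_addr == 0x1ff000,
 * old_end == 0x600000 and new_addr == 0x3ff000, the next PMD boundary lies
 * at 0x200000 on the source side and 0x400000 on the destination side, so
 * the extent returned for NORMAL_PMD is only 0x1000 bytes.
 */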
501 static __always_inline unsigned long get_extent(enum pgt_entry entry,
502 struct pagetable_move_control *pmc)
503 {
504 unsigned long next, extent, mask, size;
505 unsigned long old_addr = pmc->old_addr;
506 unsigned long old_end = pmc->old_end;
507 unsigned long new_addr = pmc->new_addr;
508
509 switch (entry) {
510 case HPAGE_PMD:
511 case NORMAL_PMD:
512 mask = PMD_MASK;
513 size = PMD_SIZE;
514 break;
515 case HPAGE_PUD:
516 case NORMAL_PUD:
517 mask = PUD_MASK;
518 size = PUD_SIZE;
519 break;
520 default:
521 BUILD_BUG();
522 break;
523 }
524
525 next = (old_addr + size) & mask;
526 /* even if next overflowed, extent below will be ok */
527 extent = next - old_addr;
528 if (extent > old_end - old_addr)
529 extent = old_end - old_addr;
530 next = (new_addr + size) & mask;
531 if (extent > next - new_addr)
532 extent = next - new_addr;
533 return extent;
534 }
535
536 /*
537 * Should move_pgt_entry() acquire the rmap locks? This is either expressed in
538 * the PMC, or overridden in the case of normal, larger page tables.
539 */
540 static bool should_take_rmap_locks(struct pagetable_move_control *pmc,
541 enum pgt_entry entry)
542 {
543 switch (entry) {
544 case NORMAL_PMD:
545 case NORMAL_PUD:
546 return true;
547 default:
548 return pmc->need_rmap_locks;
549 }
550 }
551
552 /*
553 * Attempts to speedup the move by moving entry at the level corresponding to
554 * pgt_entry. Returns true if the move was successful, else false.
555 */
556 static bool move_pgt_entry(struct pagetable_move_control *pmc,
557 enum pgt_entry entry, void *old_entry, void *new_entry)
558 {
559 bool moved = false;
560 bool need_rmap_locks = should_take_rmap_locks(pmc, entry);
561
562 /* See comment in move_ptes() */
563 if (need_rmap_locks)
564 take_rmap_locks(pmc->old);
565
566 switch (entry) {
567 case NORMAL_PMD:
568 moved = move_normal_pmd(pmc, old_entry, new_entry);
569 break;
570 case NORMAL_PUD:
571 moved = move_normal_pud(pmc, old_entry, new_entry);
572 break;
573 case HPAGE_PMD:
574 moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
575 move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry,
576 new_entry);
577 break;
578 case HPAGE_PUD:
579 moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
580 move_huge_pud(pmc, old_entry, new_entry);
581 break;
582
583 default:
584 WARN_ON_ONCE(1);
585 break;
586 }
587
588 if (need_rmap_locks)
589 drop_rmap_locks(pmc->old);
590
591 return moved;
592 }
593
594 /*
595 * A helper to check if aligning down is OK. The aligned address should fall
596 * on *no mapping*. For the stack moving down, that's a special move within
597 * the VMA that is created to span the source and destination of the move,
598 * so we make an exception for it.
599 */
600 static bool can_align_down(struct pagetable_move_control *pmc,
601 struct vm_area_struct *vma, unsigned long addr_to_align,
602 unsigned long mask)
603 {
604 unsigned long addr_masked = addr_to_align & mask;
605
606 /*
607 * If @addr_to_align of either source or destination is not the beginning
608 * of the corresponding VMA, we can't align down or we will destroy part
609 * of the current mapping.
610 */
611 if (!pmc->for_stack && vma->vm_start != addr_to_align)
612 return false;
613
614 /* In the stack case we explicitly permit in-VMA alignment. */
615 if (pmc->for_stack && addr_masked >= vma->vm_start)
616 return true;
617
618 /*
619 * Make sure the realignment doesn't cause the address to fall on an
620 * existing mapping.
621 */
622 return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
623 }
624
625 /*
626 * Determine if we are in fact able to realign for efficiency to a higher page
627 * table boundary.
628 */
629 static bool can_realign_addr(struct pagetable_move_control *pmc,
630 unsigned long pagetable_mask)
631 {
632 unsigned long align_mask = ~pagetable_mask;
633 unsigned long old_align = pmc->old_addr & align_mask;
634 unsigned long new_align = pmc->new_addr & align_mask;
635 unsigned long pagetable_size = align_mask + 1;
636 unsigned long old_align_next = pagetable_size - old_align;
637
638 /*
639 * We don't want to have to go hunting for VMAs from the end of the old
640 * VMA to the next page table boundary, also we want to make sure the
641 * operation is worthwhile.
642 *
643 * So ensure that we only perform this realignment if the end of the
644 * range being copied reaches or crosses the page table boundary.
645 *
646 * boundary boundary
647 * .<- old_align -> .
648 * . |----------------.-----------|
649 * . | vma . |
650 * . |----------------.-----------|
651 * . <----------------.----------->
652 * . len_in
653 * <------------------------------->
654 * . pagetable_size .
655 * . <---------------->
656 * . old_align_next .
657 */
658 if (pmc->len_in < old_align_next)
659 return false;
660
661 /* Skip if the addresses are already aligned. */
662 if (old_align == 0)
663 return false;
664
665 /* Only realign if the new and old addresses are mutually aligned. */
666 if (old_align != new_align)
667 return false;
668
669 /* Ensure realignment doesn't cause overlap with existing mappings. */
670 if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) ||
671 !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask))
672 return false;
673
674 return true;
675 }
676
677 /*
678 * Opportunistically realign to specified boundary for faster copy.
679 *
680 * Consider an mremap() of a VMA with page table boundaries as below, and no
681 * preceding VMAs from the lower page table boundary to the start of the VMA,
682 * with the end of the range reaching or crossing the page table boundary.
683 *
684 * boundary boundary
685 * . |----------------.-----------|
686 * . | vma . |
687 * . |----------------.-----------|
688 * . pmc->old_addr . pmc->old_end
689 * . <---------------------------->
690 * . move these page tables
691 *
692 * If we proceed with moving page tables in this scenario, we will have a lot of
693 * work to do traversing old page tables and establishing new ones in the
694 * destination across multiple lower level page tables.
695 *
696 * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the
697 * page table boundary, so we can simply copy a single page table entry for the
698 * aligned portion of the VMA instead:
699 *
700 * boundary boundary
701 * . |----------------.-----------|
702 * . | vma . |
703 * . |----------------.-----------|
704 * pmc->old_addr . pmc->old_end
705 * <------------------------------------------->
706 * . move these page tables
707 */
708 static void try_realign_addr(struct pagetable_move_control *pmc,
709 unsigned long pagetable_mask)
710 {
711
712 if (!can_realign_addr(pmc, pagetable_mask))
713 return;
714
715 /*
716 * Simply align to page table boundaries. Note that we do NOT update the
717 * pmc->old_end value, and since the move_page_tables() operation spans
718 * from [old_addr, old_end) (offsetting new_addr as it is performed),
719 * this simply changes the start of the copy, not the end.
720 */
721 pmc->old_addr &= pagetable_mask;
722 pmc->new_addr &= pagetable_mask;
723 }
724
725 /* Is the page table move operation done? */
726 static bool pmc_done(struct pagetable_move_control *pmc)
727 {
728 return pmc->old_addr >= pmc->old_end;
729 }
730
731 /* Advance to the next page table, offset by extent bytes. */
732 static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent)
733 {
734 pmc->old_addr += extent;
735 pmc->new_addr += extent;
736 }
737
738 /*
739 * Determine how many bytes in the specified input range have had their page
740 * tables moved so far.
741 */
742 static unsigned long pmc_progress(struct pagetable_move_control *pmc)
743 {
744 unsigned long orig_old_addr = pmc->old_end - pmc->len_in;
745 unsigned long old_addr = pmc->old_addr;
746
747 /*
748 * Prevent negative return values when {old,new}_addr was realigned but
749 * we broke out of the loop in move_page_tables() for the first PMD
750 * itself.
751 */
752 return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr;
753 }
754
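/*
 * Move the page tables spanning [old_addr, old_end) described by @pmc to the
 * new location, preferring whole-PUD and whole-PMD moves where possible and
 * falling back to moving individual PTEs. Returns the number of bytes whose
 * page tables were successfully moved.
 */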
755 unsigned long move_page_tables(struct pagetable_move_control *pmc)
756 {
757 unsigned long extent;
758 struct mmu_notifier_range range;
759 pmd_t *old_pmd, *new_pmd;
760 pud_t *old_pud, *new_pud;
761 struct mm_struct *mm = pmc->old->vm_mm;
762
763 if (!pmc->len_in)
764 return 0;
765
766 if (is_vm_hugetlb_page(pmc->old))
767 return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr,
768 pmc->new_addr, pmc->len_in);
769
770 /*
771 * If possible, realign addresses to PMD boundary for faster copy.
772 * Only realign if the mremap copying hits a PMD boundary.
773 */
774 try_realign_addr(pmc, PMD_MASK);
775
776 flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end);
777 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm,
778 pmc->old_addr, pmc->old_end);
779 mmu_notifier_invalidate_range_start(&range);
780
781 for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
782 cond_resched();
783 /*
784 * If extent is PUD-sized try to speed up the move by moving at the
785 * PUD level if possible.
786 */
787 extent = get_extent(NORMAL_PUD, pmc);
788
789 old_pud = get_old_pud(mm, pmc->old_addr);
790 if (!old_pud)
791 continue;
792 new_pud = alloc_new_pud(mm, pmc->new_addr);
793 if (!new_pud)
794 break;
795 if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
796 if (extent == HPAGE_PUD_SIZE) {
797 move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
798 /* We ignore and continue on error? */
799 continue;
800 }
801 } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
802 if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud))
803 continue;
804 }
805
806 extent = get_extent(NORMAL_PMD, pmc);
807 old_pmd = get_old_pmd(mm, pmc->old_addr);
808 if (!old_pmd)
809 continue;
810 new_pmd = alloc_new_pmd(mm, pmc->new_addr);
811 if (!new_pmd)
812 break;
813 again:
814 if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
815 pmd_devmap(*old_pmd)) {
816 if (extent == HPAGE_PMD_SIZE &&
817 move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
818 continue;
819 split_huge_pmd(pmc->old, old_pmd, pmc->old_addr);
820 } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
821 extent == PMD_SIZE) {
822 /*
823 * If the extent is PMD-sized, try to speed the move by
824 * moving at the PMD level if possible.
825 */
826 if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd))
827 continue;
828 }
829 if (pmd_none(*old_pmd))
830 continue;
831 if (pte_alloc(pmc->new->vm_mm, new_pmd))
832 break;
833 if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0)
834 goto again;
835 }
836
837 mmu_notifier_invalidate_range_end(&range);
838
839 return pmc_progress(pmc);
840 }
841
842 /* Set vrm->delta to the difference in VMA size specified by user. */
843 static void vrm_set_delta(struct vma_remap_struct *vrm)
844 {
845 vrm->delta = abs_diff(vrm->old_len, vrm->new_len);
846 }
847
848 /* Determine what kind of remap this is - shrink, expand or no resize at all. */
849 static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm)
850 {
851 if (vrm->delta == 0)
852 return MREMAP_NO_RESIZE;
853
854 if (vrm->old_len > vrm->new_len)
855 return MREMAP_SHRINK;
856
857 return MREMAP_EXPAND;
858 }
859
860 /*
861 * When moving a VMA to vrm->new_addr, does this result in the new and old VMAs
862 * overlapping?
863 */
864 static bool vrm_overlaps(struct vma_remap_struct *vrm)
865 {
866 unsigned long start_old = vrm->addr;
867 unsigned long start_new = vrm->new_addr;
868 unsigned long end_old = vrm->addr + vrm->old_len;
869 unsigned long end_new = vrm->new_addr + vrm->new_len;
870
871 /*
872 * start_old end_old
873 * |-----------|
874 * | |
875 * |-----------|
876 * |-------------|
877 * | |
878 * |-------------|
879 * start_new end_new
880 */
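/*
 * For instance (illustrative): moving [0x1000, 0x3000) to 0x2000 overlaps,
 * whereas moving it to 0x3000 does not, since the ranges are half-open.
 */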
881 if (end_old > start_new && end_new > start_old)
882 return true;
883
884 return false;
885 }
886
887 /* Do the mremap() flags require that the new_addr parameter be specified? */
888 static bool vrm_implies_new_addr(struct vma_remap_struct *vrm)
889 {
890 return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP);
891 }
892
893 /*
894 * Find an unmapped area for the requested vrm->new_addr.
895 *
896 * If MREMAP_FIXED then this is equivalent to a MAP_FIXED mmap() call. If only
897 * MREMAP_DONTUNMAP is set, then this is equivalent to providing a hint to
898 * mmap(), otherwise this is equivalent to mmap() specifying a NULL address.
899 *
900 * Returns 0 on success (with vrm->new_addr updated), or an error code upon
901 * failure.
902 */
903 static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm)
904 {
905 struct vm_area_struct *vma = vrm->vma;
906 unsigned long map_flags = 0;
907 /* Page Offset _into_ the VMA. */
908 pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT;
909 pgoff_t pgoff = vma->vm_pgoff + internal_pgoff;
910 unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0;
911 unsigned long res;
912
913 if (vrm->flags & MREMAP_FIXED)
914 map_flags |= MAP_FIXED;
915 if (vma->vm_flags & VM_MAYSHARE)
916 map_flags |= MAP_SHARED;
917
918 res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff,
919 map_flags);
920 if (IS_ERR_VALUE(res))
921 return res;
922
923 vrm->new_addr = res;
924 return 0;
925 }
926
927 /*
928 * Keep track of pages which have been added to the memory mapping. If the VMA
929 * is accounted, also check to see if there is sufficient memory.
930 *
931 * Returns true on success, false if insufficient memory to charge.
932 */
933 static bool vrm_charge(struct vma_remap_struct *vrm)
934 {
935 unsigned long charged;
936
937 if (!(vrm->vma->vm_flags & VM_ACCOUNT))
938 return true;
939
940 /*
941 * If we don't unmap the old mapping, then we account the entirety of
942 * the length of the new one. Otherwise it's just the delta in size.
943 */
944 if (vrm->flags & MREMAP_DONTUNMAP)
945 charged = vrm->new_len >> PAGE_SHIFT;
946 else
947 charged = vrm->delta >> PAGE_SHIFT;
948
949
950 /* This accounts 'charged' pages of memory. */
951 if (security_vm_enough_memory_mm(current->mm, charged))
952 return false;
953
954 vrm->charged = charged;
955 return true;
956 }
957
958 /*
959 * An error has occurred, so we will not be using vrm->charged memory. Unaccount
960 * this memory if the VMA is accounted.
961 */
962 static void vrm_uncharge(struct vma_remap_struct *vrm)
963 {
964 if (!(vrm->vma->vm_flags & VM_ACCOUNT))
965 return;
966
967 vm_unacct_memory(vrm->charged);
968 vrm->charged = 0;
969 }
970
971 /*
972 * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to
973 * account for 'bytes' memory used, and if locked, indicate this in the VRM so
974 * we can handle this correctly later.
975 */
976 static void vrm_stat_account(struct vma_remap_struct *vrm,
977 unsigned long bytes)
978 {
979 unsigned long pages = bytes >> PAGE_SHIFT;
980 struct mm_struct *mm = current->mm;
981 struct vm_area_struct *vma = vrm->vma;
982
983 vm_stat_account(mm, vma->vm_flags, pages);
984 if (vma->vm_flags & VM_LOCKED) {
985 mm->locked_vm += pages;
986 vrm->mlocked = true;
987 }
988 }
989
990 /*
991 * Perform checks before attempting to write a VMA prior to it being
992 * moved.
993 */
994 static unsigned long prep_move_vma(struct vma_remap_struct *vrm)
995 {
996 unsigned long err = 0;
997 struct vm_area_struct *vma = vrm->vma;
998 unsigned long old_addr = vrm->addr;
999 unsigned long old_len = vrm->old_len;
1000 unsigned long dummy = vma->vm_flags;
1001
1002 /*
1003 * We'd prefer to avoid failure later on in do_munmap:
1004 * which may split one vma into three before unmapping.
1005 */
1006 if (current->mm->map_count >= sysctl_max_map_count - 3)
1007 return -ENOMEM;
1008
1009 if (vma->vm_ops && vma->vm_ops->may_split) {
1010 if (vma->vm_start != old_addr)
1011 err = vma->vm_ops->may_split(vma, old_addr);
1012 if (!err && vma->vm_end != old_addr + old_len)
1013 err = vma->vm_ops->may_split(vma, old_addr + old_len);
1014 if (err)
1015 return err;
1016 }
1017
1018 /*
1019 * Advise KSM to break any KSM pages in the area to be moved:
1020 * it would be confusing if they were to turn up at the new
1021 * location, where they happen to coincide with different KSM
1022 * pages recently unmapped. But leave vma->vm_flags as it was,
1023 * so KSM can come around to merge on vma and new_vma afterwards.
1024 */
1025 err = ksm_madvise(vma, old_addr, old_addr + old_len,
1026 MADV_UNMERGEABLE, &dummy);
1027 if (err)
1028 return err;
1029
1030 return 0;
1031 }
1032
1033 /*
1034 * Unmap source VMA for VMA move, turning it from a copy to a move, being
1035 * careful to ensure we do not underflow memory account while doing so if an
1036 * accountable move.
1037 *
1038 * This is best effort, if we fail to unmap then we simply try to correct
1039 * accounting and exit.
1040 */
1041 static void unmap_source_vma(struct vma_remap_struct *vrm)
1042 {
1043 struct mm_struct *mm = current->mm;
1044 unsigned long addr = vrm->addr;
1045 unsigned long len = vrm->old_len;
1046 struct vm_area_struct *vma = vrm->vma;
1047 VMA_ITERATOR(vmi, mm, addr);
1048 int err;
1049 unsigned long vm_start;
1050 unsigned long vm_end;
1051 /*
1052 * It might seem odd that we check for MREMAP_DONTUNMAP here, given this
1053 * function implies that we unmap the original VMA, which seems
1054 * contradictory.
1055 *
1056 * However, this occurs when this operation was attempted and an error
1057 * arose, in which case we _do_ wish to unmap the _new_ VMA, which means
1058 * we actually _do_ want it to be unaccounted.
1059 */
1060 bool accountable_move = (vma->vm_flags & VM_ACCOUNT) &&
1061 !(vrm->flags & MREMAP_DONTUNMAP);
1062
1063 /*
1064 * So we perform a trick here to prevent incorrect accounting. Any merge
1065 * or new VMA allocation performed in copy_vma() does not adjust
1066 * accounting, it is expected that callers handle this.
1067 *
1068 * And indeed we already have, accounting appropriately in the case of
1069 * both in vrm_charge().
1070 *
1071 * However, when we unmap the existing VMA (to effect the move), this
1072 * code will, if the VMA has VM_ACCOUNT set, attempt to unaccount
1073 * removed pages.
1074 *
1075 * To avoid this we temporarily clear this flag, reinstating on any
1076 * portions of the original VMA that remain.
1077 */
1078 if (accountable_move) {
1079 vm_flags_clear(vma, VM_ACCOUNT);
1080 /* We are about to split vma, so store the start/end. */
1081 vm_start = vma->vm_start;
1082 vm_end = vma->vm_end;
1083 }
1084
1085 err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, /* unlock= */false);
1086 vrm->vma = NULL; /* Invalidated. */
1087 if (err) {
1088 /* OOM: unable to split vma, just get accounts right */
1089 vm_acct_memory(len >> PAGE_SHIFT);
1090 return;
1091 }
1092
1093 /*
1094 * If we mremap() from a VMA like this:
1095 *
1096 * addr end
1097 * | |
1098 * v v
1099 * |-------------|
1100 * | |
1101 * |-------------|
1102 *
1103 * Having cleared VM_ACCOUNT from the whole VMA, after we unmap above
1104 * we'll end up with:
1105 *
1106 * addr end
1107 * | |
1108 * v v
1109 * |---| |---|
1110 * | A | | B |
1111 * |---| |---|
1112 *
1113 * The VMI is still pointing at addr, so vma_prev() will give us A, and
1114 * a subsequent or lone vma_next() will give us B.
1115 *
1116 * do_vmi_munmap() will have restored the VMI back to addr.
1117 */
1118 if (accountable_move) {
1119 unsigned long end = addr + len;
1120
1121 if (vm_start < addr) {
1122 struct vm_area_struct *prev = vma_prev(&vmi);
1123
1124 vm_flags_set(prev, VM_ACCOUNT); /* Acquires VMA lock. */
1125 }
1126
1127 if (vm_end > end) {
1128 struct vm_area_struct *next = vma_next(&vmi);
1129
1130 vm_flags_set(next, VM_ACCOUNT); /* Acquires VMA lock. */
1131 }
1132 }
1133 }
1134
1135 /*
1136 * Copy vrm->vma over to vrm->new_addr possibly adjusting size as part of the
1137 * process. Additionally handle an error occurring on moving of page tables,
1138 * where we reset vrm state to cause unmapping of the new VMA.
1139 *
1140 * Outputs the newly installed VMA to new_vma_ptr. Returns 0 on success or an
1141 * error code.
1142 */
1143 static int copy_vma_and_data(struct vma_remap_struct *vrm,
1144 struct vm_area_struct **new_vma_ptr)
1145 {
1146 unsigned long internal_offset = vrm->addr - vrm->vma->vm_start;
1147 unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT;
1148 unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff;
1149 unsigned long moved_len;
1150 struct vm_area_struct *vma = vrm->vma;
1151 struct vm_area_struct *new_vma;
1152 int err = 0;
1153 PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len);
1154
1155 new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff,
1156 &pmc.need_rmap_locks);
1157 if (!new_vma) {
1158 vrm_uncharge(vrm);
1159 *new_vma_ptr = NULL;
1160 return -ENOMEM;
1161 }
1162 vrm->vma = vma;
1163 pmc.old = vma;
1164 pmc.new = new_vma;
1165
1166 moved_len = move_page_tables(&pmc);
1167 if (moved_len < vrm->old_len)
1168 err = -ENOMEM;
1169 else if (vma->vm_ops && vma->vm_ops->mremap)
1170 err = vma->vm_ops->mremap(new_vma);
1171
1172 if (unlikely(err)) {
1173 PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr,
1174 vrm->addr, moved_len);
1175
1176 /*
1177 * On error, move entries back from new area to old,
1178 * which will succeed since page tables still there,
1179 * and then proceed to unmap new area instead of old.
1180 */
1181 pmc_revert.need_rmap_locks = true;
1182 move_page_tables(&pmc_revert);
1183
1184 vrm->vma = new_vma;
1185 vrm->old_len = vrm->new_len;
1186 vrm->addr = vrm->new_addr;
1187 } else {
1188 mremap_userfaultfd_prep(new_vma, vrm->uf);
1189 }
1190
1191 fixup_hugetlb_reservations(vma);
1192
1193 /* Tell pfnmap has moved from this vma */
1194 if (unlikely(vma->vm_flags & VM_PFNMAP))
1195 untrack_pfn_clear(vma);
1196
1197 *new_vma_ptr = new_vma;
1198 return err;
1199 }
1200
1201 /*
1202 * Perform final tasks for MREMAP_DONTUNMAP operation, clearing mlock() and
1203 * account flags on remaining VMA by convention (it cannot be mlock()'d any
1204 * longer, as pages in range are no longer mapped), and removing anon_vma_chain
1205 * links from it (if the entire VMA was copied over).
1206 */
1207 static void dontunmap_complete(struct vma_remap_struct *vrm,
1208 struct vm_area_struct *new_vma)
1209 {
1210 unsigned long start = vrm->addr;
1211 unsigned long end = vrm->addr + vrm->old_len;
1212 unsigned long old_start = vrm->vma->vm_start;
1213 unsigned long old_end = vrm->vma->vm_end;
1214
1215 /*
1216 * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old
1217 * vma.
1218 */
1219 vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT);
1220
1221 /*
1222 * anon_vma links of the old vma is no longer needed after its page
1223 * table has been moved.
1224 */
1225 if (new_vma != vrm->vma && start == old_start && end == old_end)
1226 unlink_anon_vmas(vrm->vma);
1227
1228 /* Because we won't unmap we don't need to touch locked_vm. */
1229 }
1230
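/*
 * Move the VMA described by @vrm to its new location: charge memory, copy
 * the VMA and its page tables, then either unmap the source or (for
 * MREMAP_DONTUNMAP) leave it mapped but stripped of its locked/accounted
 * state. Returns the new address, or an error value.
 */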
1231 static unsigned long move_vma(struct vma_remap_struct *vrm)
1232 {
1233 struct mm_struct *mm = current->mm;
1234 struct vm_area_struct *new_vma;
1235 unsigned long hiwater_vm;
1236 int err;
1237
1238 err = prep_move_vma(vrm);
1239 if (err)
1240 return err;
1241
1242 /* If accounted, charge the number of bytes the operation will use. */
1243 if (!vrm_charge(vrm))
1244 return -ENOMEM;
1245
1246 /* We don't want racing faults. */
1247 vma_start_write(vrm->vma);
1248
1249 /* Perform copy step. */
1250 err = copy_vma_and_data(vrm, &new_vma);
1251 /*
1252 * If we established the copied-to VMA, we attempt to recover from the
1253 * error by setting the destination VMA to the source VMA and unmapping
1254 * it below.
1255 */
1256 if (err && !new_vma)
1257 return err;
1258
1259 /*
1260 * If we failed to move page tables we still do total_vm increment
1261 * since do_munmap() will decrement it by old_len == new_len.
1262 *
1263 * Since total_vm is about to be raised artificially high for a
1264 * moment, we need to restore high watermark afterwards: if stats
1265 * are taken meanwhile, total_vm and hiwater_vm appear too high.
1266 * If this were a serious issue, we'd add a flag to do_munmap().
1267 */
1268 hiwater_vm = mm->hiwater_vm;
1269
1270 vrm_stat_account(vrm, vrm->new_len);
1271 if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP)))
1272 dontunmap_complete(vrm, new_vma);
1273 else
1274 unmap_source_vma(vrm);
1275
1276 mm->hiwater_vm = hiwater_vm;
1277
1278 return err ? (unsigned long)err : vrm->new_addr;
1279 }
1280
1281 /*
1282 * resize_is_valid() - Ensure the vma can be resized to the new length at the given
1283 * address.
1284 *
1285 * Return 0 on success, error otherwise.
1286 */
1287 static int resize_is_valid(struct vma_remap_struct *vrm)
1288 {
1289 struct mm_struct *mm = current->mm;
1290 struct vm_area_struct *vma = vrm->vma;
1291 unsigned long addr = vrm->addr;
1292 unsigned long old_len = vrm->old_len;
1293 unsigned long new_len = vrm->new_len;
1294 unsigned long pgoff;
1295
1296 /*
1297 * !old_len is a special case where an attempt is made to 'duplicate'
1298 * a mapping. This makes no sense for private mappings as it will
1299 * instead create a fresh/new mapping unrelated to the original. This
1300 * is contrary to the basic idea of mremap which creates new mappings
1301 * based on the original. There are no known use cases for this
1302 * behavior. As a result, fail such attempts.
1303 */
1304 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
1305 pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n",
1306 current->comm, current->pid);
1307 return -EINVAL;
1308 }
1309
1310 if ((vrm->flags & MREMAP_DONTUNMAP) &&
1311 (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
1312 return -EINVAL;
1313
1314 /* We can't remap across vm area boundaries */
1315 if (old_len > vma->vm_end - addr)
1316 return -EFAULT;
1317
1318 if (new_len == old_len)
1319 return 0;
1320
1321 /* Need to be careful about a growing mapping */
1322 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
1323 pgoff += vma->vm_pgoff;
1324 if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
1325 return -EINVAL;
1326
1327 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
1328 return -EFAULT;
1329
1330 if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta))
1331 return -EAGAIN;
1332
1333 if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
1334 return -ENOMEM;
1335
1336 return 0;
1337 }
1338
1339 /*
1340 * The user has requested that the VMA be shrunk (i.e., old_len > new_len), so
1341 * execute this, optionally dropping the mmap lock when we do so.
1342 *
1343 * In both cases this invalidates the VMA, however if we don't drop the lock,
1344 * then load the correct VMA into vrm->vma afterwards.
1345 */
1346 static unsigned long shrink_vma(struct vma_remap_struct *vrm,
1347 bool drop_lock)
1348 {
1349 struct mm_struct *mm = current->mm;
1350 unsigned long unmap_start = vrm->addr + vrm->new_len;
1351 unsigned long unmap_bytes = vrm->delta;
1352 unsigned long res;
1353 VMA_ITERATOR(vmi, mm, unmap_start);
1354
1355 VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK);
1356
1357 res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes,
1358 vrm->uf_unmap, drop_lock);
1359 vrm->vma = NULL; /* Invalidated. */
1360 if (res)
1361 return res;
1362
1363 /*
1364 * If we've not dropped the lock, then we should reload the VMA to
1365 * replace the invalidated VMA with the one that may have now been
1366 * split.
1367 */
1368 if (drop_lock) {
1369 vrm->mmap_locked = false;
1370 } else {
1371 vrm->vma = vma_lookup(mm, vrm->addr);
1372 if (!vrm->vma)
1373 return -EFAULT;
1374 }
1375
1376 return 0;
1377 }
1378
1379 /*
1380 * mremap_to() - remap a vma to a new location.
1381 * Returns: The new address of the vma or an error.
1382 */
1383 static unsigned long mremap_to(struct vma_remap_struct *vrm)
1384 {
1385 struct mm_struct *mm = current->mm;
1386 unsigned long err;
1387
1388 /* Is the new length or address silly? */
1389 if (vrm->new_len > TASK_SIZE ||
1390 vrm->new_addr > TASK_SIZE - vrm->new_len)
1391 return -EINVAL;
1392
1393 if (vrm_overlaps(vrm))
1394 return -EINVAL;
1395
1396 if (vrm->flags & MREMAP_FIXED) {
1397 /*
1398 * In mremap_to() the VMA is moved to the destination address, so we
1399 * munmap the destination first.
1400 * do_munmap() will check whether the destination is sealed.
1401 */
1402 err = do_munmap(mm, vrm->new_addr, vrm->new_len,
1403 vrm->uf_unmap_early);
1404 vrm->vma = NULL; /* Invalidated. */
1405 if (err)
1406 return err;
1407
1408 /*
1409 * If we remap a portion of a VMA elsewhere in the same VMA,
1410 * this can invalidate the old VMA. Reset.
1411 */
1412 vrm->vma = vma_lookup(mm, vrm->addr);
1413 if (!vrm->vma)
1414 return -EFAULT;
1415 }
1416
1417 if (vrm->remap_type == MREMAP_SHRINK) {
1418 err = shrink_vma(vrm, /* drop_lock= */false);
1419 if (err)
1420 return err;
1421
1422 /* Set up for the move now shrink has been executed. */
1423 vrm->old_len = vrm->new_len;
1424 }
1425
1426 err = resize_is_valid(vrm);
1427 if (err)
1428 return err;
1429
1430 /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
1431 if (vrm->flags & MREMAP_DONTUNMAP) {
1432 vm_flags_t vm_flags = vrm->vma->vm_flags;
1433 unsigned long pages = vrm->old_len >> PAGE_SHIFT;
1434
1435 if (!may_expand_vm(mm, vm_flags, pages))
1436 return -ENOMEM;
1437 }
1438
1439 err = vrm_set_new_addr(vrm);
1440 if (err)
1441 return err;
1442
1443 return move_vma(vrm);
1444 }
1445
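/*
 * Can @vma grow in place by @delta bytes? Checks for address overflow, for
 * any mapping already occupying [vm_end, vm_end + delta), and that the
 * expanded range is acceptable to get_unmapped_area(). Returns 1 if so.
 */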
1446 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
1447 {
1448 unsigned long end = vma->vm_end + delta;
1449
1450 if (end < vma->vm_end) /* overflow */
1451 return 0;
1452 if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
1453 return 0;
1454 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
1455 0, MAP_FIXED) & ~PAGE_MASK)
1456 return 0;
1457 return 1;
1458 }
1459
1460 /* Determine whether we are actually able to execute an in-place expansion. */
1461 static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm)
1462 {
1463 /* Number of bytes from vrm->addr to end of VMA. */
1464 unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr;
1465
1466 /* If end of range aligns to end of VMA, we can just expand in-place. */
1467 if (suffix_bytes != vrm->old_len)
1468 return false;
1469
1470 /* Check whether this is feasible. */
1471 if (!vma_expandable(vrm->vma, vrm->delta))
1472 return false;
1473
1474 return true;
1475 }
1476
1477 /*
1478 * Are the parameters passed to mremap() valid? If so return 0, otherwise return
1479 * error.
1480 */
1481 static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
1482
1483 {
1484 unsigned long addr = vrm->addr;
1485 unsigned long flags = vrm->flags;
1486
1487 /* Ensure no unexpected flag values. */
1488 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
1489 return -EINVAL;
1490
1491 /* Start address must be page-aligned. */
1492 if (offset_in_page(addr))
1493 return -EINVAL;
1494
1495 /*
1496 * We allow a zero old-len as a special case
1497 * for DOS-emu "duplicate shm area" thing. But
1498 * a zero new-len is nonsensical.
1499 */
1500 if (!PAGE_ALIGN(vrm->new_len))
1501 return -EINVAL;
1502
1503 /* Remainder of checks are for cases with specific new_addr. */
1504 if (!vrm_implies_new_addr(vrm))
1505 return 0;
1506
1507 /* The new address must be page-aligned. */
1508 if (offset_in_page(vrm->new_addr))
1509 return -EINVAL;
1510
1511 /* A fixed address implies a move. */
1512 if (!(flags & MREMAP_MAYMOVE))
1513 return -EINVAL;
1514
1515 /* MREMAP_DONTUNMAP does not allow resizing in the process. */
1516 if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len)
1517 return -EINVAL;
1518
1519 /*
1520 * move_vma() needs us to stay 4 maps below the threshold, otherwise
1521 * it will bail out at the very beginning.
1522 * That is a problem if we have already unmapped the regions here
1523 * (new_addr, and old_addr), because userspace will not know the
1524 * state of the VMAs after it gets -ENOMEM.
1525 * So, to avoid such a scenario, we can pre-compute whether the whole
1526 * operation has a high chance of succeeding map-wise.
1527 * The worst-case scenario is when both VMAs (new_addr and old_addr) get
1528 * split in 3 before unmapping them.
1529 * That means 2 more maps (1 for each) on top of the ones we already hold.
1530 * Check whether current map count plus 2 still leads us to 4 maps below
1531 * the threshold, otherwise return -ENOMEM here to be more safe.
1532 */
1533 if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3)
1534 return -ENOMEM;
1535
1536 return 0;
1537 }
1538
1539 /*
1540 * We know we can expand the VMA in-place by delta pages, so do so.
1541 *
1542 * If we discover the VMA is locked, update mm_struct statistics accordingly and
1543 * indicate so to the caller.
1544 */
1545 static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
1546 {
1547 struct mm_struct *mm = current->mm;
1548 struct vm_area_struct *vma = vrm->vma;
1549 VMA_ITERATOR(vmi, mm, vma->vm_end);
1550
1551 if (!vrm_charge(vrm))
1552 return -ENOMEM;
1553
1554 /*
1555 * Function vma_merge_extend() is called on the
1556 * extension we are adding to the already existing vma,
1557 * vma_merge_extend() will merge this extension with the
1558 * already existing vma (expand operation itself) and
1559 * possibly also with the next vma if it becomes
1560 * adjacent to the expanded vma and otherwise
1561 * compatible.
1562 */
1563 vma = vma_merge_extend(&vmi, vma, vrm->delta);
1564 if (!vma) {
1565 vrm_uncharge(vrm);
1566 return -ENOMEM;
1567 }
1568 vrm->vma = vma;
1569
1570 vrm_stat_account(vrm, vrm->delta);
1571
1572 return 0;
1573 }
1574
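/*
 * Round the remap lengths up to the hugetlb page size and check that the
 * addresses are suitably aligned. Expansion of hugetlb mappings is refused.
 * Returns true if the (possibly adjusted) request can proceed.
 */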
1575 static bool align_hugetlb(struct vma_remap_struct *vrm)
1576 {
1577 struct hstate *h __maybe_unused = hstate_vma(vrm->vma);
1578
1579 vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h));
1580 vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h));
1581
1582 /* addrs must be huge page aligned */
1583 if (vrm->addr & ~huge_page_mask(h))
1584 return false;
1585 if (vrm->new_addr & ~huge_page_mask(h))
1586 return false;
1587
1588 /*
1589 * Don't allow remap expansion, because the underlying hugetlb
1590 * reservation is not yet capable to handle split reservation.
1591 */
1592 if (vrm->new_len > vrm->old_len)
1593 return false;
1594
1595 vrm_set_delta(vrm);
1596
1597 return true;
1598 }
1599
1600 /*
1601 * We are mremap()'ing without specifying a fixed address to move to, but are
1602 * requesting that the VMA's size be increased.
1603 *
1604 * Try to do so in-place, if this fails, then move the VMA to a new location to
1605 * action the change.
1606 */
1607 static unsigned long expand_vma(struct vma_remap_struct *vrm)
1608 {
1609 unsigned long err;
1610 unsigned long addr = vrm->addr;
1611
1612 err = resize_is_valid(vrm);
1613 if (err)
1614 return err;
1615
1616 /*
1617 * [addr, old_len) spans precisely to the end of the VMA, so try to
1618 * expand it in-place.
1619 */
1620 if (vrm_can_expand_in_place(vrm)) {
1621 err = expand_vma_in_place(vrm);
1622 if (err)
1623 return err;
1624
1625 /*
1626 * We want to populate the newly expanded portion of the VMA to
1627 * satisfy the expectation that mlock()'ing a VMA maintains all
1628 * of its pages in memory.
1629 */
1630 if (vrm->mlocked)
1631 vrm->new_addr = addr;
1632
1633 /* OK we're done! */
1634 return addr;
1635 }
1636
1637 /*
1638 * We weren't able to just expand or shrink the area,
1639 * we need to create a new one and move it.
1640 */
1641
1642 /* We're not allowed to move the VMA, so error out. */
1643 if (!(vrm->flags & MREMAP_MAYMOVE))
1644 return -ENOMEM;
1645
1646 /* Find a new location to move the VMA to. */
1647 err = vrm_set_new_addr(vrm);
1648 if (err)
1649 return err;
1650
1651 return move_vma(vrm);
1652 }
1653
1654 /*
1655 * Attempt to resize the VMA in-place, if we cannot, then move the VMA to the
1656 * first available address to perform the operation.
1657 */
1658 static unsigned long mremap_at(struct vma_remap_struct *vrm)
1659 {
1660 unsigned long res;
1661
1662 switch (vrm->remap_type) {
1663 case MREMAP_INVALID:
1664 break;
1665 case MREMAP_NO_RESIZE:
1666 /* NO-OP CASE - resizing to the same size. */
1667 return vrm->addr;
1668 case MREMAP_SHRINK:
1669 /*
1670 * SHRINK CASE. Can always be done in-place.
1671 *
1672 * Simply unmap the shrunken portion of the VMA. This does all
1673 * the needed commit accounting, and we indicate that the mmap
1674 * lock should be dropped.
1675 */
1676 res = shrink_vma(vrm, /* drop_lock= */true);
1677 if (res)
1678 return res;
1679
1680 return vrm->addr;
1681 case MREMAP_EXPAND:
1682 return expand_vma(vrm);
1683 }
1684
1685 BUG();
1686 }
1687
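/*
 * Validate the request described by @vrm, look up the VMA and carry out the
 * remap under the mmap write lock. Returns the resulting address on success
 * or a negative error value (the two are distinguished via offset_in_page()).
 */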
1688 static unsigned long do_mremap(struct vma_remap_struct *vrm)
1689 {
1690 struct mm_struct *mm = current->mm;
1691 struct vm_area_struct *vma;
1692 unsigned long ret;
1693
1694 ret = check_mremap_params(vrm);
1695 if (ret)
1696 return ret;
1697
1698 vrm->old_len = PAGE_ALIGN(vrm->old_len);
1699 vrm->new_len = PAGE_ALIGN(vrm->new_len);
1700 vrm_set_delta(vrm);
1701
1702 if (mmap_write_lock_killable(mm))
1703 return -EINTR;
1704 vrm->mmap_locked = true;
1705
1706 vma = vrm->vma = vma_lookup(mm, vrm->addr);
1707 if (!vma) {
1708 ret = -EFAULT;
1709 goto out;
1710 }
1711
1712 /* If mseal()'d, mremap() is prohibited. */
1713 if (!can_modify_vma(vma)) {
1714 ret = -EPERM;
1715 goto out;
1716 }
1717
1718 /* Align to hugetlb page size, if required. */
1719 if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm)) {
1720 ret = -EINVAL;
1721 goto out;
1722 }
1723
1724 vrm->remap_type = vrm_remap_type(vrm);
1725
1726 /* Actually execute mremap. */
1727 ret = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
1728
1729 out:
1730 if (vrm->mmap_locked) {
1731 mmap_write_unlock(mm);
1732 vrm->mmap_locked = false;
1733
1734 if (!offset_in_page(ret) && vrm->mlocked && vrm->new_len > vrm->old_len)
1735 mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);
1736 }
1737
1738 userfaultfd_unmap_complete(mm, vrm->uf_unmap_early);
1739 mremap_userfaultfd_complete(vrm->uf, vrm->addr, ret, vrm->old_len);
1740 userfaultfd_unmap_complete(mm, vrm->uf_unmap);
1741
1742 return ret;
1743 }
1744
1745 /*
1746 * Expand (or shrink) an existing mapping, potentially moving it at the
1747 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1748 *
1749 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
1750 * This option implies MREMAP_MAYMOVE.
1751 */
1752 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1753 unsigned long, new_len, unsigned long, flags,
1754 unsigned long, new_addr)
1755 {
1756 struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
1757 LIST_HEAD(uf_unmap_early);
1758 LIST_HEAD(uf_unmap);
1759 /*
1760 * There is a deliberate asymmetry here: we strip the pointer tag
1761 * from the old address but leave the new address alone. This is
1762 * for consistency with mmap(), where we prevent the creation of
1763 * aliasing mappings in userspace by leaving the tag bits of the
1764 * mapping address intact. A non-zero tag will cause the subsequent
1765 * range checks to reject the address as invalid.
1766 *
1767 * See Documentation/arch/arm64/tagged-address-abi.rst for more
1768 * information.
1769 */
1770 struct vma_remap_struct vrm = {
1771 .addr = untagged_addr(addr),
1772 .old_len = old_len,
1773 .new_len = new_len,
1774 .flags = flags,
1775 .new_addr = new_addr,
1776
1777 .uf = &uf,
1778 .uf_unmap_early = &uf_unmap_early,
1779 .uf_unmap = &uf_unmap,
1780
1781 .remap_type = MREMAP_INVALID, /* We set later. */
1782 };
1783
1784 return do_mremap(&vrm);
1785 }
1786
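/*
 * Illustrative userspace usage (not part of the kernel sources): a minimal
 * sketch, assuming _GNU_SOURCE and <sys/mman.h>, of growing an anonymous
 * mapping and letting the kernel move it if it cannot be expanded in place:
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 */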