1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6
7 #include "generated/bit-length.h"
8
9 #include "maple-shared.h"
10 #include "vma_internal.h"
11
12 /* Include this so the header guard is set. */
13 #include "../../../mm/vma.h"
14
15 static bool fail_prealloc;
16
17 /* Then override vma_iter_prealloc() so we can choose to fail it. */
18 #define vma_iter_prealloc(vmi, vma) \
19 (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
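/*
 * Note: fail_prealloc is set by individual tests (see test_vmi_prealloc_fail()
 * below) and is reset by cleanup_mm(), so state does not leak between tests.
 */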
20
21 #define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536
22
23 unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
24 unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
25 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
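/*
 * Userland definitions of tunables the kernel normally provides, needed to
 * satisfy references in the code under test.
 */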
26
27 /*
28 * Directly import the VMA implementation here. Our vma_internal.h wrapper
29 * provides userland-equivalent functionality for everything vma.c uses.
30 */
31 #include "../../../mm/vma.c"
32
33 const struct vm_operations_struct vma_dummy_vm_ops;
34 static struct anon_vma dummy_anon_vma;
35
36 #define ASSERT_TRUE(_expr) \
37 do { \
38 if (!(_expr)) { \
39 fprintf(stderr, \
40 "Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
41 __FILE__, __LINE__, __FUNCTION__, #_expr); \
42 return false; \
43 } \
44 } while (0)
45 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
46 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
47 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
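/*
 * A failed assertion performs 'return false' from the enclosing function, so
 * these macros are only usable within the bool-returning test functions below.
 */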
48
49 static struct task_struct __current;
50
51 struct task_struct *get_current(void)
52 {
53 return &__current;
54 }
55
56 unsigned long rlimit(unsigned int limit)
57 {
58 return (unsigned long)-1;
59 }
60
61 /* Helper function to simply allocate a VMA. */
62 static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
63 unsigned long start,
64 unsigned long end,
65 pgoff_t pgoff,
66 vm_flags_t flags)
67 {
68 struct vm_area_struct *ret = vm_area_alloc(mm);
69
70 if (ret == NULL)
71 return NULL;
72
73 ret->vm_start = start;
74 ret->vm_end = end;
75 ret->vm_pgoff = pgoff;
76 ret->__vm_flags = flags;
77 vma_assert_detached(ret);
78
79 return ret;
80 }
81
82 /* Helper function to link an existing VMA to the tree. */
83 static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma)
84 {
85 int res;
86
87 res = vma_link(mm, vma);
88 if (!res)
89 vma_assert_attached(vma);
90 return res;
91 }
92
93 /* Helper function to allocate a VMA and link it to the tree. */
94 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
95 unsigned long start,
96 unsigned long end,
97 pgoff_t pgoff,
98 vm_flags_t flags)
99 {
100 struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
101
102 if (vma == NULL)
103 return NULL;
104
105 if (attach_vma(mm, vma)) {
106 vm_area_free(vma);
107 return NULL;
108 }
109
110 /*
111 * Reset this counter which we use to track whether writes have
112 * begun. Linking to the tree will have caused this to be incremented,
113 * which means we will get a false positive otherwise.
114 */
115 vma->vm_lock_seq = UINT_MAX;
116
117 return vma;
118 }
119
120 /* Helper function which provides a wrapper around a merge new VMA operation. */
121 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
122 {
123 struct vm_area_struct *vma;
124 /*
125 * For convenience, obtain the prev and next VMAs, which the new VMA
126 * operation requires.
127 */
128 vmg->next = vma_next(vmg->vmi);
129 vmg->prev = vma_prev(vmg->vmi);
130 vma_iter_next_range(vmg->vmi);
131
132 vma = vma_merge_new_range(vmg);
133 if (vma)
134 vma_assert_attached(vma);
135
136 return vma;
137 }
138
139 /*
140 * Helper function which provides a wrapper around a merge existing VMA
141 * operation.
142 */
143 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
144 {
145 struct vm_area_struct *vma;
146
147 vma = vma_merge_existing_range(vmg);
148 if (vma)
149 vma_assert_attached(vma);
150 return vma;
151 }
152
153 /*
154 * Helper function which provides a wrapper around the expansion of an existing
155 * VMA.
156 */
157 static int expand_existing(struct vma_merge_struct *vmg)
158 {
159 return vma_expand(vmg);
160 }
161
162 /*
163 * Helper function to reset the merge state and the associated VMA iterator
164 * to a specified new range.
165 */
166 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
167 unsigned long end, pgoff_t pgoff, vm_flags_t flags)
168 {
169 vma_iter_set(vmg->vmi, start);
170
171 vmg->prev = NULL;
172 vmg->middle = NULL;
173 vmg->next = NULL;
174 vmg->target = NULL;
175
176 vmg->start = start;
177 vmg->end = end;
178 vmg->pgoff = pgoff;
179 vmg->flags = flags;
180
181 vmg->just_expand = false;
182 vmg->__remove_middle = false;
183 vmg->__remove_next = false;
184 vmg->__adjust_middle_start = false;
185 vmg->__adjust_next_start = false;
186 }
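/*
 * A typical calling sequence, as used by the tests below: set the range, then
 * point the vmg at the VMAs of interest before attempting the merge, e.g.:
 *
 *	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
 *	vmg.prev = vma_prev;
 *	vmg.middle = vma;
 *	ASSERT_EQ(merge_existing(&vmg), vma_prev);
 */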
187
188 /*
189 * Helper function to try to merge a new VMA.
190 *
191 * Update vmg and its iterator and attempt the merge; if that fails, allocate
192 * a new VMA, link it into the maple tree and return it.
193 */
194 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
195 struct vma_merge_struct *vmg,
196 unsigned long start, unsigned long end,
197 pgoff_t pgoff, vm_flags_t flags,
198 bool *was_merged)
199 {
200 struct vm_area_struct *merged;
201
202 vmg_set_range(vmg, start, end, pgoff, flags);
203
204 merged = merge_new(vmg);
205 if (merged) {
206 *was_merged = true;
207 ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
208 return merged;
209 }
210
211 *was_merged = false;
212
213 ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
214
215 return alloc_and_link_vma(mm, start, end, pgoff, flags);
216 }
217
218 /*
219 * Helper function to reset the dummy anon_vma to indicate it has not been
220 * duplicated.
221 */
222 static void reset_dummy_anon_vma(void)
223 {
224 dummy_anon_vma.was_cloned = false;
225 dummy_anon_vma.was_unlinked = false;
226 }
227
228 /*
229 * Helper function to remove all VMAs and destroy the maple tree associated with
230 * a virtual address space. Returns a count of VMAs in the tree.
231 */
232 static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
233 {
234 struct vm_area_struct *vma;
235 int count = 0;
236
237 fail_prealloc = false;
238 reset_dummy_anon_vma();
239
240 vma_iter_set(vmi, 0);
241 for_each_vma(*vmi, vma) {
242 vm_area_free(vma);
243 count++;
244 }
245
246 mtree_destroy(&mm->mm_mt);
247 mm->map_count = 0;
248 return count;
249 }
250
251 /* Helper function to determine if VMA has had vma_start_write() performed. */
252 static bool vma_write_started(struct vm_area_struct *vma)
253 {
254 int seq = vma->vm_lock_seq;
255
256 /* We reset after each check. */
257 vma->vm_lock_seq = UINT_MAX;
258
259 /* The vma_start_write() stub simply increments this value. */
260 return seq > -1;
261 }
262
263 /* Helper function providing a dummy vm_ops->close() method. */
264 static void dummy_close(struct vm_area_struct *)
265 {
266 }
267
268 static bool test_simple_merge(void)
269 {
270 struct vm_area_struct *vma;
271 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
272 struct mm_struct mm = {};
273 struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
274 struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
275 VMA_ITERATOR(vmi, &mm, 0x1000);
276 struct vma_merge_struct vmg = {
277 .mm = &mm,
278 .vmi = &vmi,
279 .start = 0x1000,
280 .end = 0x2000,
281 .flags = flags,
282 .pgoff = 1,
283 };
284
285 ASSERT_FALSE(attach_vma(&mm, vma_left));
286 ASSERT_FALSE(attach_vma(&mm, vma_right));
287
288 vma = merge_new(&vmg);
289 ASSERT_NE(vma, NULL);
290
291 ASSERT_EQ(vma->vm_start, 0);
292 ASSERT_EQ(vma->vm_end, 0x3000);
293 ASSERT_EQ(vma->vm_pgoff, 0);
294 ASSERT_EQ(vma->vm_flags, flags);
295
296 vm_area_free(vma);
297 mtree_destroy(&mm.mm_mt);
298
299 return true;
300 }
301
302 static bool test_simple_modify(void)
303 {
304 struct vm_area_struct *vma;
305 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
306 struct mm_struct mm = {};
307 struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
308 VMA_ITERATOR(vmi, &mm, 0x1000);
309
310 ASSERT_FALSE(attach_vma(&mm, init_vma));
311
312 /*
313 * The flags will not be changed; the vma_modify_flags() function
314 * performs the merge/split only.
315 */
316 vma = vma_modify_flags(&vmi, init_vma, init_vma,
317 0x1000, 0x2000, VM_READ | VM_MAYREAD);
318 ASSERT_NE(vma, NULL);
319 /* We modify the provided VMA, and on split allocate new VMAs. */
320 ASSERT_EQ(vma, init_vma);
321
322 ASSERT_EQ(vma->vm_start, 0x1000);
323 ASSERT_EQ(vma->vm_end, 0x2000);
324 ASSERT_EQ(vma->vm_pgoff, 1);
325
326 /*
327 * Now walk through the three split VMAs and make sure they are as
328 * expected.
329 */
330
331 vma_iter_set(&vmi, 0);
332 vma = vma_iter_load(&vmi);
333
334 ASSERT_EQ(vma->vm_start, 0);
335 ASSERT_EQ(vma->vm_end, 0x1000);
336 ASSERT_EQ(vma->vm_pgoff, 0);
337
338 vm_area_free(vma);
339 vma_iter_clear(&vmi);
340
341 vma = vma_next(&vmi);
342
343 ASSERT_EQ(vma->vm_start, 0x1000);
344 ASSERT_EQ(vma->vm_end, 0x2000);
345 ASSERT_EQ(vma->vm_pgoff, 1);
346
347 vm_area_free(vma);
348 vma_iter_clear(&vmi);
349
350 vma = vma_next(&vmi);
351
352 ASSERT_EQ(vma->vm_start, 0x2000);
353 ASSERT_EQ(vma->vm_end, 0x3000);
354 ASSERT_EQ(vma->vm_pgoff, 2);
355
356 vm_area_free(vma);
357 mtree_destroy(&mm.mm_mt);
358
359 return true;
360 }
361
362 static bool test_simple_expand(void)
363 {
364 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
365 struct mm_struct mm = {};
366 struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
367 VMA_ITERATOR(vmi, &mm, 0);
368 struct vma_merge_struct vmg = {
369 .vmi = &vmi,
370 .middle = vma,
371 .start = 0,
372 .end = 0x3000,
373 .pgoff = 0,
374 };
375
376 ASSERT_FALSE(attach_vma(&mm, vma));
377
378 ASSERT_FALSE(expand_existing(&vmg));
379
380 ASSERT_EQ(vma->vm_start, 0);
381 ASSERT_EQ(vma->vm_end, 0x3000);
382 ASSERT_EQ(vma->vm_pgoff, 0);
383
384 vm_area_free(vma);
385 mtree_destroy(&mm.mm_mt);
386
387 return true;
388 }
389
390 static bool test_simple_shrink(void)
391 {
392 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
393 struct mm_struct mm = {};
394 struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
395 VMA_ITERATOR(vmi, &mm, 0);
396
397 ASSERT_FALSE(attach_vma(&mm, vma));
398
399 ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
400
401 ASSERT_EQ(vma->vm_start, 0);
402 ASSERT_EQ(vma->vm_end, 0x1000);
403 ASSERT_EQ(vma->vm_pgoff, 0);
404
405 vm_area_free(vma);
406 mtree_destroy(&mm.mm_mt);
407
408 return true;
409 }
410
411 static bool test_merge_new(void)
412 {
413 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
414 struct mm_struct mm = {};
415 VMA_ITERATOR(vmi, &mm, 0);
416 struct vma_merge_struct vmg = {
417 .mm = &mm,
418 .vmi = &vmi,
419 };
420 struct anon_vma_chain dummy_anon_vma_chain_a = {
421 .anon_vma = &dummy_anon_vma,
422 };
423 struct anon_vma_chain dummy_anon_vma_chain_b = {
424 .anon_vma = &dummy_anon_vma,
425 };
426 struct anon_vma_chain dummy_anon_vma_chain_c = {
427 .anon_vma = &dummy_anon_vma,
428 };
429 struct anon_vma_chain dummy_anon_vma_chain_d = {
430 .anon_vma = &dummy_anon_vma,
431 };
432 const struct vm_operations_struct vm_ops = {
433 .close = dummy_close,
434 };
435 int count;
436 struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
437 bool merged;
438
439 /*
440 * 0123456789abc
441 * AA B CC
442 */
443 vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
444 ASSERT_NE(vma_a, NULL);
445 /* We give each VMA a single avc so we can test anon_vma duplication. */
446 INIT_LIST_HEAD(&vma_a->anon_vma_chain);
447 list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
448
449 vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
450 ASSERT_NE(vma_b, NULL);
451 INIT_LIST_HEAD(&vma_b->anon_vma_chain);
452 list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
453
454 vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
455 ASSERT_NE(vma_c, NULL);
456 INIT_LIST_HEAD(&vma_c->anon_vma_chain);
457 list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
458
459 /*
460 * NO merge.
461 *
462 * 0123456789abc
463 * AA B ** CC
464 */
465 vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
466 ASSERT_NE(vma_d, NULL);
467 INIT_LIST_HEAD(&vma_d->anon_vma_chain);
468 list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
469 ASSERT_FALSE(merged);
470 ASSERT_EQ(mm.map_count, 4);
471
472 /*
473 * Merge BOTH sides.
474 *
475 * 0123456789abc
476 * AA*B DD CC
477 */
478 vma_a->vm_ops = &vm_ops; /* This should have no impact. */
479 vma_b->anon_vma = &dummy_anon_vma;
480 vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
481 ASSERT_EQ(vma, vma_a);
482 /* Merge with A, delete B. */
483 ASSERT_TRUE(merged);
484 ASSERT_EQ(vma->vm_start, 0);
485 ASSERT_EQ(vma->vm_end, 0x4000);
486 ASSERT_EQ(vma->vm_pgoff, 0);
487 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
488 ASSERT_TRUE(vma_write_started(vma));
489 ASSERT_EQ(mm.map_count, 3);
490
491 /*
492 * Merge to PREVIOUS VMA.
493 *
494 * 0123456789abc
495 * AAAA* DD CC
496 */
497 vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
498 ASSERT_EQ(vma, vma_a);
499 /* Extend A. */
500 ASSERT_TRUE(merged);
501 ASSERT_EQ(vma->vm_start, 0);
502 ASSERT_EQ(vma->vm_end, 0x5000);
503 ASSERT_EQ(vma->vm_pgoff, 0);
504 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
505 ASSERT_TRUE(vma_write_started(vma));
506 ASSERT_EQ(mm.map_count, 3);
507
508 /*
509 * Merge to NEXT VMA.
510 *
511 * 0123456789abc
512 * AAAAA *DD CC
513 */
514 vma_d->anon_vma = &dummy_anon_vma;
515 vma_d->vm_ops = &vm_ops; /* This should have no impact. */
516 vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
517 ASSERT_EQ(vma, vma_d);
518 /* Prepend. */
519 ASSERT_TRUE(merged);
520 ASSERT_EQ(vma->vm_start, 0x6000);
521 ASSERT_EQ(vma->vm_end, 0x9000);
522 ASSERT_EQ(vma->vm_pgoff, 6);
523 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
524 ASSERT_TRUE(vma_write_started(vma));
525 ASSERT_EQ(mm.map_count, 3);
526
527 /*
528 * Merge BOTH sides.
529 *
530 * 0123456789abc
531 * AAAAA*DDD CC
532 */
533 vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
534 vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
535 ASSERT_EQ(vma, vma_a);
536 /* Merge with A, delete D. */
537 ASSERT_TRUE(merged);
538 ASSERT_EQ(vma->vm_start, 0);
539 ASSERT_EQ(vma->vm_end, 0x9000);
540 ASSERT_EQ(vma->vm_pgoff, 0);
541 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
542 ASSERT_TRUE(vma_write_started(vma));
543 ASSERT_EQ(mm.map_count, 2);
544
545 /*
546 * Merge to NEXT VMA.
547 *
548 * 0123456789abc
549 * AAAAAAAAA *CC
550 */
551 vma_c->anon_vma = &dummy_anon_vma;
552 vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
553 ASSERT_EQ(vma, vma_c);
554 /* Prepend C. */
555 ASSERT_TRUE(merged);
556 ASSERT_EQ(vma->vm_start, 0xa000);
557 ASSERT_EQ(vma->vm_end, 0xc000);
558 ASSERT_EQ(vma->vm_pgoff, 0xa);
559 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
560 ASSERT_TRUE(vma_write_started(vma));
561 ASSERT_EQ(mm.map_count, 2);
562
563 /*
564 * Merge BOTH sides.
565 *
566 * 0123456789abc
567 * AAAAAAAAA*CCC
568 */
569 vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
570 ASSERT_EQ(vma, vma_a);
571 /* Extend A and delete C. */
572 ASSERT_TRUE(merged);
573 ASSERT_EQ(vma->vm_start, 0);
574 ASSERT_EQ(vma->vm_end, 0xc000);
575 ASSERT_EQ(vma->vm_pgoff, 0);
576 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
577 ASSERT_TRUE(vma_write_started(vma));
578 ASSERT_EQ(mm.map_count, 1);
579
580 /*
581 * Final state.
582 *
583 * 0123456789abc
584 * AAAAAAAAAAAAA
585 */
586
587 count = 0;
588 vma_iter_set(&vmi, 0);
589 for_each_vma(vmi, vma) {
590 ASSERT_NE(vma, NULL);
591 ASSERT_EQ(vma->vm_start, 0);
592 ASSERT_EQ(vma->vm_end, 0xc000);
593 ASSERT_EQ(vma->vm_pgoff, 0);
594 ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
595
596 vm_area_free(vma);
597 count++;
598 }
599
600 /* Should only have one VMA left (though freed) after all is done. */
601 ASSERT_EQ(count, 1);
602
603 mtree_destroy(&mm.mm_mt);
604 return true;
605 }
606
607 static bool test_vma_merge_special_flags(void)
608 {
609 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
610 struct mm_struct mm = {};
611 VMA_ITERATOR(vmi, &mm, 0);
612 struct vma_merge_struct vmg = {
613 .mm = &mm,
614 .vmi = &vmi,
615 };
616 vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
617 vm_flags_t all_special_flags = 0;
618 int i;
619 struct vm_area_struct *vma_left, *vma;
620
621 /* Make sure there aren't new VM_SPECIAL flags. */
622 for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
623 all_special_flags |= special_flags[i];
624 }
625 ASSERT_EQ(all_special_flags, VM_SPECIAL);
626
627 /*
628 * 01234
629 * AAA
630 */
631 vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
632 ASSERT_NE(vma_left, NULL);
633
634 /* 1. Set up new VMA with special flag that would otherwise merge. */
635
636 /*
637 * 01234
638 * AAA*
639 *
640 * This should merge if not for the VM_SPECIAL flag.
641 */
642 vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
643 for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
644 vm_flags_t special_flag = special_flags[i];
645
646 vma_left->__vm_flags = flags | special_flag;
647 vmg.flags = flags | special_flag;
648 vma = merge_new(&vmg);
649 ASSERT_EQ(vma, NULL);
650 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
651 }
652
653 /* 2. Modify VMA with special flag that would otherwise merge. */
654
655 /*
656 * 01234
657 * AAAB
658 *
659 * Create a VMA to modify.
660 */
661 vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
662 ASSERT_NE(vma, NULL);
663 vmg.middle = vma;
664
665 for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
666 vm_flags_t special_flag = special_flags[i];
667
668 vma_left->__vm_flags = flags | special_flag;
669 vmg.flags = flags | special_flag;
670 vma = merge_existing(&vmg);
671 ASSERT_EQ(vma, NULL);
672 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
673 }
674
675 cleanup_mm(&mm, &vmi);
676 return true;
677 }
678
679 static bool test_vma_merge_with_close(void)
680 {
681 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
682 struct mm_struct mm = {};
683 VMA_ITERATOR(vmi, &mm, 0);
684 struct vma_merge_struct vmg = {
685 .mm = &mm,
686 .vmi = &vmi,
687 };
688 const struct vm_operations_struct vm_ops = {
689 .close = dummy_close,
690 };
691 struct vm_area_struct *vma_prev, *vma_next, *vma;
692
693 /*
694 * When merging VMAs we are not permitted to remove any VMA that has a
695 * vm_ops->close() hook.
696 *
697 * Considering the two possible adjacent VMAs to which a VMA can be
698 * merged:
699 *
700 * [ prev ][ vma ][ next ]
701 *
702 * In no case will we need to delete prev. If the operation is
703 * mergeable, then prev will be extended with one or both of vma and
704 * next deleted.
705 *
706 * As a result, during initial mergeability checks, only
707 * can_vma_merge_before() (which implies the VMA being merged with is
708 * 'next' as shown above) bothers to check to see whether the next VMA
709 * has a vm_ops->close() callback that will need to be called when
710 * removed.
711 *
712 * If it does, then we cannot merge as the resources that the close()
713 * operation potentially clears down are tied only to the existing VMA
714 * range and we have no way of extending those to the newly merged one.
715 *
716 * We must consider two scenarios:
717 *
718 * A.
719 *
720 * vm_ops->close: - - !NULL
721 * [ prev ][ vma ][ next ]
722 *
723 * Where prev may or may not be present/mergeable.
724 *
725 * This is picked up by a specific check in can_vma_merge_before().
726 *
727 * B.
728 *
729 * vm_ops->close: - !NULL
730 * [ prev ][ vma ]
731 *
732 * Where prev and vma are present and mergeable.
733 *
734 * This is picked up by a specific check in the modified VMA merge.
735 *
736 * IMPORTANT NOTE: We make the assumption that the following case:
737 *
738 * - !NULL NULL
739 * [ prev ][ vma ][ next ]
740 *
741 * Cannot occur, because vma->vm_ops being the same implies the same
742 * vma->vm_file, and therefore this would mean that next->vm_ops->close
743 * would be set too, and thus scenario A would pick this up.
744 */
745
746 /*
747 * The only case of a new VMA merge that results in a VMA being deleted
748 * is one where both the previous and next VMAs are merged - in this
749 * instance the next VMA is deleted, and the previous VMA is extended.
750 *
751 * If we are unable to do so, we reduce the operation to simply
752 * extending the prev VMA and not merging next.
753 *
754 * 0123456789
755 * PPP**NNNN
756 * ->
757 * 0123456789
758 * PPPPPPNNN
759 */
760
761 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
762 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
763 vma_next->vm_ops = &vm_ops;
764
765 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
766 ASSERT_EQ(merge_new(&vmg), vma_prev);
767 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
768 ASSERT_EQ(vma_prev->vm_start, 0);
769 ASSERT_EQ(vma_prev->vm_end, 0x5000);
770 ASSERT_EQ(vma_prev->vm_pgoff, 0);
771
772 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
773
774 /*
775 * When modifying an existing VMA there are further cases where we
776 * delete VMAs.
777 *
778 *    <>
779 * 0123456789
780 * PPPVV
781 *
782 * In this instance, if vma has a close hook, the merge simply cannot
783 * proceed.
784 */
785
786 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
787 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
788 vma->vm_ops = &vm_ops;
789
790 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
791 vmg.prev = vma_prev;
792 vmg.middle = vma;
793
794 /*
795 * The VMA being modified in a way that would otherwise merge should
796 * also fail.
797 */
798 ASSERT_EQ(merge_existing(&vmg), NULL);
799 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
800
801 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
802
803 /*
804 * This case is mirrored if merging with next.
805 *
806 *    <>
807 * 0123456789
808 *    VVNNNN
809 *
810 * In this instance, if vma has a close hook, the merge simply cannot
811 * proceed.
812 */
813
814 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
815 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
816 vma->vm_ops = &vm_ops;
817
818 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
819 vmg.middle = vma;
820 ASSERT_EQ(merge_existing(&vmg), NULL);
821 /*
822 * Initially this is misapprehended as an out of memory report, as the
823 * close() check is handled in the same way as anon_vma duplication
824 * failures; however, a subsequent patch resolves this.
825 */
826 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
827
828 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
829
830 /*
831 * Finally, we consider two variants of the case where we modify a VMA
832 * to merge with both the previous and next VMAs.
833 *
834 * The first variant is where vma has a close hook. In this instance, no
835 * merge can proceed.
836 *
837 *    <>
838 * 0123456789
839 * PPPVVNNNN
840 */
841
842 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
843 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
844 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
845 vma->vm_ops = &vm_ops;
846
847 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
848 vmg.prev = vma_prev;
849 vmg.middle = vma;
850
851 ASSERT_EQ(merge_existing(&vmg), NULL);
852 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
853
854 ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
855
856 /*
857 * The second variant is where next has a close hook. In this instance,
858 * we reduce the operation to a merge between prev and vma.
859 *
860 *    <>
861 * 0123456789
862 * PPPVVNNNN
863 * ->
864 * 0123456789
865 * PPPPPNNNN
866 */
867
868 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
869 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
870 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
871 vma_next->vm_ops = &vm_ops;
872
873 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
874 vmg.prev = vma_prev;
875 vmg.middle = vma;
876
877 ASSERT_EQ(merge_existing(&vmg), vma_prev);
878 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
879 ASSERT_EQ(vma_prev->vm_start, 0);
880 ASSERT_EQ(vma_prev->vm_end, 0x5000);
881 ASSERT_EQ(vma_prev->vm_pgoff, 0);
882
883 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
884
885 return true;
886 }
887
888 static bool test_vma_merge_new_with_close(void)
889 {
890 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
891 struct mm_struct mm = {};
892 VMA_ITERATOR(vmi, &mm, 0);
893 struct vma_merge_struct vmg = {
894 .mm = &mm,
895 .vmi = &vmi,
896 };
897 struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
898 struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
899 const struct vm_operations_struct vm_ops = {
900 .close = dummy_close,
901 };
902 struct vm_area_struct *vma;
903
904 /*
905 * We should allow the partial merge of a proposed new VMA if the
906 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
907 * compatible), e.g.:
908 *
909 * New VMA
910 * A v-------v B
911 * |-----| |-----|
912 * close close
913 *
914 * Since the rule is to not DELETE a VMA with a close operation, this
915 * should be permitted, only rather than expanding A and deleting B, we
916 * should simply expand A and leave B intact, e.g.:
917 *
918 * New VMA
919 * A B
920 * |------------||-----|
921 * close close
922 */
923
924 /* Have prev and next have a vm_ops->close() hook. */
925 vma_prev->vm_ops = &vm_ops;
926 vma_next->vm_ops = &vm_ops;
927
928 vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
929 vma = merge_new(&vmg);
930 ASSERT_NE(vma, NULL);
931 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
932 ASSERT_EQ(vma->vm_start, 0);
933 ASSERT_EQ(vma->vm_end, 0x5000);
934 ASSERT_EQ(vma->vm_pgoff, 0);
935 ASSERT_EQ(vma->vm_ops, &vm_ops);
936 ASSERT_TRUE(vma_write_started(vma));
937 ASSERT_EQ(mm.map_count, 2);
938
939 cleanup_mm(&mm, &vmi);
940 return true;
941 }
942
943 static bool test_merge_existing(void)
944 {
945 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
946 struct mm_struct mm = {};
947 VMA_ITERATOR(vmi, &mm, 0);
948 struct vm_area_struct *vma, *vma_prev, *vma_next;
949 struct vma_merge_struct vmg = {
950 .mm = &mm,
951 .vmi = &vmi,
952 };
953 const struct vm_operations_struct vm_ops = {
954 .close = dummy_close,
955 };
956
957 /*
958 * Merge right case - partial span.
959 *
960 *    <->
961 * 0123456789
962 *   VVVVNNN
963 * ->
964 * 0123456789
965 *   VNNNNNN
966 */
967 vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
968 vma->vm_ops = &vm_ops; /* This should have no impact. */
969 vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
970 vma_next->vm_ops = &vm_ops; /* This should have no impact. */
971 vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
972 vmg.middle = vma;
973 vmg.prev = vma;
974 vma->anon_vma = &dummy_anon_vma;
975 ASSERT_EQ(merge_existing(&vmg), vma_next);
976 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
977 ASSERT_EQ(vma_next->vm_start, 0x3000);
978 ASSERT_EQ(vma_next->vm_end, 0x9000);
979 ASSERT_EQ(vma_next->vm_pgoff, 3);
980 ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
981 ASSERT_EQ(vma->vm_start, 0x2000);
982 ASSERT_EQ(vma->vm_end, 0x3000);
983 ASSERT_EQ(vma->vm_pgoff, 2);
984 ASSERT_TRUE(vma_write_started(vma));
985 ASSERT_TRUE(vma_write_started(vma_next));
986 ASSERT_EQ(mm.map_count, 2);
987
988 /* Clear down and reset. */
989 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
990
991 /*
992 * Merge right case - full span.
993 *
994 *   <-->
995 * 0123456789
996 *   VVVVNNN
997 * ->
998 * 0123456789
999 *   NNNNNNN
1000 */
1001 vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
1002 vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
1003 vma_next->vm_ops = &vm_ops; /* This should have no impact. */
1004 vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
1005 vmg.middle = vma;
1006 vma->anon_vma = &dummy_anon_vma;
1007 ASSERT_EQ(merge_existing(&vmg), vma_next);
1008 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1009 ASSERT_EQ(vma_next->vm_start, 0x2000);
1010 ASSERT_EQ(vma_next->vm_end, 0x9000);
1011 ASSERT_EQ(vma_next->vm_pgoff, 2);
1012 ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1013 ASSERT_TRUE(vma_write_started(vma_next));
1014 ASSERT_EQ(mm.map_count, 1);
1015
1016 /* Clear down and reset. We should have deleted vma. */
1017 ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1018
1019 /*
1020 * Merge left case - partial span.
1021 *
1022 *    <->
1023 * 0123456789
1024 * PPPVVVV
1025 * ->
1026 * 0123456789
1027 * PPPPPPV
1028 */
1029 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1030 vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1031 vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1032 vma->vm_ops = &vm_ops; /* This should have no impact. */
1033 vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
1034 vmg.prev = vma_prev;
1035 vmg.middle = vma;
1036 vma->anon_vma = &dummy_anon_vma;
1037
1038 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1039 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1040 ASSERT_EQ(vma_prev->vm_start, 0);
1041 ASSERT_EQ(vma_prev->vm_end, 0x6000);
1042 ASSERT_EQ(vma_prev->vm_pgoff, 0);
1043 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1044 ASSERT_EQ(vma->vm_start, 0x6000);
1045 ASSERT_EQ(vma->vm_end, 0x7000);
1046 ASSERT_EQ(vma->vm_pgoff, 6);
1047 ASSERT_TRUE(vma_write_started(vma_prev));
1048 ASSERT_TRUE(vma_write_started(vma));
1049 ASSERT_EQ(mm.map_count, 2);
1050
1051 /* Clear down and reset. */
1052 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1053
1054 /*
1055 * Merge left case - full span.
1056 *
1057 *    <-->
1058 * 0123456789
1059 * PPPVVVV
1060 * ->
1061 * 0123456789
1062 * PPPPPPP
1063 */
1064 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1065 vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1066 vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1067 vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1068 vmg.prev = vma_prev;
1069 vmg.middle = vma;
1070 vma->anon_vma = &dummy_anon_vma;
1071 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1072 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1073 ASSERT_EQ(vma_prev->vm_start, 0);
1074 ASSERT_EQ(vma_prev->vm_end, 0x7000);
1075 ASSERT_EQ(vma_prev->vm_pgoff, 0);
1076 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1077 ASSERT_TRUE(vma_write_started(vma_prev));
1078 ASSERT_EQ(mm.map_count, 1);
1079
1080 /* Clear down and reset. We should have deleted vma. */
1081 ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1082
1083 /*
1084 * Merge both case.
1085 *
1086 *    <-->
1087 * 0123456789
1088 * PPPVVVVNNN
1089 * ->
1090 * 0123456789
1091 * PPPPPPPPPP
1092 */
1093 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1094 vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1095 vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1096 vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1097 vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1098 vmg.prev = vma_prev;
1099 vmg.middle = vma;
1100 vma->anon_vma = &dummy_anon_vma;
1101 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1102 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1103 ASSERT_EQ(vma_prev->vm_start, 0);
1104 ASSERT_EQ(vma_prev->vm_end, 0x9000);
1105 ASSERT_EQ(vma_prev->vm_pgoff, 0);
1106 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1107 ASSERT_TRUE(vma_write_started(vma_prev));
1108 ASSERT_EQ(mm.map_count, 1);
1109
1110 /* Clear down and reset. We should have deleted prev and next. */
1111 ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1112
1113 /*
1114 * Non-merge ranges. The modified VMA merge operation assumes that the
1115 * caller always specifies ranges within the input VMA so we need only
1116 * examine these cases.
1117 *
1118 *     -
1119 *      -
1120 *       -
1121 *     <->
1122 *     <>
1123 *      <>
1124 * 0123456789a
1125 * PPPVVVVVNNN
1126 */
1127
1128 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1129 vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1130 vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
1131
1132 vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
1133 vmg.prev = vma;
1134 vmg.middle = vma;
1135 ASSERT_EQ(merge_existing(&vmg), NULL);
1136 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1137
1138 vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1139 vmg.prev = vma;
1140 vmg.middle = vma;
1141 ASSERT_EQ(merge_existing(&vmg), NULL);
1142 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1143
1144 vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
1145 vmg.prev = vma;
1146 vmg.middle = vma;
1147 ASSERT_EQ(merge_existing(&vmg), NULL);
1148 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1149
1150 vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
1151 vmg.prev = vma;
1152 vmg.middle = vma;
1153 ASSERT_EQ(merge_existing(&vmg), NULL);
1154 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1155
1156 vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
1157 vmg.prev = vma;
1158 vmg.middle = vma;
1159 ASSERT_EQ(merge_existing(&vmg), NULL);
1160 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1161
1162 vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1163 vmg.prev = vma;
1164 vmg.middle = vma;
1165 ASSERT_EQ(merge_existing(&vmg), NULL);
1166 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1167
1168 ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1169
1170 return true;
1171 }
1172
1173 static bool test_anon_vma_non_mergeable(void)
1174 {
1175 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1176 struct mm_struct mm = {};
1177 VMA_ITERATOR(vmi, &mm, 0);
1178 struct vm_area_struct *vma, *vma_prev, *vma_next;
1179 struct vma_merge_struct vmg = {
1180 .mm = &mm,
1181 .vmi = &vmi,
1182 };
1183 struct anon_vma_chain dummy_anon_vma_chain1 = {
1184 .anon_vma = &dummy_anon_vma,
1185 };
1186 struct anon_vma_chain dummy_anon_vma_chain2 = {
1187 .anon_vma = &dummy_anon_vma,
1188 };
1189
1190 /*
1191 * In the case of modified VMA merge, merging both left and right VMAs
1192 * but where prev and next have incompatible anon_vma objects, we revert
1193 * to a merge of prev and VMA:
1194 *
1195 *    <-->
1196 * 0123456789
1197 * PPPVVVVNNN
1198 * ->
1199 * 0123456789
1200 * PPPPPPPNNN
1201 */
1202 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1203 vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1204 vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1205
1206 /*
1207 * Give both prev and next single anon_vma_chain fields, so they will
1208 * merge with the NULL vmg->anon_vma.
1209 *
1210 * However, when prev is compared to next, the merge should fail.
1211 */
1212
1213 INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1214 list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1215 ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
1216 vma_prev->anon_vma = &dummy_anon_vma;
1217 ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));
1218
1219 INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1220 list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1221 ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
1222 vma_next->anon_vma = (struct anon_vma *)2;
1223 ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
1224
1225 ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));
1226
1227 vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1228 vmg.prev = vma_prev;
1229 vmg.middle = vma;
1230
1231 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1232 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1233 ASSERT_EQ(vma_prev->vm_start, 0);
1234 ASSERT_EQ(vma_prev->vm_end, 0x7000);
1235 ASSERT_EQ(vma_prev->vm_pgoff, 0);
1236 ASSERT_TRUE(vma_write_started(vma_prev));
1237 ASSERT_FALSE(vma_write_started(vma_next));
1238
1239 /* Clear down and reset. */
1240 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1241
1242 /*
1243 * Now consider the new VMA case. This is equivalent, only adding a new
1244 * VMA in a gap between prev and next.
1245 *
1246 *    <-->
1247 * 0123456789
1248 * PPP****NNN
1249 * ->
1250 * 0123456789
1251 * PPPPPPPNNN
1252 */
1253 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1254 vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1255
1256 INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1257 list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1258 vma_prev->anon_vma = (struct anon_vma *)1;
1259
1260 INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1261 list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1262 vma_next->anon_vma = (struct anon_vma *)2;
1263
1264 vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1265 vmg.prev = vma_prev;
1266
1267 ASSERT_EQ(merge_new(&vmg), vma_prev);
1268 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1269 ASSERT_EQ(vma_prev->vm_start, 0);
1270 ASSERT_EQ(vma_prev->vm_end, 0x7000);
1271 ASSERT_EQ(vma_prev->vm_pgoff, 0);
1272 ASSERT_TRUE(vma_write_started(vma_prev));
1273 ASSERT_FALSE(vma_write_started(vma_next));
1274
1275 /* Final cleanup. */
1276 ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1277
1278 return true;
1279 }
1280
1281 static bool test_dup_anon_vma(void)
1282 {
1283 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1284 struct mm_struct mm = {};
1285 VMA_ITERATOR(vmi, &mm, 0);
1286 struct vma_merge_struct vmg = {
1287 .mm = &mm,
1288 .vmi = &vmi,
1289 };
1290 struct anon_vma_chain dummy_anon_vma_chain = {
1291 .anon_vma = &dummy_anon_vma,
1292 };
1293 struct vm_area_struct *vma_prev, *vma_next, *vma;
1294
1295 reset_dummy_anon_vma();
1296
1297 /*
1298 * Expanding a VMA to delete the next one duplicates next's anon_vma and
1299 * assigns it to the expanded VMA.
1300 *
1301 * This covers new VMA merging, as these operations amount to a VMA
1302 * expand.
1303 */
1304 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1305 vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1306 vma_next->anon_vma = &dummy_anon_vma;
1307
1308 vmg_set_range(&vmg, 0, 0x5000, 0, flags);
1309 vmg.middle = vma_prev;
1310 vmg.next = vma_next;
1311
1312 ASSERT_EQ(expand_existing(&vmg), 0);
1313
1314 /* Will have been cloned. */
1315 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1316 ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1317
1318 /* Cleanup ready for next run. */
1319 cleanup_mm(&mm, &vmi);
1320
1321 /*
1322 * next has anon_vma, we assign to prev.
1323 *
1324 * |<----->|
1325 * |-------*********-------|
1326 * prev vma next
1327 * extend delete delete
1328 */
1329
1330 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1331 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1332 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1333
1334 /* Initialise avc so mergeability check passes. */
1335 INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1336 list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1337
1338 vma_next->anon_vma = &dummy_anon_vma;
1339 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1340 vmg.prev = vma_prev;
1341 vmg.middle = vma;
1342
1343 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1344 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1345
1346 ASSERT_EQ(vma_prev->vm_start, 0);
1347 ASSERT_EQ(vma_prev->vm_end, 0x8000);
1348
1349 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1350 ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1351
1352 cleanup_mm(&mm, &vmi);
1353
1354 /*
1355 * vma has anon_vma, we assign to prev.
1356 *
1357 * |<----->|
1358 * |-------*********-------|
1359 * prev vma next
1360 * extend delete delete
1361 */
1362
1363 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1364 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1365 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1366
1367 vma->anon_vma = &dummy_anon_vma;
1368 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1369 vmg.prev = vma_prev;
1370 vmg.middle = vma;
1371
1372 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1373 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1374
1375 ASSERT_EQ(vma_prev->vm_start, 0);
1376 ASSERT_EQ(vma_prev->vm_end, 0x8000);
1377
1378 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1379 ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1380
1381 cleanup_mm(&mm, &vmi);
1382
1383 /*
1384 * vma has anon_vma, we assign to prev.
1385 *
1386 * |<----->|
1387 * |-------*************
1388 * prev vma
1389 * extend shrink/delete
1390 */
1391
1392 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1393 vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1394
1395 vma->anon_vma = &dummy_anon_vma;
1396 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1397 vmg.prev = vma_prev;
1398 vmg.middle = vma;
1399
1400 ASSERT_EQ(merge_existing(&vmg), vma_prev);
1401 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1402
1403 ASSERT_EQ(vma_prev->vm_start, 0);
1404 ASSERT_EQ(vma_prev->vm_end, 0x5000);
1405
1406 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1407 ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1408
1409 cleanup_mm(&mm, &vmi);
1410
1411 /*
1412 * vma has anon_vma, we assign to next.
1413 *
1414 * |<----->|
1415 * *************-------|
1416 * vma next
1417 * shrink/delete extend
1418 */
1419
1420 vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
1421 vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1422
1423 vma->anon_vma = &dummy_anon_vma;
1424 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1425 vmg.prev = vma;
1426 vmg.middle = vma;
1427
1428 ASSERT_EQ(merge_existing(&vmg), vma_next);
1429 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1430
1431 ASSERT_EQ(vma_next->vm_start, 0x3000);
1432 ASSERT_EQ(vma_next->vm_end, 0x8000);
1433
1434 ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1435 ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1436
1437 cleanup_mm(&mm, &vmi);
1438 return true;
1439 }
1440
1441 static bool test_vmi_prealloc_fail(void)
1442 {
1443 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1444 struct mm_struct mm = {};
1445 VMA_ITERATOR(vmi, &mm, 0);
1446 struct vma_merge_struct vmg = {
1447 .mm = &mm,
1448 .vmi = &vmi,
1449 };
1450 struct vm_area_struct *vma_prev, *vma;
1451
1452 /*
1453 * We are merging vma into prev, with vma possessing an anon_vma, which
1454 * will be duplicated. We cause the vmi preallocation to fail and assert
1455 * the duplicated anon_vma is unlinked.
1456 */
1457
1458 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1459 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1460 vma->anon_vma = &dummy_anon_vma;
1461
1462 vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1463 vmg.prev = vma_prev;
1464 vmg.middle = vma;
1465
1466 fail_prealloc = true;
1467
1468 /* This will cause the merge to fail. */
1469 ASSERT_EQ(merge_existing(&vmg), NULL);
1470 ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1471 /* We will already have assigned the anon_vma. */
1472 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1473 /* And it was both cloned and unlinked. */
1474 ASSERT_TRUE(dummy_anon_vma.was_cloned);
1475 ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1476
1477 cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1478
1479 /*
1480 * We repeat the same operation for expanding a VMA, which is what new
1481 * VMA merging ultimately uses too. This asserts that unlinking is
1482 * performed in this case too.
1483 */
1484
1485 vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1486 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1487 vma->anon_vma = &dummy_anon_vma;
1488
1489 vmg_set_range(&vmg, 0, 0x5000, 3, flags);
1490 vmg.middle = vma_prev;
1491 vmg.next = vma;
1492
1493 fail_prealloc = true;
1494 ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1495 ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1496
1497 ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1498 ASSERT_TRUE(dummy_anon_vma.was_cloned);
1499 ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1500
1501 cleanup_mm(&mm, &vmi);
1502 return true;
1503 }
1504
1505 static bool test_merge_extend(void)
1506 {
1507 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1508 struct mm_struct mm = {};
1509 VMA_ITERATOR(vmi, &mm, 0x1000);
1510 struct vm_area_struct *vma;
1511
1512 vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
1513 alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
1514
1515 /*
1516 * Extend a VMA into the gap between itself and the following VMA.
1517 * This should result in a merge.
1518 *
1519 * <->
1520 * * *
1521 *
1522 */
1523
1524 ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1525 ASSERT_EQ(vma->vm_start, 0);
1526 ASSERT_EQ(vma->vm_end, 0x4000);
1527 ASSERT_EQ(vma->vm_pgoff, 0);
1528 ASSERT_TRUE(vma_write_started(vma));
1529 ASSERT_EQ(mm.map_count, 1);
1530
1531 cleanup_mm(&mm, &vmi);
1532 return true;
1533 }
1534
1535 static bool test_copy_vma(void)
1536 {
1537 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1538 struct mm_struct mm = {};
1539 bool need_locks = false;
1540 VMA_ITERATOR(vmi, &mm, 0);
1541 struct vm_area_struct *vma, *vma_new, *vma_next;
1542
1543 /* Move backwards and do not merge. */
1544
1545 vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1546 vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1547 ASSERT_NE(vma_new, vma);
1548 ASSERT_EQ(vma_new->vm_start, 0);
1549 ASSERT_EQ(vma_new->vm_end, 0x2000);
1550 ASSERT_EQ(vma_new->vm_pgoff, 0);
1551 vma_assert_attached(vma_new);
1552
1553 cleanup_mm(&mm, &vmi);
1554
1555 /* Move a VMA into position next to another and merge the two. */
1556
1557 vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1558 vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
1559 vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1560 vma_assert_attached(vma_new);
1561
1562 ASSERT_EQ(vma_new, vma_next);
1563
1564 cleanup_mm(&mm, &vmi);
1565 return true;
1566 }
1567
1568 static bool test_expand_only_mode(void)
1569 {
1570 unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1571 struct mm_struct mm = {};
1572 VMA_ITERATOR(vmi, &mm, 0);
1573 struct vm_area_struct *vma_prev, *vma;
1574 VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);
1575
1576 /*
1577 * Place a VMA prior to the one we're expanding so we assert that we do
1578 * not erroneously try to traverse to the previous VMA even though we
1579 * have, through the use of the just_expand flag, indicated we do not
1580 * need to do so.
1581 */
1582 alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1583
1584 /*
1585 * We will be positioned at the prev VMA, but looking to expand to
1586 * 0x9000.
1587 */
1588 vma_iter_set(&vmi, 0x3000);
1589 vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1590 vmg.prev = vma_prev;
1591 vmg.just_expand = true;
1592
1593 vma = vma_merge_new_range(&vmg);
1594 ASSERT_NE(vma, NULL);
1595 ASSERT_EQ(vma, vma_prev);
1596 ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1597 ASSERT_EQ(vma->vm_start, 0x3000);
1598 ASSERT_EQ(vma->vm_end, 0x9000);
1599 ASSERT_EQ(vma->vm_pgoff, 3);
1600 ASSERT_TRUE(vma_write_started(vma));
1601 ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
1602 vma_assert_attached(vma);
1603
1604 cleanup_mm(&mm, &vmi);
1605 return true;
1606 }
1607
1608 static bool test_mmap_region_basic(void)
1609 {
1610 struct mm_struct mm = {};
1611 unsigned long addr;
1612 struct vm_area_struct *vma;
1613 VMA_ITERATOR(vmi, &mm, 0);
1614
1615 current->mm = &mm;
1616
1617 /* Map at 0x300000, length 0x3000. */
1618 addr = __mmap_region(NULL, 0x300000, 0x3000,
1619 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1620 0x300, NULL);
1621 ASSERT_EQ(addr, 0x300000);
1622
1623 /* Map at 0x250000, length 0x3000. */
1624 addr = __mmap_region(NULL, 0x250000, 0x3000,
1625 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1626 0x250, NULL);
1627 ASSERT_EQ(addr, 0x250000);
1628
1629 /* Map at 0x303000, merging to 0x300000 of length 0x6000. */
1630 addr = __mmap_region(NULL, 0x303000, 0x3000,
1631 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1632 0x303, NULL);
1633 ASSERT_EQ(addr, 0x303000);
1634
1635 /* Map at 0x24d000, merging to 0x250000 of length 0x6000. */
1636 addr = __mmap_region(NULL, 0x24d000, 0x3000,
1637 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1638 0x24d, NULL);
1639 ASSERT_EQ(addr, 0x24d000);
1640
1641 ASSERT_EQ(mm.map_count, 2);
1642
1643 for_each_vma(vmi, vma) {
1644 if (vma->vm_start == 0x300000) {
1645 ASSERT_EQ(vma->vm_end, 0x306000);
1646 ASSERT_EQ(vma->vm_pgoff, 0x300);
1647 } else if (vma->vm_start == 0x24d000) {
1648 ASSERT_EQ(vma->vm_end, 0x253000);
1649 ASSERT_EQ(vma->vm_pgoff, 0x24d);
1650 } else {
1651 ASSERT_FALSE(true);
1652 }
1653 }
1654
1655 cleanup_mm(&mm, &vmi);
1656 return true;
1657 }
1658
1659 int main(void)
1660 {
1661 int num_tests = 0, num_fail = 0;
1662
1663 maple_tree_init();
1664
1665 #define TEST(name) \
1666 do { \
1667 num_tests++; \
1668 if (!test_##name()) { \
1669 num_fail++; \
1670 fprintf(stderr, "Test " #name " FAILED\n"); \
1671 } \
1672 } while (0)
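/*
 * Each test returns true on success; any failed ASSERT_*() performs an early
 * 'return false', which is tallied as a failure here.
 */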
1673
1674 /* Very simple tests to kick the tyres. */
1675 TEST(simple_merge);
1676 TEST(simple_modify);
1677 TEST(simple_expand);
1678 TEST(simple_shrink);
1679
1680 TEST(merge_new);
1681 TEST(vma_merge_special_flags);
1682 TEST(vma_merge_with_close);
1683 TEST(vma_merge_new_with_close);
1684 TEST(merge_existing);
1685 TEST(anon_vma_non_mergeable);
1686 TEST(dup_anon_vma);
1687 TEST(vmi_prealloc_fail);
1688 TEST(merge_extend);
1689 TEST(copy_vma);
1690 TEST(expand_only_mode);
1691
1692 TEST(mmap_region_basic);
1693
1694 #undef TEST
1695
1696 printf("%d tests run, %d passed, %d failed.\n",
1697 num_tests, num_tests - num_fail, num_fail);
1698
1699 return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1700 }
1701