// SPDX-License-Identifier: GPL-2.0-or-later

#define _GNU_SOURCE
#include "../kselftest_harness.h"
#include <linux/prctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <linux/perf_event.h>
#include "vm_util.h"
#include <linux/mman.h>
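
/*
 * VMA merge selftests.
 *
 * Each test maps anonymous (or, in one case, file-backed) memory inside a
 * PROT_NONE carveout and then uses mprotect(), mremap(), fork() and KSM to
 * check in which circumstances adjacent VMAs are merged back into one. The
 * results are asserted via the PROCMAP_QUERY helpers provided by vm_util.h.
 */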

FIXTURE(merge)
{
	unsigned int page_size;
	char *carveout;
	struct procmap_fd procmap;
};

FIXTURE_SETUP(merge)
{
	self->page_size = psize();
	/* Carve out PROT_NONE region to map over. */
	self->carveout = mmap(NULL, 30 * self->page_size, PROT_NONE,
			MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(self->carveout, MAP_FAILED);
	/* Setup PROCMAP_QUERY interface. */
	ASSERT_EQ(open_self_procmap(&self->procmap), 0);
}

FIXTURE_TEARDOWN(merge)
{
	ASSERT_EQ(munmap(self->carveout, 30 * self->page_size), 0);
	ASSERT_EQ(close_procmap(&self->procmap), 0);
	/*
	 * Clear unconditionally, as some tests set this. It is no issue if this
	 * fails (KSM may be disabled for instance).
	 */
	prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
}
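
/*
 * The mprotect_*() tests below share a pattern: map a R/W region inside the
 * carveout (MAP_NORESERVE, so a missing VM_ACCOUNT flag cannot spuriously
 * block merging), split it by mprotect()ing part of it, fault in one piece so
 * it gains an anon_vma, then change protections so all pieces become
 * compatible again and assert via PROCMAP_QUERY that they merged into a
 * single VMA.
 */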

TEST_F(merge, mprotect_unfaulted_left)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * Map 10 pages of R/W memory within. MAP_NORESERVE so we don't hit
	 * merge failure due to lack of VM_ACCOUNT flag by mistake.
	 *
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[page_size], 10 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the first 5 pages read-only, splitting the VMA:
	 *
	 *      RO          RW
	 * |-----------|-----------|
	 * | unfaulted | unfaulted |
	 * |-----------|-----------|
	 */
	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the last 5 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RO          RW
	 * |-----------|-----------|
	 * | unfaulted |  faulted  |
	 * |-----------|-----------|
	 */
	ptr[5 * page_size] = 'x';
	/*
	 * Now mprotect() the RW region read-only, we should merge (though for
	 * ~15 years we did not! :):
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[5 * page_size], 5 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
}

TEST_F(merge, mprotect_unfaulted_right)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[page_size], 10 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the last 5 pages read-only, splitting the VMA:
	 *
	 *      RW          RO
	 * |-----------|-----------|
	 * | unfaulted | unfaulted |
	 * |-----------|-----------|
	 */
	ASSERT_EQ(mprotect(&ptr[5 * page_size], 5 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the first 5 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RW          RO
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 */
	ptr[0] = 'x';
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
}

TEST_F(merge, mprotect_unfaulted_both)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the first and last 3 pages read-only, splitting the VMA:
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * | unfaulted | unfaulted | unfaulted |
	 * |-----------|-----------|-----------|
	 */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the middle 3 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * | unfaulted |  faulted  | unfaulted |
	 * |-----------|-----------|-----------|
	 */
	ptr[3 * page_size] = 'x';
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
}

TEST_F(merge, mprotect_faulted_left_unfaulted_right)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the last 3 pages read-only, splitting the VMA:
	 *
	 *            RW                RO
	 * |-----------------------|-----------|
	 * |       unfaulted       | unfaulted |
	 * |-----------------------|-----------|
	 */
	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the first 6 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *            RW                RO
	 * |-----------------------|-----------|
	 * |        faulted        | unfaulted |
	 * |-----------------------|-----------|
	 */
	ptr[0] = 'x';
	/*
	 * Now make the first 3 pages read-only, splitting the VMA:
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * |  faulted  |  faulted  | unfaulted |
	 * |-----------|-----------|-----------|
	 */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
}

TEST_F(merge, mprotect_unfaulted_left_faulted_right)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr;

	/*
	 * |-----------------------|
	 * |       unfaulted       |
	 * |-----------------------|
	 */
	ptr = mmap(&carveout[2 * page_size], 9 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/*
	 * Now make the first 3 pages read-only, splitting the VMA:
	 *
	 *      RO                RW
	 * |-----------|-----------------------|
	 * | unfaulted |       unfaulted       |
	 * |-----------|-----------------------|
	 */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	/*
	 * Fault in the first of the last 6 pages so it gets an anon_vma and
	 * thus the whole VMA becomes 'faulted':
	 *
	 *      RO                RW
	 * |-----------|-----------------------|
	 * | unfaulted |        faulted        |
	 * |-----------|-----------------------|
	 */
	ptr[3 * page_size] = 'x';
	/*
	 * Now make the last 3 pages read-only, splitting the VMA:
	 *
	 *      RO          RW          RO
	 * |-----------|-----------|-----------|
	 * | unfaulted |  faulted  |  faulted  |
	 * |-----------|-----------|-----------|
	 */
	ASSERT_EQ(mprotect(&ptr[6 * page_size], 3 * page_size, PROT_READ), 0);
	/*
	 * Now mprotect() the RW region read-only, we should merge:
	 *
	 *             RO
	 * |-----------------------|
	 * |        faulted        |
	 * |-----------------------|
	 */
	ASSERT_EQ(mprotect(&ptr[3 * page_size], 3 * page_size, PROT_READ), 0);

	/* Assert that the merge succeeded using PROCMAP_QUERY. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 9 * page_size);
}

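/*
 * The forked_*() tests check that a VMA which has been through fork() does
 * not merge with a newly created neighbour. As the comments below note, even
 * unCOWing every page in the child does not remove the anon_vma chain (AVC)
 * state inherited from the parent, which is what prevents the merge.
 */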
TEST_F(merge, forked_target_vma)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	pid_t pid;
	char *ptr, *ptr2;
	int i;

	/*
	 * |-----------|
	 * | unfaulted |
	 * |-----------|
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in process.
	 *
	 * |-----------|
	 * |  faulted  |
	 * |-----------|
	 */
	ptr[0] = 'x';

	pid = fork();
	ASSERT_NE(pid, -1);

	if (pid != 0) {
		wait(NULL);
		return;
	}

	/* Child process below: */

	/* Reopen for child. */
	ASSERT_EQ(close_procmap(&self->procmap), 0);
	ASSERT_EQ(open_self_procmap(&self->procmap), 0);

	/* unCOWing everything does not cause the AVC to go away. */
	for (i = 0; i < 5 * page_size; i += page_size)
		ptr[i] = 'x';

	/*
	 * Map in adjacent VMA in child.
	 *
	 *     forked
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 */
	ptr2 = mmap(&ptr[5 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Make sure not merged. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 5 * page_size);
}

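/*
 * Same as above, but with the roles reversed: in forked_target_vma the forked
 * VMA would have been the merge target; here the forked VMA is mprotect()ed
 * to become the merge source for the adjacent unfaulted region, and the merge
 * must still be refused.
 */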
TEST_F(merge, forked_source_vma)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	pid_t pid;
	char *ptr, *ptr2;
	int i;

	/*
	 * |-----------|------------|
	 * | unfaulted | <unmapped> |
	 * |-----------|------------|
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in process.
	 *
	 * |-----------|------------|
	 * |  faulted  | <unmapped> |
	 * |-----------|------------|
	 */
	ptr[0] = 'x';

	pid = fork();
	ASSERT_NE(pid, -1);

	if (pid != 0) {
		wait(NULL);
		return;
	}

	/* Child process below: */

	/* Reopen for child. */
	ASSERT_EQ(close_procmap(&self->procmap), 0);
	ASSERT_EQ(open_self_procmap(&self->procmap), 0);

	/* unCOWing everything does not cause the AVC to go away. */
	for (i = 0; i < 5 * page_size; i += page_size)
		ptr[i] = 'x';

	/*
	 * Map in adjacent VMA in child, ptr2 after ptr, but incompatible.
	 *
	 *   forked RW      RWX
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 */
	ptr2 = mmap(&carveout[6 * page_size], 5 * page_size, PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Make sure not merged. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);

	/*
	 * Now mprotect forked region to RWX so it becomes the source for the
	 * merge to unfaulted region:
	 *
	 *  forked RWX      RWX
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 *
	 * This should NOT result in a merge, as ptr was forked.
	 */
	ASSERT_EQ(mprotect(ptr, 5 * page_size, PROT_READ | PROT_WRITE | PROT_EXEC), 0);
	/* Again, make sure not merged. */
	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);
}

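/*
 * Register a uprobe on a small backing file, map that file PROT_EXEC and then
 * mremap() parts of the mapping around so VMAs covering the probed file get
 * split, moved and merged. The assertions only require that each mremap()
 * call succeeds; the point of the test is that the kernel copes with the
 * registered uprobe across these operations.
 */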
TEST_F(merge, handle_uprobe_upon_merged_vma)
{
	const size_t attr_sz = sizeof(struct perf_event_attr);
	unsigned int page_size = self->page_size;
	const char *probe_file = "./foo";
	char *carveout = self->carveout;
	struct perf_event_attr attr;
	unsigned long type;
	void *ptr1, *ptr2;
	int fd;

	fd = open(probe_file, O_RDWR|O_CREAT, 0600);
	ASSERT_GE(fd, 0);

	ASSERT_EQ(ftruncate(fd, page_size), 0);
	if (read_sysfs("/sys/bus/event_source/devices/uprobe/type", &type) != 0) {
		SKIP(goto out, "Failed to read uprobe sysfs file, skipping");
	}

	memset(&attr, 0, attr_sz);
	attr.size = attr_sz;
	attr.type = type;
	attr.config1 = (__u64)(long)probe_file;
	attr.config2 = 0x0;

	ASSERT_GE(syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0), 0);

	ptr1 = mmap(&carveout[page_size], 10 * page_size, PROT_EXEC,
			MAP_PRIVATE | MAP_FIXED, fd, 0);
	ASSERT_NE(ptr1, MAP_FAILED);

	ptr2 = mremap(ptr1, page_size, 2 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr1 + 5 * page_size);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_NE(mremap(ptr2, page_size, page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr1), MAP_FAILED);

out:
	close(fd);
	remove(probe_file);
}

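/*
 * PR_SET_MEMORY_MERGE marks this process's eligible VMAs VM_MERGEABLE for
 * KSM. This test checks that such VMAs still merge with newly mapped,
 * otherwise-compatible neighbours: first when the flag is applied to an
 * already existing VMA, then when both mappings are created after the flag
 * was set.
 */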
TEST_F(merge, ksm_merge)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;
	int err;

	/*
	 * Map two R/W immediately adjacent to one another, they should
	 * trivially merge:
	 *
	 * |-----------|-----------|
	 * |    R/W    |    R/W    |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 */

	ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[2 * page_size], page_size,
			PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);

	/* Unmap the second half of this merged VMA. */
	ASSERT_EQ(munmap(ptr2, page_size), 0);

	/* OK, now enable global KSM merge. We clear this on test teardown. */
	err = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
	if (err == -1) {
		int errnum = errno;

		/* Only non-failure case... */
		ASSERT_EQ(errnum, EINVAL);
		/* ...but indicates we should skip. */
		SKIP(return, "KSM memory merging not supported, skipping.");
	}

	/*
	 * Now map a VMA adjacent to the existing that was just made
	 * VM_MERGEABLE, this should merge as well.
	 */
	ptr2 = mmap(&carveout[2 * page_size], page_size,
			PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);

	/* Now unmap this VMA altogether. */
	ASSERT_EQ(munmap(ptr, 2 * page_size), 0);

	/* Try the same operation as before, asserting this also merges fine. */
	ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[2 * page_size], page_size,
			PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
}

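/*
 * The mremap_*() tests move VMAs around with MREMAP_MAYMOVE | MREMAP_FIXED
 * (via the sys_mremap() helper from vm_util.h) and check which placements
 * result in a merge. Whether an unfaulted VMA may merge with a faulted
 * neighbour depends on anon_vma and page offset compatibility, as spelled out
 * in each test below.
 */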
TEST_F(merge, mremap_unfaulted_to_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;

	/*
	 * Map two distinct areas:
	 *
	 * |-----------|      |-----------|
	 * | unfaulted |      | unfaulted |
	 * |-----------|      |-----------|
	 *      ptr                ptr2
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *                    \
	 * |-----------|      /  |-----------|
	 * |  faulted  |      \  | unfaulted |
	 * |-----------|      /  |-----------|
	 *      ptr           \       ptr2
	 */
	ptr[0] = 'x';

	/*
	 * Now move ptr2 adjacent to ptr:
	 *
	 * |-----------|-----------|
	 * |  faulted  | unfaulted |
	 * |-----------|-----------|
	 *      ptr         ptr2
	 *
	 * It should merge:
	 *
	 * |----------------------|
	 * |        faulted       |
	 * |----------------------|
	 *            ptr
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
}

TEST_F(merge, mremap_unfaulted_behind_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;

	/*
	 * Map two distinct areas:
	 *
	 * |-----------|      |-----------|
	 * | unfaulted |      | unfaulted |
	 * |-----------|      |-----------|
	 *      ptr                ptr2
	 */
	ptr = mmap(&carveout[6 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *                    \
	 * |-----------|      /  |-----------|
	 * |  faulted  |      \  | unfaulted |
	 * |-----------|      /  |-----------|
	 *      ptr           \       ptr2
	 */
	ptr[0] = 'x';

	/*
	 * Now move ptr2 adjacent, but behind, ptr:
	 *
	 * |-----------|-----------|
	 * | unfaulted |  faulted  |
	 * |-----------|-----------|
	 *      ptr2        ptr
	 *
	 * It should merge:
	 *
	 * |----------------------|
	 * |        faulted       |
	 * |----------------------|
	 *      ptr2
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 10 * page_size);
}

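/*
 * Next, place an unfaulted VMA between two VMAs that were faulted
 * independently. The unfaulted region can take on its left neighbour's
 * anon_vma when merging, but the right neighbour has a different anon_vma, so
 * only ptr and ptr2 are expected to merge.
 */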
TEST_F(merge, mremap_unfaulted_between_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2, *ptr3;

	/*
	 * Map three distinct areas:
	 *
	 * |-----------|      |-----------|      |-----------|
	 * | unfaulted |      | unfaulted |      | unfaulted |
	 * |-----------|      |-----------|      |-----------|
	 *      ptr                ptr2               ptr3
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ptr3 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Offset ptr3 further away. */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr, ptr3:
	 *                    \                     \
	 * |-----------|      /  |-----------|      /  |-----------|
	 * |  faulted  |      \  | unfaulted |      \  |  faulted  |
	 * |-----------|      /  |-----------|      /  |-----------|
	 *      ptr           \       ptr2          \       ptr3
	 */
	ptr[0] = 'x';
	ptr3[0] = 'x';

	/*
	 * Move ptr3 back into place, leaving a place for ptr2:
	 *                                        \
	 * |-----------|           |-----------|  /  |-----------|
	 * |  faulted  |           |  faulted  |  \  | unfaulted |
	 * |-----------|           |-----------|  /  |-----------|
	 *      ptr                     ptr3      \       ptr2
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	/*
	 * Finally, move ptr2 into place:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  | unfaulted |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2        ptr3
	 *
	 * It should merge, but only ptr, ptr2:
	 *
	 * |-----------------------|-----------|
	 * |        faulted        |  faulted  |
	 * |-----------------------|-----------|
	 *            ptr               ptr3
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr3));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr3);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr3 + 5 * page_size);
}

TEST_F(merge, mremap_unfaulted_between_faulted_unfaulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2, *ptr3;

	/*
	 * Map three distinct areas:
	 *
	 * |-----------|      |-----------|      |-----------|
	 * | unfaulted |      | unfaulted |      | unfaulted |
	 * |-----------|      |-----------|      |-----------|
	 *      ptr                ptr2               ptr3
	 */
	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	ptr3 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Offset ptr3 further away. */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Offset ptr2 further away. */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *                    \                     \
	 * |-----------|      /  |-----------|      /  |-----------|
	 * |  faulted  |      \  | unfaulted |      \  | unfaulted |
	 * |-----------|      /  |-----------|      /  |-----------|
	 *      ptr           \       ptr2          \       ptr3
	 */
	ptr[0] = 'x';

	/*
	 * Move ptr3 back into place, leaving a place for ptr2:
	 *                                        \
	 * |-----------|           |-----------|  /  |-----------|
	 * |  faulted  |           | unfaulted |  \  | unfaulted |
	 * |-----------|           |-----------|  /  |-----------|
	 *      ptr                     ptr3      \       ptr2
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	/*
	 * Finally, move ptr2 into place:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  | unfaulted | unfaulted |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2        ptr3
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}

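/*
 * "Correctly placed" means the two faulted VMAs originate from one mapping
 * and keep vma->vm_pgoff equal to their virtual page offset (see the comment
 * in the test), so once the unfaulted gap between them is refilled all three
 * pieces are compatible and collapse back into a single VMA.
 */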
TEST_F(merge, mremap_unfaulted_between_correctly_placed_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2;

	/*
	 * Map one larger area:
	 *
	 * |-----------------------------------|
	 * |             unfaulted             |
	 * |-----------------------------------|
	 */
	ptr = mmap(&carveout[page_size], 15 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr[0] = 'x';

	/*
	 * Unmap middle:
	 *
	 * |-----------|           |-----------|
	 * |  faulted  |           |  faulted  |
	 * |-----------|           |-----------|
	 *
	 * Now the faulted areas are compatible with each other (anon_vma the
	 * same, vma->vm_pgoff equal to virtual page offset).
	 */
	ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);

	/*
	 * Map a new area, ptr2:
	 *                                        \
	 * |-----------|           |-----------|  /  |-----------|
	 * |  faulted  |           |  faulted  |  \  | unfaulted |
	 * |-----------|           |-----------|  /  |-----------|
	 *      ptr                               \       ptr2
	 */
	ptr2 = mmap(&carveout[20 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Finally, move ptr2 into place:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  | unfaulted |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}

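/*
 * Finally, shuffle three faulted pieces of what was originally one mapping in
 * and out of place with mremap(). Every step that restores adjacency with the
 * original offsets is expected to merge, and the resulting VMA extent is
 * asserted after each one.
 */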
TEST_F(merge, mremap_correct_placed_faulted)
{
	unsigned int page_size = self->page_size;
	char *carveout = self->carveout;
	struct procmap_fd *procmap = &self->procmap;
	char *ptr, *ptr2, *ptr3;

	/*
	 * Map one larger area:
	 *
	 * |-----------------------------------|
	 * |             unfaulted             |
	 * |-----------------------------------|
	 */
	ptr = mmap(&carveout[page_size], 15 * page_size, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Fault in ptr:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 */
	ptr[0] = 'x';

	/*
	 * Offset the final and middle 5 pages further away:
	 *                    \                     \
	 * |-----------|      /  |-----------|      /  |-----------|
	 * |  faulted  |      \  |  faulted  |      \  |  faulted  |
	 * |-----------|      /  |-----------|      /  |-----------|
	 *      ptr           \       ptr2          \       ptr3
	 */
	ptr3 = &ptr[10 * page_size];
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
	ASSERT_NE(ptr3, MAP_FAILED);
	ptr2 = &ptr[5 * page_size];
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Move ptr2 into its correct place:
	 *                                  \
	 * |-----------|-----------|        /  |-----------|
	 * |  faulted  |  faulted  |        \  |  faulted  |
	 * |-----------|-----------|        /  |-----------|
	 *      ptr         ptr2            \       ptr3
	 *
	 * It should merge:
	 *                                  \
	 * |-----------------------|        /  |-----------|
	 * |        faulted        |        \  |  faulted  |
	 * |-----------------------|        /  |-----------|
	 *            ptr                   \       ptr3
	 */

	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);

	/*
	 * Now move ptr out of place:
	 *                    \                     \
	 * |-----------|      /  |-----------|      /  |-----------|
	 * |  faulted  |      \  |  faulted  |      \  |  faulted  |
	 * |-----------|      /  |-----------|      /  |-----------|
	 *      ptr2          \       ptr           \       ptr3
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr + page_size * 1000);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Now move ptr back into place:
	 *                                  \
	 * |-----------|-----------|        /  |-----------|
	 * |  faulted  |  faulted  |        \  |  faulted  |
	 * |-----------|-----------|        /  |-----------|
	 *      ptr         ptr2            \       ptr3
	 *
	 * It should merge:
	 *                                  \
	 * |-----------------------|        /  |-----------|
	 * |        faulted        |        \  |  faulted  |
	 * |-----------------------|        /  |-----------|
	 *            ptr                   \       ptr3
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
	ASSERT_NE(ptr, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);

	/*
	 * Now move ptr out of place again:
	 *                    \                     \
	 * |-----------|      /  |-----------|      /  |-----------|
	 * |  faulted  |      \  |  faulted  |      \  |  faulted  |
	 * |-----------|      /  |-----------|      /  |-----------|
	 *      ptr2          \       ptr           \       ptr3
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr + page_size * 1000);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Now move ptr3 back into place:
	 *                                  \
	 * |-----------|-----------|        /  |-----------|
	 * |  faulted  |  faulted  |        \  |  faulted  |
	 * |-----------|-----------|        /  |-----------|
	 *      ptr2        ptr3            \       ptr
	 *
	 * It should merge:
	 *                                  \
	 * |-----------------------|        /  |-----------|
	 * |        faulted        |        \  |  faulted  |
	 * |-----------------------|        /  |-----------|
	 *           ptr2                   \       ptr
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &ptr2[5 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 10 * page_size);

	/*
	 * Now move ptr back into place:
	 *
	 * |-----------|-----------------------|
	 * |  faulted  |        faulted        |
	 * |-----------|-----------------------|
	 *      ptr               ptr2
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 *                  ptr
	 */
	ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
	ASSERT_NE(ptr, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);

	/*
	 * Now move ptr2 out of the way:
	 *                                        \
	 * |-----------|           |-----------|  /  |-----------|
	 * |  faulted  |           |  faulted  |  \  |  faulted  |
	 * |-----------|           |-----------|  /  |-----------|
	 *      ptr                     ptr3      \       ptr2
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
	ASSERT_NE(ptr2, MAP_FAILED);

	/*
	 * Now move it back:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  |  faulted  |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2        ptr3
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 *                  ptr
	 */
	ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
	ASSERT_NE(ptr2, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);

	/*
	 * Move ptr3 out of place:
	 *                                  \
	 * |-----------------------|        /  |-----------|
	 * |        faulted        |        \  |  faulted  |
	 * |-----------------------|        /  |-----------|
	 *            ptr                   \       ptr3
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 1000);
	ASSERT_NE(ptr3, MAP_FAILED);

	/*
	 * Now move it back:
	 *
	 * |-----------|-----------|-----------|
	 * |  faulted  |  faulted  |  faulted  |
	 * |-----------|-----------|-----------|
	 *      ptr         ptr2        ptr3
	 *
	 * It should merge:
	 *
	 * |-----------------------------------|
	 * |              faulted              |
	 * |-----------------------------------|
	 *                  ptr
	 */
	ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
			MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
	ASSERT_NE(ptr3, MAP_FAILED);

	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}

TEST_HARNESS_MAIN