1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 #define _GNU_SOURCE
4 #include "../kselftest_harness.h"
5 #include <asm-generic/mman.h> /* Force the import of the tools version. */
6 #include <assert.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <linux/limits.h>
10 #include <linux/userfaultfd.h>
11 #include <linux/fs.h>
12 #include <setjmp.h>
13 #include <signal.h>
14 #include <stdbool.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/ioctl.h>
19 #include <sys/mman.h>
20 #include <sys/syscall.h>
21 #include <sys/uio.h>
22 #include <unistd.h>
23 #include "vm_util.h"
24
25 #include "../pidfd/pidfd.h"
26
27 /*
28 * Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
29 *
30 * "If the signal occurs other than as the result of calling the abort or raise
31 * function, the behavior is undefined if the signal handler refers to any
32 * object with static storage duration other than by assigning a value to an
33 * object declared as volatile sig_atomic_t"
34 */
35 static volatile sig_atomic_t signal_jump_set;
36 static sigjmp_buf signal_jmp_buf;
37
38 /*
39 * How is the test backing the mapping being tested?
40 */
41 enum backing_type {
42 ANON_BACKED,
43 SHMEM_BACKED,
44 LOCAL_FILE_BACKED,
45 };
46
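/* Per-test state: the system page size, plus backing file path and fd where relevant. */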
47 FIXTURE(guard_regions)
48 {
49 unsigned long page_size;
50 char path[PATH_MAX];
51 int fd;
52 };
53
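/* Each variant selects the backing type used for the mappings under test. */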
54 FIXTURE_VARIANT(guard_regions)
55 {
56 enum backing_type backing;
57 };
58
59 FIXTURE_VARIANT_ADD(guard_regions, anon)
60 {
61 .backing = ANON_BACKED,
62 };
63
64 FIXTURE_VARIANT_ADD(guard_regions, shmem)
65 {
66 .backing = SHMEM_BACKED,
67 };
68
69 FIXTURE_VARIANT_ADD(guard_regions, file)
70 {
71 .backing = LOCAL_FILE_BACKED,
72 };
73
74 static bool is_anon_backed(const FIXTURE_VARIANT(guard_regions) * variant)
75 {
76 switch (variant->backing) {
77 case ANON_BACKED:
78 case SHMEM_BACKED:
79 return true;
80 default:
81 return false;
82 }
83 }
84
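/* mmap() wrapper which selects mapping flags and fd according to the backing type under test. */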
85 static void *mmap_(FIXTURE_DATA(guard_regions) * self,
86 const FIXTURE_VARIANT(guard_regions) * variant,
87 void *addr, size_t length, int prot, int extra_flags,
88 off_t offset)
89 {
90 int fd;
91 int flags = extra_flags;
92
93 switch (variant->backing) {
94 case ANON_BACKED:
95 flags |= MAP_PRIVATE | MAP_ANON;
96 fd = -1;
97 break;
98 case SHMEM_BACKED:
99 case LOCAL_FILE_BACKED:
100 flags |= MAP_SHARED;
101 fd = self->fd;
102 break;
103 default:
104 ksft_exit_fail();
105 break;
106 }
107
108 return mmap(addr, length, prot, flags, fd, offset);
109 }
110
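/* Invoke the userfaultfd() system call directly via syscall(). */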
111 static int userfaultfd(int flags)
112 {
113 return syscall(SYS_userfaultfd, flags);
114 }
115
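/* Fatal signal handler: if a test has armed the jump buffer, jump back to it. */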
116 static void handle_fatal(int c)
117 {
118 if (!signal_jump_set)
119 return;
120
121 siglongjmp(signal_jmp_buf, c);
122 }
123
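/* Invoke process_madvise() directly via syscall(), targeting the process referred to by pidfd. */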
124 static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
125 size_t n, int advice, unsigned int flags)
126 {
127 return syscall(__NR_process_madvise, pidfd, iovec, n, advice, flags);
128 }
129
130 /*
131 * Enable our signal catcher and try to read/write the specified buffer. The
132 * return value indicates whether the read/write succeeds without a fatal
133 * signal.
134 */
135 static bool try_access_buf(char *ptr, bool write)
136 {
137 bool failed;
138
139 /* Tell signal handler to jump back here on fatal signal. */
140 signal_jump_set = true;
141 /* If a fatal signal arose, we will jump back here and failed is set. */
142 failed = sigsetjmp(signal_jmp_buf, 0) != 0;
143
144 if (!failed) {
145 if (write)
146 *ptr = 'x';
147 else
148 FORCE_READ(ptr);
149 }
150
151 signal_jump_set = false;
152 return !failed;
153 }
154
155 /* Try and read from a buffer, return true if no fatal signal. */
156 static bool try_read_buf(char *ptr)
157 {
158 return try_access_buf(ptr, false);
159 }
160
161 /* Try and write to a buffer, return true if no fatal signal. */
162 static bool try_write_buf(char *ptr)
163 {
164 return try_access_buf(ptr, true);
165 }
166
167 /*
168 * Try and BOTH read from AND write to a buffer, return true if BOTH operations
169 * succeed.
170 */
171 static bool try_read_write_buf(char *ptr)
172 {
173 return try_read_buf(ptr) && try_write_buf(ptr);
174 }
175
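/* Install handle_fatal() as the SIGSEGV handler so tests can catch faults. */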
176 static void setup_sighandler(void)
177 {
178 struct sigaction act = {
179 .sa_handler = &handle_fatal,
180 .sa_flags = SA_NODEFER,
181 };
182
183 sigemptyset(&act.sa_mask);
184 if (sigaction(SIGSEGV, &act, NULL))
185 ksft_exit_fail_perror("sigaction");
186 }
187
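/* Restore default SIGSEGV handling. */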
188 static void teardown_sighandler(void)
189 {
190 struct sigaction act = {
191 .sa_handler = SIG_DFL,
192 .sa_flags = SA_NODEFER,
193 };
194
195 sigemptyset(&act.sa_mask);
196 sigaction(SIGSEGV, &act, NULL);
197 }
198
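/* Create a temporary test file under the given path prefix; returns its fd and writes the generated path into path. */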
199 static int open_file(const char *prefix, char *path)
200 {
201 int fd;
202
203 snprintf(path, PATH_MAX, "%sguard_regions_test_file_XXXXXX", prefix);
204 fd = mkstemp(path);
205 if (fd < 0)
206 ksft_exit_fail_perror("mkstemp");
207
208 return fd;
209 }
210
211 /* Establish a varying pattern in a buffer. */
212 static void set_pattern(char *ptr, size_t num_pages, size_t page_size)
213 {
214 size_t i;
215
216 for (i = 0; i < num_pages; i++) {
217 char *ptr2 = &ptr[i * page_size];
218
219 memset(ptr2, 'a' + (i % 26), page_size);
220 }
221 }
222
223 /*
224 * Check that a buffer contains the pattern set by set_pattern(), starting at a
225 * page offset of pgoff within the buffer.
226 */
227 static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size,
228 size_t pgoff)
229 {
230 size_t i;
231
232 for (i = 0; i < num_pages * page_size; i++) {
233 size_t offset = pgoff * page_size + i;
234 char actual = ptr[offset];
235 char expected = 'a' + ((offset / page_size) % 26);
236
237 if (actual != expected)
238 return false;
239 }
240
241 return true;
242 }
243
244 /* Check that a buffer contains the pattern set by set_pattern(). */
245 static bool check_pattern(char *ptr, size_t num_pages, size_t page_size)
246 {
247 return check_pattern_offset(ptr, num_pages, page_size, 0);
248 }
249
250 /* Determine if a buffer contains only repetitions of a specified char. */
251 static bool is_buf_eq(char *buf, size_t size, char chr)
252 {
253 size_t i;
254
255 for (i = 0; i < size; i++) {
256 if (buf[i] != chr)
257 return false;
258 }
259
260 return true;
261 }
262
263 FIXTURE_SETUP(guard_regions)
264 {
265 self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
266 setup_sighandler();
267
268 switch (variant->backing) {
269 case ANON_BACKED:
270 return;
271 case LOCAL_FILE_BACKED:
272 self->fd = open_file("", self->path);
273 break;
274 case SHMEM_BACKED:
275 self->fd = memfd_create(self->path, 0);
276 break;
277 }
278
279 /* We truncate the file to at least 100 pages; tests can modify this as needed. */
280 ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0);
281 };
282
283 FIXTURE_TEARDOWN_PARENT(guard_regions)
284 {
285 teardown_sighandler();
286
287 if (variant->backing == ANON_BACKED)
288 return;
289
290 if (self->fd >= 0)
291 close(self->fd);
292
293 if (self->path[0] != '\0')
294 unlink(self->path);
295 }
296
297 TEST_F(guard_regions, basic)
298 {
299 const unsigned long NUM_PAGES = 10;
300 const unsigned long page_size = self->page_size;
301 char *ptr;
302 int i;
303
304 ptr = mmap_(self, variant, NULL, NUM_PAGES * page_size,
305 PROT_READ | PROT_WRITE, 0, 0);
306 ASSERT_NE(ptr, MAP_FAILED);
307
308 /* Trivially assert we can touch the first page. */
309 ASSERT_TRUE(try_read_write_buf(ptr));
310
311 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
312
313 /* Establish that 1st page SIGSEGV's. */
314 ASSERT_FALSE(try_read_write_buf(ptr));
315
316 /* Ensure we can touch everything else. */
317 for (i = 1; i < NUM_PAGES; i++) {
318 char *curr = &ptr[i * page_size];
319
320 ASSERT_TRUE(try_read_write_buf(curr));
321 }
322
323 /* Establish a guard page at the end of the mapping. */
324 ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
325 MADV_GUARD_INSTALL), 0);
326
327 /* Check that both guard pages result in SIGSEGV. */
328 ASSERT_FALSE(try_read_write_buf(ptr));
329 ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
330
331 /* Remove the first guard page. */
332 ASSERT_FALSE(madvise(ptr, page_size, MADV_GUARD_REMOVE));
333
334 /* Make sure we can touch it. */
335 ASSERT_TRUE(try_read_write_buf(ptr));
336
337 /* Remove the last guard page. */
338 ASSERT_FALSE(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
339 MADV_GUARD_REMOVE));
340
341 /* Make sure we can touch it. */
342 ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
343
344 /*
345 * Test setting a _range_ of pages, namely the first 3. The first of
346 * these will already have been faulted in, so this also tests that we
347 * can install guard pages over backed pages.
348 */
349 ASSERT_EQ(madvise(ptr, 3 * page_size, MADV_GUARD_INSTALL), 0);
350
351 /* Make sure they are all guard pages. */
352 for (i = 0; i < 3; i++) {
353 char *curr = &ptr[i * page_size];
354
355 ASSERT_FALSE(try_read_write_buf(curr));
356 }
357
358 /* Make sure the rest are not. */
359 for (i = 3; i < NUM_PAGES; i++) {
360 char *curr = &ptr[i * page_size];
361
362 ASSERT_TRUE(try_read_write_buf(curr));
363 }
364
365 /* Remove guard pages. */
366 ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
367
368 /* Now make sure we can touch everything. */
369 for (i = 0; i < NUM_PAGES; i++) {
370 char *curr = &ptr[i * page_size];
371
372 ASSERT_TRUE(try_read_write_buf(curr));
373 }
374
375 /*
376 * Now remove all guard pages, make sure we don't remove existing
377 * entries.
378 */
379 ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);
380
381 for (i = 0; i < NUM_PAGES * page_size; i += page_size) {
382 char chr = ptr[i];
383
384 ASSERT_EQ(chr, 'x');
385 }
386
387 ASSERT_EQ(munmap(ptr, NUM_PAGES * page_size), 0);
388 }
389
390 /* Assert that operations applied across multiple VMAs work as expected. */
391 TEST_F(guard_regions, multi_vma)
392 {
393 const unsigned long page_size = self->page_size;
394 char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
395 int i;
396
397 /* Reserve a 100 page region over which we can install VMAs. */
398 ptr_region = mmap_(self, variant, NULL, 100 * page_size,
399 PROT_NONE, 0, 0);
400 ASSERT_NE(ptr_region, MAP_FAILED);
401
402 /* Place a VMA of 10 pages size at the start of the region. */
403 ptr1 = mmap_(self, variant, ptr_region, 10 * page_size,
404 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
405 ASSERT_NE(ptr1, MAP_FAILED);
406
407 /* Place a VMA of 5 pages size 50 pages into the region. */
408 ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
409 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
410 ASSERT_NE(ptr2, MAP_FAILED);
411
412 /* Place a VMA of 20 pages size at the end of the region. */
413 ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size,
414 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
415 ASSERT_NE(ptr3, MAP_FAILED);
416
417 /* Unmap gaps. */
418 ASSERT_EQ(munmap(&ptr_region[10 * page_size], 40 * page_size), 0);
419 ASSERT_EQ(munmap(&ptr_region[55 * page_size], 25 * page_size), 0);
420
421 /*
422 * We end up with VMAs like this:
423 *
424 * 0 10 .. 50 55 .. 80 100
425 * [---] [---] [---]
426 */
427
428 /*
429 * Now mark the whole range as guard pages and make sure all VMAs are as
430 * such.
431 */
432
433 /*
434 * madvise() is certifiable and lets you perform operations over gaps,
435 * everything works, but it indicates an error and errno is set to
436 * ENOMEM. Also if anything runs out of memory errno is set to
437 * ENOMEM. You are meant to guess which is which.
438 */
439 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1);
440 ASSERT_EQ(errno, ENOMEM);
441
442 for (i = 0; i < 10; i++) {
443 char *curr = &ptr1[i * page_size];
444
445 ASSERT_FALSE(try_read_write_buf(curr));
446 }
447
448 for (i = 0; i < 5; i++) {
449 char *curr = &ptr2[i * page_size];
450
451 ASSERT_FALSE(try_read_write_buf(curr));
452 }
453
454 for (i = 0; i < 20; i++) {
455 char *curr = &ptr3[i * page_size];
456
457 ASSERT_FALSE(try_read_write_buf(curr));
458 }
459
460 /* Now remove guard pages over the range and assert the opposite. */
461
462 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), -1);
463 ASSERT_EQ(errno, ENOMEM);
464
465 for (i = 0; i < 10; i++) {
466 char *curr = &ptr1[i * page_size];
467
468 ASSERT_TRUE(try_read_write_buf(curr));
469 }
470
471 for (i = 0; i < 5; i++) {
472 char *curr = &ptr2[i * page_size];
473
474 ASSERT_TRUE(try_read_write_buf(curr));
475 }
476
477 for (i = 0; i < 20; i++) {
478 char *curr = &ptr3[i * page_size];
479
480 ASSERT_TRUE(try_read_write_buf(curr));
481 }
482
483 /* Now map incompatible VMAs in the gaps. */
484 ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size,
485 PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
486 ASSERT_NE(ptr, MAP_FAILED);
487 ptr = mmap_(self, variant, &ptr_region[55 * page_size], 25 * page_size,
488 PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
489 ASSERT_NE(ptr, MAP_FAILED);
490
491 /*
492 * We end up with VMAs like this:
493 *
494 * 0 10 .. 50 55 .. 80 100
495 * [---][xxxx][---][xxxx][---]
496 *
497 * Where 'x' signifies VMAs that cannot be merged with those adjacent to
498 * them.
499 */
500
501 /* Multiple VMAs adjacent to one another should result in no error. */
502 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), 0);
503 for (i = 0; i < 100; i++) {
504 char *curr = &ptr_region[i * page_size];
505
506 ASSERT_FALSE(try_read_write_buf(curr));
507 }
508 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), 0);
509 for (i = 0; i < 100; i++) {
510 char *curr = &ptr_region[i * page_size];
511
512 ASSERT_TRUE(try_read_write_buf(curr));
513 }
514
515 /* Cleanup. */
516 ASSERT_EQ(munmap(ptr_region, 100 * page_size), 0);
517 }
518
519 /*
520 * Assert that batched operations performed using process_madvise() work as
521 * expected.
522 */
523 TEST_F(guard_regions, process_madvise)
524 {
525 const unsigned long page_size = self->page_size;
526 char *ptr_region, *ptr1, *ptr2, *ptr3;
527 ssize_t count;
528 struct iovec vec[6];
529
530 /* Reserve region to map over. */
531 ptr_region = mmap_(self, variant, NULL, 100 * page_size,
532 PROT_NONE, 0, 0);
533 ASSERT_NE(ptr_region, MAP_FAILED);
534
535 /*
536 * 10 pages offset 1 page into reserve region. We MAP_POPULATE so that
537 * page table entries are already present, which tests that guard marker
538 * installation correctly overwrites existing entries.
539 */
540 ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size,
541 PROT_READ | PROT_WRITE, MAP_FIXED | MAP_POPULATE, 0);
542 ASSERT_NE(ptr1, MAP_FAILED);
543 /* We want guard markers at start/end of each VMA. */
544 vec[0].iov_base = ptr1;
545 vec[0].iov_len = page_size;
546 vec[1].iov_base = &ptr1[9 * page_size];
547 vec[1].iov_len = page_size;
548
549 /* 5 pages offset 50 pages into reserve region. */
550 ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
551 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
552 ASSERT_NE(ptr2, MAP_FAILED);
553 vec[2].iov_base = ptr2;
554 vec[2].iov_len = page_size;
555 vec[3].iov_base = &ptr2[4 * page_size];
556 vec[3].iov_len = page_size;
557
558 /* 20 pages offset 79 pages into reserve region. */
559 ptr3 = mmap_(self, variant, &ptr_region[79 * page_size], 20 * page_size,
560 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
561 ASSERT_NE(ptr3, MAP_FAILED);
562 vec[4].iov_base = ptr3;
563 vec[4].iov_len = page_size;
564 vec[5].iov_base = &ptr3[19 * page_size];
565 vec[5].iov_len = page_size;
566
567 /* Free surrounding VMAs. */
568 ASSERT_EQ(munmap(ptr_region, page_size), 0);
569 ASSERT_EQ(munmap(&ptr_region[11 * page_size], 39 * page_size), 0);
570 ASSERT_EQ(munmap(&ptr_region[55 * page_size], 24 * page_size), 0);
571 ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);
572
573 /* Now guard in one step. */
574 count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_INSTALL, 0);
575
576 /* OK we don't have permission to do this, skip. */
577 if (count == -1 && errno == EPERM)
578 SKIP(return, "No process_madvise() permissions, try running as root.\n");
579
580 /* Returns the number of bytes advised. */
581 ASSERT_EQ(count, 6 * page_size);
582
583 /* Now make sure the guarding was applied. */
584
585 ASSERT_FALSE(try_read_write_buf(ptr1));
586 ASSERT_FALSE(try_read_write_buf(&ptr1[9 * page_size]));
587
588 ASSERT_FALSE(try_read_write_buf(ptr2));
589 ASSERT_FALSE(try_read_write_buf(&ptr2[4 * page_size]));
590
591 ASSERT_FALSE(try_read_write_buf(ptr3));
592 ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));
593
594 /* Now do the same with unguard... */
595 count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_REMOVE, 0);
596
597 /* ...and everything should now succeed. */
598
599 ASSERT_TRUE(try_read_write_buf(ptr1));
600 ASSERT_TRUE(try_read_write_buf(&ptr1[9 * page_size]));
601
602 ASSERT_TRUE(try_read_write_buf(ptr2));
603 ASSERT_TRUE(try_read_write_buf(&ptr2[4 * page_size]));
604
605 ASSERT_TRUE(try_read_write_buf(ptr3));
606 ASSERT_TRUE(try_read_write_buf(&ptr3[19 * page_size]));
607
608 /* Cleanup. */
609 ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
610 ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
611 ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
612 }
613
614 /* Assert that unmapping ranges does not leave guard markers behind. */
615 TEST_F(guard_regions, munmap)
616 {
617 const unsigned long page_size = self->page_size;
618 char *ptr, *ptr_new1, *ptr_new2;
619
620 ptr = mmap_(self, variant, NULL, 10 * page_size,
621 PROT_READ | PROT_WRITE, 0, 0);
622 ASSERT_NE(ptr, MAP_FAILED);
623
624 /* Guard first and last pages. */
625 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
626 ASSERT_EQ(madvise(&ptr[9 * page_size], page_size, MADV_GUARD_INSTALL), 0);
627
628 /* Assert that they are guarded. */
629 ASSERT_FALSE(try_read_write_buf(ptr));
630 ASSERT_FALSE(try_read_write_buf(&ptr[9 * page_size]));
631
632 /* Unmap them. */
633 ASSERT_EQ(munmap(ptr, page_size), 0);
634 ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0);
635
636 /* Map over them. */
637 ptr_new1 = mmap_(self, variant, ptr, page_size, PROT_READ | PROT_WRITE,
638 MAP_FIXED, 0);
639 ASSERT_NE(ptr_new1, MAP_FAILED);
640 ptr_new2 = mmap_(self, variant, &ptr[9 * page_size], page_size,
641 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
642 ASSERT_NE(ptr_new2, MAP_FAILED);
643
644 /* Assert that they are now not guarded. */
645 ASSERT_TRUE(try_read_write_buf(ptr_new1));
646 ASSERT_TRUE(try_read_write_buf(ptr_new2));
647
648 /* Cleanup. */
649 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
650 }
651
652 /* Assert that mprotect() operations have no bearing on guard markers. */
653 TEST_F(guard_regions, mprotect)
654 {
655 const unsigned long page_size = self->page_size;
656 char *ptr;
657 int i;
658
659 ptr = mmap_(self, variant, NULL, 10 * page_size,
660 PROT_READ | PROT_WRITE, 0, 0);
661 ASSERT_NE(ptr, MAP_FAILED);
662
663 /* Guard the middle of the range. */
664 ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
665 MADV_GUARD_INSTALL), 0);
666
667 /* Assert that it is indeed guarded. */
668 ASSERT_FALSE(try_read_write_buf(&ptr[5 * page_size]));
669 ASSERT_FALSE(try_read_write_buf(&ptr[6 * page_size]));
670
671 /* Now make these pages read-only. */
672 ASSERT_EQ(mprotect(&ptr[5 * page_size], 2 * page_size, PROT_READ), 0);
673
674 /* Make sure the range is still guarded. */
675 ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
676 ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
677
678 /* Make sure we can guard again without issue. */
679 ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
680 MADV_GUARD_INSTALL), 0);
681
682 /* Make sure the range is, yet again, still guarded. */
683 ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
684 ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
685
686 /* Now unguard the whole range. */
687 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
688
689 /* Make sure the whole range is readable. */
690 for (i = 0; i < 10; i++) {
691 char *curr = &ptr[i * page_size];
692
693 ASSERT_TRUE(try_read_buf(curr));
694 }
695
696 /* Cleanup. */
697 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
698 }
699
700 /* Split and merge VMAs and make sure guard pages still behave. */
701 TEST_F(guard_regions, split_merge)
702 {
703 const unsigned long page_size = self->page_size;
704 char *ptr, *ptr_new;
705 int i;
706
707 ptr = mmap_(self, variant, NULL, 10 * page_size,
708 PROT_READ | PROT_WRITE, 0, 0);
709 ASSERT_NE(ptr, MAP_FAILED);
710
711 /* Guard the whole range. */
712 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
713
714 /* Make sure the whole range is guarded. */
715 for (i = 0; i < 10; i++) {
716 char *curr = &ptr[i * page_size];
717
718 ASSERT_FALSE(try_read_write_buf(curr));
719 }
720
721 /* Now unmap some pages in the range so we split. */
722 ASSERT_EQ(munmap(&ptr[2 * page_size], page_size), 0);
723 ASSERT_EQ(munmap(&ptr[5 * page_size], page_size), 0);
724 ASSERT_EQ(munmap(&ptr[8 * page_size], page_size), 0);
725
726 /* Make sure the remaining ranges are guarded post-split. */
727 for (i = 0; i < 2; i++) {
728 char *curr = &ptr[i * page_size];
729
730 ASSERT_FALSE(try_read_write_buf(curr));
731 }
732 for (i = 2; i < 5; i++) {
733 char *curr = &ptr[i * page_size];
734
735 ASSERT_FALSE(try_read_write_buf(curr));
736 }
737 for (i = 6; i < 8; i++) {
738 char *curr = &ptr[i * page_size];
739
740 ASSERT_FALSE(try_read_write_buf(curr));
741 }
742 for (i = 9; i < 10; i++) {
743 char *curr = &ptr[i * page_size];
744
745 ASSERT_FALSE(try_read_write_buf(curr));
746 }
747
748 /* Now map them again - the unmap will have cleared the guards. */
749 ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size,
750 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
751 ASSERT_NE(ptr_new, MAP_FAILED);
752 ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size,
753 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
754 ASSERT_NE(ptr_new, MAP_FAILED);
755 ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size,
756 PROT_READ | PROT_WRITE, MAP_FIXED, 0);
757 ASSERT_NE(ptr_new, MAP_FAILED);
758
759 /* Now make sure guard pages are established. */
760 for (i = 0; i < 10; i++) {
761 char *curr = &ptr[i * page_size];
762 bool result = try_read_write_buf(curr);
763 bool expect_true = i == 2 || i == 5 || i == 8;
764
765 ASSERT_TRUE(expect_true ? result : !result);
766 }
767
768 /* Now guard everything again. */
769 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
770
771 /* Make sure the whole range is guarded. */
772 for (i = 0; i < 10; i++) {
773 char *curr = &ptr[i * page_size];
774
775 ASSERT_FALSE(try_read_write_buf(curr));
776 }
777
778 /* Now split the range into three. */
779 ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
780 ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
781
782 /* Make sure the whole range is guarded for read. */
783 for (i = 0; i < 10; i++) {
784 char *curr = &ptr[i * page_size];
785
786 ASSERT_FALSE(try_read_buf(curr));
787 }
788
789 /* Now reset protection bits so we merge the whole thing. */
790 ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
791 ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
792 PROT_READ | PROT_WRITE), 0);
793
794 /* Make sure the whole range is still guarded. */
795 for (i = 0; i < 10; i++) {
796 char *curr = &ptr[i * page_size];
797
798 ASSERT_FALSE(try_read_write_buf(curr));
799 }
800
801 /* Split range into 3 again... */
802 ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
803 ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);
804
805 /* ...and unguard the whole range. */
806 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
807
808 /* Make sure the whole range is remedied for read. */
809 for (i = 0; i < 10; i++) {
810 char *curr = &ptr[i * page_size];
811
812 ASSERT_TRUE(try_read_buf(curr));
813 }
814
815 /* Merge them again. */
816 ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
817 ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
818 PROT_READ | PROT_WRITE), 0);
819
820 /* Now ensure the merged range is remedied for read/write. */
821 for (i = 0; i < 10; i++) {
822 char *curr = &ptr[i * page_size];
823
824 ASSERT_TRUE(try_read_write_buf(curr));
825 }
826
827 /* Cleanup. */
828 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
829 }
830
831 /* Assert that MADV_DONTNEED does not remove guard markers. */
832 TEST_F(guard_regions, dontneed)
833 {
834 const unsigned long page_size = self->page_size;
835 char *ptr;
836 int i;
837
838 ptr = mmap_(self, variant, NULL, 10 * page_size,
839 PROT_READ | PROT_WRITE, 0, 0);
840 ASSERT_NE(ptr, MAP_FAILED);
841
842 /* Back the whole range. */
843 for (i = 0; i < 10; i++) {
844 char *curr = &ptr[i * page_size];
845
846 *curr = 'y';
847 }
848
849 /* Guard every other page. */
850 for (i = 0; i < 10; i += 2) {
851 char *curr = &ptr[i * page_size];
852 int res = madvise(curr, page_size, MADV_GUARD_INSTALL);
853
854 ASSERT_EQ(res, 0);
855 }
856
857 /* Indicate that we don't need any of the range. */
858 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0);
859
860 /* Check to ensure guard markers are still in place. */
861 for (i = 0; i < 10; i++) {
862 char *curr = &ptr[i * page_size];
863 bool result = try_read_buf(curr);
864
865 if (i % 2 == 0) {
866 ASSERT_FALSE(result);
867 } else {
868 ASSERT_TRUE(result);
869 switch (variant->backing) {
870 case ANON_BACKED:
871 /* If anon, then we get a zero page. */
872 ASSERT_EQ(*curr, '\0');
873 break;
874 default:
875 /* Otherwise, we get the file data. */
876 ASSERT_EQ(*curr, 'y');
877 break;
878 }
879 }
880
881 /* Now write... */
882 result = try_write_buf(&ptr[i * page_size]);
883
884 /* ...and make sure we get the same result. */
885 ASSERT_TRUE(i % 2 != 0 ? result : !result);
886 }
887
888 /* Cleanup. */
889 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
890 }
891
892 /* Assert that mlock()'ed pages work correctly with guard markers. */
893 TEST_F(guard_regions, mlock)
894 {
895 const unsigned long page_size = self->page_size;
896 char *ptr;
897 int i;
898
899 ptr = mmap_(self, variant, NULL, 10 * page_size,
900 PROT_READ | PROT_WRITE, 0, 0);
901 ASSERT_NE(ptr, MAP_FAILED);
902
903 /* Populate. */
904 for (i = 0; i < 10; i++) {
905 char *curr = &ptr[i * page_size];
906
907 *curr = 'y';
908 }
909
910 /* Lock. */
911 ASSERT_EQ(mlock(ptr, 10 * page_size), 0);
912
913 /* Now try to guard, should fail with EINVAL. */
914 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1);
915 ASSERT_EQ(errno, EINVAL);
916
917 /* OK unlock. */
918 ASSERT_EQ(munlock(ptr, 10 * page_size), 0);
919
920 /* Guard first half of range, should now succeed. */
921 ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
922
923 /* Make sure guard works. */
924 for (i = 0; i < 10; i++) {
925 char *curr = &ptr[i * page_size];
926 bool result = try_read_write_buf(curr);
927
928 if (i < 5) {
929 ASSERT_FALSE(result);
930 } else {
931 ASSERT_TRUE(result);
932 ASSERT_EQ(*curr, 'x');
933 }
934 }
935
936 /*
937 * Now lock the latter part of the range. We can't lock the guard pages,
938 * as this would result in the pages being populated and the guarding
939 * would cause this to error out.
940 */
941 ASSERT_EQ(mlock(&ptr[5 * page_size], 5 * page_size), 0);
942
943 /*
944 * Now remove guard pages, we permit mlock()'d ranges to have guard
945 * pages removed as it is a non-destructive operation.
946 */
947 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
948
949 /* Now check that no guard pages remain. */
950 for (i = 0; i < 10; i++) {
951 char *curr = &ptr[i * page_size];
952
953 ASSERT_TRUE(try_read_write_buf(curr));
954 }
955
956 /* Cleanup. */
957 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
958 }
959
960 /*
961 * Assert that moving, extending and shrinking memory via mremap() retains
962 * guard markers where possible.
963 *
964 * - Moving a mapping alone should retain markers as they are.
965 */
966 TEST_F(guard_regions, mremap_move)
967 {
968 const unsigned long page_size = self->page_size;
969 char *ptr, *ptr_new;
970
971 /* Map 5 pages. */
972 ptr = mmap_(self, variant, NULL, 5 * page_size,
973 PROT_READ | PROT_WRITE, 0, 0);
974 ASSERT_NE(ptr, MAP_FAILED);
975
976 /* Place guard markers at both ends of the 5 page span. */
977 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
978 ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
979
980 /* Make sure the guard pages are in effect. */
981 ASSERT_FALSE(try_read_write_buf(ptr));
982 ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
983
984 /* Map a new region we will move this range into. Doing this ensures
985 * that we have reserved a range to map into.
986 */
987 ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0);
988 ASSERT_NE(ptr_new, MAP_FAILED);
989
990 ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size,
991 MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new), ptr_new);
992
993 /* Make sure the guard markers are retained. */
994 ASSERT_FALSE(try_read_write_buf(ptr_new));
995 ASSERT_FALSE(try_read_write_buf(&ptr_new[4 * page_size]));
996
997 /*
998 * Clean up - we only need reference the new pointer as we overwrote the
999 * PROT_NONE range and moved the existing one.
1000 */
1001 munmap(ptr_new, 5 * page_size);
1002 }
1003
1004 /*
1005 * Assert that moving, extending and shrinking memory via mremap() retains
1006 * guard markers where possible.
1007 *
1008 * Expanding should retain guard pages, though the trailing guard page will no
1009 * longer sit at the end of the expanded mapping. The user will have to remove
1010 * guard pages manually to fix up (as they would with a PROT_NONE mapping).
1011 */
1012 TEST_F(guard_regions, mremap_expand)
1013 {
1014 const unsigned long page_size = self->page_size;
1015 char *ptr, *ptr_new;
1016
1017 /* Map 10 pages... */
1018 ptr = mmap_(self, variant, NULL, 10 * page_size,
1019 PROT_READ | PROT_WRITE, 0, 0);
1020 ASSERT_NE(ptr, MAP_FAILED);
1021 /* ...But unmap the last 5 so we can ensure we can expand into them. */
1022 ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);
1023
1024 /* Place guard markers at both ends of the 5 page span. */
1025 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1026 ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1027
1028 /* Make sure the guarding is in effect. */
1029 ASSERT_FALSE(try_read_write_buf(ptr));
1030 ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1031
1032 /* Now expand to 10 pages. */
1033 ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0);
1034 ASSERT_NE(ptr, MAP_FAILED);
1035
1036 /*
1037 * Make sure the guard markers are retained in their original positions.
1038 */
1039 ASSERT_FALSE(try_read_write_buf(ptr));
1040 ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1041
1042 /* Reserve a region which we can move to and expand into. */
1043 ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0);
1044 ASSERT_NE(ptr_new, MAP_FAILED);
1045
1046 /* Now move and expand into it. */
1047 ptr = mremap(ptr, 10 * page_size, 20 * page_size,
1048 MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new);
1049 ASSERT_EQ(ptr, ptr_new);
1050
1051 /*
1052 * Again, make sure the guard markers are retained in their original positions.
1053 */
1054 ASSERT_FALSE(try_read_write_buf(ptr));
1055 ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1056
1057 /*
1058 * A real user would have to remove guard markers, but would reasonably
1059 * expect all characteristics of the mapping to be retained, including
1060 * guard markers.
1061 */
1062
1063 /* Cleanup. */
1064 munmap(ptr, 20 * page_size);
1065 }
1066 /*
1067 * Assert that moving, extending and shrinking memory via mremap() retains
1068 * guard markers where possible.
1069 *
1070 * Shrinking will result in markers that are shrunk over being removed. Again,
1071 * if the user were using a PROT_NONE mapping they'd have to manually fix this
1072 * up also so this is OK.
1073 */
1074 TEST_F(guard_regions, mremap_shrink)
1075 {
1076 const unsigned long page_size = self->page_size;
1077 char *ptr;
1078 int i;
1079
1080 /* Map 5 pages. */
1081 ptr = mmap_(self, variant, NULL, 5 * page_size,
1082 PROT_READ | PROT_WRITE, 0, 0);
1083 ASSERT_NE(ptr, MAP_FAILED);
1084
1085 /* Place guard markers at both ends of the 5 page span. */
1086 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1087 ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
1088
1089 /* Make sure the guarding is in effect. */
1090 ASSERT_FALSE(try_read_write_buf(ptr));
1091 ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
1092
1093 /* Now shrink to 3 pages. */
1094 ptr = mremap(ptr, 5 * page_size, 3 * page_size, MREMAP_MAYMOVE);
1095 ASSERT_NE(ptr, MAP_FAILED);
1096
1097 /* We expect the guard marker at the start to be retained... */
1098 ASSERT_FALSE(try_read_write_buf(ptr));
1099
1100 /* ...But remaining pages will not have guard markers. */
1101 for (i = 1; i < 3; i++) {
1102 char *curr = &ptr[i * page_size];
1103
1104 ASSERT_TRUE(try_read_write_buf(curr));
1105 }
1106
1107 /*
1108 * As with expansion, a real user would have to remove guard pages and
1109 * fixup. But you'd have to do similar manual things with PROT_NONE
1110 * mappings too.
1111 */
1112
1113 /*
1114 * If we expand back to the original size, the end marker will, of
1115 * course, no longer be present.
1116 */
1117 ptr = mremap(ptr, 3 * page_size, 5 * page_size, 0);
1118 ASSERT_NE(ptr, MAP_FAILED);
1119
1120 /* Again, we expect the guard marker at the start to be retained... */
1121 ASSERT_FALSE(try_read_write_buf(ptr));
1122
1123 /* ...But remaining pages will not have guard markers. */
1124 for (i = 1; i < 5; i++) {
1125 char *curr = &ptr[i * page_size];
1126
1127 ASSERT_TRUE(try_read_write_buf(curr));
1128 }
1129
1130 /* Cleanup. */
1131 munmap(ptr, 5 * page_size);
1132 }
1133
1134 /*
1135 * Assert that forking a process with VMAs that do not have VM_WIPEONFORK set
1136 * retain guard pages.
1137 */
1138 TEST_F(guard_regions, fork)
1139 {
1140 const unsigned long page_size = self->page_size;
1141 char *ptr;
1142 pid_t pid;
1143 int i;
1144
1145 /* Map 10 pages. */
1146 ptr = mmap_(self, variant, NULL, 10 * page_size,
1147 PROT_READ | PROT_WRITE, 0, 0);
1148 ASSERT_NE(ptr, MAP_FAILED);
1149
1150 /* Establish guard pages in the first 5 pages. */
1151 ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1152
1153 pid = fork();
1154 ASSERT_NE(pid, -1);
1155 if (!pid) {
1156 /* This is the child process now. */
1157
1158 /* Assert that the guarding is in effect. */
1159 for (i = 0; i < 10; i++) {
1160 char *curr = &ptr[i * page_size];
1161 bool result = try_read_write_buf(curr);
1162
1163 ASSERT_TRUE(i >= 5 ? result : !result);
1164 }
1165
1166 /* Now unguard the range. */
1167 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1168
1169 exit(0);
1170 }
1171
1172 /* Parent process. */
1173
1174 /* Parent simply waits on child. */
1175 waitpid(pid, NULL, 0);
1176
1177 /* Child unguard does not impact parent page table state. */
1178 for (i = 0; i < 10; i++) {
1179 char *curr = &ptr[i * page_size];
1180 bool result = try_read_write_buf(curr);
1181
1182 ASSERT_TRUE(i >= 5 ? result : !result);
1183 }
1184
1185 /* Cleanup. */
1186 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1187 }
1188
1189 /*
1190 * Assert expected behaviour after we fork populated ranges of anonymous memory
1191 * and then guard and unguard the range.
1192 */
1193 TEST_F(guard_regions, fork_cow)
1194 {
1195 const unsigned long page_size = self->page_size;
1196 char *ptr;
1197 pid_t pid;
1198 int i;
1199
1200 if (variant->backing != ANON_BACKED)
1201 SKIP(return, "CoW only supported on anon mappings");
1202
1203 /* Map 10 pages. */
1204 ptr = mmap_(self, variant, NULL, 10 * page_size,
1205 PROT_READ | PROT_WRITE, 0, 0);
1206 ASSERT_NE(ptr, MAP_FAILED);
1207
1208 /* Populate range. */
1209 for (i = 0; i < 10 * page_size; i++) {
1210 char chr = 'a' + (i % 26);
1211
1212 ptr[i] = chr;
1213 }
1214
1215 pid = fork();
1216 ASSERT_NE(pid, -1);
1217 if (!pid) {
1218 /* This is the child process now. */
1219
1220 /* Ensure the range is as expected. */
1221 for (i = 0; i < 10 * page_size; i++) {
1222 char expected = 'a' + (i % 26);
1223 char actual = ptr[i];
1224
1225 ASSERT_EQ(actual, expected);
1226 }
1227
1228 /* Establish guard pages across the whole range. */
1229 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1230 /* Remove it. */
1231 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1232
1233 /*
1234 * By removing the guard pages, the page tables will be
1235 * cleared. Assert that we are looking at the zero page now.
1236 */
1237 for (i = 0; i < 10 * page_size; i++) {
1238 char actual = ptr[i];
1239
1240 ASSERT_EQ(actual, '\0');
1241 }
1242
1243 exit(0);
1244 }
1245
1246 /* Parent process. */
1247
1248 /* Parent simply waits on child. */
1249 waitpid(pid, NULL, 0);
1250
1251 /* Ensure the range is unchanged in parent anon range. */
1252 for (i = 0; i < 10 * page_size; i++) {
1253 char expected = 'a' + (i % 26);
1254 char actual = ptr[i];
1255
1256 ASSERT_EQ(actual, expected);
1257 }
1258
1259 /* Cleanup. */
1260 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1261 }
1262
1263 /*
1264 * Assert that forking a process with VMAs that do have VM_WIPEONFORK set
1265 * behave as expected.
1266 */
1267 TEST_F(guard_regions, fork_wipeonfork)
1268 {
1269 const unsigned long page_size = self->page_size;
1270 char *ptr;
1271 pid_t pid;
1272 int i;
1273
1274 if (variant->backing != ANON_BACKED)
1275 SKIP(return, "Wipe on fork only supported on anon mappings");
1276
1277 /* Map 10 pages. */
1278 ptr = mmap_(self, variant, NULL, 10 * page_size,
1279 PROT_READ | PROT_WRITE, 0, 0);
1280 ASSERT_NE(ptr, MAP_FAILED);
1281
1282 /* Mark wipe on fork. */
1283 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_WIPEONFORK), 0);
1284
1285 /* Guard the first 5 pages. */
1286 ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
1287
1288 pid = fork();
1289 ASSERT_NE(pid, -1);
1290 if (!pid) {
1291 /* This is the child process now. */
1292
1293 /* Guard will have been wiped. */
1294 for (i = 0; i < 10; i++) {
1295 char *curr = &ptr[i * page_size];
1296
1297 ASSERT_TRUE(try_read_write_buf(curr));
1298 }
1299
1300 exit(0);
1301 }
1302
1303 /* Parent process. */
1304
1305 waitpid(pid, NULL, 0);
1306
1307 /* Guard markers should be in effect. */
1308 for (i = 0; i < 10; i++) {
1309 char *curr = &ptr[i * page_size];
1310 bool result = try_read_write_buf(curr);
1311
1312 ASSERT_TRUE(i >= 5 ? result : !result);
1313 }
1314
1315 /* Cleanup. */
1316 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1317 }
1318
1319 /* Ensure that MADV_FREE retains guard entries as expected. */
1320 TEST_F(guard_regions, lazyfree)
1321 {
1322 const unsigned long page_size = self->page_size;
1323 char *ptr;
1324 int i;
1325
1326 if (variant->backing != ANON_BACKED)
1327 SKIP(return, "MADV_FREE only supported on anon mappings");
1328
1329 /* Map 10 pages. */
1330 ptr = mmap_(self, variant, NULL, 10 * page_size,
1331 PROT_READ | PROT_WRITE, 0, 0);
1332 ASSERT_NE(ptr, MAP_FAILED);
1333
1334 /* Guard range. */
1335 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1336
1337 /* Ensure guarded. */
1338 for (i = 0; i < 10; i++) {
1339 char *curr = &ptr[i * page_size];
1340
1341 ASSERT_FALSE(try_read_write_buf(curr));
1342 }
1343
1344 /* Lazyfree range. */
1345 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_FREE), 0);
1346
1347 /* This should leave the guard markers in place. */
1348 for (i = 0; i < 10; i++) {
1349 char *curr = &ptr[i * page_size];
1350
1351 ASSERT_FALSE(try_read_write_buf(curr));
1352 }
1353
1354 /* Cleanup. */
1355 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1356 }
1357
1358 /* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
1359 TEST_F(guard_regions, populate)
1360 {
1361 const unsigned long page_size = self->page_size;
1362 char *ptr;
1363
1364 /* Map 10 pages. */
1365 ptr = mmap_(self, variant, NULL, 10 * page_size,
1366 PROT_READ | PROT_WRITE, 0, 0);
1367 ASSERT_NE(ptr, MAP_FAILED);
1368
1369 /* Guard range. */
1370 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1371
1372 /* Populate read should error out... */
1373 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1);
1374 ASSERT_EQ(errno, EFAULT);
1375
1376 /* ...as should populate write. */
1377 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1);
1378 ASSERT_EQ(errno, EFAULT);
1379
1380 /* Cleanup. */
1381 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1382 }
1383
1384 /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
1385 TEST_F(guard_regions, cold_pageout)
1386 {
1387 const unsigned long page_size = self->page_size;
1388 char *ptr;
1389 int i;
1390
1391 /* Map 10 pages. */
1392 ptr = mmap_(self, variant, NULL, 10 * page_size,
1393 PROT_READ | PROT_WRITE, 0, 0);
1394 ASSERT_NE(ptr, MAP_FAILED);
1395
1396 /* Guard range. */
1397 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1398
1399 /* Ensure guarded. */
1400 for (i = 0; i < 10; i++) {
1401 char *curr = &ptr[i * page_size];
1402
1403 ASSERT_FALSE(try_read_write_buf(curr));
1404 }
1405
1406 /* Now mark cold. This should have no impact on guard markers. */
1407 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_COLD), 0);
1408
1409 /* Should remain guarded. */
1410 for (i = 0; i < 10; i++) {
1411 char *curr = &ptr[i * page_size];
1412
1413 ASSERT_FALSE(try_read_write_buf(curr));
1414 }
1415
1416 /* OK, now page out. This should equally have no effect on markers. */
1417 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1418
1419 /* Should remain guarded. */
1420 for (i = 0; i < 10; i++) {
1421 char *curr = &ptr[i * page_size];
1422
1423 ASSERT_FALSE(try_read_write_buf(curr));
1424 }
1425
1426 /* Cleanup. */
1427 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1428 }
1429
1430 /* Ensure that guard pages do not break userfaultfd. */
1431 TEST_F(guard_regions, uffd)
1432 {
1433 const unsigned long page_size = self->page_size;
1434 int uffd;
1435 char *ptr;
1436 int i;
1437 struct uffdio_api api = {
1438 .api = UFFD_API,
1439 .features = 0,
1440 };
1441 struct uffdio_register reg;
1442 struct uffdio_range range;
1443
1444 if (!is_anon_backed(variant))
1445 SKIP(return, "uffd only works on anon backing");
1446
1447 /* Set up uffd. */
1448 uffd = userfaultfd(0);
1449 if (uffd == -1) {
1450 switch (errno) {
1451 case EPERM:
1452 SKIP(return, "No userfaultfd permissions, try running as root.");
1453 break;
1454 case ENOSYS:
1455 SKIP(return, "userfaultfd is not supported/not enabled.");
1456 break;
1457 default:
1458 ksft_exit_fail_msg("userfaultfd failed with %s\n",
1459 strerror(errno));
1460 break;
1461 }
1462 }
1463
1464 ASSERT_NE(uffd, -1);
1465
1466 ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0);
1467
1468 /* Map 10 pages. */
1469 ptr = mmap_(self, variant, NULL, 10 * page_size,
1470 PROT_READ | PROT_WRITE, 0, 0);
1471 ASSERT_NE(ptr, MAP_FAILED);
1472
1473 /* Register the range with uffd. */
1474 range.start = (unsigned long)ptr;
1475 range.len = 10 * page_size;
1476 reg.range = range;
1477 reg.mode = UFFDIO_REGISTER_MODE_MISSING;
1478 ASSERT_EQ(ioctl(uffd, UFFDIO_REGISTER, &reg), 0);
1479
1480 /* Guard the range. This should not trigger the uffd. */
1481 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
1482
1483 /* The guarding should behave as usual with no uffd intervention. */
1484 for (i = 0; i < 10; i++) {
1485 char *curr = &ptr[i * page_size];
1486
1487 ASSERT_FALSE(try_read_write_buf(curr));
1488 }
1489
1490 /* Cleanup. */
1491 ASSERT_EQ(ioctl(uffd, UFFDIO_UNREGISTER, &range), 0);
1492 close(uffd);
1493 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1494 }
1495
1496 /*
1497 * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we
1498 * aggressively read-ahead, then install guard regions and assert that it
1499 * behaves correctly.
1500 *
1501 * We page out using MADV_PAGEOUT before checking guard regions so we drop page
1502 * cache folios, meaning we maximise the possibility of some broken readahead.
1503 */
1504 TEST_F(guard_regions, madvise_sequential)
1505 {
1506 char *ptr;
1507 int i;
1508 const unsigned long page_size = self->page_size;
1509
1510 if (variant->backing == ANON_BACKED)
1511 SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed");
1512
1513 ptr = mmap_(self, variant, NULL, 10 * page_size,
1514 PROT_READ | PROT_WRITE, 0, 0);
1515 ASSERT_NE(ptr, MAP_FAILED);
1516
1517 /* Establish a pattern of data in the file. */
1518 set_pattern(ptr, 10, page_size);
1519 ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1520
1521 /* Mark it as being accessed sequentially. */
1522 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0);
1523
1524 /* Mark every other page a guard page. */
1525 for (i = 0; i < 10; i += 2) {
1526 char *ptr2 = &ptr[i * page_size];
1527
1528 ASSERT_EQ(madvise(ptr2, page_size, MADV_GUARD_INSTALL), 0);
1529 }
1530
1531 /* Now page it out. */
1532 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1533
1534 /* Now make sure pages are as expected. */
1535 for (i = 0; i < 10; i++) {
1536 char *chrp = &ptr[i * page_size];
1537
1538 if (i % 2 == 0) {
1539 bool result = try_read_write_buf(chrp);
1540
1541 ASSERT_FALSE(result);
1542 } else {
1543 ASSERT_EQ(*chrp, 'a' + i);
1544 }
1545 }
1546
1547 /* Now remove guard pages. */
1548 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1549
1550 /* Now make sure all data is as expected. */
1551 if (!check_pattern(ptr, 10, page_size))
1552 ASSERT_TRUE(false);
1553
1554 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1555 }
1556
1557 /*
1558 * Check that file-backed mappings implement guard regions with MAP_PRIVATE
1559 * correctly.
1560 */
1561 TEST_F(guard_regions, map_private)
1562 {
1563 const unsigned long page_size = self->page_size;
1564 char *ptr_shared, *ptr_private;
1565 int i;
1566
1567 if (variant->backing == ANON_BACKED)
1568 SKIP(return, "MAP_PRIVATE test specific to file-backed");
1569
1570 ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
1571 ASSERT_NE(ptr_shared, MAP_FAILED);
1572
1573 /* Manually mmap(), do not use mmap_() wrapper so we can force MAP_PRIVATE. */
1574 ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0);
1575 ASSERT_NE(ptr_private, MAP_FAILED);
1576
1577 /* Set pattern in shared mapping. */
1578 set_pattern(ptr_shared, 10, page_size);
1579
1580 /* Install guard regions in every other page in the shared mapping. */
1581 for (i = 0; i < 10; i += 2) {
1582 char *ptr = &ptr_shared[i * page_size];
1583
1584 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1585 }
1586
1587 for (i = 0; i < 10; i++) {
1588 /* Every even shared page should be guarded. */
1589 ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
1590 /* Private mappings should always be readable. */
1591 ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
1592 }
1593
1594 /* Install guard regions in every other page in the private mapping. */
1595 for (i = 0; i < 10; i += 2) {
1596 char *ptr = &ptr_private[i * page_size];
1597
1598 ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
1599 }
1600
1601 for (i = 0; i < 10; i++) {
1602 /* Every even shared page should be guarded. */
1603 ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
1604 /* Every odd private page should be guarded. */
1605 ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
1606 }
1607
1608 /* Remove guard regions from shared mapping. */
1609 ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0);
1610
1611 for (i = 0; i < 10; i++) {
1612 /* Shared mappings should always be readable. */
1613 ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1614 /* Every even private page should be guarded. */
1615 ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
1616 }
1617
1618 /* Remove guard regions from private mapping. */
1619 ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
1620
1621 for (i = 0; i < 10; i++) {
1622 /* Shared mappings should always be readable. */
1623 ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1624 /* Private mappings should always be readable. */
1625 ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
1626 }
1627
1628 /* Ensure patterns are intact. */
1629 ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size));
1630 ASSERT_TRUE(check_pattern(ptr_private, 10, page_size));
1631
1632 /* Now write out every other page to MAP_PRIVATE. */
1633 for (i = 0; i < 10; i += 2) {
1634 char *ptr = &ptr_private[i * page_size];
1635
1636 memset(ptr, 'a' + i, page_size);
1637 }
1638
1639 /*
1640 * At this point the mapping is:
1641 *
1642 * 0123456789
1643 * SPSPSPSPSP
1644 *
1645 * Where S = shared, P = private mappings.
1646 */
1647
1648 /* Now mark the beginning of the mapping guarded. */
1649 ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0);
1650
1651 /*
1652 * This renders the mapping:
1653 *
1654 * 0123456789
1655 * xxxxxPSPSP
1656 */
1657
1658 for (i = 0; i < 10; i++) {
1659 char *ptr = &ptr_private[i * page_size];
1660
1661 /* Ensure guard regions as expected. */
1662 ASSERT_EQ(try_read_buf(ptr), i >= 5);
1663 /* The shared mapping should always succeed. */
1664 ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
1665 }
1666
1667 /* Remove the guard regions altogether. */
1668 ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
1669
1670 /*
1671 *
1672 * We now expect the mapping to be:
1673 *
1674 * 0123456789
1675 * SSSSSPSPSP
1676 *
1677 * As we removed guard regions, the private pages from the first 5 will
1678 * have been zapped, so on fault will reestablish the shared mapping.
1679 */
1680
1681 for (i = 0; i < 10; i++) {
1682 char *ptr = &ptr_private[i * page_size];
1683
1684 /*
1685 * Assert that shared mappings in the MAP_PRIVATE mapping match
1686 * the shared mapping.
1687 */
1688 if (i < 5 || i % 2 == 0) {
1689 char *ptr_s = &ptr_shared[i * page_size];
1690
1691 ASSERT_EQ(memcmp(ptr, ptr_s, page_size), 0);
1692 continue;
1693 }
1694
1695 /* Everything else is a private mapping. */
1696 ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i));
1697 }
1698
1699 ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0);
1700 ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0);
1701 }
1702
1703 /* Test that guard regions established over a read-only mapping function correctly. */
1704 TEST_F(guard_regions, readonly_file)
1705 {
1706 const unsigned long page_size = self->page_size;
1707 char *ptr;
1708 int i;
1709
1710 if (variant->backing != LOCAL_FILE_BACKED)
1711 SKIP(return, "Read-only test specific to file-backed");
1712
1713 /* Map shared so we can populate with pattern, populate it, unmap. */
1714 ptr = mmap_(self, variant, NULL, 10 * page_size,
1715 PROT_READ | PROT_WRITE, 0, 0);
1716 ASSERT_NE(ptr, MAP_FAILED);
1717 set_pattern(ptr, 10, page_size);
1718 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1719 /* Close the fd so we can re-open read-only. */
1720 ASSERT_EQ(close(self->fd), 0);
1721
1722 /* Re-open read-only. */
1723 self->fd = open(self->path, O_RDONLY);
1724 ASSERT_NE(self->fd, -1);
1725 /* Re-map read-only. */
1726 ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
1727 ASSERT_NE(ptr, MAP_FAILED);
1728
1729 /* Mark every other page guarded. */
1730 for (i = 0; i < 10; i += 2) {
1731 char *ptr_pg = &ptr[i * page_size];
1732
1733 ASSERT_EQ(madvise(ptr_pg, page_size, MADV_GUARD_INSTALL), 0);
1734 }
1735
1736 /* Assert that the guard regions are in place. */
1737 for (i = 0; i < 10; i++) {
1738 char *ptr_pg = &ptr[i * page_size];
1739
1740 ASSERT_EQ(try_read_buf(ptr_pg), i % 2 != 0);
1741 }
1742
1743 /* Remove guard regions. */
1744 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
1745
1746 /* Ensure the data is as expected. */
1747 ASSERT_TRUE(check_pattern(ptr, 10, page_size));
1748
1749 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1750 }
1751
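/* Assert that fault-around on file-backed mappings does not disturb guard regions. */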
1752 TEST_F(guard_regions, fault_around)
1753 {
1754 const unsigned long page_size = self->page_size;
1755 char *ptr;
1756 int i;
1757
1758 if (variant->backing == ANON_BACKED)
1759 SKIP(return, "Fault-around test specific to file-backed");
1760
1761 ptr = mmap_(self, variant, NULL, 10 * page_size,
1762 PROT_READ | PROT_WRITE, 0, 0);
1763 ASSERT_NE(ptr, MAP_FAILED);
1764
1765 /* Establish a pattern in the backing file. */
1766 set_pattern(ptr, 10, page_size);
1767
1768 /*
1769 * Now drop it from the page cache so we get major faults when next we
1770 * map it.
1771 */
1772 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
1773
1774 /* Unmap and remap 'to be sure'. */
1775 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1776 ptr = mmap_(self, variant, NULL, 10 * page_size,
1777 PROT_READ | PROT_WRITE, 0, 0);
1778 ASSERT_NE(ptr, MAP_FAILED);
1779
1780 /* Now make every even page guarded. */
1781 for (i = 0; i < 10; i += 2) {
1782 char *ptr_p = &ptr[i * page_size];
1783
1784 ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1785 }
1786
1787 /* Now fault in every odd page. This should trigger fault-around. */
1788 for (i = 1; i < 10; i += 2) {
1789 char *ptr_p = &ptr[i * page_size];
1790
1791 ASSERT_TRUE(try_read_buf(ptr_p));
1792 }
1793
1794 /* Finally, ensure that guard regions are intact as expected. */
1795 for (i = 0; i < 10; i++) {
1796 char *ptr_p = &ptr[i * page_size];
1797
1798 ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
1799 }
1800
1801 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1802 }
1803
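/* Assert that truncating the backing file leaves guard regions intact. */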
1804 TEST_F(guard_regions, truncation)
1805 {
1806 const unsigned long page_size = self->page_size;
1807 char *ptr;
1808 int i;
1809
1810 if (variant->backing == ANON_BACKED)
1811 SKIP(return, "Truncation test specific to file-backed");
1812
1813 ptr = mmap_(self, variant, NULL, 10 * page_size,
1814 PROT_READ | PROT_WRITE, 0, 0);
1815 ASSERT_NE(ptr, MAP_FAILED);
1816
1817 /*
1818 * Establish a pattern in the backing file, just so there is data
1819 * there.
1820 */
1821 set_pattern(ptr, 10, page_size);
1822
1823 /* Now make every even page guarded. */
1824 for (i = 0; i < 10; i += 2) {
1825 char *ptr_p = &ptr[i * page_size];
1826
1827 ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
1828 }
1829
1830 /* Now assert things are as expected. */
1831 for (i = 0; i < 10; i++) {
1832 char *ptr_p = &ptr[i * page_size];
1833
1834 ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1835 }
1836
1837 /* Now truncate to the actually used size (the file was initialised to 100 pages). */
1838 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1839
1840 /* Here the guard regions will remain intact. */
1841 for (i = 0; i < 10; i++) {
1842 char *ptr_p = &ptr[i * page_size];
1843
1844 ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1845 }
1846
1847 /* Now truncate to half the size, then truncate again to the full size. */
1848 ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0);
1849 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
1850
1851 /* Again, guard pages will remain intact. */
1852 for (i = 0; i < 10; i++) {
1853 char *ptr_p = &ptr[i * page_size];
1854
1855 ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
1856 }
1857
1858 ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
1859 }
1860
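/* Assert that hole-punching the backing file (MADV_REMOVE) leaves guard regions intact. */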
1861 TEST_F(guard_regions, hole_punch)
1862 {
1863 const unsigned long page_size = self->page_size;
1864 char *ptr;
1865 int i;
1866
1867 if (variant->backing == ANON_BACKED)
		SKIP(return, "Hole punch test specific to file-backed");

	/* Establish pattern in mapping. */
	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	set_pattern(ptr, 10, page_size);

	/* Install a guard region in the middle of the mapping. */
	ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
			  MADV_GUARD_INSTALL), 0);

	/*
	 * The buffer will now be:
	 *
	 * 0123456789
	 * ***xxxx***
	 *
	 * Where * is data and x is the guard region.
	 */

	/* Ensure established. */
	for (i = 0; i < 10; i++) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
	}

	/* Now hole punch the guarded region. */
	ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
			  MADV_REMOVE), 0);

	/* Ensure guard regions remain. */
	for (i = 0; i < 10; i++) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
	}

	/* Now remove guard region throughout. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);

	/* Check that the pattern exists in non-hole punched region. */
	ASSERT_TRUE(check_pattern(ptr, 3, page_size));
	/* Check that hole punched region is zeroed. */
	ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0'));
	/* Check that the pattern exists in the remainder of the file. */
	ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7));

	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/*
 * Ensure that a memfd works correctly with guard regions: that we can
 * write-seal it, map it read-only, still establish guard regions within the
 * mapping, then remove those guard regions and have everything behave
 * correctly.
 */
TEST_F(guard_regions, memfd_write_seal)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	if (variant->backing != SHMEM_BACKED)
		SKIP(return, "memfd write seal test specific to shmem");

	/* OK, we need a memfd, so close existing one. */
	ASSERT_EQ(close(self->fd), 0);

	/* Create and truncate memfd. */
	self->fd = memfd_create("guard_regions_memfd_seals_test",
				MFD_ALLOW_SEALING);
	ASSERT_NE(self->fd, -1);
	ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);

	/* Map, set pattern, unmap. */
	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	set_pattern(ptr, 10, page_size);
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
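	/*
	 * Note that F_SEAL_WRITE can only be applied once no writable shared
	 * mappings of the memfd remain, hence the unmap above.
	 */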

	/* Write-seal the memfd. */
	ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0);

	/* Now map the memfd readonly. */
	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Ensure pattern is as expected. */
	ASSERT_TRUE(check_pattern(ptr, 10, page_size));

	/* Now make every even page guarded. */
	for (i = 0; i < 10; i += 2) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
	}

	/* Now assert things are as expected. */
	for (i = 0; i < 10; i++) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
	}

	/* Now remove guard regions. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);

	/* Ensure pattern is as expected. */
	ASSERT_TRUE(check_pattern(ptr, 10, page_size));

	/* Ensure write seal intact. */
	for (i = 0; i < 10; i++) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_FALSE(try_write_buf(ptr_p));
	}

	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/*
 * Guard regions may now be established in read-only anonymous mappings. While
 * this probably has no practical use, for the sake of thoroughness test that
 * guard regions also function correctly in a mapping of the anonymous zero
 * page.
 */
TEST_F(guard_regions, anon_zeropage)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	if (!is_anon_backed(variant))
		SKIP(return, "anon zero page test specific to anon/shmem");

	/* Obtain a read-only i.e. anon zero page mapping. */
	ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Now make every even page guarded. */
	for (i = 0; i < 10; i += 2) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
	}

	/* Now assert things are as expected. */
	for (i = 0; i < 10; i++) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
	}

	/* Now remove all guard regions. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);

	/* Now assert things are as expected. */
	for (i = 0; i < 10; i++) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_TRUE(try_read_buf(ptr_p));
	}

	/* Ensure the mapping is zero-filled, i.e. backed by the zero page. */
	ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0'));

	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/*
 * Assert that /proc/$pid/pagemap correctly identifies guard region ranges.
 */
TEST_F(guard_regions, pagemap)
{
	const unsigned long page_size = self->page_size;
	int proc_fd;
	char *ptr;
	int i;

	proc_fd = open("/proc/self/pagemap", O_RDONLY);
	ASSERT_NE(proc_fd, -1);

	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Read from pagemap, and assert no guard regions are detected. */
	for (i = 0; i < 10; i++) {
		char *ptr_p = &ptr[i * page_size];
		unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
		unsigned long masked = entry & PM_GUARD_REGION;

		ASSERT_EQ(masked, 0);
	}

	/* Install a guard region in every other page. */
	for (i = 0; i < 10; i += 2) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
	}

	/* Re-read from pagemap, and assert guard regions are detected. */
	for (i = 0; i < 10; i++) {
		char *ptr_p = &ptr[i * page_size];
		unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
		unsigned long masked = entry & PM_GUARD_REGION;

		ASSERT_EQ(masked, i % 2 == 0 ? PM_GUARD_REGION : 0);
	}

	ASSERT_EQ(close(proc_fd), 0);
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/*
 * Assert that PAGEMAP_SCAN correctly reports guard region ranges.
 */
TEST_F(guard_regions, pagemap_scan)
{
	const unsigned long page_size = self->page_size;
	struct page_region pm_regs[10];
	struct pm_scan_arg pm_scan_args = {
		.size = sizeof(struct pm_scan_arg),
		.category_anyof_mask = PAGE_IS_GUARD,
		.return_mask = PAGE_IS_GUARD,
		.vec = (long)&pm_regs,
		.vec_len = ARRAY_SIZE(pm_regs),
	};
	int proc_fd, i;
	char *ptr;

	proc_fd = open("/proc/self/pagemap", O_RDONLY);
	ASSERT_NE(proc_fd, -1);

	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);

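	/*
	 * Scan the range before any guard regions are installed. The ioctl
	 * returns the count of located regions, so expect zero matches here.
	 */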
	pm_scan_args.start = (long)ptr;
	pm_scan_args.end = (long)ptr + 10 * page_size;
	ASSERT_EQ(ioctl(proc_fd, PAGEMAP_SCAN, &pm_scan_args), 0);
	ASSERT_EQ(pm_scan_args.walk_end, (long)ptr + 10 * page_size);

	/* Install a guard region in every other page. */
	for (i = 0; i < 10; i += 2) {
		char *ptr_p = &ptr[i * page_size];

		ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
	}

	/*
	 * Assert ioctl() returns the count of located regions, where each
	 * region spans every other page within the range of 10 pages.
	 */
	ASSERT_EQ(ioctl(proc_fd, PAGEMAP_SCAN, &pm_scan_args), 5);
	ASSERT_EQ(pm_scan_args.walk_end, (long)ptr + 10 * page_size);

	/* Assert the regions returned by the scan are as expected. */
	for (i = 0; i < 5; i++) {
		long ptr_p = (long)&ptr[2 * i * page_size];

		ASSERT_EQ(pm_regs[i].start, ptr_p);
		ASSERT_EQ(pm_regs[i].end, ptr_p + page_size);
		ASSERT_EQ(pm_regs[i].categories, PAGE_IS_GUARD);
	}

	ASSERT_EQ(close(proc_fd), 0);
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

TEST_HARNESS_MAIN