Lines Matching +full:use +full:- +full:guard +full:- +full:pages

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 #include <asm-generic/mman.h> /* Force the import of the tools version. */
39 * anything with it in order to trigger a read page fault. We therefore must use
82 switch (variant->backing) { in is_anon_backed()
99 switch (variant->backing) { in mmap_()
102 fd = -1; in mmap_()
107 fd = self->fd; in mmap_()
271 self->page_size = (unsigned long)sysconf(_SC_PAGESIZE); in FIXTURE_SETUP()
274 switch (variant->backing) { in FIXTURE_SETUP()
278 self->fd = open_file("", self->path); in FIXTURE_SETUP()
281 self->fd = memfd_create(self->path, 0); in FIXTURE_SETUP()
285 /* We truncate the file to at least 100 pages; tests can modify as needed. */ in FIXTURE_SETUP()
286 ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0); in FIXTURE_SETUP()
293 if (variant->backing == ANON_BACKED) in FIXTURE_TEARDOWN_PARENT()
296 if (self->fd >= 0) in FIXTURE_TEARDOWN_PARENT()
297 close(self->fd); in FIXTURE_TEARDOWN_PARENT()
299 if (self->path[0] != '\0') in FIXTURE_TEARDOWN_PARENT()
300 unlink(self->path); in FIXTURE_TEARDOWN_PARENT()
306 const unsigned long page_size = self->page_size; in TEST_F()
329 /* Establish a guard page at the end of the mapping. */ in TEST_F()
330 ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size, in TEST_F()
333 /* Check that both guard pages result in SIGSEGV. */ in TEST_F()
335 ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size])); in TEST_F()
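
Checking that an access faults without killing the test is done by catching SIGSEGV and jumping back out of the access. A minimal sketch of that pattern, similar in spirit to the selftest's try_read_write_buf() helper (the names here are illustrative, not the selftest's own):

#include <setjmp.h>
#include <signal.h>
#include <stdbool.h>
#include <string.h>

static sigjmp_buf fault_jmp;

static void on_segv(int sig)
{
	(void)sig;
	siglongjmp(fault_jmp, 1);	/* Bail out of the faulting access. */
}

/* Returns true if the byte at ptr could be written, false if it faulted. */
static bool try_write_byte(char *ptr)
{
	struct sigaction sa, old;
	bool ok = false;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_segv;
	sigaction(SIGSEGV, &sa, &old);

	/* sigsetjmp(..., 1) saves the signal mask so siglongjmp() restores it. */
	if (!sigsetjmp(fault_jmp, 1)) {
		*(volatile char *)ptr = 'x';
		ok = true;
	}

	sigaction(SIGSEGV, &old, NULL);
	return ok;
}
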
337 /* Remove the first guard page. */ in TEST_F()
343 /* Remove the last guard page. */ in TEST_F()
344 ASSERT_FALSE(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size, in TEST_F()
348 ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size])); in TEST_F()
351 * Test setting a _range_ of pages, namely the first 3. The first of in TEST_F()
352 * these will be faulted in, so this also tests that we can install guard in TEST_F()
353 * pages over backed pages. in TEST_F()
357 /* Make sure they are all guard pages. */ in TEST_F()
371 /* Remove guard pages. */ in TEST_F()
382 * Now remove all guard pages, make sure we don't remove existing in TEST_F()
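
For reference, the same install/remove flow outside the test harness, as a minimal standalone sketch. It assumes a kernel with guard-region support (6.13+); the fallback #defines are only needed if your headers predate the feature and should be checked against asm-generic/mman-common.h before relying on them:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_GUARD_INSTALL	/* Fallback for older headers; verify values. */
#define MADV_GUARD_INSTALL 102
#define MADV_GUARD_REMOVE  103
#endif

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	char *ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
			 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (ptr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Install guard markers over the first and last pages. */
	if (madvise(ptr, page_size, MADV_GUARD_INSTALL) ||
	    madvise(&ptr[9 * page_size], page_size, MADV_GUARD_INSTALL)) {
		perror("MADV_GUARD_INSTALL");	/* e.g. unsupported kernel */
		return 1;
	}

	/* Touching ptr[0] or ptr[9 * page_size] here would raise SIGSEGV. */

	/* Remove the markers; the pages become ordinary demand-zero pages. */
	if (madvise(ptr, page_size, MADV_GUARD_REMOVE) ||
	    madvise(&ptr[9 * page_size], page_size, MADV_GUARD_REMOVE)) {
		perror("MADV_GUARD_REMOVE");
		return 1;
	}

	ptr[0] = 'x';
	ptr[9 * page_size] = 'y';

	munmap(ptr, 10 * page_size);
	return 0;
}
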
399 const unsigned long page_size = self->page_size; in TEST_F()
408 /* Place a VMA 10 pages in size at the start of the region. */ in TEST_F()
413 /* Place a VMA 5 pages in size, 50 pages into the region. */ in TEST_F()
418 /* Place a VMA 20 pages in size at the end of the region. */ in TEST_F()
431 * [---] [---] [---] in TEST_F()
435 * Now mark the whole range as guard pages and make sure all VMAs are as in TEST_F()
442 * -ENOMEM. Also if anything runs out of memory it is set to in TEST_F()
443 * -ENOMEM. You are meant to guess which is which. in TEST_F()
445 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1); in TEST_F()
466 /* Now remove guard pages over the range and assert the opposite. */ in TEST_F()
468 ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), -1); in TEST_F()
501 * [---][xxxx][---][xxxx][---] in TEST_F()
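
What the -ENOMEM dance above boils down to, as a sketch. It assumes page_size and the guard-region madvise values from the earlier sketch, plus errno.h:

	/* Three pages with an unmapped hole punched in the middle. */
	char *ptr = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
			 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	munmap(&ptr[page_size], page_size);

	errno = 0;
	int ret = madvise(ptr, 3 * page_size, MADV_GUARD_INSTALL);
	(void)ret;

	/*
	 * ret == -1 with errno == ENOMEM because of the gap, yet the two pages
	 * that are mapped have still had guard markers installed - exactly the
	 * behaviour the assertions above depend on.
	 */
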
531 const unsigned long page_size = self->page_size; in TEST_F()
542 * 10 pages offset 1 page into reserve region. We MAP_POPULATE so we in TEST_F()
549 /* We want guard markers at start/end of each VMA. */ in TEST_F()
555 /* 5 pages offset 50 pages into reserve region. */ in TEST_F()
564 /* 20 pages offset 79 pages into reserve region. */ in TEST_F()
579 /* Now guard in one step. */ in TEST_F()
583 if (count == -1 && errno == EPERM) in TEST_F()
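
The "guard in one step" above uses process_madvise() to batch several ranges into a single call. A rough sketch of the same idea aimed at the current process, using raw syscall numbers; ptr_a and ptr_b stand in for two existing mappings, and errno.h/unistd.h are assumed. Some kernels refuse this with EPERM, which is why the selftest skips in that case:

	#include <sys/syscall.h>	/* SYS_pidfd_open, SYS_process_madvise */
	#include <sys/uio.h>		/* struct iovec */

	int pidfd = syscall(SYS_pidfd_open, getpid(), 0);
	struct iovec vec[2] = {
		{ .iov_base = ptr_a,             .iov_len = page_size },
		{ .iov_base = &ptr_b[page_size], .iov_len = page_size },
	};
	ssize_t done = syscall(SYS_process_madvise, pidfd, vec, 2,
			       MADV_GUARD_INSTALL, 0);

	if (done == -1 && errno == EPERM) {
		/* Not permitted to madvise this way; the selftest skips here too. */
	} else {
		/* On success, done is the number of bytes advised. */
	}
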
620 /* Assert that unmapping ranges does not leave guard markers behind. */
623 const unsigned long page_size = self->page_size; in TEST_F()
630 /* Guard first and last pages. */ in TEST_F()
658 /* Assert that mprotect() operations have no bearing on guard markers. */
661 const unsigned long page_size = self->page_size; in TEST_F()
669 /* Guard the middle of the range. */ in TEST_F()
677 /* Now make these pages read-only. */ in TEST_F()
684 /* Make sure we can guard again without issue. */ in TEST_F()
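
A condensed version of the interplay being tested here, assuming ptr is a 10-page anonymous read/write mapping and page_size as before:

	/* Guard a couple of pages in the middle of the range... */
	madvise(&ptr[4 * page_size], 2 * page_size, MADV_GUARD_INSTALL);

	/* ...then flip protections back and forth around them. */
	mprotect(ptr, 10 * page_size, PROT_READ);
	mprotect(ptr, 10 * page_size, PROT_READ | PROT_WRITE);

	/*
	 * The markers are untouched by either mprotect() call - accesses to the
	 * guarded pages still fault - and installing guards again works too.
	 */
	madvise(&ptr[4 * page_size], 2 * page_size, MADV_GUARD_INSTALL);
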
706 /* Split and merge VMAs and make sure guard pages still behave. */
709 const unsigned long page_size = self->page_size; in TEST_F()
717 /* Guard the whole range. */ in TEST_F()
727 /* Now unmap some pages in the range so we split. */ in TEST_F()
732 /* Make sure the remaining ranges are guarded post-split. */ in TEST_F()
754 /* Now map them again - the unmap will have cleared the guards. */ in TEST_F()
765 /* Now make sure guard pages are established. */ in TEST_F()
774 /* Now guard everything again. */ in TEST_F()
837 /* Assert that MADV_DONTNEED does not remove guard markers. */
840 const unsigned long page_size = self->page_size; in TEST_F()
855 /* Guard every other page. */ in TEST_F()
866 /* Check to ensure guard markers are still in place. */ in TEST_F()
875 switch (variant->backing) { in TEST_F()
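
The behaviour being asserted, in miniature (anonymous backing; same assumptions about ptr and page_size as above):

	/* Write to the odd pages, guard the even ones. */
	for (int i = 0; i < 10; i++) {
		if (i % 2 == 0)
			madvise(&ptr[i * page_size], page_size, MADV_GUARD_INSTALL);
		else
			ptr[i * page_size] = 'x';
	}

	/* Discard the whole range. */
	madvise(ptr, 10 * page_size, MADV_DONTNEED);

	/*
	 * The guard markers survive: the even pages still fault on access,
	 * while the odd anonymous pages simply come back as fresh zero pages
	 * (a shared file-backed mapping would instead read back file contents,
	 * which is what the backing switch above distinguishes).
	 */
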
898 /* Assert that mlock()'ed pages work correctly with guard markers. */
901 const unsigned long page_size = self->page_size; in TEST_F()
919 /* Now try to guard, should fail with EINVAL. */ in TEST_F()
920 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1); in TEST_F()
926 /* Guard first half of range, should now succeed. */ in TEST_F()
929 /* Make sure guard works. */ in TEST_F()
943 * Now lock the latter part of the range. We can't lock the guard pages, in TEST_F()
944 * as this would result in the pages being populated and the guarding in TEST_F()
950 * Now remove guard pages, we permit mlock()'d ranges to have guard in TEST_F()
951 * pages removed as it is a non-destructive operation. in TEST_F()
955 /* Now check that no guard pages remain. */ in TEST_F()
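
A sketch of the mlock() rules exercised here, mirroring the flow above (ptr: a 10-page anonymous read/write mapping; errno.h assumed):

	mlock(ptr, 10 * page_size);

	/* Installing guard markers over mlock()ed pages is refused... */
	errno = 0;
	int ret = madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL);
	(void)ret;	/* ret == -1, errno == EINVAL */

	/* ...but works again once the pages are unlocked. */
	munlock(ptr, 10 * page_size);
	madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL);

	/* Lock only the unguarded tail, mirroring the flow above. */
	mlock(&ptr[5 * page_size], 5 * page_size);

	/* Removal is non-destructive, so it is allowed even across locked pages. */
	madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE);
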
968 * guard markers where possible.
970 * - Moving a mapping alone should retain markers as they are.
974 const unsigned long page_size = self->page_size; in TEST_F()
977 /* Map 5 pages. */ in TEST_F()
982 /* Place guard markers at both ends of the 5 page span. */ in TEST_F()
986 /* Make sure the guard pages are in effect. */ in TEST_F()
999 /* Make sure the guard markers are retained. */ in TEST_F()
1004 * Clean up - we need only reference the new pointer as we overwrote the in TEST_F()
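
A compact version of the move case (needs _GNU_SOURCE for mremap() and the MREMAP_* flags; page_size as before):

	/* A 5-page mapping with guard markers on its first and last pages. */
	char *ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
			 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	madvise(ptr, page_size, MADV_GUARD_INSTALL);
	madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL);

	/* Reserve a destination, then move the mapping onto it wholesale. */
	char *target = mmap(NULL, 5 * page_size, PROT_NONE,
			    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	char *new_ptr = mremap(ptr, 5 * page_size, 5 * page_size,
			       MREMAP_MAYMOVE | MREMAP_FIXED, target);

	/* new_ptr == target, and its first and last pages still carry the markers. */
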
1012 * guard markers where possible.
1014 * Expanding should retain guard pages, only now in a different position. The user
1015 * will have to remove guard pages manually to fix up (they'd have to do the
1020 const unsigned long page_size = self->page_size; in TEST_F()
1023 /* Map 10 pages... */ in TEST_F()
1030 /* Place guard markers at both ends of the 5 page span. */ in TEST_F()
1038 /* Now expand to 10 pages. */ in TEST_F()
1043 * Make sure the guard markers are retained in their original positions. in TEST_F()
1058 * Again, make sure the guard markers are retained in their original positions. in TEST_F()
1064 * A real user would have to remove guard markers, but would reasonably in TEST_F()
1066 * guard markers. in TEST_F()
1074 * guard markers where possible.
1082 const unsigned long page_size = self->page_size; in TEST_F()
1086 /* Map 5 pages. */ in TEST_F()
1091 /* Place guard markers at both ends of the 5 page span. */ in TEST_F()
1099 /* Now shrink to 3 pages. */ in TEST_F()
1103 /* We expect the guard marker at the start to be retained... */ in TEST_F()
1106 /* ...But remaining pages will not have guard markers. */ in TEST_F()
1114 * As with expansion, a real user would have to remove guard pages and in TEST_F()
1126 /* Again, we expect the guard marker at the start to be retained... */ in TEST_F()
1129 /* ...But remaining pages will not have guard markers. */ in TEST_F()
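
Both resize cases above in one sketch. It assumes ptr is a 5-page anonymous mapping with guard markers on its first and last pages, that the address space immediately after it is free so the in-place expansion can succeed, and _GNU_SOURCE for mremap():

	/* Expand in place: 5 pages -> 10 pages. */
	char *p = mremap(ptr, 5 * page_size, 10 * page_size, 0);
	/*
	 * The markers stay at their original offsets (pages 0 and 4); nothing
	 * guards the new tail, so the caller must re-install markers there if
	 * that is the layout they want.
	 */

	/* Shrink: 10 pages -> 3 pages. */
	p = mremap(p, 10 * page_size, 3 * page_size, 0);
	/* The marker at the start survives; the one at page 4 went with the tail. */
	(void)p;
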
1142 * retain guard pages.
1146 const unsigned long page_size = self->page_size; in TEST_F()
1151 /* Map 10 pages. */ in TEST_F()
1156 /* Establish guard pages in the first 5 pages. */ in TEST_F()
1160 ASSERT_NE(pid, -1); in TEST_F()
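
The inheritance being tested, as a sketch (ptr: an anonymous mapping as before; needs unistd.h, sys/wait.h, signal.h and assert.h):

	/* Guard the first page, then let a child prove it inherited the marker. */
	madvise(ptr, page_size, MADV_GUARD_INSTALL);

	pid_t pid = fork();
	if (pid == 0) {
		*(volatile char *)ptr = 'x';	/* Faults: the marker was copied. */
		_exit(0);			/* Not reached. */
	}

	int status;
	waitpid(pid, &status, 0);
	/* The child dies with SIGSEGV on the inherited guard page. */
	assert(WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV);
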
1197 * and then guard and unguard the range.
1201 const unsigned long page_size = self->page_size; in TEST_F()
1206 if (variant->backing != ANON_BACKED) in TEST_F()
1209 /* Map 10 pages. */ in TEST_F()
1222 ASSERT_NE(pid, -1); in TEST_F()
1234 /* Establish guard pages across the whole range. */ in TEST_F()
1240 * By removing the guard pages, the page tables will be in TEST_F()
1275 const unsigned long page_size = self->page_size; in TEST_F()
1280 if (variant->backing != ANON_BACKED) in TEST_F()
1283 /* Map 10 pages. */ in TEST_F()
1291 /* Guard the first 5 pages. */ in TEST_F()
1295 ASSERT_NE(pid, -1); in TEST_F()
1299 /* Guard will have been wiped. */ in TEST_F()
1313 /* Guard markers should be in effect. */ in TEST_F()
1325 /* Ensure that MADV_FREE retains guard entries as expected. */
1328 const unsigned long page_size = self->page_size; in TEST_F()
1332 if (variant->backing != ANON_BACKED) in TEST_F()
1335 /* Map 10 pages. */ in TEST_F()
1340 /* Guard range. */ in TEST_F()
1353 /* This should leave the guard markers in place. */ in TEST_F()
1367 const unsigned long page_size = self->page_size; in TEST_F()
1370 /* Map 10 pages. */ in TEST_F()
1375 /* Guard range. */ in TEST_F()
1379 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1); in TEST_F()
1383 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1); in TEST_F()
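
In short, population has to fault the pages in, and guard markers make exactly that fail. A sketch (ptr/page_size as before; the error observed here on current kernels is EFAULT):

	madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL);

	errno = 0;
	int ret = madvise(ptr, 10 * page_size, MADV_POPULATE_READ);
	/* ret == -1; errno == EFAULT, as the guard pages cannot be faulted in. */

	ret = madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE);
	/* Likewise -1 / EFAULT. */
	(void)ret;
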
1390 /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
1393 const unsigned long page_size = self->page_size; in TEST_F()
1397 /* Map 10 pages. */ in TEST_F()
1402 /* Guard range. */ in TEST_F()
1412 /* Now mark cold. This should have no impact on guard markers. */ in TEST_F()
1436 /* Ensure that guard pages do not break userfaultfd. */
1439 const unsigned long page_size = self->page_size; in TEST_F()
1455 if (uffd == -1 && errno == EPERM) in TEST_F()
1457 ASSERT_NE(uffd, -1); in TEST_F()
1461 /* Map 10 pages. */ in TEST_F()
1473 /* Guard the range. This should not trigger the uffd. */ in TEST_F()
1490 * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we
1491 * aggressively read-ahead, then install guard regions and assert that it
1494 * We page out using MADV_PAGEOUT before checking guard regions so we drop page
1501 const unsigned long page_size = self->page_size; in TEST_F()
1503 if (variant->backing == ANON_BACKED) in TEST_F()
1504 SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed"); in TEST_F()
1517 /* Mark every other page a guard page. */ in TEST_F()
1527 /* Now make sure pages are as expected. */ in TEST_F()
1540 /* Now remove guard pages. */ in TEST_F()
1551 * Check that file-backed mappings implement guard regions with MAP_PRIVATE
1556 const unsigned long page_size = self->page_size; in TEST_F()
1560 if (variant->backing == ANON_BACKED) in TEST_F()
1561 SKIP(return, "MAP_PRIVATE test specific to file-backed"); in TEST_F()
1566 /* Manually mmap() rather than using the mmap_() wrapper, so we can force MAP_PRIVATE. */ in TEST_F()
1567 ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0); in TEST_F()
1573 /* Install guard regions in every other page in the shared mapping. */ in TEST_F()
1587 /* Install guard regions in every other page in the private mapping. */ in TEST_F()
1601 /* Remove guard regions from shared mapping. */ in TEST_F()
1611 /* Remove guard regions from private mapping. */ in TEST_F()
1654 /* Ensure guard regions are as expected. */ in TEST_F()
1660 /* Remove the guard regions altogether. */ in TEST_F()
1670 * As we removed guard regions, the private pages from the first 5 will in TEST_F()
1696 /* Test that guard regions established over a read-only mapping function correctly. */
1699 const unsigned long page_size = self->page_size; in TEST_F()
1703 if (variant->backing != LOCAL_FILE_BACKED) in TEST_F()
1704 SKIP(return, "Read-only test specific to file-backed"); in TEST_F()
1712 /* Close the fd so we can re-open read-only. */ in TEST_F()
1713 ASSERT_EQ(close(self->fd), 0); in TEST_F()
1715 /* Re-open read-only. */ in TEST_F()
1716 self->fd = open(self->path, O_RDONLY); in TEST_F()
1717 ASSERT_NE(self->fd, -1); in TEST_F()
1718 /* Re-map read-only. */ in TEST_F()
1729 /* Assert that the guard regions are in place. */ in TEST_F()
1736 /* Remove guard regions. */ in TEST_F()
1747 const unsigned long page_size = self->page_size; in TEST_F()
1751 if (variant->backing == ANON_BACKED) in TEST_F()
1752 SKIP(return, "Fault-around test specific to file-backed"); in TEST_F()
1780 /* Now fault in every odd page. This should trigger fault-around. */ in TEST_F()
1787 /* Finally, ensure that guard regions are intact as expected. */ in TEST_F()
1799 const unsigned long page_size = self->page_size; in TEST_F()
1803 if (variant->backing == ANON_BACKED) in TEST_F()
1804 SKIP(return, "Truncation test specific to file-backed"); in TEST_F()
1831 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); in TEST_F()
1833 /* Here the guard regions will remain intact. */ in TEST_F()
1841 ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0); in TEST_F()
1842 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); in TEST_F()
1844 /* Again, guard pages will remain intact. */ in TEST_F()
1856 const unsigned long page_size = self->page_size; in TEST_F()
1860 if (variant->backing == ANON_BACKED) in TEST_F()
1861 SKIP(return, "Truncation test specific to file-backed"); in TEST_F()
1869 /* Install a guard region in the middle of the mapping. */ in TEST_F()
1879 * Where * is data and x is the guard region. in TEST_F()
1893 /* Ensure guard regions remain. */ in TEST_F()
1900 /* Now remove the guard region throughout. */ in TEST_F()
1903 /* Check that the pattern exists in the non-hole-punched region. */ in TEST_F()
1914 * Ensure that a memfd works correctly with guard regions, that we can write
1915 * seal it, then open the mapping read-only and still establish guard regions
1916 * within, remove those guard regions and have everything work correctly.
1920 const unsigned long page_size = self->page_size; in TEST_F()
1924 if (variant->backing != SHMEM_BACKED) in TEST_F()
1928 ASSERT_EQ(close(self->fd), 0); in TEST_F()
1931 self->fd = memfd_create("guard_regions_memfd_seals_test", in TEST_F()
1933 ASSERT_NE(self->fd, -1); in TEST_F()
1934 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); in TEST_F()
1942 /* Write-seal the memfd. */ in TEST_F()
1943 ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0); in TEST_F()
1966 /* Now remove guard regions. */ in TEST_F()
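
The seal/guard combination, reduced to its essentials (needs _GNU_SOURCE for memfd_create() plus fcntl.h for F_ADD_SEALS; the memfd name is purely illustrative):

	int fd = memfd_create("guard_demo", MFD_ALLOW_SEALING);
	ftruncate(fd, 10 * page_size);

	/* Write-seal it, then map it shared and read-only. */
	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);
	char *ptr = mmap(NULL, 10 * page_size, PROT_READ, MAP_SHARED, fd, 0);

	/* Guard markers can still be installed and removed on this mapping. */
	madvise(ptr, page_size, MADV_GUARD_INSTALL);
	madvise(ptr, page_size, MADV_GUARD_REMOVE);
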
1984 * Since we are now permitted to establish guard regions in read-only anonymous
1986 * use, test that guard regions function with a mapping to the anonymous zero
1991 const unsigned long page_size = self->page_size; in TEST_F()
1998 /* Obtain a read-only i.e. anon zero page mapping. */ in TEST_F()
2016 /* Now remove all guard regions. */ in TEST_F()
2033 * Assert that /proc/$pid/pagemap correctly identifies guard region ranges.
2037 const unsigned long page_size = self->page_size; in TEST_F()
2043 ASSERT_NE(proc_fd, -1); in TEST_F()
2049 /* Read from pagemap, and assert no guard regions are detected. */ in TEST_F()
2058 /* Install a guard region in every other page. */ in TEST_F()
2065 /* Re-read from pagemap, and assert guard regions are detected. */ in TEST_F()
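
A sketch of how such a check reads the guard bit out of /proc/self/pagemap for one page of ptr. The bit position (58) is taken from current Documentation/admin-guide/mm/pagemap.rst and should be treated as an assumption to verify against your kernel; fcntl.h, stdint.h and unistd.h are needed:

	int pm = open("/proc/self/pagemap", O_RDONLY);
	uint64_t entry = 0;

	/* One 64-bit entry per virtual page. */
	off_t offset = (off_t)((uintptr_t)ptr / page_size) * sizeof(entry);
	pread(pm, &entry, sizeof(entry), offset);

	/*
	 * Guard regions are reported via their own bit; such entries are
	 * neither present (bit 63) nor swapped (bit 62).
	 */
	int is_guard = (entry >> 58) & 1;
	(void)is_guard;

	close(pm);
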