Lines Matching +full:10 +full:a
31 * object with static storage duration other than by assigning a value to an
39 * anything with it in order to trigger a read page fault. We therefore must use
138 * return value indicates whether the read/write succeeds without a fatal
147 /* If a fatal signal arose, we will jump back here and failed is set. */ in try_access_buf()
161 /* Try to read from a buffer; return true if no fatal signal. */
167 /* Try to write to a buffer; return true if no fatal signal. */
174 * Try to BOTH read from AND write to a buffer; return true if BOTH operations
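The three comment fragments above describe the fault-tolerant accessors the rest of the file leans on. A minimal sketch of how such an accessor can be built, assuming a SIGSEGV/SIGBUS handler installed via sigaction() that siglongjmp()s back to the accessor (the handler wiring and the volatile read here are illustrative, not the file's exact code):

#include <setjmp.h>
#include <signal.h>
#include <stdbool.h>

static sigjmp_buf signal_jmp_buf;

static void handle_fatal(int sig)
{
	siglongjmp(signal_jmp_buf, 1);
}

/* Attempt to read or write *ptr; return true if no fatal signal arose. */
static bool try_access_buf(char *ptr, bool write)
{
	bool failed;

	/* Save the signal mask (1) so repeated faults keep working. */
	failed = sigsetjmp(signal_jmp_buf, 1) != 0;

	if (!failed) {
		if (write)
			*ptr = 'x';
		else
			(void)*(volatile char *)ptr; /* un-elidable read */
	}

	return !failed;
}

static bool try_read_buf(char *ptr)  { return try_access_buf(ptr, false); }
static bool try_write_buf(char *ptr) { return try_access_buf(ptr, true); }
static bool try_read_write_buf(char *ptr)
{
	return try_read_buf(ptr) && try_write_buf(ptr);
}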
217 /* Establish a varying pattern in a buffer. */
225 memset(ptr2, 'a' + (i % 26), page_size); in set_pattern()
230 * Check that a buffer contains the pattern set by set_pattern(), starting at a
241 char expected = 'a' + ((offset / page_size) % 26); in check_pattern_offset()
250 /* Check that a buffer contains the pattern set by set_pattern(). */
256 /* Determine if a buffer contains only repetitions of a specified char. */
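Taken together, the fragments above pin down the pattern scheme: page i is filled with 'a' + (i % 26). Helpers consistent with those lines, with signatures assumed from call sites such as set_pattern(ptr, 10, page_size):

#include <stdbool.h>
#include <string.h>

/* Fill each of num_pages pages with 'a', 'b', ..., cycling every 26 pages. */
static void set_pattern(char *ptr, unsigned long num_pages,
			unsigned long page_size)
{
	unsigned long i;

	for (i = 0; i < num_pages; i++)
		memset(&ptr[i * page_size], 'a' + (i % 26), page_size);
}

/* Check the pattern as if the buffer began offset_pages pages into it. */
static bool check_pattern_offset(char *ptr, unsigned long num_pages,
				 unsigned long page_size,
				 unsigned long offset_pages)
{
	unsigned long i;

	for (i = 0; i < num_pages * page_size; i++) {
		unsigned long offset = offset_pages * page_size + i;
		char expected = 'a' + ((offset / page_size) % 26);

		if (ptr[i] != expected)
			return false;
	}

	return true;
}

/* Determine if a buffer contains only repetitions of chr. */
static bool is_buf_eq(char *ptr, unsigned long size, char chr)
{
	unsigned long i;

	for (i = 0; i < size; i++)
		if (ptr[i] != chr)
			return false;

	return true;
}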
305 const unsigned long NUM_PAGES = 10; in TEST_F()
329 /* Establish a guard page at the end of the mapping. */ in TEST_F()
351 * Test setting a _range_ of pages, namely the first 3. The first of in TEST_F()
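For orientation, the operation all of these tests exercise is a pair of madvise() calls. The lifecycle the first test checks, sketched with the kselftest harness macros and the try_read_buf() helper from above:

char *last = &ptr[(NUM_PAGES - 1) * page_size];

/* Install a guard marker on the final page of the mapping. */
ASSERT_EQ(madvise(last, page_size, MADV_GUARD_INSTALL), 0);
ASSERT_FALSE(try_read_buf(last)); /* access now raises SIGSEGV */
ASSERT_TRUE(try_read_buf(ptr));   /* other pages are unaffected */

/* Removal restores ordinary demand-fault behaviour. */
ASSERT_EQ(madvise(last, page_size, MADV_GUARD_REMOVE), 0);
ASSERT_TRUE(try_read_buf(last));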
403 /* Reserve a 100 page region over which we can install VMAs. */ in TEST_F()
408 /* Place a VMA of 10 pages size at the start of the region. */ in TEST_F()
409 ptr1 = mmap_(self, variant, ptr_region, 10 * page_size, in TEST_F()
413 /* Place a VMA of 5 pages size 50 pages into the region. */ in TEST_F()
418 /* Place a VMA of 20 pages size at the end of the region. */ in TEST_F()
424 ASSERT_EQ(munmap(&ptr_region[10 * page_size], 40 * page_size), 0); in TEST_F()
430 * 0 10 .. 50 55 .. 80 100 in TEST_F()
448 for (i = 0; i < 10; i++) { in TEST_F()
471 for (i = 0; i < 10; i++) { in TEST_F()
490 ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size, in TEST_F()
500 * 0 10 .. 50 55 .. 80 100 in TEST_F()
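The layout diagrams above describe a 100-page reservation carrying VMAs at pages 0-9, 50-54 and 80-99. The shape of that setup, sketched with plain mmap() (the file's mmap_() wrapper adds the variant's backing-specific flags, omitted here, and error checks are elided):

/* Reserve 100 pages of address space to carve VMAs out of. */
char *region = mmap(NULL, 100 * page_size, PROT_NONE,
		    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

/* VMAs of 10, 5 and 20 pages at pages 0, 50 and 80 respectively. */
char *vma1 = mmap(&region[0], 10 * page_size, PROT_READ | PROT_WRITE,
		  MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
char *vma2 = mmap(&region[50 * page_size], 5 * page_size,
		  PROT_READ | PROT_WRITE,
		  MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
char *vma3 = mmap(&region[80 * page_size], 20 * page_size,
		  PROT_READ | PROT_WRITE,
		  MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);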
542 * 10 pages offset 1 page into the reserve region. We MAP_POPULATE so we in TEST_F()
546 ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size, in TEST_F()
615 ASSERT_EQ(munmap(ptr1, 10 * page_size), 0); in TEST_F()
626 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
655 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
665 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
693 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
696 for (i = 0; i < 10; i++) { in TEST_F()
703 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
713 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
718 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0); in TEST_F()
721 for (i = 0; i < 10; i++) { in TEST_F()
748 for (i = 9; i < 10; i++) { in TEST_F()
766 for (i = 0; i < 10; i++) { in TEST_F()
775 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0); in TEST_F()
778 for (i = 0; i < 10; i++) { in TEST_F()
789 for (i = 0; i < 10; i++) { in TEST_F()
801 for (i = 0; i < 10; i++) { in TEST_F()
812 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
815 for (i = 0; i < 10; i++) { in TEST_F()
827 for (i = 0; i < 10; i++) { in TEST_F()
834 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
844 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
849 for (i = 0; i < 10; i++) { in TEST_F()
856 for (i = 0; i < 10; i += 2) { in TEST_F()
864 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0); in TEST_F()
867 for (i = 0; i < 10; i++) { in TEST_F()
877 /* If anon, then we get a zero page. */ in TEST_F()
895 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
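The zero-page remark captures the test's point: MADV_DONTNEED zaps page contents but, per the re-check loop, does not strip guard markers. A sketch of that assertion pattern, assuming an anonymous mapping with guards on the even pages:

ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0);

for (i = 0; i < 10; i++) {
	char *curr = &ptr[i * page_size];

	if (i % 2 == 0) {
		/* Guard markers survive MADV_DONTNEED: still faults. */
		ASSERT_FALSE(try_read_buf(curr));
	} else {
		/* Anon pages were zapped; reads now see the zero page. */
		ASSERT_TRUE(try_read_buf(curr));
		ASSERT_EQ(*curr, '\0');
	}
}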
905 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
910 for (i = 0; i < 10; i++) { in TEST_F()
917 ASSERT_EQ(mlock(ptr, 10 * page_size), 0); in TEST_F()
920 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1); in TEST_F()
924 ASSERT_EQ(munlock(ptr, 10 * page_size), 0); in TEST_F()
930 for (i = 0; i < 10; i++) { in TEST_F()
951 * pages removed as it is a non-destructive operation. in TEST_F()
953 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
956 for (i = 0; i < 10; i++) { in TEST_F()
963 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
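The -1 above is the locked-memory restriction: guard markers cannot be installed over mlock()'d memory. A sketch; the EINVAL is an assumption about the exact errno, since the listing shows only the failing return:

ASSERT_EQ(mlock(ptr, 10 * page_size), 0);

/* Installation over a VM_LOCKED range is rejected. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1);
ASSERT_EQ(errno, EINVAL); /* assumed errno */

/* Once unlocked, installation succeeds again. */
ASSERT_EQ(munlock(ptr, 10 * page_size), 0);
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);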
970 * - Moving a mapping alone should retain markers as they are.
990 /* Map a new region we will move this range into. Doing this ensures in TEST_F()
991 * that we have reserved a range to map into. in TEST_F()
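A sketch of the move itself, assuming a marker was installed on the first page beforehand and that ptr_new is the reservation mapped above (MREMAP_FIXED needs a destination the caller controls):

char *ptr_moved;

/* Move the whole mapping into the reserved range. */
ptr_moved = mremap(ptr, 10 * page_size, 10 * page_size,
		   MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new);
ASSERT_NE(ptr_moved, MAP_FAILED);

/* Markers travel with the mapping: the guarded page still faults. */
ASSERT_FALSE(try_read_buf(ptr_moved));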
1016 * same if it were a PROT_NONE mapping).
1023 /* Map 10 pages... */ in TEST_F()
1024 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1038 /* Now expand to 10 pages. */ in TEST_F()
1039 ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0); in TEST_F()
1048 /* Reserve a region which we can move to and expand into. */ in TEST_F()
1053 ptr = mremap(ptr, 10 * page_size, 20 * page_size, in TEST_F()
1064 * A real user would have to remove guard markers, but would reasonably in TEST_F()
1077 * if the user were using a PROT_NONE mapping, they'd have to manually fix this
1114 * As with expansion, a real user would have to remove guard pages and in TEST_F()
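Across the expand and shrink fragments the recurring point is that markers in the retained range stay in effect, and (as the comments say) a real user would remove them before resizing. A sketch of the in-place growth case, assuming markers were installed at the ends of the original 5-page span:

/* Grow in place from 5 to 10 pages. */
ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0);
ASSERT_NE(ptr, MAP_FAILED);

/* Markers installed before the expansion still fault. */
ASSERT_FALSE(try_read_buf(ptr));
ASSERT_FALSE(try_read_buf(&ptr[4 * page_size]));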
1141 * Assert that forking a process with VMAs that do not have VM_WIPEONFORK set
1151 /* Map 10 pages. */ in TEST_F()
1152 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1165 for (i = 0; i < 10; i++) { in TEST_F()
1173 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
1184 for (i = 0; i < 10; i++) { in TEST_F()
1192 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
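The shape of that fork test, sketched under the assumption that the whole 10-page range was guarded before forking:

pid_t pid = fork();

ASSERT_NE(pid, -1);
if (pid == 0) {
	/* Child: markers were copied, so every page still faults. */
	for (i = 0; i < 10; i++)
		ASSERT_FALSE(try_read_buf(&ptr[i * page_size]));

	/* Removing them here only alters the child's page tables. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
	exit(0);
}
waitpid(pid, NULL, 0);

/* Parent: the markers remain in place. */
for (i = 0; i < 10; i++)
	ASSERT_FALSE(try_read_buf(&ptr[i * page_size]));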
1209 /* Map 10 pages. */ in TEST_F()
1210 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1215 for (i = 0; i < 10 * page_size; i++) { in TEST_F()
1216 char chr = 'a' + (i % 26); in TEST_F()
1227 for (i = 0; i < 10 * page_size; i++) { in TEST_F()
1228 char expected = 'a' + (i % 26); in TEST_F()
1235 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0); in TEST_F()
1237 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
1243 for (i = 0; i < 10 * page_size; i++) { in TEST_F()
1258 for (i = 0; i < 10 * page_size; i++) { in TEST_F()
1259 char expected = 'a' + (i % 26); in TEST_F()
1266 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
1270 * Assert that forking a process with VMAs that do have VM_WIPEONFORK set
1283 /* Map 10 pages. */ in TEST_F()
1284 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1289 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_WIPEONFORK), 0); in TEST_F()
1300 for (i = 0; i < 10; i++) { in TEST_F()
1314 for (i = 0; i < 10; i++) { in TEST_F()
1322 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
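With MADV_WIPEONFORK set, the child receives a freshly wiped anonymous mapping, so neither the data nor the markers cross the fork. Sketch, assuming guards were installed after the MADV_WIPEONFORK call:

pid_t pid = fork();

ASSERT_NE(pid, -1);
if (pid == 0) {
	/* Child: the wiped mapping has no markers and reads zeroes. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_buf(curr));
		ASSERT_EQ(*curr, '\0');
	}
	exit(0);
}
waitpid(pid, NULL, 0);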
1335 /* Map 10 pages. */ in TEST_F()
1336 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1341 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0); in TEST_F()
1344 for (i = 0; i < 10; i++) { in TEST_F()
1351 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_FREE), 0); in TEST_F()
1354 for (i = 0; i < 10; i++) { in TEST_F()
1361 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
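The MADV_FREE fragments lazy-free a fully guarded range; the re-check loop that follows implies the markers survive it. Condensed:

ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_FREE), 0);

/* Lazy-free does not strip the markers: accesses still fault. */
for (i = 0; i < 10; i++)
	ASSERT_FALSE(try_read_buf(&ptr[i * page_size]));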
1370 /* Map 10 pages. */ in TEST_F()
1371 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1376 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0); in TEST_F()
1379 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1); in TEST_F()
1383 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1); in TEST_F()
1387 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
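MADV_POPULATE_READ/WRITE walk the range faulting pages in, so they trip over the markers and fail; the errno below is an assumption, as the listing shows only the -1 returns:

ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);

ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1);
ASSERT_EQ(errno, EFAULT); /* assumed errno */

ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1);
ASSERT_EQ(errno, EFAULT); /* assumed errno */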
1397 /* Map 10 pages. */ in TEST_F()
1398 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1403 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0); in TEST_F()
1406 for (i = 0; i < 10; i++) { in TEST_F()
1413 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_COLD), 0); in TEST_F()
1416 for (i = 0; i < 10; i++) { in TEST_F()
1423 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0); in TEST_F()
1426 for (i = 0; i < 10; i++) { in TEST_F()
1433 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
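Same shape for the reclaim hints: both calls succeed over a guarded range, and the re-check loops above imply the markers stay put. Condensed:

ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_COLD), 0);
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);

/* Reclaim hints leave guard markers in place: accesses still fault. */
for (i = 0; i < 10; i++)
	ASSERT_FALSE(try_read_buf(&ptr[i * page_size]));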
1461 /* Map 10 pages. */ in TEST_F()
1462 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1468 range.len = 10 * page_size; in TEST_F()
1474 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0); in TEST_F()
1477 for (i = 0; i < 10; i++) { in TEST_F()
1486 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
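The range.len fragment is a userfaultfd registration, and the point of the test is precedence: a fault on a guarded page raises SIGSEGV rather than queueing a uffd event. Sketch, assuming uffd was obtained via the userfaultfd() syscall and UFFDIO_API was negotiated earlier in the fixture:

struct uffdio_register reg = {
	.range = {
		.start = (unsigned long)ptr,
		.len = 10 * page_size,
	},
	.mode = UFFDIO_REGISTER_MODE_MISSING,
};

ASSERT_EQ(ioctl(uffd, UFFDIO_REGISTER, &reg), 0);
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);

/* Guard faults win: SIGSEGV is raised, no uffd event is generated. */
for (i = 0; i < 10; i++)
	ASSERT_FALSE(try_read_buf(&ptr[i * page_size]));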
1490 * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we
1506 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1510 /* Establish a pattern of data in the file. */ in TEST_F()
1511 set_pattern(ptr, 10, page_size); in TEST_F()
1512 ASSERT_TRUE(check_pattern(ptr, 10, page_size)); in TEST_F()
1515 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0); in TEST_F()
1517 /* Mark every other page a guard page. */ in TEST_F()
1518 for (i = 0; i < 10; i += 2) { in TEST_F()
1525 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0); in TEST_F()
1528 for (i = 0; i < 10; i++) { in TEST_F()
1536 ASSERT_EQ(*chrp, 'a' + i); in TEST_F()
1541 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
1544 if (!check_pattern(ptr, 10, page_size)) in TEST_F()
1547 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
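Reading these fragments together: MADV_SEQUENTIAL invites aggressive readahead, every other page is then guarded and the range paged out, and the final loop shows unguarded pages reloading their file data ('a' + i) while readahead must skip the markers rather than fault on them. Condensed, under that reading:

ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0);
/* ... guard every other page, then MADV_PAGEOUT the range ... */

/* Readahead skips the markers; unguarded pages reload from the file. */
for (i = 1; i < 10; i += 2) {
	char *chrp = &ptr[i * page_size];

	ASSERT_TRUE(try_read_buf(chrp));
	ASSERT_EQ(*chrp, 'a' + i);
}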
1563 ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0); in TEST_F()
1567 ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0); in TEST_F()
1571 set_pattern(ptr_shared, 10, page_size); in TEST_F()
1574 for (i = 0; i < 10; i += 2) { in TEST_F()
1580 for (i = 0; i < 10; i++) { in TEST_F()
1588 for (i = 0; i < 10; i += 2) { in TEST_F()
1594 for (i = 0; i < 10; i++) { in TEST_F()
1602 ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
1604 for (i = 0; i < 10; i++) { in TEST_F()
1612 ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
1614 for (i = 0; i < 10; i++) { in TEST_F()
1622 ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size)); in TEST_F()
1623 ASSERT_TRUE(check_pattern(ptr_private, 10, page_size)); in TEST_F()
1626 for (i = 0; i < 10; i += 2) { in TEST_F()
1629 memset(ptr, 'a' + i, page_size); in TEST_F()
1651 for (i = 0; i < 10; i++) { in TEST_F()
1661 ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
1674 for (i = 0; i < 10; i++) { in TEST_F()
1688 /* Everything else is a private mapping. */ in TEST_F()
1689 ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i)); in TEST_F()
1692 ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0); in TEST_F()
1693 ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0); in TEST_F()
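The interleaved shared/private checks above all follow from one fact: guard markers live in a mapping's page tables, not in the backing file. The core assertion, condensed and assuming both mappings cover the same file range:

/* Guard page 0 of the shared view only. */
ASSERT_EQ(madvise(ptr_shared, page_size, MADV_GUARD_INSTALL), 0);

ASSERT_FALSE(try_read_buf(ptr_shared)); /* this view faults...       */
ASSERT_TRUE(try_read_buf(ptr_private)); /* ...the other still reads. */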
1696 /* Test that guard regions established over a read-only mapping function correctly. */
1707 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1710 set_pattern(ptr, 10, page_size); in TEST_F()
1711 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
1719 ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0); in TEST_F()
1723 for (i = 0; i < 10; i += 2) { in TEST_F()
1730 for (i = 0; i < 10; i++) { in TEST_F()
1737 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
1740 ASSERT_TRUE(check_pattern(ptr, 10, page_size)); in TEST_F()
1742 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
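The read-only test relies on madvise() needing no write permission on the mapping. Minimal shape, assuming the pattern was written to the backing file through the earlier read-write mapping:

ptr = mmap(NULL, 10 * page_size, PROT_READ, MAP_PRIVATE, self->fd, 0);
ASSERT_NE(ptr, MAP_FAILED);

/* Guard install works on PROT_READ mappings. */
ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
ASSERT_FALSE(try_read_buf(ptr));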
1754 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1758 /* Establish a pattern in the backing file. */ in TEST_F()
1759 set_pattern(ptr, 10, page_size); in TEST_F()
1765 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0); in TEST_F()
1768 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
1769 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1774 for (i = 0; i < 10; i += 2) { in TEST_F()
1781 for (i = 1; i < 10; i += 2) { in TEST_F()
1788 for (i = 0; i < 10; i++) { in TEST_F()
1794 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
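This test pages a file-backed range out with every other page guarded, then walks the range: unguarded pages fault their data back in from the file while guarded ones keep faulting. Condensed, under that reading:

ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);

for (i = 0; i < 10; i++) {
	char *curr = &ptr[i * page_size];

	if (i % 2 == 0)
		ASSERT_FALSE(try_read_buf(curr)); /* guarded: faults */
	else
		ASSERT_TRUE(try_read_buf(curr));  /* reloaded page   */
}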
1806 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1811 * Establish a pattern in the backing file, just so there is data in TEST_F()
1814 set_pattern(ptr, 10, page_size); in TEST_F()
1817 for (i = 0; i < 10; i += 2) { in TEST_F()
1824 for (i = 0; i < 10; i++) { in TEST_F()
1831 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); in TEST_F()
1834 for (i = 0; i < 10; i++) { in TEST_F()
1842 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); in TEST_F()
1845 for (i = 0; i < 10; i++) { in TEST_F()
1851 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
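Both ftruncate() calls are followed by the same re-check loop; the test's claim, on this reading, is that resizing the backing file leaves the mapping's markers intact. Condensed, assuming guards on the even pages:

ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);

/* Markers are a property of the mapping, not of the file's pages. */
for (i = 0; i < 10; i += 2)
	ASSERT_FALSE(try_read_buf(&ptr[i * page_size]));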
1864 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
1867 set_pattern(ptr, 10, page_size); in TEST_F()
1869 /* Install a guard region in the middle of the mapping. */ in TEST_F()
1883 for (i = 0; i < 10; i++) { in TEST_F()
1894 for (i = 0; i < 10; i++) { in TEST_F()
1901 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
1910 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
1914 * Ensure that a memfd works correctly with guard regions, that we can write
1927 /* OK, we need a memfd, so close the existing one. */ in TEST_F()
1934 ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); in TEST_F()
1937 ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0); in TEST_F()
1939 set_pattern(ptr, 10, page_size); in TEST_F()
1940 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
1946 ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0); in TEST_F()
1950 ASSERT_TRUE(check_pattern(ptr, 10, page_size)); in TEST_F()
1953 for (i = 0; i < 10; i += 2) { in TEST_F()
1960 for (i = 0; i < 10; i++) { in TEST_F()
1967 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
1970 ASSERT_TRUE(check_pattern(ptr, 10, page_size)); in TEST_F()
1973 for (i = 0; i < 10; i++) { in TEST_F()
1979 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
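Swapping the fixture's file for a memfd is a two-liner; the name string below is illustrative, not the file's:

close(self->fd);
self->fd = memfd_create("guard-regions-test", 0); /* illustrative name */
ASSERT_NE(self->fd, -1);
ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);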
1986 * use, test that guard regions function with a mapping to the anonymous zero
1998 /* Obtain a read-only i.e. anon zero page mapping. */ in TEST_F()
1999 ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0); in TEST_F()
2003 for (i = 0; i < 10; i += 2) { in TEST_F()
2010 for (i = 0; i < 10; i++) { in TEST_F()
2017 ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); in TEST_F()
2020 for (i = 0; i < 10; i++) { in TEST_F()
2027 ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0')); in TEST_F()
2029 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()
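A read-only anonymous mapping only ever maps the shared zero page, which is what the final is_buf_eq() check confirms. Minimal shape:

/* Read-only anon mapping: every read fault maps the shared zero page. */
ptr = mmap(NULL, 10 * page_size, PROT_READ,
	   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
ASSERT_NE(ptr, MAP_FAILED);

ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
ASSERT_FALSE(try_read_buf(ptr));

ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_REMOVE), 0);
ASSERT_TRUE(try_read_buf(ptr));
ASSERT_EQ(*ptr, '\0');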
2045 ptr = mmap_(self, variant, NULL, 10 * page_size, in TEST_F()
2050 for (i = 0; i < 10; i++) { in TEST_F()
2058 /* Install a guard region in every other page. */ in TEST_F()
2059 for (i = 0; i < 10; i += 2) { in TEST_F()
2066 for (i = 0; i < 10; i++) { in TEST_F()
2075 ASSERT_EQ(munmap(ptr, 10 * page_size), 0); in TEST_F()