Lines Matching +full:page +full:-based

1 // SPDX-License-Identifier: GPL-2.0-only
30 #include <asm/page.h>
38 /* Fields set based on lines observed in the console. */
64 return -1; in kasan_suite_init()
71 * Temporarily enable multi-shot mode. Otherwise, KASAN would only in kasan_suite_init()
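The multi-shot toggle referred to here is a pair of real kernel helpers; a minimal sketch of how a suite-wide setup/teardown uses them (the error handling of the actual file is omitted):

    static bool multishot;

    static int kasan_suite_init(struct kunit_suite *suite)
    {
            /* Report every bug, not only the first one detected. */
            multishot = kasan_save_enable_multi_shot();
            return 0;
    }

    static void kasan_suite_exit(struct kunit_suite *suite)
    {
            kasan_restore_multi_shot(multishot);
    }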
95 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
101 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
102 * checking is auto-disabled. When this happens, this test handler reenables
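A typical call site, following the pattern visible in the matches below: wrap the faulting expression in the macro, and route the access through a volatile pointer so the compiler cannot optimize it away (the size here is illustrative):

    char *ptr = kmalloc(128, GFP_KERNEL);

    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
    /* One byte past the object: this access must produce a KASAN report. */
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[128]);
    kfree(ptr);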
164 size_t size = 128 - KASAN_GRANULE_SIZE - 5; in kmalloc_oob_right()
178 * An aligned access into the first out-of-bounds granule that falls in kmalloc_oob_right()
183 /* Out-of-bounds access past the aligned kmalloc object. */ in kmalloc_oob_right()
199 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1)); in kmalloc_oob_left()
217 * Check that KASAN detects an out-of-bounds access for a big object allocated
223 size_t size = KMALLOC_MAX_CACHE_SIZE - 256; in kmalloc_big_oob_right()
279 struct page *pages; in page_alloc_oob_right()
284 * With generic KASAN page allocations have no redzones, thus in page_alloc_oob_right()
285 * out-of-bounds detection is not guaranteed. in page_alloc_oob_right()
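Because of that, the test skips itself under generic KASAN. A condensed sketch of the shape of page_alloc_oob_right(), assuming the file's KASAN_TEST_NEEDS_CONFIG_OFF() skip macro:

    char *ptr;
    size_t order = 4;
    size_t size = (1UL << (PAGE_SHIFT + order));

    /* Generic KASAN puts no redzone after page allocations, so skip. */
    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

    pages = alloc_pages(GFP_KERNEL, order);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
    ptr = page_address(pages);

    KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
    free_pages((unsigned long)ptr, order);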
301 struct page *pages; in page_alloc_uaf()
319 middle = size1 + (size2 - size1) / 2; in krealloc_more_oob_helper()
327 /* Suppress -Warray-bounds warnings. */ in krealloc_more_oob_helper()
331 ptr2[size1 - 1] = 'x'; in krealloc_more_oob_helper()
334 ptr2[size2 - 1] = 'x'; in krealloc_more_oob_helper()
354 middle = size2 + (size1 - size2) / 2; in krealloc_less_oob_helper()
362 /* Suppress -Warray-bounds warnings. */ in krealloc_less_oob_helper()
366 ptr2[size2 - 1] = 'x'; in krealloc_less_oob_helper()
385 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x'); in krealloc_less_oob_helper()
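Both krealloc() helpers probe the same boundary: after resizing, every byte below the new size must stay writable and the first byte past it must be caught. A condensed sketch of the grow case; OPTIMIZER_HIDE_VAR() is the real kernel macro behind the -Warray-bounds suppression above:

    ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

    /* Suppress -Warray-bounds warnings. */
    OPTIMIZER_HIDE_VAR(ptr2);

    /* Still in bounds after the grow: must not be reported. */
    ptr2[size1 - 1] = 'x';
    ptr2[size2 - 1] = 'x';

    /* First byte past the new size: must be reported. */
    KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');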
414 * Check that krealloc() detects a use-after-free, returns NULL,
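A sketch of that check, matching the idiom of the neighboring tests (sizes are illustrative):

    char *ptr1, *ptr2;

    ptr1 = kmalloc(size1, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
    kfree(ptr1);

    /* krealloc() on a freed object: reported, and NULL is returned. */
    KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
    KUNIT_ASSERT_NULL(test, ptr2);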
443 ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); in kmalloc_oob_16()
484 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_2()
495 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size)); in kmalloc_oob_memset_2()
502 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_4()
513 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size)); in kmalloc_oob_memset_4()
520 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_8()
531 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size)); in kmalloc_oob_memset_8()
538 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_16()
549 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size)); in kmalloc_oob_memset_16()
556 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_in_memset()
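The four sized tests above differ only in the width of the final write: an n-byte memset() that starts in bounds and ends n - 1 bytes past the object. The shared pattern, sketched for n = 2; hiding ptr and size from the optimizer keeps the call from being folded away:

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    OPTIMIZER_HIDE_VAR(size);
    /* The second byte of this write lands past the object. */
    KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
    kfree(ptr);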
574 size_t invalid_size = -2; in kmalloc_memmove_negative_size()
579 * Hardware tag-based mode doesn't check memmove for negative size. in kmalloc_memmove_negative_size()
580 * As a result, this test introduces a side-effect memory corruption, in kmalloc_memmove_negative_size()
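Hence the test opts out under hardware tag-based KASAN before attempting the huge-length memmove(); a sketch:

    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    OPTIMIZER_HIDE_VAR(invalid_size);
    /* (size_t)-2 is an enormous length; the copy must be reported. */
    KUNIT_EXPECT_KASAN_FAIL(test,
            memmove((char *)ptr, (char *)ptr + 4, invalid_size));
    kfree(ptr);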
663 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same. in kmalloc_uaf2()
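When that happens, a use-after-free through ptr1 would be indistinguishable from a valid access to ptr2, so the test retries the second allocation a bounded number of times; a sketch of the retry loop:

    again:
            ptr2 = kmalloc(size, GFP_KERNEL);
            KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

            /* Same tagged address as the freed ptr1: allocate again. */
            if (ptr1 == ptr2 && counter++ < 16) {
                    kfree(ptr2);
                    goto again;
            }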
678 * Check that KASAN detects use-after-free when another object was allocated in
679 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
686 /* This test is specifically crafted for tag-based modes. */ in kmalloc_uaf3()
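The slot-reuse scenario only exists without quarantine, hence the skip above. A sketch of the whole test (the size is illustrative):

    char *ptr1, *ptr2;
    size_t size = 100;

    /* Generic KASAN quarantines freed slots, defeating the reuse. */
    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

    ptr1 = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
    kfree(ptr1);

    ptr2 = kmalloc(size, GFP_KERNEL);    /* likely reuses ptr1's slot */
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
    kfree(ptr2);

    /* ptr1's stale tag must still trigger a report. */
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);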
716 size_t size = 128 - KASAN_GRANULE_SIZE - 5; in ksize_unpoisons_memory()
729 ptr[size - 1] = 'x'; in ksize_unpoisons_memory()
735 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]); in ksize_unpoisons_memory()
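The two matched accesses bracket the boundary that matters here: writes below the requested size must pass, while the tail of the ksize()-rounded slot must be reported. Condensed:

    real_size = ksize(ptr);

    /* Within the requested size: must not be reported. */
    ptr[size - 1] = 'x';

    /* Last byte of the rounded-up slot: must be reported. */
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);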
741 * Check that a use-after-free is detected by ksize() and via normal accesses
747 int size = 128 - KASAN_GRANULE_SIZE; in ksize_uaf()
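A sketch of ksize_uaf(): after the free, both the ksize() call itself and plain reads must be reported:

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
    kfree(ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);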
778 ((volatile struct kasan_rcu_info *)fp)->i; in rcu_uaf_reclaim()
792 call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim); in rcu_uaf()
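The reclaim callback above is where the bug lives: it frees the object and then reads a field back, so the use-after-free happens inside the RCU callback. A sketch, assuming the file's small kasan_rcu_info struct (an int plus an rcu_head):

    static void rcu_uaf_reclaim(struct rcu_head *rp)
    {
            struct kasan_rcu_info *fp =
                    container_of(rp, struct kasan_rcu_info, rcu);

            kfree(fp);
            /* Deliberate read of the just-freed object. */
            ((volatile struct kasan_rcu_info *)fp)->i;
    }

    /* In rcu_uaf(): queue the callback and wait for it to run. */
    KUNIT_EXPECT_KASAN_FAIL(test,
            call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
            rcu_barrier());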
817 ((volatile struct work_struct *)work)->data); in workqueue_uaf()
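The workqueue variant is analogous: a handler that frees its own work item, then a read of work->data once the queue is drained. A sketch, assuming a handler named workqueue_uaf_work() that simply does kfree(work):

    workqueue = create_workqueue("kasan_workqueue_test");
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);

    work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

    INIT_WORK(work, workqueue_uaf_work);
    queue_work(workqueue, work);
    destroy_workqueue(workqueue);    /* flushes the work, which frees it */

    KUNIT_EXPECT_KASAN_FAIL(test,
            ((volatile struct work_struct *)work)->data);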
824 struct page *page; in kfree_via_page() local
830 page = virt_to_page(ptr); in kfree_via_page()
832 kfree(page_address(page) + offset); in kfree_via_page()
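This one checks for the absence of a false positive: a kmalloc() object reached back through its struct page and freed at the recomputed address must not be reported. Sketch:

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    /* Recover the same address via the page and free through it. */
    page = virt_to_page(ptr);
    offset = offset_in_page(ptr);
    kfree(page_address(page) + offset);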
979 p[i][0] = p[i][size - 1] = 42; in kmem_cache_bulk()
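For context, the bulk test allocates a batch from one cache and writes the first and last byte of each object, all of which must stay in bounds. A sketch around the matched line, assuming an array of ten pointers p[] backed by a dedicated cache (kmem_cache_alloc_bulk() returns the number of objects allocated):

    ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
    if (!ret) {
            kunit_err(test, "Allocation failed: %s\n", __func__);
            kmem_cache_destroy(cache);
            return;
    }

    for (i = 0; i < ARRAY_SIZE(p); i++)
            p[i][0] = p[i][size - 1] = 42;

    kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);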
998 * list when the tests trigger double-free and invalid-free bugs. in mempool_prepare_kmalloc()
1021 * Do not allocate one preallocated element, as we skip the double-free in mempool_prepare_slab()
1022 * and invalid-free tests for slab mempool for simplicity. in mempool_prepare_slab()
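A sketch of the slab flavor of these prepare helpers, built on the real mempool_init_slab_pool() API; the cache name and pool size are illustrative:

    cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

    ret = mempool_init_slab_pool(pool, pool_size, cache);
    KUNIT_ASSERT_EQ(test, ret, 0);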
1066 size_t size = 128 - KASAN_GRANULE_SIZE - 5; in mempool_kmalloc_oob_right()
1106 * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
1107 * allocations have no redzones, and thus the out-of-bounds detection is not
1109 * the tag-based KASAN modes, the neighboring allocation might have the same
1113 static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page) in mempool_uaf_helper() argument
1122 ptr = page ? page_address((struct page *)elem) : elem; in mempool_uaf_helper()
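Filled out, the helper returns a preallocated element to the pool and then touches it; the page flag only controls how the element is turned into a byte pointer, as the matched line shows. A sketch, assuming mempool_alloc_preallocated() (a relatively recent mempool API):

    elem = mempool_alloc_preallocated(pool);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

    mempool_free(elem, pool);

    ptr = page ? page_address((struct page *)elem) : elem;
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);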
1277 * Skip the invalid-free test for page mempool. The invalid-free detection only
1278 * works for compound pages and mempool preallocates all page elements without
1287 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS in kasan_global_oob_right()
1310 char *p = array - 3; in kasan_global_oob_left()
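The CONFIG_UBSAN_LOCAL_BOUNDS workaround referenced above is a volatile pointer to the global, which hides the constant out-of-bounds offset from the compiler. A sketch of the right-side test, assuming the file's small global_array and its KASAN_TEST_NEEDS_CONFIG_ON() macro:

    char *volatile array = global_array;
    char *p = array + ARRAY_SIZE(global_array) + 3;

    /* Only generic KASAN instruments globals. */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
    KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);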
1339 char *p = array - 1; in kasan_alloca_oob_left()
1493 * below accesses are still out-of-bounds, since bitops are defined to in kasan_bitops_generic()
1510 /* This test is specifically crafted for tag-based modes. */ in kasan_bitops_tags()
1513 /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */ in kasan_bitops_tags()
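So the tag-based bitops test allocates 48 bytes, which lands in the kmalloc-64 cache, and aims the bit operations at the 16 redzone bytes that follow. A sketch using set_bit() directly:

    bits = kzalloc(48, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

    /* The first long past the 48 bytes sits in the redzone. */
    KUNIT_EXPECT_KASAN_FAIL(test,
            set_bit(0, (unsigned long *)((char *)bits + 48)));
    kfree(bits);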
1528 /* This test is intended for tag-based modes. */ in vmalloc_helpers_tags()
1565 struct page *page; in vmalloc_oob() local
1566 size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5; in vmalloc_oob()
1579 * We have to be careful not to hit the guard page in vmalloc tests. in vmalloc_oob()
1583 /* Make sure in-bounds accesses are valid. */ in vmalloc_oob()
1585 v_ptr[size - 1] = 0; in vmalloc_oob()
1594 /* An aligned access into the first out-of-bounds granule. */ in vmalloc_oob()
1597 /* Check that in-bounds accesses to the physical page are valid. */ in vmalloc_oob()
1598 page = vmalloc_to_page(v_ptr); in vmalloc_oob()
1599 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page); in vmalloc_oob()
1600 p_ptr = page_address(page); in vmalloc_oob()
1607 * We can't check for use-after-unmap bugs in this nor in the following in vmalloc_oob()
1608 * vmalloc tests, as the page might be fully unmapped and accessing it in vmalloc_oob()
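Putting the fragments together: the test stays inside the mapped page, probing only the redzone granule right after the object. A condensed sketch:

    char *v_ptr;

    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

    v_ptr = vmalloc(size);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

    /* In-bounds accesses must pass cleanly. */
    v_ptr[0] = 0;
    v_ptr[size - 1] = 0;

    /* An aligned access into the first out-of-bounds granule. */
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

    vfree(v_ptr);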
1616 struct page *p_page, *v_page; in vmap_tags()
1619 * This test is specifically crafted for the software tag-based mode, in vmap_tags()
1620 * the only tag-based mode that poisons vmap mappings. in vmap_tags()
1638 * We can't check for out-of-bounds bugs in this nor in the following in vmap_tags()
1639 * vmalloc tests, as allocations have page granularity and accessing in vmap_tags()
1640 * the guard page will crash the kernel. in vmap_tags()
1646 /* Make sure that in-bounds accesses through both pointers work. */ in vmap_tags()
1650 /* Make sure vmalloc_to_page() correctly recovers the page pointer. */ in vmap_tags()
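The heart of the test: vmap() the page and check that the returned pointer carries its own non-match-all tag, while vmalloc_to_page() still resolves back to the original page. A sketch, assuming the internal get_tag() helper from KASAN's private header:

    v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

    /* The new mapping must carry a real, non-match-all tag. */
    KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
    KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

    /* vmalloc_to_page() must recover the original page pointer. */
    v_page = vmalloc_to_page(v_ptr);
    KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

    vunmap(v_ptr);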
1662 struct page *page; in vm_map_ram_tags() local
1665 * This test is specifically crafted for the software tag-based mode, in vm_map_ram_tags()
1666 * the only tag-based mode that poisons vm_map_ram mappings. in vm_map_ram_tags()
1670 page = alloc_pages(GFP_KERNEL, 1); in vm_map_ram_tags()
1671 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page); in vm_map_ram_tags()
1672 p_ptr = page_address(page); in vm_map_ram_tags()
1675 v_ptr = vm_map_ram(&page, 1, -1); in vm_map_ram_tags()
1681 /* Make sure that in-bounds accesses through both pointers work. */ in vm_map_ram_tags()
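The same tag check applies to vm_map_ram() (the -1 above means "any NUMA node"); the tail of the test, under the same get_tag() assumption:

    KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
    KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

    vm_unmap_ram(v_ptr, 1);
    free_pages((unsigned long)p_ptr, 1);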
1695 * This test is specifically crafted for the software tag-based mode, in vmalloc_percpu()
1696 * the only tag-based mode that poisons percpu mappings. in vmalloc_percpu()
1708 /* Make sure that in-bounds accesses don't crash the kernel. */ in vmalloc_percpu()
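A sketch of the percpu variant: allocate a percpu area and check that each CPU's pointer is both tagged and writable (get_tag() assumed as above):

    char __percpu *ptr;
    int cpu;

    ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    for_each_possible_cpu(cpu) {
            char *c_ptr = per_cpu_ptr(ptr, cpu);

            KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
            KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);

            /* Make sure that in-bounds accesses don't crash the kernel. */
            *c_ptr = 0;
    }

    free_percpu(ptr);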
1717 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
1723 struct page *pages; in match_all_not_assigned()
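The check loops over many randomly sized allocations and asserts that every returned tag falls inside the assigned range; a sketch of the kmalloc leg, assuming get_random_u32_inclusive() as the randomness helper:

    for (i = 0; i < 256; i++) {
            size = get_random_u32_inclusive(1, 1024);
            ptr = kmalloc(size, GFP_KERNEL);
            KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
            KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
            KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
            kfree(ptr);
    }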
1760 /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
1786 /* Check that there are no match-all memory tags for tag-based modes. */
1801 * For Software Tag-Based KASAN, skip the majority of tag in match_all_mem_tag()
1805 tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8) in match_all_mem_tag()
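A sketch of the loop this guard belongs to: repoison the first granule of an allocation with every candidate memory tag and verify that a mismatching pointer still faults; kasan_poison() and get_tag() are internal helpers from KASAN's private header:

    for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
            if (tag == get_tag(ptr))
                    continue;

            /* Software tags: only probe the edges of the range. */
            if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
                tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
                    continue;

            /* Mark the first granule with the chosen memory tag. */
            kasan_poison(ptr, KASAN_GRANULE_SIZE, tag, false);

            /* The mismatching pointer tag must cause a report. */
            KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr);
    }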