xref: /linux/mm/kasan/kasan_test_c.c (revision 91325f31afc1026de28665cf1a7b6e157fa4d39d)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   *
4   * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5   * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
6   */
7  
8  #define pr_fmt(fmt) "kasan: test: " fmt
9  
10  #include <kunit/test.h>
11  #include <linux/bitops.h>
12  #include <linux/delay.h>
13  #include <linux/io.h>
14  #include <linux/kasan.h>
15  #include <linux/kernel.h>
16  #include <linux/mempool.h>
17  #include <linux/mm.h>
18  #include <linux/mman.h>
19  #include <linux/module.h>
20  #include <linux/printk.h>
21  #include <linux/random.h>
22  #include <linux/set_memory.h>
23  #include <linux/slab.h>
24  #include <linux/string.h>
25  #include <linux/tracepoint.h>
26  #include <linux/uaccess.h>
27  #include <linux/vmalloc.h>
28  #include <trace/events/printk.h>
29  
30  #include <asm/page.h>
31  
32  #include "kasan.h"
33  
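/*
 * OOB_TAG_OFF below is the extra offset the out-of-bounds tests add to their
 * bad accesses: generic KASAN places a redzone directly after the object, so
 * no offset is needed, while the tag-based modes only detect an access once
 * it lands in the next KASAN_GRANULE_SIZE-aligned granule (the granule that
 * holds the end of the object still carries the object's tag).
 */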
34  #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
35  
36  MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
37  
38  static bool multishot;
39  
40  /* Fields set based on lines observed in the console. */
41  static struct {
42  	bool report_found;
43  	bool async_fault;
44  } test_status;
45  
46  /*
47   * Some tests use these global variables to store return values from function
48   * calls that could otherwise be eliminated by the compiler as dead code.
49   */
50  static void *volatile kasan_ptr_result;
51  static volatile int kasan_int_result;
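/*
 * For example, kasan_memchr() below stores the result of a checked call as
 * kasan_ptr_result = memchr(ptr, '1', size + 1), so the compiler cannot drop
 * the call and the out-of-bounds read it performs.
 */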
52  
53  /* Probe for console output: obtains test_status lines of interest. */
54  static void probe_console(void *ignore, const char *buf, size_t len)
55  {
56  	if (strnstr(buf, "BUG: KASAN: ", len))
57  		WRITE_ONCE(test_status.report_found, true);
58  	else if (strnstr(buf, "Asynchronous fault: ", len))
59  		WRITE_ONCE(test_status.async_fault, true);
60  }
61  
62  static int kasan_suite_init(struct kunit_suite *suite)
63  {
64  	if (!kasan_enabled()) {
65  		pr_err("Can't run KASAN tests with KASAN disabled");
66  		return -1;
67  	}
68  
69  	/* Stop failing KUnit tests on KASAN reports. */
70  	kasan_kunit_test_suite_start();
71  
72  	/*
73  	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
74  	 * report the first detected bug and panic the kernel if panic_on_warn
75  	 * is enabled.
76  	 */
77  	multishot = kasan_save_enable_multi_shot();
78  
79  	register_trace_console(probe_console, NULL);
80  	return 0;
81  }
82  
83  static void kasan_suite_exit(struct kunit_suite *suite)
84  {
85  	kasan_kunit_test_suite_end();
86  	kasan_restore_multi_shot(multishot);
87  	unregister_trace_console(probe_console, NULL);
88  	tracepoint_synchronize_unregister();
89  }
90  
91  static void kasan_test_exit(struct kunit *test)
92  {
93  	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
94  }
95  
96  /**
97   * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
98   * KASAN report; causes a KUnit test failure otherwise.
99   *
100   * @test: Currently executing KUnit test.
101   * @expression: Expression that must produce a KASAN report.
102   *
103   * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
104   * checking is auto-disabled. When this happens, this test handler reenables
105   * tag checking. As tag checking can be only disabled or enabled per CPU,
106   * this handler disables migration (preemption).
107   *
108   * Since the compiler doesn't see that the expression can change the test_status
109   * fields, it can reorder or optimize away the accesses to those fields.
110   * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
111   * expression to prevent that.
112   *
113   * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
114   * as false. This allows detecting KASAN reports that happen outside of the
115   * checks by asserting !test_status.report_found at the start of
116   * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
117   */
118  #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
119  	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
120  	    kasan_sync_fault_possible())				\
121  		migrate_disable();					\
122  	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
123  	barrier();							\
124  	expression;							\
125  	barrier();							\
126  	if (kasan_async_fault_possible())				\
127  		kasan_force_async_fault();				\
128  	if (!READ_ONCE(test_status.report_found)) {			\
129  		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
130  				"expected in \"" #expression		\
131  				 "\", but none occurred");		\
132  	}								\
133  	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
134  	    kasan_sync_fault_possible()) {				\
135  		if (READ_ONCE(test_status.report_found) &&		\
136  		    !READ_ONCE(test_status.async_fault))		\
137  			kasan_enable_hw_tags();				\
138  		migrate_enable();					\
139  	}								\
140  	WRITE_ONCE(test_status.report_found, false);			\
141  	WRITE_ONCE(test_status.async_fault, false);			\
142  } while (0)
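/*
 * Minimal usage sketch (illustrative only; example_uaf is not one of the
 * tests in this file): a test triggers exactly one bad access inside the
 * macro and lets the macro verify that a KASAN report was produced.
 *
 *	static void example_uaf(struct kunit *test)
 *	{
 *		char *ptr = kmalloc(16, GFP_KERNEL);
 *
 *		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 *		kfree(ptr);
 *		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
 *	}
 */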
143  
144  #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
145  	if (!IS_ENABLED(config))					\
146  		kunit_skip((test), "Test requires " #config "=y");	\
147  } while (0)
148  
149  #define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
150  	if (IS_ENABLED(config))						\
151  		kunit_skip((test), "Test requires " #config "=n");	\
152  } while (0)
153  
154  #define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do {		\
155  	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))				\
156  		break;  /* No compiler instrumentation. */		\
157  	if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX))	\
158  		break;  /* Should always be instrumented! */		\
159  	if (IS_ENABLED(CONFIG_GENERIC_ENTRY))				\
160  		kunit_skip((test), "Test requires checked mem*()");	\
161  } while (0)
162  
163  static void kmalloc_oob_right(struct kunit *test)
164  {
165  	char *ptr;
166  	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
167  
168  	ptr = kmalloc(size, GFP_KERNEL);
169  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
170  
171  	OPTIMIZER_HIDE_VAR(ptr);
172  	/*
173  	 * An unaligned access past the requested kmalloc size.
174  	 * Only generic KASAN can precisely detect these.
175  	 */
176  	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
177  		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
178  
179  	/*
180  	 * An aligned access into the first out-of-bounds granule that falls
181  	 * within the aligned kmalloc object.
182  	 */
183  	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
184  
185  	/* Out-of-bounds access past the aligned kmalloc object. */
186  	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
187  					ptr[size + KASAN_GRANULE_SIZE + 5]);
188  
189  	kfree(ptr);
190  }
191  
192  static void kmalloc_oob_left(struct kunit *test)
193  {
194  	char *ptr;
195  	size_t size = 15;
196  
197  	ptr = kmalloc(size, GFP_KERNEL);
198  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
199  
200  	OPTIMIZER_HIDE_VAR(ptr);
201  	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
202  	kfree(ptr);
203  }
204  
205  static void kmalloc_node_oob_right(struct kunit *test)
206  {
207  	char *ptr;
208  	size_t size = 4096;
209  
210  	ptr = kmalloc_node(size, GFP_KERNEL, 0);
211  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
212  
213  	OPTIMIZER_HIDE_VAR(ptr);
214  	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
215  	kfree(ptr);
216  }
217  
218  static void kmalloc_track_caller_oob_right(struct kunit *test)
219  {
220  	char *ptr;
221  	size_t size = 128 - KASAN_GRANULE_SIZE;
222  
223  	/*
224  	 * Check that KASAN detects an out-of-bounds access for an object
225  	 * allocated via kmalloc_track_caller().
226  	 */
227  	ptr = kmalloc_track_caller(size, GFP_KERNEL);
228  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
229  
230  	OPTIMIZER_HIDE_VAR(ptr);
231  	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
232  
233  	kfree(ptr);
234  
235  	/*
236  	 * Check that KASAN detects an out-of-bounds access for an object
237  	 * allocated via kmalloc_node_track_caller().
238  	 */
239  	ptr = kmalloc_node_track_caller(size, GFP_KERNEL, 0);
240  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
241  
242  	OPTIMIZER_HIDE_VAR(ptr);
243  	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
244  
245  	kfree(ptr);
246  }
247  
248  /*
249   * Check that KASAN detects an out-of-bounds access for a big object allocated
250   * via kmalloc(), but not big enough to trigger the page_alloc fallback.
251   */
252  static void kmalloc_big_oob_right(struct kunit *test)
253  {
254  	char *ptr;
255  	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
256  
257  	ptr = kmalloc(size, GFP_KERNEL);
258  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
259  
260  	OPTIMIZER_HIDE_VAR(ptr);
261  	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
262  	kfree(ptr);
263  }
264  
265  /*
266   * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
267   * that does not fit into the largest slab cache and therefore is allocated via
268   * the page_alloc fallback.
269   */
270  
271  static void kmalloc_large_oob_right(struct kunit *test)
272  {
273  	char *ptr;
274  	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
275  
276  	ptr = kmalloc(size, GFP_KERNEL);
277  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
278  
279  	OPTIMIZER_HIDE_VAR(ptr);
280  	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
281  
282  	kfree(ptr);
283  }
284  
285  static void kmalloc_large_uaf(struct kunit *test)
286  {
287  	char *ptr;
288  	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
289  
290  	ptr = kmalloc(size, GFP_KERNEL);
291  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
292  	kfree(ptr);
293  
294  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
295  }
296  
297  static void kmalloc_large_invalid_free(struct kunit *test)
298  {
299  	char *ptr;
300  	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
301  
302  	ptr = kmalloc(size, GFP_KERNEL);
303  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
304  
305  	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
306  }
307  
308  static void page_alloc_oob_right(struct kunit *test)
309  {
310  	char *ptr;
311  	struct page *pages;
312  	size_t order = 4;
313  	size_t size = (1UL << (PAGE_SHIFT + order));
314  
315  	/*
316  	 * With generic KASAN, page allocations have no redzones, so
317  	 * out-of-bounds detection is not guaranteed.
318  	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
319  	 */
320  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
321  
322  	pages = alloc_pages(GFP_KERNEL, order);
323  	ptr = page_address(pages);
324  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
325  
326  	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
327  	free_pages((unsigned long)ptr, order);
328  }
329  
330  static void page_alloc_uaf(struct kunit *test)
331  {
332  	char *ptr;
333  	struct page *pages;
334  	size_t order = 4;
335  
336  	pages = alloc_pages(GFP_KERNEL, order);
337  	ptr = page_address(pages);
338  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
339  	free_pages((unsigned long)ptr, order);
340  
341  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
342  }
343  
344  static void krealloc_more_oob_helper(struct kunit *test,
345  					size_t size1, size_t size2)
346  {
347  	char *ptr1, *ptr2;
348  	size_t middle;
349  
350  	KUNIT_ASSERT_LT(test, size1, size2);
351  	middle = size1 + (size2 - size1) / 2;
352  
353  	ptr1 = kmalloc(size1, GFP_KERNEL);
354  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
355  
356  	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
357  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
358  
359  	/* Suppress -Warray-bounds warnings. */
360  	OPTIMIZER_HIDE_VAR(ptr2);
361  
362  	/* All offsets up to size2 must be accessible. */
363  	ptr2[size1 - 1] = 'x';
364  	ptr2[size1] = 'x';
365  	ptr2[middle] = 'x';
366  	ptr2[size2 - 1] = 'x';
367  
368  	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
369  	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
370  		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
371  
372  	/* For all modes, the first aligned offset after size2 must be inaccessible. */
373  	KUNIT_EXPECT_KASAN_FAIL(test,
374  		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
375  
376  	kfree(ptr2);
377  }
378  
379  static void krealloc_less_oob_helper(struct kunit *test,
380  					size_t size1, size_t size2)
381  {
382  	char *ptr1, *ptr2;
383  	size_t middle;
384  
385  	KUNIT_ASSERT_LT(test, size2, size1);
386  	middle = size2 + (size1 - size2) / 2;
387  
388  	ptr1 = kmalloc(size1, GFP_KERNEL);
389  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
390  
391  	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
392  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
393  
394  	/* Suppress -Warray-bounds warnings. */
395  	OPTIMIZER_HIDE_VAR(ptr2);
396  
397  	/* Must be accessible for all modes. */
398  	ptr2[size2 - 1] = 'x';
399  
400  	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
401  	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
402  		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
403  
404  	/* For all modes, the first aligned offset after size2 must be inaccessible. */
405  	KUNIT_EXPECT_KASAN_FAIL(test,
406  		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
407  
408  	/*
409  	 * For all modes, size2, middle, and size1 should land in separate
410  	 * granules and thus the latter two offsets should be inaccessible.
411  	 */
412  	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
413  				round_down(middle, KASAN_GRANULE_SIZE));
414  	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
415  				round_down(size1, KASAN_GRANULE_SIZE));
416  	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
417  	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
418  	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
419  
420  	kfree(ptr2);
421  }
422  
423  static void krealloc_more_oob(struct kunit *test)
424  {
425  	krealloc_more_oob_helper(test, 201, 235);
426  }
427  
428  static void krealloc_less_oob(struct kunit *test)
429  {
430  	krealloc_less_oob_helper(test, 235, 201);
431  }
432  
433  static void krealloc_large_more_oob(struct kunit *test)
434  {
435  	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
436  					KMALLOC_MAX_CACHE_SIZE + 235);
437  }
438  
439  static void krealloc_large_less_oob(struct kunit *test)
440  {
441  	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
442  					KMALLOC_MAX_CACHE_SIZE + 201);
443  }
444  
445  /*
446   * Check that krealloc() detects a use-after-free, returns NULL,
447   * and doesn't unpoison the freed object.
448   */
449  static void krealloc_uaf(struct kunit *test)
450  {
451  	char *ptr1, *ptr2;
452  	int size1 = 201;
453  	int size2 = 235;
454  
455  	ptr1 = kmalloc(size1, GFP_KERNEL);
456  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
457  	kfree(ptr1);
458  
459  	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
460  	KUNIT_ASSERT_NULL(test, ptr2);
461  	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
462  }
463  
464  static void kmalloc_oob_16(struct kunit *test)
465  {
466  	struct {
467  		u64 words[2];
468  	} *ptr1, *ptr2;
469  
470  	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
471  
472  	/* This test is specifically crafted for the generic mode. */
473  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
474  
475  	/* RELOC_HIDE to prevent gcc from warning about short alloc */
476  	ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
477  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
478  
479  	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
480  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
481  
482  	OPTIMIZER_HIDE_VAR(ptr1);
483  	OPTIMIZER_HIDE_VAR(ptr2);
484  	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
485  	kfree(ptr1);
486  	kfree(ptr2);
487  }
488  
489  static void kmalloc_uaf_16(struct kunit *test)
490  {
491  	struct {
492  		u64 words[2];
493  	} *ptr1, *ptr2;
494  
495  	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
496  
497  	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
498  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
499  
500  	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
501  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
502  	kfree(ptr2);
503  
504  	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
505  	kfree(ptr1);
506  }
507  
508  /*
509   * Note: in the memset tests below, the written range touches both valid and
510   * invalid memory. This makes sure that the instrumentation checks not only
511   * the starting address but also the whole range.
512   */
513  
514  static void kmalloc_oob_memset_2(struct kunit *test)
515  {
516  	char *ptr;
517  	size_t size = 128 - KASAN_GRANULE_SIZE;
518  	size_t memset_size = 2;
519  
520  	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
521  
522  	ptr = kmalloc(size, GFP_KERNEL);
523  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
524  
525  	OPTIMIZER_HIDE_VAR(ptr);
526  	OPTIMIZER_HIDE_VAR(size);
527  	OPTIMIZER_HIDE_VAR(memset_size);
528  	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
529  	kfree(ptr);
530  }
531  
532  static void kmalloc_oob_memset_4(struct kunit *test)
533  {
534  	char *ptr;
535  	size_t size = 128 - KASAN_GRANULE_SIZE;
536  	size_t memset_size = 4;
537  
538  	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
539  
540  	ptr = kmalloc(size, GFP_KERNEL);
541  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
542  
543  	OPTIMIZER_HIDE_VAR(ptr);
544  	OPTIMIZER_HIDE_VAR(size);
545  	OPTIMIZER_HIDE_VAR(memset_size);
546  	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
547  	kfree(ptr);
548  }
549  
550  static void kmalloc_oob_memset_8(struct kunit *test)
551  {
552  	char *ptr;
553  	size_t size = 128 - KASAN_GRANULE_SIZE;
554  	size_t memset_size = 8;
555  
556  	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
557  
558  	ptr = kmalloc(size, GFP_KERNEL);
559  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
560  
561  	OPTIMIZER_HIDE_VAR(ptr);
562  	OPTIMIZER_HIDE_VAR(size);
563  	OPTIMIZER_HIDE_VAR(memset_size);
564  	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
565  	kfree(ptr);
566  }
567  
568  static void kmalloc_oob_memset_16(struct kunit *test)
569  {
570  	char *ptr;
571  	size_t size = 128 - KASAN_GRANULE_SIZE;
572  	size_t memset_size = 16;
573  
574  	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
575  
576  	ptr = kmalloc(size, GFP_KERNEL);
577  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
578  
579  	OPTIMIZER_HIDE_VAR(ptr);
580  	OPTIMIZER_HIDE_VAR(size);
581  	OPTIMIZER_HIDE_VAR(memset_size);
582  	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
583  	kfree(ptr);
584  }
585  
586  static void kmalloc_oob_in_memset(struct kunit *test)
587  {
588  	char *ptr;
589  	size_t size = 128 - KASAN_GRANULE_SIZE;
590  
591  	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
592  
593  	ptr = kmalloc(size, GFP_KERNEL);
594  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
595  
596  	OPTIMIZER_HIDE_VAR(ptr);
597  	OPTIMIZER_HIDE_VAR(size);
598  	KUNIT_EXPECT_KASAN_FAIL(test,
599  				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
600  	kfree(ptr);
601  }
602  
603  static void kmalloc_memmove_negative_size(struct kunit *test)
604  {
605  	char *ptr;
606  	size_t size = 64;
607  	size_t invalid_size = -2;
608  
609  	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
610  
611  	/*
612  	 * Hardware tag-based mode doesn't check memmove for negative size.
613  	 * As a result, this test causes memory corruption as a side effect,
614  	 * which can result in a crash.
615  	 */
616  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
617  
618  	ptr = kmalloc(size, GFP_KERNEL);
619  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
620  
621  	memset((char *)ptr, 0, 64);
622  	OPTIMIZER_HIDE_VAR(ptr);
623  	OPTIMIZER_HIDE_VAR(invalid_size);
624  	KUNIT_EXPECT_KASAN_FAIL(test,
625  		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
626  	kfree(ptr);
627  }
628  
629  static void kmalloc_memmove_invalid_size(struct kunit *test)
630  {
631  	char *ptr;
632  	size_t size = 64;
633  	size_t invalid_size = size;
634  
635  	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
636  
637  	ptr = kmalloc(size, GFP_KERNEL);
638  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
639  
640  	memset((char *)ptr, 0, 64);
641  	OPTIMIZER_HIDE_VAR(ptr);
642  	OPTIMIZER_HIDE_VAR(invalid_size);
643  	KUNIT_EXPECT_KASAN_FAIL(test,
644  		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
645  	kfree(ptr);
646  }
647  
648  static void kmalloc_uaf(struct kunit *test)
649  {
650  	char *ptr;
651  	size_t size = 10;
652  
653  	ptr = kmalloc(size, GFP_KERNEL);
654  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
655  
656  	kfree(ptr);
657  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
658  }
659  
660  static void kmalloc_uaf_memset(struct kunit *test)
661  {
662  	char *ptr;
663  	size_t size = 33;
664  
665  	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
666  
667  	/*
668  	 * Only generic KASAN uses quarantine, which is required to avoid a
669  	 * kernel memory corruption this test causes.
670  	 */
671  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
672  
673  	ptr = kmalloc(size, GFP_KERNEL);
674  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
675  
676  	kfree(ptr);
677  	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
678  }
679  
680  static void kmalloc_uaf2(struct kunit *test)
681  {
682  	char *ptr1, *ptr2;
683  	size_t size = 43;
684  	int counter = 0;
685  
686  again:
687  	ptr1 = kmalloc(size, GFP_KERNEL);
688  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
689  
690  	kfree(ptr1);
691  
692  	ptr2 = kmalloc(size, GFP_KERNEL);
693  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
694  
695  	/*
696  	 * For tag-based KASAN, the ptr1 and ptr2 tags might happen to be the same.
697  	 * Allow up to 16 attempts at generating different tags.
698  	 */
699  	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
700  		kfree(ptr2);
701  		goto again;
702  	}
703  
704  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
705  	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
706  
707  	kfree(ptr2);
708  }
709  
710  /*
711   * Check that KASAN detects use-after-free when another object was allocated in
712   * the same slot. Relevant for the tag-based modes, which do not use quarantine.
713   */
714  static void kmalloc_uaf3(struct kunit *test)
715  {
716  	char *ptr1, *ptr2;
717  	size_t size = 100;
718  
719  	/* This test is specifically crafted for tag-based modes. */
720  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
721  
722  	ptr1 = kmalloc(size, GFP_KERNEL);
723  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
724  	kfree(ptr1);
725  
726  	ptr2 = kmalloc(size, GFP_KERNEL);
727  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
728  	kfree(ptr2);
729  
730  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
731  }
732  
733  static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
734  {
735  	int *i_unsafe = unsafe;
736  
737  	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
738  	KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
739  	KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
740  	KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
741  
742  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
743  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
744  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
745  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
746  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
747  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
748  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
749  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
750  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
751  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
752  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
753  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
754  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
755  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
756  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
757  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
758  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
759  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
760  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
761  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
762  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
763  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
764  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
765  
766  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
767  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
768  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
769  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
770  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
771  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
772  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
773  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
774  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
775  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
776  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
777  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
778  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
779  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
780  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
781  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
782  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
783  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
784  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
785  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
786  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
787  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
788  	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
789  }
790  
791  static void kasan_atomics(struct kunit *test)
792  {
793  	void *a1, *a2;
794  
795  	/*
796  	 * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
797  	 * that the following 16 bytes will make up the redzone.
798  	 */
799  	a1 = kzalloc(48, GFP_KERNEL);
800  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
801  	a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
802  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);
803  
804  	/* Use atomics to access the redzone. */
805  	kasan_atomics_helper(test, a1 + 48, a2);
806  
807  	kfree(a1);
808  	kfree(a2);
809  }
810  
811  static void kmalloc_double_kzfree(struct kunit *test)
812  {
813  	char *ptr;
814  	size_t size = 16;
815  
816  	ptr = kmalloc(size, GFP_KERNEL);
817  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
818  
819  	kfree_sensitive(ptr);
820  	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
821  }
822  
823  /* Check that ksize() does NOT unpoison the whole object. */
824  static void ksize_unpoisons_memory(struct kunit *test)
825  {
826  	char *ptr;
827  	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
828  	size_t real_size;
829  
830  	ptr = kmalloc(size, GFP_KERNEL);
831  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
832  
833  	real_size = ksize(ptr);
834  	KUNIT_EXPECT_GT(test, real_size, size);
835  
836  	OPTIMIZER_HIDE_VAR(ptr);
837  
838  	/* These accesses shouldn't trigger a KASAN report. */
839  	ptr[0] = 'x';
840  	ptr[size - 1] = 'x';
841  
842  	/* These must trigger a KASAN report. */
843  	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
844  		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
845  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
846  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
847  
848  	kfree(ptr);
849  }
850  
851  /*
852   * Check that a use-after-free is detected by ksize() and via normal accesses
853   * after it.
854   */
855  static void ksize_uaf(struct kunit *test)
856  {
857  	char *ptr;
858  	int size = 128 - KASAN_GRANULE_SIZE;
859  
860  	ptr = kmalloc(size, GFP_KERNEL);
861  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
862  	kfree(ptr);
863  
864  	OPTIMIZER_HIDE_VAR(ptr);
865  	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
866  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
867  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
868  }
869  
870  /*
871   * The two tests below check that Generic KASAN prints auxiliary stack traces
872   * for RCU callbacks and workqueues. The reports need to be inspected manually.
873   *
874   * These tests are still enabled for other KASAN modes to make sure that all
875   * modes report bad accesses in tested scenarios.
876   */
877  
878  static struct kasan_rcu_info {
879  	int i;
880  	struct rcu_head rcu;
881  } *global_rcu_ptr;
882  
883  static void rcu_uaf_reclaim(struct rcu_head *rp)
884  {
885  	struct kasan_rcu_info *fp =
886  		container_of(rp, struct kasan_rcu_info, rcu);
887  
888  	kfree(fp);
889  	((volatile struct kasan_rcu_info *)fp)->i;
890  }
891  
892  static void rcu_uaf(struct kunit *test)
893  {
894  	struct kasan_rcu_info *ptr;
895  
896  	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
897  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
898  
899  	global_rcu_ptr = rcu_dereference_protected(
900  				(struct kasan_rcu_info __rcu *)ptr, NULL);
901  
902  	KUNIT_EXPECT_KASAN_FAIL(test,
903  		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
904  		rcu_barrier());
905  }
906  
907  static void workqueue_uaf_work(struct work_struct *work)
908  {
909  	kfree(work);
910  }
911  
912  static void workqueue_uaf(struct kunit *test)
913  {
914  	struct workqueue_struct *workqueue;
915  	struct work_struct *work;
916  
917  	workqueue = create_workqueue("kasan_workqueue_test");
918  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
919  
920  	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
921  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
922  
923  	INIT_WORK(work, workqueue_uaf_work);
924  	queue_work(workqueue, work);
925  	destroy_workqueue(workqueue);
926  
927  	KUNIT_EXPECT_KASAN_FAIL(test,
928  		((volatile struct work_struct *)work)->data);
929  }
930  
931  static void kfree_via_page(struct kunit *test)
932  {
933  	char *ptr;
934  	size_t size = 8;
935  	struct page *page;
936  	unsigned long offset;
937  
938  	ptr = kmalloc(size, GFP_KERNEL);
939  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
940  
941  	page = virt_to_page(ptr);
942  	offset = offset_in_page(ptr);
943  	kfree(page_address(page) + offset);
944  }
945  
946  static void kfree_via_phys(struct kunit *test)
947  {
948  	char *ptr;
949  	size_t size = 8;
950  	phys_addr_t phys;
951  
952  	ptr = kmalloc(size, GFP_KERNEL);
953  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
954  
955  	phys = virt_to_phys(ptr);
956  	kfree(phys_to_virt(phys));
957  }
958  
959  static void kmem_cache_oob(struct kunit *test)
960  {
961  	char *p;
962  	size_t size = 200;
963  	struct kmem_cache *cache;
964  
965  	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
966  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
967  
968  	p = kmem_cache_alloc(cache, GFP_KERNEL);
969  	if (!p) {
970  		kunit_err(test, "Allocation failed: %s\n", __func__);
971  		kmem_cache_destroy(cache);
972  		return;
973  	}
974  
975  	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
976  
977  	kmem_cache_free(cache, p);
978  	kmem_cache_destroy(cache);
979  }
980  
981  static void kmem_cache_double_free(struct kunit *test)
982  {
983  	char *p;
984  	size_t size = 200;
985  	struct kmem_cache *cache;
986  
987  	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
988  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
989  
990  	p = kmem_cache_alloc(cache, GFP_KERNEL);
991  	if (!p) {
992  		kunit_err(test, "Allocation failed: %s\n", __func__);
993  		kmem_cache_destroy(cache);
994  		return;
995  	}
996  
997  	kmem_cache_free(cache, p);
998  	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
999  	kmem_cache_destroy(cache);
1000  }
1001  
1002  static void kmem_cache_invalid_free(struct kunit *test)
1003  {
1004  	char *p;
1005  	size_t size = 200;
1006  	struct kmem_cache *cache;
1007  
1008  	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
1009  				  NULL);
1010  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1011  
1012  	p = kmem_cache_alloc(cache, GFP_KERNEL);
1013  	if (!p) {
1014  		kunit_err(test, "Allocation failed: %s\n", __func__);
1015  		kmem_cache_destroy(cache);
1016  		return;
1017  	}
1018  
1019  	/* Trigger an invalid free; the object doesn't get freed. */
1020  	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
1021  
1022  	/*
1023  	 * Properly free the object to prevent the "Objects remaining in
1024  	 * test_cache on __kmem_cache_shutdown" BUG failure.
1025  	 */
1026  	kmem_cache_free(cache, p);
1027  
1028  	kmem_cache_destroy(cache);
1029  }
1030  
1031  static void kmem_cache_rcu_uaf(struct kunit *test)
1032  {
1033  	char *p;
1034  	size_t size = 200;
1035  	struct kmem_cache *cache;
1036  
1037  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
1038  
1039  	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
1040  				  NULL);
1041  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1042  
1043  	p = kmem_cache_alloc(cache, GFP_KERNEL);
1044  	if (!p) {
1045  		kunit_err(test, "Allocation failed: %s\n", __func__);
1046  		kmem_cache_destroy(cache);
1047  		return;
1048  	}
1049  	*p = 1;
1050  
1051  	rcu_read_lock();
1052  
1053  	/* Free the object - this will internally schedule an RCU callback. */
1054  	kmem_cache_free(cache, p);
1055  
1056  	/*
1057  	 * We should still be allowed to access the object at this point because
1058  	 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
1059  	 * critical section since before the kmem_cache_free().
1060  	 */
1061  	READ_ONCE(*p);
1062  
1063  	rcu_read_unlock();
1064  
1065  	/*
1066  	 * Wait for the RCU callback to execute; after this, the object should
1067  	 * have actually been freed from KASAN's perspective.
1068  	 */
1069  	rcu_barrier();
1070  
1071  	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
1072  
1073  	kmem_cache_destroy(cache);
1074  }
1075  
1076  static void kmem_cache_double_destroy(struct kunit *test)
1077  {
1078  	struct kmem_cache *cache;
1079  
1080  	cache = kmem_cache_create("test_cache", 200, 0, SLAB_NO_MERGE, NULL);
1081  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1082  	kmem_cache_destroy(cache);
1083  	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
1084  }
1085  
1086  static void kmem_cache_accounted(struct kunit *test)
1087  {
1088  	int i;
1089  	char *p;
1090  	size_t size = 200;
1091  	struct kmem_cache *cache;
1092  
1093  	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
1094  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1095  
1096  	/*
1097  	 * Several allocations with a delay to allow for lazy per memcg kmem
1098  	 * cache creation.
1099  	 */
1100  	for (i = 0; i < 5; i++) {
1101  		p = kmem_cache_alloc(cache, GFP_KERNEL);
1102  		if (!p)
1103  			goto free_cache;
1104  
1105  		kmem_cache_free(cache, p);
1106  		msleep(100);
1107  	}
1108  
1109  free_cache:
1110  	kmem_cache_destroy(cache);
1111  }
1112  
1113  static void kmem_cache_bulk(struct kunit *test)
1114  {
1115  	struct kmem_cache *cache;
1116  	size_t size = 200;
1117  	char *p[10];
1118  	bool ret;
1119  	int i;
1120  
1121  	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1122  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1123  
1124  	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
1125  	if (!ret) {
1126  		kunit_err(test, "Allocation failed: %s\n", __func__);
1127  		kmem_cache_destroy(cache);
1128  		return;
1129  	}
1130  
1131  	for (i = 0; i < ARRAY_SIZE(p); i++)
1132  		p[i][0] = p[i][size - 1] = 42;
1133  
1134  	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
1135  	kmem_cache_destroy(cache);
1136  }
1137  
1138  static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
1139  {
1140  	int pool_size = 4;
1141  	int ret;
1142  	void *elem;
1143  
1144  	memset(pool, 0, sizeof(*pool));
1145  	ret = mempool_init_kmalloc_pool(pool, pool_size, size);
1146  	KUNIT_ASSERT_EQ(test, ret, 0);
1147  
1148  	/*
1149  	 * Allocate one element to prevent mempool from freeing elements to the
1150  	 * underlying allocator and instead make it add them to the element
1151  	 * list when the tests trigger double-free and invalid-free bugs.
1152  	 * This allows testing KASAN annotations in add_element().
1153  	 */
1154  	elem = mempool_alloc_preallocated(pool);
1155  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1156  
1157  	return elem;
1158  }
1159  
1160  static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
1161  {
1162  	struct kmem_cache *cache;
1163  	int pool_size = 4;
1164  	int ret;
1165  
1166  	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1167  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1168  
1169  	memset(pool, 0, sizeof(*pool));
1170  	ret = mempool_init_slab_pool(pool, pool_size, cache);
1171  	KUNIT_ASSERT_EQ(test, ret, 0);
1172  
1173  	/*
1174  	 * Do not allocate one preallocated element, as we skip the double-free
1175  	 * and invalid-free tests for slab mempool for simplicity.
1176  	 */
1177  
1178  	return cache;
1179  }
1180  
1181  static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
1182  {
1183  	int pool_size = 4;
1184  	int ret;
1185  	void *elem;
1186  
1187  	memset(pool, 0, sizeof(*pool));
1188  	ret = mempool_init_page_pool(pool, pool_size, order);
1189  	KUNIT_ASSERT_EQ(test, ret, 0);
1190  
1191  	elem = mempool_alloc_preallocated(pool);
1192  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1193  
1194  	return elem;
1195  }
1196  
1197  static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
1198  {
1199  	char *elem;
1200  
1201  	elem = mempool_alloc_preallocated(pool);
1202  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1203  
1204  	OPTIMIZER_HIDE_VAR(elem);
1205  
1206  	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1207  		KUNIT_EXPECT_KASAN_FAIL(test,
1208  			((volatile char *)&elem[size])[0]);
1209  	else
1210  		KUNIT_EXPECT_KASAN_FAIL(test,
1211  			((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);
1212  
1213  	mempool_free(elem, pool);
1214  }
1215  
1216  static void mempool_kmalloc_oob_right(struct kunit *test)
1217  {
1218  	mempool_t pool;
1219  	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
1220  	void *extra_elem;
1221  
1222  	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1223  
1224  	mempool_oob_right_helper(test, &pool, size);
1225  
1226  	mempool_free(extra_elem, &pool);
1227  	mempool_exit(&pool);
1228  }
1229  
1230  static void mempool_kmalloc_large_oob_right(struct kunit *test)
1231  {
1232  	mempool_t pool;
1233  	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1234  	void *extra_elem;
1235  
1236  	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1237  
1238  	mempool_oob_right_helper(test, &pool, size);
1239  
1240  	mempool_free(extra_elem, &pool);
1241  	mempool_exit(&pool);
1242  }
1243  
1244  static void mempool_slab_oob_right(struct kunit *test)
1245  {
1246  	mempool_t pool;
1247  	size_t size = 123;
1248  	struct kmem_cache *cache;
1249  
1250  	cache = mempool_prepare_slab(test, &pool, size);
1251  
1252  	mempool_oob_right_helper(test, &pool, size);
1253  
1254  	mempool_exit(&pool);
1255  	kmem_cache_destroy(cache);
1256  }
1257  
1258  /*
1259   * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
1260   * allocations have no redzones, and thus the out-of-bounds detection is not
1261   * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
1262   * the tag-based KASAN modes, the neighboring allocation might have the same
1263   * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
1264   */
1265  
1266  static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
1267  {
1268  	char *elem, *ptr;
1269  
1270  	elem = mempool_alloc_preallocated(pool);
1271  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1272  
1273  	mempool_free(elem, pool);
1274  
1275  	ptr = page ? page_address((struct page *)elem) : elem;
1276  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
1277  }
1278  
1279  static void mempool_kmalloc_uaf(struct kunit *test)
1280  {
1281  	mempool_t pool;
1282  	size_t size = 128;
1283  	void *extra_elem;
1284  
1285  	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1286  
1287  	mempool_uaf_helper(test, &pool, false);
1288  
1289  	mempool_free(extra_elem, &pool);
1290  	mempool_exit(&pool);
1291  }
1292  
1293  static void mempool_kmalloc_large_uaf(struct kunit *test)
1294  {
1295  	mempool_t pool;
1296  	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1297  	void *extra_elem;
1298  
1299  	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1300  
1301  	mempool_uaf_helper(test, &pool, false);
1302  
1303  	mempool_free(extra_elem, &pool);
1304  	mempool_exit(&pool);
1305  }
1306  
1307  static void mempool_slab_uaf(struct kunit *test)
1308  {
1309  	mempool_t pool;
1310  	size_t size = 123;
1311  	struct kmem_cache *cache;
1312  
1313  	cache = mempool_prepare_slab(test, &pool, size);
1314  
1315  	mempool_uaf_helper(test, &pool, false);
1316  
1317  	mempool_exit(&pool);
1318  	kmem_cache_destroy(cache);
1319  }
1320  
1321  static void mempool_page_alloc_uaf(struct kunit *test)
1322  {
1323  	mempool_t pool;
1324  	int order = 2;
1325  	void *extra_elem;
1326  
1327  	extra_elem = mempool_prepare_page(test, &pool, order);
1328  
1329  	mempool_uaf_helper(test, &pool, true);
1330  
1331  	mempool_free(extra_elem, &pool);
1332  	mempool_exit(&pool);
1333  }
1334  
1335  static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
1336  {
1337  	char *elem;
1338  
1339  	elem = mempool_alloc_preallocated(pool);
1340  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1341  
1342  	mempool_free(elem, pool);
1343  
1344  	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
1345  }
1346  
1347  static void mempool_kmalloc_double_free(struct kunit *test)
1348  {
1349  	mempool_t pool;
1350  	size_t size = 128;
1351  	char *extra_elem;
1352  
1353  	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1354  
1355  	mempool_double_free_helper(test, &pool);
1356  
1357  	mempool_free(extra_elem, &pool);
1358  	mempool_exit(&pool);
1359  }
1360  
1361  static void mempool_kmalloc_large_double_free(struct kunit *test)
1362  {
1363  	mempool_t pool;
1364  	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1365  	char *extra_elem;
1366  
1367  	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1368  
1369  	mempool_double_free_helper(test, &pool);
1370  
1371  	mempool_free(extra_elem, &pool);
1372  	mempool_exit(&pool);
1373  }
1374  
1375  static void mempool_page_alloc_double_free(struct kunit *test)
1376  {
1377  	mempool_t pool;
1378  	int order = 2;
1379  	char *extra_elem;
1380  
1381  	extra_elem = mempool_prepare_page(test, &pool, order);
1382  
1383  	mempool_double_free_helper(test, &pool);
1384  
1385  	mempool_free(extra_elem, &pool);
1386  	mempool_exit(&pool);
1387  }
1388  
1389  static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
1390  {
1391  	char *elem;
1392  
1393  	elem = mempool_alloc_preallocated(pool);
1394  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1395  
1396  	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));
1397  
1398  	mempool_free(elem, pool);
1399  }
1400  
1401  static void mempool_kmalloc_invalid_free(struct kunit *test)
1402  {
1403  	mempool_t pool;
1404  	size_t size = 128;
1405  	char *extra_elem;
1406  
1407  	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1408  
1409  	mempool_kmalloc_invalid_free_helper(test, &pool);
1410  
1411  	mempool_free(extra_elem, &pool);
1412  	mempool_exit(&pool);
1413  }
1414  
1415  static void mempool_kmalloc_large_invalid_free(struct kunit *test)
1416  {
1417  	mempool_t pool;
1418  	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1419  	char *extra_elem;
1420  
1421  	extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1422  
1423  	mempool_kmalloc_invalid_free_helper(test, &pool);
1424  
1425  	mempool_free(extra_elem, &pool);
1426  	mempool_exit(&pool);
1427  }
1428  
1429  /*
1430   * Skip the invalid-free test for page mempool. The invalid-free detection only
1431   * works for compound pages and mempool preallocates all page elements without
1432   * the __GFP_COMP flag.
1433   */
1434  
1435  static char global_array[10];
1436  
1437  static void kasan_global_oob_right(struct kunit *test)
1438  {
1439  	/*
1440  	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
1441  	 * from failing here and panicking the kernel, access the array via a
1442  	 * volatile pointer, which will prevent the compiler from being able to
1443  	 * determine the array bounds.
1444  	 *
1445  	 * This access uses a volatile pointer to char (char *volatile) rather
1446  	 * than the more conventional pointer to volatile char (volatile char *)
1447  	 * because we want to prevent the compiler from making inferences about
1448  	 * the pointer itself (i.e. its array bounds), not the data that it
1449  	 * refers to.
1450  	 */
1451  	char *volatile array = global_array;
1452  	char *p = &array[ARRAY_SIZE(global_array) + 3];
1453  
1454  	/* Only generic mode instruments globals. */
1455  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1456  
1457  	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1458  }
1459  
1460  static void kasan_global_oob_left(struct kunit *test)
1461  {
1462  	char *volatile array = global_array;
1463  	char *p = array - 3;
1464  
1465  	/*
1466  	 * GCC is known to fail this test; skip it.
1467  	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
1468  	 */
1469  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
1470  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1471  	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1472  }
1473  
1474  static void kasan_stack_oob(struct kunit *test)
1475  {
1476  	char stack_array[10];
1477  	/* See comment in kasan_global_oob_right. */
1478  	char *volatile array = stack_array;
1479  	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
1480  
1481  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1482  
1483  	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1484  }
1485  
1486  static void kasan_alloca_oob_left(struct kunit *test)
1487  {
1488  	volatile int i = 10;
1489  	char alloca_array[i];
1490  	/* See comment in kasan_global_oob_right. */
1491  	char *volatile array = alloca_array;
1492  	char *p = array - 1;
1493  
1494  	/* Only generic mode instruments dynamic allocas. */
1495  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1496  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1497  
1498  	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1499  }
1500  
1501  static void kasan_alloca_oob_right(struct kunit *test)
1502  {
1503  	volatile int i = 10;
1504  	char alloca_array[i];
1505  	/* See comment in kasan_global_oob_right. */
1506  	char *volatile array = alloca_array;
1507  	char *p = array + i;
1508  
1509  	/* Only generic mode instruments dynamic allocas. */
1510  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1511  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1512  
1513  	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1514  }
1515  
1516  static void kasan_memchr(struct kunit *test)
1517  {
1518  	char *ptr;
1519  	size_t size = 24;
1520  
1521  	/*
1522  	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1523  	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1524  	 */
1525  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1526  
1527  	if (OOB_TAG_OFF)
1528  		size = round_up(size, OOB_TAG_OFF);
1529  
1530  	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1531  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1532  
1533  	OPTIMIZER_HIDE_VAR(ptr);
1534  	OPTIMIZER_HIDE_VAR(size);
1535  	KUNIT_EXPECT_KASAN_FAIL(test,
1536  		kasan_ptr_result = memchr(ptr, '1', size + 1));
1537  
1538  	kfree(ptr);
1539  }
1540  
1541  static void kasan_memcmp(struct kunit *test)
1542  {
1543  	char *ptr;
1544  	size_t size = 24;
1545  	int arr[9];
1546  
1547  	/*
1548  	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1549  	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1550  	 */
1551  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1552  
1553  	if (OOB_TAG_OFF)
1554  		size = round_up(size, OOB_TAG_OFF);
1555  
1556  	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1557  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1558  	memset(arr, 0, sizeof(arr));
1559  
1560  	OPTIMIZER_HIDE_VAR(ptr);
1561  	OPTIMIZER_HIDE_VAR(size);
1562  	KUNIT_EXPECT_KASAN_FAIL(test,
1563  		kasan_int_result = memcmp(ptr, arr, size+1));
1564  	kfree(ptr);
1565  }
1566  
1567  static void kasan_strings(struct kunit *test)
1568  {
1569  	char *ptr;
1570  	char *src;
1571  	size_t size = 24;
1572  
1573  	/*
1574  	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1575  	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1576  	 */
1577  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1578  
1579  	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1580  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1581  
1582  	src = kmalloc(KASAN_GRANULE_SIZE, GFP_KERNEL | __GFP_ZERO);
1583  	strscpy(src, "f0cacc1a0000000", KASAN_GRANULE_SIZE);
1584  
1585  	/*
1586  	 * Make sure that strscpy() does not trigger KASAN if it overreads into
1587  	 * poisoned memory.
1588  	 *
1589  	 * The expected size does not include the terminator '\0'
1590  	 * so it is (KASAN_GRANULE_SIZE - 2) ==
1591  	 * KASAN_GRANULE_SIZE - ("initial removed character" + "\0").
1592  	 */
1593  	KUNIT_EXPECT_EQ(test, KASAN_GRANULE_SIZE - 2,
1594  			strscpy(ptr, src + 1, KASAN_GRANULE_SIZE));
1595  
1596  	/* strscpy should fail if the first byte is unreadable. */
1597  	KUNIT_EXPECT_KASAN_FAIL(test, strscpy(ptr, src + KASAN_GRANULE_SIZE,
1598  					      KASAN_GRANULE_SIZE));
1599  
1600  	kfree(src);
1601  	kfree(ptr);
1602  
1603  	/*
1604  	 * Try to cause only 1 invalid access (less spam in dmesg).
1605  	 * For that we need ptr to point to a zeroed byte.
1606  	 * Skip metadata that could be stored in the freed object so that ptr
1607  	 * will likely point to a zeroed byte.
1608  	 */
1609  	ptr += 16;
1610  	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
1611  
1612  	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
1613  
1614  	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
1615  
1616  	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
1617  
1618  	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
1619  
1620  	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
1621  }
1622  
1623  static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
1624  {
1625  	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
1626  	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
1627  	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
1628  	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
1629  	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
1630  	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
1631  	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
1632  	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
1633  }
1634  
1635  static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
1636  {
1637  	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
1638  	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
1639  	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
1640  	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
1641  	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
1642  	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
1643  	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
1644  	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
1645  	if (nr < 7)
1646  		KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
1647  				xor_unlock_is_negative_byte(1 << nr, addr));
1648  }
1649  
1650  static void kasan_bitops_generic(struct kunit *test)
1651  {
1652  	long *bits;
1653  
1654  	/* This test is specifically crafted for the generic mode. */
1655  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1656  
1657  	/*
1658  	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
1659  	 * this way we do not actually corrupt other memory.
1660  	 */
1661  	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
1662  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1663  
1664  	/*
1665  	 * The calls below access a bit that lies within the allocated memory;
1666  	 * the accesses are still out-of-bounds, since bitops are defined to
1667  	 * operate on the whole long the bit is in.
1668  	 */
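	/*
	 * As a concrete illustration (assuming a 64-bit kernel, so
	 * BITS_PER_LONG == 64): bit 64 lives in bits[1], i.e. bytes 8..15 of
	 * the object. Only byte 8 lies within the sizeof(*bits) + 1 == 9
	 * requested bytes, so the whole-long access performed by e.g.
	 * set_bit() runs into the poisoned tail of the 16-byte slab object.
	 */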
1669  	kasan_bitops_modify(test, BITS_PER_LONG, bits);
1670  
1671  	/*
1672  	 * The calls below access a bit beyond the allocated memory.
1673  	 */
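	/*
	 * Under the same 64-bit assumption, bit 72 lives in byte 9 of the
	 * object, past the nine requested bytes, so every accessor in
	 * kasan_bitops_test_and_modify(), including the plain test_bit() read,
	 * touches poisoned memory.
	 */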
1674  	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
1675  
1676  	kfree(bits);
1677  }
1678  
1679  static void kasan_bitops_tags(struct kunit *test)
1680  {
1681  	long *bits;
1682  
1683  	/* This test is specifically crafted for tag-based modes. */
1684  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1685  
1686  	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
1687  	bits = kzalloc(48, GFP_KERNEL);
1688  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1689  
1690  	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
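	/*
	 * The accesses below go through bits + 48, i.e. into the last 16 bytes
	 * of the kmalloc-64 slot. In the tag-based modes the granule is 16
	 * bytes and that trailing range is expected to carry a tag different
	 * from the pointer tag, so every bitop on it should fault.
	 */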
1691  	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
1692  	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
1693  
1694  	kfree(bits);
1695  }
1696  
1697  static void vmalloc_helpers_tags(struct kunit *test)
1698  {
1699  	void *ptr;
1700  
1701  	/* This test is intended for tag-based modes. */
1702  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1703  
1704  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1705  
1706  	if (!kasan_vmalloc_enabled())
1707  		kunit_skip(test, "Test requires kasan.vmalloc=on");
1708  
1709  	ptr = vmalloc(PAGE_SIZE);
1710  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1711  
1712  	/* Check that the returned pointer is tagged. */
1713  	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1714  	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1715  
1716  	/* Make sure exported vmalloc helpers handle tagged pointers. */
1717  	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
1718  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));
1719  
1720  #if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
1721  	{
1722  		int rv;
1723  
1724  		/* Make sure vmalloc'ed memory permissions can be changed. */
1725  		rv = set_memory_ro((unsigned long)ptr, 1);
1726  		KUNIT_ASSERT_GE(test, rv, 0);
1727  		rv = set_memory_rw((unsigned long)ptr, 1);
1728  		KUNIT_ASSERT_GE(test, rv, 0);
1729  	}
1730  #endif
1731  
1732  	vfree(ptr);
1733  }
1734  
1735  static void vmalloc_oob(struct kunit *test)
1736  {
1737  	char *v_ptr, *p_ptr;
1738  	struct page *page;
1739  	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;
1740  
1741  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1742  
1743  	if (!kasan_vmalloc_enabled())
1744  		kunit_skip(test, "Test requires kasan.vmalloc=on");
1745  
1746  	v_ptr = vmalloc(size);
1747  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1748  
1749  	OPTIMIZER_HIDE_VAR(v_ptr);
1750  
1751  	/*
1752  	 * We have to be careful not to hit the guard page in vmalloc tests.
1753  	 * The MMU will catch that and crash us.
1754  	 */
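	/*
	 * Note on the size arithmetic (an illustration, not an extra check):
	 * size + 5 equals PAGE_SIZE / 2 - KASAN_GRANULE_SIZE, so it is
	 * granule-aligned, marks the start of the first fully out-of-bounds
	 * granule, and still lies in the middle of the mapped page, far from
	 * the guard page.
	 */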
1755  
1756  	/* Make sure in-bounds accesses are valid. */
1757  	v_ptr[0] = 0;
1758  	v_ptr[size - 1] = 0;
1759  
1760  	/*
1761  	 * An unaligned access past the requested vmalloc size.
1762  	 * Only generic KASAN can precisely detect these.
1763  	 */
1764  	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1765  		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
1766  
1767  	/* An aligned access into the first out-of-bounds granule. */
1768  	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);
1769  
1770  	/* Check that in-bounds accesses to the physical page are valid. */
1771  	page = vmalloc_to_page(v_ptr);
1772  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1773  	p_ptr = page_address(page);
1774  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1775  	p_ptr[0] = 0;
1776  
1777  	vfree(v_ptr);
1778  
1779  	/*
1780  	 * We can't check for use-after-unmap bugs in this or in the following
1781  	 * vmalloc tests, as the page might be fully unmapped and accessing it
1782  	 * will crash the kernel.
1783  	 */
1784  }
1785  
1786  static void vmap_tags(struct kunit *test)
1787  {
1788  	char *p_ptr, *v_ptr;
1789  	struct page *p_page, *v_page;
1790  
1791  	/*
1792  	 * This test is specifically crafted for the software tag-based mode,
1793  	 * the only tag-based mode that poisons vmap mappings.
1794  	 */
1795  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1796  
1797  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1798  
1799  	if (!kasan_vmalloc_enabled())
1800  		kunit_skip(test, "Test requires kasan.vmalloc=on");
1801  
1802  	p_page = alloc_pages(GFP_KERNEL, 1);
1803  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
1804  	p_ptr = page_address(p_page);
1805  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1806  
1807  	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
1808  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1809  
1810  	/*
1811  	 * We can't check for out-of-bounds bugs in this or in the following
1812  	 * vmalloc tests, as allocations have page granularity and accessing
1813  	 * the guard page will crash the kernel.
1814  	 */
1815  
1816  	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
1817  	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
1818  
1819  	/* Make sure that in-bounds accesses through both pointers work. */
1820  	*p_ptr = 0;
1821  	*v_ptr = 0;
1822  
1823  	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
1824  	v_page = vmalloc_to_page(v_ptr);
1825  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
1826  	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);
1827  
1828  	vunmap(v_ptr);
1829  	free_pages((unsigned long)p_ptr, 1);
1830  }
1831  
1832  static void vm_map_ram_tags(struct kunit *test)
1833  {
1834  	char *p_ptr, *v_ptr;
1835  	struct page *page;
1836  
1837  	/*
1838  	 * This test is specifically crafted for the software tag-based mode,
1839  	 * the only tag-based mode that poisons vm_map_ram mappings.
1840  	 */
1841  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1842  
1843  	page = alloc_pages(GFP_KERNEL, 1);
1844  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1845  	p_ptr = page_address(page);
1846  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1847  
1848  	v_ptr = vm_map_ram(&page, 1, -1);
1849  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1850  
1851  	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
1852  	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
1853  
1854  	/* Make sure that in-bounds accesses through both pointers work. */
1855  	*p_ptr = 0;
1856  	*v_ptr = 0;
1857  
1858  	vm_unmap_ram(v_ptr, 1);
1859  	free_pages((unsigned long)p_ptr, 1);
1860  }
1861  
1862  /*
1863   * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
1864   * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
1865   * modes.
1866   */
1867  static void match_all_not_assigned(struct kunit *test)
1868  {
1869  	char *ptr;
1870  	struct page *pages;
1871  	int i, size, order;
1872  
1873  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1874  
1875  	for (i = 0; i < 256; i++) {
1876  		size = get_random_u32_inclusive(1, 1024);
1877  		ptr = kmalloc(size, GFP_KERNEL);
1878  		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1879  		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1880  		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1881  		kfree(ptr);
1882  	}
1883  
1884  	for (i = 0; i < 256; i++) {
1885  		order = get_random_u32_inclusive(1, 4);
1886  		pages = alloc_pages(GFP_KERNEL, order);
1887  		ptr = page_address(pages);
1888  		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1889  		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1890  		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1891  		free_pages((unsigned long)ptr, order);
1892  	}
1893  
1894  	if (!kasan_vmalloc_enabled())
1895  		return;
1896  
1897  	for (i = 0; i < 256; i++) {
1898  		size = get_random_u32_inclusive(1, 1024);
1899  		ptr = vmalloc(size);
1900  		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1901  		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1902  		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1903  		vfree(ptr);
1904  	}
1905  }
1906  
1907  /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
1908  static void match_all_ptr_tag(struct kunit *test)
1909  {
1910  	char *ptr;
1911  	u8 tag;
1912  
1913  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1914  
1915  	ptr = kmalloc(128, GFP_KERNEL);
1916  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1917  
1918  	/* Backup the assigned tag. */
1919  	tag = get_tag(ptr);
1920  	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
1921  
1922  	/* Reset the tag to 0xff. */
1923  	ptr = set_tag(ptr, KASAN_TAG_KERNEL);
1924  
1925  	/* This access shouldn't trigger a KASAN report. */
1926  	*ptr = 0;
1927  
1928  	/* Recover the pointer tag and free. */
1929  	ptr = set_tag(ptr, tag);
1930  	kfree(ptr);
1931  }
1932  
1933  /* Check that there are no match-all memory tags for tag-based modes. */
1934  static void match_all_mem_tag(struct kunit *test)
1935  {
1936  	char *ptr;
1937  	int tag;
1938  
1939  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1940  
1941  	ptr = kmalloc(128, GFP_KERNEL);
1942  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1943  	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1944  
1945  	/* For each possible tag value not matching the pointer tag. */
1946  	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
1947  		/*
1948  		 * For Software Tag-Based KASAN, skip the majority of tag
1949  		 * values to avoid the test printing too many reports.
1950  		 */
1951  		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
1952  		    tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
1953  			continue;
1954  
1955  		if (tag == get_tag(ptr))
1956  			continue;
1957  
1958  		/* Mark the first memory granule with the chosen memory tag. */
1959  		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
1960  
1961  		/* This access must cause a KASAN report. */
1962  		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
1963  	}
1964  
1965  	/* Recover the memory tag and free. */
1966  	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
1967  	kfree(ptr);
1968  }
1969  
1970  /*
1971   * Check that Rust performing a use-after-free using `unsafe` is detected.
1972   * This is a smoke test to make sure that Rust is being sanitized properly.
1973   */
1974  static void rust_uaf(struct kunit *test)
1975  {
1976  	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
1977  	KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
1978  }
1979  
1980  /*
1981   * copy_to_kernel_nofault() is an internal helper that is only available
1982   * when kasan_test is built-in, as it is not visible to loadable modules.
1983   */
1984  #ifndef MODULE
1985  static void copy_to_kernel_nofault_oob(struct kunit *test)
1986  {
1987  	char *ptr;
1988  	char buf[128];
1989  	size_t size = sizeof(buf);
1990  
1991  	/*
1992  	 * This test currently fails with the HW_TAGS mode. The reason is
1993  	 * unknown and needs to be investigated.
1994  	 */
1995  	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
1996  
1997  	ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL);
1998  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1999  	OPTIMIZER_HIDE_VAR(ptr);
2000  
2001  	/*
2002  	 * We test copy_to_kernel_nofault() to detect corrupted memory that is
2003  	 * being written into the kernel. In contrast,
2004  	 * copy_from_kernel_nofault() is primarily used in kernel helper
2005  	 * functions where the source address might be random or uninitialized.
2006  	 * Applying KASAN instrumentation to copy_from_kernel_nofault() could
2007  	 * lead to false positives.  By focusing KASAN checks only on
2008  	 * copy_to_kernel_nofault(), we ensure that only valid memory is
2009  	 * written to the kernel, minimizing the risk of kernel corruption
2010  	 * while avoiding false positives in the reverse case.
2011  	 */
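	/*
	 * In the first call below ptr is the (undersized) source, in the
	 * second it is the destination; in both cases copying `size` bytes
	 * runs KASAN_GRANULE_SIZE bytes past the end of the object, so both
	 * are expected to produce KASAN reports.
	 */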
2012  	KUNIT_EXPECT_KASAN_FAIL(test,
2013  		copy_to_kernel_nofault(&buf[0], ptr, size));
2014  	KUNIT_EXPECT_KASAN_FAIL(test,
2015  		copy_to_kernel_nofault(ptr, &buf[0], size));
2016  
2017  	kfree(ptr);
2018  }
2019  #endif /* !MODULE */
2020  
2021  static void copy_user_test_oob(struct kunit *test)
2022  {
2023  	char *kmem;
2024  	char __user *usermem;
2025  	unsigned long useraddr;
2026  	size_t size = 128 - KASAN_GRANULE_SIZE;
2027  	int __maybe_unused unused;
2028  
2029  	kmem = kunit_kmalloc(test, size, GFP_KERNEL);
2030  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, kmem);
2031  
2032  	useraddr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
2033  					PROT_READ | PROT_WRITE | PROT_EXEC,
2034  					MAP_ANONYMOUS | MAP_PRIVATE, 0);
2035  	KUNIT_ASSERT_NE_MSG(test, useraddr, 0,
2036  		"Could not create userspace mm");
2037  	KUNIT_ASSERT_LT_MSG(test, useraddr, (unsigned long)TASK_SIZE,
2038  		"Failed to allocate user memory");
2039  
2040  	OPTIMIZER_HIDE_VAR(size);
2041  	usermem = (char __user *)useraddr;
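	/*
	 * kmem holds size == 128 - KASAN_GRANULE_SIZE bytes, so every copy of
	 * size + 1 bytes below overruns kmem by one byte. The reported access
	 * is always the one on the kernel buffer: KASAN does not track
	 * userspace memory.
	 */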
2042  
2043  	KUNIT_EXPECT_KASAN_FAIL(test,
2044  		unused = copy_from_user(kmem, usermem, size + 1));
2045  	KUNIT_EXPECT_KASAN_FAIL(test,
2046  		unused = copy_to_user(usermem, kmem, size + 1));
2047  	KUNIT_EXPECT_KASAN_FAIL(test,
2048  		unused = __copy_from_user(kmem, usermem, size + 1));
2049  	KUNIT_EXPECT_KASAN_FAIL(test,
2050  		unused = __copy_to_user(usermem, kmem, size + 1));
2051  	KUNIT_EXPECT_KASAN_FAIL(test,
2052  		unused = __copy_from_user_inatomic(kmem, usermem, size + 1));
2053  	KUNIT_EXPECT_KASAN_FAIL(test,
2054  		unused = __copy_to_user_inatomic(usermem, kmem, size + 1));
2055  
2056  	/*
2057  	 * Prepare a long string in usermem to avoid the strncpy_from_user test
2058  	 * bailing out on '\0' before it reaches out-of-bounds.
2059  	 */
2060  	memset(kmem, 'a', size);
2061  	KUNIT_EXPECT_EQ(test, copy_to_user(usermem, kmem, size), 0);
2062  
2063  	KUNIT_EXPECT_KASAN_FAIL(test,
2064  		unused = strncpy_from_user(kmem, usermem, size + 1));
2065  }
2066  
2067  static struct kunit_case kasan_kunit_test_cases[] = {
2068  	KUNIT_CASE(kmalloc_oob_right),
2069  	KUNIT_CASE(kmalloc_oob_left),
2070  	KUNIT_CASE(kmalloc_node_oob_right),
2071  	KUNIT_CASE(kmalloc_track_caller_oob_right),
2072  	KUNIT_CASE(kmalloc_big_oob_right),
2073  	KUNIT_CASE(kmalloc_large_oob_right),
2074  	KUNIT_CASE(kmalloc_large_uaf),
2075  	KUNIT_CASE(kmalloc_large_invalid_free),
2076  	KUNIT_CASE(page_alloc_oob_right),
2077  	KUNIT_CASE(page_alloc_uaf),
2078  	KUNIT_CASE(krealloc_more_oob),
2079  	KUNIT_CASE(krealloc_less_oob),
2080  	KUNIT_CASE(krealloc_large_more_oob),
2081  	KUNIT_CASE(krealloc_large_less_oob),
2082  	KUNIT_CASE(krealloc_uaf),
2083  	KUNIT_CASE(kmalloc_oob_16),
2084  	KUNIT_CASE(kmalloc_uaf_16),
2085  	KUNIT_CASE(kmalloc_oob_in_memset),
2086  	KUNIT_CASE(kmalloc_oob_memset_2),
2087  	KUNIT_CASE(kmalloc_oob_memset_4),
2088  	KUNIT_CASE(kmalloc_oob_memset_8),
2089  	KUNIT_CASE(kmalloc_oob_memset_16),
2090  	KUNIT_CASE(kmalloc_memmove_negative_size),
2091  	KUNIT_CASE(kmalloc_memmove_invalid_size),
2092  	KUNIT_CASE(kmalloc_uaf),
2093  	KUNIT_CASE(kmalloc_uaf_memset),
2094  	KUNIT_CASE(kmalloc_uaf2),
2095  	KUNIT_CASE(kmalloc_uaf3),
2096  	KUNIT_CASE(kmalloc_double_kzfree),
2097  	KUNIT_CASE(ksize_unpoisons_memory),
2098  	KUNIT_CASE(ksize_uaf),
2099  	KUNIT_CASE(rcu_uaf),
2100  	KUNIT_CASE(workqueue_uaf),
2101  	KUNIT_CASE(kfree_via_page),
2102  	KUNIT_CASE(kfree_via_phys),
2103  	KUNIT_CASE(kmem_cache_oob),
2104  	KUNIT_CASE(kmem_cache_double_free),
2105  	KUNIT_CASE(kmem_cache_invalid_free),
2106  	KUNIT_CASE(kmem_cache_rcu_uaf),
2107  	KUNIT_CASE(kmem_cache_double_destroy),
2108  	KUNIT_CASE(kmem_cache_accounted),
2109  	KUNIT_CASE(kmem_cache_bulk),
2110  	KUNIT_CASE(mempool_kmalloc_oob_right),
2111  	KUNIT_CASE(mempool_kmalloc_large_oob_right),
2112  	KUNIT_CASE(mempool_slab_oob_right),
2113  	KUNIT_CASE(mempool_kmalloc_uaf),
2114  	KUNIT_CASE(mempool_kmalloc_large_uaf),
2115  	KUNIT_CASE(mempool_slab_uaf),
2116  	KUNIT_CASE(mempool_page_alloc_uaf),
2117  	KUNIT_CASE(mempool_kmalloc_double_free),
2118  	KUNIT_CASE(mempool_kmalloc_large_double_free),
2119  	KUNIT_CASE(mempool_page_alloc_double_free),
2120  	KUNIT_CASE(mempool_kmalloc_invalid_free),
2121  	KUNIT_CASE(mempool_kmalloc_large_invalid_free),
2122  	KUNIT_CASE(kasan_global_oob_right),
2123  	KUNIT_CASE(kasan_global_oob_left),
2124  	KUNIT_CASE(kasan_stack_oob),
2125  	KUNIT_CASE(kasan_alloca_oob_left),
2126  	KUNIT_CASE(kasan_alloca_oob_right),
2127  	KUNIT_CASE(kasan_memchr),
2128  	KUNIT_CASE(kasan_memcmp),
2129  	KUNIT_CASE(kasan_strings),
2130  	KUNIT_CASE(kasan_bitops_generic),
2131  	KUNIT_CASE(kasan_bitops_tags),
2132  	KUNIT_CASE_SLOW(kasan_atomics),
2133  	KUNIT_CASE(vmalloc_helpers_tags),
2134  	KUNIT_CASE(vmalloc_oob),
2135  	KUNIT_CASE(vmap_tags),
2136  	KUNIT_CASE(vm_map_ram_tags),
2137  	KUNIT_CASE(match_all_not_assigned),
2138  	KUNIT_CASE(match_all_ptr_tag),
2139  	KUNIT_CASE(match_all_mem_tag),
2140  #ifndef MODULE
2141  	KUNIT_CASE(copy_to_kernel_nofault_oob),
2142  #endif
2143  	KUNIT_CASE(rust_uaf),
2144  	KUNIT_CASE(copy_user_test_oob),
2145  	{}
2146  };
2147  
2148  static struct kunit_suite kasan_kunit_test_suite = {
2149  	.name = "kasan",
2150  	.test_cases = kasan_kunit_test_cases,
2151  	.exit = kasan_test_exit,
2152  	.suite_init = kasan_suite_init,
2153  	.suite_exit = kasan_suite_exit,
2154  };
2155  
2156  kunit_test_suite(kasan_kunit_test_suite);
2157  
2158  MODULE_DESCRIPTION("KUnit tests for checking KASAN bug-detection capabilities");
2159  MODULE_LICENSE("GPL");
2160