// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include "../mm/slab.h"

static struct kunit_resource resource;
static int slab_errors;

/*
 * Wrapper around kmem_cache_create() which drops the 'align' and 'ctor'
 * parameters and sets the SLAB_SKIP_KFENCE flag, so that test objects
 * are never served from the kfence pool, where a corruption would be
 * caught by both our test and the kfence sanity check.
 */
static struct kmem_cache *test_kmem_cache_create(const char *name,
				unsigned int size, slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_create(name, size, 0,
					(flags | SLAB_NO_USER_FLAGS), NULL);
	s->flags |= SLAB_SKIP_KFENCE;
	return s;
}

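/*
 * Clobber the byte just past a 64-byte red-zoned object while KASAN is
 * suspended and check that validate_slab_cache() reports the damage:
 * one error for the overwritten red zone, one for fixing the broken
 * cache.
 */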
static void test_clobber_zone(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
							SLAB_RED_ZONE);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	p[64] = 0x12;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}

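/*
 * The three tests below write to objects after freeing them. With
 * KASAN enabled those use-after-free writes would be trapped before
 * slab validation could see them, so the tests are compiled out.
 */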
#ifndef CONFIG_KASAN
static void test_next_pointer(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
							64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
	unsigned long tmp;
	unsigned long *ptr_addr;

	kmem_cache_free(s, p);

	ptr_addr = (unsigned long *)(p + s->offset);
	tmp = *ptr_addr;
	p[s->offset] = ~p[s->offset];

	/*
	 * Expecting three errors: one for the corrupted freechain, one
	 * for the wrong count of objects in use, and one for fixing the
	 * broken cache.
	 */
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 3, slab_errors);

	/*
	 * Try to repair the corrupted freepointer. Still expecting two
	 * errors: one for the wrong count of objects in use, and one
	 * for fixing the broken cache.
	 */
	*ptr_addr = tmp;
	slab_errors = 0;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	/*
	 * Previous validation repaired the count of objects in use.
	 * Now expecting no error.
	 */
	slab_errors = 0;
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);

	kmem_cache_destroy(s);
}

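/*
 * Overwrite the first word of a freed, poisoned object and expect
 * validation to flag the poison corruption.
 */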
static void test_first_word(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
							64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_free(s, p);
	*p = 0x78;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kmem_cache_destroy(s);
}

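/*
 * Same as above, but clobber a byte in the middle of the poisoned
 * area instead of the first word.
 */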
static void test_clobber_50th_byte(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
							64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_free(s, p);
	p[50] = 0x9a;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kmem_cache_destroy(s);
}
#endif

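/*
 * Like test_clobber_zone(), but the red zone is overwritten after the
 * object has been freed, exercising the free-side red-zone check.
 */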
static void test_clobber_redzone_free(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
							SLAB_RED_ZONE);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	kmem_cache_free(s, p);
	p[64] = 0xab;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_destroy(s);
}

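/*
 * In a red-zoned kmalloc cache the slack between the requested size
 * (18 bytes here) and the bucket size (32 bytes) is also red-zoned.
 * Writing into that slack must be reported as corruption.
 */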
static void test_kmalloc_redzone_access(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
	u8 *p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));

	kasan_disable_current();

	/* Suppress the -Warray-bounds warning */
	OPTIMIZER_HIDE_VAR(p);
	p[18] = 0xab;
	p[19] = 0xab;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}

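/*
 * Verify that destroying a cache while an object is still pending a
 * kfree_rcu() grace period completes without errors: the destroy path
 * is expected to handle the RCU-delayed free rather than report a
 * leaked object.
 */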
struct test_kfree_rcu_struct {
	struct rcu_head rcu;
};

static void test_kfree_rcu(struct kunit *test)
{
	struct kmem_cache *s;
	struct test_kfree_rcu_struct *p;

	if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
		kunit_skip(test, "can't do kfree_rcu() when test is built-in");

	s = test_kmem_cache_create("TestSlub_kfree_rcu",
				   sizeof(struct test_kfree_rcu_struct),
				   SLAB_NO_MERGE);
	p = kmem_cache_alloc(s, GFP_KERNEL);

	kfree_rcu(p, rcu);
	kmem_cache_destroy(s);

	KUNIT_EXPECT_EQ(test, 0, slab_errors);
}

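/*
 * Stress variant of test_kfree_rcu(): repeatedly allocate an object,
 * queue its kfree_rcu(), and then destroy the cache from a separate
 * workqueue after a random delay, probing for races between the
 * RCU-delayed free and concurrent cache destruction.
 */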
struct cache_destroy_work {
	struct work_struct work;
	struct kmem_cache *s;
};

static void cache_destroy_workfn(struct work_struct *w)
{
	struct cache_destroy_work *cdw;

	cdw = container_of(w, struct cache_destroy_work, work);
	kmem_cache_destroy(cdw->s);
}

#define KMEM_CACHE_DESTROY_NR 10

static void test_kfree_rcu_wq_destroy(struct kunit *test)
{
	struct test_kfree_rcu_struct *p;
	struct cache_destroy_work cdw;
	struct workqueue_struct *wq;
	struct kmem_cache *s;
	unsigned int delay;
	int i;

	if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
		kunit_skip(test, "can't do kfree_rcu() when test is built-in");

	INIT_WORK_ONSTACK(&cdw.work, cache_destroy_workfn);
	wq = alloc_workqueue("test_kfree_rcu_destroy_wq",
			WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 0);

	if (!wq)
		kunit_skip(test, "failed to alloc wq");

	for (i = 0; i < KMEM_CACHE_DESTROY_NR; i++) {
		s = test_kmem_cache_create("TestSlub_kfree_rcu_wq_destroy",
				sizeof(struct test_kfree_rcu_struct),
				SLAB_NO_MERGE);

		if (!s)
			kunit_skip(test, "failed to create cache");

		delay = get_random_u8();
		p = kmem_cache_alloc(s, GFP_KERNEL);
		kfree_rcu(p, rcu);

		cdw.s = s;

		msleep(delay);
		queue_work(wq, &cdw.work);
		flush_work(&cdw.work);
	}

	destroy_workqueue(wq);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);
}

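/*
 * Allocate an object and deliberately never free it: destroying the
 * cache while the object is still in use must be reported via
 * slab_errors.
 */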
static void test_leak_destroy(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
							64, SLAB_NO_MERGE);
	kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_destroy(s);

	KUNIT_EXPECT_EQ(test, 2, slab_errors);
}

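/*
 * Check that krealloc() keeps red zones and zeroing consistent: on
 * shrink the freed tail must be re-covered by the red zone, on growth
 * within the same 64-byte object the newly exposed bytes must be
 * zeroed, and on reallocation into a bigger object the old contents
 * must be preserved and the remainder zeroed.
 */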
static void test_krealloc_redzone_zeroing(struct kunit *test)
{
	u8 *p;
	int i;
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_krealloc", 64,
				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);

	p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 48));
	memset(p, 0xff, 48);

	kasan_disable_current();
	OPTIMIZER_HIDE_VAR(p);

	/* Test shrink */
	p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
	for (i = 40; i < 64; i++)
		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);

	/* Test grow within the same 64B kmalloc object */
	p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
	for (i = 40; i < 56; i++)
		KUNIT_EXPECT_EQ(test, p[i], 0);
	for (i = 56; i < 64; i++)
		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);

	memset(p, 0xff, 56);
	/* Test grow with allocating a bigger 128B object */
	p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
	for (i = 0; i < 56; i++)
		KUNIT_EXPECT_EQ(test, p[i], 0xff);
	for (i = 56; i < 112; i++)
		KUNIT_EXPECT_EQ(test, p[i], 0);

	kfree(p);
	kasan_enable_current();
	kmem_cache_destroy(s);
}

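/*
 * Each test starts with a clean error count. The counter is published
 * as a named KUnit resource so that the slab error handling code (see
 * slab_add_kunit_errors() in mm/slub.c) can look it up by name and
 * count errors instead of reporting them as real corruption.
 */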
static int test_init(struct kunit *test)
{
	slab_errors = 0;

	kunit_add_named_resource(test, NULL, NULL, &resource,
					"slab_errors", &slab_errors);
	return 0;
}

static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_clobber_zone),

#ifndef CONFIG_KASAN
	KUNIT_CASE(test_next_pointer),
	KUNIT_CASE(test_first_word),
	KUNIT_CASE(test_clobber_50th_byte),
#endif

	KUNIT_CASE(test_clobber_redzone_free),
	KUNIT_CASE(test_kmalloc_redzone_access),
	KUNIT_CASE(test_kfree_rcu),
	KUNIT_CASE(test_kfree_rcu_wq_destroy),
	KUNIT_CASE(test_leak_destroy),
	KUNIT_CASE(test_krealloc_redzone_zeroing),
	{}
};

static struct kunit_suite test_suite = {
	.name = "slub_test",
	.init = test_init,
	.test_cases = test_cases,
};
kunit_test_suite(test_suite);

MODULE_DESCRIPTION("KUnit tests for the SLUB allocator");
MODULE_LICENSE("GPL");