1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
4  * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
5  *
6  * For corner cases with UBSAN, try testing with:
7  *
8  * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
9  *	--kconfig_add CONFIG_FORTIFY_SOURCE=y \
10  *	--kconfig_add CONFIG_UBSAN=y \
11  *	--kconfig_add CONFIG_UBSAN_TRAP=y \
12  *	--kconfig_add CONFIG_UBSAN_BOUNDS=y \
13  *	--kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
14  *	--make_options LLVM=1 fortify
15  */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 
/*
 * We don't need to fill dmesg with the fortify WARNs during testing;
 * the no-op variants are used unless DEBUG is defined, in which case
 * the real report/WARN machinery fires so failures are visible in dmesg.
 */
#ifdef DEBUG
# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
# define FORTIFY_WARN_KUNIT(x...)   WARN_ONCE(x)
#else
# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
# define FORTIFY_WARN_KUNIT(x...)   do { } while (0)
#endif
26 
/*
 * Redefine fortify_panic() to track failures: instead of halting, report
 * (only under DEBUG), bump the per-test read/write overflow counter, and
 * return the fortified function's failure value so the test keeps running.
 * Must be defined before string.h is included below so the fortified
 * inlines pick it up.
 */
void fortify_add_kunit_error(int write);
#define fortify_panic(func, write, avail, size, retfail) do {		\
	FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size);	\
	fortify_add_kunit_error(write);					\
	return (retfail);						\
} while (0)
34 
/*
 * Redefine fortify_warn_once() to track memcpy() failures. The check
 * function is evaluated exactly once; a "true" result means the runtime
 * bounds check tripped, which is counted as a write overflow.
 */
#define fortify_warn_once(chk_func, x...) do {				\
	bool __result = chk_func;					\
	FORTIFY_WARN_KUNIT(__result, x);				\
	if (__result)							\
		fortify_add_kunit_error(1);				\
} while (0)
42 
43 #include <kunit/device.h>
44 #include <kunit/test.h>
45 #include <kunit/test-bug.h>
46 #include <linux/device.h>
47 #include <linux/slab.h>
48 #include <linux/string.h>
49 #include <linux/vmalloc.h>
50 
/*
 * Handle being built without CONFIG_FORTIFY_SOURCE: fall back to the
 * compiler builtin so the compile-time strlen tests still build.
 */
#ifndef __compiletime_strlen
# define __compiletime_strlen __builtin_strlen
#endif
55 
/*
 * Overflow counters, looked up by name from fortify_add_kunit_error()
 * via the kunit named-resource API (presumably registered with these
 * names in the suite's per-test init — confirm against the full file).
 */
static struct kunit_resource read_resource;
static struct kunit_resource write_resource;
static int fortify_read_overflows;
static int fortify_write_overflows;

/* Strings with varying degrees of compile-time-visible length. */
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
static const char * const unchanging_12 = "this is 12!!";
static char array_unknown[] = "compiler thinks I might change";
65 
fortify_add_kunit_error(int write)66 void fortify_add_kunit_error(int write)
67 {
68 	struct kunit_resource *resource;
69 	struct kunit *current_test;
70 
71 	current_test = kunit_get_current_test();
72 	if (!current_test)
73 		return;
74 
75 	resource = kunit_find_named_resource(current_test,
76 			write ? "fortify_write_overflows"
77 			      : "fortify_read_overflows");
78 	if (!resource)
79 		return;
80 
81 	(*(int *)resource->data)++;
82 	kunit_put_resource(resource);
83 }
84 
/*
 * Verify __compiletime_strlen() returns the real length when the compiler
 * can see it, and SIZE_MAX when it cannot. Results depend entirely on
 * compiler constant-folding, so the exact declarations above matter.
 */
static void fortify_test_known_sizes(struct kunit *test)
{
	char stack[80] = "Test!";

	/* Stack contents are mutable: length not a constant expression. */
	KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(stack)));
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(stack), 5);

	/* String literal: fully known. */
	KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen("88888888")));
	KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);

	/* const array: fully known. */
	KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen(array_of_10)));
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);

	/* Mutable pointer: not constexpr, but length still discoverable. */
	KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(ptr_of_11)));
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);

	/* const pointer to literal: fully known. */
	KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen(unchanging_12)));
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(unchanging_12), 12);

	/* Mutable array contents: unknowable, reported as SIZE_MAX. */
	KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(array_unknown)));
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);

	/* Externally defined and dynamically sized string pointer: */
	KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(test->name)));
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
}
111 
/* This is volatile so the optimizer can't perform DCE below. */
static volatile int pick;

/*
 * Not inline to keep optimizer from figuring out which string we want.
 * With the string unknown at compile time, __compiletime_strlen() must
 * give up and report SIZE_MAX ("minus one").
 */
static noinline size_t want_minus_one(int pick)
{
	const char *str;

	switch (pick) {
	case 1:
		str = "4444";
		break;
	case 2:
		str = "333";
		break;
	default:
		str = "1";
		break;
	}
	return __compiletime_strlen(str);
}
133 
/*
 * Across control-flow merges the compiler cannot pick a single constant
 * string length, so __compiletime_strlen() must report SIZE_MAX.
 */
static void fortify_test_control_flow_split(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}
138 
/* Check __builtin_object_size() (compile-time) sees the allocation size. */
#define KUNIT_EXPECT_BOS(test, p, expected, name)			\
	KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1),		\
		expected,						\
		"__alloc_size() not working with __bos on " name "\n")

/* Check __builtin_dynamic_object_size() (runtime), when the compiler has it. */
#if !__has_builtin(__builtin_dynamic_object_size)
#define KUNIT_EXPECT_BDOS(test, p, expected, name)			\
	/* Silence "unused variable 'expected'" warning. */		\
	KUNIT_EXPECT_EQ(test, expected, expected)
#else
#define KUNIT_EXPECT_BDOS(test, p, expected, name)			\
	KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1),	\
		expected,						\
		"__alloc_size() not working with __bdos on " name "\n")
#endif
154 
/* If the expected size is a constant value, __bos can see it. */
#define check_const(_expected, alloc, free)		do {		\
	size_t expected = (_expected);					\
	void *p = alloc;						\
	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n");	\
	KUNIT_EXPECT_BOS(test, p, expected, #alloc);			\
	KUNIT_EXPECT_BDOS(test, p, expected, #alloc);			\
	free;								\
} while (0)

/* If the expected size is NOT a constant value, __bos CANNOT see it. */
#define check_dynamic(_expected, alloc, free)		do {		\
	size_t expected = (_expected);					\
	void *p = alloc;						\
	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n");	\
	KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc);			\
	KUNIT_EXPECT_BDOS(test, p, expected, #alloc);			\
	free;								\
} while (0)
174 
/*
 * Assortment of constant-value kinda-edge cases: zero, one, around a
 * power of two, and around PAGE_SIZE-ish boundaries.
 */
#define CONST_TEST_BODY(TEST_alloc)	do {				\
	/* Special-case vmalloc()-family to skip 0-sized allocs. */	\
	if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0)			\
		TEST_alloc(check_const, 0, 0);				\
	TEST_alloc(check_const, 1, 1);					\
	TEST_alloc(check_const, 128, 128);				\
	TEST_alloc(check_const, 1023, 1023);				\
	TEST_alloc(check_const, 1025, 1025);				\
	TEST_alloc(check_const, 4096, 4096);				\
	TEST_alloc(check_const, 4097, 4097);				\
} while (0)
187 
/* volatile so the compiler cannot treat these sizes as constants. */
static volatile size_t zero_size;
static volatile size_t unknown_size = 50;

#if !__has_builtin(__builtin_dynamic_object_size)
#define DYNAMIC_TEST_BODY(TEST_alloc)					\
	kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
#else
#define DYNAMIC_TEST_BODY(TEST_alloc)	do {				\
	size_t size = unknown_size;					\
									\
	/*								\
	 * Expected size is "size" in each test, before it is then	\
	 * internally incremented in each test.	Requires we disable	\
	 * -Wunsequenced.						\
	 */								\
	TEST_alloc(check_dynamic, size, size++);			\
	/* Make sure incrementing actually happened. */			\
	KUNIT_EXPECT_NE(test, size, unknown_size);			\
} while (0)
#endif
208 
/*
 * Instantiate a pair of test functions for one allocator family:
 * one exercising constant sizes, one exercising runtime sizes.
 */
#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator)				\
static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
{									\
	CONST_TEST_BODY(TEST_##allocator);				\
}									\
static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
{									\
	DYNAMIC_TEST_BODY(TEST_##allocator);				\
}
218 
/*
 * Exercise the kmalloc() family: every allocation flavor, both operand
 * orders for the array/calloc variants, realloc growth, and kmemdup().
 * "checker" is check_const or check_dynamic; "p" inside each free
 * expression is the pointer bound by the checker macro.
 */
#define TEST_kmalloc(checker, expected_size, alloc_size)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	void *orig;							\
	size_t len;							\
									\
	checker(expected_size, kmalloc(alloc_size, gfp),		\
		kfree(p));						\
	checker(expected_size,						\
		kmalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size, kzalloc(alloc_size, gfp),		\
		kfree(p));						\
	checker(expected_size,						\
		kzalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size, kcalloc(1, alloc_size, gfp),		\
		kfree(p));						\
	checker(expected_size, kcalloc(alloc_size, 1, gfp),		\
		kfree(p));						\
	checker(expected_size,						\
		kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size,						\
		kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size, kmalloc_array(1, alloc_size, gfp),	\
		kfree(p));						\
	checker(expected_size, kmalloc_array(alloc_size, 1, gfp),	\
		kfree(p));						\
	checker(expected_size,						\
		kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE),	\
		kfree(p));						\
	checker(expected_size,						\
		kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE),	\
		kfree(p));						\
									\
	orig = kmalloc(alloc_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		krealloc(orig, (alloc_size) * 2, gfp),			\
		kfree(p));						\
	orig = kmalloc(alloc_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		krealloc_array(orig, 1, (alloc_size) * 2, gfp),		\
		kfree(p));						\
	orig = kmalloc(alloc_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		krealloc_array(orig, (alloc_size) * 2, 1, gfp),		\
		kfree(p));						\
									\
	len = 11;							\
	/* Using memdup() with fixed size, so force unknown length. */	\
	if (!__builtin_constant_p(expected_size))			\
		len += zero_size;					\
	checker(len, kmemdup("hello there", len, gfp), kfree(p));	\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)
278 
/* Sizes are in pages, not bytes. */
#define TEST_vmalloc(checker, expected_pages, alloc_pages)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	checker((expected_pages) * PAGE_SIZE,				\
		vmalloc((alloc_pages) * PAGE_SIZE),	   vfree(p));	\
	checker((expected_pages) * PAGE_SIZE,				\
		vzalloc((alloc_pages) * PAGE_SIZE),	   vfree(p));	\
	checker((expected_pages) * PAGE_SIZE,				\
		__vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p));	\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
290 
/*
 * Sizes are in pages (and open-coded for side-effects), not bytes.
 * Covers the kvmalloc() family plus kvrealloc() growth.
 */
#define TEST_kvmalloc(checker, expected_pages, alloc_pages)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	size_t prev_size;						\
	void *orig;							\
									\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc((alloc_pages) * PAGE_SIZE, gfp),		\
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvzalloc((alloc_pages) * PAGE_SIZE, gfp),		\
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp),		\
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp),		\
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp),	\
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp),	\
		kvfree(p));						\
									\
	prev_size = (expected_pages) * PAGE_SIZE;			\
	orig = kvmalloc(prev_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker(((expected_pages) * PAGE_SIZE) * 2,			\
		kvrealloc(orig, ((alloc_pages) * PAGE_SIZE) * 2, gfp),	\
		kvfree(p));						\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
330 
/*
 * Exercise the devm_kmalloc() family against a throw-away KUnit device,
 * including realloc growth and devm_kmemdup().
 */
#define TEST_devm_kmalloc(checker, expected_size, alloc_size)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	const char dev_name[] = "fortify-test";				\
	struct device *dev;						\
	void *orig;							\
	size_t len;							\
									\
	/* Create dummy device for devm_kmalloc()-family tests. */	\
	dev = kunit_device_register(test, dev_name);			\
	KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),			\
			       "Cannot register test device\n");	\
									\
	checker(expected_size, devm_kmalloc(dev, alloc_size, gfp),	\
		devm_kfree(dev, p));					\
	checker(expected_size, devm_kzalloc(dev, alloc_size, gfp),	\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kmalloc_array(dev, 1, alloc_size, gfp),		\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kmalloc_array(dev, alloc_size, 1, gfp),		\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kcalloc(dev, 1, alloc_size, gfp),			\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kcalloc(dev, alloc_size, 1, gfp),			\
		devm_kfree(dev, p));					\
									\
	orig = devm_kmalloc(dev, alloc_size, gfp);			\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		devm_krealloc(dev, orig, (alloc_size) * 2, gfp),	\
		devm_kfree(dev, p));					\
									\
	len = 4;							\
	/* Using memdup() with fixed size, so force unknown length. */	\
	if (!__builtin_constant_p(expected_size))			\
		len += zero_size;					\
	checker(len, devm_kmemdup(dev, "Ohai", len, gfp),		\
		devm_kfree(dev, p));					\
									\
	kunit_device_unregister(test, dev);				\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
376 
/* Sample strings of assorted (runtime-only) lengths for the memdup tests. */
static const char * const test_strs[] = {
	"",
	"Hello there",
	"A longer string, just for variety",
};
382 
/*
 * Verify __bdos tracking through kmemdup()/kmemdup_array() with lengths
 * the compiler cannot constant-fold (checked explicitly per iteration).
 */
#define TEST_realloc(checker)	do {					\
	gfp_t gfp = GFP_KERNEL;						\
	size_t len;							\
	int i;								\
									\
	for (i = 0; i < ARRAY_SIZE(test_strs); i++) {			\
		len = strlen(test_strs[i]);				\
		KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0);	\
		checker(len, kmemdup_array(test_strs[i], 1, len, gfp),	\
			kfree(p));					\
		checker(len, kmemdup(test_strs[i], len, gfp),		\
			kfree(p));					\
	}								\
} while (0)
static void fortify_test_realloc_size(struct kunit *test)
{
	TEST_realloc(check_dynamic);
}
401 
/*
 * We can't have an array at the end of a structure or else
 * builds without -fstrict-flex-arrays=3 will report them as
 * being an unknown length. Additionally, add bytes before
 * and after the string to catch over/underflows if tests
 * fail.
 */
struct fortify_padding {
	unsigned long bytes_before;	/* canary: must stay 0 */
	char buf[32];			/* the buffer under test */
	unsigned long bytes_after;	/* canary: must stay 0 */
};
414 
/*
 * strlen() on a terminated buffer is clean; an unterminated one must
 * trip FORTIFY's read-overflow detection (but be capped at buffer size).
 */
static void fortify_test_strlen(struct kunit *test)
{
	struct fortify_padding pad = { };
	int i, end = sizeof(pad.buf) - 1;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(pad.buf) - 1; i++)
		pad.buf[i] = i + '0';
	/* Trailing bytes are still %NUL. */
	KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* String is terminated, so strlen() is valid. */
	KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

	/* Make string unterminated, and recount. */
	pad.buf[end] = 'A';
	end = sizeof(pad.buf);
	/* FORTIFY caps the runaway strlen() and records one read overflow. */
	KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
}
437 
/*
 * strnlen() within the buffer is clean; a limit past the buffer end on
 * an unterminated string trips the read-overflow detection, while a
 * limit at or below the buffer size never does.
 */
static void fortify_test_strnlen(struct kunit *test)
{
	struct fortify_padding pad = { };
	int i, end = sizeof(pad.buf) - 1;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(pad.buf) - 1; i++)
		pad.buf[i] = i + '0';
	/* Trailing bytes are still %NUL. */
	KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* String is terminated, so strnlen() is valid. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* A truncated strnlen() will be safe, too. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
					sizeof(pad.buf) / 2);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

	/* Make string unterminated, and recount. */
	pad.buf[end] = 'A';
	end = sizeof(pad.buf);
	/* Reading beyond with strnlen() will fail. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);

	/* Early-truncated is safe still, though. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);

	end = sizeof(pad.buf) / 2;
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
475 
fortify_test_strcpy(struct kunit * test)476 static void fortify_test_strcpy(struct kunit *test)
477 {
478 	struct fortify_padding pad = { };
479 	char src[sizeof(pad.buf) + 1] = { };
480 	int i;
481 
482 	/* Fill 31 bytes with valid characters. */
483 	for (i = 0; i < sizeof(src) - 2; i++)
484 		src[i] = i + '0';
485 
486 	/* Destination is %NUL-filled to start with. */
487 	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
488 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
489 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
490 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
491 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
492 
493 	/* Legitimate strcpy() 1 less than of max size. */
494 	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
495 				== pad.buf);
496 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
497 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
498 	/* Only last byte should be %NUL */
499 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
500 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
501 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
502 
503 	src[sizeof(src) - 2] = 'A';
504 	/* But now we trip the overflow checking. */
505 	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
506 				== pad.buf);
507 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
508 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
509 	/* Trailing %NUL -- thanks to FORTIFY. */
510 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
511 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
512 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
513 	/* And we will not have gone beyond. */
514 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
515 
516 	src[sizeof(src) - 1] = 'A';
517 	/* And for sure now, two bytes past. */
518 	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
519 				== pad.buf);
520 	/*
521 	 * Which trips both the strlen() on the unterminated src,
522 	 * and the resulting copy attempt.
523 	 */
524 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
525 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
526 	/* Trailing %NUL -- thanks to FORTIFY. */
527 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
528 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
529 	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
530 	/* And we will not have gone beyond. */
531 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
532 }
533 
fortify_test_strncpy(struct kunit * test)534 static void fortify_test_strncpy(struct kunit *test)
535 {
536 	struct fortify_padding pad = { };
537 	char src[] = "Copy me fully into a small buffer and I will overflow!";
538 	size_t sizeof_buf = sizeof(pad.buf);
539 
540 	OPTIMIZER_HIDE_VAR(sizeof_buf);
541 
542 	/* Destination is %NUL-filled to start with. */
543 	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
544 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
545 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
546 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 3], '\0');
547 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
548 
549 	/* Legitimate strncpy() 1 less than of max size. */
550 	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf - 1)
551 				== pad.buf);
552 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
553 	/* Only last byte should be %NUL */
554 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
555 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
556 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
557 
558 	/* Legitimate (though unterminated) max-size strncpy. */
559 	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf)
560 				== pad.buf);
561 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
562 	/* No trailing %NUL -- thanks strncpy API. */
563 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
564 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
565 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
566 	/* But we will not have gone beyond. */
567 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
568 
569 	/* Now verify that FORTIFY is working... */
570 	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf + 1)
571 				== pad.buf);
572 	/* Should catch the overflow. */
573 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
574 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
575 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
576 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
577 	/* And we will not have gone beyond. */
578 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
579 
580 	/* And further... */
581 	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf + 2)
582 				== pad.buf);
583 	/* Should catch the overflow. */
584 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
585 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
586 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
587 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
588 	/* And we will not have gone beyond. */
589 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
590 }
591 
fortify_test_strscpy(struct kunit * test)592 static void fortify_test_strscpy(struct kunit *test)
593 {
594 	struct fortify_padding pad = { };
595 	char src[] = "Copy me fully into a small buffer and I will overflow!";
596 	size_t sizeof_buf = sizeof(pad.buf);
597 	size_t sizeof_src = sizeof(src);
598 
599 	OPTIMIZER_HIDE_VAR(sizeof_buf);
600 	OPTIMIZER_HIDE_VAR(sizeof_src);
601 
602 	/* Destination is %NUL-filled to start with. */
603 	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
604 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
605 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
606 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 3], '\0');
607 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
608 
609 	/* Legitimate strscpy() 1 less than of max size. */
610 	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf - 1),
611 			-E2BIG);
612 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
613 	/* Keeping space for %NUL, last two bytes should be %NUL */
614 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
615 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
616 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
617 
618 	/* Legitimate max-size strscpy. */
619 	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf),
620 			-E2BIG);
621 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
622 	/* A trailing %NUL will exist. */
623 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
624 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
625 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
626 
627 	/* Now verify that FORTIFY is working... */
628 	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf + 1),
629 			-E2BIG);
630 	/* Should catch the overflow. */
631 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
632 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
633 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
634 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
635 	/* And we will not have gone beyond. */
636 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
637 
638 	/* And much further... */
639 	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_src * 2),
640 			-E2BIG);
641 	/* Should catch the overflow. */
642 	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
643 	KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
644 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
645 	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
646 	/* And we will not have gone beyond. */
647 	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
648 }
649 
/*
 * Repeated strcat() is clean while it fits; the append that would pass
 * the end must trip write-overflow detection, with FORTIFY keeping the
 * destination %NUL-terminated and in-bounds.
 */
static void fortify_test_strcat(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf) / 2] = { };
	char one[] = "A";
	char two[] = "BC";
	int i;

	/* Fill 15 bytes with valid characters. */
	for (i = 0; i < sizeof(src) - 1; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strcat() using less than half max size. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strcat() now 2 bytes shy of end. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
706 
/*
 * Repeated bounded strncat() is clean while it fits; appends past the
 * end must trip write-overflow detection, and appending to a forcibly
 * unterminated destination must also trip read-overflow detection.
 */
static void fortify_test_strncat(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf)] = { };
	int i, partial;

	/* Fill 15 bytes with valid characters. */
	partial = sizeof(src) / 2 - 1;
	for (i = 0; i < partial; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strncat() using less than half max size. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strncat() now 2 bytes shy of end. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated destination, and overflow. */
	pad.buf[sizeof(pad.buf) - 1] = 'A';
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	/* This will have tripped both strlen() and strcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* But we should not go beyond the end. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
779 
/*
 * Exercise FORTIFY's runtime bounds checking of strlcat(): legitimate
 * concatenations must not trip the counters, while oversized "limit"
 * arguments, unterminated destinations, and unterminated sources must
 * each bump fortify_read_overflows/fortify_write_overflows exactly as
 * expected and never write past the end of pad.buf (pad.bytes_after
 * acts as the canary region following the buffer).
 */
static void fortify_test_strlcat(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf)] = { };
	int i, partial;
	int len = sizeof(pad.buf);

	/* Hide the size from the compiler so checks happen at runtime. */
	OPTIMIZER_HIDE_VAR(len);

	/* Fill 15 bytes with valid characters. */
	partial = sizeof(src) / 2 - 1;
	for (i = 0; i < partial; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strlcat() using less than half max size. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strlcat() now 2 bytes shy of end. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated destination, and overflow. */
	pad.buf[sizeof(pad.buf) - 1] = 'A';
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
	/* This will have tripped both strlen() and strlcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* But we should not go beyond the end. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated source, and overflow. */
	memset(src, 'B', sizeof(src));
	pad.buf[sizeof(pad.buf) - 1] = '\0';
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
	/* This will have tripped both strlen() and strlcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	/* But we should not go beyond the end. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
866 
/*
 * Check for 0-sized arrays...
 *
 * The [0] array is intentional (not a C99 flexible array member): the
 * memcpy()/memmove() tests below verify FORTIFY's handling of legacy
 * 0-sized array destinations, with bytes_before/bytes_after acting as
 * canaries around the (empty) buffer.
 */
struct fortify_zero_sized {
	unsigned long bytes_before;
	char buf[0];	/* deliberately zero-length; writes here must be caught */
	unsigned long bytes_after;
};
873 
/*
 * Template generating fortify_test_memcpy() and fortify_test_memmove().
 *
 * Each generated test copies into a canary-padded buffer (struct
 * fortify_padding) with progressively larger sizes: zero-length copies,
 * in-bounds copies, an exact-fit copy, and then out-of-bounds copies
 * that must increment fortify_write_overflows. It finishes by copying
 * into a zero-sized destination (struct fortify_zero_sized), where only
 * a non-zero length may trip the write check.
 *
 * Note: comments inside the macro use C89 style only; a // comment
 * would swallow the trailing line-continuation backslash.
 */
#define __fortify_test(memfunc)					\
static void fortify_test_##memfunc(struct kunit *test)		\
{								\
	struct fortify_zero_sized empty = { };			\
	struct fortify_padding pad = { };			\
	char srcA[sizeof(pad.buf) + 2];				\
	char srcB[sizeof(pad.buf) + 2];				\
	size_t len = sizeof(pad.buf);				\
	size_t zero = 0;					\
								\
	OPTIMIZER_HIDE_VAR(len);				\
	OPTIMIZER_HIDE_VAR(zero);				\
								\
	memset(srcA, 'A', sizeof(srcA));			\
	KUNIT_ASSERT_EQ(test, srcA[0], 'A');			\
	memset(srcB, 'B', sizeof(srcB));			\
	KUNIT_ASSERT_EQ(test, srcB[0], 'B');			\
								\
	memfunc(pad.buf, srcA, zero);				\
	KUNIT_EXPECT_EQ(test, pad.buf[0], '\0');		\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(pad.buf + 1, srcB, zero + 1);			\
	KUNIT_EXPECT_EQ(test, pad.buf[0], '\0');		\
	KUNIT_EXPECT_EQ(test, pad.buf[1], 'B');			\
	KUNIT_EXPECT_EQ(test, pad.buf[2], '\0');		\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(pad.buf, srcA, zero + 1);			\
	KUNIT_EXPECT_EQ(test, pad.buf[0], 'A');			\
	KUNIT_EXPECT_EQ(test, pad.buf[1], 'B');			\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(pad.buf, srcA, len - 1);			\
	KUNIT_EXPECT_EQ(test, pad.buf[1], 'A');			\
	KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0');		\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(pad.buf, srcA, len);				\
	KUNIT_EXPECT_EQ(test, pad.buf[1], 'A');			\
	KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A');		\
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);		\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(pad.buf, srcA, len + 1);			\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);	\
	memfunc(pad.buf + 1, srcB, len);			\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);	\
								\
	/* Reset error counter. */				\
	fortify_write_overflows = 0;				\
	/* Copy nothing into nothing: no errors. */		\
	memfunc(empty.buf, srcB, zero);				\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(empty.buf, srcB, zero + 1);			\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);	\
}
__fortify_test(memcpy)
__fortify_test(memmove)
937 
938 static void fortify_test_memscan(struct kunit *test)
939 {
940 	char haystack[] = "Where oh where is my memory range?";
941 	char *mem = haystack + strlen("Where oh where is ");
942 	char needle = 'm';
943 	size_t len = sizeof(haystack);
944 
945 	OPTIMIZER_HIDE_VAR(len);
946 
947 	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
948 				  mem);
949 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
950 	/* Catch too-large range. */
951 	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
952 				  NULL);
953 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
954 	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
955 				  NULL);
956 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
957 }
958 
fortify_test_memchr(struct kunit * test)959 static void fortify_test_memchr(struct kunit *test)
960 {
961 	char haystack[] = "Where oh where is my memory range?";
962 	char *mem = haystack + strlen("Where oh where is ");
963 	char needle = 'm';
964 	size_t len = sizeof(haystack);
965 
966 	OPTIMIZER_HIDE_VAR(len);
967 
968 	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
969 				  mem);
970 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
971 	/* Catch too-large range. */
972 	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
973 				  NULL);
974 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
975 	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
976 				  NULL);
977 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
978 }
979 
fortify_test_memchr_inv(struct kunit * test)980 static void fortify_test_memchr_inv(struct kunit *test)
981 {
982 	char haystack[] = "Where oh where is my memory range?";
983 	char *mem = haystack + 1;
984 	char needle = 'W';
985 	size_t len = sizeof(haystack);
986 
987 	OPTIMIZER_HIDE_VAR(len);
988 
989 	/* Normal search is okay. */
990 	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
991 				  mem);
992 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
993 	/* Catch too-large range. */
994 	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
995 				  NULL);
996 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
997 	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
998 				  NULL);
999 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
1000 }
1001 
fortify_test_memcmp(struct kunit * test)1002 static void fortify_test_memcmp(struct kunit *test)
1003 {
1004 	char one[] = "My mind is going ...";
1005 	char two[] = "My mind is going ... I can feel it.";
1006 	size_t one_len = sizeof(one) - 1;
1007 	size_t two_len = sizeof(two) - 1;
1008 
1009 	OPTIMIZER_HIDE_VAR(one_len);
1010 	OPTIMIZER_HIDE_VAR(two_len);
1011 
1012 	/* We match the first string (ignoring the %NUL). */
1013 	KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
1014 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1015 	/* Still in bounds, but no longer matching. */
1016 	KUNIT_ASSERT_LT(test, memcmp(one, two, one_len + 1), 0);
1017 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1018 
1019 	/* Catch too-large ranges. */
1020 	KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
1021 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
1022 
1023 	KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
1024 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
1025 }
1026 
fortify_test_kmemdup(struct kunit * test)1027 static void fortify_test_kmemdup(struct kunit *test)
1028 {
1029 	char src[] = "I got Doom running on it!";
1030 	char *copy;
1031 	size_t len = sizeof(src);
1032 
1033 	OPTIMIZER_HIDE_VAR(len);
1034 
1035 	/* Copy is within bounds. */
1036 	copy = kmemdup(src, len, GFP_KERNEL);
1037 	KUNIT_EXPECT_NOT_NULL(test, copy);
1038 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1039 	kfree(copy);
1040 
1041 	/* Without %NUL. */
1042 	copy = kmemdup(src, len - 1, GFP_KERNEL);
1043 	KUNIT_EXPECT_NOT_NULL(test, copy);
1044 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1045 	kfree(copy);
1046 
1047 	/* Tiny bounds. */
1048 	copy = kmemdup(src, 1, GFP_KERNEL);
1049 	KUNIT_EXPECT_NOT_NULL(test, copy);
1050 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1051 	kfree(copy);
1052 
1053 	/* Out of bounds by 1 byte. */
1054 	copy = kmemdup(src, len + 1, GFP_KERNEL);
1055 	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1056 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
1057 	kfree(copy);
1058 
1059 	/* Way out of bounds. */
1060 	copy = kmemdup(src, len * 2, GFP_KERNEL);
1061 	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1062 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
1063 	kfree(copy);
1064 
1065 	/* Starting offset causing out of bounds. */
1066 	copy = kmemdup(src + 1, len, GFP_KERNEL);
1067 	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1068 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
1069 	kfree(copy);
1070 }
1071 
/*
 * Per-test setup: skip everything unless FORTIFY is built in, then
 * reset both overflow counters and expose them as named KUnit
 * resources so the fortify_panic()/fortify_warn_once() redefinitions
 * (see top of file) can find and increment them.
 */
static int fortify_test_init(struct kunit *test)
{
	if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
		kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");

	fortify_read_overflows = 0;
	kunit_add_named_resource(test, NULL, NULL, &read_resource,
				 "fortify_read_overflows",
				 &fortify_read_overflows);
	fortify_write_overflows = 0;
	kunit_add_named_resource(test, NULL, NULL, &write_resource,
				 "fortify_write_overflows",
				 &fortify_write_overflows);
	return 0;
}
1087 
/* Table of all FORTIFY test cases, grouped roughly by the API under test. */
static struct kunit_case fortify_test_cases[] = {
	KUNIT_CASE(fortify_test_known_sizes),
	KUNIT_CASE(fortify_test_control_flow_split),
	/* __alloc_size() attribute coverage for each allocator family. */
	KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
	KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
	KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
	KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
	KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
	KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
	KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
	KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
	KUNIT_CASE(fortify_test_realloc_size),
	/* String-handling helpers. */
	KUNIT_CASE(fortify_test_strlen),
	KUNIT_CASE(fortify_test_strnlen),
	KUNIT_CASE(fortify_test_strcpy),
	KUNIT_CASE(fortify_test_strncpy),
	KUNIT_CASE(fortify_test_strscpy),
	KUNIT_CASE(fortify_test_strcat),
	KUNIT_CASE(fortify_test_strncat),
	KUNIT_CASE(fortify_test_strlcat),
	/* skip memset: performs bounds checking on whole structs */
	KUNIT_CASE(fortify_test_memcpy),
	KUNIT_CASE(fortify_test_memmove),
	KUNIT_CASE(fortify_test_memscan),
	KUNIT_CASE(fortify_test_memchr),
	KUNIT_CASE(fortify_test_memchr_inv),
	KUNIT_CASE(fortify_test_memcmp),
	KUNIT_CASE(fortify_test_kmemdup),
	{}
};
1118 
/* Suite definition: fortify_test_init() runs before every case above. */
static struct kunit_suite fortify_test_suite = {
	.name = "fortify",
	.init = fortify_test_init,
	.test_cases = fortify_test_cases,
};
1124 
/* Register the suite with the KUnit framework. */
kunit_test_suite(fortify_test_suite);

MODULE_DESCRIPTION("Runtime test cases for CONFIG_FORTIFY_SOURCE");
MODULE_LICENSE("GPL");
1129