// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to validating kernel memory
 * permissions: non-executable regions, non-writable regions, and
 * even non-readable regions.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

/* Whether or not to fill the target memory area with do_nothing(). */
#define CODE_WRITE	true
#define CODE_AS_IS	false

/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
#define EXEC_SIZE 64

/* This is non-const, so it will end up in the .data section. */
static u8 data_area[EXEC_SIZE];

/* This is const, so it will end up in the .rodata section. */
static const unsigned long rodata = 0xAA55AA55;

/* This is marked __ro_after_init, so it should ultimately be .rodata. */
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;

/*
 * This is a pointer to do_nothing() which is initialized at runtime rather
 * than build time to avoid objtool IBT validation warnings caused by an
 * inlined unrolled memcpy() in execute_location().
 */
static void __ro_after_init *do_nothing_ptr;

/*
 * This just returns to the caller. It is designed to be copied into
 * non-executable memory regions.
 */
static noinline void do_nothing(void)
{
	return;
}

/* Must immediately follow do_nothing for size calculations to work out. */
static noinline void do_overwritten(void)
{
	pr_info("do_overwritten wasn't overwritten!\n");
	return;
}

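/*
 * This is the replacement payload for WRITE_OPD: if the function
 * descriptor of do_nothing() is successfully overwritten, calling
 * do_nothing() lands here instead.
 */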
static noinline void do_almost_nothing(void)
{
	pr_info("do_nothing was hijacked!\n");
}

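/*
 * On platforms that use function descriptors, build a descriptor that
 * points at @dst so it can be called indirectly; otherwise just return
 * @dst itself.
 */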
static void *setup_function_descriptor(func_desc_t *fdesc, void *dst)
{
	if (!have_function_descriptors())
		return dst;

	memcpy(fdesc, do_nothing, sizeof(*fdesc));
	fdesc->addr = (unsigned long)dst;
	barrier();

	return fdesc;
}

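/*
 * Attempt to execute from @dst. When @write is CODE_WRITE, first copy
 * do_nothing() into @dst and flush the icache; an Oops on the indirect
 * call is the expected (successful) outcome.
 */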
static noinline __nocfi void execute_location(void *dst, bool write)
{
	void (*func)(void);
	func_desc_t fdesc;

	pr_info("attempting ok execution at %px\n", do_nothing_ptr);
	do_nothing();

	if (write == CODE_WRITE) {
		memcpy(dst, do_nothing_ptr, EXEC_SIZE);
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + EXEC_SIZE);
	}
	pr_info("attempting bad execution at %px\n", dst);
	func = setup_function_descriptor(&fdesc, dst);
	func();
	pr_err("FAIL: func returned\n");
}

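/*
 * Same as execute_location(), but the target is userspace memory that is
 * written via access_process_vm() before the kernel tries to execute it.
 */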
static void execute_user_location(void *dst)
{
	int copied;

	/* Intentionally crossing kernel/user memory boundary. */
	void (*func)(void);
	func_desc_t fdesc;
	void *do_nothing_text = dereference_function_descriptor(do_nothing);

	pr_info("attempting ok execution at %px\n", do_nothing_text);
	do_nothing();

	copied = access_process_vm(current, (unsigned long)dst, do_nothing_text,
				   EXEC_SIZE, FOLL_WRITE);
	if (copied < EXEC_SIZE)
		return;
	pr_info("attempting bad execution at %px\n", dst);
	func = setup_function_descriptor(&fdesc, dst);
	func();
	pr_err("FAIL: func returned\n");
}

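/* Attempt to write to a variable that lives in .rodata. */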
static void lkdtm_WRITE_RO(void)
{
	/* Explicitly cast away "const" for the test and make volatile. */
	volatile unsigned long *ptr = (unsigned long *)&rodata;

	pr_info("attempting bad rodata write at %px\n", ptr);
	*ptr ^= 0xabcd1234;
	pr_err("FAIL: survived bad write\n");
}

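/* Attempt to write to a __ro_after_init variable after init has finished. */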
static void lkdtm_WRITE_RO_AFTER_INIT(void)
{
	volatile unsigned long *ptr = &ro_after_init;

	/*
	 * Verify we were written to during init. Since an Oops
	 * is considered a "success", a failure is to just skip the
	 * real test.
	 */
	if ((*ptr & 0xAA) != 0xAA) {
		pr_info("%p was NOT written during init!?\n", ptr);
		return;
	}

	pr_info("attempting bad ro_after_init write at %px\n", ptr);
	*ptr ^= 0xabcd1234;
	pr_err("FAIL: survived bad write\n");
}

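/*
 * Attempt to overwrite kernel text by copying do_nothing() over
 * do_overwritten(), then call do_overwritten() to see which body runs.
 */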
static void lkdtm_WRITE_KERN(void)
{
	size_t size;
	volatile unsigned char *ptr;

	size = (unsigned long)dereference_function_descriptor(do_overwritten) -
	       (unsigned long)dereference_function_descriptor(do_nothing);
	ptr = dereference_function_descriptor(do_overwritten);

	pr_info("attempting bad %zu byte write at %px\n", size, ptr);
	memcpy((void *)ptr, (unsigned char *)do_nothing, size);
	flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
	pr_err("FAIL: survived bad write\n");

	do_overwritten();
}

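/*
 * Attempt to overwrite the function descriptor (OPD) of do_nothing() so
 * that calling it would run do_almost_nothing() instead.
 */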
static void lkdtm_WRITE_OPD(void)
{
	size_t size = sizeof(func_desc_t);
	void (*func)(void) = do_nothing;

	if (!have_function_descriptors()) {
		pr_info("XFAIL: Platform doesn't use function descriptors.\n");
		return;
	}
	pr_info("attempting bad %zu byte write at %px\n", size, do_nothing);
	memcpy(do_nothing, do_almost_nothing, size);
	pr_err("FAIL: survived bad write\n");

	asm("" : "=m"(func));
	func();
}

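/* Attempt to execute code copied into the static .data area. */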
static void lkdtm_EXEC_DATA(void)
{
	execute_location(data_area, CODE_WRITE);
}

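/* Attempt to execute code copied onto the kernel stack. */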
static void lkdtm_EXEC_STACK(void)
{
	u8 stack_area[EXEC_SIZE];
	execute_location(stack_area, CODE_WRITE);
}

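/* Attempt to execute code copied into a kmalloc() allocation. */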
static void lkdtm_EXEC_KMALLOC(void)
{
	u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
	execute_location(kmalloc_area, CODE_WRITE);
	kfree(kmalloc_area);
}

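/* Attempt to execute code copied into a vmalloc() allocation. */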
static void lkdtm_EXEC_VMALLOC(void)
{
	u32 *vmalloc_area = vmalloc(EXEC_SIZE);
	execute_location(vmalloc_area, CODE_WRITE);
	vfree(vmalloc_area);
}

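/* Attempt to execute a function that was deliberately placed in .rodata. */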
static void lkdtm_EXEC_RODATA(void)
{
	execute_location(dereference_function_descriptor(lkdtm_rodata_do_nothing),
			 CODE_AS_IS);
}

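/* Attempt to execute from a userspace mapping while in kernel mode. */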
static void lkdtm_EXEC_USERSPACE(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}
	execute_user_location((void *)user_addr);
	vm_munmap(user_addr, PAGE_SIZE);
}

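/* Attempt to execute from the NULL address. */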
static void lkdtm_EXEC_NULL(void)
{
	execute_location(NULL, CODE_AS_IS);
}

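/* Attempt direct (non-uaccess) reads and writes of userspace memory. */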
static void lkdtm_ACCESS_USERSPACE(void)
{
	unsigned long user_addr, tmp = 0;
	unsigned long *ptr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
		pr_warn("copy_to_user failed\n");
		vm_munmap(user_addr, PAGE_SIZE);
		return;
	}

	ptr = (unsigned long *)user_addr;

	pr_info("attempting bad read at %px\n", ptr);
	tmp = *ptr;
	tmp += 0xc0dec0de;
	pr_err("FAIL: survived bad read\n");

	pr_info("attempting bad write at %px\n", ptr);
	*ptr = tmp;
	pr_err("FAIL: survived bad write\n");

	vm_munmap(user_addr, PAGE_SIZE);
}

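/* Attempt direct reads and writes of the NULL address. */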
static void lkdtm_ACCESS_NULL(void)
{
	unsigned long tmp;
	volatile unsigned long *ptr = (unsigned long *)NULL;

	pr_info("attempting bad read at %px\n", ptr);
	tmp = *ptr;
	tmp += 0xc0dec0de;
	pr_err("FAIL: survived bad read\n");

	pr_info("attempting bad write at %px\n", ptr);
	*ptr = tmp;
	pr_err("FAIL: survived bad write\n");
}

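/*
 * Runs during init, while __ro_after_init data is still writable: record
 * the text address of do_nothing() and set the bits that WRITE_RO_AFTER_INIT
 * later checks for.
 */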
void __init lkdtm_perms_init(void)
{
	do_nothing_ptr = dereference_function_descriptor(do_nothing);

	/* Make sure we can write to __ro_after_init values during __init */
	ro_after_init |= 0xAA;
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(WRITE_RO),
	CRASHTYPE(WRITE_RO_AFTER_INIT),
	CRASHTYPE(WRITE_KERN),
	CRASHTYPE(WRITE_OPD),
	CRASHTYPE(EXEC_DATA),
	CRASHTYPE(EXEC_STACK),
	CRASHTYPE(EXEC_KMALLOC),
	CRASHTYPE(EXEC_VMALLOC),
	CRASHTYPE(EXEC_RODATA),
	CRASHTYPE(EXEC_USERSPACE),
	CRASHTYPE(EXEC_NULL),
	CRASHTYPE(ACCESS_USERSPACE),
	CRASHTYPE(ACCESS_NULL),
};

struct crashtype_category perms_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};