// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 * Copyright (C) 2024 Mike Rapoport IBM.
 */

#define pr_fmt(fmt) "execmem: " fmt

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/execmem.h>
#include <linux/maple_tree.h>
#include <linux/set_memory.h>
#include <linux/moduleloader.h>
#include <linux/text-patching.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct execmem_info *execmem_info __ro_after_init;
static struct execmem_info default_execmem_info __ro_after_init;

#ifdef CONFIG_MMU
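/*
 * Allocate @size bytes from the range with vmalloc(), honouring the range's
 * start/end limits and alignment. If the primary window cannot satisfy the
 * request and a fallback window is defined, retry there. With
 * EXECMEM_KASAN_SHADOW the KASAN module shadow is allocated as well.
 */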
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
			     pgprot_t pgprot, unsigned long vm_flags)
{
	bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_NOWARN;
	unsigned int align = range->alignment;
	unsigned long start = range->start;
	unsigned long end = range->end;
	void *p;

	if (kasan)
		vm_flags |= VM_DEFER_KMEMLEAK;

	if (vm_flags & VM_ALLOW_HUGE_VMAP)
		align = PMD_SIZE;

	p = __vmalloc_node_range(size, align, start, end, gfp_flags,
				 pgprot, vm_flags, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (!p && range->fallback_start) {
		start = range->fallback_start;
		end = range->fallback_end;
		p = __vmalloc_node_range(size, align, start, end, gfp_flags,
					 pgprot, vm_flags, NUMA_NO_NODE,
					 __builtin_return_address(0));
	}

	if (!p) {
		pr_warn_ratelimited("unable to allocate memory\n");
		return NULL;
	}

	if (kasan && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

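/*
 * Reserve virtual address space for @size bytes in the EXECMEM_MODULE_DATA
 * range without populating it, falling back to the range's fallback window
 * when the primary one cannot satisfy the request.
 */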
struct vm_struct *execmem_vmap(size_t size)
{
	struct execmem_range *range = &execmem_info->ranges[EXECMEM_MODULE_DATA];
	struct vm_struct *area;

	area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
				  range->start, range->end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
	if (!area && range->fallback_start)
		area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
					  range->fallback_start, range->fallback_end,
					  NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0));

	return area;
}
#else
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
			     pgprot_t pgprot, unsigned long vm_flags)
{
	return vmalloc(size);
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
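/*
 * ROX cache: large ROX (read-only, executable) mappings are carved up into
 * smaller allocations. busy_areas tracks ranges handed out to callers and
 * free_areas tracks ranges available for reuse; both maple trees use the
 * cache mutex as their external lock.
 */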
struct execmem_cache {
	struct mutex mutex;
	struct maple_tree busy_areas;
	struct maple_tree free_areas;
};

static struct execmem_cache execmem_cache = {
	.mutex = __MUTEX_INITIALIZER(execmem_cache.mutex),
	.busy_areas = MTREE_INIT_EXT(busy_areas, MT_FLAGS_LOCK_EXTERN,
				     execmem_cache.mutex),
	.free_areas = MTREE_INIT_EXT(free_areas, MT_FLAGS_LOCK_EXTERN,
				     execmem_cache.mutex),
};

static inline unsigned long mas_range_len(struct ma_state *mas)
{
	return mas->last - mas->index + 1;
}

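/*
 * Mark the direct map aliases of all pages backing @vm as valid or invalid,
 * in chunks matching the mapping's page order. On failure the pages that
 * were already updated are restored to their previous state.
 */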
static int execmem_set_direct_map_valid(struct vm_struct *vm, bool valid)
{
	unsigned int nr = (1 << get_vm_area_page_order(vm));
	unsigned int updated = 0;
	int err = 0;

	for (int i = 0; i < vm->nr_pages; i += nr) {
		err = set_direct_map_valid_noflush(vm->pages[i], nr, valid);
		if (err)
			goto err_restore;
		updated += nr;
	}

	return 0;

err_restore:
	for (int i = 0; i < updated; i += nr)
		set_direct_map_valid_noflush(vm->pages[i], nr, !valid);

	return err;
}

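/*
 * Worker that returns fully unused, PMD-aligned areas from free_areas back
 * to vmalloc(), restoring their direct map entries first.
 */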
static void execmem_cache_clean(struct work_struct *work)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct mutex *mutex = &execmem_cache.mutex;
	MA_STATE(mas, free_areas, 0, ULONG_MAX);
	void *area;

	mutex_lock(mutex);
	mas_for_each(&mas, area, ULONG_MAX) {
		size_t size = mas_range_len(&mas);

		if (IS_ALIGNED(size, PMD_SIZE) &&
		    IS_ALIGNED(mas.index, PMD_SIZE)) {
			struct vm_struct *vm = find_vm_area(area);

			execmem_set_direct_map_valid(vm, true);
			mas_store_gfp(&mas, NULL, GFP_KERNEL);
			vfree(area);
		}
	}
	mutex_unlock(mutex);
}

static DECLARE_WORK(execmem_cache_clean_work, execmem_cache_clean);

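/*
 * Add [ptr, ptr + size) to free_areas, merging it with the adjacent free
 * ranges on either side when they abut the new area.
 */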
static int execmem_cache_add(void *ptr, size_t size)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct mutex *mutex = &execmem_cache.mutex;
	unsigned long addr = (unsigned long)ptr;
	MA_STATE(mas, free_areas, addr - 1, addr + 1);
	unsigned long lower, upper;
	void *area = NULL;
	int err;

	lower = addr;
	upper = addr + size - 1;

	mutex_lock(mutex);
	area = mas_walk(&mas);
	if (area && mas.last == addr - 1)
		lower = mas.index;

	area = mas_next(&mas, ULONG_MAX);
	if (area && mas.index == addr + size)
		upper = mas.last;

	mas_set_range(&mas, lower, upper);
	err = mas_store_gfp(&mas, (void *)lower, GFP_KERNEL);
	mutex_unlock(mutex);
	if (err)
		return err;

	return 0;
}

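/* Check that an allocation of @size at mas->index fits the range or its fallback. */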
static bool within_range(struct execmem_range *range, struct ma_state *mas,
			 size_t size)
{
	unsigned long addr = mas->index;

	if (addr >= range->start && addr + size < range->end)
		return true;

	if (range->fallback_start &&
	    addr >= range->fallback_start && addr + size < range->fallback_end)
		return true;

	return false;
}

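/*
 * Find a cached free area large enough for @size within the range, move
 * [addr, addr + size) to busy_areas and return any remainder to free_areas.
 * Returns NULL if no suitable area is currently cached.
 */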
static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
	struct mutex *mutex = &execmem_cache.mutex;
	unsigned long addr, last, area_size = 0;
	void *area, *ptr = NULL;
	int err;

	mutex_lock(mutex);
	mas_for_each(&mas_free, area, ULONG_MAX) {
		area_size = mas_range_len(&mas_free);

		if (area_size >= size && within_range(range, &mas_free, size))
			break;
	}

	if (area_size < size)
		goto out_unlock;

	addr = mas_free.index;
	last = mas_free.last;

	/* insert the allocated area into busy_areas at range [addr, addr + size) */
	mas_set_range(&mas_busy, addr, addr + size - 1);
	err = mas_store_gfp(&mas_busy, (void *)addr, GFP_KERNEL);
	if (err)
		goto out_unlock;

	mas_store_gfp(&mas_free, NULL, GFP_KERNEL);
	if (area_size > size) {
		void *ptr = (void *)(addr + size);

		/*
		 * re-insert the remaining free space into free_areas at range
		 * [addr + size, last]
		 */
		mas_set_range(&mas_free, addr + size, last);
		err = mas_store_gfp(&mas_free, ptr, GFP_KERNEL);
		if (err) {
			mas_store_gfp(&mas_busy, NULL, GFP_KERNEL);
			goto out_unlock;
		}
	}
	ptr = (void *)addr;

out_unlock:
	mutex_unlock(mutex);
	return ptr;
}

static bool execmem_cache_rox = false;

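/*
 * Switch the entire cache, both free and busy areas, to read-only and make
 * future cache population apply ROX permissions directly.
 */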
void execmem_cache_make_ro(void)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
	struct mutex *mutex = &execmem_cache.mutex;
	void *area;

	execmem_cache_rox = true;

	mutex_lock(mutex);

	mas_for_each(&mas_free, area, ULONG_MAX) {
		unsigned long pages = mas_range_len(&mas_free) >> PAGE_SHIFT;
		set_memory_ro(mas_free.index, pages);
	}

	mas_for_each(&mas_busy, area, ULONG_MAX) {
		unsigned long pages = mas_range_len(&mas_busy) >> PAGE_SHIFT;
		set_memory_ro(mas_busy.index, pages);
	}

	mutex_unlock(mutex);
}

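/*
 * Grow the cache: allocate a PMD_SIZE-rounded chunk, fill it with trapping
 * instructions, set it executable (ROX once the cache has been made
 * read-only) and add it to free_areas.
 */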
static int execmem_cache_populate(struct execmem_range *range, size_t size)
{
	unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
	struct vm_struct *vm;
	size_t alloc_size;
	int err = -ENOMEM;
	void *p;

	alloc_size = round_up(size, PMD_SIZE);
	p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
	if (!p)
		return err;

	vm = find_vm_area(p);
	if (!vm)
		goto err_free_mem;

	/* fill memory with instructions that will trap */
	execmem_fill_trapping_insns(p, alloc_size, /* writable = */ true);

	if (execmem_cache_rox) {
		err = set_memory_rox((unsigned long)p, vm->nr_pages);
		if (err)
			goto err_free_mem;
	} else {
		err = set_memory_x((unsigned long)p, vm->nr_pages);
		if (err)
			goto err_free_mem;
	}

	err = execmem_cache_add(p, alloc_size);
	if (err)
		goto err_reset_direct_map;

	return 0;

err_reset_direct_map:
	execmem_set_direct_map_valid(vm, true);
err_free_mem:
	vfree(p);
	return err;
}

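/*
 * Allocate from the ROX cache, populating it with a fresh chunk and retrying
 * once if the first lookup finds nothing suitable.
 */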
static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	void *p;
	int err;

	p = __execmem_cache_alloc(range, size);
	if (p)
		return p;

	err = execmem_cache_populate(range, size);
	if (err)
		return NULL;

	return __execmem_cache_alloc(range, size);
}

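/*
 * Return an allocation to the cache: remove it from busy_areas, overwrite it
 * with trapping instructions, put it back on free_areas and schedule the
 * cleanup worker. Returns false if @ptr was not a cache allocation, in which
 * case the caller frees it with vfree().
 */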
static bool execmem_cache_free(void *ptr)
{
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	struct mutex *mutex = &execmem_cache.mutex;
	unsigned long addr = (unsigned long)ptr;
	MA_STATE(mas, busy_areas, addr, addr);
	size_t size;
	void *area;

	mutex_lock(mutex);
	area = mas_walk(&mas);
	if (!area) {
		mutex_unlock(mutex);
		return false;
	}
	size = mas_range_len(&mas);

	mas_store_gfp(&mas, NULL, GFP_KERNEL);
	mutex_unlock(mutex);

	execmem_fill_trapping_insns(ptr, size, /* writable = */ false);

	execmem_cache_add(ptr, size);

	schedule_work(&execmem_cache_clean_work);

	return true;
}

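/*
 * Temporarily make [ptr, ptr + size) writable and non-executable so the
 * caller can modify it; execmem_restore_rox() switches it back to ROX.
 */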
int execmem_make_temp_rw(void *ptr, size_t size)
{
	unsigned int nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)ptr;
	int ret;

	ret = set_memory_nx(addr, nr);
	if (ret)
		return ret;

	return set_memory_rw(addr, nr);
}

int execmem_restore_rox(void *ptr, size_t size)
{
	unsigned int nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)ptr;

	return set_memory_rox(addr, nr);
}

#else /* CONFIG_ARCH_HAS_EXECMEM_ROX */
static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	return NULL;
}

static bool execmem_cache_free(void *ptr)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_EXECMEM_ROX */

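/*
 * Allocate executable memory of the given @type: from the ROX cache when the
 * range has EXECMEM_ROX_CACHE set, otherwise straight from vmalloc() with
 * the range's protection bits.
 */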
void *execmem_alloc(enum execmem_type type, size_t size)
{
	struct execmem_range *range = &execmem_info->ranges[type];
	bool use_cache = range->flags & EXECMEM_ROX_CACHE;
	unsigned long vm_flags = VM_FLUSH_RESET_PERMS;
	pgprot_t pgprot = range->pgprot;
	void *p;

	if (use_cache)
		p = execmem_cache_alloc(range, size);
	else
		p = execmem_vmalloc(range, size, pgprot, vm_flags);

	return kasan_reset_tag(p);
}

void execmem_free(void *ptr)
{
	/*
	 * This memory may be RO, and freeing RO memory in an interrupt is not
	 * supported by vmalloc.
	 */
	WARN_ON(in_interrupt());

	if (!execmem_cache_free(ptr))
		vfree(ptr);
}

void *execmem_update_copy(void *dst, const void *src, size_t size)
{
	return text_poke_copy(dst, src, size);
}

bool execmem_is_rox(enum execmem_type type)
{
	return !!(execmem_info->ranges[type].flags & EXECMEM_ROX_CACHE);
}

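/*
 * Sanity-check the architecture-supplied execmem_info; additionally drop
 * EXECMEM_ROX_CACHE from all ranges when the architecture does not support
 * ROX caching.
 */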
static bool execmem_validate(struct execmem_info *info)
{
	struct execmem_range *r = &info->ranges[EXECMEM_DEFAULT];

	if (!r->alignment || !r->start || !r->end || !pgprot_val(r->pgprot)) {
		pr_crit("Invalid parameters for execmem allocator, module loading will fail");
		return false;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX)) {
		for (int i = EXECMEM_DEFAULT; i < EXECMEM_TYPE_MAX; i++) {
			r = &info->ranges[i];

			if (r->flags & EXECMEM_ROX_CACHE) {
				pr_warn_once("ROX cache is not supported\n");
				r->flags &= ~EXECMEM_ROX_CACHE;
			}
		}
	}

	return true;
}

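/*
 * Any range the architecture left unset inherits the EXECMEM_DEFAULT
 * parameters, except that EXECMEM_MODULE_DATA defaults to non-executable
 * PAGE_KERNEL protections.
 */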
static void execmem_init_missing(struct execmem_info *info)
{
	struct execmem_range *default_range = &info->ranges[EXECMEM_DEFAULT];

	for (int i = EXECMEM_DEFAULT + 1; i < EXECMEM_TYPE_MAX; i++) {
		struct execmem_range *r = &info->ranges[i];

		if (!r->start) {
			if (i == EXECMEM_MODULE_DATA)
				r->pgprot = PAGE_KERNEL;
			else
				r->pgprot = default_range->pgprot;
			r->alignment = default_range->alignment;
			r->start = default_range->start;
			r->end = default_range->end;
			r->flags = default_range->flags;
			r->fallback_start = default_range->fallback_start;
			r->fallback_end = default_range->fallback_end;
		}
	}
}

struct execmem_info * __weak execmem_arch_setup(void)
{
	return NULL;
}

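/*
 * Set up execmem_info from the architecture hook, or fall back to a single
 * PAGE_KERNEL_EXEC range covering the whole vmalloc space when no
 * architecture-specific setup is provided.
 */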
static void __init __execmem_init(void)
{
	struct execmem_info *info = execmem_arch_setup();

	if (!info) {
		info = execmem_info = &default_execmem_info;
		info->ranges[EXECMEM_DEFAULT].start = VMALLOC_START;
		info->ranges[EXECMEM_DEFAULT].end = VMALLOC_END;
		info->ranges[EXECMEM_DEFAULT].pgprot = PAGE_KERNEL_EXEC;
		info->ranges[EXECMEM_DEFAULT].alignment = 1;
	}

	if (!execmem_validate(info))
		return;

	execmem_init_missing(info);

	execmem_info = info;
}

#ifdef CONFIG_ARCH_WANTS_EXECMEM_LATE
static int __init execmem_late_init(void)
{
	__execmem_init();
	return 0;
}
core_initcall(execmem_late_init);
#else
void __init execmem_init(void)
{
	__execmem_init();
}
#endif