// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module strict rwx
 *
 * Copyright (C) 2015 Rusty Russell
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/execmem.h>
#include "internal.h"

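/*
 * These helpers are called from the module loader core in
 * kernel/module/main.c.  A typical load goes, roughly:
 * module_enforce_rwx_sections() while the ELF image is validated,
 * module_mark_ro_after_init() before section layout, then
 * module_enable_rodata_ro(), module_enable_data_nx() and
 * module_enable_text_rox() once the image is in place, and finally
 * module_enable_rodata_ro_after_init() after the module's init
 * function has run.
 */

/*
 * Apply @set_memory to every page of the given memory region of @mod.
 * Regions that were never allocated (NULL base) are silently skipped.
 * set_vm_flush_reset_perms() marks the mapping so that its permissions
 * are reset and the TLB flushed when the region is eventually freed.
 */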
static int module_set_memory(const struct module *mod, enum mod_mem_type type,
			     int (*set_memory)(unsigned long start, int num_pages))
{
	const struct module_memory *mod_mem = &mod->mem[type];

	if (!mod_mem->base)
		return 0;

	set_vm_flush_reset_perms(mod_mem->base);
	return set_memory((unsigned long)mod_mem->base, mod_mem->size >> PAGE_SHIFT);
}

/*
 * Since some arches are moving towards PAGE_KERNEL module allocations instead
 * of PAGE_KERNEL_EXEC, keep module_enable_text_rox() independent of
 * CONFIG_STRICT_MODULE_RWX because making the text executable is needed
 * regardless of whether we are strict.
 */
int module_enable_text_rox(const struct module *mod)
{
	for_class_mod_mem_type(type, text) {
		const struct module_memory *mem = &mod->mem[type];
		int ret;

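		/*
		 * Memory that execmem handed out as ROX only needs its
		 * permissions restored; otherwise set it ROX (or just
		 * executable when not strict) page by page.
		 */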
		if (mem->is_rox)
			ret = execmem_restore_rox(mem->base, mem->size);
		else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
			ret = module_set_memory(mod, type, set_memory_rox);
		else
			ret = module_set_memory(mod, type, set_memory_x);
		if (ret)
			return ret;
	}
	return 0;
}

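/*
 * Make the core and init rodata regions read-only.  Nothing to do unless
 * CONFIG_STRICT_MODULE_RWX is enabled and the kernel was not booted with
 * rodata=off.
 */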
int module_enable_rodata_ro(const struct module *mod)
{
	int ret;

	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX) || !rodata_enabled)
		return 0;

	ret = module_set_memory(mod, MOD_RODATA, set_memory_ro);
	if (ret)
		return ret;
	ret = module_set_memory(mod, MOD_INIT_RODATA, set_memory_ro);
	if (ret)
		return ret;

	return 0;
}

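/*
 * Once the module's init function has run, data annotated __ro_after_init
 * can be made read-only as well.
 */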
int module_enable_rodata_ro_after_init(const struct module *mod)
{
	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX) || !rodata_enabled)
		return 0;

	return module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro);
}

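/*
 * Mark all of the module's data regions non-executable.
 */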
int module_enable_data_nx(const struct module *mod)
{
	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		return 0;

	for_class_mod_mem_type(type, data) {
		int ret = module_set_memory(mod, type, set_memory_nx);

		if (ret)
			return ret;
	}
	return 0;
}

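/*
 * Enforce W^X at load time: refuse any module carrying a section that is
 * both writable and executable.
 */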
int module_enforce_rwx_sections(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
				const char *secstrings,
				const struct module *mod)
{
	const unsigned long shf_wx = SHF_WRITE | SHF_EXECINSTR;
	int i;

	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		return 0;

	for (i = 0; i < hdr->e_shnum; i++) {
		if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) {
			pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n",
			       mod->name, secstrings + sechdrs[i].sh_name, i);
			return -ENOEXEC;
		}
	}

	return 0;
}

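/*
 * Sections that must stay writable while the module is being loaded but
 * can be made read-only once init has completed.
 */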
static const char *const ro_after_init[] = {
	/*
	 * Section .data..ro_after_init holds data explicitly annotated by
	 * __ro_after_init.
	 */
	".data..ro_after_init",

	/*
	 * Section __jump_table holds data structures that are never modified,
	 * with the exception of entries that refer to code in the __init
	 * section, which are marked as such at module load time.
	 */
	"__jump_table",

#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
	/*
	 * Section .static_call_sites holds data structures that need to be
	 * sorted and processed at module load time but are never modified
	 * afterwards.
	 */
	".static_call_sites",
#endif
};

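/*
 * Tag each section listed in ro_after_init[] with SHF_RO_AFTER_INIT so
 * that the section layout code places it in the MOD_RO_AFTER_INIT region.
 */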
void module_mark_ro_after_init(const Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
			       const char *secstrings)
{
	int i, j;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &sechdrs[i];

		for (j = 0; j < ARRAY_SIZE(ro_after_init); j++) {
			if (strcmp(secstrings + shdr->sh_name,
				   ro_after_init[j]) == 0) {
				shdr->sh_flags |= SHF_RO_AFTER_INIT;
				break;
			}
		}
	}
}