// SPDX-License-Identifier: GPL-2.0-or-later

/*
 *    Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *			 <benh@kernel.crashing.org>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/vdso_datastore.h>
#include <vdso/datapage.h>

#include <asm/syscall.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/setup.h>

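/*
 * The generic vDSO data store and the powerpc code must agree on the
 * number of data pages mapped in front of the vDSO text.
 */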
static_assert(__VDSO_PAGES == VDSO_NR_PAGES);

/* The alignment of the vDSO */
#define VDSO_ALIGNMENT	(1 << 16)

extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;

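/* Referenced below to spot unimplemented entries in the syscall tables. */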
long sys_ni_syscall(void);

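/*
 * Keep mm->context.vdso tracking the vDSO text across mremap() so code
 * that relies on it (such as the signal trampoline lookup) keeps working.
 */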
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
		       unsigned long text_size)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != text_size)
		return -EINVAL;

	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start);
}

static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}

static void vdso_close(const struct vm_special_mapping *sm, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * close() is called for munmap() but also for mremap(). In the mremap()
	 * case the vdso pointer has already been updated by the mremap() hook
	 * above, so it must not be set to NULL here.
	 */
	if (vma->vm_start != (unsigned long)mm->context.vdso)
		return;

	mm->context.vdso = NULL;
}

static struct vm_special_mapping vdso32_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso32_mremap,
	.close = vdso_close,
};

static struct vm_special_mapping vdso64_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso64_mremap,
	.close = vdso_close,
};

/*
 * This is called from binfmt_elf: we create the special VMA for the
 * vDSO and insert it into the mm's VMA tree.
 */
static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long vdso_size, vdso_base, mappings_size;
	struct vm_special_mapping *vdso_spec;
	unsigned long vvar_size = VDSO_NR_PAGES * PAGE_SIZE;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

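	/*
	 * Layout note: the data (vvar) pages are mapped at vdso_base with the
	 * vDSO text directly behind them; mm->context.vdso points at the text.
	 */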
	if (is_32bit_task()) {
		vdso_spec = &vdso32_spec;
		vdso_size = &vdso32_end - &vdso32_start;
	} else {
		vdso_spec = &vdso64_spec;
		vdso_size = &vdso64_end - &vdso64_start;
	}

	mappings_size = vdso_size + vvar_size;
	mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;

	/*
	 * Pick a base address for the vDSO in process space. The size was
	 * padded above so that the result can be aligned up.
	 */
	vdso_base = get_unmapped_area(NULL, 0, mappings_size, 0, 0);
	if (IS_ERR_VALUE(vdso_base))
		return vdso_base;

	/* Add required alignment. */
	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);

	vma = vdso_install_vvar_mapping(mm, vdso_base);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/*
	 * Our VMA flags don't include VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.
	 * gdb can break that via the ptrace interface, triggering CoW on
	 * those pages, but it is then your responsibility never to do that
	 * to the "data" page of the vDSO, or you'll stop getting kernel
	 * updates and your nice userland gettimeofday() will be totally
	 * dead. Using it to set breakpoints in the vDSO code pages is
	 * fine, though.
	 */
	vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
	if (IS_ERR(vma)) {
		do_munmap(mm, vdso_base, vvar_size, NULL);
		return PTR_ERR(vma);
	}

	// Now that the mappings are in place, set the mm VDSO pointer
	mm->context.vdso = (void __user *)vdso_base + vvar_size;

	return 0;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int rc;

	mm->context.vdso = NULL;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	rc = __arch_setup_additional_pages(bprm, uses_interp);

	mmap_write_unlock(mm);
	return rc;
}

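/*
 * Resolve the start/end of a fixup section within the vDSO image and apply
 * the matching do_*_fixups() handler to it in place.
 */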
#define VDSO_DO_FIXUPS(type, value, bits, sec) do {					\
	void *__start = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_start);	\
	void *__end = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_end);	\
											\
	do_##type##_fixups((value), __start, __end);					\
} while (0)

static void __init vdso_fixup_features(void)
{
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 64, mmu_ftr_fixup);
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 64, fw_ftr_fixup);
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 64, lwsync_fixup);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_VDSO32
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 32, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 32, mmu_ftr_fixup);
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 32, fw_ftr_fixup);
#endif /* CONFIG_PPC64 */
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 32, lwsync_fixup);
#endif
}

/*
 * Called from vdso_init() to initialize the bitmap of available
 * syscalls in the vDSO arch data page.
 */
static void __init vdso_setup_syscall_map(void)
{
	unsigned int i;

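	/*
	 * Bit i of the map is set when syscall i is implemented; bits are
	 * numbered from the most significant bit of each 32-bit word.
	 */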
	for (i = 0; i < NR_syscalls; i++) {
		if (sys_call_table[i] != (void *)&sys_ni_syscall)
			vdso_k_arch_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
		if (IS_ENABLED(CONFIG_COMPAT) &&
		    compat_sys_call_table[i] != (void *)&sys_ni_syscall)
			vdso_k_arch_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
	}
}

#ifdef CONFIG_PPC64
int vdso_getcpu_init(void)
{
	unsigned long cpu, node, val;

	/*
	 * SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node
	 * in the next 16 bits.  The VDSO uses this to implement getcpu().
	 */
	cpu = get_cpu();
	WARN_ON_ONCE(cpu > 0xffff);

	node = cpu_to_node(cpu);
	WARN_ON_ONCE(node > 0xffff);

	val = (cpu & 0xffff) | ((node & 0xffff) << 16);
	mtspr(SPRN_SPRG_VDSO_WRITE, val);
	get_paca()->sprg_vdso = val;

	put_cpu();

	return 0;
}
/* We need to call this before SMP init */
early_initcall(vdso_getcpu_init);
#endif

static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int i;
	struct page **pagelist;
	int pages = (end - start) >> PAGE_SHIFT;

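	/* One extra entry, zeroed by kcalloc(), NULL-terminates the list. */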
	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);

	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);

	return pagelist;
}

static int __init vdso_init(void)
{
#ifdef CONFIG_PPC64
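	/* Cache geometry, so the vDSO's cache flush helper can use it. */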
	vdso_k_arch_data->dcache_block_size = ppc64_caches.l1d.block_size;
	vdso_k_arch_data->icache_block_size = ppc64_caches.l1i.block_size;
	vdso_k_arch_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
	vdso_k_arch_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
#endif /* CONFIG_PPC64 */

	vdso_setup_syscall_map();

	vdso_fixup_features();

	if (IS_ENABLED(CONFIG_VDSO32))
		vdso32_spec.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);

	if (IS_ENABLED(CONFIG_PPC64))
		vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);

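	/* Ensure the page lists above are visible before the vDSO is mapped. */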
	smp_wmb();

	return 0;
}
arch_initcall(vdso_init);