// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/vdso_datastore.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/vdso/vsyscall.h>
#include <clocksource/hyperv_timer.h>

static_assert(VDSO_NR_PAGES + VDSO_NR_VCLOCK_PAGES == __VDSO_PAGES);

unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

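/*
 * Boot-time setup for a vDSO image: check that the blob is a whole
 * number of pages and patch its alternative instructions, so that the
 * image mapped into processes matches the running CPU's features.
 */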
int __init init_vdso_image(const struct vdso_image *image)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));

	return 0;
}

struct linux_binprm;

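/*
 * Fault handler for the [vdso] text mapping: back the faulting page
 * offset with the corresponding page of the kernel's image blob.
 */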
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

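/*
 * If a 32-bit task mremap()s its vDSO while stopped inside a fast
 * syscall, its saved IP points at the old int80 landing pad; rewrite
 * it to point into the moved vDSO so the syscall returns sanely.
 */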
static void vdso_fix_landing(const struct vdso_image *image,
			     struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address - see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

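/* Track the vDSO's new location when userspace mremap()s it. */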
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = current->mm->context.vdso_image;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

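/*
 * Fault handler for the [vvar_vclock] pages: insert the pvclock or
 * Hyper-V TSC page PFN, but only if the corresponding clocksource has
 * ever been used; any other access gets SIGBUS.
 */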
static vm_fault_t vvar_vclock_fault(const struct vm_special_mapping *sm,
				    struct vm_area_struct *vma, struct vm_fault *vmf)
{
	switch (vmf->pgoff) {
#ifdef CONFIG_PARAVIRT_CLOCK
	case VDSO_PAGE_PVCLOCK_OFFSET:
	{
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();

		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK))
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		break;
	}
#endif /* CONFIG_PARAVIRT_CLOCK */
#ifdef CONFIG_HYPERV_TIMER
	case VDSO_PAGE_HVCLOCK_OFFSET:
	{
		unsigned long pfn = hv_get_tsc_pfn();

		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address, pfn);
		break;
	}
#endif /* CONFIG_HYPERV_TIMER */
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_vclock_mapping = {
	.name = "[vvar_vclock]",
	.fault = vvar_vclock_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image - blob to map
 * @addr - request a specific address (zero to map at free addr)
 */
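/*
 * Rough layout of the result, low to high (the vclock pages are the
 * trailing pages of the vvar area, see VDSO_VCLOCK_PAGES_START()):
 *
 *	addr:				 vvar data pages (__VDSO_PAGES)
 *	VDSO_VCLOCK_PAGES_START(addr):	 [vvar_vclock] pages
 *	addr + __VDSO_PAGES * PAGE_SIZE: [vdso] text, image->size bytes
 */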
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size + __VDSO_PAGES * PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr + __VDSO_PAGES * PAGE_SIZE;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				       VM_SEALED_SYSMAP,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = vdso_install_vvar_mapping(mm, addr);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       VDSO_VCLOCK_PAGES_START(addr),
				       VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP|VM_SEALED_SYSMAP,
				       &vvar_vclock_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
		do_munmap(mm, addr, image->size, NULL);
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

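/*
 * Map the vDSO at a caller-chosen address; used by the
 * ARCH_MAP_VDSO_* arch_prctl()s (e.g. for CRIU restore), roughly:
 *
 *	syscall(SYS_arch_prctl, ARCH_MAP_VDSO_64, addr);
 *
 * Refuses to install a second copy into the same mm.
 */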
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * abuse from userspace, as install_special_mapping may not do
	 * accounting and rlimits right.
	 * We could search the VMAs near context.vdso, but this is a slow
	 * path, so explicitly check all VMAs to be completely sure.
	 */
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
		    vma_is_special_mapping(vma, &vdso_vvar_mapping) ||
		    vma_is_special_mapping(vma, &vvar_vclock_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
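/* Map the 32-bit vDSO, unless "vdso32=" disabled it. */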
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
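/*
 * Called from the ELF loader at exec time to map the vDSO into a new
 * process, unless "vdso=0" disabled it.
 */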
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, 0);
}

#ifdef CONFIG_COMPAT
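/*
 * Compat exec: x32 binaries get the x32 image (sharing the 64-bit
 * enable knob), ia32 binaries the 32-bit one.
 */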
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp, bool x32)
{
#ifdef CONFIG_X86_X32_ABI
	if (x32) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso(&vdso_image_x32, 0);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

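/*
 * Report whether a 32-bit task's IP sits on one of the vDSO sigreturn
 * landing pads, i.e. whether the current syscall is a sigreturn issued
 * through the vDSO trampolines.
 */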
bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	const struct vdso_image *image = current->mm->context.vdso_image;
	unsigned long vdso = (unsigned long) current->mm->context.vdso;

	if (in_ia32_syscall() && image == &vdso_image_32) {
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
#endif
	return false;
}

#ifdef CONFIG_X86_64
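/*
 * "vdso=" boot parameter for the 64-bit vDSO: booting with "vdso=0"
 * on the kernel command line disables the mapping for new processes;
 * any nonzero value (the default is 1) enables it.
 */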
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
#endif /* CONFIG_X86_64 */