// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/vdso_datastore.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/vdso/vsyscall.h>
#include <clocksource/hyperv_timer.h>

static_assert(VDSO_NR_PAGES + VDSO_NR_VCLOCK_PAGES == __VDSO_PAGES);

unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

int __init init_vdso_image(const struct vdso_image *image)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));

	return 0;
}
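
/*
 * A minimal sketch of how init_vdso_image() gets invoked at boot. The
 * actual call site is emitted by the vdso2c tool into the generated
 * vdso-image-*.c files; the initcall name below is hypothetical:
 *
 *	static __init int init_vdso64(void)
 *	{
 *		return init_vdso_image(&vdso64_image);
 *	}
 *	subsys_initcall(init_vdso64);
 */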

struct linux_binprm;

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}
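
/*
 * Worked example (illustrative, 4K pages): a fault at
 * text_start + 0x2345 arrives with vmf->pgoff == 2, so vdso_fault()
 * returns the struct page backing image->data + 0x2000. The bounds
 * check above turns faults past image->size into SIGBUS.
 */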

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
	struct pt_regs *regs = current_pt_regs();
	unsigned long ipoffset = regs->ip -
		(unsigned long)current->mm->context.vdso;

	if (ipoffset < image->size)
		regs->ip = new_vma->vm_start + ipoffset;
}
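
/*
 * Example: a task whose IP sits at old_vma->vm_start + 0x40 while it
 * mremap()s its own vDSO (checkpoint/restore does this) resumes at
 * new_vma->vm_start + 0x40; IPs outside the image are left alone.
 */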

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = current->mm->context.vdso_image;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static vm_fault_t vvar_vclock_fault(const struct vm_special_mapping *sm,
				    struct vm_area_struct *vma, struct vm_fault *vmf)
{
	switch (vmf->pgoff) {
	case VDSO_PAGE_PVCLOCK_OFFSET:
	{
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();

		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK))
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		break;
	}
	case VDSO_PAGE_HVCLOCK_OFFSET:
	{
		unsigned long pfn = hv_get_tsc_pfn();

		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address, pfn);
		break;
	}
	}

	return VM_FAULT_SIGBUS;
}
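
/*
 * The pvclock/hvclock pages are hypervisor-shared memory, inserted as
 * raw PFNs on first touch. If the corresponding clocksource was never
 * registered, the fault deliberately delivers SIGBUS instead of handing
 * userspace a meaningless page. pgprot_decrypted() matters under
 * SME/SEV, where pages shared with the hypervisor must be mapped
 * unencrypted.
 */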

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_vclock_mapping = {
	.name = "[vvar_vclock]",
	.fault = vvar_vclock_fault,
};
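
/*
 * The .name strings above are what appear in /proc/<pid>/maps, e.g.
 * (illustrative addresses):
 *
 *	7ffff7fc9000-7ffff7fcb000 r-xp 00000000 00:00 0    [vdso]
 */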

/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size + __VDSO_PAGES * PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr + __VDSO_PAGES * PAGE_SIZE;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				       VM_SEALED_SYSMAP,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = vdso_install_vvar_mapping(mm, addr);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       VDSO_VCLOCK_PAGES_START(addr),
				       VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP|VM_SEALED_SYSMAP,
				       &vvar_vclock_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
		do_munmap(mm, addr, image->size, NULL);
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

up_fail:
	mmap_write_unlock(mm);
	return ret;
}
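
/*
 * Resulting layout, assuming VDSO_VCLOCK_PAGES_START(addr) places the
 * vclock pages at the end of the vvar area (the static_assert at the
 * top of this file guarantees the sizes add up):
 *
 *	addr                                        text_start
 *	| vvar data pages  | vclock pages          | vdso text    |
 *	| VDSO_NR_PAGES    | VDSO_NR_VCLOCK_PAGES  | image->size  |
 *	|<------- __VDSO_PAGES * PAGE_SIZE ------->|
 */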

int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping, which may not
	 * handle accounting and rlimits correctly.
	 * We could search for a vma near context.vdso, but this is a
	 * slowpath, so explicitly check all VMAs to be completely sure.
	 */
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vdso_vvar_mapping) ||
				vma_is_special_mapping(vma, &vvar_vclock_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);

	return map_vdso(image, addr);
}
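
/*
 * map_vdso_once() backs the ARCH_MAP_VDSO_* arch_prctl() commands used
 * by checkpoint/restore tools to re-map the vDSO at a saved address.
 * A minimal userspace sketch, assuming the x86-64 wiring:
 *
 *	syscall(SYS_arch_prctl, ARCH_MAP_VDSO_64, (unsigned long)addr);
 */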

static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso32_image, 0);
}
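
/*
 * vdso32_enabled lives in vdso32-setup.c and can be toggled at runtime
 * via the abi.vsyscall32 sysctl, e.g.:
 *
 *	# sysctl abi.vsyscall32=0	(disable the 32-bit vDSO)
 */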

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (IS_ENABLED(CONFIG_X86_64)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso64_image, 0);
	}

	return load_vdso32();
}
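
/*
 * This hook runs from the ELF loader during execve(). Userspace never
 * hardcodes the address; it finds the mapping via the auxiliary vector:
 *
 *	#include <sys/auxv.h>
 *	void *vdso_base = (void *)getauxval(AT_SYSINFO_EHDR);
 */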

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp, bool x32)
{
	if (IS_ENABLED(CONFIG_X86_X32_ABI) && x32) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso(&vdsox32_image, 0);
	}

	if (IS_ENABLED(CONFIG_IA32_EMULATION))
		return load_vdso32();

	return 0;
}
#endif

bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
	const struct vdso_image *image = current->mm->context.vdso_image;
	unsigned long vdso = (unsigned long) current->mm->context.vdso;

	if (in_ia32_syscall() && image == &vdso32_image) {
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
	return false;
}

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
#endif /* CONFIG_X86_64 */
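
/*
 * Example: booting with "vdso=0" clears vdso64_enabled, so
 * arch_setup_additional_pages() maps no 64-bit vDSO and libc falls back
 * to real syscalls for clock_gettime() and friends.
 */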