xref: /linux/arch/sparc/vdso/vma.c (revision 2f6c9bf31a0b16aeccb42b73f8d0ddf9bea88f3f)
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */

/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/spitfire.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>

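/*
 * vdso_enabled can be cleared with the "vdso=" boot parameter (see
 * vdso_setup() at the bottom of this file) and is also cleared if the
 * vdso pages cannot be allocated.
 */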
unsigned int __read_mostly vdso_enabled = 1;

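/*
 * The .name of a vm_special_mapping is what shows up for the mapping in
 * /proc/<pid>/maps, i.e. "[vvar]" and "[vdso]".
 */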
static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]"
};

#ifdef CONFIG_SPARC64
static struct vm_special_mapping vdso_mapping64 = {
	.name = "[vdso]"
};
#endif

#ifdef CONFIG_COMPAT
static struct vm_special_mapping vdso_mapping32 = {
	.name = "[vdso]"
};
#endif

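/*
 * Kernel-side pointer to the shared vvar data page.  Userspace sees the
 * same page read-only through the [vvar] mapping, and the vdso code reads
 * it (e.g. the seqcount in vvar_data->seq) to return time values without
 * entering the kernel.
 */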
struct vvar_data *vvar_data;

struct tick_patch_entry {
	s32 orig, repl;
};

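/*
 * Each tick_patch_entry in the image's tick_patch table holds two
 * PC-relative offsets: 'orig' points at an instruction in the vdso text
 * and 'repl' at its replacement (presumably swapping a %tick read for a
 * %stick read on CPUs that have %stick).  On anything newer than
 * spitfire the replacement is copied over the original and the I-cache
 * line is flushed.
 */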
static void stick_patch(const struct vdso_image *image)
{
	struct tick_patch_entry *p, *p_end;

	p = image->data + image->tick_patch;
	p_end = (void *)p + image->tick_patch_len;
	while (p < p_end) {
		u32 *instr = (void *)&p->orig + p->orig;
		u32 *repl = (void *)&p->repl + p->repl;

		*instr = *repl;
		flushi(instr);
		p++;
	}
}

/*
 * Allocate pages for the vdso and vvar, and copy in the vdso text from the
 * kernel image.
 */
int __init init_vdso_image(const struct vdso_image *image,
		struct vm_special_mapping *vdso_mapping)
{
	int i;
	struct page *dp, **dpp = NULL;
	int dnpages = 0;
	struct page *cp, **cpp = NULL;
	int cnpages = (image->size) / PAGE_SIZE;

	/*
	 * First, the vdso text.  This is initialized data, an integral number
	 * of pages long.
	 */
	if (WARN_ON(image->size % PAGE_SIZE != 0))
		goto oom;

	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
	vdso_mapping->pages = cpp;

	if (!cpp)
		goto oom;

	if (tlb_type != spitfire)
		stick_patch(image);

	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
		if (!cp)
			goto oom;
		cpp[i] = cp;
		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
	}

	/*
	 * Now the vvar page.  This is uninitialized data.
	 */

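	/*
	 * The vvar page is allocated only once and shared by every vdso
	 * image; a second call (e.g. for the compat image) reuses it.
	 */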
	if (vvar_data == NULL) {
		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
		if (WARN_ON(dnpages != 1))
			goto oom;
		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
		vvar_mapping.pages = dpp;

		if (!dpp)
			goto oom;

		dp = alloc_page(GFP_KERNEL);
		if (!dp)
			goto oom;

		dpp[0] = dp;
		vvar_data = page_address(dp);
		memset(vvar_data, 0, PAGE_SIZE);

		vvar_data->seq = 0;
	}

	return 0;
 oom:
	if (cpp != NULL) {
		for (i = 0; i < cnpages; i++) {
			if (cpp[i] != NULL)
				__free_page(cpp[i]);
		}
		kfree(cpp);
		vdso_mapping->pages = NULL;
	}

	if (dpp != NULL) {
		for (i = 0; i < dnpages; i++) {
			if (dpp[i] != NULL)
				__free_page(dpp[i]);
		}
		kfree(dpp);
		vvar_mapping.pages = NULL;
	}

	pr_warn("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}

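/*
 * Build the builtin vdso image(s) at boot.  Registered via
 * subsys_initcall() below, so this runs once during kernel init, before
 * any userspace exists to map the result.
 */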
static int __init init_vdso(void)
{
	int err = 0;
#ifdef CONFIG_SPARC64
	err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
	if (err)
		return err;
#endif

#ifdef CONFIG_COMPAT
	err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
#endif
	return err;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/* Shuffle the vdso up a bit, randomly. */
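/*
 * With 8K pages and PTRS_PER_PTE == 1024 (the usual sparc64 values, though
 * both depend on the configuration) this picks a page-aligned base
 * somewhere within an 8MB window starting at 'start'.
 */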
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
	unsigned int offset;

	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	return start + (offset << PAGE_SHIFT);
}

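/*
 * Resulting layout (sym_vvar_start is negative, so the vvar page(s) sit
 * immediately below the vdso text):
 *
 *   addr                          text_start = addr - sym_vvar_start
 *   |<---- -sym_vvar_start ---->|<---------- image->size ---------->|
 *   [vvar]                       [vdso]
 */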
static int map_vdso(const struct vdso_image *image,
		struct vm_special_mapping *vdso_mapping)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	down_write(&mm->mmap_sem);

	/*
	 * First, get an unmapped region: then randomize it, and make sure that
	 * region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0,
					 image->size - image->sym_vvar_start,
					 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
	}
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

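	/*
	 * If the vvar mapping cannot be installed, tear down the vdso text
	 * mapping that was just set up above.
	 */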
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

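/*
 * Called from the ELF loader at exec time to map the vdso into the new
 * process.  Compat (32-bit) tasks get the 32-bit image when
 * CONFIG_COMPAT is enabled.
 */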
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso_enabled)
		return 0;

#if defined CONFIG_COMPAT
	if (!(is_32bit_task()))
		return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
	else
		return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
#else
	return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
#endif
}

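/*
 * "vdso=" boot parameter: "vdso=0" disables the vdso, any non-zero value
 * (the default is 1) leaves it enabled.
 */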
static __init int vdso_setup(char *s)
{
	int err;
	unsigned long val;

	err = kstrtoul(s, 10, &val);
	if (err)
		return err;
	vdso_enabled = val;
	return 0;
}
__setup("vdso=", vdso_setup);