// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vdso_datastore.h>

#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#include <vdso/datapage.h>
#include <generated/vdso-offsets.h>

extern char vdso_start[], vdso_end[];

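/*
 * The .mremap hook for the vDSO code mapping: when userspace moves the
 * vDSO with mremap(), refresh the base address cached in the mm context
 * so the kernel can still locate the signal return trampoline.
 */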
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)(new_vma->vm_start);

	return 0;
}

struct loongarch_vdso_info vdso_info = {
	.vdso = vdso_start,
	.code_mapping = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
	.offset_sigreturn = vdso_offset_sigreturn,
};

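/*
 * Boot-time setup: record per-CPU NUMA information in the vDSO data page
 * and build the array of struct pages backing the vDSO code image.
 */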
static int __init init_vdso(void)
{
	unsigned long i, cpu, pfn;

	BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));

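	/* Make each possible CPU's NUMA node visible to the vDSO getcpu(). */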
	for_each_possible_cpu(cpu)
		vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu);

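	/* One struct page pointer for each page of the vDSO code image. */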
	vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start);
	vdso_info.code_mapping.pages =
		kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);

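	/* The vDSO image lives in the kernel image; collect its physical pages. */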
	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);

	return 0;
}
subsys_initcall(init_vdso);

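/*
 * Base address hint for the vDSO area: STACK_TOP, optionally moved up by a
 * page-aligned random offset below VDSO_RANDOMIZE_SIZE when ASLR is enabled.
 */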
static unsigned long vdso_base(void)
{
	unsigned long base = STACK_TOP;

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
		base = PAGE_ALIGN(base);
	}

	return base;
}

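/*
 * Called by the ELF loader at exec time: map the shared vDSO data pages and,
 * immediately above them, the vDSO code into the new address space.
 */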
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long size, data_addr, vdso_addr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct loongarch_vdso_info *info = current->thread.vdso;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/*
	 * Determine the total area size. This includes the shared vDSO
	 * data pages and the vDSO code itself.
	 */
	size = VVAR_SIZE + info->size;

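	/* Reserve one contiguous area for both mappings, hinted by vdso_base(). */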
	data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(data_addr)) {
		ret = data_addr;
		goto out;
	}

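	/* Install the generic vDSO data (vvar) mapping at the start of the area. */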
	vma = vdso_install_vvar_mapping(mm, data_addr);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

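	/* The vDSO code mapping follows the data pages. */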
	vdso_addr = data_addr + VVAR_SIZE;
	vma = _install_special_mapping(mm, vdso_addr, info->size,
				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &info->code_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	mmap_write_unlock(mm);
	return ret;
}