/* xref: /kvm-unit-tests/lib/x86/usermode.c (revision cd5f2fb4ad641c51fe0f1a85264dc3f6ede6e131) */
#include "x86/msr.h"
#include "x86/processor.h"
#include "x86/apic-defs.h"
#include "x86/apic.h"
#include "x86/desc.h"
#include "x86/isr.h"
#include "alloc.h"
#include "setjmp.h"
#include "usermode.h"

#include "libcflat.h"
#include <stdint.h>

#define USERMODE_STACK_SIZE	0x2000
/* First vector above the 32 architecturally reserved exception vectors. */
#define RET_TO_KERNEL_IRQ	0x20

static jmp_buf jmpbuf;

static void restore_exec_to_jmpbuf(void)
{
	longjmp(jmpbuf, 1);
}

static void restore_exec_to_jmpbuf_exception_handler(struct ex_regs *regs)
{
	/* longjmp must happen after iret, so do not do it now.  */
	regs->rip = (unsigned long)&restore_exec_to_jmpbuf;
	regs->cs = KERNEL_CS;
#ifdef __x86_64__
	regs->ss = KERNEL_DS;
#endif
}

uint64_t run_in_user(usermode_func func, unsigned int fault_vector,
		uint64_t arg1, uint64_t arg2, uint64_t arg3,
		uint64_t arg4, bool *raised_vector)
{
	extern char ret_to_kernel;
	volatile uint64_t rax = 0;
	/* Single static stack and jmpbuf: run_in_user() is not reentrant. */
	static unsigned char user_stack[USERMODE_STACK_SIZE];
	handler old_ex;

	*raised_vector = 0;
	set_idt_entry(RET_TO_KERNEL_IRQ, &ret_to_kernel, 3);
	old_ex = handle_exception(fault_vector,
				  restore_exec_to_jmpbuf_exception_handler);

	if (setjmp(jmpbuf) != 0) {
		handle_exception(fault_vector, old_ex);
		*raised_vector = 1;
		return 0;
	}

	asm volatile (
			/* Prepare kernel SP for exception handlers */
			"mov %%rsp, %[rsp0]\n\t"
			/* Load user_ds to DS and ES */
			"mov %[user_ds], %%ax\n\t"
			"mov %%ax, %%ds\n\t"
			"mov %%ax, %%es\n\t"
			/* IRET into user mode */
			"pushq %[user_ds]\n\t"
			"pushq %[user_stack_top]\n\t"
			"pushfq\n\t"
			"pushq %[user_cs]\n\t"
			"lea user_mode(%%rip), %%rax\n\t"
			"pushq %%rax\n\t"
			"iretq\n"

			"user_mode:\n\t"
			/* Back up volatile registers before invoking func */
			"push %%rcx\n\t"
			"push %%rdx\n\t"
			"push %%rdi\n\t"
			"push %%rsi\n\t"
			"push %%r8\n\t"
			"push %%r9\n\t"
			"push %%r10\n\t"
			"push %%r11\n\t"
			/* Call user mode function */
			"mov %[arg1], %%rdi\n\t"
			"mov %[arg2], %%rsi\n\t"
			"mov %[arg3], %%rdx\n\t"
			"mov %[arg4], %%rcx\n\t"
			"call *%[func]\n\t"
			/* Restore registers */
			"pop %%r11\n\t"
			"pop %%r10\n\t"
			"pop %%r9\n\t"
			"pop %%r8\n\t"
			"pop %%rsi\n\t"
			"pop %%rdi\n\t"
			"pop %%rdx\n\t"
			"pop %%rcx\n\t"
			/* Return to kernel via system call */
			"int %[kernel_entry_vector]\n\t"
			/* Kernel Mode */
			"ret_to_kernel:\n\t"
			"mov %[rsp0], %%rsp\n\t"
#ifdef __x86_64__
			/*
			 * Restore SS, as the CPU loads SS with a NULL segment
			 * if handling an interrupt/exception changes the CPL.
			 */
			"mov %[kernel_ds], %%ss\n\t"
#endif
			:
			"+a"(rax),
			[rsp0]"=m"(tss[0].rsp0)
			:
			[arg1]"m"(arg1),
			[arg2]"m"(arg2),
			[arg3]"m"(arg3),
			[arg4]"m"(arg4),
			[func]"m"(func),
			[user_ds]"i"(USER_DS),
			[user_cs]"i"(USER_CS),
			[kernel_ds]"rm"(KERNEL_DS),
			[user_stack_top]"r"(user_stack +
					sizeof(user_stack)),
			[kernel_entry_vector]"i"(RET_TO_KERNEL_IRQ));

	handle_exception(fault_vector, old_ex);

	return rax;
}
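
/*
 * Illustrative usage sketch (not part of this file): a test can run a
 * callback at CPL3 and check that a privileged instruction raises the
 * expected fault.  The callback name and MSR choice below are
 * hypothetical; the cast matches the usermode_func typedef in
 * usermode.h, and unused arguments are passed as 0.
 */
#if 0
static uint64_t user_rdmsr(uint64_t msr, uint64_t unused2,
			   uint64_t unused3, uint64_t unused4)
{
	/* RDMSR is a CPL0 instruction, so this should raise #GP at CPL3. */
	return rdmsr(msr);
}

static void example_usermode_test(void)
{
	bool raised_vector;

	run_in_user((usermode_func)user_rdmsr, GP_VECTOR,
		    MSR_IA32_APICBASE, 0, 0, 0, &raised_vector);
	report(raised_vector, "RDMSR at CPL3 raised #GP");
}
#endif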