/*
 * Framework for testing nested virtualization
 */

#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"
#include "vmalloc.h"

/* for the nested page table */
u64 *pte[2048];
u64 *pde[4];
u64 *pdpe;
u64 *pml4e;
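
/*
 * Sizing note: 2048 PTE pages x 512 entries x 4 KiB cover 4 GiB of
 * guest-physical address space, matching the 4 PDE pages (4 x 512 x 2 MiB)
 * and the 4 PDPE entries (4 x 1 GiB) that setup_svm() installs.
 */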

struct vmcb *vmcb;

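/*
 * Return a pointer to the NPT PTE that covers @address. Example: for
 * guest-physical address 0x12345000, address >> 12 = 0x12345, so
 * i1 = 0x91 selects the PTE page and i2 = 0x145 the entry within it.
 */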
u64 *npt_get_pte(u64 address)
{
	int i1, i2;

	address >>= 12;
	i1 = (address >> 9) & 0x7ff;
	i2 = address & 0x1ff;

	return &pte[i1][i2];
}

u64 *npt_get_pde(u64 address)
{
	int i1, i2;

	address >>= 21;
	i1 = (address >> 9) & 0x3;
	i2 = address & 0x1ff;

	return &pde[i1][i2];
}

u64 *npt_get_pdpe(void)
{
	return pdpe;
}

u64 *npt_get_pml4e(void)
{
	return pml4e;
}

bool smp_supported(void)
{
	return cpu_count() > 1;
}

bool default_supported(void)
{
	return true;
}

void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}

void default_prepare_gif_clear(struct svm_test *test)
{
}

bool default_finished(struct svm_test *test)
{
	return true; /* one vmexit */
}

bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}

int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}

void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}

void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
                         u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}

static test_guest_func guest_main;

void test_set_guest(test_guest_func func)
{
	guest_main = func;
}

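/*
 * All guests enter through test_thunk(): it runs the registered guest
 * function and then forces a clean #VMEXIT with VMMCALL, which
 * vmcb_ident() always intercepts.
 */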
static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}

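/*
 * I/O and MSR permission bitmaps. Hardware requires both to be page
 * aligned, so the backing arrays are oversized by up to one page and
 * setup_svm() rounds the start addresses up with ALIGN().
 */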
u8 *io_bitmap;
u8 io_bitmap_area[16384];

u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];

void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
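	/*
	 * VMSAVE fills in the state that vmcb_set_seg() below does not
	 * cover: FS, GS, TR and LDTR (including their hidden state), plus
	 * KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs.
	 */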
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

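	/*
	 * With nested paging enabled (nested_ctl = 1), guest-physical
	 * addresses are translated through the page table built in
	 * setup_svm(). nested_cr3 takes a physical address; pml4e comes
	 * from alloc_page(), which returns identity-mapped memory, so its
	 * virtual address can be used directly.
	 */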
	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		ctrl->nested_cr3 = (u64)pml4e;
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}

struct regs regs;

struct regs get_regs(void)
{
	return regs;
}

// rax handled specially below

#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C      SAVE_GPR_C
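
/*
 * The offsets mirror struct regs: rbx lives at regs+0x08 up through r15 at
 * regs+0x78 (the skipped slot at 0x20 holds cr2, which is not a GPR).
 * Because xchg swaps rather than copies, the same sequence serves as both
 * save and restore. rax cannot be handled this way since it carries the
 * VMCB address across VMRUN, so it is staged through the VMCB instead,
 * together with rflags (see ASM_PRE_VMRUN_CMD/ASM_POST_VMRUN_CMD below).
 */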

struct svm_test *v2_test;

#define ASM_PRE_VMRUN_CMD                       \
                "vmload %%rax\n\t"              \
                "mov regs+0x80, %%r15\n\t"      \
                "mov %%r15, 0x170(%%rax)\n\t"   \
                "mov regs, %%r15\n\t"           \
                "mov %%r15, 0x1f8(%%rax)\n\t"   \
                LOAD_GPR_C

#define ASM_POST_VMRUN_CMD                      \
                SAVE_GPR_C                      \
                "mov 0x170(%%rax), %%r15\n\t"   \
                "mov %%r15, regs+0x80\n\t"      \
                "mov 0x1f8(%%rax), %%r15\n\t"   \
                "mov %%r15, regs\n\t"           \
                "vmsave %%rax\n\t"

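/* x86 stacks grow down; guest RSP is pointed at guest_stack + ARRAY_SIZE(guest_stack). */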
u64 guest_stack[10000];

int svm_vmrun(void)
{
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)v2_test;

	asm volatile (
		ASM_PRE_VMRUN_CMD
		"vmrun %%rax\n\t"
		ASM_POST_VMRUN_CMD
		:
		: "a" (virt_to_phys(vmcb))
		: "memory", "r15");

	return (vmcb->control.exit_code);
}
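
/*
 * Typical v2-style usage (a sketch; my_guest and my_v2_test are
 * hypothetical names, not part of this file):
 *
 *	static void my_guest(struct svm_test *test)
 *	{
 *		// guest code; test_thunk() issues VMMCALL when it returns
 *	}
 *
 *	static void my_v2_test(void)
 *	{
 *		test_set_guest(my_guest);
 *		report(svm_vmrun() == SVM_EXIT_VMMCALL, "guest exited via VMMCALL");
 *	}
 */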

extern u64 *vmrun_rip;

static void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	irq_disable();
	vmcb_ident(vmcb);

	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
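	/*
	 * CLGI clears the Global Interrupt Flag: even with IF set by STI,
	 * no interrupt can be taken until STGI runs after the VMRUN below.
	 * The prepare_gif_clear() hook runs in exactly this window.
	 */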
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		asm volatile (
			"clgi;\n\t" // semi-colon needed for LLVM compatibility
			"sti \n\t"
			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n\t"
			"mov %[vmcb_phys], %%rax \n\t"
			ASM_PRE_VMRUN_CMD
			".global vmrun_rip\n\t"
			"vmrun_rip: vmrun %%rax\n\t"
			ASM_POST_VMRUN_CMD
			"cli \n\t"
			"stgi"
			: // inputs clobbered by the guest:
			"=D" (the_test),            // first argument register
			"=b" (the_vmcb)             // callee save register!
			: [test] "0" (the_test),
			[vmcb_phys] "1"(the_vmcb),
			[PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			: "rax", "rcx", "rdx", "rsi",
			"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
			"memory");
		++test->exits;
	} while (!test->finished(test));
	irq_enable();

	report(test->succeeded(test), "%s", test->name);

	if (test->on_vcpu)
		test->on_vcpu_done = true;
}

static void set_additional_vcpu_msr(void *msr_efer)
{
	void *hsave = alloc_page();

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
}

static void setup_svm(void)
{
	void *hsave = alloc_page();
	u64 *page, address;
	int i, j;

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);

	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);

	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);

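	/*
	 * EFER.SVME must be set on every CPU that will execute VMRUN, so
	 * enable SVM on the secondary CPUs before the NPT check below:
	 * SMP tests have to work even without nested paging.
	 */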
	for (i = 1; i < cpu_count(); i++)
		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));

	if (!npt_supported())
		return;

	printf("NPT detected - running all tests with NPT enabled\n");

	/*
	 * Nested paging supported - build a nested page table.
	 * Build the page table bottom-up and map everything with 4k
	 * pages to get enough granularity for the NPT unit tests.
	 */

	address = 0;

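	/*
	 * Leaf PTEs get flags 0x067 (present, writable, user, accessed,
	 * dirty); non-leaf entries get 0x027 (the same minus dirty).
	 */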
	/* PTE level */
	for (i = 0; i < 2048; ++i) {
		page = alloc_page();

		for (j = 0; j < 512; ++j, address += 4096)
			page[j] = address | 0x067ULL;

		pte[i] = page;
	}

	/* PDE level */
	for (i = 0; i < 4; ++i) {
		page = alloc_page();

		for (j = 0; j < 512; ++j)
			page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;

		pde[i] = page;
	}

	/* PDPE level */
	pdpe = alloc_page();
	for (i = 0; i < 4; ++i)
		pdpe[i] = ((u64)(pde[i])) | 0x27;

	/* PML4E level */
	pml4e = alloc_page();
	pml4e[0] = ((u64)pdpe) | 0x27;
}

int matched;

static bool
test_wanted(const char *name, char *filters[], int filter_count)
{
        int i;
        bool positive = false;
        bool match = false;
        char clean_name[strlen(name) + 1];
        char *c;
        const char *n;

        /* Replace spaces with underscores. */
        n = name;
        c = &clean_name[0];
        do *c++ = (*n == ' ') ? '_' : *n;
        while (*n++);

        for (i = 0; i < filter_count; i++) {
                const char *filter = filters[i];

                if (filter[0] == '-') {
                        if (simple_glob(clean_name, filter + 1))
                                return false;
                } else {
                        positive = true;
                        match |= simple_glob(clean_name, filter);
                }
        }

        if (!positive || match) {
                matched++;
                return true;
        } else {
                return false;
        }
}
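
/*
 * Filter semantics, with hypothetical test names: with no positive
 * filters every test runs; "-msr" runs everything except tests matching
 * "msr"; "vmrun* -vmrun_intercept" runs only tests matching "vmrun*"
 * minus "vmrun_intercept". Spaces in test names are matched as
 * underscores in filters.
 */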

int main(int ac, char **av)
{
	/* Omit PT_USER_MASK to allow tested host.CR4.SMEP=1. */
	pteval_t opt_mask = 0;
	int i = 0;

	ac--;
	av++;

	__setup_vm(&opt_mask);

	if (!this_cpu_has(X86_FEATURE_SVM)) {
		printf("SVM not available\n");
		return report_summary();
	}

	setup_svm();

	vmcb = alloc_page();

	for (; svm_tests[i].name != NULL; i++) {
		if (!test_wanted(svm_tests[i].name, av, ac))
			continue;
		if (svm_tests[i].supported && !svm_tests[i].supported())
			continue;
		if (svm_tests[i].v2 == NULL) {
			if (svm_tests[i].on_vcpu) {
				if (cpu_count() <= svm_tests[i].on_vcpu)
					continue;
				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
				while (!svm_tests[i].on_vcpu_done)
					cpu_relax();
			} else
				test_run(&svm_tests[i]);
		} else {
			vmcb_ident(vmcb);
			v2_test = &(svm_tests[i]);
			svm_tests[i].v2();
		}
	}

	if (!matched)
		report(matched, "command line didn't match any tests!");

	return report_summary();
}