/*
 * Framework for testing nested virtualization
 */

#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"
#include "vmalloc.h"

/* for the nested page table */
u64 *pte[2048];
u64 *pde[4];
u64 *pdpe;
u64 *pml4e;

struct vmcb *vmcb;

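/*
 * The nested page table built by setup_svm() maps the first 4GiB of
 * guest-physical memory with 4KiB pages: 2048 PTE pages x 512 entries
 * x 4KiB = 4GiB.  The lookup helpers below therefore split a
 * guest-physical address into a PTE-page index (address bits 31:21)
 * and an entry index (bits 20:12), and similarly one level up.
 */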
u64 *npt_get_pte(u64 address)
{
	int i1, i2;

	address >>= 12;
	i1 = (address >> 9) & 0x7ff;
	i2 = address & 0x1ff;

	return &pte[i1][i2];
}

u64 *npt_get_pde(u64 address)
{
	int i1, i2;

	address >>= 21;
	i1 = (address >> 9) & 0x3;
	i2 = address & 0x1ff;

	return &pde[i1][i2];
}

u64 *npt_get_pdpe(void)
{
	return pdpe;
}

u64 *npt_get_pml4e(void)
{
	return pml4e;
}

bool smp_supported(void)
{
	return cpu_count() > 1;
}

bool default_supported(void)
{
	return true;
}

bool vgif_supported(void)
{
	return this_cpu_has(X86_FEATURE_VGIF);
}

void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}

void default_prepare_gif_clear(struct svm_test *test)
{
}

bool default_finished(struct svm_test *test)
{
	return true; /* one vmexit */
}

bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}

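/*
 * test->scratch is shared between the host and the guest (and, for
 * on_vcpu tests, between CPUs), so the stage accessors use compiler
 * barriers to force scratch to be re-read from memory instead of being
 * cached in a register across a VMRUN/VMEXIT.
 */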
int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}

void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}

void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
			 u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}

static test_guest_func guest_main;

void test_set_guest(test_guest_func func)
{
	guest_main = func;
}

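/*
 * Guest entry point: run the test's guest function, then execute
 * VMMCALL to force a #VMEXIT back to the host.  VMMCALL is always
 * intercepted (see vmcb_ident()), so it never runs natively.
 */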
static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}

u8 *io_bitmap;
u8 io_bitmap_area[16384];

u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];

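/*
 * Initialize a VMCB whose guest state mirrors the host: VMSAVE fills
 * in FS/GS/TR/LDTR and the syscall/sysenter MSR state, and the rest is
 * copied from the host registers by hand.  Guests therefore start in
 * 64-bit mode on the host's page tables, with only VMRUN and VMMCALL
 * intercepted by default.
 */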
void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		ctrl->nested_cr3 = (u64)pml4e;
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}

struct regs regs;

struct regs get_regs(void)
{
	return regs;
}

// rax handled specially below

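/*
 * The constants below are byte offsets into the global struct regs
 * (rbx at +0x8 up through r15 at +0x78; +0x0 is rax and +0x80 is
 * rflags, both handled by ASM_PRE/POST_VMRUN_CMD, and +0x20 is
 * skipped, presumably for cr2).  XCHG saves the guest value and
 * reloads the saved host value in a single instruction, which is why
 * LOAD_GPR_C can simply reuse SAVE_GPR_C.
 */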
#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C      SAVE_GPR_C

struct svm_test *v2_test;

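/*
 * %rax holds the VMCB physical address across VMRUN.  Before entry,
 * VMLOAD loads the extra guest state saved by vmcb_ident()'s VMSAVE,
 * regs.rax and regs.rflags are stashed at offsets 0x1f8 and 0x170
 * (these match the architectural positions of RAX and RFLAGS in the
 * VMCB state save area per the AMD APM), and the remaining GPRs are
 * swapped in.  ASM_POST_VMRUN_CMD undoes each step in reverse order.
 */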
#define ASM_PRE_VMRUN_CMD                       \
                "vmload %%rax\n\t"              \
                "mov regs+0x80, %%r15\n\t"      \
                "mov %%r15, 0x170(%%rax)\n\t"   \
                "mov regs, %%r15\n\t"           \
                "mov %%r15, 0x1f8(%%rax)\n\t"   \
                LOAD_GPR_C

#define ASM_POST_VMRUN_CMD                      \
                SAVE_GPR_C                      \
                "mov 0x170(%%rax), %%r15\n\t"   \
                "mov %%r15, regs+0x80\n\t"      \
                "mov 0x1f8(%%rax), %%r15\n\t"   \
                "mov %%r15, regs\n\t"           \
                "vmsave %%rax\n\t"

u64 guest_stack[10000];

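/*
 * VMRUN helper for v2-style tests: point the guest at test_thunk()
 * with a fresh stack, pass v2_test in %rdi, enter the guest and return
 * the raw exit code.  Typical caller (a sketch; guest_fn is
 * hypothetical):
 *
 *	test_set_guest(guest_fn);
 *	report(svm_vmrun() == SVM_EXIT_VMMCALL, "guest ran to vmmcall");
 */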
int svm_vmrun(void)
{
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)v2_test;

	asm volatile (
		ASM_PRE_VMRUN_CMD
		"vmrun %%rax\n\t"
		ASM_POST_VMRUN_CMD
		:
		: "a" (virt_to_phys(vmcb))
		: "memory", "r15");

	return (vmcb->control.exit_code);
}

extern u64 *vmrun_rip;

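/*
 * Driver loop for v1-style tests.  GIF is cleared around each VMRUN so
 * that the test's prepare_gif_clear() hook runs with interrupts gated
 * even though RFLAGS.IF is set, and the guest is re-entered until
 * test->finished() returns true.  RDI and RBX are declared as in/out
 * operands because the guest may clobber them.
 */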
static void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	irq_disable();
	vmcb_ident(vmcb);

	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		asm volatile (
			"clgi;\n\t" // semi-colon needed for LLVM compatibility
			"sti \n\t"
			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t"
			"mov %[vmcb_phys], %%rax \n\t"
			ASM_PRE_VMRUN_CMD
			".global vmrun_rip\n\t"
			"vmrun_rip: vmrun %%rax\n\t"
			ASM_POST_VMRUN_CMD
			"cli \n\t"
			"stgi"
			: // inputs clobbered by the guest:
			"=D" (the_test),            // first argument register
			"=b" (the_vmcb)             // callee save register!
			: [test] "0" (the_test),
			[vmcb_phys] "1"(the_vmcb),
			[PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			: "rax", "rcx", "rdx", "rsi",
			"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
			"memory");
		++test->exits;
	} while (!test->finished(test));
	irq_enable();

	report(test->succeeded(test), "%s", test->name);

	if (test->on_vcpu)
		test->on_vcpu_done = true;
}

static void set_additional_vcpu_msr(void *msr_efer)
{
	void *hsave = alloc_page();

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
}

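/*
 * Enable SVM on the boot CPU (and on all secondary CPUs): allocate the
 * host save area, set EFER.SVME, and page-align the I/O and MSR
 * permission bitmaps that vmcb_ident() points every VMCB at.  If NPT
 * is available, also build the nested page table shared by all tests.
 */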
static void setup_svm(void)
{
	void *hsave = alloc_page();
	u64 *page, address;
	int i, j;

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);

	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);

	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);

	/* EFER.SVME is per-CPU, so set it on the additional vCPUs too,
	 * whether or not NPT is available. */
	for (i = 1; i < cpu_count(); i++)
		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));

	if (!npt_supported())
		return;

	printf("NPT detected - running all tests with NPT enabled\n");

	/*
	 * Nested paging supported - Build a nested page table.
	 * Build the page-table bottom-up and map everything with 4k
	 * pages to get enough granularity for the NPT unit-tests.
	 */

	address = 0;

	/* PTE level */
	for (i = 0; i < 2048; ++i) {
		page = alloc_page();

		for (j = 0; j < 512; ++j, address += 4096)
			page[j] = address | 0x067ULL;

		pte[i] = page;
	}

	/* PDE level */
	for (i = 0; i < 4; ++i) {
		page = alloc_page();

		for (j = 0; j < 512; ++j)
			page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;

		pde[i] = page;
	}

	/* PDPe level */
	pdpe   = alloc_page();
	for (i = 0; i < 4; ++i)
		pdpe[i] = ((u64)(pde[i])) | 0x27;

	/* PML4e level */
	pml4e    = alloc_page();
	pml4e[0] = ((u64)pdpe) | 0x27;
}

int matched;

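/*
 * Command-line filtering: each argument is a glob matched against the
 * test name (with spaces rewritten to underscores).  A leading '-'
 * turns a pattern into an exclusion; if no positive pattern is given,
 * every test that is not excluded runs.  E.g. "npt_* -npt_rw"
 * (hypothetical patterns) would select the NPT tests except npt_rw.
 */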
static bool
test_wanted(const char *name, char *filters[], int filter_count)
{
        int i;
        bool positive = false;
        bool match = false;
        char clean_name[strlen(name) + 1];
        char *c;
        const char *n;

        /* Replace spaces with underscores. */
        n = name;
        c = &clean_name[0];
        do *c++ = (*n == ' ') ? '_' : *n;
        while (*n++);

        for (i = 0; i < filter_count; i++) {
                const char *filter = filters[i];

                if (filter[0] == '-') {
                        if (simple_glob(clean_name, filter + 1))
                                return false;
                } else {
                        positive = true;
                        match |= simple_glob(clean_name, filter);
                }
        }

        if (!positive || match) {
                matched++;
                return true;
        } else {
                return false;
        }
}

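/*
 * Tests come in two flavors: v1 tests supply prepare/guest_func/
 * finished/succeeded callbacks and are driven by test_run(), possibly
 * on another vCPU; v2 tests supply a single v2() entry point and call
 * svm_vmrun() themselves.
 */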
int main(int ac, char **av)
{
	/* Omit PT_USER_MASK to allow tested host.CR4.SMEP=1. */
	pteval_t opt_mask = 0;
	int i = 0;

	ac--;
	av++;

	__setup_vm(&opt_mask);

	if (!this_cpu_has(X86_FEATURE_SVM)) {
		printf("SVM not available\n");
		return report_summary();
	}

	setup_svm();

	vmcb = alloc_page();

	for (; svm_tests[i].name != NULL; i++) {
		if (!test_wanted(svm_tests[i].name, av, ac))
			continue;
		if (svm_tests[i].supported && !svm_tests[i].supported())
			continue;
		if (svm_tests[i].v2 == NULL) {
			if (svm_tests[i].on_vcpu) {
				if (cpu_count() <= svm_tests[i].on_vcpu)
					continue;
				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
				while (!svm_tests[i].on_vcpu_done)
					cpu_relax();
			} else {
				test_run(&svm_tests[i]);
			}
		} else {
			vmcb_ident(vmcb);
			v2_test = &(svm_tests[i]);
			svm_tests[i].v2();
		}
	}

	if (!matched)
		report(matched, "command line didn't match any tests!");

	return report_summary();
}