xref: /kvm-unit-tests/x86/svm.c (revision cf851077f97ed6922333d9ad3157d1c1253b58b8)
1 /*
2  * Framework for testing nested virtualization
3  */
4 
5 #include "svm.h"
6 #include "libcflat.h"
7 #include "processor.h"
8 #include "desc.h"
9 #include "msr.h"
10 #include "vm.h"
11 #include "smp.h"
12 #include "types.h"
13 #include "alloc_page.h"
14 #include "isr.h"
15 #include "apic.h"
16 
17 /* for the nested page table*/
/*
 * Nested page table used by the NPT tests: built bottom-up in
 * setup_svm(), it maps the first 4GB with 4k pages (2048 PTE pages,
 * 4 PDE pages, one PDPE page and one PML4 page).
 */
u64 *pte[2048];
u64 *pde[4];
u64 *pdpe;
u64 *pml4e;

/* VMCB shared by all tests; reinitialized via vmcb_ident() per test. */
struct vmcb *vmcb;
24 
25 u64 *npt_get_pte(u64 address)
26 {
27 	int i1, i2;
28 
29 	address >>= 12;
30 	i1 = (address >> 9) & 0x7ff;
31 	i2 = address & 0x1ff;
32 
33 	return &pte[i1][i2];
34 }
35 
36 u64 *npt_get_pde(u64 address)
37 {
38 	int i1, i2;
39 
40 	address >>= 21;
41 	i1 = (address >> 9) & 0x3;
42 	i2 = address & 0x1ff;
43 
44 	return &pde[i1][i2];
45 }
46 
/* Return the nested PDPE page built by setup_svm(). */
u64 *npt_get_pdpe(void)
{
	return pdpe;
}
51 
/* Return the nested PML4 page built by setup_svm(). */
u64 *npt_get_pml4e(void)
{
	return pml4e;
}
56 
57 bool smp_supported(void)
58 {
59 	return cpu_count() > 1;
60 }
61 
/* Default ->supported callback: the test runs on any SVM-capable host. */
bool default_supported(void)
{
	return true;
}
66 
/* Default ->prepare callback: reset the shared VMCB to identity state. */
void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}
71 
/* Default ->prepare_gif_clear callback: nothing to do while GIF is clear. */
void default_prepare_gif_clear(struct svm_test *test)
{
}
75 
/* Default ->finished callback: stop after a single #VMEXIT. */
bool default_finished(struct svm_test *test)
{
	return true;
}
80 
/* True when the CPU advertises Nested Paging (CPUID NPT feature bit). */
bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}
85 
/*
 * Read the test's stage counter; the compiler barrier forces a fresh
 * load since the counter may be advanced from guest or another vCPU.
 */
int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}
91 
/* Set the test's stage counter, fenced by compiler barriers. */
void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}
98 
/* Advance the test's stage counter by one, fenced by compiler barriers. */
void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}
105 
106 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
107                          u64 base, u32 limit, u32 attr)
108 {
109 	seg->selector = selector;
110 	seg->attrib = attr;
111 	seg->limit = limit;
112 	seg->base = base;
113 }
114 
/* Issue VMMCALL — the guest's way to force an intercepted #VMEXIT here. */
inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}
119 
/* Guest entry point invoked by test_thunk() after VMRUN. */
static test_guest_func guest_main;

/* Register the function the nested guest will execute on the next run. */
void test_set_guest(test_guest_func func)
{
	guest_main = func;
}
126 
/*
 * Guest-side trampoline: run the registered guest function, then exit
 * back to the host via VMMCALL (intercepted by vmcb_ident()'s setup).
 */
static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}
132 
/* I/O permission bitmap; the backing area is oversized so the pointer
 * can be page-aligned in setup_svm(). */
u8 *io_bitmap;
u8 io_bitmap_area[16384];

/* MSR permission bitmap; likewise oversized by a page for alignment. */
u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];
138 
/*
 * Initialize @vmcb to a minimal runnable guest that mirrors the host:
 * VMSAVE captures the host's hidden state, the flat 64-bit segments and
 * descriptor tables are copied explicitly, control registers and MSRs
 * mirror the host's, and only VMRUN/VMMCALL are intercepted.  When NPT
 * is supported, nested paging is enabled with setup_svm()'s page table.
 */
void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	/* Read/write data segment: present, 32-bit default, 4k granularity. */
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	/* Execute/read code segment: present, long mode, 4k granularity. */
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
	/* Let hardware fill in FS/GS/TR/LDTR and friends. */
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	/* ASID 0 is reserved for the host; give the guest ASID 1. */
	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		/* NOTE(review): stores a virtual address as nCR3 — appears to
		 * rely on identity mapping; confirm vs. virt_to_phys(pml4e). */
		ctrl->nested_cr3 = (u64)pml4e;
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}
181 
/* Guest GPR file, exchanged with hardware registers around VMRUN. */
struct regs regs;

/* Return a copy of the guest's registers from the last VMRUN. */
struct regs get_regs(void)
{
	return regs;
}
188 
// rax handled specially below

/*
 * Exchange rbx..r15 (rax and rsp are handled via the VMCB) with their
 * slots in the global 'regs' struct; the offsets follow struct regs'
 * layout (0x20 is skipped — cr2's slot).  Because xchg both saves the
 * current registers and loads the saved ones, the same sequence works
 * in both directions and LOAD_GPR_C is simply an alias.
 */
#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C      SAVE_GPR_C

/* Test currently being driven by the v2 (svm_vmrun) harness. */
struct svm_test *v2_test;
210 
/*
 * Executed with %rax holding the VMCB physical address.  VMLOAD pulls in
 * the extra guest state, then regs.rflags (regs+0x80) and regs.rax
 * (regs+0) are stored into the VMCB at hard-coded offsets 0x170/0x1f8
 * before the remaining GPRs are swapped in with LOAD_GPR_C.
 * NOTE(review): 0x170/0x1f8 equal the save-area-relative offsets of
 * rflags/rax, yet %rax points at the VMCB base — verify against struct
 * vmcb; offsetof()-based operands would remove the magic numbers.
 */
#define ASM_PRE_VMRUN_CMD                       \
                "vmload %%rax\n\t"              \
                "mov regs+0x80, %%r15\n\t"      \
                "mov %%r15, 0x170(%%rax)\n\t"   \
                "mov regs, %%r15\n\t"           \
                "mov %%r15, 0x1f8(%%rax)\n\t"   \
                LOAD_GPR_C                      \

/* Mirror image of ASM_PRE_VMRUN_CMD, run immediately after #VMEXIT. */
#define ASM_POST_VMRUN_CMD                      \
                SAVE_GPR_C                      \
                "mov 0x170(%%rax), %%r15\n\t"   \
                "mov %%r15, regs+0x80\n\t"      \
                "mov 0x1f8(%%rax), %%r15\n\t"   \
                "mov %%r15, regs\n\t"           \
                "vmsave %%rax\n\t"              \

/* Stack for the nested guest (grows down from the array's end). */
u64 guest_stack[10000];
228 
/*
 * Enter the guest once with the function registered via test_set_guest()
 * and return the SVM exit code.  Guest GPRs are exchanged with the
 * global 'regs' around VMRUN; used by v2-style tests.
 */
int svm_vmrun(void)
{
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)v2_test;	/* first argument to guest_main */

	asm volatile (
		ASM_PRE_VMRUN_CMD
                "vmrun %%rax\n\t"               \
		ASM_POST_VMRUN_CMD
		:
		: "a" (virt_to_phys(vmcb))
		: "memory", "r15");

	return (vmcb->control.exit_code);
}
245 
/* Label placed on the VMRUN instruction so tests can locate/patch it. */
extern u64 *vmrun_rip;

/*
 * Run one v1-style test to completion: build the VMCB, then loop —
 * clear GIF, call the test's prepare_gif_clear hook, VMRUN inside the
 * CLGI/STGI window — until ->finished() says stop, and report
 * ->succeeded().  May execute on an AP when the test sets ->on_vcpu.
 */
static void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	irq_disable();
	vmcb_ident(vmcb);

	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;	/* first argument to the guest function */
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		asm volatile (
			"clgi;\n\t" // semi-colon needed for LLVM compatibility
			"sti \n\t"
			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t"
			"mov %[vmcb_phys], %%rax \n\t"
			ASM_PRE_VMRUN_CMD
			".global vmrun_rip\n\t"		\
			"vmrun_rip: vmrun %%rax\n\t"    \
			ASM_POST_VMRUN_CMD
			"cli \n\t"
			"stgi"
			: // inputs clobbered by the guest:
			"=D" (the_test),            // first argument register
			"=b" (the_vmcb)             // callee save register!
			: [test] "0" (the_test),
			[vmcb_phys] "1"(the_vmcb),
			[PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			: "rax", "rcx", "rdx", "rsi",
			"r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
			"memory");
		++test->exits;
	} while (!test->finished(test));
	irq_enable();

	report(test->succeeded(test), "%s", test->name);

	/* Signal the BSP's wait loop when running on an AP. */
        if (test->on_vcpu)
	    test->on_vcpu_done = true;
}
292 
/*
 * Per-AP setup run via on_cpu(): give the CPU its own host save area
 * and enable SVM (and NX) in EFER.  @msr_efer carries the BSP's EFER
 * value smuggled through on_cpu()'s void* argument.
 */
static void set_additional_vcpu_msr(void *msr_efer)
{
	void *hsave = alloc_page();

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME | EFER_NX);
}
300 
301 static void setup_svm(void)
302 {
303 	void *hsave = alloc_page();
304 	u64 *page, address;
305 	int i,j;
306 
307 	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
308 	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
309 	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
310 
311 	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);
312 
313 	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);
314 
315 	if (!npt_supported())
316 		return;
317 
318 	for (i = 1; i < cpu_count(); i++)
319 		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));
320 
321 	printf("NPT detected - running all tests with NPT enabled\n");
322 
323 	/*
324 	* Nested paging supported - Build a nested page table
325 	* Build the page-table bottom-up and map everything with 4k
326 	* pages to get enough granularity for the NPT unit-tests.
327 	*/
328 
329 	address = 0;
330 
331 	/* PTE level */
332 	for (i = 0; i < 2048; ++i) {
333 		page = alloc_page();
334 
335 		for (j = 0; j < 512; ++j, address += 4096)
336 	    		page[j] = address | 0x067ULL;
337 
338 		pte[i] = page;
339 	}
340 
341 	/* PDE level */
342 	for (i = 0; i < 4; ++i) {
343 		page = alloc_page();
344 
345 	for (j = 0; j < 512; ++j)
346 	    page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
347 
348 		pde[i] = page;
349 	}
350 
351 	/* PDPe level */
352 	pdpe   = alloc_page();
353 	for (i = 0; i < 4; ++i)
354 		pdpe[i] = ((u64)(pde[i])) | 0x27;
355 
356 	/* PML4e level */
357 	pml4e    = alloc_page();
358 	pml4e[0] = ((u64)pdpe) | 0x27;
359 }
360 
/* Number of tests selected by the command-line filters. */
int matched;
362 
363 static bool
364 test_wanted(const char *name, char *filters[], int filter_count)
365 {
366         int i;
367         bool positive = false;
368         bool match = false;
369         char clean_name[strlen(name) + 1];
370         char *c;
371         const char *n;
372 
373         /* Replace spaces with underscores. */
374         n = name;
375         c = &clean_name[0];
376         do *c++ = (*n == ' ') ? '_' : *n;
377         while (*n++);
378 
379         for (i = 0; i < filter_count; i++) {
380                 const char *filter = filters[i];
381 
382                 if (filter[0] == '-') {
383                         if (simple_glob(clean_name, filter + 1))
384                                 return false;
385                 } else {
386                         positive = true;
387                         match |= simple_glob(clean_name, filter);
388                 }
389         }
390 
391         if (!positive || match) {
392                 matched++;
393                 return true;
394         } else {
395                 return false;
396         }
397 }
398 
399 int main(int ac, char **av)
400 {
401 	int i = 0;
402 
403 	ac--;
404 	av++;
405 
406 	setup_vm();
407 
408 	if (!this_cpu_has(X86_FEATURE_SVM)) {
409 		printf("SVM not availble\n");
410 		return report_summary();
411 	}
412 
413 	setup_svm();
414 
415 	vmcb = alloc_page();
416 
417 	for (; svm_tests[i].name != NULL; i++) {
418 		if (!test_wanted(svm_tests[i].name, av, ac))
419 			continue;
420 		if (svm_tests[i].supported && !svm_tests[i].supported())
421 			continue;
422 		if (svm_tests[i].v2 == NULL) {
423 			if (svm_tests[i].on_vcpu) {
424 				if (cpu_count() <= svm_tests[i].on_vcpu)
425 					continue;
426 				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
427 				while (!svm_tests[i].on_vcpu_done)
428 					cpu_relax();
429 			}
430 			else
431 				test_run(&svm_tests[i]);
432 		} else {
433 			vmcb_ident(vmcb);
434 			v2_test = &(svm_tests[i]);
435 			svm_tests[i].v2();
436 		}
437 	}
438 
439 	if (!matched)
440 		report(matched, "command line didn't match any tests!");
441 
442 	return report_summary();
443 }
444