xref: /kvm-unit-tests/x86/svm.c (revision 2c96b77ec9d3b1fcec7525174e23a6240ee05949)
/*
 * Framework for testing nested virtualization
 */

#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"
#include "vmalloc.h"

/* for the nested page table */
u64 *pte[2048];
u64 *pde[4];
u64 *pdpe;
u64 *pml4e;
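
/*
 * setup_svm() builds this bottom-up: one PML4E -> one PDPE page (4
 * entries used) -> 4 PDE pages -> 2048 PTE pages, identity-mapping
 * 2048 * 512 * 4 KiB = 4 GiB of guest-physical memory with 4k pages.
 */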

struct vmcb *vmcb;

u64 *npt_get_pte(u64 address)
{
	int i1, i2;

	address >>= 12;
	i1 = (address >> 9) & 0x7ff;
	i2 = address & 0x1ff;

	return &pte[i1][i2];
}
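
/*
 * Usage sketch (hypothetical test code): grab the NPT entry mapping a
 * scratch page and clear its Present bit to force a nested page fault
 * on the next guest access.  "scratch_page" is an illustrative name.
 *
 *	u64 *npte = npt_get_pte((u64)scratch_page);
 *
 *	*npte &= ~1ULL;	// clear P; the NPT walk now faults
 *	vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 *
 * For guest-physical address 0x12345678 this returns
 * &pte[0x91][0x145]: bits 31:21 select one of the 2048 PTE pages and
 * bits 20:12 the entry within it.
 */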

u64 *npt_get_pde(u64 address)
{
	int i1, i2;

	address >>= 21;
	i1 = (address >> 9) & 0x3;
	i2 = address & 0x1ff;

	return &pde[i1][i2];
}

u64 *npt_get_pdpe(void)
{
	return pdpe;
}

u64 *npt_get_pml4e(void)
{
	return pml4e;
}

bool smp_supported(void)
{
	return cpu_count() > 1;
}

bool default_supported(void)
{
	return true;
}

bool vgif_supported(void)
{
	return this_cpu_has(X86_FEATURE_VGIF);
}

void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}

void default_prepare_gif_clear(struct svm_test *test)
{
}

bool default_finished(struct svm_test *test)
{
	return true; /* one vmexit */
}

bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}

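/*
 * test->scratch serves as a stage counter shared between the guest and
 * host halves of a test; the barrier()s keep the compiler from caching
 * its value across the VMRUN/VMEXIT boundary.
 */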
int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}

void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}

void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
			 u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}
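
/*
 * VMMCALL is intercepted in vmcb_ident(), so executing it in the guest
 * returns to the host with exit code SVM_EXIT_VMMCALL; test_thunk()
 * below uses it to signal that the guest function has finished.
 */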

static test_guest_func guest_main;

void test_set_guest(test_guest_func func)
{
	guest_main = func;
}

static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}

u8 *io_bitmap;
u8 io_bitmap_area[16384];

u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];
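
/*
 * The backing arrays are oversized so that setup_svm() can carve
 * page-aligned bitmaps out of them with ALIGN(); the iopm/msrpm base
 * addresses programmed into the VMCB must be page-aligned.
 */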
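
/*
 * Initialize a VMCB so that the guest starts out as a near clone of
 * the current host state: same segments, control registers, EFER and
 * PAT, with ASID 1 and intercepts armed for VMRUN and VMMCALL.
 */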
void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		ctrl->nested_cr3 = (u64)pml4e; /* alloc_page() memory is identity-mapped: virt == phys */
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}

struct regs regs;

struct regs get_regs(void)
{
	return regs;
}

// rax handled specially below

#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C      SAVE_GPR_C
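
/*
 * The xchg's swap host and guest GPR values through the global "regs"
 * struct, which is why LOAD_GPR_C is the same macro: running it again
 * swaps the values back.  The offsets mirror struct regs (rbx at +0x8
 * ... r15 at +0x78); rax (regs+0x0) and rflags (regs+0x80) instead go
 * through the VMCB in ASM_PRE/POST_VMRUN_CMD below.
 */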

struct svm_test *v2_test;

#define ASM_PRE_VMRUN_CMD                       \
                "vmload %%rax\n\t"              \
                "mov regs+0x80, %%r15\n\t"      \
                "mov %%r15, 0x170(%%rax)\n\t"   \
                "mov regs, %%r15\n\t"           \
                "mov %%r15, 0x1f8(%%rax)\n\t"   \
                LOAD_GPR_C

#define ASM_POST_VMRUN_CMD                      \
                SAVE_GPR_C                      \
                "mov 0x170(%%rax), %%r15\n\t"   \
                "mov %%r15, regs+0x80\n\t"      \
                "mov 0x1f8(%%rax), %%r15\n\t"   \
                "mov %%r15, regs\n\t"           \
                "vmsave %%rax\n\t"

u64 guest_stack[10000];

int __svm_vmrun(u64 rip)
{
	vmcb->save.rip = (ulong)rip;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)v2_test;

	asm volatile (
		ASM_PRE_VMRUN_CMD
		"vmrun %%rax\n\t"
		ASM_POST_VMRUN_CMD
		:
		: "a" (virt_to_phys(vmcb))
		: "memory", "r15");

	return vmcb->control.exit_code;
}

int svm_vmrun(void)
{
	return __svm_vmrun((u64)test_thunk);
}
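
/*
 * Usage sketch (hypothetical v2-style test; "my_guest" and "my_test"
 * are illustrative names, not part of this file):
 *
 *	static void my_guest(struct svm_test *test)
 *	{
 *		vmmcall();	// hand control back to the host
 *	}
 *
 *	static void my_test(void)
 *	{
 *		test_set_guest(my_guest);
 *		report(svm_vmrun() == SVM_EXIT_VMMCALL, "guest exited via vmmcall");
 *	}
 */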

extern u8 vmrun_rip;

static noinline void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	irq_disable();
	vmcb_ident(vmcb);

	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
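		/*
		 * CLGI drops the global interrupt flag, so the STI below
		 * raises RFLAGS.IF without any interrupt actually being
		 * delivered while guest state is set up and loaded; the
		 * CLI + STGI pair after the exit restores normal host
		 * interrupt handling.
		 */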
		asm volatile (
			"clgi;\n\t" // semi-colon needed for LLVM compatibility
			"sti \n\t"
			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t"
			"mov %[vmcb_phys], %%rax \n\t"
			ASM_PRE_VMRUN_CMD
			".global vmrun_rip\n\t"
			"vmrun_rip: vmrun %%rax\n\t"
			ASM_POST_VMRUN_CMD
			"cli \n\t"
			"stgi"
			: // inputs clobbered by the guest:
			"=D" (the_test),            // first argument register
			"=b" (the_vmcb)             // callee save register!
			: [test] "0" (the_test),
			[vmcb_phys] "1"(the_vmcb),
			[PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			: "rax", "rcx", "rdx", "rsi",
			"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
			"memory");
		++test->exits;
	} while (!test->finished(test));
	irq_enable();

	report(test->succeeded(test), "%s", test->name);

	if (test->on_vcpu)
		test->on_vcpu_done = true;
}

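/*
 * Each CPU that executes VMRUN needs its own host save area and must
 * have EFER.SVME set; setup_svm() runs this on every additional CPU so
 * that on_vcpu tests can VMRUN there.
 */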
static void set_additional_vcpu_msr(void *msr_efer)
{
	void *hsave = alloc_page();

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
}

static void setup_svm(void)
{
	void *hsave = alloc_page();
	u64 *page, address;
	int i, j;

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);

	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);

	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);

	/* SVM is needed on every CPU that runs a test, NPT or not. */
	for (i = 1; i < cpu_count(); i++)
		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));

	if (!npt_supported())
		return;

	printf("NPT detected - running all tests with NPT enabled\n");

	/*
	 * Nested paging supported - Build a nested page table.
	 * Build the page table bottom-up and map everything with 4k
	 * pages to get enough granularity for the NPT unit tests.
	 */

	address = 0;

	/* PTE level */
	for (i = 0; i < 2048; ++i) {
		page = alloc_page();

		for (j = 0; j < 512; ++j, address += 4096)
			page[j] = address | 0x067ULL; /* P | RW | US | A | D */

		pte[i] = page;
	}

	/* PDE level */
	for (i = 0; i < 4; ++i) {
		page = alloc_page();

		for (j = 0; j < 512; ++j)
			page[j] = (u64)pte[(i * 512) + j] | 0x027ULL; /* P | RW | US | A */

		pde[i] = page;
	}

	/* PDPE level */
	pdpe = alloc_page();
	for (i = 0; i < 4; ++i)
		pdpe[i] = ((u64)pde[i]) | 0x27;

	/* PML4E level */
	pml4e = alloc_page();
	pml4e[0] = ((u64)pdpe) | 0x27;
}

int matched;

static bool
test_wanted(const char *name, char *filters[], int filter_count)
{
        int i;
        bool positive = false;
        bool match = false;
        char clean_name[strlen(name) + 1];
        char *c;
        const char *n;

        /* Replace spaces with underscores. */
        n = name;
        c = &clean_name[0];
        do *c++ = (*n == ' ') ? '_' : *n;
        while (*n++);

        for (i = 0; i < filter_count; i++) {
                const char *filter = filters[i];

                if (filter[0] == '-') {
                        if (simple_glob(clean_name, filter + 1))
                                return false;
                } else {
                        positive = true;
                        match |= simple_glob(clean_name, filter);
                }
        }

        if (!positive || match) {
                matched++;
                return true;
        } else {
                return false;
        }
}
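
/*
 * Usage sketch: test names (with spaces rewritten as underscores) are
 * matched against the command-line globs; a leading '-' excludes.  A
 * hypothetical invocation that runs all NPT tests except npt_rw:
 *
 *	./x86/run x86/svm.flat -append 'npt* -npt_rw'
 */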

int main(int ac, char **av)
{
	/* Omit PT_USER_MASK to allow the tested host to run with CR4.SMEP=1. */
	pteval_t opt_mask = 0;
	int i = 0;

	ac--;
	av++;

	__setup_vm(&opt_mask);

	if (!this_cpu_has(X86_FEATURE_SVM)) {
		printf("SVM not available\n");
		return report_summary();
	}

	setup_svm();

	vmcb = alloc_page();

	for (; svm_tests[i].name != NULL; i++) {
		if (!test_wanted(svm_tests[i].name, av, ac))
			continue;
		if (svm_tests[i].supported && !svm_tests[i].supported())
			continue;
		if (svm_tests[i].v2 == NULL) {
			if (svm_tests[i].on_vcpu) {
				if (cpu_count() <= svm_tests[i].on_vcpu)
					continue;
				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
				while (!svm_tests[i].on_vcpu_done)
					cpu_relax();
			} else {
				test_run(&svm_tests[i]);
			}
		} else {
			vmcb_ident(vmcb);
			v2_test = &svm_tests[i];
			svm_tests[i].v2();
		}
	}

	if (!matched)
		report(matched, "command line didn't match any tests!");

	return report_summary();
}