/*
 * Framework for testing nested virtualization
 */

#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"

/* For the nested page table */
u64 *pte[2048];
u64 *pde[4];
u64 *pdpe;
u64 *pml4e;

struct vmcb *vmcb;

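/*
 * The nested page table built by setup_svm() maps the first 4 GiB of
 * guest-physical memory with 4k pages: 2048 PTE pages x 512 entries.
 * The helpers below return a pointer to the entry covering @address at
 * each level; i1 picks the page-table page, i2 the entry within it.
 */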
u64 *npt_get_pte(u64 address)
{
	int i1, i2;

	address >>= 12;
	i1 = (address >> 9) & 0x7ff;
	i2 = address & 0x1ff;

	return &pte[i1][i2];
}

u64 *npt_get_pde(u64 address)
{
	int i1, i2;

	address >>= 21;
	i1 = (address >> 9) & 0x3;
	i2 = address & 0x1ff;

	return &pde[i1][i2];
}

u64 *npt_get_pdpe(void)
{
	return pdpe;
}

u64 *npt_get_pml4e(void)
{
	return pml4e;
}

bool smp_supported(void)
{
	return cpu_count() > 1;
}

bool default_supported(void)
{
	return true;
}

void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}

void default_prepare_gif_clear(struct svm_test *test)
{
}

bool default_finished(struct svm_test *test)
{
	return true; /* one vmexit */
}

bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}

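/*
 * The test stage is written by one side (host or guest, and for
 * on_vcpu tests possibly another CPU) and read by the other; the
 * compiler barriers force scratch to be re-read from memory rather
 * than cached in a register.
 */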
int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}

void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}

void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
                         u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}

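/*
 * Guest bodies installed with test_set_guest() run through
 * test_thunk(), which issues VMMCALL when the guest function returns;
 * VMMCALL is intercepted by vmcb_ident(), so the guest exits cleanly
 * back to the host.
 */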
static test_guest_func guest_main;

void test_set_guest(test_guest_func func)
{
	guest_main = func;
}

static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}

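/*
 * The SVM I/O permissions map is 12 KiB and the MSR permissions map is
 * MSR_BITMAP_SIZE bytes; both need page alignment, so the backing
 * arrays are oversized by up to a page and setup_svm() carves aligned
 * pointers out of them.
 */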
u8 *io_bitmap;
u8 io_bitmap_area[16384];

u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];

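/*
 * Initialize @vmcb so the guest starts out as a near copy of the host:
 * same segments, control registers, EFER, PAT and debug state, with
 * only VMRUN and VMMCALL intercepted.  VMSAVE fills in the state (FS,
 * GS, TR, LDTR and the syscall/sysenter MSRs) that is not copied by
 * hand below.
 */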
void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		ctrl->nested_cr3 = (u64)pml4e;
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}

struct regs regs;

struct regs get_regs(void)
{
	return regs;
}

/* rax is handled specially below */

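/*
 * Swap the guest's GPRs with the global struct regs; the constants are
 * the byte offsets of the corresponding fields in struct regs.  Since
 * xchg is its own inverse, running the sequence a second time restores
 * both sides, which is why LOAD_GPR_C is simply SAVE_GPR_C again.
 */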
#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C      SAVE_GPR_C

struct svm_test *v2_test;

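/*
 * Bracket VMRUN: VMLOAD the extra guest state, stash regs.rflags
 * (regs+0x80) and regs.rax (regs+0) into the VMCB via the hard-coded
 * RFLAGS and RAX save-area offsets (0x170 and 0x1f8), then swap in the
 * guest GPRs; the post macro reverses the process.  %rax must hold the
 * VMCB address throughout.
 */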
#define ASM_PRE_VMRUN_CMD                       \
                "vmload %%rax\n\t"              \
                "mov regs+0x80, %%r15\n\t"      \
                "mov %%r15, 0x170(%%rax)\n\t"   \
                "mov regs, %%r15\n\t"           \
                "mov %%r15, 0x1f8(%%rax)\n\t"   \
                LOAD_GPR_C

#define ASM_POST_VMRUN_CMD                      \
                SAVE_GPR_C                      \
                "mov 0x170(%%rax), %%r15\n\t"   \
                "mov %%r15, regs+0x80\n\t"      \
                "mov 0x1f8(%%rax), %%r15\n\t"   \
                "mov %%r15, regs\n\t"           \
                "vmsave %%rax\n\t"

u64 guest_stack[10000];

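/*
 * Enter the guest installed with test_set_guest() once and return the
 * raw exit code.  A minimal v2-style test looks like this (sketch;
 * my_guest is a hypothetical guest body):
 *
 *	test_set_guest(my_guest);
 *	report(svm_vmrun() == SVM_EXIT_VMMCALL, "guest ran to completion");
 */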
int svm_vmrun(void)
{
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)v2_test;

	asm volatile (
		ASM_PRE_VMRUN_CMD
		"vmrun %%rax\n\t"
		ASM_POST_VMRUN_CMD
		:
		: "a" (virt_to_phys(vmcb))
		: "memory", "r15");

	return (vmcb->control.exit_code);
}

extern u64 *vmrun_rip;

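/*
 * Run a v1-style test: call its prepare() hook, then re-enter the
 * guest until its finished() hook returns true.  Each iteration runs
 * with GIF cleared (CLGI ... STGI) and calls prepare_gif_clear() right
 * before VMRUN; the global vmrun_rip label lets tests locate the VMRUN
 * instruction itself.
 */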
static void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	irq_disable();
	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		asm volatile (
			"clgi;\n\t" // semi-colon needed for LLVM compatibility
			"sti \n\t"
			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n\t"
			"mov %[vmcb_phys], %%rax \n\t"
			ASM_PRE_VMRUN_CMD
			".global vmrun_rip\n\t"
			"vmrun_rip: vmrun %%rax\n\t"
			ASM_POST_VMRUN_CMD
			"cli \n\t"
			"stgi"
			: // inputs clobbered by the guest:
			"=D" (the_test),            // first argument register
			"=b" (the_vmcb)             // callee save register!
			: [test] "0" (the_test),
			  [vmcb_phys] "1"(the_vmcb),
			  [PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			: "rax", "rcx", "rdx", "rsi",
			  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
			  "memory");
		++test->exits;
	} while (!test->finished(test));
	irq_enable();

	report(test->succeeded(test), "%s", test->name);

	if (test->on_vcpu)
		test->on_vcpu_done = true;
}

static void set_additional_vcpu_msr(void *msr_efer)
{
	void *hsave = alloc_page();

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME | EFER_NX);
}

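/*
 * Global SVM setup: allocate the host save area, enable EFER.SVME (and
 * EFER.NX) on every CPU, page-align the permission bitmaps and, when
 * NPT is available, build the nested page table used by vmcb_ident().
 */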
static void setup_svm(void)
{
	void *hsave = alloc_page();
	u64 *page, address;
	int i, j;

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME | EFER_NX);

	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);

	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);

	for (i = 1; i < cpu_count(); i++)
		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));

	if (!npt_supported())
		return;

	printf("NPT detected - running all tests with NPT enabled\n");

	/*
	 * Nested paging supported - build a nested page table.  Build the
	 * page table bottom-up and map everything with 4k pages to get
	 * enough granularity for the NPT unit tests.
	 */

	address = 0;

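	/*
	 * 0x067 = present, writable, user, accessed and dirty; the upper
	 * levels use 0x027, without the dirty bit, which is only defined
	 * for leaf entries.
	 */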
	/* PTE level */
	for (i = 0; i < 2048; ++i) {
		page = alloc_page();

		for (j = 0; j < 512; ++j, address += 4096)
			page[j] = address | 0x067ULL;

		pte[i] = page;
	}

	/* PDE level */
	for (i = 0; i < 4; ++i) {
		page = alloc_page();

		for (j = 0; j < 512; ++j)
			page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;

		pde[i] = page;
	}

	/* PDPe level */
	pdpe = alloc_page();
	for (i = 0; i < 4; ++i)
		pdpe[i] = ((u64)(pde[i])) | 0x27;

	/* PML4e level */
	pml4e = alloc_page();
	pml4e[0] = ((u64)pdpe) | 0x27;
}

int matched;

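/*
 * A test is selected when no positive filter was given, or when at
 * least one positive filter globs its name (with spaces replaced by
 * underscores), and no "-pattern" filter matches it.
 */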
static bool
test_wanted(const char *name, char *filters[], int filter_count)
{
	int i;
	bool positive = false;
	bool match = false;
	char clean_name[strlen(name) + 1];
	char *c;
	const char *n;

	/* Replace spaces with underscores. */
	n = name;
	c = &clean_name[0];
	do *c++ = (*n == ' ') ? '_' : *n;
	while (*n++);

	for (i = 0; i < filter_count; i++) {
		const char *filter = filters[i];

		if (filter[0] == '-') {
			if (simple_glob(clean_name, filter + 1))
				return false;
		} else {
			positive = true;
			match |= simple_glob(clean_name, filter);
		}
	}

	if (!positive || match) {
		matched++;
		return true;
	} else {
		return false;
	}
}

int main(int ac, char **av)
{
	int i = 0;

	ac--;
	av++;

	setup_vm();

	if (!this_cpu_has(X86_FEATURE_SVM)) {
		printf("SVM not available\n");
		return report_summary();
	}

	setup_svm();

	vmcb = alloc_page();

	for (; svm_tests[i].name != NULL; i++) {
		if (!test_wanted(svm_tests[i].name, av, ac))
			continue;
		if (svm_tests[i].supported && !svm_tests[i].supported())
			continue;
		if (svm_tests[i].v2 == NULL) {
			if (svm_tests[i].on_vcpu) {
				if (cpu_count() <= svm_tests[i].on_vcpu)
					continue;
				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
				while (!svm_tests[i].on_vcpu_done)
					cpu_relax();
			} else {
				test_run(&svm_tests[i]);
			}
		} else {
			vmcb_ident(vmcb);
			v2_test = &svm_tests[i];
			svm_tests[i].v2();
		}
	}

	if (!matched)
		report(matched, "command line didn't match any tests!");

	return report_summary();
}