xref: /kvm-unit-tests/x86/svm.c (revision 48f6791013dafc10b5ce039f8c7d31efcadd2a64)
1 /*
2  * Framework for testing nested virtualization
3  */
4 
5 #include "svm.h"
6 #include "libcflat.h"
7 #include "processor.h"
8 #include "desc.h"
9 #include "msr.h"
10 #include "vm.h"
11 #include "smp.h"
12 #include "types.h"
13 #include "alloc_page.h"
14 #include "isr.h"
15 #include "apic.h"
16 
/*
 * Nested page table (NPT) mapping the first 4GB of guest-physical memory
 * with 4k pages (built bottom-up in setup_svm()):
 *   2048 PTE pages x 512 entries  = 4GB of 4k mappings
 *      4 PDE pages x 512 entries -> the PTE pages
 *      1 PDPe page (4 entries used) -> the PDE pages
 *      1 PML4e page (entry 0 used)  -> the PDPe page
 */
u64 *pte[2048];
u64 *pde[4];
u64 *pdpe;
u64 *pml4e;

/* VMCB shared by all tests; (re)initialized via vmcb_ident(). */
struct vmcb *vmcb;
24 
25 u64 *npt_get_pte(u64 address)
26 {
27 	int i1, i2;
28 
29 	address >>= 12;
30 	i1 = (address >> 9) & 0x7ff;
31 	i2 = address & 0x1ff;
32 
33 	return &pte[i1][i2];
34 }
35 
36 u64 *npt_get_pde(u64 address)
37 {
38 	int i1, i2;
39 
40 	address >>= 21;
41 	i1 = (address >> 9) & 0x3;
42 	i2 = address & 0x1ff;
43 
44 	return &pde[i1][i2];
45 }
46 
/* Return the (single) NPT PDPe page. */
u64 *npt_get_pdpe(void)
{
	return pdpe;
}
51 
/* Return the NPT root (PML4e) page. */
u64 *npt_get_pml4e(void)
{
	return pml4e;
}
56 
/* ->supported helper: true when more than one CPU is available. */
bool smp_supported(void)
{
	return cpu_count() > 1;
}
61 
/*
 * Default ->supported callback: the test needs nothing beyond SVM
 * itself (which main() already checked).
 *
 * Fix: body was indented with spaces; use a tab like the rest of the
 * file.
 */
bool default_supported(void)
{
	return true;
}
66 
/* Default ->prepare callback: just (re)initialize the shared VMCB. */
void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}
71 
/*
 * Default ->prepare_gif_clear callback: intentionally empty — most
 * tests have nothing to do in the GIF=0 window before VMRUN.
 */
void default_prepare_gif_clear(struct svm_test *test)
{
}
75 
/* Default ->finished callback: stop after a single vmexit. */
bool default_finished(struct svm_test *test)
{
	return true; /* one vmexit */
}
80 
/* True when the CPU advertises Nested Paging (NPT). */
bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}
85 
/*
 * Read the test's stage counter.  The barrier forces a re-read so the
 * value is not stale when guest and host poll it across vmexits.
 */
int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}
91 
/*
 * Set the test's stage counter to @s.  Barriers on both sides keep the
 * compiler from reordering the store relative to surrounding accesses.
 */
void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}
98 
/* Advance the test's stage counter by one (see set_test_stage()). */
void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}
105 
106 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
107                          u64 base, u32 limit, u32 attr)
108 {
109 	seg->selector = selector;
110 	seg->attrib = attr;
111 	seg->limit = limit;
112 	seg->base = base;
113 }
114 
/*
 * Execute VMMCALL.  In the guest this causes a vmexit, since
 * vmcb_ident() sets the VMMCALL intercept — it is how guest code
 * returns control to the host side of a test.
 */
inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}
119 
/* Guest entry point invoked by test_thunk() below. */
static test_guest_func guest_main;

/* Register @func as the guest code for the next svm_vmrun(). */
void test_set_guest(test_guest_func func)
{
	guest_main = func;
}
126 
/*
 * Guest-side trampoline: run the registered guest function, then exit
 * back to the host via VMMCALL (the guest must not simply return —
 * there is no valid return address on its stack).
 */
static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}
132 
/* I/O permission bitmap: page-aligned pointer into the static area below. */
u8 *io_bitmap;
u8 io_bitmap_area[16384];

/* MSR permission bitmap, aligned likewise in setup_svm(). */
u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];
138 
/*
 * Initialize @vmcb so the guest starts as an identity copy of the
 * current host context: same segments, GDT/IDT, control registers,
 * EFER, PAT and debug state.  Only VMRUN and VMMCALL are intercepted;
 * nested paging is enabled when supported.
 */
void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	/* Data segment: type 3 (read/write, accessed), present, 32-bit, 4k gran. */
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	/* Code segment: type 9 (execute, accessed), present, long mode, 4k gran. */
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
	/* VMSAVE fills in the state not set explicitly below (FS/GS/TR/LDTR etc.). */
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	ctrl->asid = 1;	/* ASID 0 is reserved for the host */
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	/* Intercept VMRUN (mandatory per the architecture) and VMMCALL. */
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		/* NOTE(review): assumes identity mapping, so virt == phys for pml4e. */
		ctrl->nested_cr3 = (u64)pml4e;
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}
181 
/* Guest GPR state, swapped in/out around VMRUN by SAVE_GPR_C/LOAD_GPR_C. */
struct regs regs;

/* Return a copy of the guest register state from the last VMRUN. */
struct regs get_regs(void)
{
	return regs;
}
188 
// rax handled specially below

/*
 * Exchange the host GPRs (except rax and rsp, handled in ASM_VMRUN_CMD)
 * with the copies kept in the global "regs".  Because xchg both loads
 * and stores, the identical sequence serves to load guest registers
 * before VMRUN and to save them afterwards.
 * NOTE(review): the offsets (rbx at +0x8 ... r15 at +0x78) must stay in
 * sync with the layout of struct regs in svm.h.
 */
#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

/* Loading is the same exchange sequence (xchg is symmetric). */
#define LOAD_GPR_C      SAVE_GPR_C
208 
/* Test currently driven by svm_vmrun() (v2-style tests). */
struct svm_test *v2_test;

/*
 * VMLOAD / VMRUN / VMSAVE sequence with full guest GPR save/restore.
 * Expects %rax = VMCB physical address.  regs+0x80 (guest rsp) and
 * regs+0x0 (guest rax) are copied to/from the VMCB save area at
 * offsets 0x170 and 0x1f8 respectively.
 * NOTE(review): these magic offsets must match struct vmcb in svm.h —
 * verify if that layout ever changes.
 */
#define ASM_VMRUN_CMD                           \
                "vmload %%rax\n\t"              \
                "mov regs+0x80, %%r15\n\t"      \
                "mov %%r15, 0x170(%%rax)\n\t"   \
                "mov regs, %%r15\n\t"           \
                "mov %%r15, 0x1f8(%%rax)\n\t"   \
                LOAD_GPR_C                      \
                "vmrun %%rax\n\t"               \
                SAVE_GPR_C                      \
                "mov 0x170(%%rax), %%r15\n\t"   \
                "mov %%r15, regs+0x80\n\t"      \
                "mov 0x1f8(%%rax), %%r15\n\t"   \
                "mov %%r15, regs\n\t"           \
                "vmsave %%rax\n\t"
/* Guest stack; the guest's rsp starts at the top (end) of this array. */
u64 guest_stack[10000];

/*
 * Run the guest registered via test_set_guest() once and return the
 * VMCB exit code.  The guest entry is test_thunk(), which receives
 * v2_test through rdi (per regs.rdi below).
 */
int svm_vmrun(void)
{
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)v2_test;

	asm volatile (
		ASM_VMRUN_CMD
		:
		: "a" (virt_to_phys(vmcb))
		: "memory", "r15");

	return (vmcb->control.exit_code);
}
242 
/*
 * Run one legacy-style test: call its prepare() hook, VMRUN the guest
 * in a loop until the finished() hook says stop, then report() the
 * result of the succeeded() hook.  Each loop iteration runs the
 * prepare_gif_clear() hook inside the CLGI window before VMRUN.
 */
static void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	irq_disable();
	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		asm volatile (
			"clgi;\n\t" // semi-colon needed for LLVM compatibility
			"sti \n\t"
			// Call test->prepare_gif_clear(test) while GIF is clear.
			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t"
			"mov %[vmcb_phys], %%rax \n\t"
			ASM_VMRUN_CMD
			"cli \n\t"
			"stgi"
			: // inputs clobbered by the guest:
			"=D" (the_test),            // first argument register
			"=b" (the_vmcb)             // callee save register!
			: [test] "0" (the_test),
			[vmcb_phys] "1"(the_vmcb),
			[PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			: "rax", "rcx", "rdx", "rsi",
			"r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
			"memory");
		++test->exits;
	} while (!test->finished(test));
	irq_enable();

	report(test->succeeded(test), "%s", test->name);

	/* Let the BSP's busy-wait in main() know an on_vcpu test finished. */
	if (test->on_vcpu)
		test->on_vcpu_done = true;
}
282 
/*
 * Per-AP setup, run via on_cpu(): give the CPU its own host save area
 * and enable SVM (plus NX) in EFER.  @msr_efer is the BSP's EFER value
 * smuggled through the void* argument.
 */
static void set_additional_vcpu_msr(void *msr_efer)
{
	void *hsave = alloc_page();

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME | EFER_NX);
}
290 
291 static void setup_svm(void)
292 {
293 	void *hsave = alloc_page();
294 	u64 *page, address;
295 	int i,j;
296 
297 	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
298 	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
299 	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
300 
301 	io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);
302 
303 	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);
304 
305 	if (!npt_supported())
306 		return;
307 
308 	for (i = 1; i < cpu_count(); i++)
309 		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));
310 
311 	printf("NPT detected - running all tests with NPT enabled\n");
312 
313 	/*
314 	* Nested paging supported - Build a nested page table
315 	* Build the page-table bottom-up and map everything with 4k
316 	* pages to get enough granularity for the NPT unit-tests.
317 	*/
318 
319 	address = 0;
320 
321 	/* PTE level */
322 	for (i = 0; i < 2048; ++i) {
323 		page = alloc_page();
324 
325 		for (j = 0; j < 512; ++j, address += 4096)
326 	    		page[j] = address | 0x067ULL;
327 
328 		pte[i] = page;
329 	}
330 
331 	/* PDE level */
332 	for (i = 0; i < 4; ++i) {
333 		page = alloc_page();
334 
335 	for (j = 0; j < 512; ++j)
336 	    page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
337 
338 		pde[i] = page;
339 	}
340 
341 	/* PDPe level */
342 	pdpe   = alloc_page();
343 	for (i = 0; i < 4; ++i)
344 		pdpe[i] = ((u64)(pde[i])) | 0x27;
345 
346 	/* PML4e level */
347 	pml4e    = alloc_page();
348 	pml4e[0] = ((u64)pdpe) | 0x27;
349 }
350 
/* Number of tests accepted by the command-line filters. */
int matched;

/*
 * Decide whether the test named @name should run given the @filter_count
 * command-line @filters.  A filter beginning with '-' excludes matching
 * tests; any other filter includes them.  When no inclusive filter is
 * given, every test not explicitly excluded runs.  Spaces in @name are
 * compared as underscores.
 */
static bool
test_wanted(const char *name, char *filters[], int filter_count)
{
	bool has_positive = false;
	bool included = false;
	char clean_name[strlen(name) + 1];
	int i;

	/* Copy @name, mapping spaces to underscores (NUL copied as-is). */
	for (i = 0; ; i++) {
		clean_name[i] = (name[i] == ' ') ? '_' : name[i];
		if (!name[i])
			break;
	}

	for (i = 0; i < filter_count; i++) {
		const char *filter = filters[i];

		if (filter[0] == '-') {
			if (simple_glob(clean_name, filter + 1))
				return false;
		} else {
			has_positive = true;
			if (simple_glob(clean_name, filter))
				included = true;
		}
	}

	/* Inclusive filters were given but none matched: reject. */
	if (has_positive && !included)
		return false;

	matched++;
	return true;
}
388 
389 int main(int ac, char **av)
390 {
391 	int i = 0;
392 
393 	ac--;
394 	av++;
395 
396 	setup_vm();
397 
398 	if (!this_cpu_has(X86_FEATURE_SVM)) {
399 		printf("SVM not availble\n");
400 		return report_summary();
401 	}
402 
403 	setup_svm();
404 
405 	vmcb = alloc_page();
406 
407 	for (; svm_tests[i].name != NULL; i++) {
408 		if (!test_wanted(svm_tests[i].name, av, ac))
409 			continue;
410 		if (svm_tests[i].supported && !svm_tests[i].supported())
411 			continue;
412 		if (svm_tests[i].v2 == NULL) {
413 			if (svm_tests[i].on_vcpu) {
414 				if (cpu_count() <= svm_tests[i].on_vcpu)
415 					continue;
416 				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
417 				while (!svm_tests[i].on_vcpu_done)
418 					cpu_relax();
419 			}
420 			else
421 				test_run(&svm_tests[i]);
422 		} else {
423 			vmcb_ident(vmcb);
424 			v2_test = &(svm_tests[i]);
425 			svm_tests[i].v2();
426 		}
427 	}
428 
429 	if (!matched)
430 		report(matched, "command line didn't match any tests!");
431 
432 	return report_summary();
433 }
434