xref: /kvm-unit-tests/x86/svm.c (revision c85124d28a51ea3619e91cf6da788142677f4a4d)
1 /*
2  * Framework for testing nested virtualization
3  */
4 
5 #include "svm.h"
6 #include "libcflat.h"
7 #include "processor.h"
8 #include "desc.h"
9 #include "msr.h"
10 #include "vm.h"
11 #include "smp.h"
12 #include "types.h"
13 #include "alloc_page.h"
14 #include "isr.h"
15 #include "apic.h"
16 
/*
 * Backing tables for the nested page table (NPT): a 4 GiB identity map
 * built from 4k pages by setup_svm().
 */
u64 *pte[2048];		/* lowest level: 2048 page tables, 512 PTEs each */
u64 *pde[4];		/* 4 page-directory tables */
u64 *pdpe;		/* single page-directory-pointer table */
u64 *pml4e;		/* single top-level PML4 table */

/* The guest VMCB shared by all tests; allocated in run_svm_tests(). */
struct vmcb *vmcb;
24 
25 u64 *npt_get_pte(u64 address)
26 {
27 	int i1, i2;
28 
29 	address >>= 12;
30 	i1 = (address >> 9) & 0x7ff;
31 	i2 = address & 0x1ff;
32 
33 	return &pte[i1][i2];
34 }
35 
36 u64 *npt_get_pde(u64 address)
37 {
38 	int i1, i2;
39 
40 	address >>= 21;
41 	i1 = (address >> 9) & 0x3;
42 	i2 = address & 0x1ff;
43 
44 	return &pde[i1][i2];
45 }
46 
/* Return the NPT page-directory-pointer table built by setup_svm(). */
u64 *npt_get_pdpe(void)
{
	return pdpe;
}

/* Return the NPT top-level PML4 table built by setup_svm(). */
u64 *npt_get_pml4e(void)
{
	return pml4e;
}
56 
/* .supported helper: true when more than one CPU is available. */
bool smp_supported(void)
{
	return cpu_count() > 1;
}
61 
62 bool default_supported(void)
63 {
64     return true;
65 }
66 
/* .supported helper: true when the CPU reports the Virtual GIF feature. */
bool vgif_supported(void)
{
	return this_cpu_has(X86_FEATURE_VGIF);
}
71 
72 bool lbrv_supported(void)
73 {
74     return this_cpu_has(X86_FEATURE_LBRV);
75 }
76 
77 bool tsc_scale_supported(void)
78 {
79     return this_cpu_has(X86_FEATURE_TSCRATEMSR);
80 }
81 
82 bool pause_filter_supported(void)
83 {
84     return this_cpu_has(X86_FEATURE_PAUSEFILTER);
85 }
86 
87 bool pause_threshold_supported(void)
88 {
89     return this_cpu_has(X86_FEATURE_PFTHRESHOLD);
90 }
91 
92 
/* Default .prepare callback: just (re)initialize the VMCB to identity state. */
void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}
97 
/* Default .prepare_gif_clear callback: nothing to do while GIF is clear. */
void default_prepare_gif_clear(struct svm_test *test)
{
}
101 
/* Default .finished callback: stop the run loop after a single #VMEXIT. */
bool default_finished(struct svm_test *test)
{
	return true; /* one vmexit */
}
106 
/* True when the CPU reports Nested Paging (NPT) support. */
bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}
111 
/*
 * Read the test's stage counter.  scratch is shared between host and
 * guest paths; barrier() keeps the compiler from reordering or caching
 * the access across the call.
 */
int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}
117 
/* Set the test's stage counter to @s, fenced by compiler barriers on both sides. */
void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}
124 
/* Advance the test's stage counter by one, fenced by compiler barriers. */
void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}
131 
132 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
133                          u64 base, u32 limit, u32 attr)
134 {
135 	seg->selector = selector;
136 	seg->attrib = attr;
137 	seg->limit = limit;
138 	seg->base = base;
139 }
140 
/* Execute VMMCALL; vmcb_ident() intercepts it, so this forces a #VMEXIT. */
inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}
145 
/* Guest body invoked by test_thunk(); registered via test_set_guest(). */
static test_guest_func guest_main;

/* Register @func as the guest body for the next svm_vmrun(). */
void test_set_guest(test_guest_func func)
{
	guest_main = func;
}
152 
/*
 * Guest-side entry wrapper: run the registered guest function, then
 * VMMCALL to exit back to the host (intercepted by vmcb_ident()).
 */
static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}
158 
/*
 * I/O and MSR permission bitmaps.  The *_area buffers are oversized so
 * setup_svm() can carve out page-aligned pointers for the VMCB fields.
 */
u8 *io_bitmap;
u8 io_bitmap_area[16384];

u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];
164 
/*
 * Reset @vmcb to a minimal "identity" guest: the guest inherits the host's
 * current segments, control registers, paging and MSR state, so guest code
 * can run host functions directly.  Only VMRUN, VMMCALL and SHUTDOWN are
 * intercepted; nested paging is enabled when the CPU supports it.
 */
void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	/* type 3 = data, read/write, accessed; present, 32-bit, 4k granular */
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	/* type 9 = code, execute-only, accessed; present, long mode */
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
	/* VMSAVE fills in the remaining hidden state (FS/GS/TR/LDTR etc.). */
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	ctrl->asid = 1;			/* any non-zero guest ASID */
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
			  (1ULL << INTERCEPT_VMMCALL) |
			  (1ULL << INTERCEPT_SHUTDOWN);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		/* NOTE(review): virtual address used as nCR3 — assumes the
		 * test environment runs identity-mapped. */
		ctrl->nested_cr3 = (u64)pml4e;
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}
209 
/*
 * Guest GPR snapshot; regs.rdi seeds the guest's first argument before
 * VMRUN (presumably saved/restored by ASM_PRE/POST_VMRUN_CMD from svm.h
 * — confirm there).
 */
struct regs regs;

/* Return a copy of the last guest register snapshot. */
struct regs get_regs(void)
{
	return regs;
}
216 
// rax handled specially below


/* The svm_test currently driven by a v2-style test function. */
struct svm_test *v2_test;


/* Stack for guest code entered via test_thunk()/__svm_vmrun(). */
u64 guest_stack[10000];
224 
/*
 * VMRUN the current vmcb with guest RIP set to @rip and a fresh stack;
 * v2_test is passed through regs.rdi as the guest's first argument.
 * Assumes vmcb was already initialized (e.g. by vmcb_ident()).
 * Returns the #VMEXIT exit code.
 */
int __svm_vmrun(u64 rip)
{
	vmcb->save.rip = (ulong)rip;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)v2_test;

	asm volatile (
		ASM_PRE_VMRUN_CMD
                "vmrun %%rax\n\t"               \
		ASM_POST_VMRUN_CMD
		:
		: "a" (virt_to_phys(vmcb))
		: "memory", "r15");

	return (vmcb->control.exit_code);
}
241 
/* Run the guest registered via test_set_guest(); returns the #VMEXIT code. */
int svm_vmrun(void)
{
	return __svm_vmrun((u64)test_thunk);
}
246 
/* Address of the VMRUN instruction below, exported for tests to inspect. */
extern u8 vmrun_rip;

/*
 * Run one v1-style svm_test to completion: build an identity VMCB, call
 * the test's prepare hooks, then VMRUN the guest (via test_thunk) in a
 * loop until the test's finished() callback says to stop.  Each loop
 * iteration is one VMRUN/#VMEXIT round trip.
 */
static noinline void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	irq_disable();
	vmcb_ident(vmcb);

	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		asm volatile (
			"clgi;\n\t" // semi-colon needed for LLVM compatibility
			"sti \n\t"
			// call test->prepare_gif_clear(test) while GIF is clear
			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t"
			"mov %[vmcb_phys], %%rax \n\t"
			ASM_PRE_VMRUN_CMD
			".global vmrun_rip\n\t"		\
			"vmrun_rip: vmrun %%rax\n\t"    \
			ASM_POST_VMRUN_CMD
			"cli \n\t"
			"stgi"
			: // inputs clobbered by the guest:
			"=D" (the_test),            // first argument register
			"=b" (the_vmcb)             // callee save register!
			: [test] "0" (the_test),
			[vmcb_phys] "1"(the_vmcb),
			[PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			: "rax", "rcx", "rdx", "rsi",
			"r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
			"memory");
		++test->exits;
	} while (!test->finished(test));
	irq_enable();

	report(test->succeeded(test), "%s", test->name);

	// Signal the BSP's spin-wait in run_svm_tests() when run on an AP.
        if (test->on_vcpu)
	    test->on_vcpu_done = true;
}
293 
294 static void set_additional_vcpu_msr(void *msr_efer)
295 {
296 	void *hsave = alloc_page();
297 
298 	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
299 	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
300 }
301 
302 static void setup_svm(void)
303 {
304 	void *hsave = alloc_page();
305 	u64 *page, address;
306 	int i,j;
307 
308 	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
309 	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
310 
311 	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);
312 
313 	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);
314 
315 	if (!npt_supported())
316 		return;
317 
318 	for (i = 1; i < cpu_count(); i++)
319 		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));
320 
321 	printf("NPT detected - running all tests with NPT enabled\n");
322 
323 	/*
324 	* Nested paging supported - Build a nested page table
325 	* Build the page-table bottom-up and map everything with 4k
326 	* pages to get enough granularity for the NPT unit-tests.
327 	*/
328 
329 	address = 0;
330 
331 	/* PTE level */
332 	for (i = 0; i < 2048; ++i) {
333 		page = alloc_page();
334 
335 		for (j = 0; j < 512; ++j, address += 4096)
336 	    		page[j] = address | 0x067ULL;
337 
338 		pte[i] = page;
339 	}
340 
341 	/* PDE level */
342 	for (i = 0; i < 4; ++i) {
343 		page = alloc_page();
344 
345 	for (j = 0; j < 512; ++j)
346 	    page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
347 
348 		pde[i] = page;
349 	}
350 
351 	/* PDPe level */
352 	pdpe   = alloc_page();
353 	for (i = 0; i < 4; ++i)
354 		pdpe[i] = ((u64)(pde[i])) | 0x27;
355 
356 	/* PML4e level */
357 	pml4e    = alloc_page();
358 	pml4e[0] = ((u64)pdpe) | 0x27;
359 }
360 
/* Number of tests accepted by the command-line filters. */
int matched;

/*
 * Decide whether the test called @name should run, given the command-line
 * globs in @filters (filter_count entries).  Filters starting with '-'
 * exclude matching tests; any other filter is a positive match.  With no
 * positive filters every non-excluded test runs.  Increments `matched`
 * for every accepted test.
 *
 * (Re-indented with tabs for consistency with the rest of the file;
 * behavior unchanged.)
 */
static bool
test_wanted(const char *name, char *filters[], int filter_count)
{
	int i;
	bool positive = false;
	bool match = false;
	char clean_name[strlen(name) + 1];
	char *c;
	const char *n;

	/* Replace spaces with underscores so shell-friendly globs match. */
	n = name;
	c = &clean_name[0];
	do *c++ = (*n == ' ') ? '_' : *n;
	while (*n++);

	for (i = 0; i < filter_count; i++) {
		const char *filter = filters[i];

		if (filter[0] == '-') {
			/* A matching negative filter excludes the test outright. */
			if (simple_glob(clean_name, filter + 1))
				return false;
		} else {
			positive = true;
			match |= simple_glob(clean_name, filter);
		}
	}

	if (!positive || match) {
		matched++;
		return true;
	} else {
		return false;
	}
}
398 
/*
 * Entry point for an SVM test binary.  av[1..] are optional glob filters
 * (see test_wanted()); with no filters, every supported test runs.
 * Returns the report summary status.
 */
int run_svm_tests(int ac, char **av, struct svm_test *svm_tests)
{
	int i = 0;

	/* Skip argv[0]; only the user-supplied filters remain. */
	ac--;
	av++;

	if (!this_cpu_has(X86_FEATURE_SVM)) {
		printf("SVM not available\n");
		return report_summary();
	}

	setup_svm();

	vmcb = alloc_page();

	for (; svm_tests[i].name != NULL; i++) {
		if (!test_wanted(svm_tests[i].name, av, ac))
			continue;
		if (svm_tests[i].supported && !svm_tests[i].supported())
			continue;
		if (svm_tests[i].v2 == NULL) {
			/* v1 test: driven by test_run(), possibly on another vCPU. */
			if (svm_tests[i].on_vcpu) {
				/* Skip if the requested vCPU doesn't exist. */
				if (cpu_count() <= svm_tests[i].on_vcpu)
					continue;
				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
				while (!svm_tests[i].on_vcpu_done)
					cpu_relax();
			}
			else
				test_run(&svm_tests[i]);
		} else {
			/* v2 test: the test function drives VMRUN itself. */
			vmcb_ident(vmcb);
			v2_test = &(svm_tests[i]);
			svm_tests[i].v2();
		}
	}

	/* matched == 0 here, so this reports an always-failing result. */
	if (!matched)
		report(matched, "command line didn't match any tests!");

	return report_summary();
}
442