xref: /kvm-unit-tests/x86/svm.c (revision 787f0aeb881f8c191fe23929dd3813a9f861f9b1)
/*
 * Framework for testing nested virtualization
 */

#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "fwcfg.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"

/* Root (PML4) of the nested page table built by setup_npt(). */
u64 *pml4e;

struct vmcb *vmcb;

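/*
 * Helpers for looking up entries in the nested page table: the 4k PTE,
 * the PDE (level 2), the PDPE (level 3) and the PML4 root.
 */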
u64 *npt_get_pte(u64 address)
{
	return get_pte(npt_get_pml4e(), (void*)address);
}

u64 *npt_get_pde(u64 address)
{
	struct pte_search search;
	search = find_pte_level(npt_get_pml4e(), (void*)address, 2);
	return search.pte;
}

u64 *npt_get_pdpe(u64 address)
{
	struct pte_search search;
	search = find_pte_level(npt_get_pml4e(), (void*)address, 3);
	return search.pte;
}

u64 *npt_get_pml4e(void)
{
	return pml4e;
}

bool smp_supported(void)
{
	return cpu_count() > 1;
}

bool default_supported(void)
{
	return true;
}

bool vgif_supported(void)
{
	return this_cpu_has(X86_FEATURE_VGIF);
}

bool lbrv_supported(void)
{
	return this_cpu_has(X86_FEATURE_LBRV);
}

bool tsc_scale_supported(void)
{
	return this_cpu_has(X86_FEATURE_TSCRATEMSR);
}

bool pause_filter_supported(void)
{
	return this_cpu_has(X86_FEATURE_PAUSEFILTER);
}

bool pause_threshold_supported(void)
{
	return this_cpu_has(X86_FEATURE_PFTHRESHOLD);
}

void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}

void default_prepare_gif_clear(struct svm_test *test)
{
}

bool default_finished(struct svm_test *test)
{
	return true; /* one vmexit */
}

bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}

bool vnmi_supported(void)
{
	return this_cpu_has(X86_FEATURE_VNMI);
}

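/*
 * Test stages let the host side of a test and its guest (or another vCPU)
 * agree on how far a multi-step test has progressed.  The compiler
 * barriers keep accesses to test->scratch from being reordered or cached
 * in registers across VMRUN/#VMEXIT boundaries.
 */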
int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}

void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}

void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
			 u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}

static test_guest_func guest_main;

void test_set_guest(test_guest_func func)
{
	guest_main = func;
}

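/*
 * Guests entered via svm_vmrun() or test_run() go through this thunk: it
 * calls the registered guest function and then issues VMMCALL, which is
 * always intercepted (see vmcb_ident()), to force a #VMEXIT back to the
 * host once the guest returns.
 */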
static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}

u8 *io_bitmap;
u8 io_bitmap_area[16384];

u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];

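/*
 * Initialize a VMCB so that the nested guest starts out as a near copy of
 * the current host state: flat 64-bit code/data segments, the host's
 * control registers, EFER, PAT and debug state, and (when NPT is enabled)
 * an identity-mapped nested page table.  Only VMRUN, VMMCALL and SHUTDOWN
 * are intercepted; tests layer additional intercepts on top of this
 * baseline as needed.
 */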
void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
		(1ULL << INTERCEPT_VMMCALL) |
		(1ULL << INTERCEPT_SHUTDOWN);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		ctrl->nested_cr3 = (u64)pml4e;
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}

struct regs regs;

struct regs get_regs(void)
{
	return regs;
}

/*
 * RAX is handled specially by the VMRUN asm sequences below (see
 * ASM_PRE_VMRUN_CMD/ASM_POST_VMRUN_CMD).
 */

struct svm_test *v2_test;

u64 guest_stack[10000];

void svm_setup_vmrun(u64 rip)
{
	vmcb->save.rip = (ulong)rip;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
}

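/*
 * Enter the guest at @rip and return the raw exit code from the VMCB.
 * svm_vmrun() below wraps this with test_thunk() so a test only has to
 * register its guest function.  A minimal usage sketch (the guest function
 * and expected exit code here are only an illustrative example):
 *
 *	static void my_guest(struct svm_test *test)
 *	{
 *		vmmcall();
 *	}
 *
 *	test_set_guest(my_guest);
 *	report(svm_vmrun() == SVM_EXIT_VMMCALL, "direct vmmcall exit");
 */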
int __svm_vmrun(u64 rip)
{
	svm_setup_vmrun(rip);
	regs.rdi = (ulong)v2_test;

	asm volatile (
		      ASM_PRE_VMRUN_CMD
		      "vmrun %%rax\n\t"
		      ASM_POST_VMRUN_CMD
		      :
		      : "a" (virt_to_phys(vmcb))
		      : "memory", "r15");

	return (vmcb->control.exit_code);
}

int svm_vmrun(void)
{
	return __svm_vmrun((u64)test_thunk);
}

extern u8 vmrun_rip;

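/*
 * Run a legacy (v1-style) test: build an identity VMCB, call the test's
 * prepare() hook, then re-enter the guest until finished() returns true.
 * GIF is cleared around each VMRUN so the prepare_gif_clear() hook runs
 * with interrupts held off, and the verdict from succeeded() is reported
 * once the loop ends.
 */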
static noinline void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	cli();
	vmcb_ident(vmcb);

	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		asm volatile (
			      "clgi;\n\t" // semi-colon needed for LLVM compatibility
			      "sti \n\t"
			      "call *%c[PREPARE_GIF_CLEAR](%[test]) \n\t"
			      "mov %[vmcb_phys], %%rax \n\t"
			      ASM_PRE_VMRUN_CMD
			      ".global vmrun_rip\n\t"
			      "vmrun_rip: vmrun %%rax\n\t"
			      ASM_POST_VMRUN_CMD
			      "cli \n\t"
			      "stgi"
			      : // inputs clobbered by the guest:
				"=D" (the_test),            // first argument register
				"=b" (the_vmcb)             // callee save register!
			      : [test] "0" (the_test),
				[vmcb_phys] "1"(the_vmcb),
				[PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			      : "rax", "rcx", "rdx", "rsi",
				"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
				"memory");
		++test->exits;
	} while (!test->finished(test));
	sti();

	report(test->succeeded(test), "%s", test->name);

	if (test->on_vcpu)
		test->on_vcpu_done = true;
}

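/*
 * Enable SVM on a secondary vCPU: give it its own host save area and set
 * EFER.SVME, mirroring what setup_svm() does on the boot CPU.
 */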
static void set_additional_vcpu_msr(void *msr_efer)
{
	void *hsave = alloc_page();

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
}

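/*
 * Build the nested page table: identity-map guest memory (at least the
 * low 4GiB) with 4k "user" pages so the NPT tests can tweak individual
 * entries.
 */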
static void setup_npt(void)
{
	u64 size = fwcfg_get_u64(FW_CFG_RAM_SIZE);

	/* Ensure all <4gb is mapped, e.g. if there's no RAM above 4gb. */
	if (size < BIT_ULL(32))
		size = BIT_ULL(32);

	pml4e = alloc_page();

	/* NPT accesses are treated as "user" accesses. */
	__setup_mmu_range(pml4e, 0, size, X86_MMU_MAP_USER);
}

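/*
 * Enable SVM on the boot CPU: allocate the host state-save area, point
 * MSR_VM_HSAVE_PA at it, set EFER.SVME, and carve page-aligned I/O and
 * MSR permission bitmaps out of the static areas above.  If NPT is
 * supported, also enable SVM on the remaining CPUs and build the nested
 * page table.
 */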
static void setup_svm(void)
{
	void *hsave = alloc_page();
	int i;

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);

	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);

	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);

	if (!npt_supported())
		return;

	for (i = 1; i < cpu_count(); i++)
		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));

	printf("NPT detected - running all tests with NPT enabled\n");

	/*
	 * Nested paging is supported, so build a nested page table.  Build
	 * the table bottom-up and map everything with 4k pages to get
	 * enough granularity for the NPT unit tests.
	 */
	setup_npt();
}

int matched;

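/*
 * Decide whether a test name matches the command-line filters.  Spaces in
 * the name are folded to underscores, filters are simple globs, a leading
 * '-' turns a filter into an exclusion, and a test runs if it is not
 * excluded and either matches a positive filter or no positive filter was
 * given at all.
 */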
static bool
test_wanted(const char *name, char *filters[], int filter_count)
{
	int i;
	bool positive = false;
	bool match = false;
	char clean_name[strlen(name) + 1];
	char *c;
	const char *n;

	/* Replace spaces with underscores. */
	n = name;
	c = &clean_name[0];
	do {
		*c++ = (*n == ' ') ? '_' : *n;
	} while (*n++);

	for (i = 0; i < filter_count; i++) {
		const char *filter = filters[i];

		if (filter[0] == '-') {
			if (simple_glob(clean_name, filter + 1))
				return false;
		} else {
			positive = true;
			match |= simple_glob(clean_name, filter);
		}
	}

	if (!positive || match) {
		matched++;
		return true;
	} else {
		return false;
	}
}

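/*
 * Main dispatch loop: skip tests filtered out on the command line or whose
 * supported() check fails, run v2 tests via their v2() callback with a
 * freshly initialized VMCB, and run legacy tests through test_run(),
 * optionally on another vCPU when on_vcpu is set.
 */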
int run_svm_tests(int ac, char **av, struct svm_test *svm_tests)
{
	int i = 0;

	ac--;
	av++;

	if (!this_cpu_has(X86_FEATURE_SVM)) {
		printf("SVM not available\n");
		return report_summary();
	}

	setup_svm();

	vmcb = alloc_page();

	for (; svm_tests[i].name != NULL; i++) {
		if (!test_wanted(svm_tests[i].name, av, ac))
			continue;
		if (svm_tests[i].supported && !svm_tests[i].supported())
			continue;
		if (svm_tests[i].v2 == NULL) {
			if (svm_tests[i].on_vcpu) {
				if (cpu_count() <= svm_tests[i].on_vcpu)
					continue;
				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
				while (!svm_tests[i].on_vcpu_done)
					cpu_relax();
			} else {
				test_run(&svm_tests[i]);
			}
		} else {
			vmcb_ident(vmcb);
			v2_test = &(svm_tests[i]);
			svm_tests[i].v2();
		}
	}

	if (!matched)
		report(matched, "command line didn't match any tests!");

	return report_summary();
}