/*
 * Framework for testing nested virtualization
 */

#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "fwcfg.h"
#include "smp.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"

/* for the nested page table */
u64 *pml4e;

struct vmcb *vmcb;

u64 *npt_get_pte(u64 address)
{
	return get_pte(npt_get_pml4e(), (void*)address);
}

u64 *npt_get_pde(u64 address)
{
	struct pte_search search;
	search = find_pte_level(npt_get_pml4e(), (void*)address, 2);
	return search.pte;
}

u64 *npt_get_pdpe(u64 address)
{
	struct pte_search search;
	search = find_pte_level(npt_get_pml4e(), (void*)address, 3);
	return search.pte;
}

u64 *npt_get_pml4e(void)
{
	return pml4e;
}

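/*
 * Typical use of these accessors (an illustrative sketch, not code from
 * this file): an NPT test can fetch the 4k PTE of a guest-physical page,
 * clear its present bit to provoke a nested page fault, and have the
 * next VMRUN flush the guest TLB:
 *
 *	u64 *pte = npt_get_pte((u64)test_page);	// test_page: hypothetical
 *	*pte &= ~1ULL;
 *	vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 */
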
bool smp_supported(void)
{
	return cpu_count() > 1;
}

bool default_supported(void)
{
	return true;
}

bool vgif_supported(void)
{
	return this_cpu_has(X86_FEATURE_VGIF);
}

bool lbrv_supported(void)
{
	return this_cpu_has(X86_FEATURE_LBRV);
}

bool tsc_scale_supported(void)
{
	return this_cpu_has(X86_FEATURE_TSCRATEMSR);
}

bool pause_filter_supported(void)
{
	return this_cpu_has(X86_FEATURE_PAUSEFILTER);
}

bool pause_threshold_supported(void)
{
	return this_cpu_has(X86_FEATURE_PFTHRESHOLD);
}

void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}

void default_prepare_gif_clear(struct svm_test *test)
{
}

bool default_finished(struct svm_test *test)
{
	return true; /* one vmexit */
}

bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}

bool vnmi_supported(void)
{
	return this_cpu_has(X86_FEATURE_VNMI);
}

int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}

void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}

void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}

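/*
 * Illustrative sketch of the intended handshake (not code from this
 * file): the guest half of a test advances the stage and the host half
 * keys off it, with barrier() forcing the compiler to re-read the
 * shared scratch field:
 *
 *	// guest code:
 *	set_test_stage(test, 1);
 *	vmmcall();
 *	// host side, e.g. in the test's finished() callback:
 *	return get_test_stage(test) == 1;
 */
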
static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
			 u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}

static test_guest_func guest_main;

void test_set_guest(test_guest_func func)
{
	guest_main = func;
}

static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}

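/*
 * The IOPM and MSRPM handed to the CPU must be page aligned, so the
 * static areas are oversized and setup_svm() carves page-aligned
 * bitmaps out of them with ALIGN().
 */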
u8 *io_bitmap;
u8 io_bitmap_area[16384];

u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];

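/*
 * Initialize @vmcb so the guest initially mirrors the host: segments,
 * control/debug registers, EFER, PAT and DBGCTL are copied from host
 * state, ASID 1 is used, and only VMRUN, VMMCALL and SHUTDOWN are
 * intercepted (VMRUN interception is architecturally required; VMMCALL
 * gives the guest a way to force a #VMEXIT). With NPT available, nested
 * paging is enabled using the table built by setup_npt().
 */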
void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
		(1ULL << INTERCEPT_VMMCALL) |
		(1ULL << INTERCEPT_SHUTDOWN);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		ctrl->nested_cr3 = (u64)pml4e;
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}

struct regs regs;

struct regs get_regs(void)
{
	return regs;
}

/*
 * rax is handled specially below: VMRUN takes the VMCB physical address
 * in rax, so the guest's rax is exchanged through the VMCB save area by
 * ASM_PRE_VMRUN_CMD/ASM_POST_VMRUN_CMD rather than through struct regs.
 */

struct svm_test *v2_test;

u64 guest_stack[10000];

void svm_setup_vmrun(u64 rip)
{
	vmcb->save.rip = (ulong)rip;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
}

int __svm_vmrun(u64 rip)
{
	svm_setup_vmrun(rip);
	regs.rdi = (ulong)v2_test;

	asm volatile (
		      ASM_PRE_VMRUN_CMD
		      "vmrun %%rax\n\t"
		      ASM_POST_VMRUN_CMD
		      :
		      : "a" (virt_to_phys(vmcb))
		      : "memory", "r15");

	return (vmcb->control.exit_code);
}

int svm_vmrun(void)
{
	return __svm_vmrun((u64)test_thunk);
}

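/*
 * A minimal v2-style test built on the helpers above (an illustrative
 * sketch; example_guest/example_v2 are hypothetical names):
 *
 *	static void example_guest(struct svm_test *test)
 *	{
 *		// Runs in the guest. Returning lands in test_thunk(),
 *		// whose vmmcall() forces the #VMEXIT checked below.
 *	}
 *
 *	static void example_v2(void)
 *	{
 *		test_set_guest(example_guest);
 *		report(svm_vmrun() == SVM_EXIT_VMMCALL, "guest ran");
 *	}
 */
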
extern u8 vmrun_rip;

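/*
 * Run a legacy-style test: GIF is toggled around VMRUN (clgi ... stgi),
 * the test's prepare_gif_clear() callback runs while GIF is clear, and
 * VMRUN is re-executed until the test's finished() callback returns
 * true. Registers the guest may clobber are declared as clobbers so the
 * compiler preserves what it needs across the VMRUN.
 */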
static noinline void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	cli();
	vmcb_ident(vmcb);

	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		asm volatile (
			      "clgi;\n\t" // semi-colon needed for LLVM compatibility
			      "sti \n\t"
			      "call *%c[PREPARE_GIF_CLEAR](%[test]) \n\t"
			      "mov %[vmcb_phys], %%rax \n\t"
			      ASM_PRE_VMRUN_CMD
			      ".global vmrun_rip\n\t"
			      "vmrun_rip: vmrun %%rax\n\t"
			      ASM_POST_VMRUN_CMD
			      "cli \n\t"
			      "stgi"
			      : // inputs clobbered by the guest:
				"=D" (the_test),            // first argument register
				"=b" (the_vmcb)             // callee save register!
			      : [test] "0" (the_test),
				[vmcb_phys] "1" (the_vmcb),
				[PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			      : "rax", "rcx", "rdx", "rsi",
				"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
				"memory");
		++test->exits;
	} while (!test->finished(test));
	sti();

	report(test->succeeded(test), "%s", test->name);

	if (test->on_vcpu)
		test->on_vcpu_done = true;
}

/* Give a secondary CPU its own host save area and enable SVM in EFER. */
static void set_additional_vcpu_msr(void *msr_efer)
{
	void *hsave = alloc_page();

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
}

static void setup_npt(void)
{
	u64 size = fwcfg_get_u64(FW_CFG_RAM_SIZE);

	/* Ensure all <4gb is mapped, e.g. if there's no RAM above 4gb. */
	if (size < BIT_ULL(32))
		size = BIT_ULL(32);

	pml4e = alloc_page();

	/* NPT accesses are treated as "user" accesses. */
	__setup_mmu_range(pml4e, 0, size, X86_MMU_MAP_USER);
}

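/*
 * Enable SVM on the boot CPU: allocate the host save area, set
 * EFER.SVME and carve the page-aligned I/O and MSR permission bitmaps
 * out of their static areas. When NPT is supported, also enable SVME on
 * the remaining CPUs and build the nested page table.
 */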
static void setup_svm(void)
{
	void *hsave = alloc_page();
	int i;

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);

	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);

	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);

	if (!npt_supported())
		return;

	for (i = 1; i < cpu_count(); i++)
		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));

	printf("NPT detected - running all tests with NPT enabled\n");

	/*
	 * Nested paging supported - build the nested page table
	 * bottom-up, mapping everything with 4k pages to get enough
	 * granularity for the NPT unit-tests.
	 */
	setup_npt();
}

int matched;

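/*
 * Filter semantics, e.g. for the filters "npt*" and "-npt_rw"
 * (illustrative names): a test runs if its name (with spaces matched as
 * underscores) matches a positive filter and no '-'-prefixed filter;
 * with no positive filter at all, every test not explicitly excluded
 * runs.
 */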
static bool
test_wanted(const char *name, char *filters[], int filter_count)
{
	int i;
	bool positive = false;
	bool match = false;
	char clean_name[strlen(name) + 1];
	char *c;
	const char *n;

	/* Replace spaces with underscores. */
	n = name;
	c = &clean_name[0];
	do *c++ = (*n == ' ') ? '_' : *n;
	while (*n++);

	for (i = 0; i < filter_count; i++) {
		const char *filter = filters[i];

		if (filter[0] == '-') {
			if (simple_glob(clean_name, filter + 1))
				return false;
		} else {
			positive = true;
			match |= simple_glob(clean_name, filter);
		}
	}

	if (!positive || match) {
		matched++;
		return true;
	} else {
		return false;
	}
}

int run_svm_tests(int ac, char **av, struct svm_test *svm_tests)
{
	int i = 0;

	ac--;
	av++;

	if (!this_cpu_has(X86_FEATURE_SVM)) {
		printf("SVM not available\n");
		return report_summary();
	}

	setup_svm();

	vmcb = alloc_page();

	for (; svm_tests[i].name != NULL; i++) {
		if (!test_wanted(svm_tests[i].name, av, ac))
			continue;
		if (svm_tests[i].supported && !svm_tests[i].supported())
			continue;
		if (svm_tests[i].v2 == NULL) {
			if (svm_tests[i].on_vcpu) {
				if (cpu_count() <= svm_tests[i].on_vcpu)
					continue;
				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
				while (!svm_tests[i].on_vcpu_done)
					cpu_relax();
			} else {
				test_run(&svm_tests[i]);
			}
		} else {
			vmcb_ident(vmcb);
			v2_test = &(svm_tests[i]);
			svm_tests[i].v2();
		}
	}

	if (!matched)
		report(matched, "command line didn't match any tests!");

	return report_summary();
}