xref: /kvm-unit-tests/x86/svm.c (revision ea9bf4ad93ff371473c3a30803c500b133cb52f4)
1 /*
2  * Framework for testing nested virtualization
3  */
4 
5 #include "svm.h"
6 #include "libcflat.h"
7 #include "processor.h"
8 #include "desc.h"
9 #include "msr.h"
10 #include "vm.h"
11 #include "smp.h"
12 #include "types.h"
13 #include "alloc_page.h"
14 #include "isr.h"
15 #include "apic.h"
16 #include "vmalloc.h"
17 
/*
 * Page-table pages for the nested (NPT) identity map built by setup_svm():
 * 2048 PTE pages and 4 PDE pages cover 4 GiB with 4 KiB granularity.
 */
u64 *pte[2048];
u64 *pde[4];
u64 *pdpe;	/* single PDPE page; its first 4 entries point at pde[] */
u64 *pml4e;	/* top-level page; entry 0 points at pdpe */

/* VMCB shared by all tests; (re)initialized via vmcb_ident(). */
struct vmcb *vmcb;
25 
26 u64 *npt_get_pte(u64 address)
27 {
28 	int i1, i2;
29 
30 	address >>= 12;
31 	i1 = (address >> 9) & 0x7ff;
32 	i2 = address & 0x1ff;
33 
34 	return &pte[i1][i2];
35 }
36 
37 u64 *npt_get_pde(u64 address)
38 {
39 	int i1, i2;
40 
41 	address >>= 21;
42 	i1 = (address >> 9) & 0x3;
43 	i2 = address & 0x1ff;
44 
45 	return &pde[i1][i2];
46 }
47 
/* Return the (single) PDPE page of the nested page table. */
u64 *npt_get_pdpe(void)
{
	return pdpe;
}
52 
/* Return the PML4E (top-level) page of the nested page table. */
u64 *npt_get_pml4e(void)
{
	return pml4e;
}
57 
/* True when more than one CPU was brought up, i.e. SMP tests can run. */
bool smp_supported(void)
{
	return cpu_count() > 1;
}
62 
/* Default .supported callback: the test has no special requirements. */
bool default_supported(void)
{
	return true;
}
67 
/* True when the CPU advertises Virtual GIF (VGIF) support. */
bool vgif_supported(void)
{
	return this_cpu_has(X86_FEATURE_VGIF);
}
72 
/* Default .prepare callback: start each run from a pristine identity VMCB. */
void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}
77 
/* Default .prepare_gif_clear callback: nothing to do while GIF is clear. */
void default_prepare_gif_clear(struct svm_test *test)
{
}
81 
/* Default .finished callback: stop the run loop after a single #VMEXIT. */
bool default_finished(struct svm_test *test)
{
	/* one vmexit is enough */
	return true;
}
86 
/* True when the CPU advertises Nested Page Table (NPT) support. */
bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}
91 
/*
 * Read the test's stage counter.  The compiler barrier forces a fresh
 * load so updates made by the guest are not stale in a register.
 */
int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}
97 
/*
 * Set the test's stage counter to @s, with compiler barriers so the
 * store is neither hoisted nor sunk across surrounding code.
 */
void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}
104 
/* Advance the test's stage counter by one (barriered like set_test_stage). */
void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}
111 
112 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
113                          u64 base, u32 limit, u32 attr)
114 {
115 	seg->selector = selector;
116 	seg->attrib = attr;
117 	seg->limit = limit;
118 	seg->base = base;
119 }
120 
/* Issue VMMCALL; guests use this to force a #VMEXIT back to the host. */
inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}
125 
126 static test_guest_func guest_main;
127 
/* Register the guest function that the next VMRUN will execute. */
void test_set_guest(test_guest_func func)
{
	guest_main = func;
}
132 
/*
 * Guest-side trampoline: run the registered guest function, then VMMCALL
 * so the host regains control with a known exit code.
 */
static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}
138 
/* I/O permission bitmap: page-aligned pointer into the static area below. */
u8 *io_bitmap;
u8 io_bitmap_area[16384];

/* MSR permission bitmap, likewise page-aligned within a padded area. */
u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];
144 
/*
 * Initialize @vmcb so the guest starts in the host's own context: same
 * control registers, EFER, PAT and debug state, flat 64-bit code/data
 * segments, and intercepts only for VMRUN, VMMCALL and SHUTDOWN.
 */
void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	/* Read/write data segment, present; DB+G => 32-bit, page granularity. */
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	/* Execute/read code segment, present; L => 64-bit mode. */
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
	    | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
	/* VMSAVE seeds the save area (FS/GS/TR/LDTR etc.) from CPU state. */
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	ctrl->asid = 1;	/* ASID 0 is reserved for the host */
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
			  (1ULL << INTERCEPT_VMMCALL) |
			  (1ULL << INTERCEPT_SHUTDOWN);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		/* NOTE(review): assumes identity mapping, so the pointer
		 * value doubles as the physical address — confirm. */
		ctrl->nested_cr3 = (u64)pml4e;
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}
189 
/* Guest GPR state; presumably saved/restored by the VMRUN asm macros. */
struct regs regs;

/* Return a copy of the guest register state captured at #VMEXIT. */
struct regs get_regs(void)
{
	return regs;
}
196 
// rax handled specially below


/* Test currently being driven through the v2 (svm_vmrun) interface. */
struct svm_test *v2_test;


/* Guest stack; VMRUN starts the guest with RSP at its top. */
u64 guest_stack[10000];
204 
/*
 * Run the guest once starting at @rip and return the VMCB exit code.
 * The guest gets the top of guest_stack and v2_test as its first
 * argument (via %rdi in the saved regs).
 */
int __svm_vmrun(u64 rip)
{
	vmcb->save.rip = (ulong)rip;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)v2_test;

	asm volatile (
		ASM_PRE_VMRUN_CMD
                "vmrun %%rax\n\t"               \
		ASM_POST_VMRUN_CMD
		:
		: "a" (virt_to_phys(vmcb))
		: "memory", "r15");

	return (vmcb->control.exit_code);
}
221 
/* Run the registered guest function via the thunk; return the exit code. */
int svm_vmrun(void)
{
	return __svm_vmrun((u64)test_thunk);
}
226 
/* Address of the VMRUN instruction below, for tests that inspect/patch it. */
extern u8 vmrun_rip;

/*
 * Drive one v1-style test: prepare the VMCB, then repeatedly VMRUN the
 * guest until test->finished() says stop, and report pass/fail.  Each
 * iteration runs with GIF clear around the world switch and calls the
 * test's prepare_gif_clear hook just before VMRUN.
 */
static noinline void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	irq_disable();
	vmcb_ident(vmcb);

	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		asm volatile (
			"clgi;\n\t" // semi-colon needed for LLVM compatibility
			"sti \n\t"
			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t"
			"mov %[vmcb_phys], %%rax \n\t"
			ASM_PRE_VMRUN_CMD
			".global vmrun_rip\n\t"		\
			"vmrun_rip: vmrun %%rax\n\t"    \
			ASM_POST_VMRUN_CMD
			"cli \n\t"
			"stgi"
			: // inputs clobbered by the guest:
			"=D" (the_test),            // first argument register
			"=b" (the_vmcb)             // callee save register!
			: [test] "0" (the_test),
			[vmcb_phys] "1"(the_vmcb),
			[PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			: "rax", "rcx", "rdx", "rsi",
			"r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
			"memory");
		++test->exits;
	} while (!test->finished(test));
	irq_enable();

	report(test->succeeded(test), "%s", test->name);

        if (test->on_vcpu)
	    test->on_vcpu_done = true;
}
273 
/*
 * Per-vCPU SVM enabling: allocate a host save area and set EFER.SVME.
 * @msr_efer carries the boot CPU's EFER value smuggled through the
 * void pointer argument of on_cpu().
 */
static void set_additional_vcpu_msr(void *msr_efer)
{
	void *hsave = alloc_page();

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
}
281 
282 static void setup_svm(void)
283 {
284 	void *hsave = alloc_page();
285 	u64 *page, address;
286 	int i,j;
287 
288 	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
289 	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
290 
291 	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);
292 
293 	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);
294 
295 	if (!npt_supported())
296 		return;
297 
298 	for (i = 1; i < cpu_count(); i++)
299 		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));
300 
301 	printf("NPT detected - running all tests with NPT enabled\n");
302 
303 	/*
304 	* Nested paging supported - Build a nested page table
305 	* Build the page-table bottom-up and map everything with 4k
306 	* pages to get enough granularity for the NPT unit-tests.
307 	*/
308 
309 	address = 0;
310 
311 	/* PTE level */
312 	for (i = 0; i < 2048; ++i) {
313 		page = alloc_page();
314 
315 		for (j = 0; j < 512; ++j, address += 4096)
316 	    		page[j] = address | 0x067ULL;
317 
318 		pte[i] = page;
319 	}
320 
321 	/* PDE level */
322 	for (i = 0; i < 4; ++i) {
323 		page = alloc_page();
324 
325 	for (j = 0; j < 512; ++j)
326 	    page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
327 
328 		pde[i] = page;
329 	}
330 
331 	/* PDPe level */
332 	pdpe   = alloc_page();
333 	for (i = 0; i < 4; ++i)
334 		pdpe[i] = ((u64)(pde[i])) | 0x27;
335 
336 	/* PML4e level */
337 	pml4e    = alloc_page();
338 	pml4e[0] = ((u64)pdpe) | 0x27;
339 }
340 
341 int matched;
342 
343 static bool
344 test_wanted(const char *name, char *filters[], int filter_count)
345 {
346         int i;
347         bool positive = false;
348         bool match = false;
349         char clean_name[strlen(name) + 1];
350         char *c;
351         const char *n;
352 
353         /* Replace spaces with underscores. */
354         n = name;
355         c = &clean_name[0];
356         do *c++ = (*n == ' ') ? '_' : *n;
357         while (*n++);
358 
359         for (i = 0; i < filter_count; i++) {
360                 const char *filter = filters[i];
361 
362                 if (filter[0] == '-') {
363                         if (simple_glob(clean_name, filter + 1))
364                                 return false;
365                 } else {
366                         positive = true;
367                         match |= simple_glob(clean_name, filter);
368                 }
369         }
370 
371         if (!positive || match) {
372                 matched++;
373                 return true;
374         } else {
375                 return false;
376         }
377 }
378 
379 int main(int ac, char **av)
380 {
381 	/* Omit PT_USER_MASK to allow tested host.CR4.SMEP=1. */
382 	pteval_t opt_mask = 0;
383 	int i = 0;
384 
385 	ac--;
386 	av++;
387 
388 	__setup_vm(&opt_mask);
389 
390 	if (!this_cpu_has(X86_FEATURE_SVM)) {
391 		printf("SVM not availble\n");
392 		return report_summary();
393 	}
394 
395 	setup_svm();
396 
397 	vmcb = alloc_page();
398 
399 	for (; svm_tests[i].name != NULL; i++) {
400 		if (!test_wanted(svm_tests[i].name, av, ac))
401 			continue;
402 		if (svm_tests[i].supported && !svm_tests[i].supported())
403 			continue;
404 		if (svm_tests[i].v2 == NULL) {
405 			if (svm_tests[i].on_vcpu) {
406 				if (cpu_count() <= svm_tests[i].on_vcpu)
407 					continue;
408 				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
409 				while (!svm_tests[i].on_vcpu_done)
410 					cpu_relax();
411 			}
412 			else
413 				test_run(&svm_tests[i]);
414 		} else {
415 			vmcb_ident(vmcb);
416 			v2_test = &(svm_tests[i]);
417 			svm_tests[i].v2();
418 		}
419 	}
420 
421 	if (!matched)
422 		report(matched, "command line didn't match any tests!");
423 
424 	return report_summary();
425 }
426