/* kvm-unit-tests: x86/svm.c (xref revision 55601383cca6221889c5641e4bf6cfdbb855b213) */
#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"

/* for the nested page table */
u64 *pml4e;
u64 *pdpe;
u64 *pde[4];
u64 *pte[2048];
void *scratch_page;

#define LATENCY_RUNS 1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

u8 *io_bitmap;
u8 io_bitmap_area[16384];
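
/*
 * Note: the SVM I/O permission map covers one bit per port plus extra bits
 * beyond port 0xffff for accesses that straddle the last port, which is why
 * the ioio test below touches io_bitmap[8192]. The 16 Kbyte backing area
 * leaves room to round io_bitmap up to a page boundary in setup_svm().
 */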

static bool npt_supported(void)
{
    /* CPUID Fn8000_000Ah EDX[0]: nested paging support */
    return cpuid(0x8000000A).d & 1;
}

static void setup_svm(void)
{
    void *hsave = alloc_page();
    u64 *page, address;
    int i, j;

    wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);

    scratch_page = alloc_page();

    io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);

    if (!npt_supported())
        return;

    printf("NPT detected - running all tests with NPT enabled\n");

    /*
     * Nested paging supported - build the nested page table bottom-up and
     * map everything with 4k pages to get enough granularity for the
     * NPT unit-tests.
     */

    address = 0;

    /* PTE level: present, writable, user, accessed, dirty */
    for (i = 0; i < 2048; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j, address += 4096)
            page[j] = address | 0x067ULL;

        pte[i] = page;
    }

    /* PDE level: present, writable, user, accessed */
    for (i = 0; i < 4; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j)
            page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;

        pde[i] = page;
    }

    /* PDPe level */
    pdpe   = alloc_page();
    for (i = 0; i < 4; ++i)
        pdpe[i] = ((u64)(pde[i])) | 0x27;

    /* PML4e level */
    pml4e    = alloc_page();
    pml4e[0] = ((u64)pdpe) | 0x27;
}

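/*
 * Helpers to find the NPT entry mapping a given guest-physical address.
 * The table built above maps the first 4GB flat: pte[] holds 2048 PTE pages
 * of 512 entries each (address bits 31:21 select the page, bits 20:12 the
 * entry), and pde[] holds the four PDE pages above them.
 */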
static u64 *npt_get_pde(u64 address)
{
    int i1, i2;

    address >>= 21;
    i1 = (address >> 9) & 0x3;
    i2 = address & 0x1ff;

    return &pde[i1][i2];
}

static u64 *npt_get_pte(u64 address)
{
    int i1, i2;

    address >>= 12;
    i1 = (address >> 9) & 0x7ff;
    i2 = address & 0x1ff;

    return &pte[i1][i2];
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
                         u64 base, u32 limit, u32 attr)
{
    seg->selector = selector;
    seg->attrib = attr;
    seg->limit = limit;
    seg->base = base;
}

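/*
 * Set up a VMCB that runs the guest with the host's own state: host segment
 * selectors, control registers and MSRs are copied in, only VMRUN (which
 * must always be intercepted) and VMMCALL are intercepted, and nested paging
 * is enabled when available using the table from setup_svm().
 */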
static void vmcb_ident(struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    struct vmcb_save_area *save = &vmcb->save;
    struct vmcb_control_area *ctrl = &vmcb->control;
    u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
    u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
    struct descriptor_table_ptr desc_table_ptr;

    memset(vmcb, 0, sizeof(*vmcb));
    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
    vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
    vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
    sgdt(&desc_table_ptr);
    vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    sidt(&desc_table_ptr);
    vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    ctrl->asid = 1;
    save->cpl = 0;
    save->efer = rdmsr(MSR_EFER);
    save->cr4 = read_cr4();
    save->cr3 = read_cr3();
    save->cr0 = read_cr0();
    save->dr7 = read_dr7();
    save->dr6 = read_dr6();
    save->cr2 = read_cr2();
    save->g_pat = rdmsr(MSR_IA32_CR_PAT);
    save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
    ctrl->iopm_base_pa = virt_to_phys(io_bitmap);

    if (npt_supported()) {
        ctrl->nested_ctl = 1;
        ctrl->nested_cr3 = (u64)pml4e;
    }
}

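/*
 * A unit test: supported() gates the test, prepare() runs in the host to set
 * up the VMCB, guest_func() runs inside the guest via test_thunk(),
 * finished() decides after each #VMEXIT whether to re-enter the guest, and
 * succeeded() grades the final result.
 */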
struct test {
    const char *name;
    bool (*supported)(void);
    void (*prepare)(struct test *test);
    void (*guest_func)(struct test *test);
    bool (*finished)(struct test *test);
    bool (*succeeded)(struct test *test);
    struct vmcb *vmcb;
    int exits;
    ulong scratch;
};

static inline void vmmcall(void)
{
    asm volatile ("vmmcall" : : : "memory");
}

static void test_thunk(struct test *test)
{
    test->guest_func(test);
    vmmcall();
}

/* Field order must match the regs+0xNN offsets used by SAVE_GPR_C below. */
struct regs {
        u64 rax;
        u64 rbx;
        u64 rcx;
        u64 rdx;
        u64 cr2;
        u64 rbp;
        u64 rsi;
        u64 rdi;
        u64 r8;
        u64 r9;
        u64 r10;
        u64 r11;
        u64 r12;
        u64 r13;
        u64 r14;
        u64 r15;
        u64 rflags;
};

struct regs regs;

// rax handled specially below
#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C      SAVE_GPR_C

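/*
 * Run one iteration of a test: CLGI keeps global interrupts off across the
 * world switch, VMLOAD/VMSAVE handle the guest state that VMRUN/#VMEXIT do
 * not swap themselves, and rax and rflags are shuttled through the VMCB
 * instead, since rax holds the VMCB address and rflags cannot be xchg'd
 * via SAVE_GPR_C.
 */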
static void test_run(struct test *test, struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    u64 guest_stack[10000];

    test->vmcb = vmcb;
    test->prepare(test);
    vmcb->save.rip = (ulong)test_thunk;
    vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
    regs.rdi = (ulong)test;
    do {
        tsc_start = rdtsc();
        asm volatile (
            "clgi \n\t"
            "vmload \n\t"
            "mov regs+0x80, %%r15\n\t"  // rflags
            "mov %%r15, 0x170(%0)\n\t"
            "mov regs, %%r15\n\t"       // rax
            "mov %%r15, 0x1f8(%0)\n\t"
            LOAD_GPR_C
            "vmrun \n\t"
            SAVE_GPR_C
            "mov 0x170(%0), %%r15\n\t"  // rflags
            "mov %%r15, regs+0x80\n\t"
            "mov 0x1f8(%0), %%r15\n\t"  // rax
            "mov %%r15, regs\n\t"
            "vmsave \n\t"
            "stgi"
            : : "a"(vmcb_phys)
            : "rbx", "rcx", "rdx", "rsi",
              "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
              "memory");
        tsc_end = rdtsc();
        ++test->exits;
    } while (!test->finished(test));

    report("%s", test->succeeded(test), test->name);
}

static bool smp_supported(void)
{
    return cpu_count() > 1;
}

static bool default_supported(void)
{
    return true;
}

static void default_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    cli();
}

static bool default_finished(struct test *test)
{
    return true; /* one vmexit */
}

static void null_test(struct test *test)
{
}

static bool null_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

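/*
 * The VMRUN intercept bit is required to be set; clearing it makes the next
 * VMRUN fail its consistency checks and exit with SVM_EXIT_ERR.
 */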
static void prepare_no_vmrun_int(struct test *test)
{
    test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct test *test)
{
    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
}

static bool check_vmrun(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_cr3_intercept(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct test *test)
{
    asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct test *test)
{
    return null_check(test) && test->scratch == read_cr3();
}

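/*
 * "cr3 read intercept emulate": CPU 1 patches the instruction bytes at
 * mmio_insn into "mov %cr3, %rax; nop" while the guest on CPU 0 is executing
 * an emulated MMIO access at that address, checking that the CR3 read
 * intercept also triggers when the instruction goes through the emulator.
 */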
static void corrupt_cr3_intercept_bypass(void *_test)
{
    struct test *test = _test;
    extern volatile u32 mmio_insn;

    while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
        pause();
    pause();
    pause();
    pause();
    mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
    on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct test *test)
{
    ulong a = 0xa0000;

    test->scratch = 1;
    while (test->scratch != 2)
        barrier();

    asm volatile ("mmio_insn: mov %0, (%0); nop"
                  : "+a"(a) : : "memory");
    test->scratch = a;
}

static bool next_rip_supported(void)
{
    /* CPUID Fn8000_000Ah EDX[3]: next_rip (NRIPS) support */
    return (cpuid(SVM_CPUID_FUNC).d & 8);
}

static void prepare_next_rip(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}

static void test_next_rip(struct test *test)
{
    asm volatile ("rdtsc\n\t"
                  ".globl exp_next_rip\n\t"
                  "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct test *test)
{
    extern char exp_next_rip;
    unsigned long address = (unsigned long)&exp_next_rip;

    return address == test->vmcb->control.next_rip;
}

static void prepare_mode_switch(struct test *test)
{
    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
                                             |  (1ULL << UD_VECTOR)
                                             |  (1ULL << DF_VECTOR)
                                             |  (1ULL << PF_VECTOR);
    test->scratch = 0;
}

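/*
 * Walk the guest down from long mode to 32-bit protected mode, 16-bit
 * protected mode and finally real mode, VMMCALL there, then climb back up
 * and VMMCALL again; mode_switch_finished() sanity-checks CR0/CR4/EFER at
 * each of the two stops.
 */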
static void test_mode_switch(struct test *test)
{
    asm volatile("	cli\n"
		 "	ljmp *1f\n" /* jump to 32-bit code segment */
		 "1:\n"
		 "	.long 2f\n"
		 "	.long " xstr(KERNEL_CS32) "\n"
		 ".code32\n"
		 "2:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl  $31, %%eax\n" /* clear PG */
		 "	movl %%eax, %%cr0\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btcl $8, %%eax\n" /* clear LME */
		 "	wrmsr\n"
		 "	movl %%cr4, %%eax\n"
		 "	btcl $5, %%eax\n" /* clear PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movw %[ds16], %%ax\n"
		 "	movw %%ax, %%ds\n"
		 "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
		 ".code16\n"
		 "3:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl $0, %%eax\n" /* clear PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
		 "4:\n"
		 "	vmmcall\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl $0, %%eax\n" /* set PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
		 ".code32\n"
		 "5:\n"
		 "	movl %%cr4, %%eax\n"
		 "	btsl $5, %%eax\n" /* set PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btsl $8, %%eax\n" /* set LME */
		 "	wrmsr\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl  $31, %%eax\n" /* set PG */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
		 ".code64\n\t"
		 "6:\n"
		 "	vmmcall\n"
		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		 : "rax", "rbx", "rcx", "rdx", "memory");
}

static bool mode_switch_finished(struct test *test)
{
    u64 cr0, cr4, efer;

    cr0  = test->vmcb->save.cr0;
    cr4  = test->vmcb->save.cr4;
    efer = test->vmcb->save.efer;

    /* Only expect VMMCALL intercepts */
    if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
        return true;

    /* Jump over the 3-byte VMMCALL instruction */
    test->vmcb->save.rip += 3;

    /* Do sanity checks */
    switch (test->scratch) {
    case 0:
        /* Test should be in real mode now - check for this */
        if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
            (cr4  & 0x00000020) || /* CR4.PAE */
            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
            return true;
        break;
    case 2:
        /* Test should be back in long-mode now - check for this */
        if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
            ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
            return true;
        break;
    }

    /* one step forward */
    test->scratch += 1;

    return test->scratch == 2;
}

static bool check_mode_switch(struct test *test)
{
    return test->scratch == 2;
}

static void prepare_ioio(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
    test->scratch = 0;
    memset(io_bitmap, 0, 8192);
    io_bitmap[8192] = 0xFF;
}

int get_test_stage(struct test *test)
{
    barrier();
    return test->scratch;
}

void inc_test_stage(struct test *test)
{
    barrier();
    test->scratch++;
    barrier();
}

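/*
 * Each stage programs the I/O bitmap and issues an access: accesses whose
 * bits are clear must pass straight through, while intercepted ones bounce
 * to ioio_finished(), which bumps the stage counter and clears the offending
 * bits so the retried instruction completes. The stage checks below verify
 * exactly which accesses trapped.
 */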
static void test_ioio(struct test *test)
{
    // stage 0, test IO pass
    inb(0x5000);
    outb(0x0, 0x5000);
    if (get_test_stage(test) != 0)
        goto fail;

    // test IO width, in/out
    io_bitmap[0] = 0xFF;
    inc_test_stage(test);
    inb(0x0);
    if (get_test_stage(test) != 2)
        goto fail;

    outw(0x0, 0x0);
    if (get_test_stage(test) != 3)
        goto fail;

    inl(0x0);
    if (get_test_stage(test) != 4)
        goto fail;

    // test low/high IO port
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inb(0x5000);
    if (get_test_stage(test) != 5)
        goto fail;

    io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
    inw(0x9000);
    if (get_test_stage(test) != 6)
        goto fail;

    // test partial pass
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inl(0x4FFF);
    if (get_test_stage(test) != 7)
        goto fail;

    // test across pages
    inc_test_stage(test);
    inl(0x7FFF);
    if (get_test_stage(test) != 8)
        goto fail;

    inc_test_stage(test);
    io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
    inl(0x7FFF);
    if (get_test_stage(test) != 10)
        goto fail;

    io_bitmap[0] = 0;
    inl(0xFFFF);
    if (get_test_stage(test) != 11)
        goto fail;

    io_bitmap[0] = 0xFF;
    io_bitmap[8192] = 0;
    inl(0xFFFF);
    inc_test_stage(test);
    if (get_test_stage(test) != 12)
        goto fail;

    return;

fail:
    report("stage %d", false, get_test_stage(test));
    test->scratch = -1;
}

static bool ioio_finished(struct test *test)
{
    unsigned port, size;

    /* Only expect IOIO intercepts */
    if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
        return true;

    if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
        return true;

    /* one step forward */
    test->scratch += 1;

    port = test->vmcb->control.exit_info_1 >> 16;
    size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

    while (size--) {
        io_bitmap[port / 8] &= ~(1 << (port & 7));
        port++;
    }

    return false;
}

static bool check_ioio(struct test *test)
{
    memset(io_bitmap, 0, 8193);
    return test->scratch != -1;
}

static void prepare_asid_zero(struct test *test)
{
    test->vmcb->control.asid = 0;
}

static void test_asid_zero(struct test *test)
{
    asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct test *test)
{
    /* ASID 0 is reserved for the host, so VMRUN must fail */
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct test *test)
{
    return true;
}

static void sel_cr0_bug_test(struct test *test)
{
    unsigned long cr0;

    /* read cr0, set CD, and write back */
    cr0  = read_cr0();
    cr0 |= (1UL << 30);
    write_cr0(cr0);

    /*
     * If we are here the test failed: we are not in guest-mode anymore, so
     * we can't trigger an intercept. Report the failure and exit the test
     * run.
     */
    report("sel_cr0 test. Cannot recover from this - exiting", false);
    exit(report_summary());
}

static bool sel_cr0_bug_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

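/*
 * For nested page faults, exit_info_1 holds a #PF-style error code for the
 * guest-physical access (P, RW, US, RSVD and I/D bits) and exit_info_2 the
 * faulting guest-physical address; bit 32 flags a fault on the final
 * translation and bit 33 a fault taken while walking the guest page tables,
 * which is what the expected values below encode.
 */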
static void npt_nx_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)null_test);

    *pte |= (1ULL << 63);
}

static bool npt_nx_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)null_test);

    *pte &= ~(1ULL << 63);

    test->vmcb->save.efer |= (1 << 11); /* EFER.NX */

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
}

static void npt_us_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)scratch_page);

    *pte &= ~(1ULL << 2);
}

static void npt_us_test(struct test *test)
{
    (void) *(volatile u64 *)scratch_page;
}

static bool npt_us_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)scratch_page);

    *pte |= (1ULL << 2);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
}

u64 save_pde;

static void npt_rsvd_prepare(struct test *test)
{
    u64 *pde;

    vmcb_ident(test->vmcb);
    pde = npt_get_pde((u64) null_test);

    save_pde = *pde;
    *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
}

static bool npt_rsvd_check(struct test *test)
{
    u64 *pde = npt_get_pde((u64) null_test);

    *pde = save_pde;

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
            && (test->vmcb->control.exit_info_1 == 0x10000001dULL);
}

static void npt_rw_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(0x80000);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_test(struct test *test)
{
    u64 *data = (void*)(0x80000);

    *data = 0;
}

static bool npt_rw_check(struct test *test)
{
    u64 *pte = npt_get_pte(0x80000);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
}

static void npt_rw_pfwalk_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(read_cr3());

    *pte &= ~(1ULL << 1);
}

static bool npt_rw_pfwalk_check(struct test *test)
{
    u64 *pte = npt_get_pte(read_cr3());

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
           && (test->vmcb->control.exit_info_2 == read_cr3());
}

static void npt_rsvd_pfwalk_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);

    pdpe[0] |= (1ULL << 8);
}

static bool npt_rsvd_pfwalk_check(struct test *test)
{
    pdpe[0] &= ~(1ULL << 8);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
            && (test->vmcb->control.exit_info_1 == 0x200000006ULL);
}

static void npt_l1mmio_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
}

u32 nested_apic_version1;
u32 nested_apic_version2;

static void npt_l1mmio_test(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030UL);

    nested_apic_version1 = *data;
    nested_apic_version2 = *data;
}

static bool npt_l1mmio_check(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030);
    u32 lvr = *data;

    return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
}

static void npt_rw_l1mmio_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(0xfee00080);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_l1mmio_test(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00080);

    *data = *data;
}

static bool npt_rw_l1mmio_check(struct test *test)
{
    u64 *pte = npt_get_pte(0xfee00080);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
}

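/*
 * Measure world-switch latency with the guest's cooperation: the host
 * timestamps before VMRUN and after #VMEXIT, and the guest timestamps on
 * entry and before each VMMCALL, so the TSC deltas straddling the two
 * transitions give per-VMRUN and per-#VMEXIT cycle counts.
 */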
static void latency_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmrun_min = latvmexit_min = -1ULL;
    latvmrun_max = latvmexit_max = 0;
    vmrun_sum = vmexit_sum = 0;
}

static void latency_test(struct test *test)
{
    u64 cycles;

start:
    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmrun_max)
        latvmrun_max = cycles;

    if (cycles < latvmrun_min)
        latvmrun_min = cycles;

    vmrun_sum += cycles;

    tsc_start = rdtsc();

    asm volatile ("vmmcall" : : : "memory");
    goto start;
}

static bool latency_finished(struct test *test)
{
    u64 cycles;

    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmexit_max)
        latvmexit_max = cycles;

    if (cycles < latvmexit_min)
        latvmexit_min = cycles;

    vmexit_sum += cycles;

    /* Skip over the 3-byte VMMCALL in the guest */
    test->vmcb->save.rip += 3;

    runs -= 1;

    return runs == 0;
}

static bool latency_check(struct test *test)
{
    printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
            latvmrun_min, vmrun_sum / LATENCY_RUNS);
    printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
            latvmexit_min, vmexit_sum / LATENCY_RUNS);
    return true;
}

static void lat_svm_insn_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
    latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
    vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct test *test)
{
    u64 vmcb_phys = virt_to_phys(test->vmcb);
    u64 cycles;

    for ( ; runs != 0; runs--) {
        tsc_start = rdtsc();
        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmload_max)
            latvmload_max = cycles;
        if (cycles < latvmload_min)
            latvmload_min = cycles;
        vmload_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmsave_max)
            latvmsave_max = cycles;
        if (cycles < latvmsave_min)
            latvmsave_min = cycles;
        vmsave_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("stgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latstgi_max)
            latstgi_max = cycles;
        if (cycles < latstgi_min)
            latstgi_min = cycles;
        stgi_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("clgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latclgi_max)
            latclgi_max = cycles;
        if (cycles < latclgi_min)
            latclgi_min = cycles;
        clgi_sum += cycles;
    }

    return true;
}

static bool lat_svm_insn_check(struct test *test)
{
    printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
            latvmload_min, vmload_sum / LATENCY_RUNS);
    printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
            latvmsave_min, vmsave_sum / LATENCY_RUNS);
    printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
            latstgi_min, stgi_sum / LATENCY_RUNS);
    printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
            latclgi_min, clgi_sum / LATENCY_RUNS);
    return true;
}

static struct test tests[] = {
    { "null", default_supported, default_prepare, null_test,
      default_finished, null_check },
    { "vmrun", default_supported, default_prepare, test_vmrun,
      default_finished, check_vmrun },
    { "ioio", default_supported, prepare_ioio, test_ioio,
      ioio_finished, check_ioio },
    { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
      null_test, default_finished, check_no_vmrun_int },
    { "cr3 read intercept", default_supported, prepare_cr3_intercept,
      test_cr3_intercept, default_finished, check_cr3_intercept },
    { "cr3 read nointercept", default_supported, default_prepare,
      test_cr3_intercept, default_finished, check_cr3_nointercept },
    { "cr3 read intercept emulate", smp_supported,
      prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
      default_finished, check_cr3_intercept },
    { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
      default_finished, check_next_rip },
    { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
      mode_switch_finished, check_mode_switch },
    { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
      default_finished, check_asid_zero },
    { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
      sel_cr0_bug_finished, sel_cr0_bug_check },
    { "npt_nx", npt_supported, npt_nx_prepare, null_test,
      default_finished, npt_nx_check },
    { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
      default_finished, npt_us_check },
    { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
      default_finished, npt_rsvd_check },
    { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
      default_finished, npt_rw_check },
    { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, null_test,
      default_finished, npt_rsvd_pfwalk_check },
    { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, null_test,
      default_finished, npt_rw_pfwalk_check },
    { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
      default_finished, npt_l1mmio_check },
    { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare, npt_rw_l1mmio_test,
      default_finished, npt_rw_l1mmio_check },
    { "latency_run_exit", default_supported, latency_prepare, latency_test,
      latency_finished, latency_check },
    { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
      lat_svm_insn_finished, lat_svm_insn_check },
};

int main(int ac, char **av)
{
    int i, nr;
    struct vmcb *vmcb;

    setup_vm();
    smp_init();

    if (!(cpuid(0x80000001).c & 4)) { /* CPUID Fn8000_0001h ECX[2]: SVM */
        printf("SVM not available\n");
        return report_summary();
    }

    setup_svm();

    vmcb = alloc_page();

    nr = ARRAY_SIZE(tests);
    for (i = 0; i < nr; ++i) {
        if (!tests[i].supported())
            continue;
        test_run(&tests[i], vmcb);
    }

    return report_summary();
}