/* xref: /kvm-unit-tests/x86/svm.c (revision c0a4e715eb30944f984a92fbd4bb2cff6ee298a5) */
#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"

/* for the nested page table */
u64 *pml4e;
u64 *pdpe;
u64 *pde[4];
u64 *pte[2048];
void *scratch_page;

#define LATENCY_RUNS 1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

static bool npt_supported(void)
{
    return cpuid(0x8000000A).d & 1;
}

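/*
 * Enable SVM: register a host save area via MSR_VM_HSAVE_PA, set EFER.SVME
 * (and EFER.NX, which the NPT NX test relies on), and, when nested paging
 * is available, build a nested page table that maps the first 4GB of
 * guest-physical memory with 4k pages.
 */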
static void setup_svm(void)
{
    void *hsave = alloc_page();
    u64 *page, address;
    int i, j;

    wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);

    scratch_page = alloc_page();

    if (!npt_supported())
        return;

    printf("NPT detected - running all tests with NPT enabled\n");

    /*
     * Nested paging supported - Build a nested page table
     * Build the page-table bottom-up and map everything with 4k pages
     * to get enough granularity for the NPT unit-tests.
     */

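    /*
     * Flag bits used below: 0x067 = present | writable | user | accessed |
     * dirty for the 4k PTEs, 0x027 = present | writable | user | accessed
     * for the upper levels. The user bit must be set because accesses
     * translated through the nested page tables are treated as user-mode
     * accesses.
     */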
    address = 0;

    /* PTE level */
    for (i = 0; i < 2048; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j, address += 4096)
            page[j] = address | 0x067ULL;

        pte[i] = page;
    }

    /* PDE level */
    for (i = 0; i < 4; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j)
            page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;

        pde[i] = page;
    }

    /* PDPe level */
    pdpe   = alloc_page();
    for (i = 0; i < 4; ++i)
        pdpe[i] = ((u64)(pde[i])) | 0x27;

    /* PML4e level */
    pml4e    = alloc_page();
    pml4e[0] = ((u64)pdpe) | 0x27;
}

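/*
 * Return a pointer to the nested PTE mapping the given guest-physical
 * address: bits 31:21 select one of the 2048 PTE pages (i1), bits 20:12
 * select the entry within that page (i2). Only the low 4GB covered by the
 * table built in setup_svm() can be looked up this way.
 */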
static u64 *get_pte(u64 address)
{
    int i1, i2;

    address >>= 12;
    i1 = (address >> 9) & 0x7ff;
    i2 = address & 0x1ff;

    return &pte[i1][i2];
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
                         u64 base, u32 limit, u32 attr)
{
    seg->selector = selector;
    seg->attrib = attr;
    seg->limit = limit;
    seg->base = base;
}

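/*
 * Initialize a VMCB so that the guest starts out with the same state the
 * host is currently running with: identical segments, control registers,
 * EFER, PAT and debug state. Only VMRUN and VMMCALL are intercepted, the
 * ASID is set to 1 (ASID 0 is reserved for the host), and nested paging is
 * enabled when supported.
 */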
static void vmcb_ident(struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    struct vmcb_save_area *save = &vmcb->save;
    struct vmcb_control_area *ctrl = &vmcb->control;
    u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
    u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
    struct descriptor_table_ptr desc_table_ptr;

    memset(vmcb, 0, sizeof(*vmcb));
    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
    vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
    vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
    sgdt(&desc_table_ptr);
    vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    sidt(&desc_table_ptr);
    vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    ctrl->asid = 1;
    save->cpl = 0;
    save->efer = rdmsr(MSR_EFER);
    save->cr4 = read_cr4();
    save->cr3 = read_cr3();
    save->cr0 = read_cr0();
    save->dr7 = read_dr7();
    save->dr6 = read_dr6();
    save->cr2 = read_cr2();
    save->g_pat = rdmsr(MSR_IA32_CR_PAT);
    save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);

    if (npt_supported()) {
        ctrl->nested_ctl = 1;
        ctrl->nested_cr3 = (u64)pml4e;
    }
}

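/*
 * Test harness: each test supplies a supported() predicate, a prepare()
 * hook that runs on the host before VMRUN, a guest_func() that runs inside
 * the guest, a finished() hook called after every #VMEXIT to decide whether
 * to re-enter the guest, and a succeeded() hook that determines PASS/FAIL.
 */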
struct test {
    const char *name;
    bool (*supported)(void);
    void (*prepare)(struct test *test);
    void (*guest_func)(struct test *test);
    bool (*finished)(struct test *test);
    bool (*succeeded)(struct test *test);
    struct vmcb *vmcb;
    int exits;
    ulong scratch;
};

static void test_thunk(struct test *test)
{
    test->guest_func(test);
    asm volatile ("vmmcall" : : : "memory");
}

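/*
 * Run a single test: the guest entry point is test_thunk() with the test
 * pointer in %rdi (the "D" input operand, which the guest inherits), and
 * the guest stack is a buffer on the host stack. The asm block performs
 * the usual world switch: CLGI, VMLOAD, VMRUN, VMSAVE, STGI. %rbp is
 * saved and restored by hand because the guest may clobber it and the
 * frame pointer cannot simply be listed in the clobber list.
 */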
static bool test_run(struct test *test, struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    u64 guest_stack[10000];
    bool success;

    test->vmcb = vmcb;
    test->prepare(test);
    vmcb->save.rip = (ulong)test_thunk;
    vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
    do {
        tsc_start = rdtsc();
        asm volatile (
            "clgi \n\t"
            "vmload \n\t"
            "push %%rbp \n\t"
            "push %1 \n\t"
            "vmrun \n\t"
            "pop %1 \n\t"
            "pop %%rbp \n\t"
            "vmsave \n\t"
            "stgi"
            : : "a"(vmcb_phys), "D"(test)
            : "rbx", "rcx", "rdx", "rsi",
              "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
              "memory");
        tsc_end = rdtsc();
        ++test->exits;
    } while (!test->finished(test));

    success = test->succeeded(test);

    printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");

    return success;
}

static bool smp_supported(void)
{
    return cpu_count() > 1;
}

static bool default_supported(void)
{
    return true;
}

static void default_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    cli();
}

static bool default_finished(struct test *test)
{
    return true; /* one vmexit */
}

static void null_test(struct test *test)
{
}

static bool null_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

static void prepare_no_vmrun_int(struct test *test)
{
    test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct test *test)
{
    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
}

static bool check_vmrun(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_cr3_intercept(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct test *test)
{
    asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct test *test)
{
    return null_check(test) && test->scratch == read_cr3();
}

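/*
 * "cr3 read intercept emulate": with the CR3 read intercept armed, the
 * guest on CPU 0 signals readiness through test->scratch and then executes
 * the instruction at the mmio_insn label, while CPU 1 rewrites that
 * instruction into "mov %cr3, %rax; nop" (0x0f 0x20 0xd8 0x90). The
 * expected result is still a CR3 read intercept (check_cr3_intercept),
 * i.e. the intercept must not be bypassed by the late patching.
 */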
static void corrupt_cr3_intercept_bypass(void *_test)
{
    struct test *test = _test;
    extern volatile u32 mmio_insn;

    while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
        pause();
    pause();
    pause();
    pause();
    mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
    on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct test *test)
{
    ulong a = 0xa0000;

    test->scratch = 1;
    while (test->scratch != 2)
        barrier();

    asm volatile ("mmio_insn: mov %0, (%0); nop"
                  : "+a"(a) : : "memory");
    test->scratch = a;
}

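/*
 * NextRIP test: CPUID Fn8000_000A EDX bit 3 advertises the NRIPS feature,
 * i.e. the CPU saves the address of the next instruction in VMCB.next_rip
 * when an instruction intercept triggers. The test intercepts RDTSC and
 * checks that next_rip points at the exp_next_rip label right after it.
 */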
static bool next_rip_supported(void)
{
    return (cpuid(SVM_CPUID_FUNC).d & 8);
}

static void prepare_next_rip(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}

static void test_next_rip(struct test *test)
{
    asm volatile ("rdtsc\n\t"
                  ".globl exp_next_rip\n\t"
                  "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct test *test)
{
    extern char exp_next_rip;
    unsigned long address = (unsigned long)&exp_next_rip;

    return address == test->vmcb->control.next_rip;
}

static void prepare_mode_switch(struct test *test)
{
    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
                                             |  (1ULL << UD_VECTOR)
                                             |  (1ULL << DF_VECTOR)
                                             |  (1ULL << PF_VECTOR);
    test->scratch = 0;
}

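/*
 * Drop from 64-bit long mode all the way down to real mode and climb back
 * up, issuing VMMCALL once in real mode and once after returning to long
 * mode. The ljmp selectors (40 for 32-bit code, 56 for 16-bit code, 64 for
 * data, 8 for 64-bit code) are assumed to match the GDT layout that the
 * kvm-unit-tests environment sets up.
 */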
static void test_mode_switch(struct test *test)
{
    asm volatile("	cli\n"
		 "	ljmp *1f\n" /* jump to 32-bit code segment */
		 "1:\n"
		 "	.long 2f\n"
		 "	.long 40\n"
		 ".code32\n"
		 "2:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl  $31, %%eax\n" /* clear PG */
		 "	movl %%eax, %%cr0\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btcl $8, %%eax\n" /* clear LME */
		 "	wrmsr\n"
		 "	movl %%cr4, %%eax\n"
		 "	btcl $5, %%eax\n" /* clear PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movw $64, %%ax\n"
		 "	movw %%ax, %%ds\n"
		 "	ljmpl $56, $3f\n" /* jump to 16-bit protected mode */
		 ".code16\n"
		 "3:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl $0, %%eax\n" /* clear PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl $0, $4f\n"   /* jump to real mode */
		 "4:\n"
		 "	vmmcall\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl $0, %%eax\n" /* set PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl $40, $5f\n" /* back to protected mode */
		 ".code32\n"
		 "5:\n"
		 "	movl %%cr4, %%eax\n"
		 "	btsl $5, %%eax\n" /* set PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btsl $8, %%eax\n" /* set LME */
		 "	wrmsr\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl  $31, %%eax\n" /* set PG */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl $8, $6f\n"    /* back to long mode */
		 ".code64\n\t"
		 "6:\n"
		 "	vmmcall\n"
		 ::: "rax", "rbx", "rcx", "rdx", "memory");
}

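/*
 * Called after every #VMEXIT of the mode switch test. test->scratch counts
 * the VMMCALL exits: after the first one the guest must be in real mode,
 * after the second it must be back in long mode. The VMMCALL instruction
 * is 3 bytes long, hence the rip adjustment.
 */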
static bool mode_switch_finished(struct test *test)
{
    u64 cr0, cr4, efer;

    cr0  = test->vmcb->save.cr0;
    cr4  = test->vmcb->save.cr4;
    efer = test->vmcb->save.efer;

    /* Only expect VMMCALL intercepts */
    if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
        return true;

    /* Jump over the VMMCALL instruction */
    test->vmcb->save.rip += 3;

    /* Do sanity checks */
    switch (test->scratch) {
    case 0:
        /* Test should be in real mode now - check for this */
        if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
            (cr4  & 0x00000020) || /* CR4.PAE */
            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
                return true;
        break;
    case 1:
        /* Test should be back in long mode now - check for this */
        if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
            ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
                return true;
        break;
    }

    /* one step forward */
    test->scratch += 1;

    return test->scratch == 2;
}

static bool check_mode_switch(struct test *test)
{
    return test->scratch == 2;
}

static void prepare_asid_zero(struct test *test)
{
    test->vmcb->control.asid = 0;
}

static void test_asid_zero(struct test *test)
{
    asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct test *test)
{
    return true;
}

static void sel_cr0_bug_test(struct test *test)
{
    unsigned long cr0;

    /* read cr0, set CD, and write back */
    cr0  = read_cr0();
    cr0 |= (1UL << 30);
    write_cr0(cr0);

    /*
     * If we are here the test failed, not sure what to do now because we
     * are not in guest-mode anymore so we can't trigger an intercept.
     * Trigger a triple fault for now.
     */
    printf("sel_cr0 test failed. Cannot recover from this - exiting\n");
    exit(1);
}

static bool sel_cr0_bug_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

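/*
 * Nested page fault tests. On #NPF, exit_info_1 holds a page-fault style
 * error code for the nested walk (bit 0: present, bit 1: write, bit 2:
 * user, bit 3: reserved bit set, bit 4: instruction fetch) and exit_info_2
 * holds the faulting guest-physical address.
 */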
static void npt_nx_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = get_pte((u64)null_test);

    *pte |= (1ULL << 63);
}

static bool npt_nx_check(struct test *test)
{
    u64 *pte = get_pte((u64)null_test);

    *pte &= ~(1ULL << 63);

    test->vmcb->save.efer |= (1 << 11);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x15);
}

static void npt_us_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = get_pte((u64)scratch_page);

    *pte &= ~(1ULL << 2);
}

static void npt_us_test(struct test *test)
{
    (void) *(volatile u64 *)scratch_page;
}

static bool npt_us_check(struct test *test)
{
    u64 *pte = get_pte((u64)scratch_page);

    *pte |= (1ULL << 2);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x05);
}

static void npt_rsvd_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);

    pdpe[0] |= (1ULL << 8);
}

static bool npt_rsvd_check(struct test *test)
{
    pdpe[0] &= ~(1ULL << 8);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
            && (test->vmcb->control.exit_info_1 == 0x0f);
}

static void npt_rw_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = get_pte(0x80000);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_test(struct test *test)
{
    u64 *data = (void*)(0x80000);

    *data = 0;
}

static bool npt_rw_check(struct test *test)
{
    u64 *pte = get_pte(0x80000);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x07);
}

static void npt_pfwalk_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = get_pte(read_cr3());

    *pte &= ~(1ULL << 1);
}

static bool npt_pfwalk_check(struct test *test)
{
    u64 *pte = get_pte(read_cr3());

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x7)
           && (test->vmcb->control.exit_info_2 == read_cr3());
}

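/*
 * VMRUN/#VMEXIT latency measurement: test_run() samples the TSC right
 * before the world-switch asm block and latency_test() samples it again as
 * soon as the guest starts running, giving the VMRUN cost. The guest then
 * samples the TSC and issues VMMCALL, and latency_finished() samples it
 * again on the host side, giving the #VMEXIT cost. This is repeated
 * LATENCY_RUNS times; min/max/average are reported in latency_check().
 */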
static void latency_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmrun_min = latvmexit_min = -1ULL;
    latvmrun_max = latvmexit_max = 0;
    vmrun_sum = vmexit_sum = 0;
}

static void latency_test(struct test *test)
{
    u64 cycles;

start:
    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmrun_max)
        latvmrun_max = cycles;

    if (cycles < latvmrun_min)
        latvmrun_min = cycles;

    vmrun_sum += cycles;

    tsc_start = rdtsc();

    asm volatile ("vmmcall" : : : "memory");
    goto start;
}

static bool latency_finished(struct test *test)
{
    u64 cycles;

    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmexit_max)
        latvmexit_max = cycles;

    if (cycles < latvmexit_min)
        latvmexit_min = cycles;

    vmexit_sum += cycles;

    test->vmcb->save.rip += 3;

    runs -= 1;

    return runs == 0;
}

static bool latency_check(struct test *test)
{
    printf("    Latency VMRUN : max: %d min: %d avg: %d\n", latvmrun_max,
            latvmrun_min, vmrun_sum / LATENCY_RUNS);
    printf("    Latency VMEXIT: max: %d min: %d avg: %d\n", latvmexit_max,
            latvmexit_min, vmexit_sum / LATENCY_RUNS);
    return true;
}

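/*
 * Latency of the individual SVM instructions: after a single (empty) guest
 * run, lat_svm_insn_finished() executes VMLOAD, VMSAVE, STGI and CLGI
 * back-to-back on the host LATENCY_RUNS times and records min/max/average
 * cycle counts for each.
 */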
static void lat_svm_insn_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
    latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
    vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct test *test)
{
    u64 vmcb_phys = virt_to_phys(test->vmcb);
    u64 cycles;

    for ( ; runs != 0; runs--) {
        tsc_start = rdtsc();
        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmload_max)
            latvmload_max = cycles;
        if (cycles < latvmload_min)
            latvmload_min = cycles;
        vmload_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmsave_max)
            latvmsave_max = cycles;
        if (cycles < latvmsave_min)
            latvmsave_min = cycles;
        vmsave_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("stgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latstgi_max)
            latstgi_max = cycles;
        if (cycles < latstgi_min)
            latstgi_min = cycles;
        stgi_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("clgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latclgi_max)
            latclgi_max = cycles;
        if (cycles < latclgi_min)
            latclgi_min = cycles;
        clgi_sum += cycles;
    }

    return true;
}

static bool lat_svm_insn_check(struct test *test)
{
    printf("    Latency VMLOAD: max: %d min: %d avg: %d\n", latvmload_max,
            latvmload_min, vmload_sum / LATENCY_RUNS);
    printf("    Latency VMSAVE: max: %d min: %d avg: %d\n", latvmsave_max,
            latvmsave_min, vmsave_sum / LATENCY_RUNS);
    printf("    Latency STGI:   max: %d min: %d avg: %d\n", latstgi_max,
            latstgi_min, stgi_sum / LATENCY_RUNS);
    printf("    Latency CLGI:   max: %d min: %d avg: %d\n", latclgi_max,
            latclgi_min, clgi_sum / LATENCY_RUNS);
    return true;
}

static struct test tests[] = {
    { "null", default_supported, default_prepare, null_test,
      default_finished, null_check },
    { "vmrun", default_supported, default_prepare, test_vmrun,
      default_finished, check_vmrun },
    { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
      null_test, default_finished, check_no_vmrun_int },
    { "cr3 read intercept", default_supported, prepare_cr3_intercept,
      test_cr3_intercept, default_finished, check_cr3_intercept },
    { "cr3 read nointercept", default_supported, default_prepare,
      test_cr3_intercept, default_finished, check_cr3_nointercept },
    { "cr3 read intercept emulate", smp_supported,
      prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
      default_finished, check_cr3_intercept },
    { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
      default_finished, check_next_rip },
    { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
      mode_switch_finished, check_mode_switch },
    { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
      default_finished, check_asid_zero },
    { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
      sel_cr0_bug_finished, sel_cr0_bug_check },
    { "npt_nx", npt_supported, npt_nx_prepare, null_test,
      default_finished, npt_nx_check },
    { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
      default_finished, npt_us_check },
    { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
      default_finished, npt_rsvd_check },
    { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
      default_finished, npt_rw_check },
    { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test,
      default_finished, npt_pfwalk_check },
    { "latency_run_exit", default_supported, latency_prepare, latency_test,
      latency_finished, latency_check },
    { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
      lat_svm_insn_finished, lat_svm_insn_check },
};

int main(int ac, char **av)
{
    int i, nr, passed, done;
    struct vmcb *vmcb;

    setup_vm();
    smp_init();

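    /* CPUID Fn8000_0001 ECX bit 2 is the SVM feature flag. */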
    if (!(cpuid(0x80000001).c & 4)) {
        printf("SVM not available\n");
        return 0;
    }

    setup_svm();

    vmcb = alloc_page();

    nr = ARRAY_SIZE(tests);
    passed = done = 0;
    for (i = 0; i < nr; ++i) {
        if (!tests[i].supported())
            continue;
        done += 1;
        passed += test_run(&tests[i], vmcb);
    }

    printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed));
    return passed == done ? 0 : 1;
}