xref: /kvm-unit-tests/x86/svm.c (revision ef1012196508918f846901ab10f277431901ac02)
1 #include "svm.h"
2 #include "libcflat.h"
3 #include "processor.h"
4 #include "msr.h"
5 #include "vm.h"
6 #include "smp.h"
7 #include "types.h"
8 
9 /* for the nested page table */
10 u64 *pml4e;
11 u64 *pdpe;
12 u64 *pde[4];
13 u64 *pte[2048];
14 u64 *scratch_page;
15 
16 #define LATENCY_RUNS 1000000
17 
18 u64 tsc_start;
19 u64 tsc_end;
20 
21 u64 vmrun_sum, vmexit_sum;
22 u64 vmsave_sum, vmload_sum;
23 u64 stgi_sum, clgi_sum;
24 u64 latvmrun_max;
25 u64 latvmrun_min;
26 u64 latvmexit_max;
27 u64 latvmexit_min;
28 u64 latvmload_max;
29 u64 latvmload_min;
30 u64 latvmsave_max;
31 u64 latvmsave_min;
32 u64 latstgi_max;
33 u64 latstgi_min;
34 u64 latclgi_max;
35 u64 latclgi_min;
36 u64 runs;
37 
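/* Nested paging (NPT) support is reported in CPUID Fn8000_000A EDX bit 0. */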
38 static bool npt_supported(void)
39 {
40     return cpuid(0x8000000A).d & 1;
41 }
42 
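/*
 * Enable SVM: register the host save area, set EFER.SVME (and EFER.NX),
 * and, if nested paging is available, build an identity-mapped nested
 * page table covering the first 4 GB with 4k pages.
 */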
43 static void setup_svm(void)
44 {
45     void *hsave = alloc_page();
46     u64 *page, address;
47     int i, j;
48 
49     wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
50     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
51     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
52 
53     scratch_page = alloc_page();
54 
55     if (!npt_supported())
56         return;
57 
58     printf("NPT detected - running all tests with NPT enabled\n");
59 
60     /*
61      * Nested paging supported - Build a nested page table
62      * Build the page-table bottom-up and map everything with 4k pages
63      * to get enough granularity for the NPT unit-tests.
64      */
65 
66     address = 0;
67 
68     /* PTE level */
69     for (i = 0; i < 2048; ++i) {
70         page = alloc_page();
71 
72         for (j = 0; j < 512; ++j, address += 4096)
73             page[j] = address | 0x067ULL;
74 
75         pte[i] = page;
76     }
77 
78     /* PDE level */
79     for (i = 0; i < 4; ++i) {
80         page = alloc_page();
81 
82         for (j = 0; j < 512; ++j)
83             page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
84 
85         pde[i] = page;
86     }
87 
88     /* PDPe level */
89     pdpe   = alloc_page();
90     for (i = 0; i < 4; ++i)
91         pdpe[i] = ((u64)(pde[i])) | 0x27;
92 
93     /* PML4e level */
94     pml4e    = alloc_page();
95     pml4e[0] = ((u64)pdpe) | 0x27;
96 }
97 
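/* Return a pointer to the NPT PTE that maps the given guest-physical address. */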
98 static u64 *get_pte(u64 address)
99 {
100     int i1, i2;
101 
102     address >>= 12;
103     i1 = (address >> 9) & 0x7ff;
104     i2 = address & 0x1ff;
105 
106     return &pte[i1][i2];
107 }
108 
109 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
110                          u64 base, u32 limit, u32 attr)
111 {
112     seg->selector = selector;
113     seg->attrib = attr;
114     seg->limit = limit;
115     seg->base = base;
116 }
117 
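/*
 * Initialize a VMCB so the guest starts as a copy of the current host
 * state: host segments, descriptor tables, control/debug registers and
 * MSRs are copied in, VMRUN and VMMCALL are intercepted, and nested
 * paging is enabled when supported.
 */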
118 static void vmcb_ident(struct vmcb *vmcb)
119 {
120     u64 vmcb_phys = virt_to_phys(vmcb);
121     struct vmcb_save_area *save = &vmcb->save;
122     struct vmcb_control_area *ctrl = &vmcb->control;
123     u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
124         | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
125     u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
126         | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
127     struct descriptor_table_ptr desc_table_ptr;
128 
129     memset(vmcb, 0, sizeof(*vmcb));
130     asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
131     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
132     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
133     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
134     vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
135     sgdt(&desc_table_ptr);
136     vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
137     sidt(&desc_table_ptr);
138     vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
139     ctrl->asid = 1;
140     save->cpl = 0;
141     save->efer = rdmsr(MSR_EFER);
142     save->cr4 = read_cr4();
143     save->cr3 = read_cr3();
144     save->cr0 = read_cr0();
145     save->dr7 = read_dr7();
146     save->dr6 = read_dr6();
147     save->cr2 = read_cr2();
148     save->g_pat = rdmsr(MSR_IA32_CR_PAT);
149     save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
150     ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
151 
152     if (npt_supported()) {
153         ctrl->nested_ctl = 1;
154         ctrl->nested_cr3 = (u64)pml4e;
155     }
156 }
157 
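/*
 * Per-test descriptor: supported() gates the test, prepare() sets up the
 * VMCB, guest_func() runs inside the guest, finished() decides whether to
 * re-enter the guest after a vmexit, and succeeded() determines PASS/FAIL.
 */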
158 struct test {
159     const char *name;
160     bool (*supported)(void);
161     void (*prepare)(struct test *test);
162     void (*guest_func)(struct test *test);
163     bool (*finished)(struct test *test);
164     bool (*succeeded)(struct test *test);
165     struct vmcb *vmcb;
166     int exits;
167     ulong scratch;
168 };
169 
170 static void test_thunk(struct test *test)
171 {
172     test->guest_func(test);
173     asm volatile ("vmmcall" : : : "memory");
174 }
175 
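/*
 * Run one test: point the guest at test_thunk() with a private stack, then
 * loop CLGI/VMLOAD/VMRUN/VMSAVE/STGI (VMCB physical address in %rax, test
 * pointer in %rdi) until finished() says the test is done, and report the
 * verdict from succeeded().
 */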
176 static bool test_run(struct test *test, struct vmcb *vmcb)
177 {
178     u64 vmcb_phys = virt_to_phys(vmcb);
179     u64 guest_stack[10000];
180     bool success;
181 
182     test->vmcb = vmcb;
183     test->prepare(test);
184     vmcb->save.rip = (ulong)test_thunk;
185     vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
186     do {
187         tsc_start = rdtsc();
188         asm volatile (
189             "clgi \n\t"
190             "vmload \n\t"
191             "push %%rbp \n\t"
192             "push %1 \n\t"
193             "vmrun \n\t"
194             "pop %1 \n\t"
195             "pop %%rbp \n\t"
196             "vmsave \n\t"
197             "stgi"
198             : : "a"(vmcb_phys), "D"(test)
199             : "rbx", "rcx", "rdx", "rsi",
200               "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
201               "memory");
202         tsc_end = rdtsc();
203         ++test->exits;
204     } while (!test->finished(test));
205 
206 
207     success = test->succeeded(test);
208 
209     printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");
210 
211     return success;
212 }
213 
214 static bool default_supported(void)
215 {
216     return true;
217 }
218 
219 static void default_prepare(struct test *test)
220 {
221     vmcb_ident(test->vmcb);
222     cli();
223 }
224 
225 static bool default_finished(struct test *test)
226 {
227     return true; /* one vmexit */
228 }
229 
230 static void null_test(struct test *test)
231 {
232 }
233 
234 static bool null_check(struct test *test)
235 {
236     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
237 }
238 
239 static void prepare_no_vmrun_int(struct test *test)
240 {
241     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
242 }
243 
244 static bool check_no_vmrun_int(struct test *test)
245 {
246     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
247 }
248 
249 static void test_vmrun(struct test *test)
250 {
251     asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
252 }
253 
254 static bool check_vmrun(struct test *test)
255 {
256     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
257 }
258 
259 static void prepare_cr3_intercept(struct test *test)
260 {
261     default_prepare(test);
262     test->vmcb->control.intercept_cr_read |= 1 << 3;
263 }
264 
265 static void test_cr3_intercept(struct test *test)
266 {
267     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
268 }
269 
270 static bool check_cr3_intercept(struct test *test)
271 {
272     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
273 }
274 
275 static bool check_cr3_nointercept(struct test *test)
276 {
277     return null_check(test) && test->scratch == read_cr3();
278 }
279 
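/*
 * Runs on CPU 1 for the "cr3 read intercept emulate" test: once the guest
 * signals readiness via test->scratch, rewrite the instruction at the
 * mmio_insn label into "mov %cr3, %rax; nop" while the original MMIO store
 * is being emulated, to verify the CR3 read intercept is still honored.
 */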
280 static void corrupt_cr3_intercept_bypass(void *_test)
281 {
282     struct test *test = _test;
283     extern volatile u32 mmio_insn;
284 
285     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
286         pause();
287     pause();
288     pause();
289     pause();
290     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
291 }
292 
293 static void prepare_cr3_intercept_bypass(struct test *test)
294 {
295     default_prepare(test);
296     test->vmcb->control.intercept_cr_read |= 1 << 3;
297     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
298 }
299 
300 static void test_cr3_intercept_bypass(struct test *test)
301 {
302     ulong a = 0xa0000;
303 
304     test->scratch = 1;
305     while (test->scratch != 2)
306         barrier();
307 
308     asm volatile ("mmio_insn: mov %0, (%0); nop"
309                   : "+a"(a) : : "memory");
310     test->scratch = a;
311 }
312 
313 static bool next_rip_supported(void)
314 {
315     return (cpuid(SVM_CPUID_FUNC).d & 8);
316 }
317 
318 static void prepare_next_rip(struct test *test)
319 {
320     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
321 }
322 
323 
324 static void test_next_rip(struct test *test)
325 {
326     asm volatile ("rdtsc\n\t"
327                   ".globl exp_next_rip\n\t"
328                   "exp_next_rip:\n\t" ::: "eax", "edx");
329 }
330 
331 static bool check_next_rip(struct test *test)
332 {
333     extern char exp_next_rip;
334     unsigned long address = (unsigned long)&exp_next_rip;
335 
336     return address == test->vmcb->control.next_rip;
337 }
338 
339 static void prepare_mode_switch(struct test *test)
340 {
341     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
342                                              |  (1ULL << UD_VECTOR)
343                                              |  (1ULL << DF_VECTOR)
344                                              |  (1ULL << PF_VECTOR);
345     test->scratch = 0;
346 }
347 
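/*
 * Guest for the mode_switch test: step down from 64-bit long mode through
 * 32-bit and 16-bit protected mode into real mode, VMMCALL, then switch all
 * the way back up to long mode and VMMCALL again. mode_switch_finished()
 * sanity-checks CR0/CR4/EFER after each exit.
 */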
348 static void test_mode_switch(struct test *test)
349 {
350     asm volatile("	cli\n"
351 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
352 		 "1:\n"
353 		 "	.long 2f\n"
354 		 "	.long 40\n"
355 		 ".code32\n"
356 		 "2:\n"
357 		 "	movl %%cr0, %%eax\n"
358 		 "	btcl  $31, %%eax\n" /* clear PG */
359 		 "	movl %%eax, %%cr0\n"
360 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
361 		 "	rdmsr\n"
362 		 "	btcl $8, %%eax\n" /* clear LME */
363 		 "	wrmsr\n"
364 		 "	movl %%cr4, %%eax\n"
365 		 "	btcl $5, %%eax\n" /* clear PAE */
366 		 "	movl %%eax, %%cr4\n"
367 		 "	movw $64, %%ax\n"
368 		 "	movw %%ax, %%ds\n"
369 		 "	ljmpl $56, $3f\n" /* jump to 16-bit protected mode */
370 		 ".code16\n"
371 		 "3:\n"
372 		 "	movl %%cr0, %%eax\n"
373 		 "	btcl $0, %%eax\n" /* clear PE  */
374 		 "	movl %%eax, %%cr0\n"
375 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
376 		 "4:\n"
377 		 "	vmmcall\n"
378 		 "	movl %%cr0, %%eax\n"
379 		 "	btsl $0, %%eax\n" /* set PE  */
380 		 "	movl %%eax, %%cr0\n"
381 		 "	ljmpl $40, $5f\n" /* back to protected mode */
382 		 ".code32\n"
383 		 "5:\n"
384 		 "	movl %%cr4, %%eax\n"
385 		 "	btsl $5, %%eax\n" /* set PAE */
386 		 "	movl %%eax, %%cr4\n"
387 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
388 		 "	rdmsr\n"
389 		 "	btsl $8, %%eax\n" /* set LME */
390 		 "	wrmsr\n"
391 		 "	movl %%cr0, %%eax\n"
392 		 "	btsl  $31, %%eax\n" /* set PG */
393 		 "	movl %%eax, %%cr0\n"
394 		 "	ljmpl $8, $6f\n"    /* back to long mode */
395 		 ".code64\n\t"
396 		 "6:\n"
397 		 "	vmmcall\n"
398 		 ::: "rax", "rbx", "rcx", "rdx", "memory");
399 }
400 
401 static bool mode_switch_finished(struct test *test)
402 {
403     u64 cr0, cr4, efer;
404 
405     cr0  = test->vmcb->save.cr0;
406     cr4  = test->vmcb->save.cr4;
407     efer = test->vmcb->save.efer;
408 
409     /* Only expect VMMCALL intercepts */
410     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
411 	    return true;
412 
413     /* Jump over VMMCALL instruction */
414     test->vmcb->save.rip += 3;
415 
416     /* Do sanity checks */
417     switch (test->scratch) {
418     case 0:
419         /* Test should be in real mode now - check for this */
420         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
421             (cr4  & 0x00000020) || /* CR4.PAE */
422             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
423                 return true;
424         break;
425     case 2:
426         /* Test should be back in long-mode now - check for this */
427         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
428             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
429             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
430 		    return true;
431 	break;
432     }
433 
434     /* one step forward */
435     test->scratch += 1;
436 
437     return test->scratch == 2;
438 }
439 
440 static bool check_mode_switch(struct test *test)
441 {
442 	return test->scratch == 2;
443 }
444 
445 static void prepare_asid_zero(struct test *test)
446 {
447     test->vmcb->control.asid = 0;
448 }
449 
450 static void test_asid_zero(struct test *test)
451 {
452     asm volatile ("vmmcall\n\t");
453 }
454 
455 static bool check_asid_zero(struct test *test)
456 {
457     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
458 }
459 
460 static void sel_cr0_bug_prepare(struct test *test)
461 {
462     vmcb_ident(test->vmcb);
463     test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
464 }
465 
466 static bool sel_cr0_bug_finished(struct test *test)
467 {
468 	return true;
469 }
470 
471 static void sel_cr0_bug_test(struct test *test)
472 {
473     unsigned long cr0;
474 
475     /* read cr0, clear CD, and write back */
476     /* read cr0, set CD, and write back */
477     cr0 |= (1UL << 30);
478     write_cr0(cr0);
479 
480     /*
481      * If we are here the test failed, not sure what to do now because we
482      * are not in guest-mode anymore so we can't trigger an intercept.
483      * Trigger a triple fault for now.
484      */
485     printf("sel_cr0 test failed. Cannot recover from this - exiting\n");
486     exit(1);
487 }
488 
489 static bool sel_cr0_bug_check(struct test *test)
490 {
491     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
492 }
493 
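/*
 * The npt_* tests below each flip a bit in the nested page table built by
 * setup_svm(), let the guest touch the affected address, and check the
 * resulting #NPF exit code and error code in exit_info_1; the check
 * functions restore the modified entry afterwards.
 */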
494 static void npt_nx_prepare(struct test *test)
495 {
496 
497     u64 *pte;
498 
499     vmcb_ident(test->vmcb);
500     pte = get_pte((u64)null_test);
501 
502     *pte |= (1ULL << 63);
503 }
504 
505 static bool npt_nx_check(struct test *test)
506 {
507     u64 *pte = get_pte((u64)null_test);
508 
509     *pte &= ~(1ULL << 63);
510 
511     test->vmcb->save.efer |= (1 << 11);
512 
513     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
514            && (test->vmcb->control.exit_info_1 == 0x15);
515 }
516 
517 static void npt_us_prepare(struct test *test)
518 {
519     u64 *pte;
520 
521     vmcb_ident(test->vmcb);
522     pte = get_pte((u64)scratch_page);
523 
524     *pte &= ~(1ULL << 2);
525 }
526 
527 static void npt_us_test(struct test *test)
528 {
529     volatile u64 data;
530 
531     data = *scratch_page;
532 }
533 
534 static bool npt_us_check(struct test *test)
535 {
536     u64 *pte = get_pte((u64)scratch_page);
537 
538     *pte |= (1ULL << 2);
539 
540     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
541            && (test->vmcb->control.exit_info_1 == 0x05);
542 }
543 
544 static void npt_rsvd_prepare(struct test *test)
545 {
546 
547     vmcb_ident(test->vmcb);
548 
549     pdpe[0] |= (1ULL << 8);
550 }
551 
552 static bool npt_rsvd_check(struct test *test)
553 {
554     pdpe[0] &= ~(1ULL << 8);
555 
556     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
557             && (test->vmcb->control.exit_info_1 == 0x0f);
558 }
559 
560 static void npt_rw_prepare(struct test *test)
561 {
562 
563     u64 *pte;
564 
565     vmcb_ident(test->vmcb);
566     pte = get_pte(0x80000);
567 
568     *pte &= ~(1ULL << 1);
569 }
570 
571 static void npt_rw_test(struct test *test)
572 {
573     u64 *data = (void*)(0x80000);
574 
575     *data = 0;
576 }
577 
578 static bool npt_rw_check(struct test *test)
579 {
580     u64 *pte = get_pte(0x80000);
581 
582     *pte |= (1ULL << 1);
583 
584     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
585            && (test->vmcb->control.exit_info_1 == 0x07);
586 }
587 
588 static void npt_pfwalk_prepare(struct test *test)
589 {
590 
591     u64 *pte;
592 
593     vmcb_ident(test->vmcb);
594     pte = get_pte(read_cr3());
595 
596     *pte &= ~(1ULL << 1);
597 }
598 
599 static bool npt_pfwalk_check(struct test *test)
600 {
601     u64 *pte = get_pte(read_cr3());
602 
603     *pte |= (1ULL << 1);
604 
605     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
606            && (test->vmcb->control.exit_info_1 == 0x7)
607 	   && (test->vmcb->control.exit_info_2 == read_cr3());
608 }
609 
610 static void latency_prepare(struct test *test)
611 {
612     default_prepare(test);
613     runs = LATENCY_RUNS;
614     latvmrun_min = latvmexit_min = -1ULL;
615     latvmrun_max = latvmexit_max = 0;
616     vmrun_sum = vmexit_sum = 0;
617 }
618 
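/*
 * Guest body for latency_run_exit: tsc_start is taken in the host right
 * before VMRUN and read back here to time VMRUN; tsc_start is then retaken
 * before VMMCALL so latency_finished() can time the vmexit path. Repeats
 * for LATENCY_RUNS iterations.
 */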
619 static void latency_test(struct test *test)
620 {
621     u64 cycles;
622 
623 start:
624     tsc_end = rdtsc();
625 
626     cycles = tsc_end - tsc_start;
627 
628     if (cycles > latvmrun_max)
629         latvmrun_max = cycles;
630 
631     if (cycles < latvmrun_min)
632         latvmrun_min = cycles;
633 
634     vmrun_sum += cycles;
635 
636     tsc_start = rdtsc();
637 
638     asm volatile ("vmmcall" : : : "memory");
639     goto start;
640 }
641 
642 static bool latency_finished(struct test *test)
643 {
644     u64 cycles;
645 
646     tsc_end = rdtsc();
647 
648     cycles = tsc_end - tsc_start;
649 
650     if (cycles > latvmexit_max)
651         latvmexit_max = cycles;
652 
653     if (cycles < latvmexit_min)
654         latvmexit_min = cycles;
655 
656     vmexit_sum += cycles;
657 
658     test->vmcb->save.rip += 3;
659 
660     runs -= 1;
661 
662     return runs == 0;
663 }
664 
665 static bool latency_check(struct test *test)
666 {
667     printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
668             latvmrun_min, vmrun_sum / LATENCY_RUNS);
669     printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
670             latvmexit_min, vmexit_sum / LATENCY_RUNS);
671     return true;
672 }
673 
674 static void lat_svm_insn_prepare(struct test *test)
675 {
676     default_prepare(test);
677     runs = LATENCY_RUNS;
678     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
679     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
680     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
681 }
682 
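/*
 * Measure raw VMLOAD/VMSAVE/STGI/CLGI latencies entirely on the host side
 * after a single dummy guest exit (the guest body is null_test).
 */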
683 static bool lat_svm_insn_finished(struct test *test)
684 {
685     u64 vmcb_phys = virt_to_phys(test->vmcb);
686     u64 cycles;
687 
688     for ( ; runs != 0; runs--) {
689         tsc_start = rdtsc();
690         asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
691         cycles = rdtsc() - tsc_start;
692         if (cycles > latvmload_max)
693             latvmload_max = cycles;
694         if (cycles < latvmload_min)
695             latvmload_min = cycles;
696         vmload_sum += cycles;
697 
698         tsc_start = rdtsc();
699         asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
700         cycles = rdtsc() - tsc_start;
701         if (cycles > latvmsave_max)
702             latvmsave_max = cycles;
703         if (cycles < latvmsave_min)
704             latvmsave_min = cycles;
705         vmsave_sum += cycles;
706 
707         tsc_start = rdtsc();
708         asm volatile("stgi\n\t");
709         cycles = rdtsc() - tsc_start;
710         if (cycles > latstgi_max)
711             latstgi_max = cycles;
712         if (cycles < latstgi_min)
713             latstgi_min = cycles;
714         stgi_sum += cycles;
715 
716         tsc_start = rdtsc();
717         asm volatile("clgi\n\t");
718         cycles = rdtsc() - tsc_start;
719         if (cycles > latclgi_max)
720             latclgi_max = cycles;
721         if (cycles < latclgi_min)
722             latclgi_min = cycles;
723         clgi_sum += cycles;
724     }
725 
726     return true;
727 }
728 
729 static bool lat_svm_insn_check(struct test *test)
730 {
731     printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
732             latvmload_min, vmload_sum / LATENCY_RUNS);
733     printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
734             latvmsave_min, vmsave_sum / LATENCY_RUNS);
735     printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
736             latstgi_min, stgi_sum / LATENCY_RUNS);
737     printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
738             latclgi_min, clgi_sum / LATENCY_RUNS);
739     return true;
740 }
741 static struct test tests[] = {
742     { "null", default_supported, default_prepare, null_test,
743       default_finished, null_check },
744     { "vmrun", default_supported, default_prepare, test_vmrun,
745        default_finished, check_vmrun },
746     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
747       null_test, default_finished, check_no_vmrun_int },
748     { "cr3 read intercept", default_supported, prepare_cr3_intercept,
749       test_cr3_intercept, default_finished, check_cr3_intercept },
750     { "cr3 read nointercept", default_supported, default_prepare,
751       test_cr3_intercept, default_finished, check_cr3_nointercept },
752     { "cr3 read intercept emulate", default_supported,
753       prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
754       default_finished, check_cr3_intercept },
755     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
756       default_finished, check_next_rip },
757     { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
758        mode_switch_finished, check_mode_switch },
759     { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
760        default_finished, check_asid_zero },
761     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
762        sel_cr0_bug_finished, sel_cr0_bug_check },
763     { "npt_nx", npt_supported, npt_nx_prepare, null_test,
764 	    default_finished, npt_nx_check },
765     { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
766 	    default_finished, npt_us_check },
767     { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
768 	    default_finished, npt_rsvd_check },
769     { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
770 	    default_finished, npt_rw_check },
771     { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test,
772 	    default_finished, npt_pfwalk_check },
773     { "latency_run_exit", default_supported, latency_prepare, latency_test,
774       latency_finished, latency_check },
775     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
776       lat_svm_insn_finished, lat_svm_insn_check },
777 };
778 
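/*
 * Entry point: require SVM (CPUID 0x80000001 ECX bit 2), set up SVM and the
 * nested page table, then run every supported test against one shared VMCB
 * page and print a summary.
 */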
779 int main(int ac, char **av)
780 {
781     int i, nr, passed, done;
782     struct vmcb *vmcb;
783 
784     setup_vm();
785     smp_init();
786 
787     if (!(cpuid(0x80000001).c & 4)) {
788         printf("SVM not available\n");
789         return 0;
790     }
791 
792     setup_svm();
793 
794     vmcb = alloc_page();
795 
796     nr = ARRAY_SIZE(tests);
797     passed = done = 0;
798     for (i = 0; i < nr; ++i) {
799         if (!tests[i].supported())
800             continue;
801         done += 1;
802         passed += test_run(&tests[i], vmcb);
803     }
804 
805     printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed));
806     return passed == done ? 0 : 1;
807 }
808