xref: /kvm-unit-tests/x86/svm.c (revision a43baea076d05ae25b4e6bef9f1032d8f1f30ee0)
1 #include "svm.h"
2 #include "libcflat.h"
3 #include "processor.h"
4 #include "desc.h"
5 #include "msr.h"
6 #include "vm.h"
7 #include "smp.h"
8 #include "types.h"
9 
10 /* for the nested page table */
11 u64 *pml4e;
12 u64 *pdpe;
13 u64 *pde[4];
14 u64 *pte[2048];
15 void *scratch_page;
16 
17 #define LATENCY_RUNS 1000000
18 
19 u64 tsc_start;
20 u64 tsc_end;
21 
22 u64 vmrun_sum, vmexit_sum;
23 u64 vmsave_sum, vmload_sum;
24 u64 stgi_sum, clgi_sum;
25 u64 latvmrun_max;
26 u64 latvmrun_min;
27 u64 latvmexit_max;
28 u64 latvmexit_min;
29 u64 latvmload_max;
30 u64 latvmload_min;
31 u64 latvmsave_max;
32 u64 latvmsave_min;
33 u64 latstgi_max;
34 u64 latstgi_min;
35 u64 latclgi_max;
36 u64 latclgi_min;
37 u64 runs;
38 
39 static bool npt_supported(void)
40 {
41     return cpuid(0x8000000A).d & 1;
42 }
43 
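/*
 * Enable SVM on this CPU: point MSR_VM_HSAVE_PA at a freshly allocated
 * host save area, set EFER.SVME (plus EFER.NX for the NX test) and, when
 * nested paging is available, build the nested page table used by all
 * of the NPT tests below.
 */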
44 static void setup_svm(void)
45 {
46     void *hsave = alloc_page();
47     u64 *page, address;
48     int i, j;
49 
50     wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
51     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
52     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
53 
54     scratch_page = alloc_page();
55 
56     if (!npt_supported())
57         return;
58 
59     printf("NPT detected - running all tests with NPT enabled\n");
60 
61     /*
62      * Nested paging is supported, so build a nested page table.
63      * Build the table bottom-up and map everything with 4k pages
64      * to get enough granularity for the NPT unit tests.
65      */
66 
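    /*
     * 2048 PTE pages x 512 entries x 4 KiB cover the first 4 GiB of
     * guest-physical memory with an identity mapping.  0x067 at the PTE
     * level sets Present, Writable, User, Accessed and Dirty; 0x027 at
     * the upper levels sets Present, Writable, User and Accessed.
     */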
67     address = 0;
68 
69     /* PTE level */
70     for (i = 0; i < 2048; ++i) {
71         page = alloc_page();
72 
73         for (j = 0; j < 512; ++j, address += 4096)
74             page[j] = address | 0x067ULL;
75 
76         pte[i] = page;
77     }
78 
79     /* PDE level */
80     for (i = 0; i < 4; ++i) {
81         page = alloc_page();
82 
83         for (j = 0; j < 512; ++j)
84             page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
85 
86         pde[i] = page;
87     }
88 
89     /* PDPe level */
90     pdpe   = alloc_page();
91     for (i = 0; i < 4; ++i)
92        pdpe[i] = ((u64)(pde[i])) | 0x27;
93 
94     /* PML4e level */
95     pml4e    = alloc_page();
96     pml4e[0] = ((u64)pdpe) | 0x27;
97 }
98 
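/*
 * Return a pointer to the NPT PTE mapping guest-physical @address:
 * bits 21..31 pick one of the 2048 PTE pages, bits 12..20 the entry
 * within it.  Only the low 4 GiB populated by setup_svm() can be
 * looked up this way.
 */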
99 static u64 *npt_get_pte(u64 address)
100 {
101     int i1, i2;
102 
103     address >>= 12;
104     i1 = (address >> 9) & 0x7ff;
105     i2 = address & 0x1ff;
106 
107     return &pte[i1][i2];
108 }
109 
110 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
111                          u64 base, u32 limit, u32 attr)
112 {
113     seg->selector = selector;
114     seg->attrib = attr;
115     seg->limit = limit;
116     seg->base = base;
117 }
118 
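/*
 * Initialize @vmcb so the guest starts out as a copy of the current host
 * state: same segments, control registers, EFER, PAT and debug state, with
 * ASID 1 and only VMRUN and VMMCALL intercepted.  nested_cr3 is given the
 * virtual address of pml4e, which presumably only works because the test
 * environment identity-maps its memory.
 */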
119 static void vmcb_ident(struct vmcb *vmcb)
120 {
121     u64 vmcb_phys = virt_to_phys(vmcb);
122     struct vmcb_save_area *save = &vmcb->save;
123     struct vmcb_control_area *ctrl = &vmcb->control;
124     u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
125         | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
126     u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
127         | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
128     struct descriptor_table_ptr desc_table_ptr;
129 
130     memset(vmcb, 0, sizeof(*vmcb));
131     asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
132     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
133     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
134     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
135     vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
136     sgdt(&desc_table_ptr);
137     vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
138     sidt(&desc_table_ptr);
139     vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
140     ctrl->asid = 1;
141     save->cpl = 0;
142     save->efer = rdmsr(MSR_EFER);
143     save->cr4 = read_cr4();
144     save->cr3 = read_cr3();
145     save->cr0 = read_cr0();
146     save->dr7 = read_dr7();
147     save->dr6 = read_dr6();
148     save->cr2 = read_cr2();
149     save->g_pat = rdmsr(MSR_IA32_CR_PAT);
150     save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
151     ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
152 
153     if (npt_supported()) {
154         ctrl->nested_ctl = 1;
155         ctrl->nested_cr3 = (u64)pml4e;
156     }
157 }
158 
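/*
 * One entry per unit test: supported() gates the test, prepare() sets up
 * the shared VMCB on the host, guest_func() runs inside the guest (wrapped
 * by test_thunk(), which appends a VMMCALL), finished() decides whether to
 * re-enter the guest after a #VMEXIT, and succeeded() gives the verdict.
 */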
159 struct test {
160     const char *name;
161     bool (*supported)(void);
162     void (*prepare)(struct test *test);
163     void (*guest_func)(struct test *test);
164     bool (*finished)(struct test *test);
165     bool (*succeeded)(struct test *test);
166     struct vmcb *vmcb;
167     int exits;
168     ulong scratch;
169 };
170 
171 static void test_thunk(struct test *test)
172 {
173     test->guest_func(test);
174     asm volatile ("vmmcall" : : : "memory");
175 }
176 
177 struct regs {
178         u64 rax;
179         u64 rbx;
180         u64 rcx;
181         u64 rdx;
182         u64 cr2;
183         u64 rbp;
184         u64 rsi;
185         u64 rdi;
186         u64 r8;
187         u64 r9;
188         u64 r10;
189         u64 r11;
190         u64 r12;
191         u64 r13;
192         u64 r14;
193         u64 r15;
194         u64 rflags;
195 };
196 
197 struct regs regs;
198 
199 // rax handled specially below
200 
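/*
 * Exchange the general purpose registers with the in-memory 'regs' struct;
 * the constants are the byte offsets of the corresponding fields in struct
 * regs above.  Since xchg is its own inverse, the same sequence serves as
 * both SAVE_GPR_C and LOAD_GPR_C.  rax and rflags are transferred through
 * the VMCB instead (see test_run() below).
 */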
201 #define SAVE_GPR_C                              \
202         "xchg %%rbx, regs+0x8\n\t"              \
203         "xchg %%rcx, regs+0x10\n\t"             \
204         "xchg %%rdx, regs+0x18\n\t"             \
205         "xchg %%rbp, regs+0x28\n\t"             \
206         "xchg %%rsi, regs+0x30\n\t"             \
207         "xchg %%rdi, regs+0x38\n\t"             \
208         "xchg %%r8, regs+0x40\n\t"              \
209         "xchg %%r9, regs+0x48\n\t"              \
210         "xchg %%r10, regs+0x50\n\t"             \
211         "xchg %%r11, regs+0x58\n\t"             \
212         "xchg %%r12, regs+0x60\n\t"             \
213         "xchg %%r13, regs+0x68\n\t"             \
214         "xchg %%r14, regs+0x70\n\t"             \
215         "xchg %%r15, regs+0x78\n\t"
216 
217 #define LOAD_GPR_C      SAVE_GPR_C
218 
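/*
 * Run a single test: re-enter the guest until its finished() callback says
 * to stop.  The world switch is open-coded: clgi/vmload, copy rflags and
 * rax into the VMCB, swap the remaining GPRs with 'regs', vmrun, then undo
 * it all and vmsave/stgi.  tsc_start/tsc_end around the switch feed the
 * latency tests.
 */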
219 static bool test_run(struct test *test, struct vmcb *vmcb)
220 {
221     u64 vmcb_phys = virt_to_phys(vmcb);
222     u64 guest_stack[10000];
223     bool success;
224 
225     test->vmcb = vmcb;
226     test->prepare(test);
227     vmcb->save.rip = (ulong)test_thunk;
228     vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
229     regs.rdi = (ulong)test;
230     do {
231         tsc_start = rdtsc();
232         asm volatile (
233             "clgi \n\t"
234             "vmload \n\t"
235             "mov regs+0x80, %%r15\n\t"  // rflags
236             "mov %%r15, 0x170(%0)\n\t"
237             "mov regs, %%r15\n\t"       // rax
238             "mov %%r15, 0x1f8(%0)\n\t"
239             LOAD_GPR_C
240             "vmrun \n\t"
241             SAVE_GPR_C
242             "mov 0x170(%0), %%r15\n\t"  // rflags
243             "mov %%r15, regs+0x80\n\t"
244             "mov 0x1f8(%0), %%r15\n\t"  // rax
245             "mov %%r15, regs\n\t"
246             "vmsave \n\t"
247             "stgi"
248             : : "a"(vmcb_phys)
249             : "rbx", "rcx", "rdx", "rsi",
250               "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
251               "memory");
252         tsc_end = rdtsc();
253         ++test->exits;
254     } while (!test->finished(test));
255 
256 
257     success = test->succeeded(test);
258 
259     printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");
260 
261     return success;
262 }
263 
264 static bool smp_supported(void)
265 {
266 	return cpu_count() > 1;
267 }
268 
269 static bool default_supported(void)
270 {
271     return true;
272 }
273 
274 static void default_prepare(struct test *test)
275 {
276     vmcb_ident(test->vmcb);
277     cli();
278 }
279 
280 static bool default_finished(struct test *test)
281 {
282     return true; /* one vmexit */
283 }
284 
285 static void null_test(struct test *test)
286 {
287 }
288 
289 static bool null_check(struct test *test)
290 {
291     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
292 }
293 
294 static void prepare_no_vmrun_int(struct test *test)
295 {
296     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
297 }
298 
299 static bool check_no_vmrun_int(struct test *test)
300 {
301     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
302 }
303 
304 static void test_vmrun(struct test *test)
305 {
306     asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
307 }
308 
309 static bool check_vmrun(struct test *test)
310 {
311     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
312 }
313 
314 static void prepare_cr3_intercept(struct test *test)
315 {
316     default_prepare(test);
317     test->vmcb->control.intercept_cr_read |= 1 << 3;
318 }
319 
320 static void test_cr3_intercept(struct test *test)
321 {
322     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
323 }
324 
325 static bool check_cr3_intercept(struct test *test)
326 {
327     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
328 }
329 
330 static bool check_cr3_nointercept(struct test *test)
331 {
332     return null_check(test) && test->scratch == read_cr3();
333 }
334 
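/*
 * Runs on CPU 1 while the guest spins on CPU 0 in test_cr3_intercept_bypass():
 * once the guest signals readiness through test->scratch, rewrite the
 * instruction at the mmio_insn label into "mov %cr3, %rax; nop".  The check
 * still expects the CR3 read intercept to fire, presumably verifying that
 * the emulator does not bypass the intercept using a stale instruction copy.
 */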
335 static void corrupt_cr3_intercept_bypass(void *_test)
336 {
337     struct test *test = _test;
338     extern volatile u32 mmio_insn;
339 
340     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
341         pause();
342     pause();
343     pause();
344     pause();
345     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
346 }
347 
348 static void prepare_cr3_intercept_bypass(struct test *test)
349 {
350     default_prepare(test);
351     test->vmcb->control.intercept_cr_read |= 1 << 3;
352     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
353 }
354 
355 static void test_cr3_intercept_bypass(struct test *test)
356 {
357     ulong a = 0xa0000;
358 
359     test->scratch = 1;
360     while (test->scratch != 2)
361         barrier();
362 
363     asm volatile ("mmio_insn: mov %0, (%0); nop"
364                   : "+a"(a) : : "memory");
365     test->scratch = a;
366 }
367 
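/*
 * NextRIP save on #VMEXIT is advertised by CPUID Fn8000_000A EDX bit 3.
 * The test intercepts RDTSC and checks that control.next_rip points at
 * the following instruction (the exp_next_rip label).
 */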
368 static bool next_rip_supported(void)
369 {
370     return (cpuid(SVM_CPUID_FUNC).d & 8);
371 }
372 
373 static void prepare_next_rip(struct test *test)
374 {
375     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
376 }
377 
378 
379 static void test_next_rip(struct test *test)
380 {
381     asm volatile ("rdtsc\n\t"
382                   ".globl exp_next_rip\n\t"
383                   "exp_next_rip:\n\t" ::: "eax", "edx");
384 }
385 
386 static bool check_next_rip(struct test *test)
387 {
388     extern char exp_next_rip;
389     unsigned long address = (unsigned long)&exp_next_rip;
390 
391     return address == test->vmcb->control.next_rip;
392 }
393 
394 static void prepare_mode_switch(struct test *test)
395 {
396     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
397                                              |  (1ULL << UD_VECTOR)
398                                              |  (1ULL << DF_VECTOR)
399                                              |  (1ULL << PF_VECTOR);
400     test->scratch = 0;
401 }
402 
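/*
 * Guest code: drop from 64-bit long mode through 32-bit and 16-bit
 * protected mode down to real mode, VMMCALL there, then climb back up to
 * long mode and VMMCALL again.  mode_switch_finished() sanity-checks
 * CR0/CR4/EFER at each of the two exits.
 */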
403 static void test_mode_switch(struct test *test)
404 {
405     asm volatile("	cli\n"
406 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
407 		 "1:\n"
408 		 "	.long 2f\n"
409 		 "	.long " xstr(KERNEL_CS32) "\n"
410 		 ".code32\n"
411 		 "2:\n"
412 		 "	movl %%cr0, %%eax\n"
413 		 "	btcl  $31, %%eax\n" /* clear PG */
414 		 "	movl %%eax, %%cr0\n"
415 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
416 		 "	rdmsr\n"
417 		 "	btcl $8, %%eax\n" /* clear LME */
418 		 "	wrmsr\n"
419 		 "	movl %%cr4, %%eax\n"
420 		 "	btcl $5, %%eax\n" /* clear PAE */
421 		 "	movl %%eax, %%cr4\n"
422 		 "	movw %[ds16], %%ax\n"
423 		 "	movw %%ax, %%ds\n"
424 		 "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
425 		 ".code16\n"
426 		 "3:\n"
427 		 "	movl %%cr0, %%eax\n"
428 		 "	btcl $0, %%eax\n" /* clear PE  */
429 		 "	movl %%eax, %%cr0\n"
430 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
431 		 "4:\n"
432 		 "	vmmcall\n"
433 		 "	movl %%cr0, %%eax\n"
434 		 "	btsl $0, %%eax\n" /* set PE  */
435 		 "	movl %%eax, %%cr0\n"
436 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
437 		 ".code32\n"
438 		 "5:\n"
439 		 "	movl %%cr4, %%eax\n"
440 		 "	btsl $5, %%eax\n" /* set PAE */
441 		 "	movl %%eax, %%cr4\n"
442 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
443 		 "	rdmsr\n"
444 		 "	btsl $8, %%eax\n" /* set LME */
445 		 "	wrmsr\n"
446 		 "	movl %%cr0, %%eax\n"
447 		 "	btsl  $31, %%eax\n" /* set PG */
448 		 "	movl %%eax, %%cr0\n"
449 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
450 		 ".code64\n\t"
451 		 "6:\n"
452 		 "	vmmcall\n"
453 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
454 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
455 		 : "rax", "rbx", "rcx", "rdx", "memory");
456 }
457 
458 static bool mode_switch_finished(struct test *test)
459 {
460     u64 cr0, cr4, efer;
461 
462     cr0  = test->vmcb->save.cr0;
463     cr4  = test->vmcb->save.cr4;
464     efer = test->vmcb->save.efer;
465 
466     /* Only expect VMMCALL intercepts */
467     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
468 	    return true;
469 
470     /* Jump over VMMCALL instruction */
471     test->vmcb->save.rip += 3;
472 
473     /* Do sanity checks */
474     switch (test->scratch) {
475     case 0:
476         /* Test should be in real mode now - check for this */
477         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
478             (cr4  & 0x00000020) || /* CR4.PAE */
479             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
480                 return true;
481         break;
482     case 1:
483         /* Test should be back in long-mode now - check for this */
484         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
485             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
486             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
487 		    return true;
488 	break;
489     }
490 
491     /* one step forward */
492     test->scratch += 1;
493 
494     return test->scratch == 2;
495 }
496 
497 static bool check_mode_switch(struct test *test)
498 {
499 	return test->scratch == 2;
500 }
501 
502 static void prepare_asid_zero(struct test *test)
503 {
504     test->vmcb->control.asid = 0;
505 }
506 
507 static void test_asid_zero(struct test *test)
508 {
509     asm volatile ("vmmcall\n\t");
510 }
511 
512 static bool check_asid_zero(struct test *test)
513 {
514     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
515 }
516 
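/*
 * Apparently a regression test for a KVM bug around the selective CR0
 * write intercept: the guest writes CR0 with only the CD bit changed and
 * the host expects an SVM_EXIT_CR0_SEL_WRITE exit.  If the write falls
 * through instead, the guest cannot recover and aborts the whole run.
 */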
517 static void sel_cr0_bug_prepare(struct test *test)
518 {
519     vmcb_ident(test->vmcb);
520     test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
521 }
522 
523 static bool sel_cr0_bug_finished(struct test *test)
524 {
525 	return true;
526 }
527 
528 static void sel_cr0_bug_test(struct test *test)
529 {
530     unsigned long cr0;
531 
532     /* read cr0, set CD, and write back */
533     cr0  = read_cr0();
534     cr0 |= (1UL << 30);
535     write_cr0(cr0);
536 
537     /*
538      * If we reach this point the test failed: we are no longer in
539      * guest mode, so we cannot trigger the intercept from here.
540      * Bail out of the whole test run for now.
541      */
542     printf("sel_cr0 test failed. Cannot recover from this - exiting\n");
543     exit(1);
544 }
545 
546 static bool sel_cr0_bug_check(struct test *test)
547 {
548     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
549 }
550 
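/*
 * NPT tests: each prepare() flips a single bit in one nested page table
 * entry, the guest touches the affected guest-physical address, and
 * check() verifies the #NPF exit code plus the page-fault-style error
 * code in exit_info_1 before restoring the entry for later tests.
 */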
551 static void npt_nx_prepare(struct test *test)
552 {
553 
554     u64 *pte;
555 
556     vmcb_ident(test->vmcb);
557     pte = npt_get_pte((u64)null_test);
558 
559     *pte |= (1ULL << 63);
560 }
561 
562 static bool npt_nx_check(struct test *test)
563 {
564     u64 *pte = npt_get_pte((u64)null_test);
565 
566     *pte &= ~(1ULL << 63);
567 
568     test->vmcb->save.efer |= (1 << 11);
569 
570     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
571            && (test->vmcb->control.exit_info_1 == 0x15);
572 }
573 
574 static void npt_us_prepare(struct test *test)
575 {
576     u64 *pte;
577 
578     vmcb_ident(test->vmcb);
579     pte = npt_get_pte((u64)scratch_page);
580 
581     *pte &= ~(1ULL << 2);
582 }
583 
584 static void npt_us_test(struct test *test)
585 {
586     (void) *(volatile u64 *)scratch_page;
587 }
588 
589 static bool npt_us_check(struct test *test)
590 {
591     u64 *pte = npt_get_pte((u64)scratch_page);
592 
593     *pte |= (1ULL << 2);
594 
595     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
596            && (test->vmcb->control.exit_info_1 == 0x05);
597 }
598 
599 static void npt_rsvd_prepare(struct test *test)
600 {
601 
602     vmcb_ident(test->vmcb);
603 
604     pdpe[0] |= (1ULL << 8);
605 }
606 
607 static bool npt_rsvd_check(struct test *test)
608 {
609     pdpe[0] &= ~(1ULL << 8);
610 
611     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
612             && (test->vmcb->control.exit_info_1 == 0x0f);
613 }
614 
615 static void npt_rw_prepare(struct test *test)
616 {
617 
618     u64 *pte;
619 
620     vmcb_ident(test->vmcb);
621     pte = npt_get_pte(0x80000);
622 
623     *pte &= ~(1ULL << 1);
624 }
625 
626 static void npt_rw_test(struct test *test)
627 {
628     u64 *data = (void*)(0x80000);
629 
630     *data = 0;
631 }
632 
633 static bool npt_rw_check(struct test *test)
634 {
635     u64 *pte = npt_get_pte(0x80000);
636 
637     *pte |= (1ULL << 1);
638 
639     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
640            && (test->vmcb->control.exit_info_1 == 0x07);
641 }
642 
643 static void npt_pfwalk_prepare(struct test *test)
644 {
645 
646     u64 *pte;
647 
648     vmcb_ident(test->vmcb);
649     pte = npt_get_pte(read_cr3());
650 
651     *pte &= ~(1ULL << 1);
652 }
653 
654 static bool npt_pfwalk_check(struct test *test)
655 {
656     u64 *pte = npt_get_pte(read_cr3());
657 
658     *pte |= (1ULL << 1);
659 
660     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
661            && (test->vmcb->control.exit_info_1 == 0x7)
662 	   && (test->vmcb->control.exit_info_2 == read_cr3());
663 }
664 
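/*
 * Latency tests: guest and host ping-pong via VMMCALL for LATENCY_RUNS
 * iterations.  test_run() takes tsc_start right before the world switch
 * and the guest reads the TSC on entry, giving the VMRUN cost; the guest
 * takes a new tsc_start before each VMMCALL and latency_finished() reads
 * the TSC on the host side, giving the #VMEXIT cost.
 */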
665 static void latency_prepare(struct test *test)
666 {
667     default_prepare(test);
668     runs = LATENCY_RUNS;
669     latvmrun_min = latvmexit_min = -1ULL;
670     latvmrun_max = latvmexit_max = 0;
671     vmrun_sum = vmexit_sum = 0;
672 }
673 
674 static void latency_test(struct test *test)
675 {
676     u64 cycles;
677 
678 start:
679     tsc_end = rdtsc();
680 
681     cycles = tsc_end - tsc_start;
682 
683     if (cycles > latvmrun_max)
684         latvmrun_max = cycles;
685 
686     if (cycles < latvmrun_min)
687         latvmrun_min = cycles;
688 
689     vmrun_sum += cycles;
690 
691     tsc_start = rdtsc();
692 
693     asm volatile ("vmmcall" : : : "memory");
694     goto start;
695 }
696 
697 static bool latency_finished(struct test *test)
698 {
699     u64 cycles;
700 
701     tsc_end = rdtsc();
702 
703     cycles = tsc_end - tsc_start;
704 
705     if (cycles > latvmexit_max)
706         latvmexit_max = cycles;
707 
708     if (cycles < latvmexit_min)
709         latvmexit_min = cycles;
710 
711     vmexit_sum += cycles;
712 
713     test->vmcb->save.rip += 3;
714 
715     runs -= 1;
716 
717     return runs == 0;
718 }
719 
720 static bool latency_check(struct test *test)
721 {
722     printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
723             latvmrun_min, vmrun_sum / LATENCY_RUNS);
724     printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
725             latvmexit_min, vmexit_sum / LATENCY_RUNS);
726     return true;
727 }
728 
729 static void lat_svm_insn_prepare(struct test *test)
730 {
731     default_prepare(test);
732     runs = LATENCY_RUNS;
733     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
734     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
735     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
736 }
737 
738 static bool lat_svm_insn_finished(struct test *test)
739 {
740     u64 vmcb_phys = virt_to_phys(test->vmcb);
741     u64 cycles;
742 
743     for ( ; runs != 0; runs--) {
744         tsc_start = rdtsc();
745         asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
746         cycles = rdtsc() - tsc_start;
747         if (cycles > latvmload_max)
748             latvmload_max = cycles;
749         if (cycles < latvmload_min)
750             latvmload_min = cycles;
751         vmload_sum += cycles;
752 
753         tsc_start = rdtsc();
754         asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
755         cycles = rdtsc() - tsc_start;
756         if (cycles > latvmsave_max)
757             latvmsave_max = cycles;
758         if (cycles < latvmsave_min)
759             latvmsave_min = cycles;
760         vmsave_sum += cycles;
761 
762         tsc_start = rdtsc();
763         asm volatile("stgi\n\t");
764         cycles = rdtsc() - tsc_start;
765         if (cycles > latstgi_max)
766             latstgi_max = cycles;
767         if (cycles < latstgi_min)
768             latstgi_min = cycles;
769         stgi_sum += cycles;
770 
771         tsc_start = rdtsc();
772         asm volatile("clgi\n\t");
773         cycles = rdtsc() - tsc_start;
774         if (cycles > latclgi_max)
775             latclgi_max = cycles;
776         if (cycles < latclgi_min)
777             latclgi_min = cycles;
778         clgi_sum += cycles;
779     }
780 
781     return true;
782 }
783 
784 static bool lat_svm_insn_check(struct test *test)
785 {
786     printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
787             latvmload_min, vmload_sum / LATENCY_RUNS);
788     printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
789             latvmsave_min, vmsave_sum / LATENCY_RUNS);
790     printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
791             latstgi_min, stgi_sum / LATENCY_RUNS);
792     printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
793             latclgi_min, clgi_sum / LATENCY_RUNS);
794     return true;
795 }

796 static struct test tests[] = {
797     { "null", default_supported, default_prepare, null_test,
798       default_finished, null_check },
799     { "vmrun", default_supported, default_prepare, test_vmrun,
800        default_finished, check_vmrun },
801     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
802       null_test, default_finished, check_no_vmrun_int },
803     { "cr3 read intercept", default_supported, prepare_cr3_intercept,
804       test_cr3_intercept, default_finished, check_cr3_intercept },
805     { "cr3 read nointercept", default_supported, default_prepare,
806       test_cr3_intercept, default_finished, check_cr3_nointercept },
807     { "cr3 read intercept emulate", smp_supported,
808       prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
809       default_finished, check_cr3_intercept },
810     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
811       default_finished, check_next_rip },
812     { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
813        mode_switch_finished, check_mode_switch },
814     { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
815        default_finished, check_asid_zero },
816     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
817        sel_cr0_bug_finished, sel_cr0_bug_check },
818     { "npt_nx", npt_supported, npt_nx_prepare, null_test,
819 	    default_finished, npt_nx_check },
820     { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
821 	    default_finished, npt_us_check },
822     { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
823 	    default_finished, npt_rsvd_check },
824     { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
825 	    default_finished, npt_rw_check },
826     { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test,
827 	    default_finished, npt_pfwalk_check },
828     { "latency_run_exit", default_supported, latency_prepare, latency_test,
829       latency_finished, latency_check },
830     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
831       lat_svm_insn_finished, lat_svm_insn_check },
832 };
833 
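/*
 * CPUID Fn8000_0001 ECX bit 2 advertises SVM.  When it is present, enable
 * SVM, allocate a single VMCB shared by all tests, run every supported
 * test and return non-zero if any of them failed.
 */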
834 int main(int ac, char **av)
835 {
836     int i, nr, passed, done;
837     struct vmcb *vmcb;
838 
839     setup_vm();
840     smp_init();
841 
842     if (!(cpuid(0x80000001).c & 4)) {
843         printf("SVM not available\n");
844         return 0;
845     }
846 
847     setup_svm();
848 
849     vmcb = alloc_page();
850 
851     nr = ARRAY_SIZE(tests);
852     passed = done = 0;
853     for (i = 0; i < nr; ++i) {
854         if (!tests[i].supported())
855             continue;
856         done += 1;
857         passed += test_run(&tests[i], vmcb);
858     }
859 
860     printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed));
861     return passed == done ? 0 : 1;
862 }
863