xref: /kvm-unit-tests/x86/svm.c (revision 3d46571bca758d6d8538a8c9ffd83fc1eb42b933)
1 #include "svm.h"
2 #include "libcflat.h"
3 #include "processor.h"
4 #include "desc.h"
5 #include "msr.h"
6 #include "vm.h"
7 #include "smp.h"
8 #include "types.h"
9 
10 /* for the nested page table */
11 u64 *pml4e;
12 u64 *pdpe;
13 u64 *pde[4];
14 u64 *pte[2048];
15 void *scratch_page;
16 
17 #define LATENCY_RUNS 1000000
18 
19 u64 tsc_start;
20 u64 tsc_end;
21 
22 u64 vmrun_sum, vmexit_sum;
23 u64 vmsave_sum, vmload_sum;
24 u64 stgi_sum, clgi_sum;
25 u64 latvmrun_max;
26 u64 latvmrun_min;
27 u64 latvmexit_max;
28 u64 latvmexit_min;
29 u64 latvmload_max;
30 u64 latvmload_min;
31 u64 latvmsave_max;
32 u64 latvmsave_min;
33 u64 latstgi_max;
34 u64 latstgi_min;
35 u64 latclgi_max;
36 u64 latclgi_min;
37 u64 runs;
38 
39 u8 *io_bitmap;
40 u8 io_bitmap_area[16384];
41 
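/*
 * Nested paging (NPT) support is reported in CPUID Fn8000_000A, EDX bit 0.
 * When it is set, all tests below run with nested paging enabled.
 */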
42 static bool npt_supported(void)
43 {
44    return cpuid(0x8000000A).d & 1;
45 }
46 
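/*
 * Enable SVM on this CPU: register a host save area via MSR_VM_HSAVE_PA,
 * set EFER.SVME (and EFER.NX), carve a page-aligned I/O permission bitmap
 * out of io_bitmap_area and, when nested paging is supported, build an
 * identity-mapped nested page table covering the first 4GiB.
 */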
47 static void setup_svm(void)
48 {
49     void *hsave = alloc_page();
50     u64 *page, address;
51     int i,j;
52 
53     wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
54     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
55     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
56 
57     scratch_page = alloc_page();
58 
59     io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);
60 
61     if (!npt_supported())
62         return;
63 
64     printf("NPT detected - running all tests with NPT enabled\n");
65 
66     /*
67      * Nested paging supported - Build a nested page table
68      * Build the page-table bottom-up and map everything with 4k pages
69      * to get enough granularity for the NPT unit-tests.
70      */
71 
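    /*
     * 2048 PTE pages x 512 entries x 4KiB map guest-physical 0-4GiB
     * one-to-one.  Leaf entries use flags 0x067 (Present, Writable, User,
     * Accessed, Dirty); the non-leaf entries below use 0x027 (Present,
     * Writable, User, Accessed).
     */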
72     address = 0;
73 
74     /* PTE level */
75     for (i = 0; i < 2048; ++i) {
76         page = alloc_page();
77 
78         for (j = 0; j < 512; ++j, address += 4096)
79             page[j] = address | 0x067ULL;
80 
81         pte[i] = page;
82     }
83 
84     /* PDE level */
85     for (i = 0; i < 4; ++i) {
86         page = alloc_page();
87 
88         for (j = 0; j < 512; ++j)
89             page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
90 
91         pde[i] = page;
92     }
93 
94     /* PDPe level */
95     pdpe   = alloc_page();
96     for (i = 0; i < 4; ++i)
97        pdpe[i] = ((u64)(pde[i])) | 0x27;
98 
99     /* PML4e level */
100     pml4e    = alloc_page();
101     pml4e[0] = ((u64)pdpe) | 0x27;
102 }
103 
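/*
 * Return a pointer to the NPT leaf entry mapping @address: bits 31:21
 * select one of the 2048 PTE pages, bits 20:12 the entry within it.
 * Only meaningful for addresses below the 4GiB covered by the table.
 */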
104 static u64 *npt_get_pte(u64 address)
105 {
106     int i1, i2;
107 
108     address >>= 12;
109     i1 = (address >> 9) & 0x7ff;
110     i2 = address & 0x1ff;
111 
112     return &pte[i1][i2];
113 }
114 
115 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
116                          u64 base, u32 limit, u32 attr)
117 {
118     seg->selector = selector;
119     seg->attrib = attr;
120     seg->limit = limit;
121     seg->base = base;
122 }
123 
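/*
 * Initialize a VMCB so the guest starts as a near copy of the current host
 * context: flat 64-bit segments, the host's control registers, EFER and PAT,
 * ASID 1, and intercepts for VMRUN (architecturally required) and VMMCALL
 * (used by the guests to force a #VMEXIT).  When NPT is available, nested
 * paging is enabled with the identity-mapped table built in setup_svm().
 */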
124 static void vmcb_ident(struct vmcb *vmcb)
125 {
126     u64 vmcb_phys = virt_to_phys(vmcb);
127     struct vmcb_save_area *save = &vmcb->save;
128     struct vmcb_control_area *ctrl = &vmcb->control;
129     u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
130         | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
131     u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
132         | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
133     struct descriptor_table_ptr desc_table_ptr;
134 
135     memset(vmcb, 0, sizeof(*vmcb));
136     asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
137     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
138     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
139     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
140     vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
141     sgdt(&desc_table_ptr);
142     vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
143     sidt(&desc_table_ptr);
144     vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
145     ctrl->asid = 1;
146     save->cpl = 0;
147     save->efer = rdmsr(MSR_EFER);
148     save->cr4 = read_cr4();
149     save->cr3 = read_cr3();
150     save->cr0 = read_cr0();
151     save->dr7 = read_dr7();
152     save->dr6 = read_dr6();
153     save->cr2 = read_cr2();
154     save->g_pat = rdmsr(MSR_IA32_CR_PAT);
155     save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
156     ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
157     ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
158 
159     if (npt_supported()) {
160         ctrl->nested_ctl = 1;
161         ctrl->nested_cr3 = (u64)pml4e;
162     }
163 }
164 
165 struct test {
166     const char *name;
167     bool (*supported)(void);
168     void (*prepare)(struct test *test);
169     void (*guest_func)(struct test *test);
170     bool (*finished)(struct test *test);
171     bool (*succeeded)(struct test *test);
172     struct vmcb *vmcb;
173     int exits;
174     ulong scratch;
175 };
176 
177 static void test_thunk(struct test *test)
178 {
179     test->guest_func(test);
180     asm volatile ("vmmcall" : : : "memory");
181 }
182 
183 struct regs {
184         u64 rax;
185         u64 rbx;
186         u64 rcx;
187         u64 rdx;
188         u64 cr2;
189         u64 rbp;
190         u64 rsi;
191         u64 rdi;
192         u64 r8;
193         u64 r9;
194         u64 r10;
195         u64 r11;
196         u64 r12;
197         u64 r13;
198         u64 r14;
199         u64 r15;
200         u64 rflags;
201 };
202 
203 struct regs regs;
204 
205 // rax handled specially below
206 
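/*
 * Exchange the guest's general purpose registers (except rax) with the
 * copies in struct regs; the constants are byte offsets into that struct.
 * Using xchg lets the same sequence serve as both save and restore, which
 * is why LOAD_GPR_C is simply an alias for SAVE_GPR_C.  rax and rflags are
 * transferred separately via the VMCB in test_run() below.
 */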
207 #define SAVE_GPR_C                              \
208         "xchg %%rbx, regs+0x8\n\t"              \
209         "xchg %%rcx, regs+0x10\n\t"             \
210         "xchg %%rdx, regs+0x18\n\t"             \
211         "xchg %%rbp, regs+0x28\n\t"             \
212         "xchg %%rsi, regs+0x30\n\t"             \
213         "xchg %%rdi, regs+0x38\n\t"             \
214         "xchg %%r8, regs+0x40\n\t"              \
215         "xchg %%r9, regs+0x48\n\t"              \
216         "xchg %%r10, regs+0x50\n\t"             \
217         "xchg %%r11, regs+0x58\n\t"             \
218         "xchg %%r12, regs+0x60\n\t"             \
219         "xchg %%r13, regs+0x68\n\t"             \
220         "xchg %%r14, regs+0x70\n\t"             \
221         "xchg %%r15, regs+0x78\n\t"
222 
223 #define LOAD_GPR_C      SAVE_GPR_C
224 
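/*
 * Run a single test: point the guest at test_thunk() with its own stack,
 * then keep re-entering it until test->finished() reports completion.
 * Around each VMRUN the guest GPRs are exchanged with struct regs, and
 * CLGI/STGI keep the global interrupt flag clear while guest state is
 * loaded in the CPU by VMLOAD/VMSAVE.
 */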
225 static bool test_run(struct test *test, struct vmcb *vmcb)
226 {
227     u64 vmcb_phys = virt_to_phys(vmcb);
228     u64 guest_stack[10000];
229     bool success;
230 
231     test->vmcb = vmcb;
232     test->prepare(test);
233     vmcb->save.rip = (ulong)test_thunk;
234     vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
235     regs.rdi = (ulong)test;
236     do {
237         tsc_start = rdtsc();
238         asm volatile (
239             "clgi \n\t"
240             "vmload \n\t"
241             "mov regs+0x80, %%r15\n\t"  // rflags
242             "mov %%r15, 0x170(%0)\n\t"
243             "mov regs, %%r15\n\t"       // rax
244             "mov %%r15, 0x1f8(%0)\n\t"
245             LOAD_GPR_C
246             "vmrun \n\t"
247             SAVE_GPR_C
248             "mov 0x170(%0), %%r15\n\t"  // rflags
249             "mov %%r15, regs+0x80\n\t"
250             "mov 0x1f8(%0), %%r15\n\t"  // rax
251             "mov %%r15, regs\n\t"
252             "vmsave \n\t"
253             "stgi"
254             : : "a"(vmcb_phys)
255             : "rbx", "rcx", "rdx", "rsi",
256               "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
257               "memory");
258         tsc_end = rdtsc();
259         ++test->exits;
260     } while (!test->finished(test));
261 
262 
263     success = test->succeeded(test);
264 
265     printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");
266 
267     return success;
268 }
269 
270 static bool smp_supported(void)
271 {
272 	return cpu_count() > 1;
273 }
274 
275 static bool default_supported(void)
276 {
277     return true;
278 }
279 
280 static void default_prepare(struct test *test)
281 {
282     vmcb_ident(test->vmcb);
283     cli();
284 }
285 
286 static bool default_finished(struct test *test)
287 {
288     return true; /* one vmexit */
289 }
290 
291 static void null_test(struct test *test)
292 {
293 }
294 
295 static bool null_check(struct test *test)
296 {
297     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
298 }
299 
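/*
 * The VMRUN intercept bit is mandatory: running a VMCB with it cleared
 * must make VMRUN fail with VMEXIT_INVALID (SVM_EXIT_ERR).
 */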
300 static void prepare_no_vmrun_int(struct test *test)
301 {
302     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
303 }
304 
305 static bool check_no_vmrun_int(struct test *test)
306 {
307     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
308 }
309 
310 static void test_vmrun(struct test *test)
311 {
312     asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
313 }
314 
315 static bool check_vmrun(struct test *test)
316 {
317     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
318 }
319 
320 static void prepare_cr3_intercept(struct test *test)
321 {
322     default_prepare(test);
323     test->vmcb->control.intercept_cr_read |= 1 << 3;
324 }
325 
326 static void test_cr3_intercept(struct test *test)
327 {
328     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
329 }
330 
331 static bool check_cr3_intercept(struct test *test)
332 {
333     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
334 }
335 
336 static bool check_cr3_nointercept(struct test *test)
337 {
338     return null_check(test) && test->scratch == read_cr3();
339 }
340 
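/*
 * Runs on CPU 1 for the "cr3 read intercept emulate" test: wait for the
 * guest to signal readiness through test->scratch, then patch the
 * instruction at mmio_insn into "mov %cr3, %rax; nop" (0f 20 d8 90).
 * The original instruction stores to 0xa0000 (legacy VGA MMIO), so it is
 * emulated by the hypervisor; the test expects the CR3 read intercept to
 * fire anyway rather than being bypassed when the patched bytes are
 * emulated.
 */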
341 static void corrupt_cr3_intercept_bypass(void *_test)
342 {
343     struct test *test = _test;
344     extern volatile u32 mmio_insn;
345 
346     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
347         pause();
348     pause();
349     pause();
350     pause();
351     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
352 }
353 
354 static void prepare_cr3_intercept_bypass(struct test *test)
355 {
356     default_prepare(test);
357     test->vmcb->control.intercept_cr_read |= 1 << 3;
358     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
359 }
360 
361 static void test_cr3_intercept_bypass(struct test *test)
362 {
363     ulong a = 0xa0000;
364 
365     test->scratch = 1;
366     while (test->scratch != 2)
367         barrier();
368 
369     asm volatile ("mmio_insn: mov %0, (%0); nop"
370                   : "+a"(a) : : "memory");
371     test->scratch = a;
372 }
373 
374 static bool next_rip_supported(void)
375 {
376     return (cpuid(SVM_CPUID_FUNC).d & 8);
377 }
378 
379 static void prepare_next_rip(struct test *test)
380 {
381     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
382 }
383 
384 
385 static void test_next_rip(struct test *test)
386 {
387     asm volatile ("rdtsc\n\t"
388                   ".globl exp_next_rip\n\t"
389                   "exp_next_rip:\n\t" ::: "eax", "edx");
390 }
391 
392 static bool check_next_rip(struct test *test)
393 {
394     extern char exp_next_rip;
395     unsigned long address = (unsigned long)&exp_next_rip;
396 
397     return address == test->vmcb->control.next_rip;
398 }
399 
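/*
 * Mode switch test: the guest drops from 64-bit long mode through protected
 * mode into real mode, issues a VMMCALL, climbs back up to long mode and
 * issues a second VMMCALL.  mode_switch_finished() checks CR0/CR4/EFER at
 * each exit and counts the stages in test->scratch.
 */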
400 static void prepare_mode_switch(struct test *test)
401 {
402     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
403                                              |  (1ULL << UD_VECTOR)
404                                              |  (1ULL << DF_VECTOR)
405                                              |  (1ULL << PF_VECTOR);
406     test->scratch = 0;
407 }
408 
409 static void test_mode_switch(struct test *test)
410 {
411     asm volatile("	cli\n"
412 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
413 		 "1:\n"
414 		 "	.long 2f\n"
415 		 "	.long " xstr(KERNEL_CS32) "\n"
416 		 ".code32\n"
417 		 "2:\n"
418 		 "	movl %%cr0, %%eax\n"
419 		 "	btcl  $31, %%eax\n" /* clear PG */
420 		 "	movl %%eax, %%cr0\n"
421 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
422 		 "	rdmsr\n"
423 		 "	btcl $8, %%eax\n" /* clear LME */
424 		 "	wrmsr\n"
425 		 "	movl %%cr4, %%eax\n"
426 		 "	btcl $5, %%eax\n" /* clear PAE */
427 		 "	movl %%eax, %%cr4\n"
428 		 "	movw %[ds16], %%ax\n"
429 		 "	movw %%ax, %%ds\n"
430 		 "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
431 		 ".code16\n"
432 		 "3:\n"
433 		 "	movl %%cr0, %%eax\n"
434 		 "	btcl $0, %%eax\n" /* clear PE  */
435 		 "	movl %%eax, %%cr0\n"
436 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
437 		 "4:\n"
438 		 "	vmmcall\n"
439 		 "	movl %%cr0, %%eax\n"
440 		 "	btsl $0, %%eax\n" /* set PE  */
441 		 "	movl %%eax, %%cr0\n"
442 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
443 		 ".code32\n"
444 		 "5:\n"
445 		 "	movl %%cr4, %%eax\n"
446 		 "	btsl $5, %%eax\n" /* set PAE */
447 		 "	movl %%eax, %%cr4\n"
448 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
449 		 "	rdmsr\n"
450 		 "	btsl $8, %%eax\n" /* set LME */
451 		 "	wrmsr\n"
452 		 "	movl %%cr0, %%eax\n"
453 		 "	btsl  $31, %%eax\n" /* set PG */
454 		 "	movl %%eax, %%cr0\n"
455 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
456 		 ".code64\n\t"
457 		 "6:\n"
458 		 "	vmmcall\n"
459 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
460 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
461 		 : "rax", "rbx", "rcx", "rdx", "memory");
462 }
463 
464 static bool mode_switch_finished(struct test *test)
465 {
466     u64 cr0, cr4, efer;
467 
468     cr0  = test->vmcb->save.cr0;
469     cr4  = test->vmcb->save.cr4;
470     efer = test->vmcb->save.efer;
471 
472     /* Only expect VMMCALL intercepts */
473     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
474 	    return true;
475 
476     /* Jump over VMMCALL instruction */
477     test->vmcb->save.rip += 3;
478 
479     /* Do sanity checks */
480     switch (test->scratch) {
481     case 0:
482         /* Test should be in real mode now - check for this */
483         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
484             (cr4  & 0x00000020) || /* CR4.PAE */
485             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
486                 return true;
487         break;
488     case 2:
489         /* Test should be back in long-mode now - check for this */
490         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
491             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
492             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
493 		    return true;
494 	break;
495     }
496 
497     /* one step forward */
498     test->scratch += 1;
499 
500     return test->scratch == 2;
501 }
502 
503 static bool check_mode_switch(struct test *test)
504 {
505 	return test->scratch == 2;
506 }
507 
508 static void prepare_asid_zero(struct test *test)
509 {
510     test->vmcb->control.asid = 0;
511 }
512 
513 static void test_asid_zero(struct test *test)
514 {
515     asm volatile ("vmmcall\n\t");
516 }
517 
518 static bool check_asid_zero(struct test *test)
519 {
520     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
521 }
522 
523 static void sel_cr0_bug_prepare(struct test *test)
524 {
525     vmcb_ident(test->vmcb);
526     test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
527 }
528 
529 static bool sel_cr0_bug_finished(struct test *test)
530 {
531 	return true;
532 }
533 
534 static void sel_cr0_bug_test(struct test *test)
535 {
536     unsigned long cr0;
537 
538     /* read cr0, set the CD bit, and write back */
539     cr0  = read_cr0();
540     cr0 |= (1UL << 30);
541     write_cr0(cr0);
542 
543     /*
544      * If we are here, the test has failed: we are no longer in guest mode,
545      * so we cannot trigger an intercept to report the failure.  Print a
546      * message and exit instead.
547      */
548     printf("sel_cr0 test failed. Cannot recover from this - exiting\n");
549     exit(1);
550 }
551 
552 static bool sel_cr0_bug_check(struct test *test)
553 {
554     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
555 }
556 
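/*
 * The NPT tests below check the nested page fault error code reported in
 * exit_info_1: bit 0 = page present/protection, bit 1 = write access,
 * bit 2 = user access, bit 3 = reserved bit set in an NPT entry, bit 4 =
 * instruction fetch.  Nested accesses are treated as user-mode accesses,
 * so the US bit is expected in these values; e.g. the NX test expects
 * 0x15 (present + user + instruction fetch).
 */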
557 static void npt_nx_prepare(struct test *test)
558 {
559 
560     u64 *pte;
561 
562     vmcb_ident(test->vmcb);
563     pte = npt_get_pte((u64)null_test);
564 
565     *pte |= (1ULL << 63);
566 }
567 
568 static bool npt_nx_check(struct test *test)
569 {
570     u64 *pte = npt_get_pte((u64)null_test);
571 
572     *pte &= ~(1ULL << 63);
573 
574     test->vmcb->save.efer |= (1 << 11);
575 
576     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
577            && (test->vmcb->control.exit_info_1 == 0x15);
578 }
579 
580 static void npt_us_prepare(struct test *test)
581 {
582     u64 *pte;
583 
584     vmcb_ident(test->vmcb);
585     pte = npt_get_pte((u64)scratch_page);
586 
587     *pte &= ~(1ULL << 2);
588 }
589 
590 static void npt_us_test(struct test *test)
591 {
592     (void) *(volatile u64 *)scratch_page;
593 }
594 
595 static bool npt_us_check(struct test *test)
596 {
597     u64 *pte = npt_get_pte((u64)scratch_page);
598 
599     *pte |= (1ULL << 2);
600 
601     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
602            && (test->vmcb->control.exit_info_1 == 0x05);
603 }
604 
605 static void npt_rsvd_prepare(struct test *test)
606 {
607 
608     vmcb_ident(test->vmcb);
609 
610     pdpe[0] |= (1ULL << 8);
611 }
612 
613 static bool npt_rsvd_check(struct test *test)
614 {
615     pdpe[0] &= ~(1ULL << 8);
616 
617     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
618             && (test->vmcb->control.exit_info_1 == 0x0f);
619 }
620 
621 static void npt_rw_prepare(struct test *test)
622 {
623 
624     u64 *pte;
625 
626     vmcb_ident(test->vmcb);
627     pte = npt_get_pte(0x80000);
628 
629     *pte &= ~(1ULL << 1);
630 }
631 
632 static void npt_rw_test(struct test *test)
633 {
634     u64 *data = (void*)(0x80000);
635 
636     *data = 0;
637 }
638 
639 static bool npt_rw_check(struct test *test)
640 {
641     u64 *pte = npt_get_pte(0x80000);
642 
643     *pte |= (1ULL << 1);
644 
645     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
646            && (test->vmcb->control.exit_info_1 == 0x07);
647 }
648 
649 static void npt_pfwalk_prepare(struct test *test)
650 {
651 
652     u64 *pte;
653 
654     vmcb_ident(test->vmcb);
655     pte = npt_get_pte(read_cr3());
656 
657     *pte &= ~(1ULL << 1);
658 }
659 
660 static bool npt_pfwalk_check(struct test *test)
661 {
662     u64 *pte = npt_get_pte(read_cr3());
663 
664     *pte |= (1ULL << 1);
665 
666     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
667            && (test->vmcb->control.exit_info_1 == 0x7)
668 	   && (test->vmcb->control.exit_info_2 == read_cr3());
669 }
670 
671 static void latency_prepare(struct test *test)
672 {
673     default_prepare(test);
674     runs = LATENCY_RUNS;
675     latvmrun_min = latvmexit_min = -1ULL;
676     latvmrun_max = latvmexit_max = 0;
677     vmrun_sum = vmexit_sum = 0;
678 }
679 
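/*
 * latency_test/latency_finished ping-pong between guest and host:
 * test_run() takes a TSC stamp right before VMRUN and the guest reads it
 * here to compute the VMRUN latency; the guest stamps the TSC again before
 * VMMCALL and latency_finished() computes the VMEXIT latency, repeating
 * for LATENCY_RUNS iterations.
 */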
680 static void latency_test(struct test *test)
681 {
682     u64 cycles;
683 
684 start:
685     tsc_end = rdtsc();
686 
687     cycles = tsc_end - tsc_start;
688 
689     if (cycles > latvmrun_max)
690         latvmrun_max = cycles;
691 
692     if (cycles < latvmrun_min)
693         latvmrun_min = cycles;
694 
695     vmrun_sum += cycles;
696 
697     tsc_start = rdtsc();
698 
699     asm volatile ("vmmcall" : : : "memory");
700     goto start;
701 }
702 
703 static bool latency_finished(struct test *test)
704 {
705     u64 cycles;
706 
707     tsc_end = rdtsc();
708 
709     cycles = tsc_end - tsc_start;
710 
711     if (cycles > latvmexit_max)
712         latvmexit_max = cycles;
713 
714     if (cycles < latvmexit_min)
715         latvmexit_min = cycles;
716 
717     vmexit_sum += cycles;
718 
719     test->vmcb->save.rip += 3;
720 
721     runs -= 1;
722 
723     return runs == 0;
724 }
725 
726 static bool latency_check(struct test *test)
727 {
728     printf("    Latency VMRUN : max: %d min: %d avg: %d\n", latvmrun_max,
729             latvmrun_min, vmrun_sum / LATENCY_RUNS);
730     printf("    Latency VMEXIT: max: %d min: %d avg: %d\n", latvmexit_max,
731             latvmexit_min, vmexit_sum / LATENCY_RUNS);
732     return true;
733 }
734 
735 static void lat_svm_insn_prepare(struct test *test)
736 {
737     default_prepare(test);
738     runs = LATENCY_RUNS;
739     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
740     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
741     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
742 }
743 
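/*
 * Measure VMLOAD, VMSAVE, STGI and CLGI back-to-back on the host side;
 * the whole loop runs inside a single finished() callback, so the guest
 * (null_test) is entered only once.
 */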
744 static bool lat_svm_insn_finished(struct test *test)
745 {
746     u64 vmcb_phys = virt_to_phys(test->vmcb);
747     u64 cycles;
748 
749     for ( ; runs != 0; runs--) {
750         tsc_start = rdtsc();
751         asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
752         cycles = rdtsc() - tsc_start;
753         if (cycles > latvmload_max)
754             latvmload_max = cycles;
755         if (cycles < latvmload_min)
756             latvmload_min = cycles;
757         vmload_sum += cycles;
758 
759         tsc_start = rdtsc();
760         asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
761         cycles = rdtsc() - tsc_start;
762         if (cycles > latvmsave_max)
763             latvmsave_max = cycles;
764         if (cycles < latvmsave_min)
765             latvmsave_min = cycles;
766         vmsave_sum += cycles;
767 
768         tsc_start = rdtsc();
769         asm volatile("stgi\n\t");
770         cycles = rdtsc() - tsc_start;
771         if (cycles > latstgi_max)
772             latstgi_max = cycles;
773         if (cycles < latstgi_min)
774             latstgi_min = cycles;
775         stgi_sum += cycles;
776 
777         tsc_start = rdtsc();
778         asm volatile("clgi\n\t");
779         cycles = rdtsc() - tsc_start;
780         if (cycles > latclgi_max)
781             latclgi_max = cycles;
782         if (cycles < latclgi_min)
783             latclgi_min = cycles;
784         clgi_sum += cycles;
785     }
786 
787     return true;
788 }
789 
790 static bool lat_svm_insn_check(struct test *test)
791 {
792     printf("    Latency VMLOAD: max: %d min: %d avg: %d\n", latvmload_max,
793             latvmload_min, vmload_sum / LATENCY_RUNS);
794     printf("    Latency VMSAVE: max: %d min: %d avg: %d\n", latvmsave_max,
795             latvmsave_min, vmsave_sum / LATENCY_RUNS);
796     printf("    Latency STGI:   max: %d min: %d avg: %d\n", latstgi_max,
797             latstgi_min, stgi_sum / LATENCY_RUNS);
798     printf("    Latency CLGI:   max: %d min: %d avg: %d\n", latclgi_max,
799             latclgi_min, clgi_sum / LATENCY_RUNS);
800     return true;
801 }
802 static struct test tests[] = {
803     { "null", default_supported, default_prepare, null_test,
804       default_finished, null_check },
805     { "vmrun", default_supported, default_prepare, test_vmrun,
806        default_finished, check_vmrun },
807     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
808       null_test, default_finished, check_no_vmrun_int },
809     { "cr3 read intercept", default_supported, prepare_cr3_intercept,
810       test_cr3_intercept, default_finished, check_cr3_intercept },
811     { "cr3 read nointercept", default_supported, default_prepare,
812       test_cr3_intercept, default_finished, check_cr3_nointercept },
813     { "cr3 read intercept emulate", smp_supported,
814       prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
815       default_finished, check_cr3_intercept },
816     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
817       default_finished, check_next_rip },
818     { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
819        mode_switch_finished, check_mode_switch },
820     { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
821        default_finished, check_asid_zero },
822     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
823        sel_cr0_bug_finished, sel_cr0_bug_check },
824     { "npt_nx", npt_supported, npt_nx_prepare, null_test,
825 	    default_finished, npt_nx_check },
826     { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
827 	    default_finished, npt_us_check },
828     { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
829 	    default_finished, npt_rsvd_check },
830     { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
831 	    default_finished, npt_rw_check },
832     { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test,
833 	    default_finished, npt_pfwalk_check },
834     { "latency_run_exit", default_supported, latency_prepare, latency_test,
835       latency_finished, latency_check },
836     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
837       lat_svm_insn_finished, lat_svm_insn_check },
838 };
839 
840 int main(int ac, char **av)
841 {
842     int i, nr, passed, done;
843     struct vmcb *vmcb;
844 
845     setup_vm();
846     smp_init();
847 
848     if (!(cpuid(0x80000001).c & 4)) {
849         printf("SVM not available\n");
850         return 0;
851     }
852 
853     setup_svm();
854 
855     vmcb = alloc_page();
856 
857     nr = ARRAY_SIZE(tests);
858     passed = done = 0;
859     for (i = 0; i < nr; ++i) {
860         if (!tests[i].supported())
861             continue;
862         done += 1;
863         passed += test_run(&tests[i], vmcb);
864     }
865 
866     printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed));
867     return passed == done ? 0 : 1;
868 }
869