xref: /kvm-unit-tests/x86/svm.c (revision e8b10c1ff110e5047c4706d71326260b6e6ca79c)
#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "io.h"

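/*
 * Storage for the nested page table built in setup_svm(): one PML4
 * entry -> one PDPT page (4 entries used) -> 4 PD pages -> 2048 PT
 * pages, identity-mapping the first 4 GiB with 4k pages.
 */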
u64 *pml4e;
u64 *pdpe;
u64 *pde[4];
u64 *pte[2048];
void *scratch_page;

#define LATENCY_RUNS 1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

u8 *io_bitmap;
u8 io_bitmap_area[16384];

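/* CPUID Fn8000_000A EDX[0] is the nested-paging (NPT) feature bit. */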
static bool npt_supported(void)
{
    return cpuid(0x8000000A).d & 1;
}

static void setup_svm(void)
{
    void *hsave = alloc_page();
    u64 *page, address;
    int i, j;

    wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);

    scratch_page = alloc_page();

    io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);

    if (!npt_supported())
        return;

    printf("NPT detected - running all tests with NPT enabled\n");

    /*
     * Nested paging supported - build a nested page table.
     * Build the page table bottom-up and map everything with 4k pages
     * to get enough granularity for the NPT unit tests.
     */

    address = 0;

    /* PTE level: P, RW, US, A and D set */
    for (i = 0; i < 2048; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j, address += 4096)
            page[j] = address | 0x067ULL;

        pte[i] = page;
    }

    /* PDE level: P, RW, US and A set */
    for (i = 0; i < 4; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j)
            page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;

        pde[i] = page;
    }

    /* PDPE level */
    pdpe   = alloc_page();
    for (i = 0; i < 4; ++i)
        pdpe[i] = ((u64)(pde[i])) | 0x27;

    /* PML4E level */
    pml4e    = alloc_page();
    pml4e[0] = ((u64)pdpe) | 0x27;
}

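/*
 * Return the NPT PTE mapping the given guest-physical address. The PT
 * pages were allocated in ascending order, so bits 31:21 select the
 * pte[] page and bits 20:12 the entry within it.
 */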
static u64 *npt_get_pte(u64 address)
{
    int i1, i2;

    address >>= 12;
    i1 = (address >> 9) & 0x7ff;
    i2 = address & 0x1ff;

    return &pte[i1][i2];
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
                         u64 base, u32 limit, u32 attr)
{
    seg->selector = selector;
    seg->attrib = attr;
    seg->limit = limit;
    seg->base = base;
}

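/*
 * Set up a VMCB whose guest state mirrors the host: same segments,
 * control and debug registers, EFER and PAT, running on the current
 * page table. Only VMRUN and VMMCALL are intercepted; NPT is enabled
 * when available.
 */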
static void vmcb_ident(struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    struct vmcb_save_area *save = &vmcb->save;
    struct vmcb_control_area *ctrl = &vmcb->control;
    u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
    u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
    struct descriptor_table_ptr desc_table_ptr;

    memset(vmcb, 0, sizeof(*vmcb));
    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
    vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
    vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
    sgdt(&desc_table_ptr);
    vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    sidt(&desc_table_ptr);
    vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    ctrl->asid = 1;
    save->cpl = 0;
    save->efer = rdmsr(MSR_EFER);
    save->cr4 = read_cr4();
    save->cr3 = read_cr3();
    save->cr0 = read_cr0();
    save->dr7 = read_dr7();
    save->dr6 = read_dr6();
    save->cr2 = read_cr2();
    save->g_pat = rdmsr(MSR_IA32_CR_PAT);
    save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
    ctrl->iopm_base_pa = virt_to_phys(io_bitmap);

    if (npt_supported()) {
        ctrl->nested_ctl = 1;
        ctrl->nested_cr3 = (u64)pml4e;
    }
}

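/*
 * One entry per test: prepare() runs on the host before the first
 * VMRUN, guest_func() runs inside the guest, finished() decides after
 * each #VMEXIT whether to re-enter the guest, and succeeded() gives
 * the final verdict.
 */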
struct test {
    const char *name;
    bool (*supported)(void);
    void (*prepare)(struct test *test);
    void (*guest_func)(struct test *test);
    bool (*finished)(struct test *test);
    bool (*succeeded)(struct test *test);
    struct vmcb *vmcb;
    int exits;
    ulong scratch;
};

static inline void vmmcall(void)
{
    asm volatile ("vmmcall" : : : "memory");
}

static void test_thunk(struct test *test)
{
    test->guest_func(test);
    vmmcall();
}

struct regs {
        u64 rax;
        u64 rbx;
        u64 rcx;
        u64 rdx;
        u64 cr2;
        u64 rbp;
        u64 rsi;
        u64 rdi;
        u64 r8;
        u64 r9;
        u64 r10;
        u64 r11;
        u64 r12;
        u64 r13;
        u64 r14;
        u64 r15;
        u64 rflags;
};

struct regs regs;

// rax handled specially below

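/*
 * Swap the guest GPRs with their slots in struct regs above; the
 * immediates are byte offsets into that struct. rax is staged through
 * the VMCB instead (see test_run() below) and rsp comes from the VMCB
 * save area, so neither appears here. Using xchg makes one macro serve
 * for both save and load.
 */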
#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C      SAVE_GPR_C

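/*
 * Run a single test: point the VMCB at test_thunk() with a private
 * stack, then loop on VMRUN with interrupts globally masked by
 * CLGI/STGI until finished() says the test is done. VMLOAD/VMSAVE
 * shuttle the extra host/guest state that VMRUN itself does not
 * switch.
 */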
static bool test_run(struct test *test, struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    u64 guest_stack[10000];
    bool success;

    test->vmcb = vmcb;
    test->prepare(test);
    vmcb->save.rip = (ulong)test_thunk;
    vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
    regs.rdi = (ulong)test;
    do {
        tsc_start = rdtsc();
        asm volatile (
            "clgi \n\t"
            "vmload \n\t"
            /*
             * The state save area starts at VMCB offset 0x400, so
             * guest rflags lives at 0x570 and guest rax at 0x5f8
             * (AMD APM vol. 2, appendix B).
             */
            "mov regs+0x80, %%r15\n\t"  // rflags
            "mov %%r15, 0x570(%0)\n\t"
            "mov regs, %%r15\n\t"       // rax
            "mov %%r15, 0x5f8(%0)\n\t"
            LOAD_GPR_C
            "vmrun \n\t"
            SAVE_GPR_C
            "mov 0x570(%0), %%r15\n\t"  // rflags
            "mov %%r15, regs+0x80\n\t"
            "mov 0x5f8(%0), %%r15\n\t"  // rax
            "mov %%r15, regs\n\t"
            "vmsave \n\t"
            "stgi"
            : : "a"(vmcb_phys)
            : "rbx", "rcx", "rdx", "rsi",
              "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
              "memory");
        tsc_end = rdtsc();
        ++test->exits;
    } while (!test->finished(test));

    success = test->succeeded(test);

    printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");

    return success;
}

static bool smp_supported(void)
{
    return cpu_count() > 1;
}

static bool default_supported(void)
{
    return true;
}

static void default_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    cli();
}

static bool default_finished(struct test *test)
{
    return true; /* one vmexit */
}

static void null_test(struct test *test)
{
}

static bool null_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

static void prepare_no_vmrun_int(struct test *test)
{
    test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct test *test)
{
    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
}

static bool check_vmrun(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_cr3_intercept(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct test *test)
{
    asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct test *test)
{
    return null_check(test) && test->scratch == read_cr3();
}

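/*
 * While the guest waits, CPU 1 rewrites the instruction bytes at
 * mmio_insn, turning an MMIO access into a CR3 read. This exercises
 * the instruction emulator: it must honor the CR3 read intercept for
 * the rewritten instruction rather than acting on stale bytes.
 */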
static void corrupt_cr3_intercept_bypass(void *_test)
{
    struct test *test = _test;
    extern volatile u32 mmio_insn;

    while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
        pause();
    pause();
    pause();
    pause();
    mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
    on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct test *test)
{
    ulong a = 0xa0000;

    test->scratch = 1;
    while (test->scratch != 2)
        barrier();

    asm volatile ("mmio_insn: mov %0, (%0); nop"
                  : "+a"(a) : : "memory");
    test->scratch = a;
}

static bool next_rip_supported(void)
{
    /* CPUID Fn8000_000A EDX[3]: next_rip is saved on #VMEXIT */
    return (cpuid(SVM_CPUID_FUNC).d & 8);
}

static void prepare_next_rip(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}

static void test_next_rip(struct test *test)
{
    asm volatile ("rdtsc\n\t"
                  ".globl exp_next_rip\n\t"
                  "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct test *test)
{
    extern char exp_next_rip;
    unsigned long address = (unsigned long)&exp_next_rip;

    return address == test->vmcb->control.next_rip;
}

static void prepare_mode_switch(struct test *test)
{
    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
                                             |  (1ULL << UD_VECTOR)
                                             |  (1ULL << DF_VECTOR)
                                             |  (1ULL << PF_VECTOR);
    test->scratch = 0;
}

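/*
 * Take the guest from long mode down to real mode and back, issuing
 * VMMCALL once in real mode and once after the return to long mode;
 * mode_switch_finished() sanity-checks CR0, CR4 and EFER at each stop.
 */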
static void test_mode_switch(struct test *test)
{
    asm volatile("	cli\n"
		 "	ljmp *1f\n" /* jump to 32-bit code segment */
		 "1:\n"
		 "	.long 2f\n"
		 "	.long " xstr(KERNEL_CS32) "\n"
		 ".code32\n"
		 "2:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl  $31, %%eax\n" /* clear PG */
		 "	movl %%eax, %%cr0\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btcl $8, %%eax\n" /* clear LME */
		 "	wrmsr\n"
		 "	movl %%cr4, %%eax\n"
		 "	btcl $5, %%eax\n" /* clear PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movw %[ds16], %%ax\n"
		 "	movw %%ax, %%ds\n"
		 "	ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
		 ".code16\n"
		 "3:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl $0, %%eax\n" /* clear PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl $0, $4f\n"   /* jump to real mode */
		 "4:\n"
		 "	vmmcall\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl $0, %%eax\n" /* set PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
		 ".code32\n"
		 "5:\n"
		 "	movl %%cr4, %%eax\n"
		 "	btsl $5, %%eax\n" /* set PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btsl $8, %%eax\n" /* set LME */
		 "	wrmsr\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl  $31, %%eax\n" /* set PG */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
		 ".code64\n\t"
		 "6:\n"
		 "	vmmcall\n"
		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		 : "rax", "rbx", "rcx", "rdx", "memory");
}

static bool mode_switch_finished(struct test *test)
{
    u64 cr0, cr4, efer;

    cr0  = test->vmcb->save.cr0;
    cr4  = test->vmcb->save.cr4;
    efer = test->vmcb->save.efer;

    /* Only expect VMMCALL intercepts */
    if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
        return true;

    /* Jump over the VMMCALL instruction */
    test->vmcb->save.rip += 3;

    /* Do sanity checks */
    switch (test->scratch) {
    case 0:
        /* Test should be in real mode now - check for this */
        if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
            (cr4  & 0x00000020) || /* CR4.PAE */
            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
            return true;
        break;
    case 1:
        /* Test should be back in long mode now - check for this */
        if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
            ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
            return true;
        break;
    }

    /* one step forward */
    test->scratch += 1;

    return test->scratch == 2;
}

static bool check_mode_switch(struct test *test)
{
    return test->scratch == 2;
}

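/*
 * The I/O permission map holds one intercept bit per port, and a
 * multi-byte access is checked against the bits of every port it
 * touches. The map proper is 8 KiB (64k ports); the byte behind it is
 * set so that accesses extending past port 0xFFFF also intercept.
 */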
static void prepare_ioio(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
    test->scratch = 0;
    memset(io_bitmap, 0, 8192);
    io_bitmap[8192] = 0xFF;
}

int get_test_stage(struct test *test)
{
    barrier();
    return test->scratch;
}

void inc_test_stage(struct test *test)
{
    barrier();
    test->scratch++;
    barrier();
}

static void test_ioio(struct test *test)
{
    // stage 0, test IO pass
    inb(0x5000);
    outb(0x0, 0x5000);
    if (get_test_stage(test) != 0)
        goto fail;

    // test IO width, in/out
    io_bitmap[0] = 0xFF;
    inc_test_stage(test);
    inb(0x0);
    if (get_test_stage(test) != 2)
        goto fail;

    outw(0x0, 0x0);
    if (get_test_stage(test) != 3)
        goto fail;

    inl(0x0);
    if (get_test_stage(test) != 4)
        goto fail;

    // test low/high IO port
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inb(0x5000);
    if (get_test_stage(test) != 5)
        goto fail;

    io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
    inw(0x9000);
    if (get_test_stage(test) != 6)
        goto fail;

    // test partial pass
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inl(0x4FFF);
    if (get_test_stage(test) != 7)
        goto fail;

    // test across pages
    inc_test_stage(test);
    inl(0x7FFF);
    if (get_test_stage(test) != 8)
        goto fail;

    inc_test_stage(test);
    io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
    inl(0x7FFF);
    if (get_test_stage(test) != 10)
        goto fail;

    io_bitmap[0] = 0;
    inl(0xFFFF);
    if (get_test_stage(test) != 11)
        goto fail;

    io_bitmap[0] = 0xFF;
    io_bitmap[8192] = 0;
    inl(0xFFFF);
    inc_test_stage(test);
    if (get_test_stage(test) != 12)
        goto fail;

    return;

fail:
    printf("test failure, stage %d\n", get_test_stage(test));
    test->scratch = -1;
}

static bool ioio_finished(struct test *test)
{
    unsigned port, size;

    /* The test's final VMMCALL ends the run */
    if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
        return true;

    /* Only expect IOIO intercepts otherwise */
    if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
        return true;

    /* one step forward */
    test->scratch += 1;

    port = test->vmcb->control.exit_info_1 >> 16;
    size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

    /* Clear the permission bits for the faulting access so it can retry */
    while (size--) {
        io_bitmap[port / 8] &= ~(1 << (port & 7));
        port++;
    }

    return false;
}

static bool check_ioio(struct test *test)
{
    memset(io_bitmap, 0, 8193);
    return test->scratch != -1;
}

static void prepare_asid_zero(struct test *test)
{
    /* ASID 0 is reserved for the host; VMRUN must fail with it */
    test->vmcb->control.asid = 0;
}

static void test_asid_zero(struct test *test)
{
    asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct test *test)
{
    return true;
}

static void sel_cr0_bug_test(struct test *test)
{
    unsigned long cr0;

    /* read cr0, set CD, and write back */
    cr0  = read_cr0();
    cr0 |= (1UL << 30);
    write_cr0(cr0);

    /*
     * If we are here the test failed, not sure what to do now because we
     * are not in guest-mode anymore so we can't trigger an intercept.
     * Trigger a triple fault for now.
     */
    printf("sel_cr0 test failed. Cannot recover from this - exiting\n");
    exit(1);
}

static bool sel_cr0_bug_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

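/*
 * NPT tests: poison a nested page table entry, touch it from the
 * guest and verify the #VMEXIT(NPF) error code in exit_info_1 (bit 32:
 * fault on the final guest-physical access, bit 33: fault during the
 * guest page-table walk) and the faulting address in exit_info_2.
 */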
static void npt_nx_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)null_test);

    *pte |= (1ULL << 63);
}

static bool npt_nx_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)null_test);

    *pte &= ~(1ULL << 63);

    test->vmcb->save.efer |= (1 << 11);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
}

static void npt_us_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)scratch_page);

    *pte &= ~(1ULL << 2);
}

static void npt_us_test(struct test *test)
{
    (void) *(volatile u64 *)scratch_page;
}

static bool npt_us_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)scratch_page);

    *pte |= (1ULL << 2);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
}

static void npt_rsvd_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);

    pdpe[0] |= (1ULL << 8);
}

static bool npt_rsvd_check(struct test *test)
{
    pdpe[0] &= ~(1ULL << 8);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
            && (test->vmcb->control.exit_info_1 == 0x200000006ULL);
}

static void npt_rw_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(0x80000);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_test(struct test *test)
{
    u64 *data = (void*)(0x80000);

    *data = 0;
}

static bool npt_rw_check(struct test *test)
{
    u64 *pte = npt_get_pte(0x80000);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
}

static void npt_pfwalk_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(read_cr3());

    *pte &= ~(1ULL << 1);
}

static bool npt_pfwalk_check(struct test *test)
{
    u64 *pte = npt_get_pte(read_cr3());

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
           && (test->vmcb->control.exit_info_2 == read_cr3());
}

static void npt_l1mmio_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
}

u32 nested_apic_version1;
u32 nested_apic_version2;

static void npt_l1mmio_test(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030UL);

    nested_apic_version1 = *data;
    nested_apic_version2 = *data;
}

static bool npt_l1mmio_check(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030);
    u32 lvr = *data;

    return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
}

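/*
 * Latency tests: measure VMRUN/#VMEXIT round trips and the bare
 * VMLOAD, VMSAVE, STGI and CLGI instructions with rdtsc over
 * LATENCY_RUNS iterations, tracking min/max/average cycle counts.
 */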
static void latency_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmrun_min = latvmexit_min = -1ULL;
    latvmrun_max = latvmexit_max = 0;
    vmrun_sum = vmexit_sum = 0;
}

static void latency_test(struct test *test)
{
    u64 cycles;

start:
    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmrun_max)
        latvmrun_max = cycles;

    if (cycles < latvmrun_min)
        latvmrun_min = cycles;

    vmrun_sum += cycles;

    tsc_start = rdtsc();

    asm volatile ("vmmcall" : : : "memory");
    goto start;
}

static bool latency_finished(struct test *test)
{
    u64 cycles;

    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmexit_max)
        latvmexit_max = cycles;

    if (cycles < latvmexit_min)
        latvmexit_min = cycles;

    vmexit_sum += cycles;

    test->vmcb->save.rip += 3;

    runs -= 1;

    return runs == 0;
}

static bool latency_check(struct test *test)
{
    printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
            latvmrun_min, vmrun_sum / LATENCY_RUNS);
    printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
            latvmexit_min, vmexit_sum / LATENCY_RUNS);
    return true;
}

static void lat_svm_insn_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
    latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
    vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct test *test)
{
    u64 vmcb_phys = virt_to_phys(test->vmcb);
    u64 cycles;

    for ( ; runs != 0; runs--) {
        tsc_start = rdtsc();
        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmload_max)
            latvmload_max = cycles;
        if (cycles < latvmload_min)
            latvmload_min = cycles;
        vmload_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmsave_max)
            latvmsave_max = cycles;
        if (cycles < latvmsave_min)
            latvmsave_min = cycles;
        vmsave_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("stgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latstgi_max)
            latstgi_max = cycles;
        if (cycles < latstgi_min)
            latstgi_min = cycles;
        stgi_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("clgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latclgi_max)
            latclgi_max = cycles;
        if (cycles < latclgi_min)
            latclgi_min = cycles;
        clgi_sum += cycles;
    }

    return true;
}

static bool lat_svm_insn_check(struct test *test)
{
    printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
            latvmload_min, vmload_sum / LATENCY_RUNS);
    printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
            latvmsave_min, vmsave_sum / LATENCY_RUNS);
    printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
            latstgi_min, stgi_sum / LATENCY_RUNS);
    printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
            latclgi_min, clgi_sum / LATENCY_RUNS);
    return true;
}

static struct test tests[] = {
    { "null", default_supported, default_prepare, null_test,
      default_finished, null_check },
    { "vmrun", default_supported, default_prepare, test_vmrun,
      default_finished, check_vmrun },
    { "ioio", default_supported, prepare_ioio, test_ioio,
      ioio_finished, check_ioio },
    { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
      null_test, default_finished, check_no_vmrun_int },
    { "cr3 read intercept", default_supported, prepare_cr3_intercept,
      test_cr3_intercept, default_finished, check_cr3_intercept },
    { "cr3 read nointercept", default_supported, default_prepare,
      test_cr3_intercept, default_finished, check_cr3_nointercept },
    { "cr3 read intercept emulate", smp_supported,
      prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
      default_finished, check_cr3_intercept },
    { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
      default_finished, check_next_rip },
    { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
      mode_switch_finished, check_mode_switch },
    { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
      default_finished, check_asid_zero },
    { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
      sel_cr0_bug_finished, sel_cr0_bug_check },
    { "npt_nx", npt_supported, npt_nx_prepare, null_test,
      default_finished, npt_nx_check },
    { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
      default_finished, npt_us_check },
    { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
      default_finished, npt_rsvd_check },
    { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
      default_finished, npt_rw_check },
    { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test,
      default_finished, npt_pfwalk_check },
    { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
      default_finished, npt_l1mmio_check },
    { "latency_run_exit", default_supported, latency_prepare, latency_test,
      latency_finished, latency_check },
    { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
      lat_svm_insn_finished, lat_svm_insn_check },
};

int main(int ac, char **av)
{
    int i, nr, passed, done;
    struct vmcb *vmcb;

    setup_vm();
    smp_init();

    /* CPUID Fn8000_0001 ECX[2] reports SVM support */
    if (!(cpuid(0x80000001).c & 4)) {
        printf("SVM not available\n");
        return 0;
    }

    setup_svm();

    vmcb = alloc_page();

    nr = ARRAY_SIZE(tests);
    passed = done = 0;
    for (i = 0; i < nr; ++i) {
        if (!tests[i].supported())
            continue;
        done += 1;
        passed += test_run(&tests[i], vmcb);
    }

    printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed));
    return passed == done ? 0 : 1;
}