xref: /kvm-unit-tests/x86/svm.c (revision db4898e8f67c57369499ea16c605aa3390da367a)
1 #include "svm.h"
2 #include "libcflat.h"
3 #include "processor.h"
4 #include "desc.h"
5 #include "msr.h"
6 #include "vm.h"
7 #include "smp.h"
8 #include "types.h"
9 #include "alloc_page.h"
10 
11 /* for the nested page table */
12 u64 *pml4e;
13 u64 *pdpe;
14 u64 *pde[4];
15 u64 *pte[2048];
16 void *scratch_page;
17 
18 #define LATENCY_RUNS 1000000
19 
20 u64 tsc_start;
21 u64 tsc_end;
22 
23 u64 vmrun_sum, vmexit_sum;
24 u64 vmsave_sum, vmload_sum;
25 u64 stgi_sum, clgi_sum;
26 u64 latvmrun_max;
27 u64 latvmrun_min;
28 u64 latvmexit_max;
29 u64 latvmexit_min;
30 u64 latvmload_max;
31 u64 latvmload_min;
32 u64 latvmsave_max;
33 u64 latvmsave_min;
34 u64 latstgi_max;
35 u64 latstgi_min;
36 u64 latclgi_max;
37 u64 latclgi_min;
38 u64 runs;
39 
40 u8 *io_bitmap;
41 u8 io_bitmap_area[16384];
42 
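/* Nested paging support is reported in CPUID Fn8000_000A, EDX bit 0. */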
43 static bool npt_supported(void)
44 {
45    return cpuid(0x8000000A).d & 1;
46 }
47 
48 static void setup_svm(void)
49 {
50     void *hsave = alloc_page();
51     u64 *page, address;
52     int i,j;
53 
54     wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
55     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
56     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
57 
58     scratch_page = alloc_page();
59 
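    /* Page-align the I/O permission bitmap; vmcb_ident() installs it as iopm_base_pa. */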
60     io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);
61 
62     if (!npt_supported())
63         return;
64 
65     printf("NPT detected - running all tests with NPT enabled\n");
66 
67      /*
68       * Nested paging is supported, so build a nested page table.
69       * Build it bottom-up and map everything with 4k pages to get
70       * enough granularity for the NPT unit tests.
71       */
72 
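    /* 2048 PTE pages x 512 entries x 4k pages cover the first 4GB of guest-physical memory. */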
73     address = 0;
74 
75     /* PTE level */
76     for (i = 0; i < 2048; ++i) {
77         page = alloc_page();
78 
79         for (j = 0; j < 512; ++j, address += 4096)
80             page[j] = address | 0x067ULL;
81 
82         pte[i] = page;
83     }
84 
85     /* PDE level */
86     for (i = 0; i < 4; ++i) {
87         page = alloc_page();
88 
89         for (j = 0; j < 512; ++j)
90             page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
91 
92         pde[i] = page;
93     }
94 
95     /* PDPe level */
96     pdpe   = alloc_page();
97     for (i = 0; i < 4; ++i)
98        pdpe[i] = ((u64)(pde[i])) | 0x27;
99 
100     /* PML4e level */
101     pml4e    = alloc_page();
102     pml4e[0] = ((u64)pdpe) | 0x27;
103 }
104 
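/*
 * Return the NPT PDE covering guest-physical 'address': bits 31:30 select
 * one of the four PDE pages, bits 29:21 the entry within it.
 */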
105 static u64 *npt_get_pde(u64 address)
106 {
107     int i1, i2;
108 
109     address >>= 21;
110     i1 = (address >> 9) & 0x3;
111     i2 = address & 0x1ff;
112 
113     return &pde[i1][i2];
114 }
115 
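/*
 * Return the NPT PTE mapping guest-physical 'address': bits 31:21 select
 * one of the 2048 PTE pages, bits 20:12 the entry within it.
 */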
116 static u64 *npt_get_pte(u64 address)
117 {
118     int i1, i2;
119 
120     address >>= 12;
121     i1 = (address >> 9) & 0x7ff;
122     i2 = address & 0x1ff;
123 
124     return &pte[i1][i2];
125 }
126 
127 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
128                          u64 base, u32 limit, u32 attr)
129 {
130     seg->selector = selector;
131     seg->attrib = attr;
132     seg->limit = limit;
133     seg->base = base;
134 }
135 
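/*
 * Initialize a VMCB so the guest starts out as a copy of the host: vmsave
 * fills in the hidden segment state, the flat CS/DS/ES/SS descriptors and
 * the control/debug registers mirror the host, ASID is 1, and only VMRUN
 * (architecturally required) and VMMCALL are intercepted.  Nested paging
 * is enabled whenever the CPU supports it.
 */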
136 static void vmcb_ident(struct vmcb *vmcb)
137 {
138     u64 vmcb_phys = virt_to_phys(vmcb);
139     struct vmcb_save_area *save = &vmcb->save;
140     struct vmcb_control_area *ctrl = &vmcb->control;
141     u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
142         | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
143     u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
144         | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
145     struct descriptor_table_ptr desc_table_ptr;
146 
147     memset(vmcb, 0, sizeof(*vmcb));
148     asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
149     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
150     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
151     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
152     vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
153     sgdt(&desc_table_ptr);
154     vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
155     sidt(&desc_table_ptr);
156     vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
157     ctrl->asid = 1;
158     save->cpl = 0;
159     save->efer = rdmsr(MSR_EFER);
160     save->cr4 = read_cr4();
161     save->cr3 = read_cr3();
162     save->cr0 = read_cr0();
163     save->dr7 = read_dr7();
164     save->dr6 = read_dr6();
165     save->cr2 = read_cr2();
166     save->g_pat = rdmsr(MSR_IA32_CR_PAT);
167     save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
168     ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
169     ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
170 
171     if (npt_supported()) {
172         ctrl->nested_ctl = 1;
173         ctrl->nested_cr3 = (u64)pml4e;
174     }
175 }
176 
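/*
 * A test case: supported() gates the test, prepare() sets up the VMCB,
 * guest_func() runs inside the guest, finished() decides after each #VMEXIT
 * whether to re-enter the guest, and succeeded() delivers the final verdict.
 */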
177 struct test {
178     const char *name;
179     bool (*supported)(void);
180     void (*prepare)(struct test *test);
181     void (*guest_func)(struct test *test);
182     bool (*finished)(struct test *test);
183     bool (*succeeded)(struct test *test);
184     struct vmcb *vmcb;
185     int exits;
186     ulong scratch;
187 };
188 
189 static inline void vmmcall(void)
190 {
191     asm volatile ("vmmcall" : : : "memory");
192 }
193 
194 static void test_thunk(struct test *test)
195 {
196     test->guest_func(test);
197     vmmcall();
198 }
199 
200 struct regs {
201         u64 rax;
202         u64 rcx;
203         u64 rdx;
204         u64 rbx;
205         u64 cr2;
206         u64 rbp;
207         u64 rsi;
208         u64 rdi;
209         u64 r8;
210         u64 r9;
211         u64 r10;
212         u64 r11;
213         u64 r12;
214         u64 r13;
215         u64 r14;
216         u64 r15;
217         u64 rflags;
218 };
219 
220 struct regs regs;
221 
222 // rax handled specially below
223 
224 #define SAVE_GPR_C                              \
225         "xchg %%rbx, regs+0x8\n\t"              \
226         "xchg %%rcx, regs+0x10\n\t"             \
227         "xchg %%rdx, regs+0x18\n\t"             \
228         "xchg %%rbp, regs+0x28\n\t"             \
229         "xchg %%rsi, regs+0x30\n\t"             \
230         "xchg %%rdi, regs+0x38\n\t"             \
231         "xchg %%r8, regs+0x40\n\t"              \
232         "xchg %%r9, regs+0x48\n\t"              \
233         "xchg %%r10, regs+0x50\n\t"             \
234         "xchg %%r11, regs+0x58\n\t"             \
235         "xchg %%r12, regs+0x60\n\t"             \
236         "xchg %%r13, regs+0x68\n\t"             \
237         "xchg %%r14, regs+0x70\n\t"             \
238         "xchg %%r15, regs+0x78\n\t"
239 
240 #define LOAD_GPR_C      SAVE_GPR_C
241 
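/*
 * Enter the guest repeatedly until the test's finished() callback says stop.
 * The guest runs test_thunk() on a stack carved out of this function's frame.
 * General purpose registers are exchanged with the global 'regs' image via
 * SAVE_GPR_C/LOAD_GPR_C; rax and rflags are copied to and from the VMCB (see
 * the inline comments), because vmrun transfers those registers through the
 * VMCB rather than leaving them in place.
 */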
242 static void test_run(struct test *test, struct vmcb *vmcb)
243 {
244     u64 vmcb_phys = virt_to_phys(vmcb);
245     u64 guest_stack[10000];
246 
247     test->vmcb = vmcb;
248     test->prepare(test);
249     vmcb->save.rip = (ulong)test_thunk;
250     vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
251     regs.rdi = (ulong)test;
252     do {
253         tsc_start = rdtsc();
254         asm volatile (
255             "clgi \n\t"
256             "vmload \n\t"
257             "mov regs+0x80, %%r15\n\t"  // rflags
258             "mov %%r15, 0x170(%0)\n\t"
259             "mov regs, %%r15\n\t"       // rax
260             "mov %%r15, 0x1f8(%0)\n\t"
261             LOAD_GPR_C
262             "vmrun \n\t"
263             SAVE_GPR_C
264             "mov 0x170(%0), %%r15\n\t"  // rflags
265             "mov %%r15, regs+0x80\n\t"
266             "mov 0x1f8(%0), %%r15\n\t"  // rax
267             "mov %%r15, regs\n\t"
268             "vmsave \n\t"
269             "stgi"
270             : : "a"(vmcb_phys)
271             : "rbx", "rcx", "rdx", "rsi",
272               "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
273               "memory");
274         tsc_end = rdtsc();
275         ++test->exits;
276     } while (!test->finished(test));
277 
278     report("%s", test->succeeded(test), test->name);
279 }
280 
281 static bool smp_supported(void)
282 {
283 	return cpu_count() > 1;
284 }
285 
286 static bool default_supported(void)
287 {
288     return true;
289 }
290 
291 static void default_prepare(struct test *test)
292 {
293     vmcb_ident(test->vmcb);
294     cli();
295 }
296 
297 static bool default_finished(struct test *test)
298 {
299     return true; /* one vmexit */
300 }
301 
302 static void null_test(struct test *test)
303 {
304 }
305 
306 static bool null_check(struct test *test)
307 {
308     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
309 }
310 
311 static void prepare_no_vmrun_int(struct test *test)
312 {
313     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
314 }
315 
316 static bool check_no_vmrun_int(struct test *test)
317 {
318     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
319 }
320 
321 static void test_vmrun(struct test *test)
322 {
323     asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
324 }
325 
326 static bool check_vmrun(struct test *test)
327 {
328     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
329 }
330 
331 static void prepare_cr3_intercept(struct test *test)
332 {
333     default_prepare(test);
334     test->vmcb->control.intercept_cr_read |= 1 << 3;
335 }
336 
337 static void test_cr3_intercept(struct test *test)
338 {
339     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
340 }
341 
342 static bool check_cr3_intercept(struct test *test)
343 {
344     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
345 }
346 
347 static bool check_cr3_nointercept(struct test *test)
348 {
349     return null_check(test) && test->scratch == read_cr3();
350 }
351 
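/*
 * Runs on CPU 1: wait until the guest on CPU 0 sets scratch to 1, flip it to
 * 2 to release the guest, give it a few pause()s of head start, then
 * overwrite the instruction at mmio_insn with the bytes for
 * "mov %cr3, %rax; nop".  The rewritten instruction must still hit the CR3
 * read intercept.
 */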
352 static void corrupt_cr3_intercept_bypass(void *_test)
353 {
354     struct test *test = _test;
355     extern volatile u32 mmio_insn;
356 
357     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
358         pause();
359     pause();
360     pause();
361     pause();
362     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
363 }
364 
365 static void prepare_cr3_intercept_bypass(struct test *test)
366 {
367     default_prepare(test);
368     test->vmcb->control.intercept_cr_read |= 1 << 3;
369     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
370 }
371 
372 static void test_cr3_intercept_bypass(struct test *test)
373 {
374     ulong a = 0xa0000;
375 
376     test->scratch = 1;
377     while (test->scratch != 2)
378         barrier();
379 
380     asm volatile ("mmio_insn: mov %0, (%0); nop"
381                   : "+a"(a) : : "memory");
382     test->scratch = a;
383 }
384 
385 static bool next_rip_supported(void)
386 {
387     return (cpuid(SVM_CPUID_FUNC).d & 8);
388 }
389 
390 static void prepare_next_rip(struct test *test)
391 {
392     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
393 }
394 
395 
396 static void test_next_rip(struct test *test)
397 {
398     asm volatile ("rdtsc\n\t"
399                   ".globl exp_next_rip\n\t"
400                   "exp_next_rip:\n\t" ::: "eax", "edx");
401 }
402 
403 static bool check_next_rip(struct test *test)
404 {
405     extern char exp_next_rip;
406     unsigned long address = (unsigned long)&exp_next_rip;
407 
408     return address == test->vmcb->control.next_rip;
409 }
410 
411 static void prepare_mode_switch(struct test *test)
412 {
413     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
414                                              |  (1ULL << UD_VECTOR)
415                                              |  (1ULL << DF_VECTOR)
416                                              |  (1ULL << PF_VECTOR);
417     test->scratch = 0;
418 }
419 
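/*
 * Walk the guest down from 64-bit long mode to 32-bit protected mode, 16-bit
 * protected mode and finally real mode, vmmcall there, then climb back up to
 * long mode and vmmcall again.  mode_switch_finished() checks the saved
 * CR0/CR4/EFER at each of the two exits.
 */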
420 static void test_mode_switch(struct test *test)
421 {
422     asm volatile("	cli\n"
423 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
424 		 "1:\n"
425 		 "	.long 2f\n"
426 		 "	.long " xstr(KERNEL_CS32) "\n"
427 		 ".code32\n"
428 		 "2:\n"
429 		 "	movl %%cr0, %%eax\n"
430 		 "	btcl  $31, %%eax\n" /* clear PG */
431 		 "	movl %%eax, %%cr0\n"
432 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
433 		 "	rdmsr\n"
434 		 "	btcl $8, %%eax\n" /* clear LME */
435 		 "	wrmsr\n"
436 		 "	movl %%cr4, %%eax\n"
437 		 "	btcl $5, %%eax\n" /* clear PAE */
438 		 "	movl %%eax, %%cr4\n"
439 		 "	movw %[ds16], %%ax\n"
440 		 "	movw %%ax, %%ds\n"
441 		 "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
442 		 ".code16\n"
443 		 "3:\n"
444 		 "	movl %%cr0, %%eax\n"
445 		 "	btcl $0, %%eax\n" /* clear PE  */
446 		 "	movl %%eax, %%cr0\n"
447 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
448 		 "4:\n"
449 		 "	vmmcall\n"
450 		 "	movl %%cr0, %%eax\n"
451 		 "	btsl $0, %%eax\n" /* set PE  */
452 		 "	movl %%eax, %%cr0\n"
453 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
454 		 ".code32\n"
455 		 "5:\n"
456 		 "	movl %%cr4, %%eax\n"
457 		 "	btsl $5, %%eax\n" /* set PAE */
458 		 "	movl %%eax, %%cr4\n"
459 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
460 		 "	rdmsr\n"
461 		 "	btsl $8, %%eax\n" /* set LME */
462 		 "	wrmsr\n"
463 		 "	movl %%cr0, %%eax\n"
464 		 "	btsl  $31, %%eax\n" /* set PG */
465 		 "	movl %%eax, %%cr0\n"
466 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
467 		 ".code64\n\t"
468 		 "6:\n"
469 		 "	vmmcall\n"
470 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
471 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
472 		 : "rax", "rbx", "rcx", "rdx", "memory");
473 }
474 
475 static bool mode_switch_finished(struct test *test)
476 {
477     u64 cr0, cr4, efer;
478 
479     cr0  = test->vmcb->save.cr0;
480     cr4  = test->vmcb->save.cr4;
481     efer = test->vmcb->save.efer;
482 
483     /* Only expect VMMCALL intercepts */
484     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
485 	    return true;
486 
487     /* Jump over VMMCALL instruction */
488     test->vmcb->save.rip += 3;
489 
490     /* Do sanity checks */
491     switch (test->scratch) {
492     case 0:
493         /* Test should be in real mode now - check for this */
494         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
495             (cr4  & 0x00000020) || /* CR4.PAE */
496             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
497                 return true;
498         break;
499     case 2:
500         /* Test should be back in long-mode now - check for this */
501         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
502             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
503             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
504 		    return true;
505 	break;
506     }
507 
508     /* one step forward */
509     test->scratch += 1;
510 
511     return test->scratch == 2;
512 }
513 
514 static bool check_mode_switch(struct test *test)
515 {
516 	return test->scratch == 2;
517 }
518 
519 static void prepare_ioio(struct test *test)
520 {
521     test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
522     test->scratch = 0;
523     memset(io_bitmap, 0, 8192);
524     io_bitmap[8192] = 0xFF;
525 }
526 
527 static int get_test_stage(struct test *test)
528 {
529     barrier();
530     return test->scratch;
531 }
532 
533 static void inc_test_stage(struct test *test)
534 {
535     barrier();
536     test->scratch++;
537     barrier();
538 }
539 
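/*
 * Guest side of the IOIO test.  Every intercepted access lands in
 * ioio_finished(), which advances the stage counter and clears the offending
 * bitmap bits so the retried instruction goes through; the guest then checks
 * that the stage matches what it expects after each access.
 */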
540 static void test_ioio(struct test *test)
541 {
542     // stage 0, test IO pass
543     inb(0x5000);
544     outb(0x0, 0x5000);
545     if (get_test_stage(test) != 0)
546         goto fail;
547 
548     // test IO width, in/out
549     io_bitmap[0] = 0xFF;
550     inc_test_stage(test);
551     inb(0x0);
552     if (get_test_stage(test) != 2)
553         goto fail;
554 
555     outw(0x0, 0x0);
556     if (get_test_stage(test) != 3)
557         goto fail;
558 
559     inl(0x0);
560     if (get_test_stage(test) != 4)
561         goto fail;
562 
563     // test low/high IO port
564     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
565     inb(0x5000);
566     if (get_test_stage(test) != 5)
567         goto fail;
568 
569     io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
570     inw(0x9000);
571     if (get_test_stage(test) != 6)
572         goto fail;
573 
574     // test partial pass
575     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
576     inl(0x4FFF);
577     if (get_test_stage(test) != 7)
578         goto fail;
579 
580     // test across pages
581     inc_test_stage(test);
582     inl(0x7FFF);
583     if (get_test_stage(test) != 8)
584         goto fail;
585 
586     inc_test_stage(test);
587     io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
588     inl(0x7FFF);
589     if (get_test_stage(test) != 10)
590         goto fail;
591 
592     io_bitmap[0] = 0;
593     inl(0xFFFF);
594     if (get_test_stage(test) != 11)
595         goto fail;
596 
597     io_bitmap[0] = 0xFF;
598     io_bitmap[8192] = 0;
599     inl(0xFFFF);
600     inc_test_stage(test);
601     if (get_test_stage(test) != 12)
602         goto fail;
603 
604     return;
605 
606 fail:
607     report("stage %d", false, get_test_stage(test));
608     test->scratch = -1;
609 }
610 
611 static bool ioio_finished(struct test *test)
612 {
613     unsigned port, size;
614 
615     /* Only expect IOIO intercepts */
616     if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
617         return true;
618 
619     if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
620         return true;
621 
622     /* one step forward */
623     test->scratch += 1;
624 
625     port = test->vmcb->control.exit_info_1 >> 16;
626     size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;
627 
628     while (size--) {
629         io_bitmap[port / 8] &= ~(1 << (port & 7));
630         port++;
631     }
632 
633     return false;
634 }
635 
636 static bool check_ioio(struct test *test)
637 {
638     memset(io_bitmap, 0, 8193);
639     return test->scratch != -1;
640 }
641 
642 static void prepare_asid_zero(struct test *test)
643 {
644     test->vmcb->control.asid = 0;
645 }
646 
647 static void test_asid_zero(struct test *test)
648 {
649     asm volatile ("vmmcall\n\t");
650 }
651 
652 static bool check_asid_zero(struct test *test)
653 {
654     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
655 }
656 
657 static void sel_cr0_bug_prepare(struct test *test)
658 {
659     vmcb_ident(test->vmcb);
660     test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
661 }
662 
663 static bool sel_cr0_bug_finished(struct test *test)
664 {
665 	return true;
666 }
667 
668 static void sel_cr0_bug_test(struct test *test)
669 {
670     unsigned long cr0;
671 
672     /* read cr0, set CD, and write it back */
673     cr0  = read_cr0();
674     cr0 |= (1UL << 30);
675     write_cr0(cr0);
676 
677     /*
678      * If we are here the test failed: we are not in guest mode anymore,
679      * so we cannot trigger the intercept again.  Report the failure and
680      * exit.
681      */
682     report("sel_cr0 test. Cannot recover from this - exiting", false);
683     exit(report_summary());
684 }
685 
686 static bool sel_cr0_bug_check(struct test *test)
687 {
688     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
689 }
690 
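/*
 * Set the NX bit on the NPT PTE mapping null_test(); fetching it in the
 * guest must raise a nested page fault.
 */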
691 static void npt_nx_prepare(struct test *test)
692 {
693 
694     u64 *pte;
695 
696     vmcb_ident(test->vmcb);
697     pte = npt_get_pte((u64)null_test);
698 
699     *pte |= (1ULL << 63);
700 }
701 
702 static bool npt_nx_check(struct test *test)
703 {
704     u64 *pte = npt_get_pte((u64)null_test);
705 
706     *pte &= ~(1ULL << 63);
707 
708     test->vmcb->save.efer |= (1 << 11);
709 
710     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
711            && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
712 }
713 
714 static void npt_us_prepare(struct test *test)
715 {
716     u64 *pte;
717 
718     vmcb_ident(test->vmcb);
719     pte = npt_get_pte((u64)scratch_page);
720 
721     *pte &= ~(1ULL << 2);
722 }
723 
724 static void npt_us_test(struct test *test)
725 {
726     (void) *(volatile u64 *)scratch_page;
727 }
728 
729 static bool npt_us_check(struct test *test)
730 {
731     u64 *pte = npt_get_pte((u64)scratch_page);
732 
733     *pte |= (1ULL << 2);
734 
735     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
736            && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
737 }
738 
739 u64 save_pde;
740 
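/*
 * Turn the NPT PDE covering null_test() into a 2MB large page (PS=1) with a
 * reserved bit set; fetching null_test() in the guest must then raise a
 * nested page fault reporting the reserved-bit violation.
 */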
741 static void npt_rsvd_prepare(struct test *test)
742 {
743     u64 *pde;
744 
745     vmcb_ident(test->vmcb);
746     pde = npt_get_pde((u64) null_test);
747 
748     save_pde = *pde;
749     *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
750 }
751 
752 static bool npt_rsvd_check(struct test *test)
753 {
754     u64 *pde = npt_get_pde((u64) null_test);
755 
756     *pde = save_pde;
757 
758     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
759             && (test->vmcb->control.exit_info_1 == 0x10000001dULL);
760 }
761 
762 static void npt_rw_prepare(struct test *test)
763 {
764 
765     u64 *pte;
766 
767     vmcb_ident(test->vmcb);
768     pte = npt_get_pte(0x80000);
769 
770     *pte &= ~(1ULL << 1);
771 }
772 
773 static void npt_rw_test(struct test *test)
774 {
775     u64 *data = (void*)(0x80000);
776 
777     *data = 0;
778 }
779 
780 static bool npt_rw_check(struct test *test)
781 {
782     u64 *pte = npt_get_pte(0x80000);
783 
784     *pte |= (1ULL << 1);
785 
786     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
787            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
788 }
789 
790 static void npt_rw_pfwalk_prepare(struct test *test)
791 {
792 
793     u64 *pte;
794 
795     vmcb_ident(test->vmcb);
796     pte = npt_get_pte(read_cr3());
797 
798     *pte &= ~(1ULL << 1);
799 }
800 
801 static bool npt_rw_pfwalk_check(struct test *test)
802 {
803     u64 *pte = npt_get_pte(read_cr3());
804 
805     *pte |= (1ULL << 1);
806 
807     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
808            && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
809 	   && (test->vmcb->control.exit_info_2 == read_cr3());
810 }
811 
812 static void npt_rsvd_pfwalk_prepare(struct test *test)
813 {
814 
815     vmcb_ident(test->vmcb);
816 
817     pdpe[0] |= (1ULL << 8);
818 }
819 
820 static bool npt_rsvd_pfwalk_check(struct test *test)
821 {
822     pdpe[0] &= ~(1ULL << 8);
823 
824     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
825             && (test->vmcb->control.exit_info_1 == 0x200000006ULL);
826 }
827 
828 static void npt_l1mmio_prepare(struct test *test)
829 {
830     vmcb_ident(test->vmcb);
831 }
832 
833 u32 nested_apic_version1;
834 u32 nested_apic_version2;
835 
836 static void npt_l1mmio_test(struct test *test)
837 {
838     volatile u32 *data = (volatile void*)(0xfee00030UL);
839 
840     nested_apic_version1 = *data;
841     nested_apic_version2 = *data;
842 }
843 
844 static bool npt_l1mmio_check(struct test *test)
845 {
846     volatile u32 *data = (volatile void*)(0xfee00030);
847     u32 lvr = *data;
848 
849     return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
850 }
851 
852 static void npt_rw_l1mmio_prepare(struct test *test)
853 {
854 
855     u64 *pte;
856 
857     vmcb_ident(test->vmcb);
858     pte = npt_get_pte(0xfee00080);
859 
860     *pte &= ~(1ULL << 1);
861 }
862 
863 static void npt_rw_l1mmio_test(struct test *test)
864 {
865     volatile u32 *data = (volatile void*)(0xfee00080);
866 
867     *data = *data;
868 }
869 
870 static bool npt_rw_l1mmio_check(struct test *test)
871 {
872     u64 *pte = npt_get_pte(0xfee00080);
873 
874     *pte |= (1ULL << 1);
875 
876     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
877            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
878 }
879 
880 #define TSC_ADJUST_VALUE    (1ll << 32)
881 #define TSC_OFFSET_VALUE    (-1ll << 48)
882 static bool ok;
883 
884 static void tsc_adjust_prepare(struct test *test)
885 {
886     default_prepare(test);
887     test->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
888 
889     wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
890     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
891     ok = adjust == -TSC_ADJUST_VALUE;
892 }
893 
894 static void tsc_adjust_test(struct test *test)
895 {
896     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
897     ok &= adjust == -TSC_ADJUST_VALUE;
898 
899     uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
900     wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
901 
902     adjust = rdmsr(MSR_IA32_TSC_ADJUST);
903     ok &= adjust <= -2 * TSC_ADJUST_VALUE;
904 
905     uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
906     ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
907 
908     uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
909     ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
910 }
911 
912 static bool tsc_adjust_check(struct test *test)
913 {
914     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
915 
916     wrmsr(MSR_IA32_TSC_ADJUST, 0);
917     return ok && adjust <= -2 * TSC_ADJUST_VALUE;
918 }
919 
920 static void latency_prepare(struct test *test)
921 {
922     default_prepare(test);
923     runs = LATENCY_RUNS;
924     latvmrun_min = latvmexit_min = -1ULL;
925     latvmrun_max = latvmexit_max = 0;
926     vmrun_sum = vmexit_sum = 0;
927 }
928 
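/*
 * Measure VMRUN latency (tsc_start is taken in test_run() right before vmrun,
 * tsc_end here on guest entry) and VMEXIT latency (tsc_start right before the
 * vmmcall below, tsc_end in latency_finished() after the exit), LATENCY_RUNS
 * times.
 */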
929 static void latency_test(struct test *test)
930 {
931     u64 cycles;
932 
933 start:
934     tsc_end = rdtsc();
935 
936     cycles = tsc_end - tsc_start;
937 
938     if (cycles > latvmrun_max)
939         latvmrun_max = cycles;
940 
941     if (cycles < latvmrun_min)
942         latvmrun_min = cycles;
943 
944     vmrun_sum += cycles;
945 
946     tsc_start = rdtsc();
947 
948     asm volatile ("vmmcall" : : : "memory");
949     goto start;
950 }
951 
952 static bool latency_finished(struct test *test)
953 {
954     u64 cycles;
955 
956     tsc_end = rdtsc();
957 
958     cycles = tsc_end - tsc_start;
959 
960     if (cycles > latvmexit_max)
961         latvmexit_max = cycles;
962 
963     if (cycles < latvmexit_min)
964         latvmexit_min = cycles;
965 
966     vmexit_sum += cycles;
967 
968     test->vmcb->save.rip += 3;
969 
970     runs -= 1;
971 
972     return runs == 0;
973 }
974 
975 static bool latency_check(struct test *test)
976 {
977     printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
978             latvmrun_min, vmrun_sum / LATENCY_RUNS);
979     printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
980             latvmexit_min, vmexit_sum / LATENCY_RUNS);
981     return true;
982 }
983 
984 static void lat_svm_insn_prepare(struct test *test)
985 {
986     default_prepare(test);
987     runs = LATENCY_RUNS;
988     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
989     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
990     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
991 }
992 
993 static bool lat_svm_insn_finished(struct test *test)
994 {
995     u64 vmcb_phys = virt_to_phys(test->vmcb);
996     u64 cycles;
997 
998     for ( ; runs != 0; runs--) {
999         tsc_start = rdtsc();
1000         asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
1001         cycles = rdtsc() - tsc_start;
1002         if (cycles > latvmload_max)
1003             latvmload_max = cycles;
1004         if (cycles < latvmload_min)
1005             latvmload_min = cycles;
1006         vmload_sum += cycles;
1007 
1008         tsc_start = rdtsc();
1009         asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
1010         cycles = rdtsc() - tsc_start;
1011         if (cycles > latvmsave_max)
1012             latvmsave_max = cycles;
1013         if (cycles < latvmsave_min)
1014             latvmsave_min = cycles;
1015         vmsave_sum += cycles;
1016 
1017         tsc_start = rdtsc();
1018         asm volatile("stgi\n\t");
1019         cycles = rdtsc() - tsc_start;
1020         if (cycles > latstgi_max)
1021             latstgi_max = cycles;
1022         if (cycles < latstgi_min)
1023             latstgi_min = cycles;
1024         stgi_sum += cycles;
1025 
1026         tsc_start = rdtsc();
1027         asm volatile("clgi\n\t");
1028         cycles = rdtsc() - tsc_start;
1029         if (cycles > latclgi_max)
1030             latclgi_max = cycles;
1031         if (cycles < latclgi_min)
1032             latclgi_min = cycles;
1033         clgi_sum += cycles;
1034     }
1035 
1036     return true;
1037 }
1038 
1039 static bool lat_svm_insn_check(struct test *test)
1040 {
1041     printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
1042             latvmload_min, vmload_sum / LATENCY_RUNS);
1043     printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
1044             latvmsave_min, vmsave_sum / LATENCY_RUNS);
1045     printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
1046             latstgi_min, stgi_sum / LATENCY_RUNS);
1047     printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
1048             latclgi_min, clgi_sum / LATENCY_RUNS);
1049     return true;
1050 }
1051 static struct test tests[] = {
1052     { "null", default_supported, default_prepare, null_test,
1053       default_finished, null_check },
1054     { "vmrun", default_supported, default_prepare, test_vmrun,
1055        default_finished, check_vmrun },
1056     { "ioio", default_supported, prepare_ioio, test_ioio,
1057        ioio_finished, check_ioio },
1058     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
1059       null_test, default_finished, check_no_vmrun_int },
1060     { "cr3 read intercept", default_supported, prepare_cr3_intercept,
1061       test_cr3_intercept, default_finished, check_cr3_intercept },
1062     { "cr3 read nointercept", default_supported, default_prepare,
1063       test_cr3_intercept, default_finished, check_cr3_nointercept },
1064     { "cr3 read intercept emulate", smp_supported,
1065       prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
1066       default_finished, check_cr3_intercept },
1067     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
1068       default_finished, check_next_rip },
1069     { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
1070        mode_switch_finished, check_mode_switch },
1071     { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
1072        default_finished, check_asid_zero },
1073     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
1074        sel_cr0_bug_finished, sel_cr0_bug_check },
1075     { "npt_nx", npt_supported, npt_nx_prepare, null_test,
1076 	    default_finished, npt_nx_check },
1077     { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
1078 	    default_finished, npt_us_check },
1079     { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
1080 	    default_finished, npt_rsvd_check },
1081     { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
1082 	    default_finished, npt_rw_check },
1083     { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, null_test,
1084 	    default_finished, npt_rsvd_pfwalk_check },
1085     { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, null_test,
1086 	    default_finished, npt_rw_pfwalk_check },
1087     { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
1088 	    default_finished, npt_l1mmio_check },
1089     { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare, npt_rw_l1mmio_test,
1090 	    default_finished, npt_rw_l1mmio_check },
1091     { "tsc_adjust", default_supported, tsc_adjust_prepare, tsc_adjust_test,
1092        default_finished, tsc_adjust_check },
1093     { "latency_run_exit", default_supported, latency_prepare, latency_test,
1094       latency_finished, latency_check },
1095     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
1096       lat_svm_insn_finished, lat_svm_insn_check },
1097 };
1098 
1099 int main(int ac, char **av)
1100 {
1101     int i, nr;
1102     struct vmcb *vmcb;
1103 
1104     setup_vm();
1105     smp_init();
1106 
1107     if (!(cpuid(0x80000001).c & 4)) {
1108         printf("SVM not available\n");
1109         return report_summary();
1110     }
1111 
1112     setup_svm();
1113 
1114     vmcb = alloc_page();
1115 
1116     nr = ARRAY_SIZE(tests);
1117     for (i = 0; i < nr; ++i) {
1118         if (!tests[i].supported())
1119             continue;
1120         test_run(&tests[i], vmcb);
1121     }
1122 
1123     return report_summary();
1124 }
1125