xref: /kvm-unit-tests/x86/svm_tests.c (revision 410b3bf09e76fd2b6d68b424a26d407a0bc4bc11)
1 #include "svm.h"
2 #include "libcflat.h"
3 #include "processor.h"
4 #include "desc.h"
5 #include "msr.h"
6 #include "vm.h"
7 #include "smp.h"
8 #include "types.h"
9 #include "alloc_page.h"
10 #include "isr.h"
11 #include "apic.h"
12 
13 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f
14 
15 static void *scratch_page;
16 
17 #define LATENCY_RUNS 1000000
18 
19 u64 tsc_start;
20 u64 tsc_end;
21 
22 u64 vmrun_sum, vmexit_sum;
23 u64 vmsave_sum, vmload_sum;
24 u64 stgi_sum, clgi_sum;
25 u64 latvmrun_max;
26 u64 latvmrun_min;
27 u64 latvmexit_max;
28 u64 latvmexit_min;
29 u64 latvmload_max;
30 u64 latvmload_min;
31 u64 latvmsave_max;
32 u64 latvmsave_min;
33 u64 latstgi_max;
34 u64 latstgi_min;
35 u64 latclgi_max;
36 u64 latclgi_min;
37 u64 runs;
38 
39 static void null_test(struct svm_test *test)
40 {
41 }
42 
43 static bool null_check(struct svm_test *test)
44 {
45     return vmcb->control.exit_code == SVM_EXIT_VMMCALL;
46 }
47 
48 static void prepare_no_vmrun_int(struct svm_test *test)
49 {
50     vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
51 }
52 
53 static bool check_no_vmrun_int(struct svm_test *test)
54 {
55     return vmcb->control.exit_code == SVM_EXIT_ERR;
56 }
57 
58 static void test_vmrun(struct svm_test *test)
59 {
60     asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb)));
61 }
62 
63 static bool check_vmrun(struct svm_test *test)
64 {
65     return vmcb->control.exit_code == SVM_EXIT_VMRUN;
66 }
67 
68 static void prepare_cr3_intercept(struct svm_test *test)
69 {
70     default_prepare(test);
71     vmcb->control.intercept_cr_read |= 1 << 3;
72 }
73 
74 static void test_cr3_intercept(struct svm_test *test)
75 {
76     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
77 }
78 
79 static bool check_cr3_intercept(struct svm_test *test)
80 {
81     return vmcb->control.exit_code == SVM_EXIT_READ_CR3;
82 }
83 
84 static bool check_cr3_nointercept(struct svm_test *test)
85 {
86     return null_check(test) && test->scratch == read_cr3();
87 }
88 
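/*
 * CR3 read intercept vs. instruction patching: CPU 1 waits for the guest to
 * signal readiness, then rewrites the instruction at the mmio_insn label
 * (originally a MOV to the MMIO address 0xa0000, which the host has to
 * emulate) into "mov %cr3, %rax; nop".  The idea, presumably, is to check
 * that the CR3 read intercept still fires for the patched instruction, i.e.
 * that no stale emulation result bypasses the intercept.
 */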
89 static void corrupt_cr3_intercept_bypass(void *_test)
90 {
91     struct svm_test *test = _test;
92     extern volatile u32 mmio_insn;
93 
94     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
95         pause();
96     pause();
97     pause();
98     pause();
99     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
100 }
101 
102 static void prepare_cr3_intercept_bypass(struct svm_test *test)
103 {
104     default_prepare(test);
105     vmcb->control.intercept_cr_read |= 1 << 3;
106     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
107 }
108 
109 static void test_cr3_intercept_bypass(struct svm_test *test)
110 {
111     ulong a = 0xa0000;
112 
113     test->scratch = 1;
114     while (test->scratch != 2)
115         barrier();
116 
117     asm volatile ("mmio_insn: mov %0, (%0); nop"
118                   : "+a"(a) : : "memory");
119     test->scratch = a;
120 }
121 
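/*
 * DR intercept test: in intercept_dr_read/intercept_dr_write each bit n
 * controls interception of accesses to DRn, so 0xff intercepts DR0-DR7 in
 * both directions.  The host handler records in test->scratch which DR the
 * exit was for; the guest cross-checks it against the register it touched.
 */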
122 static void prepare_dr_intercept(struct svm_test *test)
123 {
124     default_prepare(test);
125     vmcb->control.intercept_dr_read = 0xff;
126     vmcb->control.intercept_dr_write = 0xff;
127 }
128 
129 static void test_dr_intercept(struct svm_test *test)
130 {
131     unsigned int i, failcnt = 0;
132 
133     /* Loop testing debug register reads */
134     for (i = 0; i < 8; i++) {
135 
136         switch (i) {
137         case 0:
138             asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
139             break;
140         case 1:
141             asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
142             break;
143         case 2:
144             asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
145             break;
146         case 3:
147             asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
148             break;
149         case 4:
150             asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
151             break;
152         case 5:
153             asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
154             break;
155         case 6:
156             asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
157             break;
158         case 7:
159             asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
160             break;
161         }
162 
163         if (test->scratch != i) {
164             report(false, "dr%u read intercept", i);
165             failcnt++;
166         }
167     }
168 
169     /* Loop testing debug register writes */
170     for (i = 0; i < 8; i++) {
171 
172         switch (i) {
173         case 0:
174             asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
175             break;
176         case 1:
177             asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
178             break;
179         case 2:
180             asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
181             break;
182         case 3:
183             asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
184             break;
185         case 4:
186             asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
187             break;
188         case 5:
189             asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
190             break;
191         case 6:
192             asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
193             break;
194         case 7:
195             asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
196             break;
197         }
198 
199         if (test->scratch != i) {
200             report(false, "dr%u write intercept", i);
201             failcnt++;
202         }
203     }
204 
205     test->scratch = failcnt;
206 }
207 
208 static bool dr_intercept_finished(struct svm_test *test)
209 {
210     ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0);
211 
212     /* Only expect DR intercepts */
213     if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
214         return true;
215 
216     /*
217      * Compute debug register number.
218      * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
219      * Programmer's Manual Volume 2 - System Programming:
220      * http://support.amd.com/TechDocs/24593.pdf
221      * there are 16 VMEXIT codes each for DR read and write.
222      */
223     test->scratch = (n % 16);
224 
225     /* Jump over MOV instruction */
226     vmcb->save.rip += 3;
227 
228     return false;
229 }
230 
231 static bool check_dr_intercept(struct svm_test *test)
232 {
233     return !test->scratch;
234 }
235 
236 static bool next_rip_supported(void)
237 {
238     return this_cpu_has(X86_FEATURE_NRIPS);
239 }
240 
241 static void prepare_next_rip(struct svm_test *test)
242 {
243     vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
244 }
245 
246 
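/*
 * With NRIPS (next_rip) support, the VMCB's next_rip field reports the
 * address of the instruction following the intercepted one.  Here that is
 * the exp_next_rip label placed immediately after the intercepted RDTSC.
 */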
247 static void test_next_rip(struct svm_test *test)
248 {
249     asm volatile ("rdtsc\n\t"
250                   ".globl exp_next_rip\n\t"
251                   "exp_next_rip:\n\t" ::: "eax", "edx");
252 }
253 
254 static bool check_next_rip(struct svm_test *test)
255 {
256     extern char exp_next_rip;
257     unsigned long address = (unsigned long)&exp_next_rip;
258 
259     return address == vmcb->control.next_rip;
260 }
261 
262 extern u8 *msr_bitmap;
263 
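/*
 * MSR intercept test.  Per the APM, the MSR permission map has three 2KB
 * regions covering MSRs 0x0-0x1fff, 0xc0000000-0xc0001fff and
 * 0xc0010000-0xc0011fff, with two bits (read/write) per MSR.  Filling the
 * whole map with 0xff therefore intercepts every MSR access in those ranges.
 */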
264 static void prepare_msr_intercept(struct svm_test *test)
265 {
266     default_prepare(test);
267     vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
268     vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
269     memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
270 }
271 
272 static void test_msr_intercept(struct svm_test *test)
273 {
274     unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
275     unsigned long msr_index;
276 
277     for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
278         if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
279             /*
280              * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
281              * Programmer's Manual volume 2 - System Programming:
282              * http://support.amd.com/TechDocs/24593.pdf
283              * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
284              */
285             continue;
286         }
287 
288         /* Skip gaps between supported MSR ranges */
289         if (msr_index == 0x2000)
290             msr_index = 0xc0000000;
291         else if (msr_index == 0xc0002000)
292             msr_index = 0xc0010000;
293 
294         test->scratch = -1;
295 
296         rdmsr(msr_index);
297 
298         /* Check that a read intercept occurred for MSR at msr_index */
299         if (test->scratch != msr_index)
300             report(false, "MSR 0x%lx read intercept", msr_index);
301 
302         /*
303          * Poor man's approach to generating a value that
304          * seems arbitrary each time around the loop.
305          */
306         msr_value += (msr_value << 1);
307 
308         wrmsr(msr_index, msr_value);
309 
310         /* Check that a write intercept occurred for MSR with msr_value */
311         if (test->scratch != msr_value)
312             report(false, "MSR 0x%lx write intercept", msr_index);
313     }
314 
315     test->scratch = -2;
316 }
317 
318 static bool msr_intercept_finished(struct svm_test *test)
319 {
320     u32 exit_code = vmcb->control.exit_code;
321     u64 exit_info_1;
322     u8 *opcode;
323 
324     if (exit_code == SVM_EXIT_MSR) {
325         exit_info_1 = vmcb->control.exit_info_1;
326     } else {
327         /*
328          * If #GP exception occurs instead, check that it was
329          * for RDMSR/WRMSR and set exit_info_1 accordingly.
330          */
331 
332         if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
333             return true;
334 
335         opcode = (u8 *)vmcb->save.rip;
336         if (opcode[0] != 0x0f)
337             return true;
338 
339         switch (opcode[1]) {
340         case 0x30: /* WRMSR */
341             exit_info_1 = 1;
342             break;
343         case 0x32: /* RDMSR */
344             exit_info_1 = 0;
345             break;
346         default:
347             return true;
348         }
349 
350         /*
351          * Warn that a #GP exception occurred instead.
352          * RCX holds the MSR index.
353          */
354         printf("%s 0x%lx #GP exception\n",
355             exit_info_1 ? "WRMSR" : "RDMSR", get_regs().rcx);
356     }
357 
358     /* Jump over RDMSR/WRMSR instruction */
359     vmcb->save.rip += 2;
360 
361     /*
362      * Test whether the intercept was for RDMSR/WRMSR.
363      * For RDMSR, test->scratch is set to the MSR index;
364      *      RCX holds the MSR index.
365      * For WRMSR, test->scratch is set to the MSR value;
366      *      RDX holds the upper 32 bits of the MSR value,
367      *      while RAX holds its lower 32 bits.
368      */
369     if (exit_info_1)
370         test->scratch =
371             ((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff));
372     else
373         test->scratch = get_regs().rcx;
374 
375     return false;
376 }
377 
378 static bool check_msr_intercept(struct svm_test *test)
379 {
380     memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
381     return (test->scratch == -2);
382 }
383 
384 static void prepare_mode_switch(struct svm_test *test)
385 {
386     vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
387                                              |  (1ULL << UD_VECTOR)
388                                              |  (1ULL << DF_VECTOR)
389                                              |  (1ULL << PF_VECTOR);
390     test->scratch = 0;
391 }
392 
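/*
 * The guest walks down from 64-bit long mode to 32-bit protected mode,
 * 16-bit protected mode and finally real mode, issues a VMMCALL, then walks
 * back up to long mode and issues a second VMMCALL.
 * mode_switch_finished() sanity-checks CR0, CR4 and EFER at each exit.
 */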
393 static void test_mode_switch(struct svm_test *test)
394 {
395     asm volatile("	cli\n"
396 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
397 		 "1:\n"
398 		 "	.long 2f\n"
399 		 "	.long " xstr(KERNEL_CS32) "\n"
400 		 ".code32\n"
401 		 "2:\n"
402 		 "	movl %%cr0, %%eax\n"
403 		 "	btcl  $31, %%eax\n" /* clear PG */
404 		 "	movl %%eax, %%cr0\n"
405 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
406 		 "	rdmsr\n"
407 		 "	btcl $8, %%eax\n" /* clear LME */
408 		 "	wrmsr\n"
409 		 "	movl %%cr4, %%eax\n"
410 		 "	btcl $5, %%eax\n" /* clear PAE */
411 		 "	movl %%eax, %%cr4\n"
412 		 "	movw %[ds16], %%ax\n"
413 		 "	movw %%ax, %%ds\n"
414 		 "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
415 		 ".code16\n"
416 		 "3:\n"
417 		 "	movl %%cr0, %%eax\n"
418 		 "	btcl $0, %%eax\n" /* clear PE  */
419 		 "	movl %%eax, %%cr0\n"
420 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
421 		 "4:\n"
422 		 "	vmmcall\n"
423 		 "	movl %%cr0, %%eax\n"
424 		 "	btsl $0, %%eax\n" /* set PE  */
425 		 "	movl %%eax, %%cr0\n"
426 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
427 		 ".code32\n"
428 		 "5:\n"
429 		 "	movl %%cr4, %%eax\n"
430 		 "	btsl $5, %%eax\n" /* set PAE */
431 		 "	movl %%eax, %%cr4\n"
432 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
433 		 "	rdmsr\n"
434 		 "	btsl $8, %%eax\n" /* set LME */
435 		 "	wrmsr\n"
436 		 "	movl %%cr0, %%eax\n"
437 		 "	btsl  $31, %%eax\n" /* set PG */
438 		 "	movl %%eax, %%cr0\n"
439 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
440 		 ".code64\n\t"
441 		 "6:\n"
442 		 "	vmmcall\n"
443 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
444 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
445 		 : "rax", "rbx", "rcx", "rdx", "memory");
446 }
447 
448 static bool mode_switch_finished(struct svm_test *test)
449 {
450     u64 cr0, cr4, efer;
451 
452     cr0  = vmcb->save.cr0;
453     cr4  = vmcb->save.cr4;
454     efer = vmcb->save.efer;
455 
456     /* Only expect VMMCALL intercepts */
457     if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
458 	    return true;
459 
460     /* Jump over VMMCALL instruction */
461     vmcb->save.rip += 3;
462 
463     /* Do sanity checks */
464     switch (test->scratch) {
465     case 0:
466         /* Test should be in real mode now - check for this */
467         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
468             (cr4  & 0x00000020) || /* CR4.PAE */
469             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
470                 return true;
471         break;
472     case 2:
473         /* Test should be back in long-mode now - check for this */
474         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
475             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
476             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
477                 return true;
478         break;
479     }
480 
481     /* one step forward */
482     test->scratch += 1;
483 
484     return test->scratch == 2;
485 }
486 
487 static bool check_mode_switch(struct svm_test *test)
488 {
489 	return test->scratch == 2;
490 }
491 
492 extern u8 *io_bitmap;
493 
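/*
 * IOIO intercept test.  In the IO permission bitmap each port has one bit:
 * port P maps to io_bitmap[P / 8], bit P % 8 (e.g. port 0x5000 is bit 0 of
 * byte 0xa00).  A multi-byte access is intercepted if any of the covered
 * bits is set, which is what the "partial pass" and "across pages" stages
 * rely on.
 */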
494 static void prepare_ioio(struct svm_test *test)
495 {
496     vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
497     test->scratch = 0;
498     memset(io_bitmap, 0, 8192);
499     io_bitmap[8192] = 0xFF;
500 }
501 
502 static void test_ioio(struct svm_test *test)
503 {
504     // stage 0, test IO pass
505     inb(0x5000);
506     outb(0x0, 0x5000);
507     if (get_test_stage(test) != 0)
508         goto fail;
509 
510     // test IO width, in/out
511     io_bitmap[0] = 0xFF;
512     inc_test_stage(test);
513     inb(0x0);
514     if (get_test_stage(test) != 2)
515         goto fail;
516 
517     outw(0x0, 0x0);
518     if (get_test_stage(test) != 3)
519         goto fail;
520 
521     inl(0x0);
522     if (get_test_stage(test) != 4)
523         goto fail;
524 
525     // test low/high IO port
526     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
527     inb(0x5000);
528     if (get_test_stage(test) != 5)
529         goto fail;
530 
531     io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
532     inw(0x9000);
533     if (get_test_stage(test) != 6)
534         goto fail;
535 
536     // test partial pass
537     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
538     inl(0x4FFF);
539     if (get_test_stage(test) != 7)
540         goto fail;
541 
542     // test across pages
543     inc_test_stage(test);
544     inl(0x7FFF);
545     if (get_test_stage(test) != 8)
546         goto fail;
547 
548     inc_test_stage(test);
549     io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
550     inl(0x7FFF);
551     if (get_test_stage(test) != 10)
552         goto fail;
553 
554     io_bitmap[0] = 0;
555     inl(0xFFFF);
556     if (get_test_stage(test) != 11)
557         goto fail;
558 
559     io_bitmap[0] = 0xFF;
560     io_bitmap[8192] = 0;
561     inl(0xFFFF);
562     inc_test_stage(test);
563     if (get_test_stage(test) != 12)
564         goto fail;
565 
566     return;
567 
568 fail:
569     report(false, "stage %d", get_test_stage(test));
570     test->scratch = -1;
571 }
572 
573 static bool ioio_finished(struct svm_test *test)
574 {
575     unsigned port, size;
576 
577     /* Only expect IOIO intercepts */
578     if (vmcb->control.exit_code == SVM_EXIT_VMMCALL)
579         return true;
580 
581     if (vmcb->control.exit_code != SVM_EXIT_IOIO)
582         return true;
583 
584     /* one step forward */
585     test->scratch += 1;
586 
587     port = vmcb->control.exit_info_1 >> 16;
588     size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;
589 
590     while (size--) {
591         io_bitmap[port / 8] &= ~(1 << (port & 7));
592         port++;
593     }
594 
595     return false;
596 }
597 
598 static bool check_ioio(struct svm_test *test)
599 {
600     memset(io_bitmap, 0, 8193);
601     return test->scratch != -1;
602 }
603 
604 static void prepare_asid_zero(struct svm_test *test)
605 {
606     vmcb->control.asid = 0;
607 }
608 
609 static void test_asid_zero(struct svm_test *test)
610 {
611     asm volatile ("vmmcall\n\t");
612 }
613 
614 static bool check_asid_zero(struct svm_test *test)
615 {
616     return vmcb->control.exit_code == SVM_EXIT_ERR;
617 }
618 
619 static void sel_cr0_bug_prepare(struct svm_test *test)
620 {
621     vmcb_ident(vmcb);
622     vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
623 }
624 
625 static bool sel_cr0_bug_finished(struct svm_test *test)
626 {
627 	return true;
628 }
629 
630 static void sel_cr0_bug_test(struct svm_test *test)
631 {
632     unsigned long cr0;
633 
634     /* read cr0, clear CD, and write back */
635     cr0  = read_cr0();
636     cr0 |= (1UL << 30);
637     write_cr0(cr0);
638 
639     /*
640      * If we are here, the test failed: we are no longer in guest mode, so
641      * we cannot trigger the intercept we were hoping for.  Report the
642      * failure and exit.
643      */
644     report(false, "sel_cr0 test. Cannot recover from this - exiting");
645     exit(report_summary());
646 }
647 
648 static bool sel_cr0_bug_check(struct svm_test *test)
649 {
650     return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
651 }
652 
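/*
 * Nested page fault tests.  For SVM_EXIT_NPF, exit_info_1 carries a
 * #PF-style error code in its low bits; per the APM, bit 32 is set when the
 * fault happened on the final guest physical translation and bit 33 when it
 * happened while walking the guest's own page tables.  The exact values
 * checked below (e.g. 0x100000015ULL) encode both pieces.
 */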
653 static void npt_nx_prepare(struct svm_test *test)
654 {
655 
656     u64 *pte;
657 
658     vmcb_ident(vmcb);
659     pte = npt_get_pte((u64)null_test);
660 
661     *pte |= (1ULL << 63);
662 }
663 
664 static bool npt_nx_check(struct svm_test *test)
665 {
666     u64 *pte = npt_get_pte((u64)null_test);
667 
668     *pte &= ~(1ULL << 63);
669 
670     vmcb->save.efer |= (1 << 11);
671 
672     return (vmcb->control.exit_code == SVM_EXIT_NPF)
673            && (vmcb->control.exit_info_1 == 0x100000015ULL);
674 }
675 
676 static void npt_us_prepare(struct svm_test *test)
677 {
678     u64 *pte;
679 
680     scratch_page = alloc_page();
681     vmcb_ident(vmcb);
682     pte = npt_get_pte((u64)scratch_page);
683 
684     *pte &= ~(1ULL << 2);
685 }
686 
687 static void npt_us_test(struct svm_test *test)
688 {
689     (void) *(volatile u64 *)scratch_page;
690 }
691 
692 static bool npt_us_check(struct svm_test *test)
693 {
694     u64 *pte = npt_get_pte((u64)scratch_page);
695 
696     *pte |= (1ULL << 2);
697 
698     return (vmcb->control.exit_code == SVM_EXIT_NPF)
699            && (vmcb->control.exit_info_1 == 0x100000005ULL);
700 }
701 
702 u64 save_pde;
703 
704 static void npt_rsvd_prepare(struct svm_test *test)
705 {
706     u64 *pde;
707 
708     vmcb_ident(vmcb);
709     pde = npt_get_pde((u64) null_test);
710 
711     save_pde = *pde;
712     *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
713 }
714 
715 static bool npt_rsvd_check(struct svm_test *test)
716 {
717     u64 *pde = npt_get_pde((u64) null_test);
718 
719     *pde = save_pde;
720 
721     return (vmcb->control.exit_code == SVM_EXIT_NPF)
722             && (vmcb->control.exit_info_1 == 0x10000001dULL);
723 }
724 
725 static void npt_rw_prepare(struct svm_test *test)
726 {
727 
728     u64 *pte;
729 
730     vmcb_ident(vmcb);
731     pte = npt_get_pte(0x80000);
732 
733     *pte &= ~(1ULL << 1);
734 }
735 
736 static void npt_rw_test(struct svm_test *test)
737 {
738     u64 *data = (void*)(0x80000);
739 
740     *data = 0;
741 }
742 
743 static bool npt_rw_check(struct svm_test *test)
744 {
745     u64 *pte = npt_get_pte(0x80000);
746 
747     *pte |= (1ULL << 1);
748 
749     return (vmcb->control.exit_code == SVM_EXIT_NPF)
750            && (vmcb->control.exit_info_1 == 0x100000007ULL);
751 }
752 
753 static void npt_rw_pfwalk_prepare(struct svm_test *test)
754 {
755 
756     u64 *pte;
757 
758     vmcb_ident(vmcb);
759     pte = npt_get_pte(read_cr3());
760 
761     *pte &= ~(1ULL << 1);
762 }
763 
764 static bool npt_rw_pfwalk_check(struct svm_test *test)
765 {
766     u64 *pte = npt_get_pte(read_cr3());
767 
768     *pte |= (1ULL << 1);
769 
770     return (vmcb->control.exit_code == SVM_EXIT_NPF)
771            && (vmcb->control.exit_info_1 == 0x200000006ULL)
772 	   && (vmcb->control.exit_info_2 == read_cr3());
773 }
774 
775 static void npt_rsvd_pfwalk_prepare(struct svm_test *test)
776 {
777     u64 *pdpe;
778     vmcb_ident(vmcb);
779 
780     pdpe = npt_get_pdpe();
781     pdpe[0] |= (1ULL << 8);
782 }
783 
784 static bool npt_rsvd_pfwalk_check(struct svm_test *test)
785 {
786     u64 *pdpe = npt_get_pdpe();
787     pdpe[0] &= ~(1ULL << 8);
788 
789     return (vmcb->control.exit_code == SVM_EXIT_NPF)
790             && (vmcb->control.exit_info_1 == 0x20000000eULL);
791 }
792 
793 static void npt_l1mmio_prepare(struct svm_test *test)
794 {
795     vmcb_ident(vmcb);
796 }
797 
798 u32 nested_apic_version1;
799 u32 nested_apic_version2;
800 
801 static void npt_l1mmio_test(struct svm_test *test)
802 {
803     volatile u32 *data = (volatile void*)(0xfee00030UL);
804 
805     nested_apic_version1 = *data;
806     nested_apic_version2 = *data;
807 }
808 
809 static bool npt_l1mmio_check(struct svm_test *test)
810 {
811     volatile u32 *data = (volatile void*)(0xfee00030);
812     u32 lvr = *data;
813 
814     return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
815 }
816 
817 static void npt_rw_l1mmio_prepare(struct svm_test *test)
818 {
819 
820     u64 *pte;
821 
822     vmcb_ident(vmcb);
823     pte = npt_get_pte(0xfee00080);
824 
825     *pte &= ~(1ULL << 1);
826 }
827 
828 static void npt_rw_l1mmio_test(struct svm_test *test)
829 {
830     volatile u32 *data = (volatile void*)(0xfee00080);
831 
832     *data = *data;
833 }
834 
835 static bool npt_rw_l1mmio_check(struct svm_test *test)
836 {
837     u64 *pte = npt_get_pte(0xfee00080);
838 
839     *pte |= (1ULL << 1);
840 
841     return (vmcb->control.exit_code == SVM_EXIT_NPF)
842            && (vmcb->control.exit_info_1 == 0x100000007ULL);
843 }
844 
845 #define TSC_ADJUST_VALUE    (1ll << 32)
846 #define TSC_OFFSET_VALUE    (-(1ll << 48))
847 static bool ok;
848 
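/*
 * TSC_ADJUST test.  The architected behaviour of IA32_TSC_ADJUST is that a
 * WRMSR to IA32_TSC also modifies IA32_TSC_ADJUST by the written delta, so
 * after the guest rewinds the TSC by roughly TSC_ADJUST_VALUE the adjust MSR
 * is expected to be at or below -2 * TSC_ADJUST_VALUE.
 */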
849 static void tsc_adjust_prepare(struct svm_test *test)
850 {
851     default_prepare(test);
852     vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
853 
854     wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
855     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
856     ok = adjust == -TSC_ADJUST_VALUE;
857 }
858 
859 static void tsc_adjust_test(struct svm_test *test)
860 {
861     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
862     ok &= adjust == -TSC_ADJUST_VALUE;
863 
864     uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
865     wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
866 
867     adjust = rdmsr(MSR_IA32_TSC_ADJUST);
868     ok &= adjust <= -2 * TSC_ADJUST_VALUE;
869 
870     uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
871     ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
872 
873     uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
874     ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
875 }
876 
877 static bool tsc_adjust_check(struct svm_test *test)
878 {
879     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
880 
881     wrmsr(MSR_IA32_TSC_ADJUST, 0);
882     return ok && adjust <= -2 * TSC_ADJUST_VALUE;
883 }
884 
885 static void latency_prepare(struct svm_test *test)
886 {
887     default_prepare(test);
888     runs = LATENCY_RUNS;
889     latvmrun_min = latvmexit_min = -1ULL;
890     latvmrun_max = latvmexit_max = 0;
891     vmrun_sum = vmexit_sum = 0;
892     tsc_start = rdtsc();
893 }
894 
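/*
 * As written, tsc_start is only reset by the guest just before its VMMCALL,
 * so the "VMRUN" latency measured at the top of the loop actually covers
 * VMEXIT + host handling + VMRUN for every iteration after the first.
 */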
895 static void latency_test(struct svm_test *test)
896 {
897     u64 cycles;
898 
899 start:
900     tsc_end = rdtsc();
901 
902     cycles = tsc_end - tsc_start;
903 
904     if (cycles > latvmrun_max)
905         latvmrun_max = cycles;
906 
907     if (cycles < latvmrun_min)
908         latvmrun_min = cycles;
909 
910     vmrun_sum += cycles;
911 
912     tsc_start = rdtsc();
913 
914     asm volatile ("vmmcall" : : : "memory");
915     goto start;
916 }
917 
918 static bool latency_finished(struct svm_test *test)
919 {
920     u64 cycles;
921 
922     tsc_end = rdtsc();
923 
924     cycles = tsc_end - tsc_start;
925 
926     if (cycles > latvmexit_max)
927         latvmexit_max = cycles;
928 
929     if (cycles < latvmexit_min)
930         latvmexit_min = cycles;
931 
932     vmexit_sum += cycles;
933 
934     vmcb->save.rip += 3;
935 
936     runs -= 1;
937 
938     tsc_end = rdtsc();
939 
940     return runs == 0;
941 }
942 
943 static bool latency_check(struct svm_test *test)
944 {
945     printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
946             latvmrun_min, vmrun_sum / LATENCY_RUNS);
947     printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
948             latvmexit_min, vmexit_sum / LATENCY_RUNS);
949     return true;
950 }
951 
952 static void lat_svm_insn_prepare(struct svm_test *test)
953 {
954     default_prepare(test);
955     runs = LATENCY_RUNS;
956     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
957     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
958     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
959 }
960 
961 static bool lat_svm_insn_finished(struct svm_test *test)
962 {
963     u64 vmcb_phys = virt_to_phys(vmcb);
964     u64 cycles;
965 
966     for ( ; runs != 0; runs--) {
967         tsc_start = rdtsc();
968         asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
969         cycles = rdtsc() - tsc_start;
970         if (cycles > latvmload_max)
971             latvmload_max = cycles;
972         if (cycles < latvmload_min)
973             latvmload_min = cycles;
974         vmload_sum += cycles;
975 
976         tsc_start = rdtsc();
977         asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
978         cycles = rdtsc() - tsc_start;
979         if (cycles > latvmsave_max)
980             latvmsave_max = cycles;
981         if (cycles < latvmsave_min)
982             latvmsave_min = cycles;
983         vmsave_sum += cycles;
984 
985         tsc_start = rdtsc();
986         asm volatile("stgi\n\t");
987         cycles = rdtsc() - tsc_start;
988         if (cycles > latstgi_max)
989             latstgi_max = cycles;
990         if (cycles < latstgi_min)
991             latstgi_min = cycles;
992         stgi_sum += cycles;
993 
994         tsc_start = rdtsc();
995         asm volatile("clgi\n\t");
996         cycles = rdtsc() - tsc_start;
997         if (cycles > latclgi_max)
998             latclgi_max = cycles;
999         if (cycles < latclgi_min)
1000             latclgi_min = cycles;
1001         clgi_sum += cycles;
1002     }
1003 
1004     tsc_end = rdtsc();
1005 
1006     return true;
1007 }
1008 
1009 static bool lat_svm_insn_check(struct svm_test *test)
1010 {
1011     printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
1012             latvmload_min, vmload_sum / LATENCY_RUNS);
1013     printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
1014             latvmsave_min, vmsave_sum / LATENCY_RUNS);
1015     printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
1016             latstgi_min, stgi_sum / LATENCY_RUNS);
1017     printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
1018             latclgi_min, clgi_sum / LATENCY_RUNS);
1019     return true;
1020 }
1021 
1022 bool pending_event_ipi_fired;
1023 bool pending_event_guest_run;
1024 
1025 static void pending_event_ipi_isr(isr_regs_t *regs)
1026 {
1027     pending_event_ipi_fired = true;
1028     eoi();
1029 }
1030 
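/*
 * A self-IPI is queued before the guest is entered, with the INTR intercept
 * and V_INTR_MASKING enabled.  Stage 0 expects the pending interrupt to
 * force SVM_EXIT_INTR before the guest body gets to run; stage 1 re-enters
 * with nothing pending and expects the guest to run normally.
 */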
1031 static void pending_event_prepare(struct svm_test *test)
1032 {
1033     int ipi_vector = 0xf1;
1034 
1035     default_prepare(test);
1036 
1037     pending_event_ipi_fired = false;
1038 
1039     handle_irq(ipi_vector, pending_event_ipi_isr);
1040 
1041     pending_event_guest_run = false;
1042 
1043     vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1044     vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1045 
1046     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1047                   APIC_DM_FIXED | ipi_vector, 0);
1048 
1049     set_test_stage(test, 0);
1050 }
1051 
1052 static void pending_event_test(struct svm_test *test)
1053 {
1054     pending_event_guest_run = true;
1055 }
1056 
1057 static bool pending_event_finished(struct svm_test *test)
1058 {
1059     switch (get_test_stage(test)) {
1060     case 0:
1061         if (vmcb->control.exit_code != SVM_EXIT_INTR) {
1062             report(false, "VMEXIT not due to pending interrupt. Exit reason 0x%x",
1063                    vmcb->control.exit_code);
1064             return true;
1065         }
1066 
1067         vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1068         vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1069 
1070         if (pending_event_guest_run) {
1071             report(false, "Guest ran before host received IPI\n");
1072             return true;
1073         }
1074 
1075         irq_enable();
1076         asm volatile ("nop");
1077         irq_disable();
1078 
1079         if (!pending_event_ipi_fired) {
1080             report(false, "Pending interrupt not dispatched after IRQ enabled\n");
1081             return true;
1082         }
1083         break;
1084 
1085     case 1:
1086         if (!pending_event_guest_run) {
1087             report(false, "Guest did not resume when no interrupt\n");
1088             return true;
1089         }
1090         break;
1091     }
1092 
1093     inc_test_stage(test);
1094 
1095     return get_test_stage(test) == 2;
1096 }
1097 
1098 static bool pending_event_check(struct svm_test *test)
1099 {
1100     return get_test_stage(test) == 2;
1101 }
1102 
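/*
 * Same idea, but exercising V_INTR_MASKING.  On the first run VINTR_MASKING
 * is zero, so the guest's STI must let the pending IPI fire inside the
 * guest.  On the second run VINTR_MASKING is set, so the guest's RFLAGS.IF
 * no longer unmasks physical interrupts and the IPI may only be delivered
 * once the host re-enables interrupts.
 */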
1103 static void pending_event_cli_prepare(struct svm_test *test)
1104 {
1105     default_prepare(test);
1106 
1107     pending_event_ipi_fired = false;
1108 
1109     handle_irq(0xf1, pending_event_ipi_isr);
1110 
1111     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1112               APIC_DM_FIXED | 0xf1, 0);
1113 
1114     set_test_stage(test, 0);
1115 }
1116 
1117 static void pending_event_cli_prepare_gif_clear(struct svm_test *test)
1118 {
1119     asm("cli");
1120 }
1121 
1122 static void pending_event_cli_test(struct svm_test *test)
1123 {
1124     if (pending_event_ipi_fired == true) {
1125         set_test_stage(test, -1);
1126         report(false, "Interrupt preceeded guest");
1127         vmmcall();
1128     }
1129 
1130     /* VINTR_MASKING is zero.  This should cause the IPI to fire.  */
1131     irq_enable();
1132     asm volatile ("nop");
1133     irq_disable();
1134 
1135     if (pending_event_ipi_fired != true) {
1136         set_test_stage(test, -1);
1137         report(false, "Interrupt not triggered by guest");
1138     }
1139 
1140     vmmcall();
1141 
1142     /*
1143      * Now VINTR_MASKING=1, but no interrupt is pending so
1144      * the VINTR interception should be clear in VMCB02.  Check
1145      * that L0 did not leave a stale VINTR in the VMCB.
1146      */
1147     irq_enable();
1148     asm volatile ("nop");
1149     irq_disable();
1150 }
1151 
1152 static bool pending_event_cli_finished(struct svm_test *test)
1153 {
1154     if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1155         report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1156                vmcb->control.exit_code);
1157         return true;
1158     }
1159 
1160     switch (get_test_stage(test)) {
1161     case 0:
1162         vmcb->save.rip += 3;
1163 
1164         pending_event_ipi_fired = false;
1165 
1166         vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1167 
1168         /* Now entering again with VINTR_MASKING=1.  */
1169         apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1170               APIC_DM_FIXED | 0xf1, 0);
1171 
1172         break;
1173 
1174     case 1:
1175         if (pending_event_ipi_fired == true) {
1176             report(false, "Interrupt triggered by guest");
1177             return true;
1178         }
1179 
1180         irq_enable();
1181         asm volatile ("nop");
1182         irq_disable();
1183 
1184         if (pending_event_ipi_fired != true) {
1185             report(false, "Interrupt not triggered by host");
1186             return true;
1187         }
1188 
1189         break;
1190 
1191     default:
1192         return true;
1193     }
1194 
1195     inc_test_stage(test);
1196 
1197     return get_test_stage(test) == 2;
1198 }
1199 
1200 static bool pending_event_cli_check(struct svm_test *test)
1201 {
1202     return get_test_stage(test) == 2;
1203 }
1204 
1205 #define TIMER_VECTOR    222
1206 
1207 static volatile bool timer_fired;
1208 
1209 static void timer_isr(isr_regs_t *regs)
1210 {
1211     timer_fired = true;
1212     apic_write(APIC_EOI, 0);
1213 }
1214 
1215 static void interrupt_prepare(struct svm_test *test)
1216 {
1217     default_prepare(test);
1218     handle_irq(TIMER_VECTOR, timer_isr);
1219     timer_fired = false;
1220     set_test_stage(test, 0);
1221 }
1222 
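/*
 * Four stages: a LAPIC timer interrupt taken directly by the guest, the same
 * interrupt intercepted by the host (INTR intercept + V_INTR_MASKING), and
 * then both variants again with the guest halted, to check that HLT is woken
 * up correctly.  interrupt_finished() toggles the intercept bits at each
 * VMMCALL.
 */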
1223 static void interrupt_test(struct svm_test *test)
1224 {
1225     long long start, loops;
1226 
1227     apic_write(APIC_LVTT, TIMER_VECTOR);
1228     irq_enable();
1229     apic_write(APIC_TMICT, 1); // Timer Initial Count Register (offset 0x380), one-shot
1230     for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1231         asm volatile ("nop");
1232 
1233     report(timer_fired, "direct interrupt while running guest");
1234 
1235     if (!timer_fired) {
1236         set_test_stage(test, -1);
1237         vmmcall();
1238     }
1239 
1240     apic_write(APIC_TMICT, 0);
1241     irq_disable();
1242     vmmcall();
1243 
1244     timer_fired = false;
1245     apic_write(APIC_TMICT, 1);
1246     for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1247         asm volatile ("nop");
1248 
1249     report(timer_fired, "intercepted interrupt while running guest");
1250 
1251     if (!timer_fired) {
1252         set_test_stage(test, -1);
1253         vmmcall();
1254     }
1255 
1256     irq_enable();
1257     apic_write(APIC_TMICT, 0);
1258     irq_disable();
1259 
1260     timer_fired = false;
1261     start = rdtsc();
1262     apic_write(APIC_TMICT, 1000000);
1263     asm volatile ("sti; hlt");
1264 
1265     report(rdtsc() - start > 10000 && timer_fired,
1266           "direct interrupt + hlt");
1267 
1268     if (!timer_fired) {
1269         set_test_stage(test, -1);
1270         vmmcall();
1271     }
1272 
1273     apic_write(APIC_TMICT, 0);
1274     irq_disable();
1275     vmmcall();
1276 
1277     timer_fired = false;
1278     start = rdtsc();
1279     apic_write(APIC_TMICT, 1000000);
1280     asm volatile ("hlt");
1281 
1282     report(rdtsc() - start > 10000 && timer_fired,
1283            "intercepted interrupt + hlt");
1284 
1285     if (!timer_fired) {
1286         set_test_stage(test, -1);
1287         vmmcall();
1288     }
1289 
1290     apic_write(APIC_TMICT, 0);
1291     irq_disable();
1292 }
1293 
1294 static bool interrupt_finished(struct svm_test *test)
1295 {
1296     switch (get_test_stage(test)) {
1297     case 0:
1298     case 2:
1299         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1300             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1301                    vmcb->control.exit_code);
1302             return true;
1303         }
1304         vmcb->save.rip += 3;
1305 
1306         vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1307         vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1308         break;
1309 
1310     case 1:
1311     case 3:
1312         if (vmcb->control.exit_code != SVM_EXIT_INTR) {
1313             report(false, "VMEXIT not due to intr intercept. Exit reason 0x%x",
1314                    vmcb->control.exit_code);
1315             return true;
1316         }
1317 
1318         irq_enable();
1319         asm volatile ("nop");
1320         irq_disable();
1321 
1322         vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1323         vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1324         break;
1325 
1326     case 4:
1327         break;
1328 
1329     default:
1330         return true;
1331     }
1332 
1333     inc_test_stage(test);
1334 
1335     return get_test_stage(test) == 5;
1336 }
1337 
1338 static bool interrupt_check(struct svm_test *test)
1339 {
1340     return get_test_stage(test) == 5;
1341 }
1342 
1343 #define TEST(name) { #name, .v2 = name }
1344 
1345 /*
1346  * v2 tests
1347  */
1348 
1349 static void basic_guest_main(struct svm_test *test)
1350 {
1351 }
1352 
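/*
 * Illegal guest state: clearing EFER.SVME in the guest state must make VMRUN
 * fail with SVM_EXIT_ERR, while the original EFER value must still result in
 * a normal SVM_EXIT_VMMCALL run.
 */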
1353 static void svm_guest_state_test(void)
1354 {
1355 	u64 efer_saved = vmcb->save.efer;
1356 	u64 efer = efer_saved;
1357 
1358 	test_set_guest(basic_guest_main);
1359 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer);
1360 	efer &= ~EFER_SVME;
1361 	vmcb->save.efer = efer;
1362 	report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer);
1363 	vmcb->save.efer = efer_saved;
1364 }
1365 
1366 struct svm_test svm_tests[] = {
1367     { "null", default_supported, default_prepare,
1368       default_prepare_gif_clear, null_test,
1369       default_finished, null_check },
1370     { "vmrun", default_supported, default_prepare,
1371       default_prepare_gif_clear, test_vmrun,
1372        default_finished, check_vmrun },
1373     { "ioio", default_supported, prepare_ioio,
1374        default_prepare_gif_clear, test_ioio,
1375        ioio_finished, check_ioio },
1376     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
1377       default_prepare_gif_clear, null_test, default_finished,
1378       check_no_vmrun_int },
1379     { "cr3 read intercept", default_supported,
1380       prepare_cr3_intercept, default_prepare_gif_clear,
1381       test_cr3_intercept, default_finished, check_cr3_intercept },
1382     { "cr3 read nointercept", default_supported, default_prepare,
1383       default_prepare_gif_clear, test_cr3_intercept, default_finished,
1384       check_cr3_nointercept },
1385     { "cr3 read intercept emulate", smp_supported,
1386       prepare_cr3_intercept_bypass, default_prepare_gif_clear,
1387       test_cr3_intercept_bypass, default_finished, check_cr3_intercept },
1388     { "dr intercept check", default_supported, prepare_dr_intercept,
1389       default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished,
1390       check_dr_intercept },
1391     { "next_rip", next_rip_supported, prepare_next_rip,
1392       default_prepare_gif_clear, test_next_rip,
1393       default_finished, check_next_rip },
1394     { "msr intercept check", default_supported, prepare_msr_intercept,
1395       default_prepare_gif_clear, test_msr_intercept,
1396       msr_intercept_finished, check_msr_intercept },
1397     { "mode_switch", default_supported, prepare_mode_switch,
1398       default_prepare_gif_clear, test_mode_switch,
1399        mode_switch_finished, check_mode_switch },
1400     { "asid_zero", default_supported, prepare_asid_zero,
1401       default_prepare_gif_clear, test_asid_zero,
1402        default_finished, check_asid_zero },
1403     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare,
1404       default_prepare_gif_clear, sel_cr0_bug_test,
1405        sel_cr0_bug_finished, sel_cr0_bug_check },
1406     { "npt_nx", npt_supported, npt_nx_prepare,
1407       default_prepare_gif_clear, null_test,
1408       default_finished, npt_nx_check },
1409     { "npt_us", npt_supported, npt_us_prepare,
1410       default_prepare_gif_clear, npt_us_test,
1411       default_finished, npt_us_check },
1412     { "npt_rsvd", npt_supported, npt_rsvd_prepare,
1413       default_prepare_gif_clear, null_test,
1414       default_finished, npt_rsvd_check },
1415     { "npt_rw", npt_supported, npt_rw_prepare,
1416       default_prepare_gif_clear, npt_rw_test,
1417       default_finished, npt_rw_check },
1418     { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare,
1419       default_prepare_gif_clear, null_test,
1420       default_finished, npt_rsvd_pfwalk_check },
1421     { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare,
1422       default_prepare_gif_clear, null_test,
1423       default_finished, npt_rw_pfwalk_check },
1424     { "npt_l1mmio", npt_supported, npt_l1mmio_prepare,
1425       default_prepare_gif_clear, npt_l1mmio_test,
1426       default_finished, npt_l1mmio_check },
1427     { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare,
1428       default_prepare_gif_clear, npt_rw_l1mmio_test,
1429       default_finished, npt_rw_l1mmio_check },
1430     { "tsc_adjust", default_supported, tsc_adjust_prepare,
1431       default_prepare_gif_clear, tsc_adjust_test,
1432       default_finished, tsc_adjust_check },
1433     { "latency_run_exit", default_supported, latency_prepare,
1434       default_prepare_gif_clear, latency_test,
1435       latency_finished, latency_check },
1436     { "latency_svm_insn", default_supported, lat_svm_insn_prepare,
1437       default_prepare_gif_clear, null_test,
1438       lat_svm_insn_finished, lat_svm_insn_check },
1439     { "pending_event", default_supported, pending_event_prepare,
1440       default_prepare_gif_clear,
1441       pending_event_test, pending_event_finished, pending_event_check },
1442     { "pending_event_cli", default_supported, pending_event_cli_prepare,
1443       pending_event_cli_prepare_gif_clear,
1444       pending_event_cli_test, pending_event_cli_finished,
1445       pending_event_cli_check },
1446     { "interrupt", default_supported, interrupt_prepare,
1447       default_prepare_gif_clear, interrupt_test,
1448       interrupt_finished, interrupt_check },
1449     TEST(svm_guest_state_test),
1450     { NULL, NULL, NULL, NULL, NULL, NULL, NULL }
1451 };
1452