xref: /kvm-unit-tests/x86/svm_tests.c (revision 16f52ec9a4763e62e35453497e4f077031abcbfb)
1 #include "svm.h"
2 #include "libcflat.h"
3 #include "processor.h"
4 #include "desc.h"
5 #include "msr.h"
6 #include "vm.h"
7 #include "smp.h"
8 #include "types.h"
9 #include "alloc_page.h"
10 #include "isr.h"
11 #include "apic.h"
12 #include "delay.h"
13 
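/*
 * DR intercept exit codes run from SVM_EXIT_READ_DR0 (0x20) for reads up to
 * 0x3f for the last DR write intercept; dr_intercept_finished() below relies
 * on this contiguous range.
 */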
14 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f
15 
16 static void *scratch_page;
17 
18 #define LATENCY_RUNS 1000000
19 
20 extern u16 cpu_online_count;
21 
22 u64 tsc_start;
23 u64 tsc_end;
24 
25 u64 vmrun_sum, vmexit_sum;
26 u64 vmsave_sum, vmload_sum;
27 u64 stgi_sum, clgi_sum;
28 u64 latvmrun_max;
29 u64 latvmrun_min;
30 u64 latvmexit_max;
31 u64 latvmexit_min;
32 u64 latvmload_max;
33 u64 latvmload_min;
34 u64 latvmsave_max;
35 u64 latvmsave_min;
36 u64 latstgi_max;
37 u64 latstgi_min;
38 u64 latclgi_max;
39 u64 latclgi_min;
40 u64 runs;
41 
42 static void null_test(struct svm_test *test)
43 {
44 }
45 
46 static bool null_check(struct svm_test *test)
47 {
48     return vmcb->control.exit_code == SVM_EXIT_VMMCALL;
49 }
50 
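/*
 * The VMRUN intercept bit must always be set in a VMCB; clearing it makes the
 * VMCB illegal, so the guest's next VMRUN is expected to fail with
 * VMEXIT_INVALID (SVM_EXIT_ERR).
 */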
51 static void prepare_no_vmrun_int(struct svm_test *test)
52 {
53     vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
54 }
55 
56 static bool check_no_vmrun_int(struct svm_test *test)
57 {
58     return vmcb->control.exit_code == SVM_EXIT_ERR;
59 }
60 
61 static void test_vmrun(struct svm_test *test)
62 {
63     asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb)));
64 }
65 
66 static bool check_vmrun(struct svm_test *test)
67 {
68     return vmcb->control.exit_code == SVM_EXIT_VMRUN;
69 }
70 
71 static void prepare_rsm_intercept(struct svm_test *test)
72 {
73     default_prepare(test);
74     vmcb->control.intercept |= 1 << INTERCEPT_RSM;
75     vmcb->control.intercept_exceptions |= (1ULL << UD_VECTOR);
76 }
77 
78 static void test_rsm_intercept(struct svm_test *test)
79 {
80     asm volatile ("rsm" : : : "memory");
81 }
82 
83 static bool check_rsm_intercept(struct svm_test *test)
84 {
85     return get_test_stage(test) == 2;
86 }
87 
88 static bool finished_rsm_intercept(struct svm_test *test)
89 {
90     switch (get_test_stage(test)) {
91     case 0:
92         if (vmcb->control.exit_code != SVM_EXIT_RSM) {
93             report(false, "VMEXIT not due to rsm. Exit reason 0x%x",
94                    vmcb->control.exit_code);
95             return true;
96         }
97         vmcb->control.intercept &= ~(1 << INTERCEPT_RSM);
98         inc_test_stage(test);
99         break;
100 
101     case 1:
102         if (vmcb->control.exit_code != SVM_EXIT_EXCP_BASE + UD_VECTOR) {
103             report(false, "VMEXIT not due to #UD. Exit reason 0x%x",
104                    vmcb->control.exit_code);
105             return true;
106         }
107         vmcb->save.rip += 2;
108         inc_test_stage(test);
109         break;
110 
111     default:
112         return true;
113     }
114     return get_test_stage(test) == 2;
115 }
116 
117 static void prepare_cr3_intercept(struct svm_test *test)
118 {
119     default_prepare(test);
120     vmcb->control.intercept_cr_read |= 1 << 3;
121 }
122 
123 static void test_cr3_intercept(struct svm_test *test)
124 {
125     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
126 }
127 
128 static bool check_cr3_intercept(struct svm_test *test)
129 {
130     return vmcb->control.exit_code == SVM_EXIT_READ_CR3;
131 }
132 
133 static bool check_cr3_nointercept(struct svm_test *test)
134 {
135     return null_check(test) && test->scratch == read_cr3();
136 }
137 
138 static void corrupt_cr3_intercept_bypass(void *_test)
139 {
140     struct svm_test *test = _test;
141     extern volatile u32 mmio_insn;
142 
143     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
144         pause();
145     pause();
146     pause();
147     pause();
148     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
149 }
150 
151 static void prepare_cr3_intercept_bypass(struct svm_test *test)
152 {
153     default_prepare(test);
154     vmcb->control.intercept_cr_read |= 1 << 3;
155     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
156 }
157 
158 static void test_cr3_intercept_bypass(struct svm_test *test)
159 {
160     ulong a = 0xa0000;
161 
162     test->scratch = 1;
163     while (test->scratch != 2)
164         barrier();
165 
166     asm volatile ("mmio_insn: mov %0, (%0); nop"
167                   : "+a"(a) : : "memory");
168     test->scratch = a;
169 }
170 
171 static void prepare_dr_intercept(struct svm_test *test)
172 {
173     default_prepare(test);
174     vmcb->control.intercept_dr_read = 0xff;
175     vmcb->control.intercept_dr_write = 0xff;
176 }
177 
178 static void test_dr_intercept(struct svm_test *test)
179 {
180     unsigned int i, failcnt = 0;
181 
182     /* Loop testing debug register reads */
183     for (i = 0; i < 8; i++) {
184 
185         switch (i) {
186         case 0:
187             asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
188             break;
189         case 1:
190             asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
191             break;
192         case 2:
193             asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
194             break;
195         case 3:
196             asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
197             break;
198         case 4:
199             asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
200             break;
201         case 5:
202             asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
203             break;
204         case 6:
205             asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
206             break;
207         case 7:
208             asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
209             break;
210         }
211 
212         if (test->scratch != i) {
213             report(false, "dr%u read intercept", i);
214             failcnt++;
215         }
216     }
217 
218     /* Loop testing debug register writes */
219     for (i = 0; i < 8; i++) {
220 
221         switch (i) {
222         case 0:
223             asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
224             break;
225         case 1:
226             asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
227             break;
228         case 2:
229             asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
230             break;
231         case 3:
232             asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
233             break;
234         case 4:
235             asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
236             break;
237         case 5:
238             asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
239             break;
240         case 6:
241             asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
242             break;
243         case 7:
244             asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
245             break;
246         }
247 
248         if (test->scratch != i) {
249             report(false, "dr%u write intercept", i);
250             failcnt++;
251         }
252     }
253 
254     test->scratch = failcnt;
255 }
256 
257 static bool dr_intercept_finished(struct svm_test *test)
258 {
259     ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0);
260 
261     /* Only expect DR intercepts */
262     if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
263         return true;
264 
265     /*
266      * Compute debug register number.
267      * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
268      * Programmer's Manual Volume 2 - System Programming:
269      * http://support.amd.com/TechDocs/24593.pdf
270      * there are 16 VMEXIT codes each for DR read and write.
271      */
272     test->scratch = (n % 16);
273 
274     /* Jump over MOV instruction */
275     vmcb->save.rip += 3;
276 
277     return false;
278 }
279 
280 static bool check_dr_intercept(struct svm_test *test)
281 {
282     return !test->scratch;
283 }
284 
285 static bool next_rip_supported(void)
286 {
287     return this_cpu_has(X86_FEATURE_NRIPS);
288 }
289 
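/*
 * With the NRIPS feature, the CPU stores the address of the instruction
 * following the intercepted one in VMCB.control.next_rip on #VMEXIT; the
 * exp_next_rip label below marks exactly that address for the RDTSC.
 */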
290 static void prepare_next_rip(struct svm_test *test)
291 {
292     vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
293 }
294 
295 
296 static void test_next_rip(struct svm_test *test)
297 {
298     asm volatile ("rdtsc\n\t"
299                   ".globl exp_next_rip\n\t"
300                   "exp_next_rip:\n\t" ::: "eax", "edx");
301 }
302 
303 static bool check_next_rip(struct svm_test *test)
304 {
305     extern char exp_next_rip;
306     unsigned long address = (unsigned long)&exp_next_rip;
307 
308     return address == vmcb->control.next_rip;
309 }
310 
311 extern u8 *msr_bitmap;
312 
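/*
 * The MSR permissions map uses two bits per MSR (read and write) and only
 * covers the ranges 0x0-0x1fff, 0xc0000000-0xc0001fff and
 * 0xc0010000-0xc0011fff; filling it with ones below intercepts every covered
 * MSR.
 */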
313 static void prepare_msr_intercept(struct svm_test *test)
314 {
315     default_prepare(test);
316     vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
317     vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
318     memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
319 }
320 
321 static void test_msr_intercept(struct svm_test *test)
322 {
323     unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
324     unsigned long msr_index;
325 
326     for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
327         if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
328             /*
329              * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
330              * Programmer's Manual volume 2 - System Programming:
331              * http://support.amd.com/TechDocs/24593.pdf
332              * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
333              */
334             continue;
335         }
336 
337         /* Skips gaps between supported MSR ranges */
338         if (msr_index == 0x2000)
339             msr_index = 0xc0000000;
340         else if (msr_index == 0xc0002000)
341             msr_index = 0xc0010000;
342 
343         test->scratch = -1;
344 
345         rdmsr(msr_index);
346 
347         /* Check that a read intercept occurred for MSR at msr_index */
348         if (test->scratch != msr_index)
349             report(false, "MSR 0x%lx read intercept", msr_index);
350 
351         /*
352          * Poor man's approach to generating a value that
353          * seems arbitrary each time around the loop.
354          */
355         msr_value += (msr_value << 1);
356 
357         wrmsr(msr_index, msr_value);
358 
359         /* Check that a write intercept occurred for MSR with msr_value */
360         if (test->scratch != msr_value)
361             report(false, "MSR 0x%lx write intercept", msr_index);
362     }
363 
364     test->scratch = -2;
365 }
366 
367 static bool msr_intercept_finished(struct svm_test *test)
368 {
369     u32 exit_code = vmcb->control.exit_code;
370     u64 exit_info_1;
371     u8 *opcode;
372 
373     if (exit_code == SVM_EXIT_MSR) {
374         exit_info_1 = vmcb->control.exit_info_1;
375     } else {
376         /*
377          * If #GP exception occurs instead, check that it was
378          * for RDMSR/WRMSR and set exit_info_1 accordingly.
379          */
380 
381         if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
382             return true;
383 
384         opcode = (u8 *)vmcb->save.rip;
385         if (opcode[0] != 0x0f)
386             return true;
387 
388         switch (opcode[1]) {
389         case 0x30: /* WRMSR */
390             exit_info_1 = 1;
391             break;
392         case 0x32: /* RDMSR */
393             exit_info_1 = 0;
394             break;
395         default:
396             return true;
397         }
398 
399         /*
400          * Warn that #GP exception occurred instead.
401          * RCX holds the MSR index.
402          */
403         printf("%s 0x%lx #GP exception\n",
404             exit_info_1 ? "WRMSR" : "RDMSR", get_regs().rcx);
405     }
406 
407     /* Jump over RDMSR/WRMSR instruction */
408     vmcb->save.rip += 2;
409 
410     /*
411      * Test whether the intercept was for RDMSR/WRMSR.
412      * For RDMSR, test->scratch is set to the MSR index;
413      *      RCX holds the MSR index.
414      * For WRMSR, test->scratch is set to the MSR value;
415      *      RDX holds the upper 32 bits of the MSR value,
416      *      while RAX holds its lower 32 bits.
417      */
418     if (exit_info_1)
419         test->scratch =
420             ((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff));
421     else
422         test->scratch = get_regs().rcx;
423 
424     return false;
425 }
426 
427 static bool check_msr_intercept(struct svm_test *test)
428 {
429     memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
430     return (test->scratch == -2);
431 }
432 
433 static void prepare_mode_switch(struct svm_test *test)
434 {
435     vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
436                                              |  (1ULL << UD_VECTOR)
437                                              |  (1ULL << DF_VECTOR)
438                                              |  (1ULL << PF_VECTOR);
439     test->scratch = 0;
440 }
441 
442 static void test_mode_switch(struct svm_test *test)
443 {
444     asm volatile("	cli\n"
445 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
446 		 "1:\n"
447 		 "	.long 2f\n"
448 		 "	.long " xstr(KERNEL_CS32) "\n"
449 		 ".code32\n"
450 		 "2:\n"
451 		 "	movl %%cr0, %%eax\n"
452 		 "	btcl  $31, %%eax\n" /* clear PG */
453 		 "	movl %%eax, %%cr0\n"
454 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
455 		 "	rdmsr\n"
456 		 "	btcl $8, %%eax\n" /* clear LME */
457 		 "	wrmsr\n"
458 		 "	movl %%cr4, %%eax\n"
459 		 "	btcl $5, %%eax\n" /* clear PAE */
460 		 "	movl %%eax, %%cr4\n"
461 		 "	movw %[ds16], %%ax\n"
462 		 "	movw %%ax, %%ds\n"
463 		 "	ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
464 		 ".code16\n"
465 		 "3:\n"
466 		 "	movl %%cr0, %%eax\n"
467 		 "	btcl $0, %%eax\n" /* clear PE  */
468 		 "	movl %%eax, %%cr0\n"
469 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
470 		 "4:\n"
471 		 "	vmmcall\n"
472 		 "	movl %%cr0, %%eax\n"
473 		 "	btsl $0, %%eax\n" /* set PE  */
474 		 "	movl %%eax, %%cr0\n"
475 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
476 		 ".code32\n"
477 		 "5:\n"
478 		 "	movl %%cr4, %%eax\n"
479 		 "	btsl $5, %%eax\n" /* set PAE */
480 		 "	movl %%eax, %%cr4\n"
481 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
482 		 "	rdmsr\n"
483 		 "	btsl $8, %%eax\n" /* set LME */
484 		 "	wrmsr\n"
485 		 "	movl %%cr0, %%eax\n"
486 		 "	btsl  $31, %%eax\n" /* set PG */
487 		 "	movl %%eax, %%cr0\n"
488 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
489 		 ".code64\n\t"
490 		 "6:\n"
491 		 "	vmmcall\n"
492 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
493 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
494 		 : "rax", "rbx", "rcx", "rdx", "memory");
495 }
496 
497 static bool mode_switch_finished(struct svm_test *test)
498 {
499     u64 cr0, cr4, efer;
500 
501     cr0  = vmcb->save.cr0;
502     cr4  = vmcb->save.cr4;
503     efer = vmcb->save.efer;
504 
505     /* Only expect VMMCALL intercepts */
506     if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
507 	    return true;
508 
509     /* Jump over VMMCALL instruction */
510     vmcb->save.rip += 3;
511 
512     /* Do sanity checks */
513     switch (test->scratch) {
514     case 0:
515         /* Test should be in real mode now - check for this */
516         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
517             (cr4  & 0x00000020) || /* CR4.PAE */
518             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
519                 return true;
520         break;
521     case 2:
522         /* Test should be back in long-mode now - check for this */
523         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
524             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
525             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
526 		    return true;
527 	break;
528     }
529 
530     /* one step forward */
531     test->scratch += 1;
532 
533     return test->scratch == 2;
534 }
535 
536 static bool check_mode_switch(struct svm_test *test)
537 {
538 	return test->scratch == 2;
539 }
540 
541 extern u8 *io_bitmap;
542 
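/*
 * The IO permissions map holds one bit per port; a multi-byte access is
 * intercepted if any bit covering its bytes is set.  The tests below probe
 * accesses that straddle bitmap bytes, pages, and the end of the 64K port
 * space.
 */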
543 static void prepare_ioio(struct svm_test *test)
544 {
545     vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
546     test->scratch = 0;
547     memset(io_bitmap, 0, 8192);
548     io_bitmap[8192] = 0xFF;
549 }
550 
551 static void test_ioio(struct svm_test *test)
552 {
553     // stage 0, test IO pass
554     inb(0x5000);
555     outb(0x0, 0x5000);
556     if (get_test_stage(test) != 0)
557         goto fail;
558 
559     // test IO width, in/out
560     io_bitmap[0] = 0xFF;
561     inc_test_stage(test);
562     inb(0x0);
563     if (get_test_stage(test) != 2)
564         goto fail;
565 
566     outw(0x0, 0x0);
567     if (get_test_stage(test) != 3)
568         goto fail;
569 
570     inl(0x0);
571     if (get_test_stage(test) != 4)
572         goto fail;
573 
574     // test low/high IO port
575     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
576     inb(0x5000);
577     if (get_test_stage(test) != 5)
578         goto fail;
579 
580     io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
581     inw(0x9000);
582     if (get_test_stage(test) != 6)
583         goto fail;
584 
585     // test partial pass
586     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
587     inl(0x4FFF);
588     if (get_test_stage(test) != 7)
589         goto fail;
590 
591     // test across pages
592     inc_test_stage(test);
593     inl(0x7FFF);
594     if (get_test_stage(test) != 8)
595         goto fail;
596 
597     inc_test_stage(test);
598     io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
599     inl(0x7FFF);
600     if (get_test_stage(test) != 10)
601         goto fail;
602 
603     io_bitmap[0] = 0;
604     inl(0xFFFF);
605     if (get_test_stage(test) != 11)
606         goto fail;
607 
608     io_bitmap[0] = 0xFF;
609     io_bitmap[8192] = 0;
610     inl(0xFFFF);
611     inc_test_stage(test);
612     if (get_test_stage(test) != 12)
613         goto fail;
614 
615     return;
616 
617 fail:
618     report(false, "stage %d", get_test_stage(test));
619     test->scratch = -1;
620 }
621 
622 static bool ioio_finished(struct svm_test *test)
623 {
624     unsigned port, size;
625 
626     /* Only expect IOIO intercepts */
627     if (vmcb->control.exit_code == SVM_EXIT_VMMCALL)
628         return true;
629 
630     if (vmcb->control.exit_code != SVM_EXIT_IOIO)
631         return true;
632 
633     /* one step forward */
634     test->scratch += 1;
635 
636     port = vmcb->control.exit_info_1 >> 16;
637     size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;
638 
639     while (size--) {
640         io_bitmap[port / 8] &= ~(1 << (port & 7));
641         port++;
642     }
643 
644     return false;
645 }
646 
647 static bool check_ioio(struct svm_test *test)
648 {
649     memset(io_bitmap, 0, 8193);
650     return test->scratch != -1;
651 }
652 
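/*
 * ASID 0 is reserved for the host; a VMCB with ASID 0 is illegal, so VMRUN is
 * expected to fail with VMEXIT_INVALID (SVM_EXIT_ERR).
 */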
653 static void prepare_asid_zero(struct svm_test *test)
654 {
655     vmcb->control.asid = 0;
656 }
657 
658 static void test_asid_zero(struct svm_test *test)
659 {
660     asm volatile ("vmmcall\n\t");
661 }
662 
663 static bool check_asid_zero(struct svm_test *test)
664 {
665     return vmcb->control.exit_code == SVM_EXIT_ERR;
666 }
667 
668 static void sel_cr0_bug_prepare(struct svm_test *test)
669 {
670     vmcb_ident(vmcb);
671     vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
672 }
673 
674 static bool sel_cr0_bug_finished(struct svm_test *test)
675 {
676 	return true;
677 }
678 
679 static void sel_cr0_bug_test(struct svm_test *test)
680 {
681     unsigned long cr0;
682 
683     /* read cr0, set CD, and write back */
684     cr0  = read_cr0();
685     cr0 |= (1UL << 30);
686     write_cr0(cr0);
687 
688      * If we are here, the test failed: we are no longer in guest mode, so
689      * we can't trigger an intercept to recover.  Report the failure and
690      * exit the test run.
691      * Trigger a tripple-fault for now.
692      */
693     report(false, "sel_cr0 test. Can not recover from this - exiting");
694     exit(report_summary());
695 }
696 
697 static bool sel_cr0_bug_check(struct svm_test *test)
698 {
699     return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
700 }
701 
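/*
 * For the NPT tests below, exit_info_1 holds a #PF-style error code for the
 * nested page fault: bit 0 = present, bit 1 = write, bit 2 = user,
 * bit 3 = reserved bit, bit 4 = instruction fetch.  Bit 32 is set when the
 * fault occurred on the final guest-physical translation, bit 33 when it
 * occurred during the guest page-table walk.
 */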
702 static void npt_nx_prepare(struct svm_test *test)
703 {
704 
705     u64 *pte;
706 
707     vmcb_ident(vmcb);
708     pte = npt_get_pte((u64)null_test);
709 
710     *pte |= (1ULL << 63);
711 }
712 
713 static bool npt_nx_check(struct svm_test *test)
714 {
715     u64 *pte = npt_get_pte((u64)null_test);
716 
717     *pte &= ~(1ULL << 63);
718 
719     vmcb->save.efer |= (1 << 11); /* EFER.NXE */
720 
721     return (vmcb->control.exit_code == SVM_EXIT_NPF)
722            && (vmcb->control.exit_info_1 == 0x100000015ULL);
723 }
724 
725 static void npt_np_prepare(struct svm_test *test)
726 {
727     u64 *pte;
728 
729     scratch_page = alloc_page();
730     vmcb_ident(vmcb);
731     pte = npt_get_pte((u64)scratch_page);
732 
733     *pte &= ~1ULL;
734 }
735 
736 static void npt_np_test(struct svm_test *test)
737 {
738     (void) *(volatile u64 *)scratch_page;
739 }
740 
741 static bool npt_np_check(struct svm_test *test)
742 {
743     u64 *pte = npt_get_pte((u64)scratch_page);
744 
745     *pte |= 1ULL;
746 
747     return (vmcb->control.exit_code == SVM_EXIT_NPF)
748            && (vmcb->control.exit_info_1 == 0x100000004ULL);
749 }
750 
751 static void npt_us_prepare(struct svm_test *test)
752 {
753     u64 *pte;
754 
755     scratch_page = alloc_page();
756     vmcb_ident(vmcb);
757     pte = npt_get_pte((u64)scratch_page);
758 
759     *pte &= ~(1ULL << 2);
760 }
761 
762 static void npt_us_test(struct svm_test *test)
763 {
764     (void) *(volatile u64 *)scratch_page;
765 }
766 
767 static bool npt_us_check(struct svm_test *test)
768 {
769     u64 *pte = npt_get_pte((u64)scratch_page);
770 
771     *pte |= (1ULL << 2);
772 
773     return (vmcb->control.exit_code == SVM_EXIT_NPF)
774            && (vmcb->control.exit_info_1 == 0x100000005ULL);
775 }
776 
777 u64 save_pde;
778 
779 static void npt_rsvd_prepare(struct svm_test *test)
780 {
781     u64 *pde;
782 
783     vmcb_ident(vmcb);
784     pde = npt_get_pde((u64) null_test);
785 
786     save_pde = *pde;
787     *pde = (1ULL << 19) | (1ULL << 7) | 0x27; /* 2M page (PS=1) with a reserved bit set */
788 }
789 
790 static bool npt_rsvd_check(struct svm_test *test)
791 {
792     u64 *pde = npt_get_pde((u64) null_test);
793 
794     *pde = save_pde;
795 
796     return (vmcb->control.exit_code == SVM_EXIT_NPF)
797             && (vmcb->control.exit_info_1 == 0x10000001dULL);
798 }
799 
800 static void npt_rw_prepare(struct svm_test *test)
801 {
802 
803     u64 *pte;
804 
805     vmcb_ident(vmcb);
806     pte = npt_get_pte(0x80000);
807 
808     *pte &= ~(1ULL << 1);
809 }
810 
811 static void npt_rw_test(struct svm_test *test)
812 {
813     u64 *data = (void*)(0x80000);
814 
815     *data = 0;
816 }
817 
818 static bool npt_rw_check(struct svm_test *test)
819 {
820     u64 *pte = npt_get_pte(0x80000);
821 
822     *pte |= (1ULL << 1);
823 
824     return (vmcb->control.exit_code == SVM_EXIT_NPF)
825            && (vmcb->control.exit_info_1 == 0x100000007ULL);
826 }
827 
828 static void npt_rw_pfwalk_prepare(struct svm_test *test)
829 {
830 
831     u64 *pte;
832 
833     vmcb_ident(vmcb);
834     pte = npt_get_pte(read_cr3());
835 
836     *pte &= ~(1ULL << 1);
837 }
838 
839 static bool npt_rw_pfwalk_check(struct svm_test *test)
840 {
841     u64 *pte = npt_get_pte(read_cr3());
842 
843     *pte |= (1ULL << 1);
844 
845     return (vmcb->control.exit_code == SVM_EXIT_NPF)
846            && (vmcb->control.exit_info_1 == 0x200000007ULL)
847 	   && (vmcb->control.exit_info_2 == read_cr3());
848 }
849 
850 static void npt_rsvd_pfwalk_prepare(struct svm_test *test)
851 {
852     u64 *pdpe;
853     vmcb_ident(vmcb);
854 
855     pdpe = npt_get_pml4e();
856     pdpe[0] |= (1ULL << 8);
857 }
858 
859 static bool npt_rsvd_pfwalk_check(struct svm_test *test)
860 {
861     u64 *pdpe = npt_get_pml4e();
862     pdpe[0] &= ~(1ULL << 8);
863 
864     return (vmcb->control.exit_code == SVM_EXIT_NPF)
865             && (vmcb->control.exit_info_1 == 0x20000000fULL);
866 }
867 
868 static void npt_l1mmio_prepare(struct svm_test *test)
869 {
870     vmcb_ident(vmcb);
871 }
872 
873 u32 nested_apic_version1;
874 u32 nested_apic_version2;
875 
876 static void npt_l1mmio_test(struct svm_test *test)
877 {
878     volatile u32 *data = (volatile void*)(0xfee00030UL);
879 
880     nested_apic_version1 = *data;
881     nested_apic_version2 = *data;
882 }
883 
884 static bool npt_l1mmio_check(struct svm_test *test)
885 {
886     volatile u32 *data = (volatile void*)(0xfee00030);
887     u32 lvr = *data;
888 
889     return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
890 }
891 
892 static void npt_rw_l1mmio_prepare(struct svm_test *test)
893 {
894 
895     u64 *pte;
896 
897     vmcb_ident(vmcb);
898     pte = npt_get_pte(0xfee00080);
899 
900     *pte &= ~(1ULL << 1);
901 }
902 
903 static void npt_rw_l1mmio_test(struct svm_test *test)
904 {
905     volatile u32 *data = (volatile void*)(0xfee00080);
906 
907     *data = *data;
908 }
909 
910 static bool npt_rw_l1mmio_check(struct svm_test *test)
911 {
912     u64 *pte = npt_get_pte(0xfee00080);
913 
914     *pte |= (1ULL << 1);
915 
916     return (vmcb->control.exit_code == SVM_EXIT_NPF)
917            && (vmcb->control.exit_info_1 == 0x100000007ULL);
918 }
919 
920 #define TSC_ADJUST_VALUE    (1ll << 32)
921 #define TSC_OFFSET_VALUE    (~0ull << 48)
922 static bool ok;
923 
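/*
 * IA32_TSC_ADJUST architecturally tracks writes to IA32_TSC: a WRMSR that
 * moves the TSC by some delta also moves TSC_ADJUST by the same delta.  The
 * checks below rely on this while a guest TSC offset is in effect.
 */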
924 static bool tsc_adjust_supported(void)
925 {
926     return this_cpu_has(X86_FEATURE_TSC_ADJUST);
927 }
928 
929 static void tsc_adjust_prepare(struct svm_test *test)
930 {
931     default_prepare(test);
932     vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
933 
934     wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
935     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
936     ok = adjust == -TSC_ADJUST_VALUE;
937 }
938 
939 static void tsc_adjust_test(struct svm_test *test)
940 {
941     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
942     ok &= adjust == -TSC_ADJUST_VALUE;
943 
944     uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
945     wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
946 
947     adjust = rdmsr(MSR_IA32_TSC_ADJUST);
948     ok &= adjust <= -2 * TSC_ADJUST_VALUE;
949 
950     uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
951     ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
952 
953     uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
954     ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
955 }
956 
957 static bool tsc_adjust_check(struct svm_test *test)
958 {
959     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
960 
961     wrmsr(MSR_IA32_TSC_ADJUST, 0);
962     return ok && adjust <= -2 * TSC_ADJUST_VALUE;
963 }
964 
965 static void latency_prepare(struct svm_test *test)
966 {
967     default_prepare(test);
968     runs = LATENCY_RUNS;
969     latvmrun_min = latvmexit_min = -1ULL;
970     latvmrun_max = latvmexit_max = 0;
971     vmrun_sum = vmexit_sum = 0;
972     tsc_start = rdtsc();
973 }
974 
975 static void latency_test(struct svm_test *test)
976 {
977     u64 cycles;
978 
979 start:
980     tsc_end = rdtsc();
981 
982     cycles = tsc_end - tsc_start;
983 
984     if (cycles > latvmrun_max)
985         latvmrun_max = cycles;
986 
987     if (cycles < latvmrun_min)
988         latvmrun_min = cycles;
989 
990     vmrun_sum += cycles;
991 
992     tsc_start = rdtsc();
993 
994     asm volatile ("vmmcall" : : : "memory");
995     goto start;
996 }
997 
998 static bool latency_finished(struct svm_test *test)
999 {
1000     u64 cycles;
1001 
1002     tsc_end = rdtsc();
1003 
1004     cycles = tsc_end - tsc_start;
1005 
1006     if (cycles > latvmexit_max)
1007         latvmexit_max = cycles;
1008 
1009     if (cycles < latvmexit_min)
1010         latvmexit_min = cycles;
1011 
1012     vmexit_sum += cycles;
1013 
1014     vmcb->save.rip += 3;
1015 
1016     runs -= 1;
1017 
1018     tsc_end = rdtsc();
1019 
1020     return runs == 0;
1021 }
1022 
1023 static bool latency_finished_clean(struct svm_test *test)
1024 {
1025     vmcb->control.clean = VMCB_CLEAN_ALL;
1026     return latency_finished(test);
1027 }
1028 
1029 static bool latency_check(struct svm_test *test)
1030 {
1031     printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
1032             latvmrun_min, vmrun_sum / LATENCY_RUNS);
1033     printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
1034             latvmexit_min, vmexit_sum / LATENCY_RUNS);
1035     return true;
1036 }
1037 
1038 static void lat_svm_insn_prepare(struct svm_test *test)
1039 {
1040     default_prepare(test);
1041     runs = LATENCY_RUNS;
1042     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
1043     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
1044     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
1045 }
1046 
1047 static bool lat_svm_insn_finished(struct svm_test *test)
1048 {
1049     u64 vmcb_phys = virt_to_phys(vmcb);
1050     u64 cycles;
1051 
1052     for ( ; runs != 0; runs--) {
1053         tsc_start = rdtsc();
1054         asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
1055         cycles = rdtsc() - tsc_start;
1056         if (cycles > latvmload_max)
1057             latvmload_max = cycles;
1058         if (cycles < latvmload_min)
1059             latvmload_min = cycles;
1060         vmload_sum += cycles;
1061 
1062         tsc_start = rdtsc();
1063         asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
1064         cycles = rdtsc() - tsc_start;
1065         if (cycles > latvmsave_max)
1066             latvmsave_max = cycles;
1067         if (cycles < latvmsave_min)
1068             latvmsave_min = cycles;
1069         vmsave_sum += cycles;
1070 
1071         tsc_start = rdtsc();
1072         asm volatile("stgi\n\t");
1073         cycles = rdtsc() - tsc_start;
1074         if (cycles > latstgi_max)
1075             latstgi_max = cycles;
1076         if (cycles < latstgi_min)
1077             latstgi_min = cycles;
1078         stgi_sum += cycles;
1079 
1080         tsc_start = rdtsc();
1081         asm volatile("clgi\n\t");
1082         cycles = rdtsc() - tsc_start;
1083         if (cycles > latclgi_max)
1084             latclgi_max = cycles;
1085         if (cycles < latclgi_min)
1086             latclgi_min = cycles;
1087         clgi_sum += cycles;
1088     }
1089 
1090     tsc_end = rdtsc();
1091 
1092     return true;
1093 }
1094 
1095 static bool lat_svm_insn_check(struct svm_test *test)
1096 {
1097     printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
1098             latvmload_min, vmload_sum / LATENCY_RUNS);
1099     printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
1100             latvmsave_min, vmsave_sum / LATENCY_RUNS);
1101     printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
1102             latstgi_min, stgi_sum / LATENCY_RUNS);
1103     printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
1104             latclgi_min, clgi_sum / LATENCY_RUNS);
1105     return true;
1106 }
1107 
1108 bool pending_event_ipi_fired;
1109 bool pending_event_guest_run;
1110 
1111 static void pending_event_ipi_isr(isr_regs_t *regs)
1112 {
1113     pending_event_ipi_fired = true;
1114     eoi();
1115 }
1116 
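/*
 * With INTERCEPT_INTR set and V_INTR_MASKING=1, a physical interrupt that is
 * pending at VMRUN causes an immediate SVM_EXIT_INTR instead of being
 * delivered to the guest; the host then takes it once interrupts are enabled
 * again.
 */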
1117 static void pending_event_prepare(struct svm_test *test)
1118 {
1119     int ipi_vector = 0xf1;
1120 
1121     default_prepare(test);
1122 
1123     pending_event_ipi_fired = false;
1124 
1125     handle_irq(ipi_vector, pending_event_ipi_isr);
1126 
1127     pending_event_guest_run = false;
1128 
1129     vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1130     vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1131 
1132     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1133                   APIC_DM_FIXED | ipi_vector, 0);
1134 
1135     set_test_stage(test, 0);
1136 }
1137 
1138 static void pending_event_test(struct svm_test *test)
1139 {
1140     pending_event_guest_run = true;
1141 }
1142 
1143 static bool pending_event_finished(struct svm_test *test)
1144 {
1145     switch (get_test_stage(test)) {
1146     case 0:
1147         if (vmcb->control.exit_code != SVM_EXIT_INTR) {
1148             report(false, "VMEXIT not due to pending interrupt. Exit reason 0x%x",
1149                    vmcb->control.exit_code);
1150             return true;
1151         }
1152 
1153         vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1154         vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1155 
1156         if (pending_event_guest_run) {
1157             report(false, "Guest ran before host received IPI");
1158             return true;
1159         }
1160 
1161         irq_enable();
1162         asm volatile ("nop");
1163         irq_disable();
1164 
1165         if (!pending_event_ipi_fired) {
1166             report(false, "Pending interrupt not dispatched after IRQ enabled");
1167             return true;
1168         }
1169         break;
1170 
1171     case 1:
1172         if (!pending_event_guest_run) {
1173             report(false, "Guest did not resume when no interrupt was pending");
1174             return true;
1175         }
1176         break;
1177     }
1178 
1179     inc_test_stage(test);
1180 
1181     return get_test_stage(test) == 2;
1182 }
1183 
1184 static bool pending_event_check(struct svm_test *test)
1185 {
1186     return get_test_stage(test) == 2;
1187 }
1188 
1189 static void pending_event_cli_prepare(struct svm_test *test)
1190 {
1191     default_prepare(test);
1192 
1193     pending_event_ipi_fired = false;
1194 
1195     handle_irq(0xf1, pending_event_ipi_isr);
1196 
1197     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1198               APIC_DM_FIXED | 0xf1, 0);
1199 
1200     set_test_stage(test, 0);
1201 }
1202 
1203 static void pending_event_cli_prepare_gif_clear(struct svm_test *test)
1204 {
1205     asm("cli");
1206 }
1207 
1208 static void pending_event_cli_test(struct svm_test *test)
1209 {
1210     if (pending_event_ipi_fired == true) {
1211         set_test_stage(test, -1);
1212         report(false, "Interrupt preceded guest");
1213         vmmcall();
1214     }
1215 
1216     /* VINTR_MASKING is zero.  This should cause the IPI to fire.  */
1217     irq_enable();
1218     asm volatile ("nop");
1219     irq_disable();
1220 
1221     if (pending_event_ipi_fired != true) {
1222         set_test_stage(test, -1);
1223         report(false, "Interrupt not triggered by guest");
1224     }
1225 
1226     vmmcall();
1227 
1228     /*
1229      * Now VINTR_MASKING=1, but no interrupt is pending so
1230      * the VINTR interception should be clear in VMCB02.  Check
1231      * that L0 did not leave a stale VINTR in the VMCB.
1232      */
1233     irq_enable();
1234     asm volatile ("nop");
1235     irq_disable();
1236 }
1237 
1238 static bool pending_event_cli_finished(struct svm_test *test)
1239 {
1240     if ( vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1241         report(false, "VM_EXIT return to host is not EXIT_VMMCALL. Exit reason 0x%x",
1242                vmcb->control.exit_code);
1243         return true;
1244     }
1245 
1246     switch (get_test_stage(test)) {
1247     case 0:
1248         vmcb->save.rip += 3;
1249 
1250         pending_event_ipi_fired = false;
1251 
1252         vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1253 
1254 	/* Now entering again with VINTR_MASKING=1.  */
1255         apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1256               APIC_DM_FIXED | 0xf1, 0);
1257 
1258         break;
1259 
1260     case 1:
1261         if (pending_event_ipi_fired == true) {
1262             report(false, "Interrupt triggered by guest");
1263             return true;
1264         }
1265 
1266         irq_enable();
1267         asm volatile ("nop");
1268         irq_disable();
1269 
1270         if (pending_event_ipi_fired != true) {
1271             report(false, "Interrupt not triggered by host");
1272             return true;
1273         }
1274 
1275         break;
1276 
1277     default:
1278         return true;
1279     }
1280 
1281     inc_test_stage(test);
1282 
1283     return get_test_stage(test) == 2;
1284 }
1285 
1286 static bool pending_event_cli_check(struct svm_test *test)
1287 {
1288     return get_test_stage(test) == 2;
1289 }
1290 
1291 #define TIMER_VECTOR    222
1292 
1293 static volatile bool timer_fired;
1294 
1295 static void timer_isr(isr_regs_t *regs)
1296 {
1297     timer_fired = true;
1298     apic_write(APIC_EOI, 0);
1299 }
1300 
1301 static void interrupt_prepare(struct svm_test *test)
1302 {
1303     default_prepare(test);
1304     handle_irq(TIMER_VECTOR, timer_isr);
1305     timer_fired = false;
1306     set_test_stage(test, 0);
1307 }
1308 
1309 static void interrupt_test(struct svm_test *test)
1310 {
1311     long long start, loops;
1312 
1313     apic_write(APIC_LVTT, TIMER_VECTOR);
1314     irq_enable();
1315     apic_write(APIC_TMICT, 1); // Timer Initial Count Register (0x380), one-shot
1316     for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1317         asm volatile ("nop");
1318 
1319     report(timer_fired, "direct interrupt while running guest");
1320 
1321     if (!timer_fired) {
1322         set_test_stage(test, -1);
1323         vmmcall();
1324     }
1325 
1326     apic_write(APIC_TMICT, 0);
1327     irq_disable();
1328     vmmcall();
1329 
1330     timer_fired = false;
1331     apic_write(APIC_TMICT, 1);
1332     for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1333         asm volatile ("nop");
1334 
1335     report(timer_fired, "intercepted interrupt while running guest");
1336 
1337     if (!timer_fired) {
1338         set_test_stage(test, -1);
1339         vmmcall();
1340     }
1341 
1342     irq_enable();
1343     apic_write(APIC_TMICT, 0);
1344     irq_disable();
1345 
1346     timer_fired = false;
1347     start = rdtsc();
1348     apic_write(APIC_TMICT, 1000000);
1349     asm volatile ("sti; hlt");
1350 
1351     report(rdtsc() - start > 10000 && timer_fired,
1352           "direct interrupt + hlt");
1353 
1354     if (!timer_fired) {
1355         set_test_stage(test, -1);
1356         vmmcall();
1357     }
1358 
1359     apic_write(APIC_TMICT, 0);
1360     irq_disable();
1361     vmmcall();
1362 
1363     timer_fired = false;
1364     start = rdtsc();
1365     apic_write(APIC_TMICT, 1000000);
1366     asm volatile ("hlt");
1367 
1368     report(rdtsc() - start > 10000 && timer_fired,
1369            "intercepted interrupt + hlt");
1370 
1371     if (!timer_fired) {
1372         set_test_stage(test, -1);
1373         vmmcall();
1374     }
1375 
1376     apic_write(APIC_TMICT, 0);
1377     irq_disable();
1378 }
1379 
1380 static bool interrupt_finished(struct svm_test *test)
1381 {
1382     switch (get_test_stage(test)) {
1383     case 0:
1384     case 2:
1385         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1386             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1387                    vmcb->control.exit_code);
1388             return true;
1389         }
1390         vmcb->save.rip += 3;
1391 
1392         vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1393         vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1394         break;
1395 
1396     case 1:
1397     case 3:
1398         if (vmcb->control.exit_code != SVM_EXIT_INTR) {
1399             report(false, "VMEXIT not due to intr intercept. Exit reason 0x%x",
1400                    vmcb->control.exit_code);
1401             return true;
1402         }
1403 
1404         irq_enable();
1405         asm volatile ("nop");
1406         irq_disable();
1407 
1408         vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1409         vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1410         break;
1411 
1412     case 4:
1413         break;
1414 
1415     default:
1416         return true;
1417     }
1418 
1419     inc_test_stage(test);
1420 
1421     return get_test_stage(test) == 5;
1422 }
1423 
1424 static bool interrupt_check(struct svm_test *test)
1425 {
1426     return get_test_stage(test) == 5;
1427 }
1428 
1429 static volatile bool nmi_fired;
1430 
1431 static void nmi_handler(isr_regs_t *regs)
1432 {
1433     nmi_fired = true;
1434     apic_write(APIC_EOI, 0);
1435 }
1436 
1437 static void nmi_prepare(struct svm_test *test)
1438 {
1439     default_prepare(test);
1440     nmi_fired = false;
1441     handle_irq(NMI_VECTOR, nmi_handler);
1442     set_test_stage(test, 0);
1443 }
1444 
1445 static void nmi_test(struct svm_test *test)
1446 {
1447     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
1448 
1449     report(nmi_fired, "direct NMI while running guest");
1450 
1451     if (!nmi_fired)
1452         set_test_stage(test, -1);
1453 
1454     vmmcall();
1455 
1456     nmi_fired = false;
1457 
1458     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
1459 
1460     if (!nmi_fired) {
1461         report(nmi_fired, "intercepted pending NMI not dispatched");
1462         set_test_stage(test, -1);
1463     }
1464 
1465 }
1466 
1467 static bool nmi_finished(struct svm_test *test)
1468 {
1469     switch (get_test_stage(test)) {
1470     case 0:
1471         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1472             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1473                    vmcb->control.exit_code);
1474             return true;
1475         }
1476         vmcb->save.rip += 3;
1477 
1478         vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
1479         break;
1480 
1481     case 1:
1482         if (vmcb->control.exit_code != SVM_EXIT_NMI) {
1483             report(false, "VMEXIT not due to NMI intercept. Exit reason 0x%x",
1484                    vmcb->control.exit_code);
1485             return true;
1486         }
1487 
1488         report(true, "NMI intercept while running guest");
1489         break;
1490 
1491     case 2:
1492         break;
1493 
1494     default:
1495         return true;
1496     }
1497 
1498     inc_test_stage(test);
1499 
1500     return get_test_stage(test) == 3;
1501 }
1502 
1503 static bool nmi_check(struct svm_test *test)
1504 {
1505     return get_test_stage(test) == 3;
1506 }
1507 
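/*
 * For the NMI + hlt test, a second CPU sends the NMIs after a delay so that
 * CPU 0 is already halted when they arrive, both with and without the NMI
 * intercept.
 */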
1508 #define NMI_DELAY 100000000ULL
1509 
1510 static void nmi_message_thread(void *_test)
1511 {
1512     struct svm_test *test = _test;
1513 
1514     while (get_test_stage(test) != 1)
1515         pause();
1516 
1517     delay(NMI_DELAY);
1518 
1519     apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
1520 
1521     while (get_test_stage(test) != 2)
1522         pause();
1523 
1524     delay(NMI_DELAY);
1525 
1526     apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
1527 }
1528 
1529 static void nmi_hlt_test(struct svm_test *test)
1530 {
1531     long long start;
1532 
1533     on_cpu_async(1, nmi_message_thread, test);
1534 
1535     start = rdtsc();
1536 
1537     set_test_stage(test, 1);
1538 
1539     asm volatile ("hlt");
1540 
1541     report((rdtsc() - start > NMI_DELAY) && nmi_fired,
1542           "direct NMI + hlt");
1543 
1544     if (!nmi_fired)
1545         set_test_stage(test, -1);
1546 
1547     nmi_fired = false;
1548 
1549     vmmcall();
1550 
1551     start = rdtsc();
1552 
1553     set_test_stage(test, 2);
1554 
1555     asm volatile ("hlt");
1556 
1557     report((rdtsc() - start > NMI_DELAY) && nmi_fired,
1558            "intercepted NMI + hlt");
1559 
1560     if (!nmi_fired) {
1561         report(nmi_fired, "intercepted pending NMI not dispatched");
1562         set_test_stage(test, -1);
1563         vmmcall();
1564     }
1565 
1566     set_test_stage(test, 3);
1567 }
1568 
1569 static bool nmi_hlt_finished(struct svm_test *test)
1570 {
1571     switch (get_test_stage(test)) {
1572     case 1:
1573         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1574             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1575                    vmcb->control.exit_code);
1576             return true;
1577         }
1578         vmcb->save.rip += 3;
1579 
1580         vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
1581         break;
1582 
1583     case 2:
1584         if (vmcb->control.exit_code != SVM_EXIT_NMI) {
1585             report(false, "VMEXIT not due to NMI intercept. Exit reason 0x%x",
1586                    vmcb->control.exit_code);
1587             return true;
1588         }
1589 
1590         report(true, "NMI intercept while running guest");
1591         break;
1592 
1593     case 3:
1594         break;
1595 
1596     default:
1597         return true;
1598     }
1599 
1600     return get_test_stage(test) == 3;
1601 }
1602 
1603 static bool nmi_hlt_check(struct svm_test *test)
1604 {
1605     return get_test_stage(test) == 3;
1606 }
1607 
1608 static volatile int count_exc = 0;
1609 
1610 static void my_isr(struct ex_regs *r)
1611 {
1612         count_exc++;
1613 }
1614 
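/*
 * Per the APM, injecting an event of type "exception" with vector 2 (NMI) is
 * illegal, so the first injection below is expected to fail the VMRUN with
 * VMEXIT_INVALID; the #DE injection that follows must still be delivered.
 */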
1615 static void exc_inject_prepare(struct svm_test *test)
1616 {
1617     default_prepare(test);
1618     handle_exception(DE_VECTOR, my_isr);
1619     handle_exception(NMI_VECTOR, my_isr);
1620 }
1621 
1622 
1623 static void exc_inject_test(struct svm_test *test)
1624 {
1625     asm volatile ("vmmcall\n\tvmmcall\n\t");
1626 }
1627 
1628 static bool exc_inject_finished(struct svm_test *test)
1629 {
1630     switch (get_test_stage(test)) {
1631     case 0:
1632         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1633             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1634                    vmcb->control.exit_code);
1635             return true;
1636         }
1637         vmcb->save.rip += 3;
1638         vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
1639         break;
1640 
1641     case 1:
1642         if (vmcb->control.exit_code != SVM_EXIT_ERR) {
1643             report(false, "VMEXIT not due to error. Exit reason 0x%x",
1644                    vmcb->control.exit_code);
1645             return true;
1646         }
1647         report(count_exc == 0, "exception with vector 2 not injected");
1648         vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
1649         break;
1650 
1651     case 2:
1652         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1653             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1654                    vmcb->control.exit_code);
1655             return true;
1656         }
1657         vmcb->save.rip += 3;
1658         report(count_exc == 1, "divide overflow exception injected");
1659         report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared");
1660         break;
1661 
1662     default:
1663         return true;
1664     }
1665 
1666     inc_test_stage(test);
1667 
1668     return get_test_stage(test) == 3;
1669 }
1670 
1671 static bool exc_inject_check(struct svm_test *test)
1672 {
1673     return count_exc == 1 && get_test_stage(test) == 3;
1674 }
1675 
1676 static volatile bool virq_fired;
1677 
1678 static void virq_isr(isr_regs_t *regs)
1679 {
1680     virq_fired = true;
1681 }
1682 
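/*
 * V_IRQ injects a virtual interrupt (vector int_vector) that the guest takes
 * once it sets RFLAGS.IF, and only if V_INTR_PRIO exceeds the guest's V_TPR;
 * with INTERCEPT_VINTR set, an exit occurs instead when a virtual interrupt
 * becomes deliverable.
 */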
1683 static void virq_inject_prepare(struct svm_test *test)
1684 {
1685     handle_irq(0xf1, virq_isr);
1686     default_prepare(test);
1687     vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
1688                             (0x0f << V_INTR_PRIO_SHIFT); // Set to the highest priority
1689     vmcb->control.int_vector = 0xf1;
1690     virq_fired = false;
1691     set_test_stage(test, 0);
1692 }
1693 
1694 static void virq_inject_test(struct svm_test *test)
1695 {
1696     if (virq_fired) {
1697         report(false, "virtual interrupt fired before L2 sti");
1698         set_test_stage(test, -1);
1699         vmmcall();
1700     }
1701 
1702     irq_enable();
1703     asm volatile ("nop");
1704     irq_disable();
1705 
1706     if (!virq_fired) {
1707         report(false, "virtual interrupt not fired after L2 sti");
1708         set_test_stage(test, -1);
1709     }
1710 
1711     vmmcall();
1712 
1713     if (virq_fired) {
1714         report(false, "virtual interrupt fired before L2 sti after VINTR intercept");
1715         set_test_stage(test, -1);
1716         vmmcall();
1717     }
1718 
1719     irq_enable();
1720     asm volatile ("nop");
1721     irq_disable();
1722 
1723     if (!virq_fired) {
1724         report(false, "virtual interrupt not fired after return from VINTR intercept");
1725         set_test_stage(test, -1);
1726     }
1727 
1728     vmmcall();
1729 
1730     irq_enable();
1731     asm volatile ("nop");
1732     irq_disable();
1733 
1734     if (virq_fired) {
1735         report(false, "virtual interrupt fired when V_IRQ_PRIO less than V_TPR");
1736         set_test_stage(test, -1);
1737     }
1738 
1739     vmmcall();
1740     vmmcall();
1741 }
1742 
1743 static bool virq_inject_finished(struct svm_test *test)
1744 {
1745     vmcb->save.rip += 3;
1746 
1747     switch (get_test_stage(test)) {
1748     case 0:
1749         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1750             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1751                    vmcb->control.exit_code);
1752             return true;
1753         }
1754         if (vmcb->control.int_ctl & V_IRQ_MASK) {
1755             report(false, "V_IRQ not cleared on VMEXIT after firing");
1756             return true;
1757         }
1758         virq_fired = false;
1759         vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1760         vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
1761                             (0x0f << V_INTR_PRIO_SHIFT);
1762         break;
1763 
1764     case 1:
1765         if (vmcb->control.exit_code != SVM_EXIT_VINTR) {
1766             report(false, "VMEXIT not due to vintr. Exit reason 0x%x",
1767                    vmcb->control.exit_code);
1768             return true;
1769         }
1770         if (virq_fired) {
1771             report(false, "V_IRQ fired before SVM_EXIT_VINTR");
1772             return true;
1773         }
1774         vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
1775         break;
1776 
1777     case 2:
1778         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1779             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1780                    vmcb->control.exit_code);
1781             return true;
1782         }
1783         virq_fired = false;
1784         // Set irq to lower priority
1785         vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
1786                             (0x08 << V_INTR_PRIO_SHIFT);
1787         // Raise guest TPR
1788         vmcb->control.int_ctl |= 0x0a & V_TPR_MASK;
1789         break;
1790 
1791     case 3:
1792         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1793             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1794                    vmcb->control.exit_code);
1795             return true;
1796         }
1797         vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1798         break;
1799 
1800     case 4:
1801         // INTERCEPT_VINTR should be ignored because V_INTR_PRIO < V_TPR
1802         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1803             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1804                    vmcb->control.exit_code);
1805             return true;
1806         }
1807         break;
1808 
1809     default:
1810         return true;
1811     }
1812 
1813     inc_test_stage(test);
1814 
1815     return get_test_stage(test) == 5;
1816 }
1817 
1818 static bool virq_inject_check(struct svm_test *test)
1819 {
1820     return get_test_stage(test) == 5;
1821 }
1822 
1823 /*
1824  * Detect nested guest RIP corruption as explained in kernel commit
1825  * b6162e82aef19fee9c32cb3fe9ac30d9116a8c73
1826  *
1827  * In the assembly loop below, 'insb' is executed while IO instructions
1828  * are not intercepted; the instruction is emulated by L0.
1829  *
1830  * At the same time we are getting interrupts from the local APIC timer,
1831  * and we do intercept them in L1.
1832  *
1833  * If the interrupt happens on the insb instruction, L0 will VMexit, emulate
1834  * the insb instruction and then it will inject the interrupt to L1 through
1835  * a nested VMexit.  Due to a bug, it would leave pre-emulation values of RIP,
1836  * RAX and RSP in the VMCB.
1837  *
1838  * In our intercept handler we detect the bug by checking that RIP is that of
1839  * the insb instruction, but its memory operand has already been written.
1840  * This means that insb was already executed.
1841  */
1842 
1843 static volatile int isr_cnt = 0;
1844 static volatile uint8_t io_port_var = 0xAA;
1845 extern const char insb_instruction_label[];
1846 
1847 static void reg_corruption_isr(isr_regs_t *regs)
1848 {
1849     isr_cnt++;
1850     apic_write(APIC_EOI, 0);
1851 }
1852 
1853 static void reg_corruption_prepare(struct svm_test *test)
1854 {
1855     default_prepare(test);
1856     set_test_stage(test, 0);
1857 
1858     vmcb->control.int_ctl = V_INTR_MASKING_MASK;
1859     vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1860 
1861     handle_irq(TIMER_VECTOR, reg_corruption_isr);
1862 
1863     /* set local APIC to inject external interrupts */
1864     apic_write(APIC_TMICT, 0);
1865     apic_write(APIC_TDCR, 0);
1866     apic_write(APIC_LVTT, TIMER_VECTOR | APIC_LVT_TIMER_PERIODIC);
1867     apic_write(APIC_TMICT, 1000);
1868 }
1869 
1870 static void reg_corruption_test(struct svm_test *test)
1871 {
1872     /* this is an endless loop, which is interrupted by the timer interrupt */
1873     asm volatile (
1874             "1:\n\t"
1875             "movw $0x4d0, %%dx\n\t" // IO port
1876             "lea %[io_port_var], %%rdi\n\t"
1877             "movb $0xAA, %[io_port_var]\n\t"
1878             "insb_instruction_label:\n\t"
1879             "insb\n\t"
1880             "jmp 1b\n\t"
1881 
1882             : [io_port_var] "=m" (io_port_var)
1883             : /* no inputs*/
1884             : "rdx", "rdi"
1885     );
1886 }
1887 
1888 static bool reg_corruption_finished(struct svm_test *test)
1889 {
1890     if (isr_cnt == 10000) {
1891         report(true,
1892                "No RIP corruption detected after %d timer interrupts",
1893                isr_cnt);
1894         set_test_stage(test, 1);
1895         return true;
1896     }
1897 
1898     if (vmcb->control.exit_code == SVM_EXIT_INTR) {
1899 
1900         void* guest_rip = (void*)vmcb->save.rip;
1901 
1902         irq_enable();
1903         asm volatile ("nop");
1904         irq_disable();
1905 
1906         if (guest_rip == insb_instruction_label && io_port_var != 0xAA) {
1907             report(false,
1908                    "RIP corruption detected after %d timer interrupts",
1909                    isr_cnt);
1910             return true;
1911         }
1912 
1913     }
1914     return false;
1915 }
1916 
1917 static bool reg_corruption_check(struct svm_test *test)
1918 {
1919     return get_test_stage(test) == 1;
1920 }
1921 
1922 static void get_tss_entry(void *data)
1923 {
1924     struct descriptor_table_ptr gdt;
1925     struct segment_desc64 *gdt_table;
1926     struct segment_desc64 *tss_entry;
1927     u16 tr = 0;
1928 
1929     sgdt(&gdt);
1930     tr = str();
1931     gdt_table = (struct segment_desc64 *) gdt.base;
1932     tss_entry = &gdt_table[tr / sizeof(struct segment_desc64)];
1933     *((struct segment_desc64 **)data) = tss_entry;
1934 }
1935 
1936 static int orig_cpu_count;
1937 
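/*
 * Restarting CPU 1 via INIT/SIPI makes it run the boot path again, which
 * reloads TR; clear the busy bit in its TSS descriptor first, since LTR on a
 * busy descriptor would #GP.
 */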
1938 static void init_startup_prepare(struct svm_test *test)
1939 {
1940     struct segment_desc64 *tss_entry;
1941     int i;
1942 
1943     vmcb_ident(vmcb);
1944 
1945     on_cpu(1, get_tss_entry, &tss_entry);
1946 
1947     orig_cpu_count = cpu_online_count;
1948 
1949     apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT,
1950                    id_map[1]);
1951 
1952     delay(100000000ULL);
1953 
1954     --cpu_online_count;
1955 
1956     *(uint64_t *)tss_entry &= ~DESC_BUSY;
1957 
1958     apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_STARTUP, id_map[1]);
1959 
1960     for (i = 0; i < 5 && cpu_online_count < orig_cpu_count; i++)
1961        delay(100000000ULL);
1962 }
1963 
1964 static bool init_startup_finished(struct svm_test *test)
1965 {
1966     return true;
1967 }
1968 
1969 static bool init_startup_check(struct svm_test *test)
1970 {
1971     return cpu_online_count == orig_cpu_count;
1972 }
1973 
1974 static volatile bool init_intercept;
1975 
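/*
 * With the INIT intercept enabled, a self-directed INIT IPI should cause an
 * SVM_EXIT_INIT #VMEXIT instead of resetting the vCPU.  This test runs on
 * vCPU 2 (see .on_vcpu in the test table below).
 */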
1976 static void init_intercept_prepare(struct svm_test *test)
1977 {
1978     init_intercept = false;
1979     vmcb_ident(vmcb);
1980     vmcb->control.intercept |= (1ULL << INTERCEPT_INIT);
1981 }
1982 
1983 static void init_intercept_test(struct svm_test *test)
1984 {
1985     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 0);
1986 }
1987 
1988 static bool init_intercept_finished(struct svm_test *test)
1989 {
1990     vmcb->save.rip += 3;
1991 
1992     if (vmcb->control.exit_code != SVM_EXIT_INIT) {
1993         report(false, "VMEXIT not due to init intercept. Exit reason 0x%x",
1994                vmcb->control.exit_code);
1995 
1996         return true;
1997     }
1998 
1999     init_intercept = true;
2000 
2001     report(true, "INIT to vcpu intercepted");
2002 
2003     return true;
2004 }
2005 
2006 static bool init_intercept_check(struct svm_test *test)
2007 {
2008     return init_intercept;
2009 }
2010 
2011 /*
2012  * Setting host EFLAGS.TF causes a #DB trap after the VMRUN completes on the
2013  * host side (i.e., after the #VMEXIT from the guest).
2014  *
2015  * [AMD APM]
2016  */
2017 static volatile u8 host_rflags_guest_main_flag = 0;
2018 static volatile u8 host_rflags_db_handler_flag = 0;
2019 static volatile bool host_rflags_ss_on_vmrun = false;
2020 static volatile bool host_rflags_vmrun_reached = false;
2021 static volatile bool host_rflags_set_tf = false;
2022 static u64 post_vmrun_rip;
2023 
2024 extern u64 *vmrun_rip;
2025 
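/*
 * #DB handler for the host_rflags test.  When single-stepping over VMRUN
 * (host_rflags_ss_on_vmrun), it waits for RIP to reach the vmrun_rip label
 * (presumably exported by the SVM harness to mark the VMRUN instruction),
 * then records the RIP of the following trap; host_rflags_finished() expects
 * that RIP to be exactly 3 bytes (the length of VMRUN) past the label.
 * Otherwise the handler simply clears TF to stop single-stepping.
 */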
2026 static void host_rflags_db_handler(struct ex_regs *r)
2027 {
2028 	if (host_rflags_ss_on_vmrun) {
2029 		if (host_rflags_vmrun_reached) {
2030 			r->rflags &= ~X86_EFLAGS_TF;
2031 			post_vmrun_rip = r->rip;
2032 		} else {
2033 			if (r->rip == (u64)&vmrun_rip)
2034 				host_rflags_vmrun_reached = true;
2035 		}
2036 	} else {
2037 		r->rflags &= ~X86_EFLAGS_TF;
2038 	}
2039 }
2040 
2041 static void host_rflags_prepare(struct svm_test *test)
2042 {
2043 	default_prepare(test);
2044 	handle_exception(DB_VECTOR, host_rflags_db_handler);
2045 	set_test_stage(test, 0);
2046 }
2047 
2048 static void host_rflags_prepare_gif_clear(struct svm_test *test)
2049 {
2050 	if (host_rflags_set_tf)
2051 		write_rflags(read_rflags() | X86_EFLAGS_TF);
2052 }
2053 
2054 static void host_rflags_test(struct svm_test *test)
2055 {
2056 	while (1) {
2057 		if (get_test_stage(test) > 0 && host_rflags_set_tf &&
2058 		    (!host_rflags_ss_on_vmrun) &&
2059 		    (!host_rflags_db_handler_flag))
2060 			host_rflags_guest_main_flag = 1;
2061 		if (get_test_stage(test) == 3)
2062 			break;
2063 		vmmcall();
2064 	}
2065 }
2066 
2067 static bool host_rflags_finished(struct svm_test *test)
2068 {
2069 	switch (get_test_stage(test)) {
2070 	case 0:
2071 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
2072 			report(false, "Unexpected VMEXIT. Exit reason 0x%x",
2073 			    vmcb->control.exit_code);
2074 			return true;
2075 		}
2076 		vmcb->save.rip += 3;
2077 		/*
2078 		 * Setting host EFLAGS.TF here (i.e., not immediately before VMRUN)
2079 		 * causes a #DB trap before the first guest instruction is executed
2080 		 */
2081 		host_rflags_set_tf = true;
2082 		break;
2083 	case 1:
2084 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL ||
2085 		    (!host_rflags_guest_main_flag)) {
2086 			report(false, "Unexpected VMEXIT or #DB handler"
2087 			    " invoked before guest main. Exit reason 0x%x",
2088 			    vmcb->control.exit_code);
2089 			return true;
2090 		}
2091 		vmcb->save.rip += 3;
2092 		/*
2093 		 * Setting host EFLAGS.TF immediately before VMRUN causes a #DB
2094 		 * trap after VMRUN completes on the host side (i.e., after
2095 		 * VMEXIT from guest).
2096 		 */
2097 		host_rflags_ss_on_vmrun = true;
2098 		break;
2099 	case 2:
2100 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL ||
2101 		    (post_vmrun_rip - (u64)&vmrun_rip) != 3) {
2102 			report(false, "Unexpected VMEXIT or RIP mismatch."
2103 			    " Exit reason 0x%x, VMRUN RIP: %lx, post-VMRUN"
2104 			    " RIP: %lx", vmcb->control.exit_code,
2105 			    (u64)&vmrun_rip, post_vmrun_rip);
2106 			return true;
2107 		}
2108 		host_rflags_set_tf = false;
2109 		vmcb->save.rip += 3;
2110 		break;
2111 	default:
2112 		return true;
2113 	}
2114 	inc_test_stage(test);
2115 	return get_test_stage(test) == 4;
2116 }
2117 
2118 static bool host_rflags_check(struct svm_test *test)
2119 {
2120 	return get_test_stage(test) == 3;
2121 }
2122 
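/*
 * Shorthand for a v2-style entry in svm_tests[]: only the name and the .v2
 * callback are filled in, so the runner invokes the function directly instead
 * of going through the prepare/test/finished/check callbacks used by the
 * table-driven tests.
 */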
2123 #define TEST(name) { #name, .v2 = name }
2124 
2125 /*
2126  * v2 tests
2127  */
2128 
2129 /*
2130  * Ensure that kvm recalculates the L1 guest's CPUID.01H:ECX.OSXSAVE
2131  * after VM-exit from an L2 guest that sets CR4.OSXSAVE to a different
2132  * value than in L1.
2133  */
2134 
2135 static void svm_cr4_osxsave_test_guest(struct svm_test *test)
2136 {
2137 	write_cr4(read_cr4() & ~X86_CR4_OSXSAVE);
2138 }
2139 
2140 static void svm_cr4_osxsave_test(void)
2141 {
2142 	if (!this_cpu_has(X86_FEATURE_XSAVE)) {
2143 		report_skip("XSAVE not detected");
2144 		return;
2145 	}
2146 
2147 	if (!(read_cr4() & X86_CR4_OSXSAVE)) {
2148 		unsigned long cr4 = read_cr4() | X86_CR4_OSXSAVE;
2149 
2150 		write_cr4(cr4);
2151 		vmcb->save.cr4 = cr4;
2152 	}
2153 
2154 	report(cpuid_osxsave(), "CPUID.01H:ECX.XSAVE set before VMRUN");
2155 
2156 	test_set_guest(svm_cr4_osxsave_test_guest);
2157 	report(svm_vmrun() == SVM_EXIT_VMMCALL,
2158 	       "svm_cr4_osxsave_test_guest finished with VMMCALL");
2159 
2160 	report(cpuid_osxsave(), "CPUID.01H:ECX.XSAVE set after VMRUN");
2161 }
2162 
2163 static void basic_guest_main(struct svm_test *test)
2164 {
2165 }
2166 
2167 
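/*
 * For every bit in [start, end] (stepping by inc) that is set in resv_mask,
 * write val with that reserved bit turned on into the given VMCB field and
 * expect VMRUN to fail its consistency checks with SVM_EXIT_ERR.
 */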
2168 #define SVM_TEST_REG_RESERVED_BITS(start, end, inc, str_name, reg, val,	\
2169 				   resv_mask)				\
2170 {									\
2171         u64 tmp, mask;							\
2172         int i;								\
2173 									\
2174         for (i = start; i <= end; i = i + inc) {			\
2175                 mask = 1ull << i;					\
2176                 if (!(mask & resv_mask))				\
2177                         continue;					\
2178                 tmp = val | mask;					\
2179 		reg = tmp;						\
2180 		report(svm_vmrun() == SVM_EXIT_ERR, "Test %s %d:%d: %lx",\
2181 		    str_name, end, start, tmp);				\
2182         }								\
2183 }
2184 
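/*
 * Same idea as SVM_TEST_REG_RESERVED_BITS, but selects the CR0/CR3/CR4 save
 * field by number and takes the expected exit code as a parameter, since some
 * CR3 reserved-bit violations are only reported as #NPF on the first guest
 * page walk rather than as a failed VMRUN.
 */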
2185 #define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask,	\
2186 				  exit_code, test_name)			\
2187 {									\
2188 	u64 tmp, mask;							\
2189 	int i;								\
2190 									\
2191 	for (i = start; i <= end; i = i + inc) {			\
2192 		mask = 1ull << i;					\
2193 		if (!(mask & resv_mask))				\
2194 			continue;					\
2195 		tmp = val | mask;					\
2196 		switch (cr) {						\
2197 		case 0:							\
2198 			vmcb->save.cr0 = tmp;				\
2199 			break;						\
2200 		case 3:							\
2201 			vmcb->save.cr3 = tmp;				\
2202 			break;						\
2203 		case 4:							\
2204 			vmcb->save.cr4 = tmp;				\
2205 		}							\
2206 		report(svm_vmrun() == exit_code, "Test CR%d " test_name "%d:%d: %lx",\
2207 		    cr, end, start, tmp);				\
2208 	}								\
2209 }
2210 
2211 static void test_efer(void)
2212 {
2213 	/*
2214 	 * Clearing EFER.SVME is illegal
2215 	 */
2216 	u64 efer_saved = vmcb->save.efer;
2217 	u64 efer = efer_saved;
2218 
2219 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer);
2220 	efer &= ~EFER_SVME;
2221 	vmcb->save.efer = efer;
2222 	report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer);
2223 	vmcb->save.efer = efer_saved;
2224 
2225 	/*
2226 	 * EFER MBZ bits: 63:16, 9
2227 	 */
2228 	efer_saved = vmcb->save.efer;
2229 
2230 	SVM_TEST_REG_RESERVED_BITS(8, 9, 1, "EFER", vmcb->save.efer,
2231 	    efer_saved, SVM_EFER_RESERVED_MASK);
2232 	SVM_TEST_REG_RESERVED_BITS(16, 63, 4, "EFER", vmcb->save.efer,
2233 	    efer_saved, SVM_EFER_RESERVED_MASK);
2234 
2235 	/*
2236 	 * EFER.LME and CR0.PG are both set and CR4.PAE is zero.
2237 	 */
2238 	u64 cr0_saved = vmcb->save.cr0;
2239 	u64 cr0;
2240 	u64 cr4_saved = vmcb->save.cr4;
2241 	u64 cr4;
2242 
2243 	efer = efer_saved | EFER_LME;
2244 	vmcb->save.efer = efer;
2245 	cr0 = cr0_saved | X86_CR0_PG | X86_CR0_PE;
2246 	vmcb->save.cr0 = cr0;
2247 	cr4 = cr4_saved & ~X86_CR4_PAE;
2248 	vmcb->save.cr4 = cr4;
2249 	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
2250 	    "CR0.PG=1 (%lx) and CR4.PAE=0 (%lx)", efer, cr0, cr4);
2251 
2252 	/*
2253 	 * EFER.LME and CR0.PG are both set and CR0.PE is zero.
2254 	 */
2255 	vmcb->save.cr4 = cr4_saved | X86_CR4_PAE;
2256 	cr0 &= ~X86_CR0_PE;
2257 	vmcb->save.cr0 = cr0;
2258 	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
2259 	    "CR0.PG=1 and CR0.PE=0 (%lx)", efer, cr0);
2260 
2261 	/*
2262 	 * EFER.LME, CR0.PG, CR4.PAE, CS.L, and CS.D are all non-zero.
2263 	 */
2264 	u32 cs_attrib_saved = vmcb->save.cs.attrib;
2265 	u32 cs_attrib;
2266 
2267 	cr0 |= X86_CR0_PE;
2268 	vmcb->save.cr0 = cr0;
2269 	cs_attrib = cs_attrib_saved | SVM_SELECTOR_L_MASK |
2270 	    SVM_SELECTOR_DB_MASK;
2271 	vmcb->save.cs.attrib = cs_attrib;
2272 	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
2273 	    "CR0.PG=1 (%lx), CR4.PAE=1 (%lx), CS.L=1 and CS.D=1 (%x)",
2274 	    efer, cr0, cr4, cs_attrib);
2275 
2276 	vmcb->save.cr0 = cr0_saved;
2277 	vmcb->save.cr4 = cr4_saved;
2278 	vmcb->save.efer = efer_saved;
2279 	vmcb->save.cs.attrib = cs_attrib_saved;
2280 }
2281 
2282 static void test_cr0(void)
2283 {
2284 	/*
2285 	 * Clearing CR0.CD while setting CR0.NW is an illegal combination
2286 	 */
2287 	u64 cr0_saved = vmcb->save.cr0;
2288 	u64 cr0 = cr0_saved;
2289 
2290 	cr0 |= X86_CR0_CD;
2291 	cr0 &= ~X86_CR0_NW;
2292 	vmcb->save.cr0 = cr0;
2293 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=0: %lx",
2294 	    cr0);
2295 	cr0 |= X86_CR0_NW;
2296 	vmcb->save.cr0 = cr0;
2297 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=1: %lx",
2298 	    cr0);
2299 	cr0 &= ~X86_CR0_NW;
2300 	cr0 &= ~X86_CR0_CD;
2301 	vmcb->save.cr0 = cr0;
2302 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=0: %lx",
2303 	    cr0);
2304 	cr0 |= X86_CR0_NW;
2305 	vmcb->save.cr0 = cr0;
2306 	report (svm_vmrun() == SVM_EXIT_ERR, "Test CR0 CD=0,NW=1: %lx",
2307 	    cr0);
2308 	vmcb->save.cr0 = cr0_saved;
2309 
2310 	/*
2311 	 * CR0[63:32] are not zero
2312 	 */
2313 	cr0 = cr0_saved;
2314 
2315 	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "CR0", vmcb->save.cr0, cr0_saved,
2316 	    SVM_CR0_RESERVED_MASK);
2317 	vmcb->save.cr0 = cr0_saved;
2318 }
2319 
2320 static void test_cr3(void)
2321 {
2322 	/*
2323 	 * CR3 MBZ bits based on different modes:
2324 	 *   [63:52] - long mode
2325 	 */
2326 	u64 cr3_saved = vmcb->save.cr3;
2327 
2328 	SVM_TEST_CR_RESERVED_BITS(0, 63, 1, 3, cr3_saved,
2329 	    SVM_CR3_LONG_MBZ_MASK, SVM_EXIT_ERR, "");
2330 
2331 	vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_MBZ_MASK;
2332 	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
2333 	    vmcb->save.cr3);
2334 
2335 	/*
2336 	 * CR3 non-MBZ reserved bits based on different modes:
2337 	 *   [11:5] [2:0] - long mode (PCIDE=0)
2338 	 *          [2:0] - PAE legacy mode
2339 	 */
2340 	u64 cr4_saved = vmcb->save.cr4;
2341 	u64 *pdpe = npt_get_pml4e();
2342 
2343 	/*
2344 	 * Long mode
2345 	 */
2346 	if (this_cpu_has(X86_FEATURE_PCID)) {
2347 		vmcb->save.cr4 = cr4_saved | X86_CR4_PCIDE;
2348 		SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved,
2349 		    SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_VMMCALL, "(PCIDE=1) ");
2350 
2351 		vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK;
2352 		report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
2353 		    vmcb->save.cr3);
2354 	}
2355 
2356 	vmcb->save.cr4 = cr4_saved & ~X86_CR4_PCIDE;
2357 
2358 	/* Clear P (Present) bit in NPT in order to trigger #NPF */
2359 	pdpe[0] &= ~1ULL;
2360 
2361 	SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved,
2362 	    SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_NPF, "(PCIDE=0) ");
2363 
2364 	pdpe[0] |= 1ULL;
2365 	vmcb->save.cr3 = cr3_saved;
2366 
2367 	/*
2368 	 * PAE legacy
2369 	 */
2370 	pdpe[0] &= ~1ULL;
2371 	vmcb->save.cr4 = cr4_saved | X86_CR4_PAE;
2372 	SVM_TEST_CR_RESERVED_BITS(0, 2, 1, 3, cr3_saved,
2373 	    SVM_CR3_PAE_LEGACY_RESERVED_MASK, SVM_EXIT_NPF, "(PAE) ");
2374 
2375 	pdpe[0] |= 1ULL;
2376 	vmcb->save.cr3 = cr3_saved;
2377 	vmcb->save.cr4 = cr4_saved;
2378 }
2379 
2380 static void test_cr4(void)
2381 {
2382 	/*
2383 	 * CR4 MBZ bits based on different modes:
2384 	 *   [15:12], 17, 19, [31:22] - legacy mode
2385 	 *   [15:12], 17, 19, [63:22] - long mode
2386 	 */
2387 	u64 cr4_saved = vmcb->save.cr4;
2388 	u64 efer_saved = vmcb->save.efer;
2389 	u64 efer = efer_saved;
2390 
2391 	efer &= ~EFER_LME;
2392 	vmcb->save.efer = efer;
2393 	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
2394 	    SVM_CR4_LEGACY_RESERVED_MASK, SVM_EXIT_ERR, "");
2395 
2396 	efer |= EFER_LME;
2397 	vmcb->save.efer = efer;
2398 	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
2399 	    SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, "");
2400 	SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved,
2401 	    SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, "");
2402 
2403 	vmcb->save.cr4 = cr4_saved;
2404 	vmcb->save.efer = efer_saved;
2405 }
2406 
2407 static void test_dr(void)
2408 {
2409 	/*
2410 	 * DR6[63:32] and DR7[63:32] are MBZ
2411 	 */
2412 	u64 dr_saved = vmcb->save.dr6;
2413 
2414 	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR6", vmcb->save.dr6, dr_saved,
2415 	    SVM_DR6_RESERVED_MASK);
2416 	vmcb->save.dr6 = dr_saved;
2417 
2418 	dr_saved = vmcb->save.dr7;
2419 	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR7", vmcb->save.dr7, dr_saved,
2420 	    SVM_DR7_RESERVED_MASK);
2421 
2422 	vmcb->save.dr7 = dr_saved;
2423 }
2424 
2425 /* TODO: verify if high 32-bits are sign- or zero-extended on bare metal */
2426 #define	TEST_BITMAP_ADDR(save_intercept, type, addr, exit_code,		\
2427 			 msg) {						\
2428 	vmcb->control.intercept = save_intercept | 1ULL << type;	\
2429 	if (type == INTERCEPT_MSR_PROT)					\
2430 		vmcb->control.msrpm_base_pa = addr;			\
2431 	else								\
2432 		vmcb->control.iopm_base_pa = addr;			\
2433 	report(svm_vmrun() == exit_code,				\
2434 	    "Test %s address: %lx", msg, addr);                         \
2435 }
2436 
2437 /*
2438  * If the MSR or IOIO intercept table extends to a physical address that
2439  * is greater than or equal to the maximum supported physical address, the
2440  * guest state is illegal.
2441  *
2442  * The VMRUN instruction ignores the lower 12 bits of the address specified
2443  * in the VMCB.
2444  *
2445  * MSRPM spans 2 contiguous 4KB pages while IOPM spans 2 contiguous 4KB
2446  * pages + 1 byte.
2447  *
2448  * [APM vol 2]
2449  *
2450  * Note: Unallocated MSRPM addresses that pass the consistency checks generate
2451  * #NPF.
2452  */
2453 static void test_msrpm_iopm_bitmap_addrs(void)
2454 {
2455 	u64 saved_intercept = vmcb->control.intercept;
2456 	u64 addr_beyond_limit = 1ull << cpuid_maxphyaddr();
2457 	u64 addr = virt_to_phys(msr_bitmap) & (~((1ull << 12) - 1));
2458 
2459 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT,
2460 			addr_beyond_limit - 3 * PAGE_SIZE, SVM_EXIT_ERR,
2461 			"MSRPM");
2462 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT,
2463 			addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR,
2464 			"MSRPM");
2465 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT,
2466 			addr_beyond_limit - 2 * PAGE_SIZE + 1, SVM_EXIT_ERR,
2467 			"MSRPM");
2468 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT,
2469 			addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR,
2470 			"MSRPM");
2471 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr,
2472 			SVM_EXIT_VMMCALL, "MSRPM");
2473 	addr |= (1ull << 12) - 1;
2474 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr,
2475 			SVM_EXIT_VMMCALL, "MSRPM");
2476 
2477 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2478 			addr_beyond_limit - 4 * PAGE_SIZE, SVM_EXIT_VMMCALL,
2479 			"IOPM");
2480 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2481 			addr_beyond_limit - 3 * PAGE_SIZE, SVM_EXIT_VMMCALL,
2482 			"IOPM");
2483 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2484 			addr_beyond_limit - 2 * PAGE_SIZE - 2, SVM_EXIT_VMMCALL,
2485 			"IOPM");
2486 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2487 			addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR,
2488 			"IOPM");
2489 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2490 			addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR,
2491 			"IOPM");
2492 	addr = virt_to_phys(io_bitmap) & (~((1ull << 11) - 1));
2493 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr,
2494 			SVM_EXIT_VMMCALL, "IOPM");
2495 	addr |= (1ull << 12) - 1;
2496 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr,
2497 			SVM_EXIT_VMMCALL, "IOPM");
2498 
2499 	vmcb->control.intercept = saved_intercept;
2500 }
2501 
2502 #define TEST_CANONICAL(seg_base, msg)					\
2503 	saved_addr = seg_base;						\
2504 	seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \
2505 	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test %s.base for canonical form: %lx", msg, seg_base);							\
2506 	seg_base = saved_addr;
2507 
2508 /*
2509  * VMRUN canonicalizes (i.e., sign-extends to bit 63) all base addresses
2510  * in the segment registers that have been loaded.
2511  */
2512 static void test_vmrun_canonicalization(void)
2513 {
2514 	u64 saved_addr;
2515 	u8 addr_limit = cpuid_maxphyaddr();
2516 	u64 noncanonical_mask = NONCANONICAL & ~((1ul << addr_limit) - 1);
2517 
2518 	TEST_CANONICAL(vmcb->save.es.base, "ES");
2519 	TEST_CANONICAL(vmcb->save.cs.base, "CS");
2520 	TEST_CANONICAL(vmcb->save.ss.base, "SS");
2521 	TEST_CANONICAL(vmcb->save.ds.base, "DS");
2522 	TEST_CANONICAL(vmcb->save.fs.base, "FS");
2523 	TEST_CANONICAL(vmcb->save.gs.base, "GS");
2524 	TEST_CANONICAL(vmcb->save.gdtr.base, "GDTR");
2525 	TEST_CANONICAL(vmcb->save.ldtr.base, "LDTR");
2526 	TEST_CANONICAL(vmcb->save.idtr.base, "IDTR");
2527 	TEST_CANONICAL(vmcb->save.tr.base, "TR");
2528 }
2529 
2530 static void svm_guest_state_test(void)
2531 {
2532 	test_set_guest(basic_guest_main);
2533 	test_efer();
2534 	test_cr0();
2535 	test_cr3();
2536 	test_cr4();
2537 	test_dr();
2538 	test_msrpm_iopm_bitmap_addrs();
2539 	test_vmrun_canonicalization();
2540 }
2541 
2542 
2543 static bool volatile svm_errata_reproduced = false;
2544 static unsigned long volatile physical = 0;
2545 
2546 
2547 /*
2548  *
2549  * Test the following erratum:
2550  * If VMRUN/VMSAVE/VMLOAD is attempted by the nested guest,
2551  * the CPU first checks EAX against the host reserved memory
2552  * regions (so far only SMM_ADDR/SMM_MASK are known to cause it),
2553  * and only then signals the #VMEXIT.
2554  *
2555  * Try to reproduce this by trying vmsave on each possible 4K aligned memory
2556  * address in the low 4G where the SMM area has to reside.
2557  */
2558 
2559 static void gp_isr(struct ex_regs *r)
2560 {
2561     svm_errata_reproduced = true;
2562     /* skip over the vmsave instruction */
2563     r->rip += 3;
2564 }
2565 
2566 static void svm_vmrun_errata_test(void)
2567 {
2568     unsigned long *last_page = NULL;
2569 
2570     handle_exception(GP_VECTOR, gp_isr);
2571 
2572     while (!svm_errata_reproduced) {
2573 
2574         unsigned long *page = alloc_pages(1);
2575 
2576         if (!page) {
2577             report(true, "All guest memory tested, no bug found");;
2578             break;
2579         }
2580 
2581         physical = virt_to_phys(page);
2582 
2583         asm volatile (
2584             "mov %[_physical], %%rax\n\t"
2585             "vmsave %%rax\n\t"
2586 
2587             : [_physical] "=m" (physical)
2588             : /* no inputs*/
2589             : "rax" /*clobbers*/
2590         );
2591 
2592         if (svm_errata_reproduced) {
2593             report(false, "Got #GP exception - svm errata reproduced at 0x%lx",
2594                    physical);
2595             break;
2596         }
2597 
2598         *page = (unsigned long)last_page;
2599         last_page = page;
2600     }
2601 
2602     while (last_page) {
2603         unsigned long *page = last_page;
2604         last_page = (unsigned long *)*last_page;
2605         free_pages_by_order(page, 1);
2606     }
2607 }
2608 
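/*
 * The guest executes VMLOAD/VMSAVE on the (L1) VMCB.  With the corresponding
 * intercepts cleared, neither instruction causes a #VMEXIT and the only exit
 * seen should be the final VMMCALL; with an intercept set, the matching
 * SVM_EXIT_VMLOAD/SVM_EXIT_VMSAVE exit is expected instead.
 */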
2609 static void vmload_vmsave_guest_main(struct svm_test *test)
2610 {
2611 	u64 vmcb_phys = virt_to_phys(vmcb);
2612 
2613 	asm volatile ("vmload %0" : : "a"(vmcb_phys));
2614 	asm volatile ("vmsave %0" : : "a"(vmcb_phys));
2615 }
2616 
2617 static void svm_vmload_vmsave(void)
2618 {
2619 	u64 intercept_saved = vmcb->control.intercept;
2620 
2621 	test_set_guest(vmload_vmsave_guest_main);
2622 
2623 	/*
2624 	 * Disabling the VMLOAD and VMSAVE intercepts doesn't cause the
2625 	 * respective #VMEXITs to the host
2626 	 */
2627 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
2628 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
2629 	svm_vmrun();
2630 	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
2631 	    "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
2632 
2633 	/*
2634 	 * Enabling the VMLOAD and VMSAVE intercepts causes the respective
2635 	 * #VMEXITs to the host
2636 	 */
2637 	vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD);
2638 	svm_vmrun();
2639 	report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test "
2640 	    "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT");
2641 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
2642 	vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE);
2643 	svm_vmrun();
2644 	report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test "
2645 	    "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT");
2646 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
2647 	svm_vmrun();
2648 	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
2649 	    "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
2650 
2651 	vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD);
2652 	svm_vmrun();
2653 	report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test "
2654 	    "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT");
2655 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
2656 	svm_vmrun();
2657 	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
2658 	    "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
2659 
2660 	vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE);
2661 	svm_vmrun();
2662 	report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test "
2663 	    "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT");
2664 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
2665 	svm_vmrun();
2666 	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
2667 	    "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
2668 
2669 	vmcb->control.intercept = intercept_saved;
2670 }
2671 
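/*
 * Test table: each entry lists the name, supported, prepare,
 * prepare_gif_clear, guest body, finished and check callbacks (in
 * struct svm_test order); TEST() entries provide only a .v2 function.
 * The table is terminated by an all-NULL entry.
 */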
2672 struct svm_test svm_tests[] = {
2673     { "null", default_supported, default_prepare,
2674       default_prepare_gif_clear, null_test,
2675       default_finished, null_check },
2676     { "vmrun", default_supported, default_prepare,
2677       default_prepare_gif_clear, test_vmrun,
2678        default_finished, check_vmrun },
2679     { "ioio", default_supported, prepare_ioio,
2680        default_prepare_gif_clear, test_ioio,
2681        ioio_finished, check_ioio },
2682     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
2683       default_prepare_gif_clear, null_test, default_finished,
2684       check_no_vmrun_int },
2685     { "rsm", default_supported,
2686       prepare_rsm_intercept, default_prepare_gif_clear,
2687       test_rsm_intercept, finished_rsm_intercept, check_rsm_intercept },
2688     { "cr3 read intercept", default_supported,
2689       prepare_cr3_intercept, default_prepare_gif_clear,
2690       test_cr3_intercept, default_finished, check_cr3_intercept },
2691     { "cr3 read nointercept", default_supported, default_prepare,
2692       default_prepare_gif_clear, test_cr3_intercept, default_finished,
2693       check_cr3_nointercept },
2694     { "cr3 read intercept emulate", smp_supported,
2695       prepare_cr3_intercept_bypass, default_prepare_gif_clear,
2696       test_cr3_intercept_bypass, default_finished, check_cr3_intercept },
2697     { "dr intercept check", default_supported, prepare_dr_intercept,
2698       default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished,
2699       check_dr_intercept },
2700     { "next_rip", next_rip_supported, prepare_next_rip,
2701       default_prepare_gif_clear, test_next_rip,
2702       default_finished, check_next_rip },
2703     { "msr intercept check", default_supported, prepare_msr_intercept,
2704       default_prepare_gif_clear, test_msr_intercept,
2705       msr_intercept_finished, check_msr_intercept },
2706     { "mode_switch", default_supported, prepare_mode_switch,
2707       default_prepare_gif_clear, test_mode_switch,
2708        mode_switch_finished, check_mode_switch },
2709     { "asid_zero", default_supported, prepare_asid_zero,
2710       default_prepare_gif_clear, test_asid_zero,
2711        default_finished, check_asid_zero },
2712     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare,
2713       default_prepare_gif_clear, sel_cr0_bug_test,
2714        sel_cr0_bug_finished, sel_cr0_bug_check },
2715     { "npt_nx", npt_supported, npt_nx_prepare,
2716       default_prepare_gif_clear, null_test,
2717       default_finished, npt_nx_check },
2718     { "npt_np", npt_supported, npt_np_prepare,
2719       default_prepare_gif_clear, npt_np_test,
2720       default_finished, npt_np_check },
2721     { "npt_us", npt_supported, npt_us_prepare,
2722       default_prepare_gif_clear, npt_us_test,
2723       default_finished, npt_us_check },
2724     { "npt_rsvd", npt_supported, npt_rsvd_prepare,
2725       default_prepare_gif_clear, null_test,
2726       default_finished, npt_rsvd_check },
2727     { "npt_rw", npt_supported, npt_rw_prepare,
2728       default_prepare_gif_clear, npt_rw_test,
2729       default_finished, npt_rw_check },
2730     { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare,
2731       default_prepare_gif_clear, null_test,
2732       default_finished, npt_rsvd_pfwalk_check },
2733     { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare,
2734       default_prepare_gif_clear, null_test,
2735       default_finished, npt_rw_pfwalk_check },
2736     { "npt_l1mmio", npt_supported, npt_l1mmio_prepare,
2737       default_prepare_gif_clear, npt_l1mmio_test,
2738       default_finished, npt_l1mmio_check },
2739     { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare,
2740       default_prepare_gif_clear, npt_rw_l1mmio_test,
2741       default_finished, npt_rw_l1mmio_check },
2742     { "tsc_adjust", tsc_adjust_supported, tsc_adjust_prepare,
2743       default_prepare_gif_clear, tsc_adjust_test,
2744       default_finished, tsc_adjust_check },
2745     { "latency_run_exit", default_supported, latency_prepare,
2746       default_prepare_gif_clear, latency_test,
2747       latency_finished, latency_check },
2748     { "latency_run_exit_clean", default_supported, latency_prepare,
2749       default_prepare_gif_clear, latency_test,
2750       latency_finished_clean, latency_check },
2751     { "latency_svm_insn", default_supported, lat_svm_insn_prepare,
2752       default_prepare_gif_clear, null_test,
2753       lat_svm_insn_finished, lat_svm_insn_check },
2754     { "exc_inject", default_supported, exc_inject_prepare,
2755       default_prepare_gif_clear, exc_inject_test,
2756       exc_inject_finished, exc_inject_check },
2757     { "pending_event", default_supported, pending_event_prepare,
2758       default_prepare_gif_clear,
2759       pending_event_test, pending_event_finished, pending_event_check },
2760     { "pending_event_cli", default_supported, pending_event_cli_prepare,
2761       pending_event_cli_prepare_gif_clear,
2762       pending_event_cli_test, pending_event_cli_finished,
2763       pending_event_cli_check },
2764     { "interrupt", default_supported, interrupt_prepare,
2765       default_prepare_gif_clear, interrupt_test,
2766       interrupt_finished, interrupt_check },
2767     { "nmi", default_supported, nmi_prepare,
2768       default_prepare_gif_clear, nmi_test,
2769       nmi_finished, nmi_check },
2770     { "nmi_hlt", smp_supported, nmi_prepare,
2771       default_prepare_gif_clear, nmi_hlt_test,
2772       nmi_hlt_finished, nmi_hlt_check },
2773     { "virq_inject", default_supported, virq_inject_prepare,
2774       default_prepare_gif_clear, virq_inject_test,
2775       virq_inject_finished, virq_inject_check },
2776     { "reg_corruption", default_supported, reg_corruption_prepare,
2777       default_prepare_gif_clear, reg_corruption_test,
2778       reg_corruption_finished, reg_corruption_check },
2779     { "svm_init_startup_test", smp_supported, init_startup_prepare,
2780       default_prepare_gif_clear, null_test,
2781       init_startup_finished, init_startup_check },
2782     { "svm_init_intercept_test", smp_supported, init_intercept_prepare,
2783       default_prepare_gif_clear, init_intercept_test,
2784       init_intercept_finished, init_intercept_check, .on_vcpu = 2 },
2785     { "host_rflags", default_supported, host_rflags_prepare,
2786       host_rflags_prepare_gif_clear, host_rflags_test,
2787       host_rflags_finished, host_rflags_check },
2788     TEST(svm_cr4_osxsave_test),
2789     TEST(svm_guest_state_test),
2790     TEST(svm_vmrun_errata_test),
2791     TEST(svm_vmload_vmsave),
2792     { NULL, NULL, NULL, NULL, NULL, NULL, NULL }
2793 };
2794