/* xref: /kvm-unit-tests/x86/svm_tests.c (revision 74e79380f900368bf7f8c9aaac5ac1aba962d63e) */
#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"
#include "delay.h"

#define SVM_EXIT_MAX_DR_INTERCEPT 0x3f

static void *scratch_page;

#define LATENCY_RUNS 1000000
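
/*
 * Note: the latency figures reported below are raw RDTSC deltas; RDTSC is
 * not a serializing instruction, so treat them as rough estimates.
 */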

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

static void null_test(struct svm_test *test)
{
}

static bool null_check(struct svm_test *test)
{
    return vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

static void prepare_no_vmrun_int(struct svm_test *test)
{
    vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct svm_test *test)
{
    return vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct svm_test *test)
{
    asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb)));
}

static bool check_vmrun(struct svm_test *test)
{
    return vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_cr3_intercept(struct svm_test *test)
{
    default_prepare(test);
    vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct svm_test *test)
{
    asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct svm_test *test)
{
    return vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct svm_test *test)
{
    return null_check(test) && test->scratch == read_cr3();
}

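/*
 * CPU 1 patches the guest instruction at the mmio_insn label (in
 * test_cr3_intercept_bypass() below) from a plain memory store into
 * "mov %cr3, %rax; nop" while CPU 0 is already running the guest,
 * checking that the CR3 read intercept still fires for the rewritten
 * instruction.
 */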
static void corrupt_cr3_intercept_bypass(void *_test)
{
    struct svm_test *test = _test;
    extern volatile u32 mmio_insn;

    while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
        pause();
    pause();
    pause();
    pause();
    mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct svm_test *test)
{
    default_prepare(test);
    vmcb->control.intercept_cr_read |= 1 << 3;
    on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct svm_test *test)
{
    ulong a = 0xa0000;

    test->scratch = 1;
    while (test->scratch != 2)
        barrier();

    asm volatile ("mmio_insn: mov %0, (%0); nop"
                  : "+a"(a) : : "memory");
    test->scratch = a;
}

static void prepare_dr_intercept(struct svm_test *test)
{
    default_prepare(test);
    vmcb->control.intercept_dr_read = 0xff;
    vmcb->control.intercept_dr_write = 0xff;
}

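/*
 * MOV to/from a debug register encodes the register number in the
 * instruction itself, so each DR needs its own asm statement; the
 * intercept handler (dr_intercept_finished() below) stores the register
 * number in test->scratch for these loops to check.
 */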
static void test_dr_intercept(struct svm_test *test)
{
    unsigned int i, failcnt = 0;

    /* Loop testing debug register reads */
    for (i = 0; i < 8; i++) {

        switch (i) {
        case 0:
            asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 1:
            asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 2:
            asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 3:
            asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 4:
            asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 5:
            asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 6:
            asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 7:
            asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
            break;
        }

        if (test->scratch != i) {
            report(false, "dr%u read intercept", i);
            failcnt++;
        }
    }

    /* Loop testing debug register writes */
    for (i = 0; i < 8; i++) {

        switch (i) {
        case 0:
            asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
            break;
        case 1:
            asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
            break;
        case 2:
            asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
            break;
        case 3:
            asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
            break;
        case 4:
            asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
            break;
        case 5:
            asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
            break;
        case 6:
            asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
            break;
        case 7:
            asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
            break;
        }

        if (test->scratch != i) {
            report(false, "dr%u write intercept", i);
            failcnt++;
        }
    }

    test->scratch = failcnt;
}

static bool dr_intercept_finished(struct svm_test *test)
{
    ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0);

    /* Only expect DR intercepts */
    if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
        return true;

    /*
     * Compute the debug register number.
     * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
     * Programmer's Manual Volume 2 - System Programming:
     * http://support.amd.com/TechDocs/24593.pdf
     * there are 16 VMEXIT codes each for DR read and write.
     */
    test->scratch = (n % 16);

    /* Jump over the 3-byte MOV instruction */
    vmcb->save.rip += 3;

    return false;
}

static bool check_dr_intercept(struct svm_test *test)
{
    return !test->scratch;
}

static bool next_rip_supported(void)
{
    return this_cpu_has(X86_FEATURE_NRIPS);
}

static void prepare_next_rip(struct svm_test *test)
{
    vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}

static void test_next_rip(struct svm_test *test)
{
    asm volatile ("rdtsc\n\t"
                  ".globl exp_next_rip\n\t"
                  "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct svm_test *test)
{
    extern char exp_next_rip;
    unsigned long address = (unsigned long)&exp_next_rip;

    return address == vmcb->control.next_rip;
}

extern u8 *msr_bitmap;

static void prepare_msr_intercept(struct svm_test *test)
{
    default_prepare(test);
    vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
    vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
    memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
}

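/*
 * Illustrative sketch, not used by the tests: how an MSR index would map
 * to its read-intercept bit in the MSR permission map, assuming the
 * layout given in the AMD APM vol. 2 (2 bits per MSR - read, then
 * write - in three 2 KB blocks covering the same ranges that
 * test_msr_intercept() below skips between).
 */
static inline bool msrpm_read_intercepted(const u8 *msrpm, u32 msr)
{
    u32 base, bit;

    if (msr <= 0x1fff)
        base = 0x0000;          /* MSRs 0000_0000h..0000_1FFFh */
    else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
        base = 0x0800;          /* MSRs C000_0000h..C000_1FFFh */
    else if (msr >= 0xc0010000 && msr <= 0xc0011fff)
        base = 0x1000;          /* MSRs C001_0000h..C001_1FFFh */
    else
        return true;            /* MSRs outside the map always intercept */

    bit = (msr & 0x1fff) * 2;   /* even bit = read, odd bit = write */
    return msrpm[base + bit / 8] & (1 << (bit % 8));
}
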
static void test_msr_intercept(struct svm_test *test)
{
    unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
    unsigned long msr_index;

    for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
        if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
            /*
             * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
             * Programmer's Manual volume 2 - System Programming:
             * http://support.amd.com/TechDocs/24593.pdf
             * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
             */
            continue;
        }

        /* Skip the gaps between supported MSR ranges */
        if (msr_index == 0x2000)
            msr_index = 0xc0000000;
        else if (msr_index == 0xc0002000)
            msr_index = 0xc0010000;

        test->scratch = -1;

        rdmsr(msr_index);

        /* Check that a read intercept occurred for MSR at msr_index */
        if (test->scratch != msr_index)
            report(false, "MSR 0x%lx read intercept", msr_index);

        /*
         * Poor man's approach to generating a value that
         * seems arbitrary each time around the loop.
         */
        msr_value += (msr_value << 1);

        wrmsr(msr_index, msr_value);

        /* Check that a write intercept occurred for MSR with msr_value */
        if (test->scratch != msr_value)
            report(false, "MSR 0x%lx write intercept", msr_index);
    }

    test->scratch = -2;
}

static bool msr_intercept_finished(struct svm_test *test)
{
    u32 exit_code = vmcb->control.exit_code;
    u64 exit_info_1;
    u8 *opcode;

    if (exit_code == SVM_EXIT_MSR) {
        exit_info_1 = vmcb->control.exit_info_1;
    } else {
        /*
         * If a #GP exception occurs instead, check that it was
         * for RDMSR/WRMSR and set exit_info_1 accordingly.
         */

        if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
            return true;

        opcode = (u8 *)vmcb->save.rip;
        if (opcode[0] != 0x0f)
            return true;

        switch (opcode[1]) {
        case 0x30: /* WRMSR */
            exit_info_1 = 1;
            break;
        case 0x32: /* RDMSR */
            exit_info_1 = 0;
            break;
        default:
            return true;
        }

        /*
         * Warn that a #GP exception occurred instead.
         * RCX holds the MSR index.
         */
        printf("%s 0x%lx #GP exception\n",
            exit_info_1 ? "WRMSR" : "RDMSR", get_regs().rcx);
    }

    /* Jump over the RDMSR/WRMSR instruction */
    vmcb->save.rip += 2;

    /*
     * Test whether the intercept was for RDMSR or WRMSR.
     * For RDMSR, test->scratch is set to the MSR index;
     *      RCX holds the MSR index.
     * For WRMSR, test->scratch is set to the MSR value;
     *      RDX holds the upper 32 bits of the MSR value,
     *      while RAX holds its lower 32 bits.
     */
    if (exit_info_1)
        test->scratch =
            ((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff));
    else
        test->scratch = get_regs().rcx;

    return false;
}

static bool check_msr_intercept(struct svm_test *test)
{
    memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
    return (test->scratch == -2);
}

static void prepare_mode_switch(struct svm_test *test)
{
    vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
                                             |  (1ULL << UD_VECTOR)
                                             |  (1ULL << DF_VECTOR)
                                             |  (1ULL << PF_VECTOR);
    test->scratch = 0;
}

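/*
 * Walk the guest from long mode all the way down to real mode and back:
 * long mode -> 32-bit protected mode (clear CR0.PG, EFER.LME, CR4.PAE) ->
 * 16-bit protected mode -> real mode (clear CR0.PE), VMMCALL, then the
 * same steps in reverse, with a second VMMCALL once back in long mode.
 */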
static void test_mode_switch(struct svm_test *test)
{
    asm volatile("	cli\n"
		 "	ljmp *1f\n" /* jump to 32-bit code segment */
		 "1:\n"
		 "	.long 2f\n"
		 "	.long " xstr(KERNEL_CS32) "\n"
		 ".code32\n"
		 "2:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl  $31, %%eax\n" /* clear PG */
		 "	movl %%eax, %%cr0\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btcl $8, %%eax\n" /* clear LME */
		 "	wrmsr\n"
		 "	movl %%cr4, %%eax\n"
		 "	btcl $5, %%eax\n" /* clear PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movw %[ds16], %%ax\n"
		 "	movw %%ax, %%ds\n"
		 "	ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
		 ".code16\n"
		 "3:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl $0, %%eax\n" /* clear PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl $0, $4f\n"   /* jump to real mode */
		 "4:\n"
		 "	vmmcall\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl $0, %%eax\n" /* set PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
		 ".code32\n"
		 "5:\n"
		 "	movl %%cr4, %%eax\n"
		 "	btsl $5, %%eax\n" /* set PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btsl $8, %%eax\n" /* set LME */
		 "	wrmsr\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl  $31, %%eax\n" /* set PG */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
		 ".code64\n\t"
		 "6:\n"
		 "	vmmcall\n"
		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		 : "rax", "rbx", "rcx", "rdx", "memory");
}

static bool mode_switch_finished(struct svm_test *test)
{
    u64 cr0, cr4, efer;

    cr0  = vmcb->save.cr0;
    cr4  = vmcb->save.cr4;
    efer = vmcb->save.efer;

    /* Only expect VMMCALL intercepts */
    if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
        return true;

    /* Jump over the VMMCALL instruction */
    vmcb->save.rip += 3;

    /* Do sanity checks */
    switch (test->scratch) {
    case 0:
        /* Test should be in real mode now - check for this */
        if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
            (cr4  & 0x00000020) || /* CR4.PAE */
            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
                return true;
        break;
    case 1:
        /* Test should be back in long mode now - check for this */
        if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
            ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
                return true;
        break;
    }

    /* one step forward */
    test->scratch += 1;

    return test->scratch == 2;
}

static bool check_mode_switch(struct svm_test *test)
{
    return test->scratch == 2;
}

extern u8 *io_bitmap;

static void prepare_ioio(struct svm_test *test)
{
    vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
    test->scratch = 0;
    memset(io_bitmap, 0, 8192);
    io_bitmap[8192] = 0xFF;
}

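/*
 * Illustrative sketch, not used by the tests: the I/O permission map has
 * one bit per port, and a set bit intercepts accesses to that port. A
 * multi-byte access starting at the last mapped port can consume one
 * extra byte, which is why prepare_ioio() above also programs the byte
 * at offset 8192.
 */
static inline bool iopm_intercepted(const u8 *iopm, u16 port)
{
    return iopm[port / 8] & (1 << (port % 8));
}
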
static void test_ioio(struct svm_test *test)
{
    // stage 0, test IO pass
    inb(0x5000);
    outb(0x0, 0x5000);
    if (get_test_stage(test) != 0)
        goto fail;

    // test IO width, in/out
    io_bitmap[0] = 0xFF;
    inc_test_stage(test);
    inb(0x0);
    if (get_test_stage(test) != 2)
        goto fail;

    outw(0x0, 0x0);
    if (get_test_stage(test) != 3)
        goto fail;

    inl(0x0);
    if (get_test_stage(test) != 4)
        goto fail;

    // test low/high IO port
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inb(0x5000);
    if (get_test_stage(test) != 5)
        goto fail;

    io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
    inw(0x9000);
    if (get_test_stage(test) != 6)
        goto fail;

    // test partial pass
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inl(0x4FFF);
    if (get_test_stage(test) != 7)
        goto fail;

    // test across pages
    inc_test_stage(test);
    inl(0x7FFF);
    if (get_test_stage(test) != 8)
        goto fail;

    inc_test_stage(test);
    io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
    inl(0x7FFF);
    if (get_test_stage(test) != 10)
        goto fail;

    io_bitmap[0] = 0;
    inl(0xFFFF);
    if (get_test_stage(test) != 11)
        goto fail;

    io_bitmap[0] = 0xFF;
    io_bitmap[8192] = 0;
    inl(0xFFFF);
    inc_test_stage(test);
    if (get_test_stage(test) != 12)
        goto fail;

    return;

fail:
    report(false, "stage %d", get_test_stage(test));
    test->scratch = -1;
}

static bool ioio_finished(struct svm_test *test)
{
    unsigned port, size;

    /* Only expect IOIO intercepts */
    if (vmcb->control.exit_code == SVM_EXIT_VMMCALL)
        return true;

    if (vmcb->control.exit_code != SVM_EXIT_IOIO)
        return true;

    /* one step forward */
    test->scratch += 1;

    port = vmcb->control.exit_info_1 >> 16;
    size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

    while (size--) {
        io_bitmap[port / 8] &= ~(1 << (port & 7));
        port++;
    }

    return false;
}

static bool check_ioio(struct svm_test *test)
{
    memset(io_bitmap, 0, 8193);
    return test->scratch != -1;
}

static void prepare_asid_zero(struct svm_test *test)
{
    vmcb->control.asid = 0;
}

static void test_asid_zero(struct svm_test *test)
{
    asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct svm_test *test)
{
    return vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct svm_test *test)
{
    vmcb_ident(vmcb);
    vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct svm_test *test)
{
    return true;
}

static void sel_cr0_bug_test(struct svm_test *test)
{
    unsigned long cr0;

    /* read CR0, set the CD bit, and write it back */
    cr0  = read_cr0();
    cr0 |= (1UL << 30);
    write_cr0(cr0);

    /*
     * If we are here the test failed: we are no longer in guest mode, so
     * we cannot trigger an intercept. Trigger a triple fault for now.
     */
    report(false, "sel_cr0 test. Cannot recover from this - exiting");
    exit(report_summary());
}

static bool sel_cr0_bug_check(struct svm_test *test)
{
    return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

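/*
 * For the NPT tests below: on a nested page fault, exit_info_1 holds a
 * #PF-style error code for the nested access (bit 0 P, bit 1 RW, bit 2 US,
 * bit 3 RSVD, bit 4 I/D), with bit 32 set when the fault hit the final
 * guest-physical translation and bit 33 when it hit the guest page-table
 * walk; exit_info_2 holds the faulting guest-physical address. For
 * example, 0x100000015 is P|US|I/D on a final translation: an instruction
 * fetch blocked by the NX bit.
 */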
static void npt_nx_prepare(struct svm_test *test)
{
    u64 *pte;

    vmcb_ident(vmcb);
    pte = npt_get_pte((u64)null_test);

    *pte |= (1ULL << 63);
}

static bool npt_nx_check(struct svm_test *test)
{
    u64 *pte = npt_get_pte((u64)null_test);

    *pte &= ~(1ULL << 63);

    vmcb->save.efer |= (1 << 11); /* EFER.NX */

    return (vmcb->control.exit_code == SVM_EXIT_NPF)
           && (vmcb->control.exit_info_1 == 0x100000015ULL);
}

static void npt_us_prepare(struct svm_test *test)
{
    u64 *pte;

    scratch_page = alloc_page();
    vmcb_ident(vmcb);
    pte = npt_get_pte((u64)scratch_page);

    *pte &= ~(1ULL << 2);
}

static void npt_us_test(struct svm_test *test)
{
    (void) *(volatile u64 *)scratch_page;
}

static bool npt_us_check(struct svm_test *test)
{
    u64 *pte = npt_get_pte((u64)scratch_page);

    *pte |= (1ULL << 2);

    return (vmcb->control.exit_code == SVM_EXIT_NPF)
           && (vmcb->control.exit_info_1 == 0x100000005ULL);
}

u64 save_pde;

static void npt_rsvd_prepare(struct svm_test *test)
{
    u64 *pde;

    vmcb_ident(vmcb);
    pde = npt_get_pde((u64) null_test);

    save_pde = *pde;
    *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
}

static bool npt_rsvd_check(struct svm_test *test)
{
    u64 *pde = npt_get_pde((u64) null_test);

    *pde = save_pde;

    return (vmcb->control.exit_code == SVM_EXIT_NPF)
            && (vmcb->control.exit_info_1 == 0x10000001dULL);
}

static void npt_rw_prepare(struct svm_test *test)
{
    u64 *pte;

    vmcb_ident(vmcb);
    pte = npt_get_pte(0x80000);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_test(struct svm_test *test)
{
    u64 *data = (void*)(0x80000);

    *data = 0;
}

static bool npt_rw_check(struct svm_test *test)
{
    u64 *pte = npt_get_pte(0x80000);

    *pte |= (1ULL << 1);

    return (vmcb->control.exit_code == SVM_EXIT_NPF)
           && (vmcb->control.exit_info_1 == 0x100000007ULL);
}

static void npt_rw_pfwalk_prepare(struct svm_test *test)
{
    u64 *pte;

    vmcb_ident(vmcb);
    pte = npt_get_pte(read_cr3());

    *pte &= ~(1ULL << 1);
}

static bool npt_rw_pfwalk_check(struct svm_test *test)
{
    u64 *pte = npt_get_pte(read_cr3());

    *pte |= (1ULL << 1);

    return (vmcb->control.exit_code == SVM_EXIT_NPF)
           && (vmcb->control.exit_info_1 == 0x200000006ULL)
           && (vmcb->control.exit_info_2 == read_cr3());
}

static void npt_rsvd_pfwalk_prepare(struct svm_test *test)
{
    u64 *pdpe;

    vmcb_ident(vmcb);

    pdpe = npt_get_pdpe();
    pdpe[0] |= (1ULL << 8);
}

static bool npt_rsvd_pfwalk_check(struct svm_test *test)
{
    u64 *pdpe = npt_get_pdpe();

    pdpe[0] &= ~(1ULL << 8);

    return (vmcb->control.exit_code == SVM_EXIT_NPF)
            && (vmcb->control.exit_info_1 == 0x20000000eULL);
}

static void npt_l1mmio_prepare(struct svm_test *test)
{
    vmcb_ident(vmcb);
}

u32 nested_apic_version1;
u32 nested_apic_version2;

static void npt_l1mmio_test(struct svm_test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030UL);

    nested_apic_version1 = *data;
    nested_apic_version2 = *data;
}

static bool npt_l1mmio_check(struct svm_test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030);
    u32 lvr = *data;

    return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
}

static void npt_rw_l1mmio_prepare(struct svm_test *test)
{
    u64 *pte;

    vmcb_ident(vmcb);
    pte = npt_get_pte(0xfee00080);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_l1mmio_test(struct svm_test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00080);

    *data = *data;
}

static bool npt_rw_l1mmio_check(struct svm_test *test)
{
    u64 *pte = npt_get_pte(0xfee00080);

    *pte |= (1ULL << 1);

    return (vmcb->control.exit_code == SVM_EXIT_NPF)
           && (vmcb->control.exit_info_1 == 0x100000007ULL);
}

#define TSC_ADJUST_VALUE    (1ll << 32)
#define TSC_OFFSET_VALUE    (-1ll << 48)
static bool ok;

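/*
 * Architecturally, writing a value V to IA32_TSC when the counter reads T
 * also adds (V - T) to IA32_TSC_ADJUST. The guest below writes the TSC
 * back by another TSC_ADJUST_VALUE, so TSC_ADJUST should then read at
 * most -2 * TSC_ADJUST_VALUE.
 */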
static void tsc_adjust_prepare(struct svm_test *test)
{
    default_prepare(test);
    vmcb->control.tsc_offset = TSC_OFFSET_VALUE;

    wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
    int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
    ok = adjust == -TSC_ADJUST_VALUE;
}

static void tsc_adjust_test(struct svm_test *test)
{
    int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
    ok &= adjust == -TSC_ADJUST_VALUE;

    uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
    wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);

    adjust = rdmsr(MSR_IA32_TSC_ADJUST);
    ok &= adjust <= -2 * TSC_ADJUST_VALUE;

    uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
    ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;

    uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
    ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
}

static bool tsc_adjust_check(struct svm_test *test)
{
    int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);

    wrmsr(MSR_IA32_TSC_ADJUST, 0);
    return ok && adjust <= -2 * TSC_ADJUST_VALUE;
}

static void latency_prepare(struct svm_test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmrun_min = latvmexit_min = -1ULL;
    latvmrun_max = latvmexit_max = 0;
    vmrun_sum = vmexit_sum = 0;
    tsc_start = rdtsc();
}

static void latency_test(struct svm_test *test)
{
    u64 cycles;

start:
    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmrun_max)
        latvmrun_max = cycles;

    if (cycles < latvmrun_min)
        latvmrun_min = cycles;

    vmrun_sum += cycles;

    tsc_start = rdtsc();

    asm volatile ("vmmcall" : : : "memory");
    goto start;
}

static bool latency_finished(struct svm_test *test)
{
    u64 cycles;

    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmexit_max)
        latvmexit_max = cycles;

    if (cycles < latvmexit_min)
        latvmexit_min = cycles;

    vmexit_sum += cycles;

    vmcb->save.rip += 3;

    runs -= 1;

    tsc_end = rdtsc();

    return runs == 0;
}

static bool latency_check(struct svm_test *test)
{
    printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
            latvmrun_min, vmrun_sum / LATENCY_RUNS);
    printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
            latvmexit_min, vmexit_sum / LATENCY_RUNS);
    return true;
}

static void lat_svm_insn_prepare(struct svm_test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
    latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
    vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct svm_test *test)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    u64 cycles;

    for ( ; runs != 0; runs--) {
        tsc_start = rdtsc();
        asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmload_max)
            latvmload_max = cycles;
        if (cycles < latvmload_min)
            latvmload_min = cycles;
        vmload_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmsave_max)
            latvmsave_max = cycles;
        if (cycles < latvmsave_min)
            latvmsave_min = cycles;
        vmsave_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("stgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latstgi_max)
            latstgi_max = cycles;
        if (cycles < latstgi_min)
            latstgi_min = cycles;
        stgi_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("clgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latclgi_max)
            latclgi_max = cycles;
        if (cycles < latclgi_min)
            latclgi_min = cycles;
        clgi_sum += cycles;
    }

    tsc_end = rdtsc();

    return true;
}

static bool lat_svm_insn_check(struct svm_test *test)
{
    printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
            latvmload_min, vmload_sum / LATENCY_RUNS);
    printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
            latvmsave_min, vmsave_sum / LATENCY_RUNS);
    printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
            latstgi_min, stgi_sum / LATENCY_RUNS);
    printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
            latclgi_min, clgi_sum / LATENCY_RUNS);
    return true;
}

bool pending_event_ipi_fired;
bool pending_event_guest_run;

static void pending_event_ipi_isr(isr_regs_t *regs)
{
    pending_event_ipi_fired = true;
    eoi();
}

static void pending_event_prepare(struct svm_test *test)
{
    int ipi_vector = 0xf1;

    default_prepare(test);

    pending_event_ipi_fired = false;

    handle_irq(ipi_vector, pending_event_ipi_isr);

    pending_event_guest_run = false;

    vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
    vmcb->control.int_ctl |= V_INTR_MASKING_MASK;

    apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
                  APIC_DM_FIXED | ipi_vector, 0);

    set_test_stage(test, 0);
}

static void pending_event_test(struct svm_test *test)
{
    pending_event_guest_run = true;
}

static bool pending_event_finished(struct svm_test *test)
{
    switch (get_test_stage(test)) {
    case 0:
        if (vmcb->control.exit_code != SVM_EXIT_INTR) {
            report(false, "VMEXIT not due to pending interrupt. Exit reason 0x%x",
                   vmcb->control.exit_code);
            return true;
        }

        vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
        vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

        if (pending_event_guest_run) {
            report(false, "Guest ran before host received IPI");
            return true;
        }

        irq_enable();
        asm volatile ("nop");
        irq_disable();

        if (!pending_event_ipi_fired) {
            report(false, "Pending interrupt not dispatched after IRQ enabled");
            return true;
        }
        break;

    case 1:
        if (!pending_event_guest_run) {
            report(false, "Guest did not resume when no interrupt was pending");
            return true;
        }
        break;
    }

    inc_test_stage(test);

    return get_test_stage(test) == 2;
}

static bool pending_event_check(struct svm_test *test)
{
    return get_test_stage(test) == 2;
}

static void pending_event_cli_prepare(struct svm_test *test)
{
    default_prepare(test);

    pending_event_ipi_fired = false;

    handle_irq(0xf1, pending_event_ipi_isr);

    apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
              APIC_DM_FIXED | 0xf1, 0);

    set_test_stage(test, 0);
}

static void pending_event_cli_prepare_gif_clear(struct svm_test *test)
{
    asm("cli");
}

static void pending_event_cli_test(struct svm_test *test)
{
    if (pending_event_ipi_fired == true) {
        set_test_stage(test, -1);
        report(false, "Interrupt preceded guest");
        vmmcall();
    }

    /* VINTR_MASKING is zero.  This should cause the IPI to fire.  */
    irq_enable();
    asm volatile ("nop");
    irq_disable();

    if (pending_event_ipi_fired != true) {
        set_test_stage(test, -1);
        report(false, "Interrupt not triggered by guest");
    }

    vmmcall();

    /*
     * Now VINTR_MASKING=1, but no interrupt is pending so
     * the VINTR interception should be clear in VMCB02.  Check
     * that L0 did not leave a stale VINTR in the VMCB.
     */
    irq_enable();
    asm volatile ("nop");
    irq_disable();
}

static bool pending_event_cli_finished(struct svm_test *test)
{
    if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
        report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
               vmcb->control.exit_code);
        return true;
    }

    switch (get_test_stage(test)) {
    case 0:
        vmcb->save.rip += 3;

        pending_event_ipi_fired = false;

        vmcb->control.int_ctl |= V_INTR_MASKING_MASK;

        /* Now entering again with VINTR_MASKING=1.  */
        apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
              APIC_DM_FIXED | 0xf1, 0);

        break;

    case 1:
        if (pending_event_ipi_fired == true) {
            report(false, "Interrupt triggered by guest");
            return true;
        }

        irq_enable();
        asm volatile ("nop");
        irq_disable();

        if (pending_event_ipi_fired != true) {
            report(false, "Interrupt not triggered by host");
            return true;
        }

        break;

    default:
        return true;
    }

    inc_test_stage(test);

    return get_test_stage(test) == 2;
}

static bool pending_event_cli_check(struct svm_test *test)
{
    return get_test_stage(test) == 2;
}

#define TIMER_VECTOR    222

static volatile bool timer_fired;

static void timer_isr(isr_regs_t *regs)
{
    timer_fired = true;
    apic_write(APIC_EOI, 0);
}

static void interrupt_prepare(struct svm_test *test)
{
    default_prepare(test);
    handle_irq(TIMER_VECTOR, timer_isr);
    timer_fired = false;
    set_test_stage(test, 0);
}

static void interrupt_test(struct svm_test *test)
{
    long long start, loops;

    apic_write(APIC_LVTT, TIMER_VECTOR);
    irq_enable();
    apic_write(APIC_TMICT, 1); // Timer Initial Count register (0x380), one-shot mode
    for (loops = 0; loops < 10000000 && !timer_fired; loops++)
        asm volatile ("nop");

    report(timer_fired, "direct interrupt while running guest");

    if (!timer_fired) {
        set_test_stage(test, -1);
        vmmcall();
    }

    apic_write(APIC_TMICT, 0);
    irq_disable();
    vmmcall();

    timer_fired = false;
    apic_write(APIC_TMICT, 1);
    for (loops = 0; loops < 10000000 && !timer_fired; loops++)
        asm volatile ("nop");

    report(timer_fired, "intercepted interrupt while running guest");

    if (!timer_fired) {
        set_test_stage(test, -1);
        vmmcall();
    }

    irq_enable();
    apic_write(APIC_TMICT, 0);
    irq_disable();

    timer_fired = false;
    start = rdtsc();
    apic_write(APIC_TMICT, 1000000);
    asm volatile ("sti; hlt");

    report(rdtsc() - start > 10000 && timer_fired,
          "direct interrupt + hlt");

    if (!timer_fired) {
        set_test_stage(test, -1);
        vmmcall();
    }

    apic_write(APIC_TMICT, 0);
    irq_disable();
    vmmcall();

    timer_fired = false;
    start = rdtsc();
    apic_write(APIC_TMICT, 1000000);
    asm volatile ("hlt");

    report(rdtsc() - start > 10000 && timer_fired,
           "intercepted interrupt + hlt");

    if (!timer_fired) {
        set_test_stage(test, -1);
        vmmcall();
    }

    apic_write(APIC_TMICT, 0);
    irq_disable();
}

static bool interrupt_finished(struct svm_test *test)
{
    switch (get_test_stage(test)) {
    case 0:
    case 2:
        if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
            report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
                   vmcb->control.exit_code);
            return true;
        }
        vmcb->save.rip += 3;

        vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
        vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
        break;

    case 1:
    case 3:
        if (vmcb->control.exit_code != SVM_EXIT_INTR) {
            report(false, "VMEXIT not due to intr intercept. Exit reason 0x%x",
                   vmcb->control.exit_code);
            return true;
        }

        /* The guest is not woken up from HLT and RIP still points to it.  */
        if (get_test_stage(test) == 3) {
            vmcb->save.rip++;
        }

        irq_enable();
        asm volatile ("nop");
        irq_disable();

        vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
        vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
        break;

    case 4:
        break;

    default:
        return true;
    }

    inc_test_stage(test);

    return get_test_stage(test) == 5;
}

static bool interrupt_check(struct svm_test *test)
{
    return get_test_stage(test) == 5;
}

static volatile bool nmi_fired;

static void nmi_handler(isr_regs_t *regs)
{
    nmi_fired = true;
    apic_write(APIC_EOI, 0);
}

static void nmi_prepare(struct svm_test *test)
{
    default_prepare(test);
    nmi_fired = false;
    handle_irq(NMI_VECTOR, nmi_handler);
    set_test_stage(test, 0);
}

static void nmi_test(struct svm_test *test)
{
    apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);

    report(nmi_fired, "direct NMI while running guest");

    if (!nmi_fired)
        set_test_stage(test, -1);

    vmmcall();

    nmi_fired = false;

    apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);

    if (!nmi_fired) {
        report(nmi_fired, "intercepted pending NMI not dispatched");
        set_test_stage(test, -1);
    }
}

static bool nmi_finished(struct svm_test *test)
{
    switch (get_test_stage(test)) {
    case 0:
        if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
            report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
                   vmcb->control.exit_code);
            return true;
        }
        vmcb->save.rip += 3;

        vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
        break;

    case 1:
        if (vmcb->control.exit_code != SVM_EXIT_NMI) {
            report(false, "VMEXIT not due to NMI intercept. Exit reason 0x%x",
                   vmcb->control.exit_code);
            return true;
        }

        report(true, "NMI intercept while running guest");
        break;

    case 2:
        break;

    default:
        return true;
    }

    inc_test_stage(test);

    return get_test_stage(test) == 3;
}

static bool nmi_check(struct svm_test *test)
{
    return get_test_stage(test) == 3;
}

#define NMI_DELAY 100000000ULL

static void nmi_message_thread(void *_test)
{
    struct svm_test *test = _test;

    while (get_test_stage(test) != 1)
        pause();

    delay(NMI_DELAY);

    apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);

    while (get_test_stage(test) != 2)
        pause();

    delay(NMI_DELAY);

    apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
}

static void nmi_hlt_test(struct svm_test *test)
{
    long long start;

    on_cpu_async(1, nmi_message_thread, test);

    start = rdtsc();

    set_test_stage(test, 1);

    asm volatile ("hlt");

    report((rdtsc() - start > NMI_DELAY) && nmi_fired,
          "direct NMI + hlt");

    if (!nmi_fired)
        set_test_stage(test, -1);

    nmi_fired = false;

    vmmcall();

    start = rdtsc();

    set_test_stage(test, 2);

    asm volatile ("hlt");

    report((rdtsc() - start > NMI_DELAY) && nmi_fired,
           "intercepted NMI + hlt");

    if (!nmi_fired) {
        report(nmi_fired, "intercepted pending NMI not dispatched");
        set_test_stage(test, -1);
        vmmcall();
    }

    set_test_stage(test, 3);
}

static bool nmi_hlt_finished(struct svm_test *test)
{
    switch (get_test_stage(test)) {
    case 1:
        if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
            report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
                   vmcb->control.exit_code);
            return true;
        }
        vmcb->save.rip += 3;

        vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
        break;

    case 2:
        if (vmcb->control.exit_code != SVM_EXIT_NMI) {
            report(false, "VMEXIT not due to NMI intercept. Exit reason 0x%x",
                   vmcb->control.exit_code);
            return true;
        }

        /* The guest is not woken up from HLT and RIP still points to it.  */
        vmcb->save.rip++;

        report(true, "NMI intercept while running guest");
        break;

    case 3:
        break;

    default:
        return true;
    }

    return get_test_stage(test) == 3;
}

static bool nmi_hlt_check(struct svm_test *test)
{
    return get_test_stage(test) == 3;
}

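/*
 * EVENTINJ encoding used below: bits 7:0 vector, bits 10:8 type
 * (SVM_EVTINJ_TYPE_EXEPT = exception), bit 31 VALID. The CPU clears
 * VALID once the event has been injected; injecting vector 2 (NMI) as
 * an exception is invalid and must fail VMRUN, which is what the
 * SVM_EXIT_ERR stage checks.
 */
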
static volatile int count_exc = 0;

static void my_isr(struct ex_regs *r)
{
    count_exc++;
}

static void exc_inject_prepare(struct svm_test *test)
{
    default_prepare(test);
    handle_exception(DE_VECTOR, my_isr);
    handle_exception(NMI_VECTOR, my_isr);
}

static void exc_inject_test(struct svm_test *test)
{
    asm volatile ("vmmcall\n\tvmmcall\n\t");
}

static bool exc_inject_finished(struct svm_test *test)
{
    vmcb->save.rip += 3;

    switch (get_test_stage(test)) {
    case 0:
        if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
            report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
                   vmcb->control.exit_code);
            return true;
        }
        vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
        break;

    case 1:
        if (vmcb->control.exit_code != SVM_EXIT_ERR) {
            report(false, "VMEXIT not due to error. Exit reason 0x%x",
                   vmcb->control.exit_code);
            return true;
        }
        report(count_exc == 0, "exception with vector 2 not injected");
        vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
        break;

    case 2:
        if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
            report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
                   vmcb->control.exit_code);
            return true;
        }
        report(count_exc == 1, "divide error exception injected");
        report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared");
        break;

    default:
        return true;
    }

    inc_test_stage(test);

    return get_test_stage(test) == 3;
}

static bool exc_inject_check(struct svm_test *test)
{
    return count_exc == 1 && get_test_stage(test) == 3;
}

#define TEST(name) { #name, .v2 = name }

/*
 * v2 tests
 */

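/*
 * Unlike the table-driven tests above, a v2 test (registered through the
 * TEST() macro's .v2 member) is a single function that drives VMRUN
 * itself, e.g. via test_set_guest() and svm_vmrun().
 */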
static void basic_guest_main(struct svm_test *test)
{
}

static void svm_guest_state_test(void)
{
	test_set_guest(basic_guest_main);

	/*
	 * Clearing EFER.SVME in the guest state is illegal
	 */
	u64 efer_saved = vmcb->save.efer;
	u64 efer = efer_saved;

	report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer);
	efer &= ~EFER_SVME;
	vmcb->save.efer = efer;
	report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer);
	vmcb->save.efer = efer_saved;

	/*
	 * Setting CR0.NW while CR0.CD is clear is an illegal combination
	 */
	u64 cr0_saved = vmcb->save.cr0;
	u64 cr0 = cr0_saved;

	cr0 |= X86_CR0_CD;
	cr0 &= ~X86_CR0_NW;
	vmcb->save.cr0 = cr0;
	report (svm_vmrun() == SVM_EXIT_VMMCALL, "CR0: %lx", cr0);
	cr0 |= X86_CR0_NW;
	vmcb->save.cr0 = cr0;
	report (svm_vmrun() == SVM_EXIT_VMMCALL, "CR0: %lx", cr0);
	cr0 &= ~X86_CR0_NW;
	cr0 &= ~X86_CR0_CD;
	vmcb->save.cr0 = cr0;
	report (svm_vmrun() == SVM_EXIT_VMMCALL, "CR0: %lx", cr0);
	cr0 |= X86_CR0_NW;
	vmcb->save.cr0 = cr0;
	report (svm_vmrun() == SVM_EXIT_ERR, "CR0: %lx", cr0);
	vmcb->save.cr0 = cr0_saved;

	/*
	 * Any non-zero bit in CR0[63:32] is illegal
	 */
	int i;

	cr0 = cr0_saved;
	for (i = 32; i < 63; i = i + 4) {
		cr0 = cr0_saved | (1ull << i);
		vmcb->save.cr0 = cr0;
		report (svm_vmrun() == SVM_EXIT_ERR, "CR0[63:32]: %lx",
		    cr0 >> 32);
	}
	vmcb->save.cr0 = cr0_saved;
}

struct svm_test svm_tests[] = {
    { "null", default_supported, default_prepare,
      default_prepare_gif_clear, null_test,
      default_finished, null_check },
    { "vmrun", default_supported, default_prepare,
      default_prepare_gif_clear, test_vmrun,
      default_finished, check_vmrun },
    { "ioio", default_supported, prepare_ioio,
      default_prepare_gif_clear, test_ioio,
      ioio_finished, check_ioio },
    { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
      default_prepare_gif_clear, null_test, default_finished,
      check_no_vmrun_int },
    { "cr3 read intercept", default_supported,
      prepare_cr3_intercept, default_prepare_gif_clear,
      test_cr3_intercept, default_finished, check_cr3_intercept },
    { "cr3 read nointercept", default_supported, default_prepare,
      default_prepare_gif_clear, test_cr3_intercept, default_finished,
      check_cr3_nointercept },
    { "cr3 read intercept emulate", smp_supported,
      prepare_cr3_intercept_bypass, default_prepare_gif_clear,
      test_cr3_intercept_bypass, default_finished, check_cr3_intercept },
    { "dr intercept check", default_supported, prepare_dr_intercept,
      default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished,
      check_dr_intercept },
    { "next_rip", next_rip_supported, prepare_next_rip,
      default_prepare_gif_clear, test_next_rip,
      default_finished, check_next_rip },
    { "msr intercept check", default_supported, prepare_msr_intercept,
      default_prepare_gif_clear, test_msr_intercept,
      msr_intercept_finished, check_msr_intercept },
    { "mode_switch", default_supported, prepare_mode_switch,
      default_prepare_gif_clear, test_mode_switch,
      mode_switch_finished, check_mode_switch },
    { "asid_zero", default_supported, prepare_asid_zero,
      default_prepare_gif_clear, test_asid_zero,
      default_finished, check_asid_zero },
    { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare,
      default_prepare_gif_clear, sel_cr0_bug_test,
      sel_cr0_bug_finished, sel_cr0_bug_check },
    { "npt_nx", npt_supported, npt_nx_prepare,
      default_prepare_gif_clear, null_test,
      default_finished, npt_nx_check },
    { "npt_us", npt_supported, npt_us_prepare,
      default_prepare_gif_clear, npt_us_test,
      default_finished, npt_us_check },
    { "npt_rsvd", npt_supported, npt_rsvd_prepare,
      default_prepare_gif_clear, null_test,
      default_finished, npt_rsvd_check },
    { "npt_rw", npt_supported, npt_rw_prepare,
      default_prepare_gif_clear, npt_rw_test,
      default_finished, npt_rw_check },
    { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare,
      default_prepare_gif_clear, null_test,
      default_finished, npt_rsvd_pfwalk_check },
    { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare,
      default_prepare_gif_clear, null_test,
      default_finished, npt_rw_pfwalk_check },
    { "npt_l1mmio", npt_supported, npt_l1mmio_prepare,
      default_prepare_gif_clear, npt_l1mmio_test,
      default_finished, npt_l1mmio_check },
    { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare,
      default_prepare_gif_clear, npt_rw_l1mmio_test,
      default_finished, npt_rw_l1mmio_check },
    { "tsc_adjust", default_supported, tsc_adjust_prepare,
      default_prepare_gif_clear, tsc_adjust_test,
      default_finished, tsc_adjust_check },
    { "latency_run_exit", default_supported, latency_prepare,
      default_prepare_gif_clear, latency_test,
      latency_finished, latency_check },
    { "latency_svm_insn", default_supported, lat_svm_insn_prepare,
      default_prepare_gif_clear, null_test,
      lat_svm_insn_finished, lat_svm_insn_check },
    { "exc_inject", default_supported, exc_inject_prepare,
      default_prepare_gif_clear, exc_inject_test,
      exc_inject_finished, exc_inject_check },
    { "pending_event", default_supported, pending_event_prepare,
      default_prepare_gif_clear,
      pending_event_test, pending_event_finished, pending_event_check },
    { "pending_event_cli", default_supported, pending_event_cli_prepare,
      pending_event_cli_prepare_gif_clear,
      pending_event_cli_test, pending_event_cli_finished,
      pending_event_cli_check },
    { "interrupt", default_supported, interrupt_prepare,
      default_prepare_gif_clear, interrupt_test,
      interrupt_finished, interrupt_check },
    { "nmi", default_supported, nmi_prepare,
      default_prepare_gif_clear, nmi_test,
      nmi_finished, nmi_check },
    { "nmi_hlt", smp_supported, nmi_prepare,
      default_prepare_gif_clear, nmi_hlt_test,
      nmi_hlt_finished, nmi_hlt_check },
    TEST(svm_guest_state_test),
    { NULL, NULL, NULL, NULL, NULL, NULL, NULL }
};