xref: /kvm-unit-tests/x86/svm_tests.c (revision c67363eef051e7334935e6009286da7f7a73831d)
1 #include "svm.h"
2 #include "libcflat.h"
3 #include "processor.h"
4 #include "desc.h"
5 #include "msr.h"
6 #include "vm.h"
7 #include "smp.h"
8 #include "types.h"
9 #include "alloc_page.h"
10 #include "isr.h"
11 #include "apic.h"
12 #include "delay.h"
13 
14 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f
15 
16 static void *scratch_page;
17 
18 #define LATENCY_RUNS 1000000
19 
20 u64 tsc_start;
21 u64 tsc_end;
22 
23 u64 vmrun_sum, vmexit_sum;
24 u64 vmsave_sum, vmload_sum;
25 u64 stgi_sum, clgi_sum;
26 u64 latvmrun_max;
27 u64 latvmrun_min;
28 u64 latvmexit_max;
29 u64 latvmexit_min;
30 u64 latvmload_max;
31 u64 latvmload_min;
32 u64 latvmsave_max;
33 u64 latvmsave_min;
34 u64 latstgi_max;
35 u64 latstgi_min;
36 u64 latclgi_max;
37 u64 latclgi_min;
38 u64 runs;
39 
40 static void null_test(struct svm_test *test)
41 {
42 }
43 
44 static bool null_check(struct svm_test *test)
45 {
46     return vmcb->control.exit_code == SVM_EXIT_VMMCALL;
47 }
48 
49 static void prepare_no_vmrun_int(struct svm_test *test)
50 {
51     vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
52 }
53 
54 static bool check_no_vmrun_int(struct svm_test *test)
55 {
56     return vmcb->control.exit_code == SVM_EXIT_ERR;
57 }
58 
59 static void test_vmrun(struct svm_test *test)
60 {
61     asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb)));
62 }
63 
64 static bool check_vmrun(struct svm_test *test)
65 {
66     return vmcb->control.exit_code == SVM_EXIT_VMRUN;
67 }
68 
69 static void prepare_rsm_intercept(struct svm_test *test)
70 {
71     default_prepare(test);
72     vmcb->control.intercept |= 1 << INTERCEPT_RSM;
73     vmcb->control.intercept_exceptions |= (1ULL << UD_VECTOR);
74 }
75 
76 static void test_rsm_intercept(struct svm_test *test)
77 {
78     asm volatile ("rsm" : : : "memory");
79 }
80 
81 static bool check_rsm_intercept(struct svm_test *test)
82 {
83     return get_test_stage(test) == 2;
84 }
85 
86 static bool finished_rsm_intercept(struct svm_test *test)
87 {
88     switch (get_test_stage(test)) {
89     case 0:
90         if (vmcb->control.exit_code != SVM_EXIT_RSM) {
91             report(false, "VMEXIT not due to rsm. Exit reason 0x%x",
92                    vmcb->control.exit_code);
93             return true;
94         }
95         vmcb->control.intercept &= ~(1 << INTERCEPT_RSM);
96         inc_test_stage(test);
97         break;
98 
99     case 1:
100         if (vmcb->control.exit_code != SVM_EXIT_EXCP_BASE + UD_VECTOR) {
101             report(false, "VMEXIT not due to #UD. Exit reason 0x%x",
102                    vmcb->control.exit_code);
103             return true;
104         }
105         vmcb->save.rip += 2;
106         inc_test_stage(test);
107         break;
108 
109     default:
110         return true;
111     }
112     return get_test_stage(test) == 2;
113 }
114 
115 static void prepare_cr3_intercept(struct svm_test *test)
116 {
117     default_prepare(test);
118     vmcb->control.intercept_cr_read |= 1 << 3;
119 }
120 
121 static void test_cr3_intercept(struct svm_test *test)
122 {
123     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
124 }
125 
126 static bool check_cr3_intercept(struct svm_test *test)
127 {
128     return vmcb->control.exit_code == SVM_EXIT_READ_CR3;
129 }
130 
131 static bool check_cr3_nointercept(struct svm_test *test)
132 {
133     return null_check(test) && test->scratch == read_cr3();
134 }
135 
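/*
 * A sketch of the intended scenario, as read from the code (not spelled out
 * in the source): CPU1 waits for the guest to publish test->scratch, then
 * rewrites the guest instruction at mmio_insn -- initially a MOV targeting
 * the MMIO address 0xa0000, which the host must emulate -- into
 * "mov %cr3, %rax; nop" (bytes 0f 20 d8 90, i.e. the u32 0x90d8200f).  If the
 * host resumed the guest using stale, pre-modification instruction bytes, the
 * new CR3 read would bypass the CR3 read intercept installed by
 * prepare_cr3_intercept_bypass().
 */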
136 static void corrupt_cr3_intercept_bypass(void *_test)
137 {
138     struct svm_test *test = _test;
139     extern volatile u32 mmio_insn;
140 
141     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
142         pause();
143     pause();
144     pause();
145     pause();
146     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
147 }
148 
149 static void prepare_cr3_intercept_bypass(struct svm_test *test)
150 {
151     default_prepare(test);
152     vmcb->control.intercept_cr_read |= 1 << 3;
153     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
154 }
155 
156 static void test_cr3_intercept_bypass(struct svm_test *test)
157 {
158     ulong a = 0xa0000;
159 
160     test->scratch = 1;
161     while (test->scratch != 2)
162         barrier();
163 
164     asm volatile ("mmio_insn: mov %0, (%0); nop"
165                   : "+a"(a) : : "memory");
166     test->scratch = a;
167 }
168 
169 static void prepare_dr_intercept(struct svm_test *test)
170 {
171     default_prepare(test);
172     vmcb->control.intercept_dr_read = 0xff;
173     vmcb->control.intercept_dr_write = 0xff;
174 }
175 
176 static void test_dr_intercept(struct svm_test *test)
177 {
178     unsigned int i, failcnt = 0;
179 
180     /* Loop testing debug register reads */
181     for (i = 0; i < 8; i++) {
182 
183         switch (i) {
184         case 0:
185             asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
186             break;
187         case 1:
188             asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
189             break;
190         case 2:
191             asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
192             break;
193         case 3:
194             asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
195             break;
196         case 4:
197             asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
198             break;
199         case 5:
200             asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
201             break;
202         case 6:
203             asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
204             break;
205         case 7:
206             asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
207             break;
208         }
209 
210         if (test->scratch != i) {
211             report(false, "dr%u read intercept", i);
212             failcnt++;
213         }
214     }
215 
216     /* Loop testing debug register writes */
217     for (i = 0; i < 8; i++) {
218 
219         switch (i) {
220         case 0:
221             asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
222             break;
223         case 1:
224             asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
225             break;
226         case 2:
227             asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
228             break;
229         case 3:
230             asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
231             break;
232         case 4:
233             asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
234             break;
235         case 5:
236             asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
237             break;
238         case 6:
239             asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
240             break;
241         case 7:
242             asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
243             break;
244         }
245 
246         if (test->scratch != i) {
247             report(false, "dr%u write intercept", i);
248             failcnt++;
249         }
250     }
251 
252     test->scratch = failcnt;
253 }
254 
255 static bool dr_intercept_finished(struct svm_test *test)
256 {
257     ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0);
258 
259     /* Only expect DR intercepts */
260     if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
261         return true;
262 
263     /*
264      * Compute debug register number.
265      * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
266      * Programmer's Manual Volume 2 - System Programming:
267      * http://support.amd.com/TechDocs/24593.pdf
268      * there are 16 VMEXIT codes each for DR read and write.
269      */
270     test->scratch = (n % 16);
271 
272     /* Jump over MOV instruction */
273     vmcb->save.rip += 3;
274 
275     return false;
276 }
277 
278 static bool check_dr_intercept(struct svm_test *test)
279 {
280     return !test->scratch;
281 }
282 
283 static bool next_rip_supported(void)
284 {
285     return this_cpu_has(X86_FEATURE_NRIPS);
286 }
287 
288 static void prepare_next_rip(struct svm_test *test)
289 {
290     vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
291 }
292 
293 
294 static void test_next_rip(struct svm_test *test)
295 {
296     asm volatile ("rdtsc\n\t"
297                   ".globl exp_next_rip\n\t"
298                   "exp_next_rip:\n\t" ::: "eax", "edx");
299 }
300 
301 static bool check_next_rip(struct svm_test *test)
302 {
303     extern char exp_next_rip;
304     unsigned long address = (unsigned long)&exp_next_rip;
305 
306     return address == vmcb->control.next_rip;
307 }
308 
309 extern u8 *msr_bitmap;
310 
311 static void prepare_msr_intercept(struct svm_test *test)
312 {
313     default_prepare(test);
314     vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
315     vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
316     memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
317 }
318 
319 static void test_msr_intercept(struct svm_test *test)
320 {
321     unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
322     unsigned long msr_index;
323 
324     for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
325         if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
326             /*
327              * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
328              * Programmer's Manual volume 2 - System Programming:
329              * http://support.amd.com/TechDocs/24593.pdf
330              * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
331              */
332             continue;
333         }
334 
335         /* Skips gaps between supported MSR ranges */
336         if (msr_index == 0x2000)
337             msr_index = 0xc0000000;
338         else if (msr_index == 0xc0002000)
339             msr_index = 0xc0010000;
340 
341         test->scratch = -1;
342 
343         rdmsr(msr_index);
344 
345         /* Check that a read intercept occurred for MSR at msr_index */
346         if (test->scratch != msr_index)
347             report(false, "MSR 0x%lx read intercept", msr_index);
348 
349         /*
350      * Poor man's approach to generating a value that
351          * seems arbitrary each time around the loop.
352          */
353         msr_value += (msr_value << 1);
354 
355         wrmsr(msr_index, msr_value);
356 
357         /* Check that a write intercept occurred for MSR with msr_value */
358         if (test->scratch != msr_value)
359             report(false, "MSR 0x%lx write intercept", msr_index);
360     }
361 
362     test->scratch = -2;
363 }
364 
365 static bool msr_intercept_finished(struct svm_test *test)
366 {
367     u32 exit_code = vmcb->control.exit_code;
368     u64 exit_info_1;
369     u8 *opcode;
370 
371     if (exit_code == SVM_EXIT_MSR) {
372         exit_info_1 = vmcb->control.exit_info_1;
373     } else {
374         /*
375          * If #GP exception occurs instead, check that it was
376          * for RDMSR/WRMSR and set exit_info_1 accordingly.
377          */
378 
379         if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
380             return true;
381 
382         opcode = (u8 *)vmcb->save.rip;
383         if (opcode[0] != 0x0f)
384             return true;
385 
386         switch (opcode[1]) {
387         case 0x30: /* WRMSR */
388             exit_info_1 = 1;
389             break;
390         case 0x32: /* RDMSR */
391             exit_info_1 = 0;
392             break;
393         default:
394             return true;
395         }
396 
397         /*
398      * Warn that #GP exception occurred instead.
399          * RCX holds the MSR index.
400          */
401         printf("%s 0x%lx #GP exception\n",
402             exit_info_1 ? "WRMSR" : "RDMSR", get_regs().rcx);
403     }
404 
405     /* Jump over RDMSR/WRMSR instruction */
406     vmcb->save.rip += 2;
407 
408     /*
409      * Test whether the intercept was for RDMSR/WRMSR.
410      * For RDMSR, test->scratch is set to the MSR index;
411      *      RCX holds the MSR index.
412      * For WRMSR, test->scratch is set to the MSR value;
413      *      RDX holds the upper 32 bits of the MSR value,
414      *      while RAX holds its lower 32 bits.
415      */
416     if (exit_info_1)
417         test->scratch =
418             ((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff));
419     else
420         test->scratch = get_regs().rcx;
421 
422     return false;
423 }
424 
425 static bool check_msr_intercept(struct svm_test *test)
426 {
427     memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
428     return (test->scratch == -2);
429 }
430 
431 static void prepare_mode_switch(struct svm_test *test)
432 {
433     vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
434                                              |  (1ULL << UD_VECTOR)
435                                              |  (1ULL << DF_VECTOR)
436                                              |  (1ULL << PF_VECTOR);
437     test->scratch = 0;
438 }
439 
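/*
 * Walk the guest down through every processor mode and back: 64-bit long
 * mode -> 32-bit protected mode (PG, LME and PAE cleared) -> 16-bit
 * protected mode -> real mode (first VMMCALL) -> protected mode -> long
 * mode (second VMMCALL).  mode_switch_finished() sanity-checks CR0/CR4/EFER
 * at the VMMCALL exits.
 */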
440 static void test_mode_switch(struct svm_test *test)
441 {
442     asm volatile("	cli\n"
443 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
444 		 "1:\n"
445 		 "	.long 2f\n"
446 		 "	.long " xstr(KERNEL_CS32) "\n"
447 		 ".code32\n"
448 		 "2:\n"
449 		 "	movl %%cr0, %%eax\n"
450 		 "	btcl  $31, %%eax\n" /* clear PG */
451 		 "	movl %%eax, %%cr0\n"
452 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
453 		 "	rdmsr\n"
454 		 "	btcl $8, %%eax\n" /* clear LME */
455 		 "	wrmsr\n"
456 		 "	movl %%cr4, %%eax\n"
457 		 "	btcl $5, %%eax\n" /* clear PAE */
458 		 "	movl %%eax, %%cr4\n"
459 		 "	movw %[ds16], %%ax\n"
460 		 "	movw %%ax, %%ds\n"
461 		 "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
462 		 ".code16\n"
463 		 "3:\n"
464 		 "	movl %%cr0, %%eax\n"
465 		 "	btcl $0, %%eax\n" /* clear PE  */
466 		 "	movl %%eax, %%cr0\n"
467 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
468 		 "4:\n"
469 		 "	vmmcall\n"
470 		 "	movl %%cr0, %%eax\n"
471 		 "	btsl $0, %%eax\n" /* set PE  */
472 		 "	movl %%eax, %%cr0\n"
473 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
474 		 ".code32\n"
475 		 "5:\n"
476 		 "	movl %%cr4, %%eax\n"
477 		 "	btsl $5, %%eax\n" /* set PAE */
478 		 "	movl %%eax, %%cr4\n"
479 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
480 		 "	rdmsr\n"
481 		 "	btsl $8, %%eax\n" /* set LME */
482 		 "	wrmsr\n"
483 		 "	movl %%cr0, %%eax\n"
484 		 "	btsl  $31, %%eax\n" /* set PG */
485 		 "	movl %%eax, %%cr0\n"
486 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
487 		 ".code64\n\t"
488 		 "6:\n"
489 		 "	vmmcall\n"
490 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
491 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
492 		 : "rax", "rbx", "rcx", "rdx", "memory");
493 }
494 
495 static bool mode_switch_finished(struct svm_test *test)
496 {
497     u64 cr0, cr4, efer;
498 
499     cr0  = vmcb->save.cr0;
500     cr4  = vmcb->save.cr4;
501     efer = vmcb->save.efer;
502 
503     /* Only expect VMMCALL intercepts */
504     if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
505 	    return true;
506 
507     /* Jump over VMMCALL instruction */
508     vmcb->save.rip += 3;
509 
510     /* Do sanity checks */
511     switch (test->scratch) {
512     case 0:
513         /* Test should be in real mode now - check for this */
514         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
515             (cr4  & 0x00000020) || /* CR4.PAE */
516             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
517                 return true;
518         break;
519     case 2:
520         /* Test should be back in long-mode now - check for this */
521         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
522             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
523             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
524 		    return true;
525 	break;
526     }
527 
528     /* one step forward */
529     test->scratch += 1;
530 
531     return test->scratch == 2;
532 }
533 
534 static bool check_mode_switch(struct svm_test *test)
535 {
536 	return test->scratch == 2;
537 }
538 
539 extern u8 *io_bitmap;
540 
541 static void prepare_ioio(struct svm_test *test)
542 {
543     vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
544     test->scratch = 0;
545     memset(io_bitmap, 0, 8192);
546     io_bitmap[8192] = 0xFF;
547 }
548 
549 static void test_ioio(struct svm_test *test)
550 {
551     // stage 0, test IO pass
552     inb(0x5000);
553     outb(0x0, 0x5000);
554     if (get_test_stage(test) != 0)
555         goto fail;
556 
557     // test IO width, in/out
558     io_bitmap[0] = 0xFF;
559     inc_test_stage(test);
560     inb(0x0);
561     if (get_test_stage(test) != 2)
562         goto fail;
563 
564     outw(0x0, 0x0);
565     if (get_test_stage(test) != 3)
566         goto fail;
567 
568     inl(0x0);
569     if (get_test_stage(test) != 4)
570         goto fail;
571 
572     // test low/high IO port
573     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
574     inb(0x5000);
575     if (get_test_stage(test) != 5)
576         goto fail;
577 
578     io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
579     inw(0x9000);
580     if (get_test_stage(test) != 6)
581         goto fail;
582 
583     // test partial pass
584     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
585     inl(0x4FFF);
586     if (get_test_stage(test) != 7)
587         goto fail;
588 
589     // test across pages
590     inc_test_stage(test);
591     inl(0x7FFF);
592     if (get_test_stage(test) != 8)
593         goto fail;
594 
595     inc_test_stage(test);
596     io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
597     inl(0x7FFF);
598     if (get_test_stage(test) != 10)
599         goto fail;
600 
601     io_bitmap[0] = 0;
602     inl(0xFFFF);
603     if (get_test_stage(test) != 11)
604         goto fail;
605 
606     io_bitmap[0] = 0xFF;
607     io_bitmap[8192] = 0;
608     inl(0xFFFF);
609     inc_test_stage(test);
610     if (get_test_stage(test) != 12)
611         goto fail;
612 
613     return;
614 
615 fail:
616     report(false, "stage %d", get_test_stage(test));
617     test->scratch = -1;
618 }
619 
620 static bool ioio_finished(struct svm_test *test)
621 {
622     unsigned port, size;
623 
624     /* Only expect IOIO intercepts */
625     if (vmcb->control.exit_code == SVM_EXIT_VMMCALL)
626         return true;
627 
628     if (vmcb->control.exit_code != SVM_EXIT_IOIO)
629         return true;
630 
631     /* one step forward */
632     test->scratch += 1;
633 
634     port = vmcb->control.exit_info_1 >> 16;
635     size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;
636 
637     while (size--) {
638         io_bitmap[port / 8] &= ~(1 << (port & 7));
639         port++;
640     }
641 
642     return false;
643 }
644 
645 static bool check_ioio(struct svm_test *test)
646 {
647     memset(io_bitmap, 0, 8193);
648     return test->scratch != -1;
649 }
650 
651 static void prepare_asid_zero(struct svm_test *test)
652 {
653     vmcb->control.asid = 0;
654 }
655 
656 static void test_asid_zero(struct svm_test *test)
657 {
658     asm volatile ("vmmcall\n\t");
659 }
660 
661 static bool check_asid_zero(struct svm_test *test)
662 {
663     return vmcb->control.exit_code == SVM_EXIT_ERR;
664 }
665 
666 static void sel_cr0_bug_prepare(struct svm_test *test)
667 {
668     vmcb_ident(vmcb);
669     vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
670 }
671 
672 static bool sel_cr0_bug_finished(struct svm_test *test)
673 {
674 	return true;
675 }
676 
677 static void sel_cr0_bug_test(struct svm_test *test)
678 {
679     unsigned long cr0;
680 
681     /* read cr0, clear CD, and write back */
682     cr0  = read_cr0();
683     cr0 |= (1UL << 30);
684     write_cr0(cr0);
685 
686     /*
687      * If we are here, the test failed; we are not in guest mode anymore,
688      * so we can't trigger an intercept.
689      * Trigger a triple fault for now.
690      */
691     report(false, "sel_cr0 test. Cannot recover from this - exiting");
692     exit(report_summary());
693 }
694 
695 static bool sel_cr0_bug_check(struct svm_test *test)
696 {
697     return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
698 }
699 
700 static void npt_nx_prepare(struct svm_test *test)
701 {
702 
703     u64 *pte;
704 
705     vmcb_ident(vmcb);
706     pte = npt_get_pte((u64)null_test);
707 
708     *pte |= (1ULL << 63);
709 }
710 
711 static bool npt_nx_check(struct svm_test *test)
712 {
713     u64 *pte = npt_get_pte((u64)null_test);
714 
715     *pte &= ~(1ULL << 63);
716 
717     vmcb->save.efer |= (1 << 11); /* EFER.NXE */
718 
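    /*
     * EXITINFO1 for a #NPF holds a page-fault error code.  Per my reading of
     * the APM: 0x100000015 = P | US | ID with bit 32 set, i.e. an instruction
     * fetch from a present page that faulted on the final guest-physical
     * translation (here, because NX was set on the NPT entry).
     */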
719     return (vmcb->control.exit_code == SVM_EXIT_NPF)
720            && (vmcb->control.exit_info_1 == 0x100000015ULL);
721 }
722 
723 static void npt_np_prepare(struct svm_test *test)
724 {
725     u64 *pte;
726 
727     scratch_page = alloc_page();
728     vmcb_ident(vmcb);
729     pte = npt_get_pte((u64)scratch_page);
730 
731     *pte &= ~1ULL;
732 }
733 
734 static void npt_np_test(struct svm_test *test)
735 {
736     (void) *(volatile u64 *)scratch_page;
737 }
738 
739 static bool npt_np_check(struct svm_test *test)
740 {
741     u64 *pte = npt_get_pte((u64)scratch_page);
742 
743     *pte |= 1ULL;
744 
745     return (vmcb->control.exit_code == SVM_EXIT_NPF)
746            && (vmcb->control.exit_info_1 == 0x100000004ULL);
747 }
748 
749 static void npt_us_prepare(struct svm_test *test)
750 {
751     u64 *pte;
752 
753     scratch_page = alloc_page();
754     vmcb_ident(vmcb);
755     pte = npt_get_pte((u64)scratch_page);
756 
757     *pte &= ~(1ULL << 2);
758 }
759 
760 static void npt_us_test(struct svm_test *test)
761 {
762     (void) *(volatile u64 *)scratch_page;
763 }
764 
765 static bool npt_us_check(struct svm_test *test)
766 {
767     u64 *pte = npt_get_pte((u64)scratch_page);
768 
769     *pte |= (1ULL << 2);
770 
771     return (vmcb->control.exit_code == SVM_EXIT_NPF)
772            && (vmcb->control.exit_info_1 == 0x100000005ULL);
773 }
774 
775 u64 save_pde;
776 
777 static void npt_rsvd_prepare(struct svm_test *test)
778 {
779     u64 *pde;
780 
781     vmcb_ident(vmcb);
782     pde = npt_get_pde((u64) null_test);
783 
784     save_pde = *pde;
785     *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
786 }
787 
788 static bool npt_rsvd_check(struct svm_test *test)
789 {
790     u64 *pde = npt_get_pde((u64) null_test);
791 
792     *pde = save_pde;
793 
794     return (vmcb->control.exit_code == SVM_EXIT_NPF)
795             && (vmcb->control.exit_info_1 == 0x10000001dULL);
796 }
797 
798 static void npt_rw_prepare(struct svm_test *test)
799 {
800 
801     u64 *pte;
802 
803     vmcb_ident(vmcb);
804     pte = npt_get_pte(0x80000);
805 
806     *pte &= ~(1ULL << 1);
807 }
808 
809 static void npt_rw_test(struct svm_test *test)
810 {
811     u64 *data = (void*)(0x80000);
812 
813     *data = 0;
814 }
815 
816 static bool npt_rw_check(struct svm_test *test)
817 {
818     u64 *pte = npt_get_pte(0x80000);
819 
820     *pte |= (1ULL << 1);
821 
822     return (vmcb->control.exit_code == SVM_EXIT_NPF)
823            && (vmcb->control.exit_info_1 == 0x100000007ULL);
824 }
825 
826 static void npt_rw_pfwalk_prepare(struct svm_test *test)
827 {
828 
829     u64 *pte;
830 
831     vmcb_ident(vmcb);
832     pte = npt_get_pte(read_cr3());
833 
834     *pte &= ~(1ULL << 1);
835 }
836 
837 static bool npt_rw_pfwalk_check(struct svm_test *test)
838 {
839     u64 *pte = npt_get_pte(read_cr3());
840 
841     *pte |= (1ULL << 1);
842 
843     return (vmcb->control.exit_code == SVM_EXIT_NPF)
844            && (vmcb->control.exit_info_1 == 0x200000007ULL)
845 	   && (vmcb->control.exit_info_2 == read_cr3());
846 }
847 
848 static void npt_rsvd_pfwalk_prepare(struct svm_test *test)
849 {
850     u64 *pdpe;
851     vmcb_ident(vmcb);
852 
853     pdpe = npt_get_pml4e();
854     pdpe[0] |= (1ULL << 8);
855 }
856 
857 static bool npt_rsvd_pfwalk_check(struct svm_test *test)
858 {
859     u64 *pdpe = npt_get_pml4e();
860     pdpe[0] &= ~(1ULL << 8);
861 
862     return (vmcb->control.exit_code == SVM_EXIT_NPF)
863             && (vmcb->control.exit_info_1 == 0x20000000fULL);
864 }
865 
866 static void npt_l1mmio_prepare(struct svm_test *test)
867 {
868     vmcb_ident(vmcb);
869 }
870 
871 u32 nested_apic_version1;
872 u32 nested_apic_version2;
873 
874 static void npt_l1mmio_test(struct svm_test *test)
875 {
876     volatile u32 *data = (volatile void*)(0xfee00030UL);
877 
878     nested_apic_version1 = *data;
879     nested_apic_version2 = *data;
880 }
881 
882 static bool npt_l1mmio_check(struct svm_test *test)
883 {
884     volatile u32 *data = (volatile void*)(0xfee00030);
885     u32 lvr = *data;
886 
887     return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
888 }
889 
890 static void npt_rw_l1mmio_prepare(struct svm_test *test)
891 {
892 
893     u64 *pte;
894 
895     vmcb_ident(vmcb);
896     pte = npt_get_pte(0xfee00080);
897 
898     *pte &= ~(1ULL << 1);
899 }
900 
901 static void npt_rw_l1mmio_test(struct svm_test *test)
902 {
903     volatile u32 *data = (volatile void*)(0xfee00080);
904 
905     *data = *data;
906 }
907 
908 static bool npt_rw_l1mmio_check(struct svm_test *test)
909 {
910     u64 *pte = npt_get_pte(0xfee00080);
911 
912     *pte |= (1ULL << 1);
913 
914     return (vmcb->control.exit_code == SVM_EXIT_NPF)
915            && (vmcb->control.exit_info_1 == 0x100000007ULL);
916 }
917 
918 #define TSC_ADJUST_VALUE    (1ll << 32)
919 #define TSC_OFFSET_VALUE    (~0ull << 48)
920 static bool ok;
921 
922 static bool tsc_adjust_supported(void)
923 {
924     return this_cpu_has(X86_FEATURE_TSC_ADJUST);
925 }
926 
927 static void tsc_adjust_prepare(struct svm_test *test)
928 {
929     default_prepare(test);
930     vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
931 
932     wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
933     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
934     ok = adjust == -TSC_ADJUST_VALUE;
935 }
936 
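/*
 * IA32_TSC_ADJUST semantics: a WRMSR to IA32_TSC that moves the TSC by some
 * delta also adds that delta to IA32_TSC_ADJUST.  The guest below writes the
 * TSC back by TSC_ADJUST_VALUE, so the adjust MSR should drop from
 * -TSC_ADJUST_VALUE to roughly -2 * TSC_ADJUST_VALUE.
 */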
937 static void tsc_adjust_test(struct svm_test *test)
938 {
939     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
940     ok &= adjust == -TSC_ADJUST_VALUE;
941 
942     uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
943     wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
944 
945     adjust = rdmsr(MSR_IA32_TSC_ADJUST);
946     ok &= adjust <= -2 * TSC_ADJUST_VALUE;
947 
948     uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
949     ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
950 
951     uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
952     ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
953 }
954 
955 static bool tsc_adjust_check(struct svm_test *test)
956 {
957     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
958 
959     wrmsr(MSR_IA32_TSC_ADJUST, 0);
960     return ok && adjust <= -2 * TSC_ADJUST_VALUE;
961 }
962 
963 static void latency_prepare(struct svm_test *test)
964 {
965     default_prepare(test);
966     runs = LATENCY_RUNS;
967     latvmrun_min = latvmexit_min = -1ULL;
968     latvmrun_max = latvmexit_max = 0;
969     vmrun_sum = vmexit_sum = 0;
970     tsc_start = rdtsc();
971 }
972 
973 static void latency_test(struct svm_test *test)
974 {
975     u64 cycles;
976 
977 start:
978     tsc_end = rdtsc();
979 
980     cycles = tsc_end - tsc_start;
981 
982     if (cycles > latvmrun_max)
983         latvmrun_max = cycles;
984 
985     if (cycles < latvmrun_min)
986         latvmrun_min = cycles;
987 
988     vmrun_sum += cycles;
989 
990     tsc_start = rdtsc();
991 
992     asm volatile ("vmmcall" : : : "memory");
993     goto start;
994 }
995 
996 static bool latency_finished(struct svm_test *test)
997 {
998     u64 cycles;
999 
1000     tsc_end = rdtsc();
1001 
1002     cycles = tsc_end - tsc_start;
1003 
1004     if (cycles > latvmexit_max)
1005         latvmexit_max = cycles;
1006 
1007     if (cycles < latvmexit_min)
1008         latvmexit_min = cycles;
1009 
1010     vmexit_sum += cycles;
1011 
1012     vmcb->save.rip += 3;
1013 
1014     runs -= 1;
1015 
1016     tsc_end = rdtsc();
1017 
1018     return runs == 0;
1019 }
1020 
1021 static bool latency_check(struct svm_test *test)
1022 {
1023     printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
1024             latvmrun_min, vmrun_sum / LATENCY_RUNS);
1025     printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
1026             latvmexit_min, vmexit_sum / LATENCY_RUNS);
1027     return true;
1028 }
1029 
1030 static void lat_svm_insn_prepare(struct svm_test *test)
1031 {
1032     default_prepare(test);
1033     runs = LATENCY_RUNS;
1034     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
1035     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
1036     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
1037 }
1038 
1039 static bool lat_svm_insn_finished(struct svm_test *test)
1040 {
1041     u64 vmcb_phys = virt_to_phys(vmcb);
1042     u64 cycles;
1043 
1044     for ( ; runs != 0; runs--) {
1045         tsc_start = rdtsc();
1046         asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
1047         cycles = rdtsc() - tsc_start;
1048         if (cycles > latvmload_max)
1049             latvmload_max = cycles;
1050         if (cycles < latvmload_min)
1051             latvmload_min = cycles;
1052         vmload_sum += cycles;
1053 
1054         tsc_start = rdtsc();
1055         asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
1056         cycles = rdtsc() - tsc_start;
1057         if (cycles > latvmsave_max)
1058             latvmsave_max = cycles;
1059         if (cycles < latvmsave_min)
1060             latvmsave_min = cycles;
1061         vmsave_sum += cycles;
1062 
1063         tsc_start = rdtsc();
1064         asm volatile("stgi\n\t");
1065         cycles = rdtsc() - tsc_start;
1066         if (cycles > latstgi_max)
1067             latstgi_max = cycles;
1068         if (cycles < latstgi_min)
1069             latstgi_min = cycles;
1070         stgi_sum += cycles;
1071 
1072         tsc_start = rdtsc();
1073         asm volatile("clgi\n\t");
1074         cycles = rdtsc() - tsc_start;
1075         if (cycles > latclgi_max)
1076             latclgi_max = cycles;
1077         if (cycles < latclgi_min)
1078             latclgi_min = cycles;
1079         clgi_sum += cycles;
1080     }
1081 
1082     tsc_end = rdtsc();
1083 
1084     return true;
1085 }
1086 
1087 static bool lat_svm_insn_check(struct svm_test *test)
1088 {
1089     printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
1090             latvmload_min, vmload_sum / LATENCY_RUNS);
1091     printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
1092             latvmsave_min, vmsave_sum / LATENCY_RUNS);
1093     printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
1094             latstgi_min, stgi_sum / LATENCY_RUNS);
1095     printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
1096             latclgi_min, clgi_sum / LATENCY_RUNS);
1097     return true;
1098 }
1099 
1100 bool pending_event_ipi_fired;
1101 bool pending_event_guest_run;
1102 
1103 static void pending_event_ipi_isr(isr_regs_t *regs)
1104 {
1105     pending_event_ipi_fired = true;
1106     eoi();
1107 }
1108 
1109 static void pending_event_prepare(struct svm_test *test)
1110 {
1111     int ipi_vector = 0xf1;
1112 
1113     default_prepare(test);
1114 
1115     pending_event_ipi_fired = false;
1116 
1117     handle_irq(ipi_vector, pending_event_ipi_isr);
1118 
1119     pending_event_guest_run = false;
1120 
1121     vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1122     vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1123 
1124     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1125                   APIC_DM_FIXED | ipi_vector, 0);
1126 
1127     set_test_stage(test, 0);
1128 }
1129 
1130 static void pending_event_test(struct svm_test *test)
1131 {
1132     pending_event_guest_run = true;
1133 }
1134 
1135 static bool pending_event_finished(struct svm_test *test)
1136 {
1137     switch (get_test_stage(test)) {
1138     case 0:
1139         if (vmcb->control.exit_code != SVM_EXIT_INTR) {
1140             report(false, "VMEXIT not due to pending interrupt. Exit reason 0x%x",
1141                    vmcb->control.exit_code);
1142             return true;
1143         }
1144 
1145         vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1146         vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1147 
1148         if (pending_event_guest_run) {
1149             report(false, "Guest ran before host received IPI\n");
1150             return true;
1151         }
1152 
1153         irq_enable();
1154         asm volatile ("nop");
1155         irq_disable();
1156 
1157         if (!pending_event_ipi_fired) {
1158             report(false, "Pending interrupt not dispatched after IRQ enabled\n");
1159             return true;
1160         }
1161         break;
1162 
1163     case 1:
1164         if (!pending_event_guest_run) {
1165             report(false, "Guest did not resume when no interrupt was pending");
1166             return true;
1167         }
1168         break;
1169     }
1170 
1171     inc_test_stage(test);
1172 
1173     return get_test_stage(test) == 2;
1174 }
1175 
1176 static bool pending_event_check(struct svm_test *test)
1177 {
1178     return get_test_stage(test) == 2;
1179 }
1180 
1181 static void pending_event_cli_prepare(struct svm_test *test)
1182 {
1183     default_prepare(test);
1184 
1185     pending_event_ipi_fired = false;
1186 
1187     handle_irq(0xf1, pending_event_ipi_isr);
1188 
1189     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1190               APIC_DM_FIXED | 0xf1, 0);
1191 
1192     set_test_stage(test, 0);
1193 }
1194 
1195 static void pending_event_cli_prepare_gif_clear(struct svm_test *test)
1196 {
1197     asm("cli");
1198 }
1199 
1200 static void pending_event_cli_test(struct svm_test *test)
1201 {
1202     if (pending_event_ipi_fired == true) {
1203         set_test_stage(test, -1);
1204         report(false, "Interrupt preceded guest");
1205         vmmcall();
1206     }
1207 
1208     /* VINTR_MASKING is zero.  This should cause the IPI to fire.  */
1209     irq_enable();
1210     asm volatile ("nop");
1211     irq_disable();
1212 
1213     if (pending_event_ipi_fired != true) {
1214         set_test_stage(test, -1);
1215         report(false, "Interrupt not triggered by guest");
1216     }
1217 
1218     vmmcall();
1219 
1220     /*
1221      * Now VINTR_MASKING=1, but no interrupt is pending so
1222      * the VINTR interception should be clear in VMCB02.  Check
1223      * that L0 did not leave a stale VINTR in the VMCB.
1224      */
1225     irq_enable();
1226     asm volatile ("nop");
1227     irq_disable();
1228 }
1229 
1230 static bool pending_event_cli_finished(struct svm_test *test)
1231 {
1232     if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1233         report(false, "VM_EXIT return to host is not EXIT_VMMCALL. Exit reason 0x%x",
1234                vmcb->control.exit_code);
1235         return true;
1236     }
1237 
1238     switch (get_test_stage(test)) {
1239     case 0:
1240         vmcb->save.rip += 3;
1241 
1242         pending_event_ipi_fired = false;
1243 
1244         vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1245 
1246 	/* Now entering again with VINTR_MASKING=1.  */
1247         apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1248               APIC_DM_FIXED | 0xf1, 0);
1249 
1250         break;
1251 
1252     case 1:
1253         if (pending_event_ipi_fired == true) {
1254             report(false, "Interrupt triggered by guest");
1255             return true;
1256         }
1257 
1258         irq_enable();
1259         asm volatile ("nop");
1260         irq_disable();
1261 
1262         if (pending_event_ipi_fired != true) {
1263             report(false, "Interrupt not triggered by host");
1264             return true;
1265         }
1266 
1267         break;
1268 
1269     default:
1270         return true;
1271     }
1272 
1273     inc_test_stage(test);
1274 
1275     return get_test_stage(test) == 2;
1276 }
1277 
1278 static bool pending_event_cli_check(struct svm_test *test)
1279 {
1280     return get_test_stage(test) == 2;
1281 }
1282 
1283 #define TIMER_VECTOR    222
1284 
1285 static volatile bool timer_fired;
1286 
1287 static void timer_isr(isr_regs_t *regs)
1288 {
1289     timer_fired = true;
1290     apic_write(APIC_EOI, 0);
1291 }
1292 
1293 static void interrupt_prepare(struct svm_test *test)
1294 {
1295     default_prepare(test);
1296     handle_irq(TIMER_VECTOR, timer_isr);
1297     timer_fired = false;
1298     set_test_stage(test, 0);
1299 }
1300 
1301 static void interrupt_test(struct svm_test *test)
1302 {
1303     long long start, loops;
1304 
1305     apic_write(APIC_LVTT, TIMER_VECTOR);
1306     irq_enable();
1307     apic_write(APIC_TMICT, 1); // Timer Initial Count Register (offset 0x380), one-shot
1308     for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1309         asm volatile ("nop");
1310 
1311     report(timer_fired, "direct interrupt while running guest");
1312 
1313     if (!timer_fired) {
1314         set_test_stage(test, -1);
1315         vmmcall();
1316     }
1317 
1318     apic_write(APIC_TMICT, 0);
1319     irq_disable();
1320     vmmcall();
1321 
1322     timer_fired = false;
1323     apic_write(APIC_TMICT, 1);
1324     for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1325         asm volatile ("nop");
1326 
1327     report(timer_fired, "intercepted interrupt while running guest");
1328 
1329     if (!timer_fired) {
1330         set_test_stage(test, -1);
1331         vmmcall();
1332     }
1333 
1334     irq_enable();
1335     apic_write(APIC_TMICT, 0);
1336     irq_disable();
1337 
1338     timer_fired = false;
1339     start = rdtsc();
1340     apic_write(APIC_TMICT, 1000000);
1341     asm volatile ("sti; hlt");
1342 
1343     report(rdtsc() - start > 10000 && timer_fired,
1344           "direct interrupt + hlt");
1345 
1346     if (!timer_fired) {
1347         set_test_stage(test, -1);
1348         vmmcall();
1349     }
1350 
1351     apic_write(APIC_TMICT, 0);
1352     irq_disable();
1353     vmmcall();
1354 
1355     timer_fired = false;
1356     start = rdtsc();
1357     apic_write(APIC_TMICT, 1000000);
1358     asm volatile ("hlt");
1359 
1360     report(rdtsc() - start > 10000 && timer_fired,
1361            "intercepted interrupt + hlt");
1362 
1363     if (!timer_fired) {
1364         set_test_stage(test, -1);
1365         vmmcall();
1366     }
1367 
1368     apic_write(APIC_TMICT, 0);
1369     irq_disable();
1370 }
1371 
1372 static bool interrupt_finished(struct svm_test *test)
1373 {
1374     switch (get_test_stage(test)) {
1375     case 0:
1376     case 2:
1377         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1378             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1379                    vmcb->control.exit_code);
1380             return true;
1381         }
1382         vmcb->save.rip += 3;
1383 
1384         vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1385         vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1386         break;
1387 
1388     case 1:
1389     case 3:
1390         if (vmcb->control.exit_code != SVM_EXIT_INTR) {
1391             report(false, "VMEXIT not due to intr intercept. Exit reason 0x%x",
1392                    vmcb->control.exit_code);
1393             return true;
1394         }
1395 
1396         irq_enable();
1397         asm volatile ("nop");
1398         irq_disable();
1399 
1400         vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1401         vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1402         break;
1403 
1404     case 4:
1405         break;
1406 
1407     default:
1408         return true;
1409     }
1410 
1411     inc_test_stage(test);
1412 
1413     return get_test_stage(test) == 5;
1414 }
1415 
1416 static bool interrupt_check(struct svm_test *test)
1417 {
1418     return get_test_stage(test) == 5;
1419 }
1420 
1421 static volatile bool nmi_fired;
1422 
1423 static void nmi_handler(isr_regs_t *regs)
1424 {
1425     nmi_fired = true;
1426     apic_write(APIC_EOI, 0);
1427 }
1428 
1429 static void nmi_prepare(struct svm_test *test)
1430 {
1431     default_prepare(test);
1432     nmi_fired = false;
1433     handle_irq(NMI_VECTOR, nmi_handler);
1434     set_test_stage(test, 0);
1435 }
1436 
1437 static void nmi_test(struct svm_test *test)
1438 {
1439     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
1440 
1441     report(nmi_fired, "direct NMI while running guest");
1442 
1443     if (!nmi_fired)
1444         set_test_stage(test, -1);
1445 
1446     vmmcall();
1447 
1448     nmi_fired = false;
1449 
1450     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
1451 
1452     if (!nmi_fired) {
1453         report(nmi_fired, "intercepted pending NMI not dispatched");
1454         set_test_stage(test, -1);
1455     }
1456 
1457 }
1458 
1459 static bool nmi_finished(struct svm_test *test)
1460 {
1461     switch (get_test_stage(test)) {
1462     case 0:
1463         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1464             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1465                    vmcb->control.exit_code);
1466             return true;
1467         }
1468         vmcb->save.rip += 3;
1469 
1470         vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
1471         break;
1472 
1473     case 1:
1474         if (vmcb->control.exit_code != SVM_EXIT_NMI) {
1475             report(false, "VMEXIT not due to NMI intercept. Exit reason 0x%x",
1476                    vmcb->control.exit_code);
1477             return true;
1478         }
1479 
1480         report(true, "NMI intercept while running guest");
1481         break;
1482 
1483     case 2:
1484         break;
1485 
1486     default:
1487         return true;
1488     }
1489 
1490     inc_test_stage(test);
1491 
1492     return get_test_stage(test) == 3;
1493 }
1494 
1495 static bool nmi_check(struct svm_test *test)
1496 {
1497     return get_test_stage(test) == 3;
1498 }
1499 
1500 #define NMI_DELAY 100000000ULL
1501 
1502 static void nmi_message_thread(void *_test)
1503 {
1504     struct svm_test *test = _test;
1505 
1506     while (get_test_stage(test) != 1)
1507         pause();
1508 
1509     delay(NMI_DELAY);
1510 
1511     apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
1512 
1513     while (get_test_stage(test) != 2)
1514         pause();
1515 
1516     delay(NMI_DELAY);
1517 
1518     apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
1519 }
1520 
1521 static void nmi_hlt_test(struct svm_test *test)
1522 {
1523     long long start;
1524 
1525     on_cpu_async(1, nmi_message_thread, test);
1526 
1527     start = rdtsc();
1528 
1529     set_test_stage(test, 1);
1530 
1531     asm volatile ("hlt");
1532 
1533     report((rdtsc() - start > NMI_DELAY) && nmi_fired,
1534           "direct NMI + hlt");
1535 
1536     if (!nmi_fired)
1537         set_test_stage(test, -1);
1538 
1539     nmi_fired = false;
1540 
1541     vmmcall();
1542 
1543     start = rdtsc();
1544 
1545     set_test_stage(test, 2);
1546 
1547     asm volatile ("hlt");
1548 
1549     report((rdtsc() - start > NMI_DELAY) && nmi_fired,
1550            "intercepted NMI + hlt");
1551 
1552     if (!nmi_fired) {
1553         report(nmi_fired, "intercepted pending NMI not dispatched");
1554         set_test_stage(test, -1);
1555         vmmcall();
1556     }
1557 
1558     set_test_stage(test, 3);
1559 }
1560 
1561 static bool nmi_hlt_finished(struct svm_test *test)
1562 {
1563     switch (get_test_stage(test)) {
1564     case 1:
1565         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1566             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1567                    vmcb->control.exit_code);
1568             return true;
1569         }
1570         vmcb->save.rip += 3;
1571 
1572         vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
1573         break;
1574 
1575     case 2:
1576         if (vmcb->control.exit_code != SVM_EXIT_NMI) {
1577             report(false, "VMEXIT not due to NMI intercept. Exit reason 0x%x",
1578                    vmcb->control.exit_code);
1579             return true;
1580         }
1581 
1582         report(true, "NMI intercept while running guest");
1583         break;
1584 
1585     case 3:
1586         break;
1587 
1588     default:
1589         return true;
1590     }
1591 
1592     return get_test_stage(test) == 3;
1593 }
1594 
1595 static bool nmi_hlt_check(struct svm_test *test)
1596 {
1597     return get_test_stage(test) == 3;
1598 }
1599 
1600 static volatile int count_exc = 0;
1601 
1602 static void my_isr(struct ex_regs *r)
1603 {
1604         count_exc++;
1605 }
1606 
1607 static void exc_inject_prepare(struct svm_test *test)
1608 {
1609     default_prepare(test);
1610     handle_exception(DE_VECTOR, my_isr);
1611     handle_exception(NMI_VECTOR, my_isr);
1612 }
1613 
1614 
1615 static void exc_inject_test(struct svm_test *test)
1616 {
1617     asm volatile ("vmmcall\n\tvmmcall\n\t");
1618 }
1619 
1620 static bool exc_inject_finished(struct svm_test *test)
1621 {
1622     switch (get_test_stage(test)) {
1623     case 0:
1624         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1625             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1626                    vmcb->control.exit_code);
1627             return true;
1628         }
1629         vmcb->save.rip += 3;
1630         vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
1631         break;
1632 
1633     case 1:
1634         if (vmcb->control.exit_code != SVM_EXIT_ERR) {
1635             report(false, "VMEXIT not due to error. Exit reason 0x%x",
1636                    vmcb->control.exit_code);
1637             return true;
1638         }
1639         report(count_exc == 0, "exception with vector 2 not injected");
1640         vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
1641         break;
1642 
1643     case 2:
1644         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1645             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1646                    vmcb->control.exit_code);
1647             return true;
1648         }
1649         vmcb->save.rip += 3;
1650         report(count_exc == 1, "divide overflow exception injected");
1651         report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared");
1652         break;
1653 
1654     default:
1655         return true;
1656     }
1657 
1658     inc_test_stage(test);
1659 
1660     return get_test_stage(test) == 3;
1661 }
1662 
1663 static bool exc_inject_check(struct svm_test *test)
1664 {
1665     return count_exc == 1 && get_test_stage(test) == 3;
1666 }
1667 
1668 static volatile bool virq_fired;
1669 
1670 static void virq_isr(isr_regs_t *regs)
1671 {
1672     virq_fired = true;
1673 }
1674 
1675 static void virq_inject_prepare(struct svm_test *test)
1676 {
1677     handle_irq(0xf1, virq_isr);
1678     default_prepare(test);
1679     vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
1680                             (0x0f << V_INTR_PRIO_SHIFT); // Set to the highest priority
1681     vmcb->control.int_vector = 0xf1;
1682     virq_fired = false;
1683     set_test_stage(test, 0);
1684 }
1685 
1686 static void virq_inject_test(struct svm_test *test)
1687 {
1688     if (virq_fired) {
1689         report(false, "virtual interrupt fired before L2 sti");
1690         set_test_stage(test, -1);
1691         vmmcall();
1692     }
1693 
1694     irq_enable();
1695     asm volatile ("nop");
1696     irq_disable();
1697 
1698     if (!virq_fired) {
1699         report(false, "virtual interrupt not fired after L2 sti");
1700         set_test_stage(test, -1);
1701     }
1702 
1703     vmmcall();
1704 
1705     if (virq_fired) {
1706         report(false, "virtual interrupt fired before L2 sti after VINTR intercept");
1707         set_test_stage(test, -1);
1708         vmmcall();
1709     }
1710 
1711     irq_enable();
1712     asm volatile ("nop");
1713     irq_disable();
1714 
1715     if (!virq_fired) {
1716         report(false, "virtual interrupt not fired after return from VINTR intercept");
1717         set_test_stage(test, -1);
1718     }
1719 
1720     vmmcall();
1721 
1722     irq_enable();
1723     asm volatile ("nop");
1724     irq_disable();
1725 
1726     if (virq_fired) {
1727         report(false, "virtual interrupt fired when V_IRQ_PRIO is less than V_TPR");
1728         set_test_stage(test, -1);
1729     }
1730 
1731     vmmcall();
1732     vmmcall();
1733 }
1734 
1735 static bool virq_inject_finished(struct svm_test *test)
1736 {
1737     vmcb->save.rip += 3;
1738 
1739     switch (get_test_stage(test)) {
1740     case 0:
1741         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1742             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1743                    vmcb->control.exit_code);
1744             return true;
1745         }
1746         if (vmcb->control.int_ctl & V_IRQ_MASK) {
1747             report(false, "V_IRQ not cleared on VMEXIT after firing");
1748             return true;
1749         }
1750         virq_fired = false;
1751         vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1752         vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
1753                             (0x0f << V_INTR_PRIO_SHIFT);
1754         break;
1755 
1756     case 1:
1757         if (vmcb->control.exit_code != SVM_EXIT_VINTR) {
1758             report(false, "VMEXIT not due to vintr. Exit reason 0x%x",
1759                    vmcb->control.exit_code);
1760             return true;
1761         }
1762         if (virq_fired) {
1763             report(false, "V_IRQ fired before SVM_EXIT_VINTR");
1764             return true;
1765         }
1766         vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
1767         break;
1768 
1769     case 2:
1770         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1771             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1772                    vmcb->control.exit_code);
1773             return true;
1774         }
1775         virq_fired = false;
1776         // Set irq to lower priority
1777         vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
1778                             (0x08 << V_INTR_PRIO_SHIFT);
1779         // Raise guest TPR
1780         vmcb->control.int_ctl |= 0x0a & V_TPR_MASK;
1781         break;
1782 
1783     case 3:
1784         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1785             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1786                    vmcb->control.exit_code);
1787             return true;
1788         }
1789         vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1790         break;
1791 
1792     case 4:
1793         // INTERCEPT_VINTR should be ignored because V_INTR_PRIO < V_TPR
1794         if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1795             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1796                    vmcb->control.exit_code);
1797             return true;
1798         }
1799         break;
1800 
1801     default:
1802         return true;
1803     }
1804 
1805     inc_test_stage(test);
1806 
1807     return get_test_stage(test) == 5;
1808 }
1809 
1810 static bool virq_inject_check(struct svm_test *test)
1811 {
1812     return get_test_stage(test) == 5;
1813 }
1814 
1815 /*
1816  * Detect nested guest RIP corruption as explained in kernel commit
1817  * b6162e82aef19fee9c32cb3fe9ac30d9116a8c73
1818  *
1819  * In the assembly loop below 'insb' is executed while IO instructions
1820  * are not intercepted; the instruction is emulated by L0.
1821  *
1822  * At the same time we are getting interrupts from the local APIC timer,
1823  * and we do intercept them in L1.
1824  *
1825  * If the interrupt happens on the insb instruction, L0 will VMexit, emulate
1826  * the insb instruction and then it will inject the interrupt to L1 through
1827  * a nested VMexit.  Due to a bug, it would leave pre-emulation values of RIP,
1828  * RAX and RSP in the VMCB.
1829  *
1830  * In our intercept handler we detect the bug by checking that RIP is that of
1831  * the insb instruction, but its memory operand has already been written.
1832  * This means that insb was already executed.
1833  */
1834 
1835 static volatile int isr_cnt = 0;
1836 static volatile uint8_t io_port_var = 0xAA;
1837 extern const char insb_instruction_label[];
1838 
1839 static void reg_corruption_isr(isr_regs_t *regs)
1840 {
1841     isr_cnt++;
1842     apic_write(APIC_EOI, 0);
1843 }
1844 
1845 static void reg_corruption_prepare(struct svm_test *test)
1846 {
1847     default_prepare(test);
1848     set_test_stage(test, 0);
1849 
1850     vmcb->control.int_ctl = V_INTR_MASKING_MASK;
1851     vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1852 
1853     handle_irq(TIMER_VECTOR, reg_corruption_isr);
1854 
1855     /* set local APIC to inject external interrupts */
1856     apic_write(APIC_TMICT, 0);
1857     apic_write(APIC_TDCR, 0);
1858     apic_write(APIC_LVTT, TIMER_VECTOR | APIC_LVT_TIMER_PERIODIC);
1859     apic_write(APIC_TMICT, 1000);
1860 }
1861 
1862 static void reg_corruption_test(struct svm_test *test)
1863 {
1864     /* this is an endless loop, which is interrupted by the timer interrupt */
1865     asm volatile (
1866             "1:\n\t"
1867             "movw $0x4d0, %%dx\n\t" // IO port
1868             "lea %[io_port_var], %%rdi\n\t"
1869             "movb $0xAA, %[io_port_var]\n\t"
1870             "insb_instruction_label:\n\t"
1871             "insb\n\t"
1872             "jmp 1b\n\t"
1873 
1874             : [io_port_var] "=m" (io_port_var)
1875             : /* no inputs*/
1876             : "rdx", "rdi"
1877     );
1878 }
1879 
1880 static bool reg_corruption_finished(struct svm_test *test)
1881 {
1882     if (isr_cnt == 10000) {
1883         report(true,
1884                "No RIP corruption detected after %d timer interrupts",
1885                isr_cnt);
1886         set_test_stage(test, 1);
1887         return true;
1888     }
1889 
1890     if (vmcb->control.exit_code == SVM_EXIT_INTR) {
1891 
1892         void* guest_rip = (void*)vmcb->save.rip;
1893 
1894         irq_enable();
1895         asm volatile ("nop");
1896         irq_disable();
1897 
1898         if (guest_rip == insb_instruction_label && io_port_var != 0xAA) {
1899             report(false,
1900                    "RIP corruption detected after %d timer interrupts",
1901                    isr_cnt);
1902             return true;
1903         }
1904 
1905     }
1906     return false;
1907 }
1908 
1909 static bool reg_corruption_check(struct svm_test *test)
1910 {
1911     return get_test_stage(test) == 1;
1912 }
1913 
1914 #define TEST(name) { #name, .v2 = name }
1915 
1916 /*
1917  * v2 tests
1918  */
1919 
1920 static void basic_guest_main(struct svm_test *test)
1921 {
1922 }
1923 
1924 
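/*
 * For each bit of resv_mask in [start, end] (stepping by inc), set that bit
 * on top of val in the given register field and expect VMRUN to fail with
 * SVM_EXIT_ERR; the CR variant below takes the expected exit code as a
 * parameter instead.
 */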
1925 #define SVM_TEST_REG_RESERVED_BITS(start, end, inc, str_name, reg, val,	\
1926 				   resv_mask)				\
1927 {									\
1928         u64 tmp, mask;							\
1929         int i;								\
1930 									\
1931         for (i = start; i <= end; i = i + inc) {			\
1932                 mask = 1ull << i;					\
1933                 if (!(mask & resv_mask))				\
1934                         continue;					\
1935                 tmp = val | mask;					\
1936 		reg = tmp;						\
1937 		report(svm_vmrun() == SVM_EXIT_ERR, "Test %s %d:%d: %lx",\
1938 		    str_name, end, start, tmp);				\
1939         }								\
1940 }
1941 
1942 #define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask,	\
1943 				  exit_code)				\
1944 {									\
1945 	u64 tmp, mask;							\
1946 	int i;								\
1947 									\
1948 	for (i = start; i <= end; i = i + inc) {			\
1949 		mask = 1ull << i;					\
1950 		if (!(mask & resv_mask))				\
1951 			continue;					\
1952 		tmp = val | mask;					\
1953 		switch (cr) {						\
1954 		case 0:							\
1955 			vmcb->save.cr0 = tmp;				\
1956 			break;						\
1957 		case 3:							\
1958 			vmcb->save.cr3 = tmp;				\
1959 			break;						\
1960 		case 4:							\
1961 			vmcb->save.cr4 = tmp;				\
1962 		}							\
1963 		report(svm_vmrun() == exit_code, "Test CR%d %d:%d: %lx",\
1964 		    cr, end, start, tmp);				\
1965 	}								\
1966 }
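/*
 * Example: SVM_TEST_CR_RESERVED_BITS(0, 2, 1, 3, cr3_saved,
 * SVM_CR3_PAE_LEGACY_RESERVED_MASK, SVM_EXIT_NPF) sets each of CR3 bits
 * 2:0 (those covered by the mask) on top of cr3_saved in vmcb->save.cr3
 * and expects the nested run to exit with SVM_EXIT_NPF.
 */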
1967 
1968 static void test_efer(void)
1969 {
	/*
	 * Clearing EFER.SVME in the guest state is illegal
	 */
1973 	u64 efer_saved = vmcb->save.efer;
1974 	u64 efer = efer_saved;
1975 
1976 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer);
1977 	efer &= ~EFER_SVME;
1978 	vmcb->save.efer = efer;
1979 	report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer);
1980 	vmcb->save.efer = efer_saved;
1981 
1982 	/*
1983 	 * EFER MBZ bits: 63:16, 9
1984 	 */
1985 	efer_saved = vmcb->save.efer;
1986 
1987 	SVM_TEST_REG_RESERVED_BITS(8, 9, 1, "EFER", vmcb->save.efer,
1988 	    efer_saved, SVM_EFER_RESERVED_MASK);
1989 	SVM_TEST_REG_RESERVED_BITS(16, 63, 4, "EFER", vmcb->save.efer,
1990 	    efer_saved, SVM_EFER_RESERVED_MASK);
1991 
1992 	/*
1993 	 * EFER.LME and CR0.PG are both set and CR4.PAE is zero.
1994 	 */
1995 	u64 cr0_saved = vmcb->save.cr0;
1996 	u64 cr0;
1997 	u64 cr4_saved = vmcb->save.cr4;
1998 	u64 cr4;
1999 
2000 	efer = efer_saved | EFER_LME;
2001 	vmcb->save.efer = efer;
2002 	cr0 = cr0_saved | X86_CR0_PG | X86_CR0_PE;
2003 	vmcb->save.cr0 = cr0;
2004 	cr4 = cr4_saved & ~X86_CR4_PAE;
2005 	vmcb->save.cr4 = cr4;
2006 	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
2007 	    "CR0.PG=1 (%lx) and CR4.PAE=0 (%lx)", efer, cr0, cr4);
2008 
2009 	/*
2010 	 * EFER.LME and CR0.PG are both set and CR0.PE is zero.
2011 	 */
2012 	vmcb->save.cr4 = cr4_saved | X86_CR4_PAE;
2013 	cr0 &= ~X86_CR0_PE;
2014 	vmcb->save.cr0 = cr0;
2015 	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
2016 	    "CR0.PG=1 and CR0.PE=0 (%lx)", efer, cr0);
2017 
2018 	/*
2019 	 * EFER.LME, CR0.PG, CR4.PAE, CS.L, and CS.D are all non-zero.
2020 	 */
2021 	u32 cs_attrib_saved = vmcb->save.cs.attrib;
2022 	u32 cs_attrib;
2023 
2024 	cr0 |= X86_CR0_PE;
2025 	vmcb->save.cr0 = cr0;
2026 	cs_attrib = cs_attrib_saved | SVM_SELECTOR_L_MASK |
2027 	    SVM_SELECTOR_DB_MASK;
2028 	vmcb->save.cs.attrib = cs_attrib;
2029 	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
2030 	    "CR0.PG=1 (%lx), CR4.PAE=1 (%lx), CS.L=1 and CS.D=1 (%x)",
2031 	    efer, cr0, cr4, cs_attrib);
2032 
2033 	vmcb->save.cr0 = cr0_saved;
2034 	vmcb->save.cr4 = cr4_saved;
2035 	vmcb->save.efer = efer_saved;
2036 	vmcb->save.cs.attrib = cs_attrib_saved;
2037 }
2038 
2039 static void test_cr0(void)
2040 {
	/*
	 * Setting CR0.NW while CR0.CD is clear is an illegal combination
	 */
2044 	u64 cr0_saved = vmcb->save.cr0;
2045 	u64 cr0 = cr0_saved;
2046 
2047 	cr0 |= X86_CR0_CD;
2048 	cr0 &= ~X86_CR0_NW;
2049 	vmcb->save.cr0 = cr0;
2050 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=0: %lx",
2051 	    cr0);
2052 	cr0 |= X86_CR0_NW;
2053 	vmcb->save.cr0 = cr0;
2054 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=1: %lx",
2055 	    cr0);
2056 	cr0 &= ~X86_CR0_NW;
2057 	cr0 &= ~X86_CR0_CD;
2058 	vmcb->save.cr0 = cr0;
2059 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=0: %lx",
2060 	    cr0);
2061 	cr0 |= X86_CR0_NW;
2062 	vmcb->save.cr0 = cr0;
2063 	report (svm_vmrun() == SVM_EXIT_ERR, "Test CR0 CD=0,NW=1: %lx",
2064 	    cr0);
2065 	vmcb->save.cr0 = cr0_saved;
2066 
2067 	/*
2068 	 * CR0[63:32] are not zero
2069 	 */
2070 	cr0 = cr0_saved;
2071 
2072 	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "CR0", vmcb->save.cr0, cr0_saved,
2073 	    SVM_CR0_RESERVED_MASK);
2074 	vmcb->save.cr0 = cr0_saved;
2075 }
2076 
2077 static void test_cr3(void)
2078 {
2079 	/*
2080 	 * CR3 MBZ bits based on different modes:
2081 	 *   [63:52] - long mode
2082 	 */
2083 	u64 cr3_saved = vmcb->save.cr3;
2084 
2085 	SVM_TEST_CR_RESERVED_BITS(0, 63, 1, 3, cr3_saved,
2086 	    SVM_CR3_LONG_MBZ_MASK, SVM_EXIT_ERR);
2087 
2088 	vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_MBZ_MASK;
2089 	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
2090 	    vmcb->save.cr3);
2091 
2092 	/*
2093 	 * CR3 non-MBZ reserved bits based on different modes:
2094 	 *   [11:5] [2:0] - long mode
2095 	 *          [2:0] - PAE legacy mode
2096 	 */
2097 	u64 cr4_saved = vmcb->save.cr4;
2098 	u64 *pdpe = npt_get_pml4e();
2099 
2100 	/*
2101 	 * Long mode
2102 	 */
2103 	if (this_cpu_has(X86_FEATURE_PCID)) {
2104 		vmcb->save.cr4 = cr4_saved | X86_CR4_PCIDE;
2105 		SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved,
2106 		    SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_VMMCALL);
2107 
2108 		vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK;
2109 		report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
2110 		    vmcb->save.cr3);
	} else {
		vmcb->save.cr4 = cr4_saved & ~X86_CR4_PCIDE;
2114 
2115 		/* Clear P (Present) bit in NPT in order to trigger #NPF */
2116 		pdpe[0] &= ~1ULL;
2117 
2118 		SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved,
2119 		    SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_NPF);
2120 
2121 		pdpe[0] |= 1ULL;
2122 		vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK;
2123 		report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
2124 		    vmcb->save.cr3);
2125 	}
2126 
2127 	/*
2128 	 * PAE legacy
2129 	 */
2130 	pdpe[0] &= ~1ULL;
2131 	vmcb->save.cr4 = cr4_saved | X86_CR4_PAE;
2132 	SVM_TEST_CR_RESERVED_BITS(0, 2, 1, 3, cr3_saved,
2133 	    SVM_CR3_PAE_LEGACY_RESERVED_MASK, SVM_EXIT_NPF);
2134 
2135 	pdpe[0] |= 1ULL;
2136 	vmcb->save.cr3 = cr3_saved & ~SVM_CR3_PAE_LEGACY_RESERVED_MASK;
2137 	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
2138 	    vmcb->save.cr3);
2139 
2140 	vmcb->save.cr3 = cr3_saved;
2141 	vmcb->save.cr4 = cr4_saved;
2142 }
2143 
2144 static void test_cr4(void)
2145 {
2146 	/*
2147 	 * CR4 MBZ bits based on different modes:
2148 	 *   [15:12], 17, 19, [31:22] - legacy mode
2149 	 *   [15:12], 17, 19, [63:22] - long mode
2150 	 */
2151 	u64 cr4_saved = vmcb->save.cr4;
2152 	u64 efer_saved = vmcb->save.efer;
2153 	u64 efer = efer_saved;
2154 
2155 	efer &= ~EFER_LME;
2156 	vmcb->save.efer = efer;
2157 	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
2158 	    SVM_CR4_LEGACY_RESERVED_MASK, SVM_EXIT_ERR);
2159 
2160 	efer |= EFER_LME;
2161 	vmcb->save.efer = efer;
2162 	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
2163 	    SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR);
2164 	SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved,
2165 	    SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR);
2166 
2167 	vmcb->save.cr4 = cr4_saved;
2168 	vmcb->save.efer = efer_saved;
2169 }
2170 
2171 static void test_dr(void)
2172 {
2173 	/*
2174 	 * DR6[63:32] and DR7[63:32] are MBZ
2175 	 */
2176 	u64 dr_saved = vmcb->save.dr6;
2177 
2178 	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR6", vmcb->save.dr6, dr_saved,
2179 	    SVM_DR6_RESERVED_MASK);
2180 	vmcb->save.dr6 = dr_saved;
2181 
2182 	dr_saved = vmcb->save.dr7;
2183 	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR7", vmcb->save.dr7, dr_saved,
2184 	    SVM_DR7_RESERVED_MASK);
2185 
2186 	vmcb->save.dr7 = dr_saved;
2187 }
2188 
2189 static void svm_guest_state_test(void)
2190 {
2191 	test_set_guest(basic_guest_main);
2192 
2193 	test_efer();
2194 	test_cr0();
2195 	test_cr3();
2196 	test_cr4();
2197 	test_dr();
2198 }
2199 
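/*
 * Legacy entries list, in order: name, supported, prepare, prepare_gif_clear,
 * guest main, finished and check callbacks. Entries added with TEST() run
 * entirely through the .v2 callback instead. A hypothetical legacy entry:
 *   { "my_test", default_supported, my_prepare, default_prepare_gif_clear,
 *     my_guest_main, default_finished, my_check },
 * while TEST(my_v2_test) only needs a single "static void my_v2_test(void)".
 */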
2200 struct svm_test svm_tests[] = {
2201     { "null", default_supported, default_prepare,
2202       default_prepare_gif_clear, null_test,
2203       default_finished, null_check },
2204     { "vmrun", default_supported, default_prepare,
2205       default_prepare_gif_clear, test_vmrun,
2206        default_finished, check_vmrun },
2207     { "ioio", default_supported, prepare_ioio,
2208        default_prepare_gif_clear, test_ioio,
2209        ioio_finished, check_ioio },
2210     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
2211       default_prepare_gif_clear, null_test, default_finished,
2212       check_no_vmrun_int },
2213     { "rsm", default_supported,
2214       prepare_rsm_intercept, default_prepare_gif_clear,
2215       test_rsm_intercept, finished_rsm_intercept, check_rsm_intercept },
2216     { "cr3 read intercept", default_supported,
2217       prepare_cr3_intercept, default_prepare_gif_clear,
2218       test_cr3_intercept, default_finished, check_cr3_intercept },
2219     { "cr3 read nointercept", default_supported, default_prepare,
2220       default_prepare_gif_clear, test_cr3_intercept, default_finished,
2221       check_cr3_nointercept },
2222     { "cr3 read intercept emulate", smp_supported,
2223       prepare_cr3_intercept_bypass, default_prepare_gif_clear,
2224       test_cr3_intercept_bypass, default_finished, check_cr3_intercept },
2225     { "dr intercept check", default_supported, prepare_dr_intercept,
2226       default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished,
2227       check_dr_intercept },
2228     { "next_rip", next_rip_supported, prepare_next_rip,
2229       default_prepare_gif_clear, test_next_rip,
2230       default_finished, check_next_rip },
2231     { "msr intercept check", default_supported, prepare_msr_intercept,
2232       default_prepare_gif_clear, test_msr_intercept,
2233       msr_intercept_finished, check_msr_intercept },
2234     { "mode_switch", default_supported, prepare_mode_switch,
2235       default_prepare_gif_clear, test_mode_switch,
2236        mode_switch_finished, check_mode_switch },
2237     { "asid_zero", default_supported, prepare_asid_zero,
2238       default_prepare_gif_clear, test_asid_zero,
2239        default_finished, check_asid_zero },
2240     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare,
2241       default_prepare_gif_clear, sel_cr0_bug_test,
2242        sel_cr0_bug_finished, sel_cr0_bug_check },
2243     { "npt_nx", npt_supported, npt_nx_prepare,
2244       default_prepare_gif_clear, null_test,
2245       default_finished, npt_nx_check },
2246     { "npt_np", npt_supported, npt_np_prepare,
2247       default_prepare_gif_clear, npt_np_test,
2248       default_finished, npt_np_check },
2249     { "npt_us", npt_supported, npt_us_prepare,
2250       default_prepare_gif_clear, npt_us_test,
2251       default_finished, npt_us_check },
2252     { "npt_rsvd", npt_supported, npt_rsvd_prepare,
2253       default_prepare_gif_clear, null_test,
2254       default_finished, npt_rsvd_check },
2255     { "npt_rw", npt_supported, npt_rw_prepare,
2256       default_prepare_gif_clear, npt_rw_test,
2257       default_finished, npt_rw_check },
2258     { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare,
2259       default_prepare_gif_clear, null_test,
2260       default_finished, npt_rsvd_pfwalk_check },
2261     { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare,
2262       default_prepare_gif_clear, null_test,
2263       default_finished, npt_rw_pfwalk_check },
2264     { "npt_l1mmio", npt_supported, npt_l1mmio_prepare,
2265       default_prepare_gif_clear, npt_l1mmio_test,
2266       default_finished, npt_l1mmio_check },
2267     { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare,
2268       default_prepare_gif_clear, npt_rw_l1mmio_test,
2269       default_finished, npt_rw_l1mmio_check },
2270     { "tsc_adjust", tsc_adjust_supported, tsc_adjust_prepare,
2271       default_prepare_gif_clear, tsc_adjust_test,
2272       default_finished, tsc_adjust_check },
2273     { "latency_run_exit", default_supported, latency_prepare,
2274       default_prepare_gif_clear, latency_test,
2275       latency_finished, latency_check },
2276     { "latency_svm_insn", default_supported, lat_svm_insn_prepare,
2277       default_prepare_gif_clear, null_test,
2278       lat_svm_insn_finished, lat_svm_insn_check },
2279     { "exc_inject", default_supported, exc_inject_prepare,
2280       default_prepare_gif_clear, exc_inject_test,
2281       exc_inject_finished, exc_inject_check },
2282     { "pending_event", default_supported, pending_event_prepare,
2283       default_prepare_gif_clear,
2284       pending_event_test, pending_event_finished, pending_event_check },
2285     { "pending_event_cli", default_supported, pending_event_cli_prepare,
2286       pending_event_cli_prepare_gif_clear,
2287       pending_event_cli_test, pending_event_cli_finished,
2288       pending_event_cli_check },
2289     { "interrupt", default_supported, interrupt_prepare,
2290       default_prepare_gif_clear, interrupt_test,
2291       interrupt_finished, interrupt_check },
2292     { "nmi", default_supported, nmi_prepare,
2293       default_prepare_gif_clear, nmi_test,
2294       nmi_finished, nmi_check },
2295     { "nmi_hlt", smp_supported, nmi_prepare,
2296       default_prepare_gif_clear, nmi_hlt_test,
2297       nmi_hlt_finished, nmi_hlt_check },
2298     { "virq_inject", default_supported, virq_inject_prepare,
2299       default_prepare_gif_clear, virq_inject_test,
2300       virq_inject_finished, virq_inject_check },
2301     { "reg_corruption", default_supported, reg_corruption_prepare,
2302       default_prepare_gif_clear, reg_corruption_test,
2303       reg_corruption_finished, reg_corruption_check },
2304     TEST(svm_guest_state_test),
2305     { NULL, NULL, NULL, NULL, NULL, NULL, NULL }
2306 };
2307