xref: /kvm-unit-tests/x86/svm_tests.c (revision f3f338619e4938c2509f5c691adc1f331b07c203)
1  #include "svm.h"
2  #include "libcflat.h"
3  #include "processor.h"
4  #include "desc.h"
5  #include "msr.h"
6  #include "vm.h"
7  #include "smp.h"
8  #include "alloc_page.h"
9  #include "isr.h"
10  #include "apic.h"
11  #include "delay.h"
12  #include "util.h"
13  #include "x86/usermode.h"
14  #include "vmalloc.h"
15  
16  #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f
17  
18  #define LATENCY_RUNS 1000000
19  
20  u64 tsc_start;
21  u64 tsc_end;
22  
23  u64 vmrun_sum, vmexit_sum;
24  u64 vmsave_sum, vmload_sum;
25  u64 stgi_sum, clgi_sum;
26  u64 latvmrun_max;
27  u64 latvmrun_min;
28  u64 latvmexit_max;
29  u64 latvmexit_min;
30  u64 latvmload_max;
31  u64 latvmload_min;
32  u64 latvmsave_max;
33  u64 latvmsave_min;
34  u64 latstgi_max;
35  u64 latstgi_min;
36  u64 latclgi_max;
37  u64 latclgi_min;
38  u64 runs;
39  
40  static void null_test(struct svm_test *test)
41  {
42  }
43  
44  static bool null_check(struct svm_test *test)
45  {
46  	return vmcb->control.exit_code == SVM_EXIT_VMMCALL;
47  }
48  
49  static void prepare_no_vmrun_int(struct svm_test *test)
50  {
51  	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
52  }
53  
54  static bool check_no_vmrun_int(struct svm_test *test)
55  {
56  	return vmcb->control.exit_code == SVM_EXIT_ERR;
57  }
58  
59  static void test_vmrun(struct svm_test *test)
60  {
61  	asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb)));
62  }
63  
64  static bool check_vmrun(struct svm_test *test)
65  {
66  	return vmcb->control.exit_code == SVM_EXIT_VMRUN;
67  }
68  
69  static void prepare_rsm_intercept(struct svm_test *test)
70  {
71  	default_prepare(test);
72  	vmcb->control.intercept |= 1 << INTERCEPT_RSM;
73  	vmcb->control.intercept_exceptions |= (1ULL << UD_VECTOR);
74  }
75  
76  static void test_rsm_intercept(struct svm_test *test)
77  {
78  	asm volatile ("rsm" : : : "memory");
79  }
80  
81  static bool check_rsm_intercept(struct svm_test *test)
82  {
83  	return get_test_stage(test) == 2;
84  }
85  
86  static bool finished_rsm_intercept(struct svm_test *test)
87  {
88  	switch (get_test_stage(test)) {
89  	case 0:
90  		if (vmcb->control.exit_code != SVM_EXIT_RSM) {
91  			report_fail("VMEXIT not due to rsm. Exit reason 0x%x",
92  				    vmcb->control.exit_code);
93  			return true;
94  		}
95  		vmcb->control.intercept &= ~(1 << INTERCEPT_RSM);
96  		inc_test_stage(test);
97  		break;
98  
99  	case 1:
100  		if (vmcb->control.exit_code != SVM_EXIT_EXCP_BASE + UD_VECTOR) {
101  			report_fail("VMEXIT not due to #UD. Exit reason 0x%x",
102  				    vmcb->control.exit_code);
103  			return true;
104  		}
105  		vmcb->save.rip += 2;
106  		inc_test_stage(test);
107  		break;
108  
109  	default:
110  		return true;
111  	}
112  	return get_test_stage(test) == 2;
113  }
114  
115  static void prepare_cr3_intercept(struct svm_test *test)
116  {
117  	default_prepare(test);
118  	vmcb->control.intercept_cr_read |= 1 << 3;
119  }
120  
121  static void test_cr3_intercept(struct svm_test *test)
122  {
123  	asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
124  }
125  
126  static bool check_cr3_intercept(struct svm_test *test)
127  {
128  	return vmcb->control.exit_code == SVM_EXIT_READ_CR3;
129  }
130  
131  static bool check_cr3_nointercept(struct svm_test *test)
132  {
133  	return null_check(test) && test->scratch == read_cr3();
134  }
135  
136  static void corrupt_cr3_intercept_bypass(void *_test)
137  {
138  	struct svm_test *test = _test;
139  	extern volatile u32 mmio_insn;
140  
141  	while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
142  		pause();
143  	pause();
144  	pause();
145  	pause();
146  	mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
147  }
148  
149  static void prepare_cr3_intercept_bypass(struct svm_test *test)
150  {
151  	default_prepare(test);
152  	vmcb->control.intercept_cr_read |= 1 << 3;
153  	on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
154  }
155  
156  static void test_cr3_intercept_bypass(struct svm_test *test)
157  {
158  	ulong a = 0xa0000;
159  
160  	test->scratch = 1;
161  	while (test->scratch != 2)
162  		barrier();
163  
164  	asm volatile ("mmio_insn: mov %0, (%0); nop"
165  		      : "+a"(a) : : "memory");
166  	test->scratch = a;
167  }
168  
169  static void prepare_dr_intercept(struct svm_test *test)
170  {
171  	default_prepare(test);
172  	vmcb->control.intercept_dr_read = 0xff;
173  	vmcb->control.intercept_dr_write = 0xff;
174  }
175  
176  static void test_dr_intercept(struct svm_test *test)
177  {
178  	unsigned int i, failcnt = 0;
179  
180  	/* Loop testing debug register reads */
181  	for (i = 0; i < 8; i++) {
182  
183  		switch (i) {
184  		case 0:
185  			asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
186  			break;
187  		case 1:
188  			asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
189  			break;
190  		case 2:
191  			asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
192  			break;
193  		case 3:
194  			asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
195  			break;
196  		case 4:
197  			asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
198  			break;
199  		case 5:
200  			asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
201  			break;
202  		case 6:
203  			asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
204  			break;
205  		case 7:
206  			asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
207  			break;
208  		}
209  
210  		if (test->scratch != i) {
211  			report_fail("dr%u read intercept", i);
212  			failcnt++;
213  		}
214  	}
215  
216  	/* Loop testing debug register writes */
217  	for (i = 0; i < 8; i++) {
218  
219  		switch (i) {
220  		case 0:
221  			asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
222  			break;
223  		case 1:
224  			asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
225  			break;
226  		case 2:
227  			asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
228  			break;
229  		case 3:
230  			asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
231  			break;
232  		case 4:
233  			asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
234  			break;
235  		case 5:
236  			asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
237  			break;
238  		case 6:
239  			asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
240  			break;
241  		case 7:
242  			asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
243  			break;
244  		}
245  
246  		if (test->scratch != i) {
247  			report_fail("dr%u write intercept", i);
248  			failcnt++;
249  		}
250  	}
251  
252  	test->scratch = failcnt;
253  }
254  
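/*
 * Host-side handler for the DR intercept test: decode which debug register
 * was accessed from the exit code (16 read exit codes followed by 16 write
 * exit codes), hand the register number back through test->scratch, and skip
 * the 3-byte MOV DRn instruction.
 */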
255  static bool dr_intercept_finished(struct svm_test *test)
256  {
257  	ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0);
258  
259  	/* Only expect DR intercepts */
260  	if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
261  		return true;
262  
263  	/*
264  	 * Compute debug register number.
265  	 * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
266  	 * Programmer's Manual Volume 2 - System Programming:
267  	 * http://support.amd.com/TechDocs/24593.pdf
268  	 * there are 16 VMEXIT codes each for DR read and write.
269  	 */
270  	test->scratch = (n % 16);
271  
272  	/* Jump over MOV instruction */
273  	vmcb->save.rip += 3;
274  
275  	return false;
276  }
277  
278  static bool check_dr_intercept(struct svm_test *test)
279  {
280  	return !test->scratch;
281  }
282  
283  static bool next_rip_supported(void)
284  {
285  	return this_cpu_has(X86_FEATURE_NRIPS);
286  }
287  
288  static void prepare_next_rip(struct svm_test *test)
289  {
290  	vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
291  }
292  
293  
294  static void test_next_rip(struct svm_test *test)
295  {
296  	asm volatile ("rdtsc\n\t"
297  		      ".globl exp_next_rip\n\t"
298  		      "exp_next_rip:\n\t" ::: "eax", "edx");
299  }
300  
301  static bool check_next_rip(struct svm_test *test)
302  {
303  	extern char exp_next_rip;
304  	unsigned long address = (unsigned long)&exp_next_rip;
305  
306  	return address == vmcb->control.next_rip;
307  }
308  
309  extern u8 *msr_bitmap;
310  
311  static bool is_x2apic;
312  
313  static void prepare_msr_intercept(struct svm_test *test)
314  {
315  	default_prepare(test);
316  	vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
317  
318  	memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
319  
320  	is_x2apic = is_x2apic_enabled();
321  }
322  
323  #define SVM_MSRPM_BYTES_PER_RANGE 2048
324  #define SVM_BITS_PER_MSR 2
325  #define SVM_MSRS_PER_BYTE 4
326  #define SVM_MSRS_PER_RANGE 8192
327  #define SVM_MSRPM_OFFSET_MASK (SVM_MSRS_PER_RANGE - 1)
328  
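/*
 * Translate an MSR index into its bit offset within the MSR permission map.
 * The MSRPM covers three ranges of 8192 MSRs each (starting at 0x0,
 * 0xc0000000 and 0xc0010000); every range occupies 2048 bytes and each MSR
 * gets two bits, one for read and one for write interception.  Returns -1
 * for MSRs that fall outside all three ranges.
 */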
329  static int get_msrpm_bit_nr(u32 msr)
330  {
331  	int range_nr;
332  
333  	switch (msr & ~SVM_MSRPM_OFFSET_MASK) {
334  	case 0:
335  		range_nr = 0;
336  		break;
337  	case 0xc0000000:
338  		range_nr = 1;
339  		break;
340  	case 0xc0010000:
341  		range_nr = 2;
342  		break;
343  	default:
344  		return -1;
345  	}
346  
347  	return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +
348  	       (msr & SVM_MSRPM_OFFSET_MASK) * SVM_BITS_PER_MSR;
349  }
350  
351  static void __test_msr_intercept(struct svm_test *test)
352  {
353  	u64 val, exp, arb_val = 0xef8056791234abcd; /* Arbitrary value */
354  	int vector;
355  	u32 msr;
356  
357  	for (msr = 0; msr <= 0xc0012000; msr++) {
358  		if (msr == 0xC0010131 /* MSR_SEV_STATUS */) {
359  			/*
360  			 * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
361  			 * Programmer's Manual volume 2 - System Programming:
362  			 * http://support.amd.com/TechDocs/24593.pdf
363  			 * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
364  			 */
365  			continue;
366  		}
367  
368  		/*
369  		 * Test one MSR just before and after each range, but otherwise
370  		 * skip the gaps between supported MSR ranges.
371  		 */
372  		if (msr == 0x2000 + 1)
373  			msr = 0xc0000000 - 1;
374  		else if (msr == 0xc0002000 + 1)
375  			msr = 0xc0010000 - 1;
376  
377  		test->scratch = msr;
378  		vmmcall();
379  
380  		test->scratch = -1;
381  
382  		vector = rdmsr_safe(msr, &val);
383  		if (vector)
384  			report_fail("Expected RDMSR(0x%x) to #VMEXIT, got exception '%u'",
385  				    msr, vector);
386  		else if (test->scratch != msr)
387  			report_fail("Expected RDMSR(0x%x) to #VMEXIT, got scratch '%ld'",
388  				    msr, test->scratch);
389  
390  		test->scratch = BIT_ULL(32) | msr;
391  		vmmcall();
392  
393  		/*
394  		 * Poor man's approach to generating a value that
395  		 * appears arbitrary each time around the loop.
396  		 */
397  		arb_val += (arb_val << 1);
398  
399  		test->scratch = -1;
400  
401  		vector = wrmsr_safe(msr, arb_val);
402  		if (vector)
403  			report_fail("Expected WRMSR(0x%x) to #VMEXIT, got exception '%u'",
404  				    msr, vector);
405  		else if (test->scratch != arb_val)
406  			report_fail("Expected WRMSR(0x%x) to #VMEXIT, got scratch '%ld' (wanted %ld)",
407  				    msr, test->scratch, arb_val);
408  
409  		test->scratch = BIT_ULL(33) | msr;
410  		vmmcall();
411  
412  		if (get_msrpm_bit_nr(msr) < 0) {
413  			report(msr == 0x2000 ||
414  			       msr == 0xc0000000 - 1 || msr == 0xc0002000 ||
415  			       msr == 0xc0010000 - 1 || msr == 0xc0012000,
416  			       "MSR 0x%x not covered by an MSRPM range", msr);
417  			continue;
418  		}
419  
420  		exp = test->scratch;
421  
422  		/*
423  		 * Verify that disabling interception for MSRs within an MSRPM
424  		 * range behaves as expected.  Simply eat exceptions, the goal
425  		 * is to verify interception, not MSR emulation/virtualization.
426  		 */
427  		test->scratch = -1;
428  		(void)rdmsr_safe(msr, &val);
429  		if (test->scratch != -1)
430  			report_fail("RDMSR 0x%x, Wanted -1 (no intercept), got 0x%lx",
431  				    msr, test->scratch);
432  
433  		/*
434  		 * Verify L1 and L2 see the same MSR value.  Skip TSC to avoid
435  		 * false failures, as it's constantly changing.
436  		 */
437  		if (val != exp && msr != MSR_IA32_TSC)
438  			report_fail("RDMSR 0x%x, wanted val '0x%lx', got val '0x%lx'",
439  				    msr, exp, val);
440  
441  		test->scratch = BIT_ULL(34) | msr;
442  		vmmcall();
443  
444  		test->scratch = -1;
445  		(void)wrmsr_safe(msr, val);
446  		if (test->scratch != -1)
447  			report_fail("WRMSR 0x%x, Wanted -1 (no intercept), got 0x%lx",
448  				    msr, test->scratch);
449  
450  		test->scratch = BIT_ULL(35) | msr;
451  		vmmcall();
452  	}
453  }
454  
455  static void test_msr_intercept(struct svm_test *test)
456  {
457  	__test_msr_intercept(test);
458  
459  	test->scratch = -2;
460  	vmmcall();
461  
462  	__test_msr_intercept(test);
463  
464  	test->scratch = -3;
465  }
466  
467  static void restore_msrpm_bit(int bit_nr, bool set)
468  {
469  	if (set)
470  		__set_bit(bit_nr, msr_bitmap);
471  	else
472  		__clear_bit(bit_nr, msr_bitmap);
473  }
474  
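/*
 * Host-side driver for the MSR intercept test.  The guest passes the MSR
 * index in the low 32 bits of test->scratch and uses bits 32-35 to step the
 * host through the sequence: intercept reads, intercept writes, pass reads
 * through, pass writes through.  A scratch value of -2 switches to an
 * all-ones bitmap for the second pass, and -3 ends the test.
 */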
475  static bool msr_intercept_finished(struct svm_test *test)
476  {
477  	u32 exit_code = vmcb->control.exit_code;
478  	bool all_set = false;
479  	int bit_nr;
480  
481  	if (exit_code == SVM_EXIT_VMMCALL) {
482  		u32 msr = test->scratch & -1u;
483  
484  		vmcb->save.rip += 3;
485  
486  		if (test->scratch == -3)
487  			return true;
488  
489  		if (test->scratch == -2) {
490  			all_set = true;
491  			memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
492  			return false;
493  		}
494  
495  		bit_nr = get_msrpm_bit_nr(msr);
496  		if (bit_nr < 0)
497  			return false;
498  
499  		switch (test->scratch >> 32) {
500  		case 0:
501  			__set_bit(bit_nr, msr_bitmap);
502  			return false;
503  		case 1:
504  			restore_msrpm_bit(bit_nr, all_set);
505  			__set_bit(bit_nr + 1, msr_bitmap);
506  			return false;
507  		case 2:
508  			restore_msrpm_bit(bit_nr + 1, all_set);
509  			__clear_bit(bit_nr, msr_bitmap);
510  			(void)rdmsr_safe(msr, &test->scratch);
511  			return false;
512  		case 4:
513  			restore_msrpm_bit(bit_nr, all_set);
514  			__clear_bit(bit_nr + 1, msr_bitmap);
515  			/*
516  			 * Disable x2APIC so that WRMSR faults instead of doing
517  			 * random things, e.g. sending IPIs.
518  			 */
519  			if (is_x2apic && msr >= 0x800 && msr <= 0x8ff)
520  				reset_apic();
521  			return false;
522  		case 8:
523  			restore_msrpm_bit(bit_nr + 1, all_set);
524  			if (is_x2apic && msr >= 0x800 && msr <= 0x8ff)
525  				enable_x2apic();
526  			return false;
527  		default:
528  			return true;
529  		}
530  	}
531  
532  	if (exit_code != SVM_EXIT_MSR) {
533  		report_fail("Wanted MSR VM-Exit, got reason 0x%x", exit_code);
534  		return true;
535  	}
536  
537  	/* Jump over RDMSR/WRMSR instruction */
538  	vmcb->save.rip += 2;
539  
540  	/*
541  	 * Test whether the intercept was for RDMSR/WRMSR.
542  	 * For RDMSR, test->scratch is set to the MSR index;
543  	 *      RCX holds the MSR index.
544  	 * For WRMSR, test->scratch is set to the MSR value;
545  	 *      RDX holds the upper 32 bits of the MSR value,
546  	 *      while RAX holds its lower 32 bits.
547  	 */
548  	if (vmcb->control.exit_info_1)
549  		test->scratch = ((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff));
550  	else
551  		test->scratch = get_regs().rcx;
552  
553  	return false;
554  }
555  
556  static bool check_msr_intercept(struct svm_test *test)
557  {
558  	return (test->scratch == -3);
559  }
560  
561  static void prepare_mode_switch(struct svm_test *test)
562  {
563  	vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
564  		|  (1ULL << UD_VECTOR)
565  		|  (1ULL << DF_VECTOR)
566  		|  (1ULL << PF_VECTOR);
567  	test->scratch = 0;
568  }
569  
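/*
 * Walk the guest from 64-bit long mode all the way down to real mode
 * (clearing PG, LME, PAE and PE in turn), VMMCALL from real mode, then climb
 * back up to long mode and VMMCALL again.  mode_switch_finished() checks
 * CR0/CR4/EFER at each exit.
 */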
570  static void test_mode_switch(struct svm_test *test)
571  {
572  	asm volatile("	cli\n"
573  		     "	ljmp *1f\n" /* jump to 32-bit code segment */
574  		     "1:\n"
575  		     "	.long 2f\n"
576  		     "	.long " xstr(KERNEL_CS32) "\n"
577  		     ".code32\n"
578  		     "2:\n"
579  		     "	movl %%cr0, %%eax\n"
580  		     "	btcl  $31, %%eax\n" /* clear PG */
581  		     "	movl %%eax, %%cr0\n"
582  		     "	movl $0xc0000080, %%ecx\n" /* EFER */
583  		     "	rdmsr\n"
584  		     "	btcl $8, %%eax\n" /* clear LME */
585  		     "	wrmsr\n"
586  		     "	movl %%cr4, %%eax\n"
587  		     "	btcl $5, %%eax\n" /* clear PAE */
588  		     "	movl %%eax, %%cr4\n"
589  		     "	movw %[ds16], %%ax\n"
590  		     "	movw %%ax, %%ds\n"
591  		     "	ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
592  		     ".code16\n"
593  		     "3:\n"
594  		     "	movl %%cr0, %%eax\n"
595  		     "	btcl $0, %%eax\n" /* clear PE  */
596  		     "	movl %%eax, %%cr0\n"
597  		     "	ljmpl $0, $4f\n"   /* jump to real-mode */
598  		     "4:\n"
599  		     "	vmmcall\n"
600  		     "	movl %%cr0, %%eax\n"
601  		     "	btsl $0, %%eax\n" /* set PE  */
602  		     "	movl %%eax, %%cr0\n"
603  		     "	ljmpl %[cs32], $5f\n" /* back to protected mode */
604  		     ".code32\n"
605  		     "5:\n"
606  		     "	movl %%cr4, %%eax\n"
607  		     "	btsl $5, %%eax\n" /* set PAE */
608  		     "	movl %%eax, %%cr4\n"
609  		     "	movl $0xc0000080, %%ecx\n" /* EFER */
610  		     "	rdmsr\n"
611  		     "	btsl $8, %%eax\n" /* set LME */
612  		     "	wrmsr\n"
613  		     "	movl %%cr0, %%eax\n"
614  		     "	btsl  $31, %%eax\n" /* set PG */
615  		     "	movl %%eax, %%cr0\n"
616  		     "	ljmpl %[cs64], $6f\n"    /* back to long mode */
617  		     ".code64\n\t"
618  		     "6:\n"
619  		     "	vmmcall\n"
620  		     :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
621  		      [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
622  		     : "rax", "rbx", "rcx", "rdx", "memory");
623  }
624  
625  static bool mode_switch_finished(struct svm_test *test)
626  {
627  	u64 cr0, cr4, efer;
628  
629  	cr0  = vmcb->save.cr0;
630  	cr4  = vmcb->save.cr4;
631  	efer = vmcb->save.efer;
632  
633  	/* Only expect VMMCALL intercepts */
634  	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
635  		return true;
636  
637  	/* Jump over VMMCALL instruction */
638  	vmcb->save.rip += 3;
639  
640  	/* Do sanity checks */
641  	switch (test->scratch) {
642  	case 0:
643  		/* Test should be in real mode now - check for this */
644  		if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
645  		    (cr4  & 0x00000020) || /* CR4.PAE */
646  		    (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
647  			return true;
648  		break;
649  	case 2:
650  		/* Test should be back in long-mode now - check for this */
651  		if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
652  		    ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
653  		    ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
654  			return true;
655  		break;
656  	}
657  
658  	/* one step forward */
659  	test->scratch += 1;
660  
661  	return test->scratch == 2;
662  }
663  
664  static bool check_mode_switch(struct svm_test *test)
665  {
666  	return test->scratch == 2;
667  }
668  
669  extern u8 *io_bitmap;
670  
671  static void prepare_ioio(struct svm_test *test)
672  {
673  	vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
674  	test->scratch = 0;
675  	memset(io_bitmap, 0, 8192);
676  	io_bitmap[8192] = 0xFF;
677  }
678  
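/*
 * Exercise the IOIO permission bitmap: unintercepted ports, different access
 * widths, low and high port numbers, accesses that only partially overlap an
 * intercepted port, and accesses that straddle the bitmap page boundary and
 * its final extra byte.  Every expected intercept bumps the test stage in
 * ioio_finished(), which also clears the bits it consumed.
 */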
679  static void test_ioio(struct svm_test *test)
680  {
681  	// stage 0, test IO pass
682  	inb(0x5000);
683  	outb(0x0, 0x5000);
684  	if (get_test_stage(test) != 0)
685  		goto fail;
686  
687  	// test IO width, in/out
688  	io_bitmap[0] = 0xFF;
689  	inc_test_stage(test);
690  	inb(0x0);
691  	if (get_test_stage(test) != 2)
692  		goto fail;
693  
694  	outw(0x0, 0x0);
695  	if (get_test_stage(test) != 3)
696  		goto fail;
697  
698  	inl(0x0);
699  	if (get_test_stage(test) != 4)
700  		goto fail;
701  
702  	// test low/high IO port
703  	io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
704  	inb(0x5000);
705  	if (get_test_stage(test) != 5)
706  		goto fail;
707  
708  	io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
709  	inw(0x9000);
710  	if (get_test_stage(test) != 6)
711  		goto fail;
712  
713  	// test partial pass
714  	io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
715  	inl(0x4FFF);
716  	if (get_test_stage(test) != 7)
717  		goto fail;
718  
719  	// test across pages
720  	inc_test_stage(test);
721  	inl(0x7FFF);
722  	if (get_test_stage(test) != 8)
723  		goto fail;
724  
725  	inc_test_stage(test);
726  	io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
727  	inl(0x7FFF);
728  	if (get_test_stage(test) != 10)
729  		goto fail;
730  
731  	io_bitmap[0] = 0;
732  	inl(0xFFFF);
733  	if (get_test_stage(test) != 11)
734  		goto fail;
735  
736  	io_bitmap[0] = 0xFF;
737  	io_bitmap[8192] = 0;
738  	inl(0xFFFF);
739  	inc_test_stage(test);
740  	if (get_test_stage(test) != 12)
741  		goto fail;
742  
743  	return;
744  
745  fail:
746  	report_fail("stage %d", get_test_stage(test));
747  	test->scratch = -1;
748  }
749  
750  static bool ioio_finished(struct svm_test *test)
751  {
752  	unsigned port, size;
753  
754  	/* Only expect IOIO intercepts */
755  	if (vmcb->control.exit_code == SVM_EXIT_VMMCALL)
756  		return true;
757  
758  	if (vmcb->control.exit_code != SVM_EXIT_IOIO)
759  		return true;
760  
761  	/* one step forward */
762  	test->scratch += 1;
763  
764  	port = vmcb->control.exit_info_1 >> 16;
765  	size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;
766  
767  	while (size--) {
768  		io_bitmap[port / 8] &= ~(1 << (port & 7));
769  		port++;
770  	}
771  
772  	return false;
773  }
774  
775  static bool check_ioio(struct svm_test *test)
776  {
777  	memset(io_bitmap, 0, 8193);
778  	return test->scratch != -1;
779  }
780  
781  static void prepare_asid_zero(struct svm_test *test)
782  {
783  	vmcb->control.asid = 0;
784  }
785  
786  static void test_asid_zero(struct svm_test *test)
787  {
788  	asm volatile ("vmmcall\n\t");
789  }
790  
791  static bool check_asid_zero(struct svm_test *test)
792  {
793  	return vmcb->control.exit_code == SVM_EXIT_ERR;
794  }
795  
796  static void sel_cr0_bug_prepare(struct svm_test *test)
797  {
798  	vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
799  }
800  
801  static bool sel_cr0_bug_finished(struct svm_test *test)
802  {
803  	return true;
804  }
805  
806  static void sel_cr0_bug_test(struct svm_test *test)
807  {
808  	unsigned long cr0;
809  
810  	/* read CR0, set CD, and write it back */
811  	cr0  = read_cr0();
812  	cr0 |= (1UL << 30);
813  	write_cr0(cr0);
814  
815  	/*
816  	 * If we are here, the test failed: we are no longer in guest mode, so
817  	 * we can't trigger an intercept to report the failure.  Report it and
818  	 * exit instead.
819  	 */
820  	report_fail("sel_cr0 test. Cannot recover from this - exiting");
821  	exit(report_summary());
822  }
823  
824  static bool sel_cr0_bug_check(struct svm_test *test)
825  {
826  	return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
827  }
828  
829  #define TSC_ADJUST_VALUE    (1ll << 32)
830  #define TSC_OFFSET_VALUE    (~0ull << 48)
831  static bool ok;
832  
833  static bool tsc_adjust_supported(void)
834  {
835  	return this_cpu_has(X86_FEATURE_TSC_ADJUST);
836  }
837  
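/*
 * IA32_TSC_ADJUST test: the guest must see the TSC_ADJUST value programmed by
 * the host, a guest write to IA32_TSC must be reflected in TSC_ADJUST, and
 * guest TSC reads must include the VMCB tsc_offset.
 */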
838  static void tsc_adjust_prepare(struct svm_test *test)
839  {
840  	default_prepare(test);
841  	vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
842  
843  	wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
844  	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
845  	ok = adjust == -TSC_ADJUST_VALUE;
846  }
847  
848  static void tsc_adjust_test(struct svm_test *test)
849  {
850  	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
851  	ok &= adjust == -TSC_ADJUST_VALUE;
852  
853  	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
854  	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
855  
856  	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
857  	ok &= adjust <= -2 * TSC_ADJUST_VALUE;
858  
859  	uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
860  	ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
861  
862  	uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
863  	ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
864  }
865  
866  static bool tsc_adjust_check(struct svm_test *test)
867  {
868  	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
869  
870  	wrmsr(MSR_IA32_TSC_ADJUST, 0);
871  	return ok && adjust <= -2 * TSC_ADJUST_VALUE;
872  }
873  
874  
875  static u64 guest_tsc_delay_value;
876  /* number of bits to shift tsc right for stable result */
877  #define TSC_SHIFT 24
878  #define TSC_SCALE_ITERATIONS 10
879  
880  static void svm_tsc_scale_guest(struct svm_test *test)
881  {
882  	u64 start_tsc = rdtsc();
883  
884  	while (rdtsc() - start_tsc < guest_tsc_delay_value)
885  		cpu_relax();
886  }
887  
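/*
 * Run the TSC-scaling guest for 'duration' units (host TSC cycles shifted
 * right by TSC_SHIFT) under the given TSC ratio and offset, then check that
 * the host-observed duration matches even though the guest sees a scaled TSC.
 */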
888  static void svm_tsc_scale_run_testcase(u64 duration,
889  				       double tsc_scale, u64 tsc_offset)
890  {
891  	u64 start_tsc, actual_duration;
892  
893  	guest_tsc_delay_value = (duration << TSC_SHIFT) * tsc_scale;
894  
895  	test_set_guest(svm_tsc_scale_guest);
896  	vmcb->control.tsc_offset = tsc_offset;
897  	wrmsr(MSR_AMD64_TSC_RATIO, (u64)(tsc_scale * (1ULL << 32)));
898  
899  	start_tsc = rdtsc();
900  
901  	if (svm_vmrun() != SVM_EXIT_VMMCALL)
902  		report_fail("unexpected vm exit code 0x%x", vmcb->control.exit_code);
903  
904  	actual_duration = (rdtsc() - start_tsc) >> TSC_SHIFT;
905  
906  	report(duration == actual_duration, "tsc delay (expected: %lu, actual: %lu)",
907  	       duration, actual_duration);
908  }
909  
910  static void svm_tsc_scale_test(void)
911  {
912  	int i;
913  
914  	if (!tsc_scale_supported()) {
915  		report_skip("TSC scale not supported in the guest");
916  		return;
917  	}
918  
919  	report(rdmsr(MSR_AMD64_TSC_RATIO) == TSC_RATIO_DEFAULT,
920  	       "initial TSC scale ratio");
921  
922  	for (i = 0 ; i < TSC_SCALE_ITERATIONS; i++) {
923  
924  		double tsc_scale = (double)(rdrand() % 100 + 1) / 10;
925  		int duration = rdrand() % 50 + 1;
926  		u64 tsc_offset = rdrand();
927  
928  		report_info("duration=%d, tsc_scale=%d, tsc_offset=%ld",
929  			    duration, (int)(tsc_scale * 100), tsc_offset);
930  
931  		svm_tsc_scale_run_testcase(duration, tsc_scale, tsc_offset);
932  	}
933  
934  	svm_tsc_scale_run_testcase(50, 255, rdrand());
935  	svm_tsc_scale_run_testcase(50, 0.0001, rdrand());
936  }
937  
938  static void latency_prepare(struct svm_test *test)
939  {
940  	default_prepare(test);
941  	runs = LATENCY_RUNS;
942  	latvmrun_min = latvmexit_min = -1ULL;
943  	latvmrun_max = latvmexit_max = 0;
944  	vmrun_sum = vmexit_sum = 0;
945  	tsc_start = rdtsc();
946  }
947  
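/*
 * Guest half of the VMRUN/VMEXIT latency measurement: account the cycles
 * elapsed since tsc_start against the VMRUN path, take a fresh timestamp and
 * VMMCALL so that latency_finished() can account the exit path, then repeat.
 */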
948  static void latency_test(struct svm_test *test)
949  {
950  	u64 cycles;
951  
952  start:
953  	tsc_end = rdtsc();
954  
955  	cycles = tsc_end - tsc_start;
956  
957  	if (cycles > latvmrun_max)
958  		latvmrun_max = cycles;
959  
960  	if (cycles < latvmrun_min)
961  		latvmrun_min = cycles;
962  
963  	vmrun_sum += cycles;
964  
965  	tsc_start = rdtsc();
966  
967  	asm volatile ("vmmcall" : : : "memory");
968  	goto start;
969  }
970  
971  static bool latency_finished(struct svm_test *test)
972  {
973  	u64 cycles;
974  
975  	tsc_end = rdtsc();
976  
977  	cycles = tsc_end - tsc_start;
978  
979  	if (cycles > latvmexit_max)
980  		latvmexit_max = cycles;
981  
982  	if (cycles < latvmexit_min)
983  		latvmexit_min = cycles;
984  
985  	vmexit_sum += cycles;
986  
987  	vmcb->save.rip += 3;
988  
989  	runs -= 1;
990  
991  	tsc_end = rdtsc();
992  
993  	return runs == 0;
994  }
995  
996  static bool latency_finished_clean(struct svm_test *test)
997  {
998  	vmcb->control.clean = VMCB_CLEAN_ALL;
999  	return latency_finished(test);
1000  }
1001  
1002  static bool latency_check(struct svm_test *test)
1003  {
1004  	printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
1005  	       latvmrun_min, vmrun_sum / LATENCY_RUNS);
1006  	printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
1007  	       latvmexit_min, vmexit_sum / LATENCY_RUNS);
1008  	return true;
1009  }
1010  
1011  static void lat_svm_insn_prepare(struct svm_test *test)
1012  {
1013  	default_prepare(test);
1014  	runs = LATENCY_RUNS;
1015  	latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
1016  	latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
1017  	vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
1018  }
1019  
1020  static bool lat_svm_insn_finished(struct svm_test *test)
1021  {
1022  	u64 vmcb_phys = virt_to_phys(vmcb);
1023  	u64 cycles;
1024  
1025  	for ( ; runs != 0; runs--) {
1026  		tsc_start = rdtsc();
1027  		asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
1028  		cycles = rdtsc() - tsc_start;
1029  		if (cycles > latvmload_max)
1030  			latvmload_max = cycles;
1031  		if (cycles < latvmload_min)
1032  			latvmload_min = cycles;
1033  		vmload_sum += cycles;
1034  
1035  		tsc_start = rdtsc();
1036  		asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
1037  		cycles = rdtsc() - tsc_start;
1038  		if (cycles > latvmsave_max)
1039  			latvmsave_max = cycles;
1040  		if (cycles < latvmsave_min)
1041  			latvmsave_min = cycles;
1042  		vmsave_sum += cycles;
1043  
1044  		tsc_start = rdtsc();
1045  		asm volatile("stgi\n\t");
1046  		cycles = rdtsc() - tsc_start;
1047  		if (cycles > latstgi_max)
1048  			latstgi_max = cycles;
1049  		if (cycles < latstgi_min)
1050  			latstgi_min = cycles;
1051  		stgi_sum += cycles;
1052  
1053  		tsc_start = rdtsc();
1054  		asm volatile("clgi\n\t");
1055  		cycles = rdtsc() - tsc_start;
1056  		if (cycles > latclgi_max)
1057  			latclgi_max = cycles;
1058  		if (cycles < latclgi_min)
1059  			latclgi_min = cycles;
1060  		clgi_sum += cycles;
1061  	}
1062  
1063  	tsc_end = rdtsc();
1064  
1065  	return true;
1066  }
1067  
1068  static bool lat_svm_insn_check(struct svm_test *test)
1069  {
1070  	printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
1071  	       latvmload_min, vmload_sum / LATENCY_RUNS);
1072  	printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
1073  	       latvmsave_min, vmsave_sum / LATENCY_RUNS);
1074  	printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
1075  	       latstgi_min, stgi_sum / LATENCY_RUNS);
1076  	printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
1077  	       latclgi_min, clgi_sum / LATENCY_RUNS);
1078  	return true;
1079  }
1080  
1081  /*
1082   * Report failures from SVM guest code, and on failure, set the stage to -1 and
1083   * do VMMCALL to terminate the test (host side must treat -1 as "finished").
1084   * TODO: fix the tests that don't play nice with a straight report, e.g. the
1085   * V_TPR test fails if report() is invoked.
1086   */
1087  #define report_svm_guest(cond, test, fmt, args...)	\
1088  do {							\
1089  	if (!(cond)) {					\
1090  		report_fail(fmt, ##args);		\
1091  		set_test_stage(test, -1);		\
1092  		vmmcall();				\
1093  	}						\
1094  } while (0)
1095  
1096  bool pending_event_ipi_fired;
1097  bool pending_event_guest_run;
1098  
1099  static void pending_event_ipi_isr(isr_regs_t *regs)
1100  {
1101  	pending_event_ipi_fired = true;
1102  	eoi();
1103  }
1104  
1105  static void pending_event_prepare(struct svm_test *test)
1106  {
1107  	int ipi_vector = 0xf1;
1108  
1109  	default_prepare(test);
1110  
1111  	pending_event_ipi_fired = false;
1112  
1113  	handle_irq(ipi_vector, pending_event_ipi_isr);
1114  
1115  	pending_event_guest_run = false;
1116  
1117  	vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1118  	vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1119  
1120  	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1121  		       APIC_DM_FIXED | ipi_vector, 0);
1122  
1123  	set_test_stage(test, 0);
1124  }
1125  
1126  static void pending_event_test(struct svm_test *test)
1127  {
1128  	pending_event_guest_run = true;
1129  }
1130  
1131  static bool pending_event_finished(struct svm_test *test)
1132  {
1133  	switch (get_test_stage(test)) {
1134  	case 0:
1135  		if (vmcb->control.exit_code != SVM_EXIT_INTR) {
1136  			report_fail("VMEXIT not due to pending interrupt. Exit reason 0x%x",
1137  				    vmcb->control.exit_code);
1138  			return true;
1139  		}
1140  
1141  		vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1142  		vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1143  
1144  		if (pending_event_guest_run) {
1145  			report_fail("Guest ran before host received IPI\n");
1146  			return true;
1147  		}
1148  
1149  		sti_nop_cli();
1150  
1151  		if (!pending_event_ipi_fired) {
1152  			report_fail("Pending interrupt not dispatched after IRQ enabled\n");
1153  			return true;
1154  		}
1155  		break;
1156  
1157  	case 1:
1158  		if (!pending_event_guest_run) {
1159  			report_fail("Guest did not resume when no interrupt\n");
1160  			return true;
1161  		}
1162  		break;
1163  	}
1164  
1165  	inc_test_stage(test);
1166  
1167  	return get_test_stage(test) == 2;
1168  }
1169  
1170  static bool pending_event_check(struct svm_test *test)
1171  {
1172  	return get_test_stage(test) == 2;
1173  }
1174  
1175  static void pending_event_cli_prepare(struct svm_test *test)
1176  {
1177  	default_prepare(test);
1178  
1179  	pending_event_ipi_fired = false;
1180  
1181  	handle_irq(0xf1, pending_event_ipi_isr);
1182  
1183  	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1184  		       APIC_DM_FIXED | 0xf1, 0);
1185  
1186  	set_test_stage(test, 0);
1187  }
1188  
1189  static void pending_event_cli_prepare_gif_clear(struct svm_test *test)
1190  {
1191  	asm("cli");
1192  }
1193  
1194  static void pending_event_cli_test(struct svm_test *test)
1195  {
1196  	report_svm_guest(!pending_event_ipi_fired, test,
1197  			 "IRQ should NOT be delivered while IRQs disabled");
1198  
1199  	/* VINTR_MASKING is zero.  This should cause the IPI to fire.  */
1200  	sti_nop_cli();
1201  
1202  	report_svm_guest(pending_event_ipi_fired, test,
1203  			 "IRQ should be delivered after enabling IRQs");
1204  	vmmcall();
1205  
1206  	/*
1207  	 * Now VINTR_MASKING=1, but no interrupt is pending so
1208  	 * the VINTR interception should be clear in VMCB02.  Check
1209  	 * that L0 did not leave a stale VINTR in the VMCB.
1210  	 */
1211  	sti_nop_cli();
1212  }
1213  
1214  static bool pending_event_cli_finished(struct svm_test *test)
1215  {
1216  	report_svm_guest(vmcb->control.exit_code == SVM_EXIT_VMMCALL, test,
1217  			 "Wanted VMMCALL VM-Exit, got exit reason 0x%x",
1218  			 vmcb->control.exit_code);
1219  
1220  	switch (get_test_stage(test)) {
1221  	case 0:
1222  		vmcb->save.rip += 3;
1223  
1224  		pending_event_ipi_fired = false;
1225  
1226  		vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1227  
1228  		/* Now entering again with VINTR_MASKING=1.  */
1229  		apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1230  			       APIC_DM_FIXED | 0xf1, 0);
1231  
1232  		break;
1233  
1234  	case 1:
1235  		if (pending_event_ipi_fired == true) {
1236  			report_fail("Interrupt triggered by guest");
1237  			return true;
1238  		}
1239  
1240  		sti_nop_cli();
1241  
1242  		if (pending_event_ipi_fired != true) {
1243  			report_fail("Interrupt not triggered by host");
1244  			return true;
1245  		}
1246  
1247  		break;
1248  
1249  	default:
1250  		return true;
1251  	}
1252  
1253  	inc_test_stage(test);
1254  
1255  	return get_test_stage(test) == 2;
1256  }
1257  
1258  static bool pending_event_cli_check(struct svm_test *test)
1259  {
1260  	return get_test_stage(test) == 2;
1261  }
1262  
1263  #define TIMER_VECTOR    222
1264  
1265  static volatile bool timer_fired;
1266  
1267  static void timer_isr(isr_regs_t *regs)
1268  {
1269  	timer_fired = true;
1270  	apic_write(APIC_EOI, 0);
1271  }
1272  
1273  static void interrupt_prepare(struct svm_test *test)
1274  {
1275  	default_prepare(test);
1276  	handle_irq(TIMER_VECTOR, timer_isr);
1277  	timer_fired = false;
1278  	set_test_stage(test, 0);
1279  }
1280  
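/*
 * Exercise external interrupt delivery in four configurations: direct (no
 * INTR intercept) and intercepted, first while the guest busy-waits and then
 * while it is halted.  interrupt_finished() toggles the INTR intercept and
 * V_INTR_MASKING between stages.
 */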
1281  static void interrupt_test(struct svm_test *test)
1282  {
1283  	long long start, loops;
1284  
1285  	apic_setup_timer(TIMER_VECTOR, APIC_LVT_TIMER_PERIODIC);
1286  	sti();
1287  	apic_start_timer(1000);
1288  
1289  	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1290  		asm volatile ("nop");
1291  
1292  	report_svm_guest(timer_fired, test,
1293  			 "direct interrupt while running guest");
1294  
1295  	apic_stop_timer();
1296  	cli();
1297  	vmmcall();
1298  
1299  	timer_fired = false;
1300  	apic_start_timer(1000);
1301  	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1302  		asm volatile ("nop");
1303  
1304  	report_svm_guest(timer_fired, test,
1305  			 "intercepted interrupt while running guest");
1306  
1307  	sti();
1308  	apic_stop_timer();
1309  	cli();
1310  
1311  	timer_fired = false;
1312  	start = rdtsc();
1313  	apic_start_timer(1000000);
1314  	safe_halt();
1315  
1316  	report_svm_guest(timer_fired, test, "direct interrupt + hlt");
1317  	report(rdtsc() - start > 10000, "IRQ arrived after expected delay");
1318  
1319  	apic_stop_timer();
1320  	cli();
1321  	vmmcall();
1322  
1323  	timer_fired = false;
1324  	start = rdtsc();
1325  	apic_start_timer(1000000);
1326  	asm volatile ("hlt");
1327  
1328  	report_svm_guest(timer_fired, test, "intercepted interrupt + hlt");
1329  	report(rdtsc() - start > 10000, "IRQ arrived after expected delay");
1330  
1331  	apic_cleanup_timer();
1332  }
1333  
1334  static bool interrupt_finished(struct svm_test *test)
1335  {
1336  	switch (get_test_stage(test)) {
1337  	case 0:
1338  	case 2:
1339  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1340  			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1341  				    vmcb->control.exit_code);
1342  			return true;
1343  		}
1344  		vmcb->save.rip += 3;
1345  
1346  		vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1347  		vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1348  		break;
1349  
1350  	case 1:
1351  	case 3:
1352  		if (vmcb->control.exit_code != SVM_EXIT_INTR) {
1353  			report_fail("VMEXIT not due to intr intercept. Exit reason 0x%x",
1354  				    vmcb->control.exit_code);
1355  			return true;
1356  		}
1357  
1358  		sti_nop_cli();
1359  
1360  		vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1361  		vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1362  		break;
1363  
1364  	case 4:
1365  		break;
1366  
1367  	default:
1368  		return true;
1369  	}
1370  
1371  	inc_test_stage(test);
1372  
1373  	return get_test_stage(test) == 5;
1374  }
1375  
1376  static bool interrupt_check(struct svm_test *test)
1377  {
1378  	return get_test_stage(test) == 5;
1379  }
1380  
1381  static volatile bool nmi_fired;
1382  
1383  static void nmi_handler(struct ex_regs *regs)
1384  {
1385  	nmi_fired = true;
1386  }
1387  
1388  static void nmi_prepare(struct svm_test *test)
1389  {
1390  	default_prepare(test);
1391  	nmi_fired = false;
1392  	handle_exception(NMI_VECTOR, nmi_handler);
1393  	set_test_stage(test, 0);
1394  }
1395  
1396  static void nmi_test(struct svm_test *test)
1397  {
1398  	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
1399  
1400  	report_svm_guest(nmi_fired, test, "direct NMI while running guest");
1401  
1402  	vmmcall();
1403  
1404  	nmi_fired = false;
1405  
1406  	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
1407  
1408  	report_svm_guest(nmi_fired, test, "intercepted pending NMI delivered to guest");
1409  }
1410  
1411  static bool nmi_finished(struct svm_test *test)
1412  {
1413  	switch (get_test_stage(test)) {
1414  	case 0:
1415  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1416  			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1417  				    vmcb->control.exit_code);
1418  			return true;
1419  		}
1420  		vmcb->save.rip += 3;
1421  
1422  		vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
1423  		break;
1424  
1425  	case 1:
1426  		if (vmcb->control.exit_code != SVM_EXIT_NMI) {
1427  			report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x",
1428  				    vmcb->control.exit_code);
1429  			return true;
1430  		}
1431  
1432  		report_pass("NMI intercept while running guest");
1433  		break;
1434  
1435  	case 2:
1436  		break;
1437  
1438  	default:
1439  		return true;
1440  	}
1441  
1442  	inc_test_stage(test);
1443  
1444  	return get_test_stage(test) == 3;
1445  }
1446  
1447  static bool nmi_check(struct svm_test *test)
1448  {
1449  	return get_test_stage(test) == 3;
1450  }
1451  
1452  #define NMI_DELAY 100000000ULL
1453  
1454  static void nmi_message_thread(void *_test)
1455  {
1456  	struct svm_test *test = _test;
1457  
1458  	while (get_test_stage(test) != 1)
1459  		pause();
1460  
1461  	delay(NMI_DELAY);
1462  
1463  	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
1464  
1465  	while (get_test_stage(test) != 2)
1466  		pause();
1467  
1468  	delay(NMI_DELAY);
1469  
1470  	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
1471  }
1472  
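/*
 * CPU1 sends an NMI to CPU0 after a delay while CPU0, running as the guest,
 * is halted: first with NMIs not intercepted (the NMI wakes the guest
 * directly), then with the NMI intercept enabled (the NMI forces an NMI
 * VMEXIT).
 */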
1473  static void nmi_hlt_test(struct svm_test *test)
1474  {
1475  	long long start;
1476  
1477  	on_cpu_async(1, nmi_message_thread, test);
1478  
1479  	start = rdtsc();
1480  
1481  	set_test_stage(test, 1);
1482  
1483  	asm volatile ("hlt");
1484  
1485  	report_svm_guest(nmi_fired, test, "direct NMI + hlt");
1486  	report(rdtsc() - start > NMI_DELAY, "direct NMI after expected delay");
1487  
1488  	nmi_fired = false;
1489  
1490  	vmmcall();
1491  
1492  	start = rdtsc();
1493  
1494  	set_test_stage(test, 2);
1495  
1496  	asm volatile ("hlt");
1497  
1498  	report_svm_guest(nmi_fired, test, "intercepted NMI + hlt");
1499  	report(rdtsc() - start > NMI_DELAY, "intercepted NMI after expected delay");
1500  
1501  	set_test_stage(test, 3);
1502  }
1503  
1504  static bool nmi_hlt_finished(struct svm_test *test)
1505  {
1506  	switch (get_test_stage(test)) {
1507  	case 1:
1508  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1509  			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1510  				    vmcb->control.exit_code);
1511  			return true;
1512  		}
1513  		vmcb->save.rip += 3;
1514  
1515  		vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
1516  		break;
1517  
1518  	case 2:
1519  		if (vmcb->control.exit_code != SVM_EXIT_NMI) {
1520  			report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x",
1521  				    vmcb->control.exit_code);
1522  			return true;
1523  		}
1524  
1525  		report_pass("NMI intercept while running guest");
1526  		break;
1527  
1528  	case 3:
1529  		break;
1530  
1531  	default:
1532  		return true;
1533  	}
1534  
1535  	return get_test_stage(test) == 3;
1536  }
1537  
1538  static bool nmi_hlt_check(struct svm_test *test)
1539  {
1540  	return get_test_stage(test) == 3;
1541  }
1542  
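/*
 * vNMI tests: enabling V_NMI without also intercepting real NMIs must make
 * VMRUN fail with an ERR exit; once NMI interception is enabled, setting
 * V_NMI_PENDING must inject a virtual NMI into the guest, and V_NMI_BLOCKING
 * must be clear again by the next VMEXIT.
 */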
1543  static void vnmi_prepare(struct svm_test *test)
1544  {
1545  	nmi_prepare(test);
1546  
1547  	/*
1548  	 * Disable NMI interception to start.  Enabling vNMI without
1549  	 * intercepting "real" NMIs should result in an ERR VM-Exit.
1550  	 */
1551  	vmcb->control.intercept &= ~(1ULL << INTERCEPT_NMI);
1552  	vmcb->control.int_ctl = V_NMI_ENABLE_MASK;
1553  	vmcb->control.int_vector = NMI_VECTOR;
1554  }
1555  
1556  static void vnmi_test(struct svm_test *test)
1557  {
1558  	report_svm_guest(!nmi_fired, test, "No vNMI before injection");
1559  	vmmcall();
1560  
1561  	report_svm_guest(nmi_fired, test, "vNMI delivered after injection");
1562  	vmmcall();
1563  }
1564  
1565  static bool vnmi_finished(struct svm_test *test)
1566  {
1567  	switch (get_test_stage(test)) {
1568  	case 0:
1569  		if (vmcb->control.exit_code != SVM_EXIT_ERR) {
1570  			report_fail("Wanted ERR VM-Exit, got 0x%x",
1571  				    vmcb->control.exit_code);
1572  			return true;
1573  		}
1574  		report(!nmi_fired, "vNMI enabled but NMI_INTERCEPT unset!");
1575  		vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
1576  		vmcb->save.rip += 3;
1577  		break;
1578  
1579  	case 1:
1580  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1581  			report_fail("Wanted VMMCALL VM-Exit, got 0x%x",
1582  				    vmcb->control.exit_code);
1583  			return true;
1584  		}
1585  		report(!nmi_fired, "vNMI with vector 2 not injected");
1586  		vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
1587  		vmcb->save.rip += 3;
1588  		break;
1589  
1590  	case 2:
1591  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1592  			report_fail("Wanted VMMCALL VM-Exit, got 0x%x",
1593  				    vmcb->control.exit_code);
1594  			return true;
1595  		}
1596  		if (vmcb->control.int_ctl & V_NMI_BLOCKING_MASK) {
1597  			report_fail("V_NMI_BLOCKING_MASK not cleared on VMEXIT");
1598  			return true;
1599  		}
1600  		report_pass("VNMI serviced");
1601  		vmcb->save.rip += 3;
1602  		break;
1603  
1604  	default:
1605  		return true;
1606  	}
1607  
1608  	inc_test_stage(test);
1609  
1610  	return get_test_stage(test) == 3;
1611  }
1612  
1613  static bool vnmi_check(struct svm_test *test)
1614  {
1615  	return get_test_stage(test) == 3;
1616  }
1617  
1618  static volatile int count_exc = 0;
1619  
1620  static void my_isr(struct ex_regs *r)
1621  {
1622  	count_exc++;
1623  }
1624  
1625  static void exc_inject_prepare(struct svm_test *test)
1626  {
1627  	default_prepare(test);
1628  	handle_exception(DE_VECTOR, my_isr);
1629  	handle_exception(NMI_VECTOR, my_isr);
1630  }
1631  
1632  
1633  static void exc_inject_test(struct svm_test *test)
1634  {
1635  	asm volatile ("vmmcall\n\tvmmcall\n\t");
1636  }
1637  
1638  static bool exc_inject_finished(struct svm_test *test)
1639  {
1640  	switch (get_test_stage(test)) {
1641  	case 0:
1642  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1643  			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1644  				    vmcb->control.exit_code);
1645  			return true;
1646  		}
1647  		vmcb->save.rip += 3;
1648  		vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
1649  		break;
1650  
1651  	case 1:
1652  		if (vmcb->control.exit_code != SVM_EXIT_ERR) {
1653  			report_fail("VMEXIT not due to error. Exit reason 0x%x",
1654  				    vmcb->control.exit_code);
1655  			return true;
1656  		}
1657  		report(count_exc == 0, "exception with vector 2 not injected");
1658  		vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
1659  		break;
1660  
1661  	case 2:
1662  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1663  			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1664  				    vmcb->control.exit_code);
1665  			return true;
1666  		}
1667  		vmcb->save.rip += 3;
1668  		report(count_exc == 1, "divide overflow exception injected");
1669  		report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared");
1670  		break;
1671  
1672  	default:
1673  		return true;
1674  	}
1675  
1676  	inc_test_stage(test);
1677  
1678  	return get_test_stage(test) == 3;
1679  }
1680  
1681  static bool exc_inject_check(struct svm_test *test)
1682  {
1683  	return count_exc == 1 && get_test_stage(test) == 3;
1684  }
1685  
1686  static volatile bool virq_fired;
1687  static volatile unsigned long virq_rip;
1688  
1689  static void virq_isr(isr_regs_t *regs)
1690  {
1691  	virq_fired = true;
1692  	virq_rip = regs->rip;
1693  }
1694  
1695  static void virq_inject_prepare(struct svm_test *test)
1696  {
1697  	handle_irq(0xf1, virq_isr);
1698  	default_prepare(test);
1699  	vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
1700  		(0x0f << V_INTR_PRIO_SHIFT); // Set to the highest priority
1701  	vmcb->control.int_vector = 0xf1;
1702  	virq_fired = false;
1703  	virq_rip = -1;
1704  	set_test_stage(test, 0);
1705  }
1706  
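/*
 * Guest side of the V_IRQ injection test: a V_IRQ injected without the VINTR
 * intercept fires as soon as the guest executes STI; with the VINTR intercept
 * set, the VINTR exit must be taken before the virtual interrupt is
 * delivered; and a V_IRQ whose priority is below the guest's V_TPR must
 * neither fire nor cause a VINTR exit.
 */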
1707  static void virq_inject_test(struct svm_test *test)
1708  {
1709  	report_svm_guest(!virq_fired, test, "virtual IRQ blocked after L2 cli");
1710  
1711  	sti_nop_cli();
1712  
1713  	report_svm_guest(virq_fired, test, "virtual IRQ fired after L2 sti");
1714  
1715  	vmmcall();
1716  
1717  	report_svm_guest(!virq_fired, test, "intercepted VINTR blocked after L2 cli");
1718  
1719  	sti_nop_cli();
1720  
1721  	report_svm_guest(virq_fired, test, "intercepted VINTR fired after L2 sti");
1722  
1723  	vmmcall();
1724  
1725  	sti_nop_cli();
1726  
1727  	report_svm_guest(!virq_fired, test,
1728  			  "virtual IRQ blocked when V_IRQ_PRIO is less than V_TPR");
1729  
1730  	vmmcall();
1731  	vmmcall();
1732  }
1733  
1734  static bool virq_inject_finished(struct svm_test *test)
1735  {
1736  	vmcb->save.rip += 3;
1737  
1738  	switch (get_test_stage(test)) {
1739  	case 0:
1740  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1741  			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1742  				    vmcb->control.exit_code);
1743  			return true;
1744  		}
1745  		if (vmcb->control.int_ctl & V_IRQ_MASK) {
1746  			report_fail("V_IRQ not cleared on VMEXIT after firing");
1747  			return true;
1748  		}
1749  		virq_fired = false;
1750  		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1751  		vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
1752  			(0x0f << V_INTR_PRIO_SHIFT);
1753  		break;
1754  
1755  	case 1:
1756  		if (vmcb->control.exit_code != SVM_EXIT_VINTR) {
1757  			report_fail("VMEXIT not due to vintr. Exit reason 0x%x",
1758  				    vmcb->control.exit_code);
1759  			return true;
1760  		}
1761  		if (virq_fired) {
1762  			report_fail("V_IRQ fired before SVM_EXIT_VINTR");
1763  			return true;
1764  		}
1765  		vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
1766  		break;
1767  
1768  	case 2:
1769  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1770  			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1771  				    vmcb->control.exit_code);
1772  			return true;
1773  		}
1774  		virq_fired = false;
1775  		// Set irq to lower priority
1776  		vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
1777  			(0x08 << V_INTR_PRIO_SHIFT);
1778  		// Raise guest TPR
1779  		vmcb->control.int_ctl |= 0x0a & V_TPR_MASK;
1780  		break;
1781  
1782  	case 3:
1783  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1784  			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1785  				    vmcb->control.exit_code);
1786  			return true;
1787  		}
1788  		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1789  		break;
1790  
1791  	case 4:
1792  		// INTERCEPT_VINTR should be ignored because V_INTR_PRIO < V_TPR
1793  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1794  			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1795  				    vmcb->control.exit_code);
1796  			return true;
1797  		}
1798  		break;
1799  
1800  	default:
1801  		return true;
1802  	}
1803  
1804  	inc_test_stage(test);
1805  
1806  	return get_test_stage(test) == 5;
1807  }
1808  
1809  static bool virq_inject_check(struct svm_test *test)
1810  {
1811  	return get_test_stage(test) == 5;
1812  }
1813  
1814  static void virq_inject_within_shadow_prepare(struct svm_test *test)
1815  {
1816  	virq_inject_prepare(test);
1817  	vmcb->control.int_state = SVM_INTERRUPT_SHADOW_MASK;
1818  	vmcb->save.rflags |= X86_EFLAGS_IF;
1819  }
1820  
1821  extern void virq_inject_within_shadow_test(struct svm_test *test);
1822  asm("virq_inject_within_shadow_test: nop; nop; vmmcall");
1823  
1824  static void virq_inject_within_shadow_prepare_gif_clear(struct svm_test *test)
1825  {
1826  	vmcb->save.rip = (unsigned long) test->guest_func;
1827  }
1828  
1829  static bool virq_inject_within_shadow_finished(struct svm_test *test)
1830  {
1831  	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
1832  		report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1833  			    vmcb->control.exit_code);
1834  	if (!virq_fired)
1835  		report_fail("V_IRQ did not fire");
1836  	else if (virq_rip != (unsigned long) virq_inject_within_shadow_test + 1)
1837  		report_fail("Unexpected RIP for interrupt handler");
1838  	else if (vmcb->control.int_ctl & V_IRQ_MASK)
1839  		report_fail("V_IRQ not cleared on VMEXIT after firing");
1840  	else if (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
1841  		report_fail("Interrupt shadow not cleared");
1842  	else
1843  		inc_test_stage(test);
1844  
1845  	return true;
1846  }
1847  
1848  static bool virq_inject_within_shadow_check(struct svm_test *test)
1849  {
1850  	return get_test_stage(test) == 1;
1851  }
1852  
1853  /*
1854   * Detect nested guest RIP corruption as explained in kernel commit
1855   * b6162e82aef19fee9c32cb3fe9ac30d9116a8c73
1856   *
1857   * In the assembly loop below 'ins' is executed while IO instructions
1858   * are not intercepted; the instruction is emulated by L0.
1859   *
1860   * At the same time we are getting interrupts from the local APIC timer,
1861   * and we do intercept them in L1.
1862   *
1863   * If the interrupt happens on the insb instruction, L0 will VMexit, emulate
1864   * the insb instruction and then it will inject the interrupt to L1 through
1865   * a nested VMexit.  Due to a bug, it would leave pre-emulation values of RIP,
1866   * RAX and RSP in the VMCB.
1867   *
1868   * In our intercept handler we detect the bug by checking that RIP is that of
1869   * the insb instruction, but its memory operand has already been written.
1870   * This means that insb was already executed.
1871   */
1872  
1873  static volatile int isr_cnt = 0;
1874  static volatile uint8_t io_port_var = 0xAA;
1875  extern const char insb_instruction_label[];
1876  
reg_corruption_isr(isr_regs_t * regs)1877  static void reg_corruption_isr(isr_regs_t *regs)
1878  {
1879  	isr_cnt++;
1880  	apic_write(APIC_EOI, 0);
1881  }
1882  
reg_corruption_prepare(struct svm_test * test)1883  static void reg_corruption_prepare(struct svm_test *test)
1884  {
1885  	default_prepare(test);
1886  	set_test_stage(test, 0);
1887  
1888  	vmcb->control.int_ctl = V_INTR_MASKING_MASK;
1889  	vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1890  
1891  	handle_irq(TIMER_VECTOR, reg_corruption_isr);
1892  
1893  	/* set local APIC to inject external interrupts */
1894  	apic_setup_timer(TIMER_VECTOR, APIC_LVT_TIMER_PERIODIC);
1895  	apic_start_timer(1000);
1896  }
1897  
reg_corruption_test(struct svm_test * test)1898  static void reg_corruption_test(struct svm_test *test)
1899  {
1900  	/* this is an endless loop, which is interrupted by the timer interrupt */
1901  	asm volatile (
1902  		      "1:\n\t"
1903  		      "movw $0x4d0, %%dx\n\t" // IO port
1904  		      "lea %[io_port_var], %%rdi\n\t"
1905  		      "movb $0xAA, %[io_port_var]\n\t"
1906  		      "insb_instruction_label:\n\t"
1907  		      "insb\n\t"
1908  		      "jmp 1b\n\t"
1909  
1910  		      : [io_port_var] "=m" (io_port_var)
1911  		      : /* no inputs*/
1912  		      : "rdx", "rdi"
1913  		      );
1914  }
1915  
reg_corruption_finished(struct svm_test * test)1916  static bool reg_corruption_finished(struct svm_test *test)
1917  {
1918  	if (isr_cnt == 10000) {
1919  		report_pass("No RIP corruption detected after %d timer interrupts",
1920  			    isr_cnt);
1921  		set_test_stage(test, 1);
1922  		goto cleanup;
1923  	}
1924  
1925  	if (vmcb->control.exit_code == SVM_EXIT_INTR) {
1926  
1927  		void* guest_rip = (void*)vmcb->save.rip;
1928  
1929  		sti_nop_cli();
1930  
1931  		if (guest_rip == insb_instruction_label && io_port_var != 0xAA) {
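		/*
		 * If the saved RIP still points at the insb while its memory
		 * operand has already been overwritten, L0 emulated the insb
		 * but left the pre-emulation RIP in the VMCB - the corruption
		 * described in the comment above.
		 */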
1932  			report_fail("RIP corruption detected after %d timer interrupts",
1933  				    isr_cnt);
1934  			goto cleanup;
1935  		}
1936  
1937  	}
1938  	return false;
1939  cleanup:
1940  	apic_cleanup_timer();
1941  	return true;
1942  
1943  }
1944  
reg_corruption_check(struct svm_test * test)1945  static bool reg_corruption_check(struct svm_test *test)
1946  {
1947  	return get_test_stage(test) == 1;
1948  }
1949  
get_tss_entry(void * data)1950  static void get_tss_entry(void *data)
1951  {
1952  	*((gdt_entry_t **)data) = get_tss_descr();
1953  }
1954  
1955  static int orig_cpu_count;
1956  
init_startup_prepare(struct svm_test * test)1957  static void init_startup_prepare(struct svm_test *test)
1958  {
1959  	gdt_entry_t *tss_entry;
1960  	int i;
1961  
1962  	on_cpu(1, get_tss_entry, &tss_entry);
1963  
1964  	orig_cpu_count = atomic_read(&cpu_online_count);
1965  
1966  	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT,
1967  		       id_map[1]);
1968  
1969  	delay(100000000ULL);
1970  
1971  	atomic_dec(&cpu_online_count);
1972  
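	/*
	 * The AP will reload the same TSS after the INIT/SIPI, so clear the
	 * busy bit in its descriptor to keep the ltr from faulting.
	 */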
1973  	tss_entry->type &= ~DESC_BUSY;
1974  
1975  	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_STARTUP, id_map[1]);
1976  
1977  	for (i = 0; i < 5 && atomic_read(&cpu_online_count) < orig_cpu_count; i++)
1978  		delay(100000000ULL);
1979  }
1980  
init_startup_finished(struct svm_test * test)1981  static bool init_startup_finished(struct svm_test *test)
1982  {
1983  	return true;
1984  }
1985  
init_startup_check(struct svm_test * test)1986  static bool init_startup_check(struct svm_test *test)
1987  {
1988  	return atomic_read(&cpu_online_count) == orig_cpu_count;
1989  }
1990  
1991  static volatile bool init_intercept;
1992  
init_intercept_prepare(struct svm_test * test)1993  static void init_intercept_prepare(struct svm_test *test)
1994  {
1995  	init_intercept = false;
1996  	vmcb->control.intercept |= (1ULL << INTERCEPT_INIT);
1997  }
1998  
init_intercept_test(struct svm_test * test)1999  static void init_intercept_test(struct svm_test *test)
2000  {
2001  	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 0);
2002  }
2003  
init_intercept_finished(struct svm_test * test)2004  static bool init_intercept_finished(struct svm_test *test)
2005  {
2006  	vmcb->save.rip += 3;
2007  
2008  	if (vmcb->control.exit_code != SVM_EXIT_INIT) {
2009  		report_fail("VMEXIT not due to init intercept. Exit reason 0x%x",
2010  			    vmcb->control.exit_code);
2011  
2012  		return true;
2013  	}
2014  
2015  	init_intercept = true;
2016  
2017  	report_pass("INIT to vcpu intercepted");
2018  
2019  	return true;
2020  }
2021  
init_intercept_check(struct svm_test * test)2022  static bool init_intercept_check(struct svm_test *test)
2023  {
2024  	return init_intercept;
2025  }
2026  
2027  /*
2028   * Setting host EFLAGS.TF causes a #DB trap after the VMRUN completes on the
2029   * host side (i.e., after the #VMEXIT from the guest).
2030   *
2031   * Setting host EFLAGS.RF suppresses any potential instruction breakpoint
2032   * match on the VMRUN and completion of the VMRUN instruction clears the
2033   * host EFLAGS.RF bit.
2034   *
2035   * [AMD APM]
2036   */
2037  static volatile u8 host_rflags_guest_main_flag = 0;
2038  static volatile u8 host_rflags_db_handler_flag = 0;
2039  static volatile bool host_rflags_ss_on_vmrun = false;
2040  static volatile bool host_rflags_vmrun_reached = false;
2041  static volatile bool host_rflags_set_tf = false;
2042  static volatile bool host_rflags_set_rf = false;
2043  static u64 rip_detected;
2044  
2045  extern u64 *vmrun_rip;
2046  
host_rflags_db_handler(struct ex_regs * r)2047  static void host_rflags_db_handler(struct ex_regs *r)
2048  {
2049  	if (host_rflags_ss_on_vmrun) {
2050  		if (host_rflags_vmrun_reached) {
2051  			if (!host_rflags_set_rf) {
2052  				r->rflags &= ~X86_EFLAGS_TF;
2053  				rip_detected = r->rip;
2054  			} else {
2055  				r->rflags |= X86_EFLAGS_RF;
2056  				++host_rflags_db_handler_flag;
2057  			}
2058  		} else {
2059  			if (r->rip == (u64)&vmrun_rip) {
2060  				host_rflags_vmrun_reached = true;
2061  
2062  				if (host_rflags_set_rf) {
2063  					host_rflags_guest_main_flag = 0;
2064  					rip_detected = r->rip;
2065  					r->rflags &= ~X86_EFLAGS_TF;
2066  
2067  					/* Trigger #DB via debug registers */
2068  					write_dr0((void *)&vmrun_rip);
2069  					write_dr7(0x403);
2070  				}
2071  			}
2072  		}
2073  	} else {
2074  		r->rflags &= ~X86_EFLAGS_TF;
2075  	}
2076  }
2077  
host_rflags_prepare(struct svm_test * test)2078  static void host_rflags_prepare(struct svm_test *test)
2079  {
2080  	default_prepare(test);
2081  	handle_exception(DB_VECTOR, host_rflags_db_handler);
2082  	set_test_stage(test, 0);
2083  }
2084  
host_rflags_prepare_gif_clear(struct svm_test * test)2085  static void host_rflags_prepare_gif_clear(struct svm_test *test)
2086  {
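	/*
	 * Runs with GIF clear, right before VMRUN: setting TF here makes the
	 * VMRUN itself the next single-stepped instruction.
	 */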
2087  	if (host_rflags_set_tf)
2088  		write_rflags(read_rflags() | X86_EFLAGS_TF);
2089  }
2090  
host_rflags_test(struct svm_test * test)2091  static void host_rflags_test(struct svm_test *test)
2092  {
2093  	while (1) {
2094  		if (get_test_stage(test) > 0) {
2095  			if ((host_rflags_set_tf && !host_rflags_ss_on_vmrun && !host_rflags_db_handler_flag) ||
2096  			    (host_rflags_set_rf && host_rflags_db_handler_flag == 1))
2097  				host_rflags_guest_main_flag = 1;
2098  		}
2099  
2100  		if (get_test_stage(test) == 4)
2101  			break;
2102  		vmmcall();
2103  	}
2104  }
2105  
host_rflags_finished(struct svm_test * test)2106  static bool host_rflags_finished(struct svm_test *test)
2107  {
2108  	switch (get_test_stage(test)) {
2109  	case 0:
2110  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
2111  			report_fail("Unexpected VMEXIT. Exit reason 0x%x",
2112  				    vmcb->control.exit_code);
2113  			return true;
2114  		}
2115  		vmcb->save.rip += 3;
2116  		/*
2117  		 * Setting host EFLAGS.TF not immediately before VMRUN causes a
2118  		 * #DB trap before the first guest instruction is executed.
2119  		 */
2120  		host_rflags_set_tf = true;
2121  		break;
2122  	case 1:
2123  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL ||
2124  		    host_rflags_guest_main_flag != 1) {
2125  			report_fail("Unexpected VMEXIT or #DB handler"
2126  				    " invoked before guest main. Exit reason 0x%x",
2127  				    vmcb->control.exit_code);
2128  			return true;
2129  		}
2130  		vmcb->save.rip += 3;
2131  		/*
2132  		 * Setting host EFLAGS.TF immediately before VMRUN causes a #DB
2133  		 * trap after VMRUN completes on the host side (i.e., after the
2134  		 * VMEXIT from the guest).
2135  		 */
2136  		host_rflags_ss_on_vmrun = true;
2137  		break;
2138  	case 2:
2139  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL ||
2140  		    rip_detected != (u64)&vmrun_rip + 3) {
2141  			report_fail("Unexpected VMEXIT or RIP mismatch."
2142  				    " Exit reason 0x%x, RIP actual: %lx, RIP expected: "
2143  				    "%lx", vmcb->control.exit_code,
2144  				    rip_detected, (u64)&vmrun_rip + 3);
2145  			return true;
2146  		}
2147  		host_rflags_set_rf = true;
2148  		host_rflags_guest_main_flag = 0;
2149  		host_rflags_vmrun_reached = false;
2150  		vmcb->save.rip += 3;
2151  		break;
2152  	case 3:
2153  		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL ||
2154  		    rip_detected != (u64)&vmrun_rip ||
2155  		    host_rflags_guest_main_flag != 1 ||
2156  		    host_rflags_db_handler_flag > 1 ||
2157  		    read_rflags() & X86_EFLAGS_RF) {
2158  			report_fail("Unexpected VMEXIT or RIP mismatch or "
2159  				    "EFLAGS.RF not cleared."
2160  				    " Exit reason 0x%x, RIP actual: %lx, RIP expected: "
2161  				    "%lx", vmcb->control.exit_code,
2162  				    rip_detected, (u64)&vmrun_rip);
2163  			return true;
2164  		}
2165  		host_rflags_set_tf = false;
2166  		host_rflags_set_rf = false;
2167  		vmcb->save.rip += 3;
2168  		break;
2169  	default:
2170  		return true;
2171  	}
2172  	inc_test_stage(test);
2173  	return get_test_stage(test) == 5;
2174  }
2175  
host_rflags_check(struct svm_test * test)2176  static bool host_rflags_check(struct svm_test *test)
2177  {
2178  	return get_test_stage(test) == 4;
2179  }
2180  
2181  #define TEST(name) { #name, .v2 = name }
2182  
2183  /*
2184   * v2 tests
2185   */
2186  
2187  /*
2188   * Ensure that kvm recalculates the L1 guest's CPUID.01H:ECX.OSXSAVE
2189   * after VM-exit from an L2 guest that sets CR4.OSXSAVE to a different
2190   * value than in L1.
2191   */
2192  
svm_cr4_osxsave_test_guest(struct svm_test * test)2193  static void svm_cr4_osxsave_test_guest(struct svm_test *test)
2194  {
2195  	write_cr4(read_cr4() & ~X86_CR4_OSXSAVE);
2196  }
2197  
svm_cr4_osxsave_test(void)2198  static void svm_cr4_osxsave_test(void)
2199  {
2200  	if (!this_cpu_has(X86_FEATURE_XSAVE)) {
2201  		report_skip("XSAVE not detected");
2202  		return;
2203  	}
2204  
2205  	if (!(read_cr4() & X86_CR4_OSXSAVE)) {
2206  		unsigned long cr4 = read_cr4() | X86_CR4_OSXSAVE;
2207  
2208  		write_cr4(cr4);
2209  		vmcb->save.cr4 = cr4;
2210  	}
2211  
2212  	report(this_cpu_has(X86_FEATURE_OSXSAVE), "CPUID.01H:ECX.OSXSAVE set before VMRUN");
2213  
2214  	test_set_guest(svm_cr4_osxsave_test_guest);
2215  	report(svm_vmrun() == SVM_EXIT_VMMCALL,
2216  	       "svm_cr4_osxsave_test_guest finished with VMMCALL");
2217  
2218  	report(this_cpu_has(X86_FEATURE_OSXSAVE), "CPUID.01H:ECX.OSXSAVE set after VMRUN");
2219  }
2220  
basic_guest_main(struct svm_test * test)2221  static void basic_guest_main(struct svm_test *test)
2222  {
2223  }
2224  
2225  
2226  #define SVM_TEST_REG_RESERVED_BITS(start, end, inc, str_name, reg, val,	\
2227  				   resv_mask)				\
2228  {									\
2229  	u64 tmp, mask;							\
2230  	int i;								\
2231  									\
2232  	for (i = start; i <= end; i = i + inc) {			\
2233  		mask = 1ull << i;					\
2234  		if (!(mask & resv_mask))				\
2235  			continue;					\
2236  		tmp = val | mask;					\
2237  		reg = tmp;						\
2238  		report(svm_vmrun() == SVM_EXIT_ERR, "Test %s %d:%d: %lx", \
2239  		       str_name, end, start, tmp);			\
2240  	}								\
2241  }
2242  
2243  #define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask,	\
2244  				  exit_code, test_name)			\
2245  {									\
2246  	u64 tmp, mask;							\
2247  	u32 r;								\
2248  	int i;								\
2249  									\
2250  	for (i = start; i <= end; i = i + inc) {			\
2251  		mask = 1ull << i;					\
2252  		if (!(mask & resv_mask))				\
2253  			continue;					\
2254  		tmp = val | mask;					\
2255  		switch (cr) {						\
2256  		case 0:							\
2257  			vmcb->save.cr0 = tmp;				\
2258  			break;						\
2259  		case 3:							\
2260  			vmcb->save.cr3 = tmp;				\
2261  			break;						\
2262  		case 4:							\
2263  			vmcb->save.cr4 = tmp;				\
2264  		}							\
2265  		r = svm_vmrun();					\
2266  		report(r == exit_code, "Test CR%d %s%d:%d: %lx, wanted exit 0x%x, got 0x%x", \
2267  		       cr, test_name, end, start, tmp, exit_code, r);	\
2268  	}								\
2269  }
2270  
test_efer(void)2271  static void test_efer(void)
2272  {
2273  	/*
2274  	 * Un-setting EFER.SVME is illegal
2275  	 */
2276  	u64 efer_saved = vmcb->save.efer;
2277  	u64 efer = efer_saved;
2278  
2279  	report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer);
2280  	efer &= ~EFER_SVME;
2281  	vmcb->save.efer = efer;
2282  	report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer);
2283  	vmcb->save.efer = efer_saved;
2284  
2285  	/*
2286  	 * EFER MBZ bits: 63:16, 9
2287  	 */
2288  	efer_saved = vmcb->save.efer;
2289  
2290  	SVM_TEST_REG_RESERVED_BITS(8, 9, 1, "EFER", vmcb->save.efer,
2291  				   efer_saved, SVM_EFER_RESERVED_MASK);
2292  	SVM_TEST_REG_RESERVED_BITS(16, 63, 4, "EFER", vmcb->save.efer,
2293  				   efer_saved, SVM_EFER_RESERVED_MASK);
2294  
2295  	/*
2296  	 * EFER.LME and CR0.PG are both set and CR4.PAE is zero.
2297  	 */
2298  	u64 cr0_saved = vmcb->save.cr0;
2299  	u64 cr0;
2300  	u64 cr4_saved = vmcb->save.cr4;
2301  	u64 cr4;
2302  
2303  	efer = efer_saved | EFER_LME;
2304  	vmcb->save.efer = efer;
2305  	cr0 = cr0_saved | X86_CR0_PG | X86_CR0_PE;
2306  	vmcb->save.cr0 = cr0;
2307  	cr4 = cr4_saved & ~X86_CR4_PAE;
2308  	vmcb->save.cr4 = cr4;
2309  	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
2310  	       "CR0.PG=1 (%lx) and CR4.PAE=0 (%lx)", efer, cr0, cr4);
2311  
2312  	/*
2313  	 * EFER.LME and CR0.PG are both set and CR0.PE is zero.
2314  	 * CR4.PAE needs to be set as we otherwise cannot
2315  	 * determine if CR4.PAE=0 or CR0.PE=0 triggered the
2316  	 * SVM_EXIT_ERR.
2317  	 */
2318  	cr4 = cr4_saved | X86_CR4_PAE;
2319  	vmcb->save.cr4 = cr4;
2320  	cr0 &= ~X86_CR0_PE;
2321  	vmcb->save.cr0 = cr0;
2322  	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
2323  	       "CR0.PG=1 and CR0.PE=0 (%lx)", efer, cr0);
2324  
2325  	/*
2326  	 * EFER.LME, CR0.PG, CR4.PAE, CS.L, and CS.D are all non-zero.
2327  	 */
2328  	u32 cs_attrib_saved = vmcb->save.cs.attrib;
2329  	u32 cs_attrib;
2330  
2331  	cr0 |= X86_CR0_PE;
2332  	vmcb->save.cr0 = cr0;
2333  	cs_attrib = cs_attrib_saved | SVM_SELECTOR_L_MASK |
2334  		SVM_SELECTOR_DB_MASK;
2335  	vmcb->save.cs.attrib = cs_attrib;
2336  	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
2337  	       "CR0.PG=1 (%lx), CR4.PAE=1 (%lx), CS.L=1 and CS.D=1 (%x)",
2338  	       efer, cr0, cr4, cs_attrib);
2339  
2340  	vmcb->save.cr0 = cr0_saved;
2341  	vmcb->save.cr4 = cr4_saved;
2342  	vmcb->save.efer = efer_saved;
2343  	vmcb->save.cs.attrib = cs_attrib_saved;
2344  }
2345  
test_cr0(void)2346  static void test_cr0(void)
2347  {
2348  	/*
2349  	 * Un-setting CR0.CD and setting CR0.NW is illegal combination
2350  	 * Un-setting CR0.CD and setting CR0.NW is an illegal combination.
2351  	u64 cr0_saved = vmcb->save.cr0;
2352  	u64 cr0 = cr0_saved;
2353  
2354  	cr0 |= X86_CR0_CD;
2355  	cr0 &= ~X86_CR0_NW;
2356  	vmcb->save.cr0 = cr0;
2357  	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=0: %lx",
2358  		cr0);
2359  	cr0 |= X86_CR0_NW;
2360  	vmcb->save.cr0 = cr0;
2361  	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=1: %lx",
2362  		cr0);
2363  	cr0 &= ~X86_CR0_NW;
2364  	cr0 &= ~X86_CR0_CD;
2365  	vmcb->save.cr0 = cr0;
2366  	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=0: %lx",
2367  		cr0);
2368  	cr0 |= X86_CR0_NW;
2369  	vmcb->save.cr0 = cr0;
2370  	report (svm_vmrun() == SVM_EXIT_ERR, "Test CR0 CD=0,NW=1: %lx",
2371  		cr0);
2372  	vmcb->save.cr0 = cr0_saved;
2373  
2374  	/*
2375  	 * CR0[63:32] are not zero
2376  	 */
2377  	cr0 = cr0_saved;
2378  
2379  	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "CR0", vmcb->save.cr0, cr0_saved,
2380  				   SVM_CR0_RESERVED_MASK);
2381  	vmcb->save.cr0 = cr0_saved;
2382  }
2383  
test_cr3(void)2384  static void test_cr3(void)
2385  {
2386  	/*
2387  	 * CR3 MBZ bits based on different modes:
2388  	 *   [63:52] - long mode
2389  	 */
2390  	u64 cr3_saved = vmcb->save.cr3;
2391  
2392  	SVM_TEST_CR_RESERVED_BITS(0, 63, 1, 3, cr3_saved,
2393  				  SVM_CR3_LONG_MBZ_MASK, SVM_EXIT_ERR, "");
2394  
2395  	vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_MBZ_MASK;
2396  	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
2397  	       vmcb->save.cr3);
2398  
2399  	/*
2400  	 * CR3 non-MBZ reserved bits based on different modes:
2401  	 *   [11:5] [2:0] - long mode (PCIDE=0)
2402  	 *          [2:0] - PAE legacy mode
2403  	 */
2404  	u64 cr4_saved = vmcb->save.cr4;
2405  	u64 *pdpe = npt_get_pml4e();
2406  
2407  	/*
2408  	 * Long mode
2409  	 */
2410  	if (this_cpu_has(X86_FEATURE_PCID)) {
2411  		vmcb->save.cr4 = cr4_saved | X86_CR4_PCIDE;
2412  		SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved,
2413  					  SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_VMMCALL, "(PCIDE=1) ");
2414  
2415  		vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK;
2416  		report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
2417  		       vmcb->save.cr3);
2418  	}
2419  
2420  	vmcb->save.cr4 = cr4_saved & ~X86_CR4_PCIDE;
2421  
2422  	if (!npt_supported())
2423  		goto skip_npt_only;
2424  
2425  	/* Clear P (Present) bit in NPT in order to trigger #NPF */
2426  	pdpe[0] &= ~1ULL;
2427  
2428  	SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved,
2429  				  SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_NPF, "(PCIDE=0) ");
2430  
2431  	pdpe[0] |= 1ULL;
2432  	vmcb->save.cr3 = cr3_saved;
2433  
2434  	/*
2435  	 * PAE legacy
2436  	 */
2437  	pdpe[0] &= ~1ULL;
2438  	vmcb->save.cr4 = cr4_saved | X86_CR4_PAE;
2439  	SVM_TEST_CR_RESERVED_BITS(0, 2, 1, 3, cr3_saved,
2440  				  SVM_CR3_PAE_LEGACY_RESERVED_MASK, SVM_EXIT_NPF, "(PAE) ");
2441  
2442  	pdpe[0] |= 1ULL;
2443  
2444  skip_npt_only:
2445  	vmcb->save.cr3 = cr3_saved;
2446  	vmcb->save.cr4 = cr4_saved;
2447  }
2448  
2449  /* Test CR4 MBZ bits based on legacy or long modes */
test_cr4(void)2450  static void test_cr4(void)
2451  {
2452  	u64 cr4_saved = vmcb->save.cr4;
2453  	u64 efer_saved = vmcb->save.efer;
2454  	u64 efer = efer_saved;
2455  
2456  	efer &= ~EFER_LME;
2457  	vmcb->save.efer = efer;
2458  	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
2459  				  SVM_CR4_LEGACY_RESERVED_MASK, SVM_EXIT_ERR, "");
2460  
2461  	efer |= EFER_LME;
2462  	vmcb->save.efer = efer;
2463  	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
2464  				  SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, "");
2465  	SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved,
2466  				  SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, "");
2467  
2468  	vmcb->save.cr4 = cr4_saved;
2469  	vmcb->save.efer = efer_saved;
2470  }
2471  
test_dr(void)2472  static void test_dr(void)
2473  {
2474  	/*
2475  	 * DR6[63:32] and DR7[63:32] are MBZ
2476  	 */
2477  	u64 dr_saved = vmcb->save.dr6;
2478  
2479  	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR6", vmcb->save.dr6, dr_saved,
2480  				   SVM_DR6_RESERVED_MASK);
2481  	vmcb->save.dr6 = dr_saved;
2482  
2483  	dr_saved = vmcb->save.dr7;
2484  	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR7", vmcb->save.dr7, dr_saved,
2485  				   SVM_DR7_RESERVED_MASK);
2486  
2487  	vmcb->save.dr7 = dr_saved;
2488  }
2489  
2490  /* TODO: verify if high 32-bits are sign- or zero-extended on bare metal */
2491  #define	TEST_BITMAP_ADDR(saved_intercept, type, addr, exit_code,		\
2492  			 msg) {						\
2493  		vmcb->control.intercept = saved_intercept | 1ULL << type; \
2494  		if (type == INTERCEPT_MSR_PROT)				\
2495  			vmcb->control.msrpm_base_pa = addr;		\
2496  		else							\
2497  			vmcb->control.iopm_base_pa = addr;		\
2498  		report(svm_vmrun() == exit_code,			\
2499  		       "Test %s address: %lx", msg, addr);		\
2500  	}
2501  
2502  /*
2503   * If the MSR or IOIO intercept table extends to a physical address that
2504   * is greater than or equal to the maximum supported physical address, the
2505   * guest state is illegal.
2506   *
2507   * The VMRUN instruction ignores the lower 12 bits of the address specified
2508   * in the VMCB.
2509   *
2510   * MSRPM spans 2 contiguous 4KB pages while IOPM spans 2 contiguous 4KB
2511   * pages + 1 byte.
2512   *
2513   * [APM vol 2]
2514   *
2515   * Note: Unallocated MSRPM addresses that conform to the consistency checks generate
2516   * #NPF.
2517   */
test_msrpm_iopm_bitmap_addrs(void)2518  static void test_msrpm_iopm_bitmap_addrs(void)
2519  {
2520  	u64 saved_intercept = vmcb->control.intercept;
2521  	u64 addr_beyond_limit = 1ull << cpuid_maxphyaddr();
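	/*
	 * VMRUN ignores the low 12 bits of the bitmap address, so a
	 * 4KB-aligned MSRPM address serves as the known-good case.
	 */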
2522  	u64 addr = virt_to_phys(msr_bitmap) & (~((1ull << 12) - 1));
2523  
2524  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT,
2525  			 addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR,
2526  			 "MSRPM");
2527  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT,
2528  			 addr_beyond_limit - 2 * PAGE_SIZE + 1, SVM_EXIT_ERR,
2529  			 "MSRPM");
2530  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT,
2531  			 addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR,
2532  			 "MSRPM");
2533  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr,
2534  			 SVM_EXIT_VMMCALL, "MSRPM");
2535  	addr |= (1ull << 12) - 1;
2536  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr,
2537  			 SVM_EXIT_VMMCALL, "MSRPM");
2538  
2539  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2540  			 addr_beyond_limit - 4 * PAGE_SIZE, SVM_EXIT_VMMCALL,
2541  			 "IOPM");
2542  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2543  			 addr_beyond_limit - 3 * PAGE_SIZE, SVM_EXIT_VMMCALL,
2544  			 "IOPM");
2545  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2546  			 addr_beyond_limit - 2 * PAGE_SIZE - 2, SVM_EXIT_VMMCALL,
2547  			 "IOPM");
2548  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2549  			 addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR,
2550  			 "IOPM");
2551  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2552  			 addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR,
2553  			 "IOPM");
2554  	addr = virt_to_phys(io_bitmap) & (~((1ull << 11) - 1));
2555  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr,
2556  			 SVM_EXIT_VMMCALL, "IOPM");
2557  	addr |= (1ull << 12) - 1;
2558  	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr,
2559  			 SVM_EXIT_VMMCALL, "IOPM");
2560  
2561  	vmcb->control.intercept = saved_intercept;
2562  }
2563  
2564  /*
2565   * Unlike VMSAVE, VMRUN seems not to update the value of noncanonical
2566   * segment bases in the VMCB.  However, VMENTRY succeeds as documented.
2567   */
2568  #define TEST_CANONICAL_VMRUN(seg_base, msg)				\
2569  	saved_addr = seg_base;						\
2570  	seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \
2571  	return_value = svm_vmrun();					\
2572  	report(return_value == SVM_EXIT_VMMCALL,			\
2573  	       "Successful VMRUN with noncanonical %s.base", msg);	\
2574  	seg_base = saved_addr;
2575  
2576  
2577  #define TEST_CANONICAL_VMLOAD(seg_base, msg)				\
2578  	saved_addr = seg_base;						\
2579  	seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \
2580  	asm volatile ("vmload %0" : : "a"(vmcb_phys) : "memory");	\
2581  	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");	\
2582  	report(is_canonical(seg_base),					\
2583  	       "Test %s.base for canonical form: %lx", msg, seg_base);	\
2584  	seg_base = saved_addr;
2585  
test_canonicalization(void)2586  static void test_canonicalization(void)
2587  {
2588  	u64 saved_addr;
2589  	u64 return_value;
2590  	u64 addr_limit;
2591  	u64 vmcb_phys = virt_to_phys(vmcb);
2592  
2593  	addr_limit = (this_cpu_has(X86_FEATURE_LA57)) ? 57 : 48;
2594  	u64 noncanonical_mask = NONCANONICAL & ~((1ul << addr_limit) - 1);
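	/*
	 * Each test keeps the implemented (low) address bits of the base and
	 * replaces everything above the virtual-address limit with
	 * noncanonical bits.
	 */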
2595  
2596  	TEST_CANONICAL_VMLOAD(vmcb->save.fs.base, "FS");
2597  	TEST_CANONICAL_VMLOAD(vmcb->save.gs.base, "GS");
2598  	TEST_CANONICAL_VMLOAD(vmcb->save.ldtr.base, "LDTR");
2599  	TEST_CANONICAL_VMLOAD(vmcb->save.tr.base, "TR");
2600  	TEST_CANONICAL_VMLOAD(vmcb->save.kernel_gs_base, "KERNEL GS");
2601  	TEST_CANONICAL_VMRUN(vmcb->save.es.base, "ES");
2602  	TEST_CANONICAL_VMRUN(vmcb->save.cs.base, "CS");
2603  	TEST_CANONICAL_VMRUN(vmcb->save.ss.base, "SS");
2604  	TEST_CANONICAL_VMRUN(vmcb->save.ds.base, "DS");
2605  	TEST_CANONICAL_VMRUN(vmcb->save.gdtr.base, "GDTR");
2606  	TEST_CANONICAL_VMRUN(vmcb->save.idtr.base, "IDTR");
2607  }
2608  
2609  /*
2610   * When VMRUN loads a guest value of 1 in EFLAGS.TF, that value does not
2611   * cause a trace trap between the VMRUN and the first guest instruction, but
2612   * rather after completion of the first guest instruction.
2613   *
2614   * [APM vol 2]
2615   */
2616  u64 guest_rflags_test_trap_rip;
2617  
guest_rflags_test_db_handler(struct ex_regs * r)2618  static void guest_rflags_test_db_handler(struct ex_regs *r)
2619  {
2620  	guest_rflags_test_trap_rip = r->rip;
2621  	r->rflags &= ~X86_EFLAGS_TF;
2622  }
2623  
svm_guest_state_test(void)2624  static void svm_guest_state_test(void)
2625  {
2626  	test_set_guest(basic_guest_main);
2627  	test_efer();
2628  	test_cr0();
2629  	test_cr3();
2630  	test_cr4();
2631  	test_dr();
2632  	test_msrpm_iopm_bitmap_addrs();
2633  	test_canonicalization();
2634  }
2635  
2636  extern void guest_rflags_test_guest(struct svm_test *test);
2637  extern u64 *insn2;
2638  extern u64 *guest_end;
2639  
2640  asm("guest_rflags_test_guest:\n\t"
2641      "push %rbp\n\t"
2642      ".global insn2\n\t"
2643      "insn2:\n\t"
2644      "mov %rsp,%rbp\n\t"
2645      "vmmcall\n\t"
2646      "vmmcall\n\t"
2647      ".global guest_end\n\t"
2648      "guest_end:\n\t"
2649      "vmmcall\n\t"
2650      "pop %rbp\n\t"
2651      "ret");
2652  
svm_test_singlestep(void)2653  static void svm_test_singlestep(void)
2654  {
2655  	handle_exception(DB_VECTOR, guest_rflags_test_db_handler);
2656  
2657  	/*
2658  	 * Trap expected after completion of first guest instruction
2659  	 */
2660  	vmcb->save.rflags |= X86_EFLAGS_TF;
2661  	report (__svm_vmrun((u64)guest_rflags_test_guest) == SVM_EXIT_VMMCALL &&
2662  		guest_rflags_test_trap_rip == (u64)&insn2,
2663  		"Test EFLAGS.TF on VMRUN: trap expected after completion of first guest instruction");
2664  	/*
2665  	 * No trap expected
2666  	 */
2667  	guest_rflags_test_trap_rip = 0;
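	/* Resume after the 3-byte vmmcall that caused the previous exit. */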
2668  	vmcb->save.rip += 3;
2669  	vmcb->save.rflags |= X86_EFLAGS_TF;
2670  	report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL &&
2671  		guest_rflags_test_trap_rip == 0, "Test EFLAGS.TF on VMRUN: trap not expected");
2672  
2673  	/*
2674  	 * Let guest finish execution
2675  	 */
2676  	vmcb->save.rip += 3;
2677  	report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL &&
2678  		vmcb->save.rip == (u64)&guest_end, "Test EFLAGS.TF on VMRUN: guest execution completion");
2679  }
2680  
2681  static bool volatile svm_errata_reproduced = false;
2682  static unsigned long volatile physical = 0;
2683  
2684  
2685  /*
2686   *
2687   * Test the following errata:
2688   * If VMRUN/VMSAVE/VMLOAD is attempted by the nested guest,
2689   * the CPU first checks EAX against host reserved memory
2690   * regions (so far only SMM_ADDR/SMM_MASK are known to cause it),
2691   * and only then signals the #VMEXIT.
2692   *
2693   * Try to reproduce this by trying vmsave on each possible 4K aligned memory
2694   * address in the low 4G where the SMM area has to reside.
2695   */
2696  
gp_isr(struct ex_regs * r)2697  static void gp_isr(struct ex_regs *r)
2698  {
2699  	svm_errata_reproduced = true;
2700  	/* skip over the vmsave instruction */
2701  	r->rip += 3;
2702  }
2703  
svm_vmrun_errata_test(void)2704  static void svm_vmrun_errata_test(void)
2705  {
2706  	unsigned long *last_page = NULL;
2707  
2708  	handle_exception(GP_VECTOR, gp_isr);
2709  
2710  	while (!svm_errata_reproduced) {
2711  
2712  		unsigned long *page = alloc_pages(1);
2713  
2714  		if (!page) {
2715  			report_pass("All guest memory tested, no bug found");
2716  			break;
2717  		}
2718  
2719  		physical = virt_to_phys(page);
2720  
2721  		asm volatile (
2722  			      "mov %[_physical], %%rax\n\t"
2723  			      "vmsave %%rax\n\t"
2724  
2725  			      : [_physical] "=m" (physical)
2726  			      : /* no inputs*/
2727  			      : "rax" /*clobbers*/
2728  			      );
2729  
2730  		if (svm_errata_reproduced) {
2731  			report_fail("Got #GP exception - svm errata reproduced at 0x%lx",
2732  				    physical);
2733  			break;
2734  		}
2735  
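		/* Chain the tested pages into a list so they can all be freed below. */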
2736  		*page = (unsigned long)last_page;
2737  		last_page = page;
2738  	}
2739  
2740  	while (last_page) {
2741  		unsigned long *page = last_page;
2742  		last_page = (unsigned long *)*last_page;
2743  		free_pages_by_order(page, 1);
2744  	}
2745  }
2746  
vmload_vmsave_guest_main(struct svm_test * test)2747  static void vmload_vmsave_guest_main(struct svm_test *test)
2748  {
2749  	u64 vmcb_phys = virt_to_phys(vmcb);
2750  
2751  	asm volatile ("vmload %0" : : "a"(vmcb_phys));
2752  	asm volatile ("vmsave %0" : : "a"(vmcb_phys));
2753  }
2754  
svm_vmload_vmsave(void)2755  static void svm_vmload_vmsave(void)
2756  {
2757  	u64 intercept_saved = vmcb->control.intercept;
2758  
2759  	test_set_guest(vmload_vmsave_guest_main);
2760  
2761  	/*
2762  	 * Disabling the intercepts for VMLOAD and VMSAVE doesn't cause
2763  	 * the respective #VMEXITs to the host.
2764  	 */
2765  	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
2766  	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
2767  	svm_vmrun();
2768  	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
2769  	       "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
2770  
2771  	/*
2772  	 * Enabling the intercept for VMLOAD or VMSAVE causes the respective
2773  	 * #VMEXIT to the host.
2774  	 */
2775  	vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD);
2776  	svm_vmrun();
2777  	report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test "
2778  	       "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT");
2779  	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
2780  	vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE);
2781  	svm_vmrun();
2782  	report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test "
2783  	       "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT");
2784  	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
2785  	svm_vmrun();
2786  	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
2787  	       "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
2788  
2789  	vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD);
2790  	svm_vmrun();
2791  	report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test "
2792  	       "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT");
2793  	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
2794  	svm_vmrun();
2795  	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
2796  	       "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
2797  
2798  	vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE);
2799  	svm_vmrun();
2800  	report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test "
2801  	       "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT");
2802  	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
2803  	svm_vmrun();
2804  	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
2805  	       "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
2806  
2807  	vmcb->control.intercept = intercept_saved;
2808  }
2809  
prepare_vgif_enabled(struct svm_test * test)2810  static void prepare_vgif_enabled(struct svm_test *test)
2811  {
2812  	default_prepare(test);
2813  }
2814  
test_vgif(struct svm_test * test)2815  static void test_vgif(struct svm_test *test)
2816  {
2817  	asm volatile ("vmmcall\n\tstgi\n\tvmmcall\n\tclgi\n\tvmmcall\n\t");
2818  }
2819  
vgif_finished(struct svm_test * test)2820  static bool vgif_finished(struct svm_test *test)
2821  {
2822  	switch (get_test_stage(test))
2823  		{
2824  		case 0:
2825  			if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
2826  				report_fail("VMEXIT not due to vmmcall.");
2827  				return true;
2828  			}
2829  			vmcb->control.int_ctl |= V_GIF_ENABLED_MASK;
2830  			vmcb->save.rip += 3;
2831  			inc_test_stage(test);
2832  			break;
2833  		case 1:
2834  			if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
2835  				report_fail("VMEXIT not due to vmmcall.");
2836  				return true;
2837  			}
2838  			if (!(vmcb->control.int_ctl & V_GIF_MASK)) {
2839  				report_fail("Failed to set VGIF when executing STGI.");
2840  				vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK;
2841  				return true;
2842  			}
2843  			report_pass("STGI set VGIF bit.");
2844  			vmcb->save.rip += 3;
2845  			inc_test_stage(test);
2846  			break;
2847  		case 2:
2848  			if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
2849  				report_fail("VMEXIT not due to vmmcall.");
2850  				return true;
2851  			}
2852  			if (vmcb->control.int_ctl & V_GIF_MASK) {
2853  				report_fail("Failed to clear VGIF when executing CLGI.");
2854  				vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK;
2855  				return true;
2856  			}
2857  			report_pass("CLGI cleared VGIF bit.");
2858  			vmcb->save.rip += 3;
2859  			inc_test_stage(test);
2860  			vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK;
2861  			break;
2862  		default:
2863  			return true;
2864  			break;
2865  		}
2866  
2867  	return get_test_stage(test) == 3;
2868  }
2869  
vgif_check(struct svm_test * test)2870  static bool vgif_check(struct svm_test *test)
2871  {
2872  	return get_test_stage(test) == 3;
2873  }
2874  
2875  
2876  static int pause_test_counter;
2877  static int wait_counter;
2878  
pause_filter_test_guest_main(struct svm_test * test)2879  static void pause_filter_test_guest_main(struct svm_test *test)
2880  {
2881  	int i;
2882  	for (i = 0 ; i < pause_test_counter ; i++)
2883  		pause();
2884  
2885  	if (!wait_counter)
2886  		return;
2887  
2888  	for (i = 0; i < wait_counter; i++)
2889  		;
2890  
2891  	for (i = 0 ; i < pause_test_counter ; i++)
2892  		pause();
2893  
2894  }
2895  
pause_filter_run_test(int pause_iterations,int filter_value,int wait_iterations,int threshold)2896  static void pause_filter_run_test(int pause_iterations, int filter_value, int wait_iterations, int threshold)
2897  {
2898  	test_set_guest(pause_filter_test_guest_main);
2899  
2900  	pause_test_counter = pause_iterations;
2901  	wait_counter = wait_iterations;
2902  
2903  	vmcb->control.pause_filter_count = filter_value;
2904  	vmcb->control.pause_filter_thresh = threshold;
2905  	svm_vmrun();
2906  
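	/*
	 * A PAUSE vmexit is expected whenever the filter count can be
	 * exhausted: either a single burst contains at least filter_value
	 * PAUSEs, or the wait between the two bursts is below the threshold
	 * so the count is not reset in between.
	 */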
2907  	if (filter_value <= pause_iterations || wait_iterations < threshold)
2908  		report(vmcb->control.exit_code == SVM_EXIT_PAUSE, "expected PAUSE vmexit");
2909  	else
2910  		report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "no PAUSE vmexit expected");
2911  }
2912  
pause_filter_test(void)2913  static void pause_filter_test(void)
2914  {
2915  	if (!pause_filter_supported()) {
2916  		report_skip("PAUSE filter not supported in the guest");
2917  		return;
2918  	}
2919  
2920  	vmcb->control.intercept |= (1 << INTERCEPT_PAUSE);
2921  
2922  	// filter count smaller than the pause count - PAUSE VMexit expected
2923  	pause_filter_run_test(10, 9, 0, 0);
2924  
2925  	// filter count larger than the pause count - no VMexit
2926  	pause_filter_run_test(20, 21, 0, 0);
2927  
2928  
2929  	if (pause_threshold_supported()) {
2930  		// filter count larger than the pause count, and the wait between the
2931  		// pause bursts exceeds the threshold so the filter counter resets - no VMexit
2932  		pause_filter_run_test(20, 21, 1000, 10);
2933  
2934  		// filter count larger than the pause count, but the wait between the
2935  		// pause bursts is below the threshold so the counter doesn't reset - PAUSE VMexit expected
2936  		pause_filter_run_test(20, 21, 10, 1000);
2937  	} else {
2938  		report_skip("PAUSE threshold not supported in the guest");
2939  		return;
2940  	}
2941  }
2942  
2943  /* If CR0.TS and CR0.EM are cleared in L2, no #NM is generated. */
svm_no_nm_test(void)2944  static void svm_no_nm_test(void)
2945  {
2946  	write_cr0(read_cr0() & ~X86_CR0_TS);
2947  	test_set_guest((test_guest_func)fnop);
2948  
2949  	vmcb->save.cr0 = vmcb->save.cr0 & ~(X86_CR0_TS | X86_CR0_EM);
2950  	report(svm_vmrun() == SVM_EXIT_VMMCALL,
2951  	       "fnop with CR0.TS and CR0.EM unset no #NM exception");
2952  }
2953  
amd_get_lbr_rip(u32 msr)2954  static u64 amd_get_lbr_rip(u32 msr)
2955  {
2956  	return rdmsr(msr) & ~AMD_LBR_RECORD_MISPREDICT;
2957  }
2958  
2959  #define HOST_CHECK_LBR(from_expected, to_expected)					\
2960  do {											\
2961  	TEST_EXPECT_EQ((u64)from_expected, amd_get_lbr_rip(MSR_IA32_LASTBRANCHFROMIP));	\
2962  	TEST_EXPECT_EQ((u64)to_expected, amd_get_lbr_rip(MSR_IA32_LASTBRANCHTOIP));	\
2963  } while (0)
2964  
2965  /*
2966   * FIXME: Do something other than generate an exception to communicate failure.
2967   * Debugging without expected vs. actual is an absolute nightmare.
2968   */
2969  #define GUEST_CHECK_LBR(from_expected, to_expected)				\
2970  do {										\
2971  	if ((u64)(from_expected) != amd_get_lbr_rip(MSR_IA32_LASTBRANCHFROMIP))	\
2972  		asm volatile("ud2");						\
2973  	if ((u64)(to_expected) != amd_get_lbr_rip(MSR_IA32_LASTBRANCHTOIP))	\
2974  		asm volatile("ud2");						\
2975  } while (0)
2976  
2977  #define REPORT_GUEST_LBR_ERROR(vmcb)						\
2978  	report(false, "LBR guest test failed.  Exit reason 0x%x, RIP = %lx, from = %lx, to = %lx, ex from = %lx, ex to = %lx", \
2979  		       vmcb->control.exit_code, vmcb->save.rip,			\
2980  		       vmcb->save.br_from, vmcb->save.br_to,			\
2981  		       vmcb->save.last_excp_from, vmcb->save.last_excp_to)
2982  
2983  #define DO_BRANCH(branch_name)				\
2984  	asm volatile (					\
2985  		      # branch_name "_from:"		\
2986  		      "jmp " # branch_name  "_to\n"	\
2987  		      "nop\n"				\
2988  		      "nop\n"				\
2989  		      # branch_name  "_to:"		\
2990  		      "nop\n"				\
2991  		       )
2992  
2993  
2994  extern u64 guest_branch0_from, guest_branch0_to;
2995  extern u64 guest_branch2_from, guest_branch2_to;
2996  
2997  extern u64 host_branch0_from, host_branch0_to;
2998  extern u64 host_branch2_from, host_branch2_to;
2999  extern u64 host_branch3_from, host_branch3_to;
3000  extern u64 host_branch4_from, host_branch4_to;
3001  
3002  u64 dbgctl;
3003  
svm_lbrv_test_guest1(void)3004  static void svm_lbrv_test_guest1(void)
3005  {
3006  	/*
3007  	 * This guest expects the LBR to be already enabled when it starts,
3008  	 * it does a branch, and then disables the LBR and then checks.
3009  	 */
3010  
3011  	DO_BRANCH(guest_branch0);
3012  
3013  	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3014  	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3015  
3016  	if (dbgctl != DEBUGCTLMSR_LBR)
3017  		asm volatile("ud2\n");
3018  	if (rdmsr(MSR_IA32_DEBUGCTLMSR) != 0)
3019  		asm volatile("ud2\n");
3020  
3021  	GUEST_CHECK_LBR(&guest_branch0_from, &guest_branch0_to);
3022  	asm volatile ("vmmcall\n");
3023  }
3024  
svm_lbrv_test_guest2(void)3025  static void svm_lbrv_test_guest2(void)
3026  {
3027  	/*
3028  	 * This guest expects the LBR to be disabled when it starts,
3029  	 * enables it, does a branch, disables it and then checks.
3030  	 */
3031  
3032  	DO_BRANCH(guest_branch1);
3033  	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3034  
3035  	if (dbgctl != 0)
3036  		asm volatile("ud2\n");
3037  
3038  	GUEST_CHECK_LBR(&host_branch2_from, &host_branch2_to);
3039  
3040  	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3041  	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3042  	DO_BRANCH(guest_branch2);
3043  	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3044  
3045  	if (dbgctl != DEBUGCTLMSR_LBR)
3046  		asm volatile("ud2\n");
3047  	GUEST_CHECK_LBR(&guest_branch2_from, &guest_branch2_to);
3048  
3049  	asm volatile ("vmmcall\n");
3050  }
3051  
svm_lbrv_test0(void)3052  static void svm_lbrv_test0(void)
3053  {
3054  	report(true, "Basic LBR test");
3055  	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3056  	DO_BRANCH(host_branch0);
3057  	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3058  	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3059  
3060  	TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR);
3061  	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3062  	TEST_EXPECT_EQ(dbgctl, 0);
3063  
3064  	HOST_CHECK_LBR(&host_branch0_from, &host_branch0_to);
3065  }
3066  
svm_lbrv_test1(void)3067  static void svm_lbrv_test1(void)
3068  {
3069  	report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host (1)");
3070  
3071  	svm_setup_vmrun((u64)svm_lbrv_test_guest1);
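	/* LBR virtualization stays disabled: the guest and the host share the LBR MSRs. */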
3072  	vmcb->control.virt_ext = 0;
3073  
3074  	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3075  	DO_BRANCH(host_branch1);
3076  	SVM_BARE_VMRUN;
3077  	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3078  
3079  	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
3080  		REPORT_GUEST_LBR_ERROR(vmcb);
3081  		return;
3082  	}
3083  
3084  	TEST_EXPECT_EQ(dbgctl, 0);
3085  	HOST_CHECK_LBR(&guest_branch0_from, &guest_branch0_to);
3086  }
3087  
svm_lbrv_test2(void)3088  static void svm_lbrv_test2(void)
3089  {
3090  	report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host (2)");
3091  
3092  	svm_setup_vmrun((u64)svm_lbrv_test_guest2);
3093  	vmcb->control.virt_ext = 0;
3094  
3095  	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3096  	DO_BRANCH(host_branch2);
3097  	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3098  	SVM_BARE_VMRUN;
3099  	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3100  	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3101  
3102  	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
3103  		REPORT_GUEST_LBR_ERROR(vmcb);
3104  		return;
3105  	}
3106  
3107  	TEST_EXPECT_EQ(dbgctl, 0);
3108  	HOST_CHECK_LBR(&guest_branch2_from, &guest_branch2_to);
3109  }
3110  
svm_lbrv_nested_test1(void)3111  static void svm_lbrv_nested_test1(void)
3112  {
3113  	if (!lbrv_supported()) {
3114  		report_skip("LBRV not supported in the guest");
3115  		return;
3116  	}
3117  
3118  	report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (1)");
3119  	svm_setup_vmrun((u64)svm_lbrv_test_guest1);
3120  	vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK;
3121  	vmcb->save.dbgctl = DEBUGCTLMSR_LBR;
3122  
3123  	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3124  	DO_BRANCH(host_branch3);
3125  	SVM_BARE_VMRUN;
3126  	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3127  	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3128  
3129  	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
3130  		REPORT_GUEST_LBR_ERROR(vmcb);
3131  		return;
3132  	}
3133  
3134  	if (vmcb->save.dbgctl != 0) {
3135  		report(false, "unexpected virtual guest MSR_IA32_DEBUGCTLMSR value 0x%lx", vmcb->save.dbgctl);
3136  		return;
3137  	}
3138  
3139  	TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR);
3140  	HOST_CHECK_LBR(&host_branch3_from, &host_branch3_to);
3141  }
3142  
svm_lbrv_nested_test2(void)3143  static void svm_lbrv_nested_test2(void)
3144  {
3145  	if (!lbrv_supported()) {
3146  		report_skip("LBRV not supported in the guest");
3147  		return;
3148  	}
3149  
3150  	report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (2)");
3151  	svm_setup_vmrun((u64)svm_lbrv_test_guest2);
3152  	vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK;
3153  
3154  	vmcb->save.dbgctl = 0;
3155  	vmcb->save.br_from = (u64)&host_branch2_from;
3156  	vmcb->save.br_to = (u64)&host_branch2_to;
3157  
3158  	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3159  	DO_BRANCH(host_branch4);
3160  	SVM_BARE_VMRUN;
3161  	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3162  	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3163  
3164  	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
3165  		REPORT_GUEST_LBR_ERROR(vmcb);
3166  		return;
3167  	}
3168  
3169  	TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR);
3170  	HOST_CHECK_LBR(&host_branch4_from, &host_branch4_to);
3171  }
3172  
3173  
3174  // Test that a nested guest which enables INTR interception
3175  // but doesn't enable virtual interrupt masking works.
3176  
3177  static volatile int dummy_isr_recevied;
dummy_isr(isr_regs_t * regs)3178  static void dummy_isr(isr_regs_t *regs)
3179  {
3180  	dummy_isr_recevied++;
3181  	eoi();
3182  }
3183  
3184  
3185  static volatile int nmi_recevied;
dummy_nmi_handler(struct ex_regs * regs)3186  static void dummy_nmi_handler(struct ex_regs *regs)
3187  {
3188  	nmi_recevied++;
3189  }
3190  
3191  
svm_intr_intercept_mix_run_guest(volatile int * counter,int expected_vmexit)3192  static void svm_intr_intercept_mix_run_guest(volatile int *counter, int expected_vmexit)
3193  {
3194  	if (counter)
3195  		*counter = 0;
3196  
3197  	sti();  // host IF value should not matter
3198  	clgi(); // vmrun will set back GIF to 1
3199  
3200  	svm_vmrun();
3201  
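	/* GIF is still clear on the host here, so a pending event must not have been delivered yet. */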
3202  	if (counter)
3203  		report(!*counter, "No interrupt expected");
3204  
3205  	stgi();
3206  
3207  	if (counter)
3208  		report(*counter == 1, "Interrupt is expected");
3209  
3210  	report(vmcb->control.exit_code == expected_vmexit,
3211  	       "Wanted VM-Exit reason 0x%x, got 0x%x",
3212  	       expected_vmexit, vmcb->control.exit_code);
3213  	report(vmcb->save.rflags & X86_EFLAGS_IF, "Guest should have EFLAGS.IF set now");
3214  	cli();
3215  }
3216  
3217  
3218  // subtest: test that enabling EFLAGS.IF is enough to trigger an interrupt
svm_intr_intercept_mix_if_guest(struct svm_test * test)3219  static void svm_intr_intercept_mix_if_guest(struct svm_test *test)
3220  {
3221  	asm volatile("nop;nop;nop;nop");
3222  	report(!dummy_isr_recevied, "No interrupt expected");
3223  	sti_nop();
3224  	report(0, "must not reach here");
3225  }
3226  
svm_intr_intercept_mix_if(void)3227  static void svm_intr_intercept_mix_if(void)
3228  {
3229  	// make a physical interrupt to be pending
3230  	handle_irq(0x55, dummy_isr);
3231  
3232  	vmcb->control.intercept |= (1 << INTERCEPT_INTR);
3233  	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3234  	vmcb->save.rflags &= ~X86_EFLAGS_IF;
3235  
3236  	test_set_guest(svm_intr_intercept_mix_if_guest);
3237  	cli();
3238  	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0);
3239  	svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR);
3240  }
3241  
3242  
3243  // subtest: test that a clever guest can trigger an interrupt by setting GIF
3244  // if GIF is not intercepted
svm_intr_intercept_mix_gif_guest(struct svm_test * test)3245  static void svm_intr_intercept_mix_gif_guest(struct svm_test *test)
3246  {
3247  
3248  	asm volatile("nop;nop;nop;nop");
3249  	report(!dummy_isr_recevied, "No interrupt expected");
3250  
3251  	// clear GIF and enable IF
3252  	// that should still not cause VM exit
3253  	clgi();
3254  	sti_nop();
3255  	report(!dummy_isr_recevied, "No interrupt expected");
3256  
3257  	stgi();
3258  	report(0, "must not reach here");
3259  }
3260  
svm_intr_intercept_mix_gif(void)3261  static void svm_intr_intercept_mix_gif(void)
3262  {
3263  	handle_irq(0x55, dummy_isr);
3264  
3265  	vmcb->control.intercept |= (1 << INTERCEPT_INTR);
3266  	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3267  	vmcb->save.rflags &= ~X86_EFLAGS_IF;
3268  
3269  	test_set_guest(svm_intr_intercept_mix_gif_guest);
3270  	cli();
3271  	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0);
3272  	svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR);
3273  }
3274  
3275  // subtest: test that a clever guest can trigger an interrupt by setting GIF
3276  // if GIF is not intercepted and interrupt comes after guest
3277  // started running
svm_intr_intercept_mix_gif_guest2(struct svm_test * test)3278  static void svm_intr_intercept_mix_gif_guest2(struct svm_test *test)
3279  {
3280  	asm volatile("nop;nop;nop;nop");
3281  	report(!dummy_isr_recevied, "No interrupt expected");
3282  
3283  	clgi();
3284  	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0);
3285  	report(!dummy_isr_recevied, "No interrupt expected");
3286  
3287  	stgi();
3288  	report(0, "must not reach here");
3289  }
3290  
svm_intr_intercept_mix_gif2(void)3291  static void svm_intr_intercept_mix_gif2(void)
3292  {
3293  	handle_irq(0x55, dummy_isr);
3294  
3295  	vmcb->control.intercept |= (1 << INTERCEPT_INTR);
3296  	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3297  	vmcb->save.rflags |= X86_EFLAGS_IF;
3298  
3299  	test_set_guest(svm_intr_intercept_mix_gif_guest2);
3300  	svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR);
3301  }
3302  
3303  
3304  // subtest: test that pending NMI will be handled when guest enables GIF
svm_intr_intercept_mix_nmi_guest(struct svm_test * test)3305  static void svm_intr_intercept_mix_nmi_guest(struct svm_test *test)
3306  {
3307  	asm volatile("nop;nop;nop;nop");
3308  	report(!nmi_recevied, "No NMI expected");
3309  	cli(); // should have no effect
3310  
3311  	clgi();
3312  	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI, 0);
3313  	sti_nop(); // should have no effect
3314  	report(!nmi_recevied, "No NMI expected");
3315  
3316  	stgi();
3317  	report(0, "must not reach here");
3318  }
3319  
svm_intr_intercept_mix_nmi(void)3320  static void svm_intr_intercept_mix_nmi(void)
3321  {
3322  	handle_exception(2, dummy_nmi_handler);
3323  
3324  	vmcb->control.intercept |= (1 << INTERCEPT_NMI);
3325  	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3326  	vmcb->save.rflags |= X86_EFLAGS_IF;
3327  
3328  	test_set_guest(svm_intr_intercept_mix_nmi_guest);
3329  	svm_intr_intercept_mix_run_guest(&nmi_recevied, SVM_EXIT_NMI);
3330  }
3331  
3332  // test that pending SMI will be handled when guest enables GIF
3333  // TODO: can't really count #SMIs so just test that guest doesn't hang
3334  // and VMexits on SMI
svm_intr_intercept_mix_smi_guest(struct svm_test * test)3335  static void svm_intr_intercept_mix_smi_guest(struct svm_test *test)
3336  {
3337  	asm volatile("nop;nop;nop;nop");
3338  
3339  	clgi();
3340  	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_SMI, 0);
3341  	sti_nop(); // should have no effect
3342  	stgi();
3343  	report(0, "must not reach here");
3344  }
3345  
svm_intr_intercept_mix_smi(void)3346  static void svm_intr_intercept_mix_smi(void)
3347  {
3348  	vmcb->control.intercept |= (1 << INTERCEPT_SMI);
3349  	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3350  	test_set_guest(svm_intr_intercept_mix_smi_guest);
3351  	svm_intr_intercept_mix_run_guest(NULL, SVM_EXIT_SMI);
3352  }
3353  
svm_l2_ac_test(void)3354  static void svm_l2_ac_test(void)
3355  {
3356  	bool hit_ac = false;
3357  
3358  	write_cr0(read_cr0() | X86_CR0_AM);
3359  	write_rflags(read_rflags() | X86_EFLAGS_AC);
3360  
3361  	run_in_user(generate_usermode_ac, AC_VECTOR, 0, 0, 0, 0, &hit_ac);
3362  	report(hit_ac, "Usermode #AC handled in L2");
3363  	vmmcall();
3364  }
3365  
3366  struct svm_exception_test {
3367  	u8 vector;
3368  	void (*guest_code)(void);
3369  };
3370  
3371  struct svm_exception_test svm_exception_tests[] = {
3372  	{ GP_VECTOR, generate_non_canonical_gp },
3373  	{ UD_VECTOR, generate_ud },
3374  	{ DE_VECTOR, generate_de },
3375  	{ DB_VECTOR, generate_single_step_db },
3376  	{ BP_VECTOR, generate_bp },
3377  	{ AC_VECTOR, svm_l2_ac_test },
3378  	{ OF_VECTOR, generate_of },
3379  	{ NM_VECTOR, generate_cr0_ts_nm },
3380  	{ NM_VECTOR, generate_cr0_em_nm },
3381  };
3382  
3383  static u8 svm_exception_test_vector;
3384  
svm_exception_handler(struct ex_regs * regs)3385  static void svm_exception_handler(struct ex_regs *regs)
3386  {
3387  	report(regs->vector == svm_exception_test_vector,
3388  		"Handling %s in L2's exception handler",
3389  		exception_mnemonic(svm_exception_test_vector));
3390  	vmmcall();
3391  }
3392  
handle_exception_in_l2(u8 vector)3393  static void handle_exception_in_l2(u8 vector)
3394  {
3395  	handler old_handler = handle_exception(vector, svm_exception_handler);
3396  	svm_exception_test_vector = vector;
3397  
3398  	report(svm_vmrun() == SVM_EXIT_VMMCALL,
3399  		"%s handled by L2", exception_mnemonic(vector));
3400  
3401  	handle_exception(vector, old_handler);
3402  }
3403  
handle_exception_in_l1(u32 vector)3404  static void handle_exception_in_l1(u32 vector)
3405  {
3406  	u32 old_ie = vmcb->control.intercept_exceptions;
3407  
3408  	vmcb->control.intercept_exceptions |= (1ULL << vector);
3409  
3410  	report(svm_vmrun() == (SVM_EXIT_EXCP_BASE + vector),
3411  		"%s handled by L1",  exception_mnemonic(vector));
3412  
3413  	vmcb->control.intercept_exceptions = old_ie;
3414  }
3415  
svm_exception_test(void)3416  static void svm_exception_test(void)
3417  {
3418  	struct svm_exception_test *t;
3419  	int i;
3420  
3421  	for (i = 0; i < ARRAY_SIZE(svm_exception_tests); i++) {
3422  		t = &svm_exception_tests[i];
3423  		test_set_guest((test_guest_func)t->guest_code);
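		/* Run each faulting guest twice: once handled entirely in L2, once intercepted by L1. */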
3424  
3425  		handle_exception_in_l2(t->vector);
3426  		vmcb_ident(vmcb);
3427  
3428  		handle_exception_in_l1(t->vector);
3429  		vmcb_ident(vmcb);
3430  	}
3431  }
3432  
shutdown_intercept_test_guest(struct svm_test * test)3433  static void shutdown_intercept_test_guest(struct svm_test *test)
3434  {
3435  	asm volatile ("ud2");
3436  	report_fail("should not reach here");
3437  
3438  }
3439  
svm_shutdown_intercept_test(void)3440  static void svm_shutdown_intercept_test(void)
3441  {
3442  	test_set_guest(shutdown_intercept_test_guest);
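	/*
	 * Point the guest IDT at an unmapped page so that delivering the #UD
	 * escalates into a triple fault, i.e. a SHUTDOWN.
	 */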
3443  	vmcb->save.idtr.base = (u64)alloc_vpage();
3444  	vmcb->control.intercept |= (1ULL << INTERCEPT_SHUTDOWN);
3445  	svm_vmrun();
3446  	report(vmcb->control.exit_code == SVM_EXIT_SHUTDOWN, "shutdown test passed");
3447  }
3448  
struct svm_test svm_tests[] = {
	{ "null", default_supported, default_prepare,
	  default_prepare_gif_clear, null_test,
	  default_finished, null_check },
	{ "vmrun", default_supported, default_prepare,
	  default_prepare_gif_clear, test_vmrun,
	  default_finished, check_vmrun },
	{ "ioio", default_supported, prepare_ioio,
	  default_prepare_gif_clear, test_ioio,
	  ioio_finished, check_ioio },
	{ "vmrun intercept check", default_supported, prepare_no_vmrun_int,
	  default_prepare_gif_clear, null_test, default_finished,
	  check_no_vmrun_int },
	{ "rsm", default_supported,
	  prepare_rsm_intercept, default_prepare_gif_clear,
	  test_rsm_intercept, finished_rsm_intercept, check_rsm_intercept },
	{ "cr3 read intercept", default_supported,
	  prepare_cr3_intercept, default_prepare_gif_clear,
	  test_cr3_intercept, default_finished, check_cr3_intercept },
	{ "cr3 read nointercept", default_supported, default_prepare,
	  default_prepare_gif_clear, test_cr3_intercept, default_finished,
	  check_cr3_nointercept },
	{ "cr3 read intercept emulate", smp_supported,
	  prepare_cr3_intercept_bypass, default_prepare_gif_clear,
	  test_cr3_intercept_bypass, default_finished, check_cr3_intercept },
	{ "dr intercept check", default_supported, prepare_dr_intercept,
	  default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished,
	  check_dr_intercept },
	{ "next_rip", next_rip_supported, prepare_next_rip,
	  default_prepare_gif_clear, test_next_rip,
	  default_finished, check_next_rip },
	{ "msr intercept check", default_supported, prepare_msr_intercept,
	  default_prepare_gif_clear, test_msr_intercept,
	  msr_intercept_finished, check_msr_intercept },
	{ "mode_switch", default_supported, prepare_mode_switch,
	  default_prepare_gif_clear, test_mode_switch,
	  mode_switch_finished, check_mode_switch },
	{ "asid_zero", default_supported, prepare_asid_zero,
	  default_prepare_gif_clear, test_asid_zero,
	  default_finished, check_asid_zero },
	{ "sel_cr0_bug", default_supported, sel_cr0_bug_prepare,
	  default_prepare_gif_clear, sel_cr0_bug_test,
	  sel_cr0_bug_finished, sel_cr0_bug_check },
	{ "tsc_adjust", tsc_adjust_supported, tsc_adjust_prepare,
	  default_prepare_gif_clear, tsc_adjust_test,
	  default_finished, tsc_adjust_check },
	{ "latency_run_exit", default_supported, latency_prepare,
	  default_prepare_gif_clear, latency_test,
	  latency_finished, latency_check },
	{ "latency_run_exit_clean", default_supported, latency_prepare,
	  default_prepare_gif_clear, latency_test,
	  latency_finished_clean, latency_check },
	{ "latency_svm_insn", default_supported, lat_svm_insn_prepare,
	  default_prepare_gif_clear, null_test,
	  lat_svm_insn_finished, lat_svm_insn_check },
	{ "exc_inject", default_supported, exc_inject_prepare,
	  default_prepare_gif_clear, exc_inject_test,
	  exc_inject_finished, exc_inject_check },
	{ "pending_event", default_supported, pending_event_prepare,
	  default_prepare_gif_clear,
	  pending_event_test, pending_event_finished, pending_event_check },
	{ "pending_event_cli", default_supported, pending_event_cli_prepare,
	  pending_event_cli_prepare_gif_clear,
	  pending_event_cli_test, pending_event_cli_finished,
	  pending_event_cli_check },
	{ "interrupt", default_supported, interrupt_prepare,
	  default_prepare_gif_clear, interrupt_test,
	  interrupt_finished, interrupt_check },
	{ "nmi", default_supported, nmi_prepare,
	  default_prepare_gif_clear, nmi_test,
	  nmi_finished, nmi_check },
	{ "nmi_hlt", smp_supported, nmi_prepare,
	  default_prepare_gif_clear, nmi_hlt_test,
	  nmi_hlt_finished, nmi_hlt_check },
	{ "vnmi", vnmi_supported, vnmi_prepare,
	  default_prepare_gif_clear, vnmi_test,
	  vnmi_finished, vnmi_check },
	{ "virq_inject", default_supported, virq_inject_prepare,
	  default_prepare_gif_clear, virq_inject_test,
	  virq_inject_finished, virq_inject_check },
	{ "virq_inject_within_shadow", default_supported, virq_inject_within_shadow_prepare,
	  virq_inject_within_shadow_prepare_gif_clear, virq_inject_within_shadow_test,
	  virq_inject_within_shadow_finished, virq_inject_within_shadow_check },
	{ "reg_corruption", default_supported, reg_corruption_prepare,
	  default_prepare_gif_clear, reg_corruption_test,
	  reg_corruption_finished, reg_corruption_check },
	{ "svm_init_startup_test", smp_supported, init_startup_prepare,
	  default_prepare_gif_clear, null_test,
	  init_startup_finished, init_startup_check },
	{ "svm_init_intercept_test", smp_supported, init_intercept_prepare,
	  default_prepare_gif_clear, init_intercept_test,
	  init_intercept_finished, init_intercept_check, .on_vcpu = 2 },
	{ "host_rflags", default_supported, host_rflags_prepare,
	  host_rflags_prepare_gif_clear, host_rflags_test,
	  host_rflags_finished, host_rflags_check },
	{ "vgif", vgif_supported, prepare_vgif_enabled,
	  default_prepare_gif_clear, test_vgif, vgif_finished,
	  vgif_check },
	TEST(svm_cr4_osxsave_test),
	TEST(svm_guest_state_test),
	TEST(svm_vmrun_errata_test),
	TEST(svm_vmload_vmsave),
	TEST(svm_test_singlestep),
	TEST(svm_no_nm_test),
	TEST(svm_exception_test),
	TEST(svm_lbrv_test0),
	TEST(svm_lbrv_test1),
	TEST(svm_lbrv_test2),
	TEST(svm_lbrv_nested_test1),
	TEST(svm_lbrv_nested_test2),
	TEST(svm_intr_intercept_mix_if),
	TEST(svm_intr_intercept_mix_gif),
	TEST(svm_intr_intercept_mix_gif2),
	TEST(svm_intr_intercept_mix_nmi),
	TEST(svm_intr_intercept_mix_smi),
	TEST(svm_tsc_scale_test),
	TEST(pause_filter_test),
	TEST(svm_shutdown_intercept_test),
	{ NULL, NULL, NULL, NULL, NULL, NULL, NULL }
};

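/*
 * setup_vm() brings up the page tables and the vmalloc/vpage allocator, so
 * tests that rely on virtual allocations (e.g. svm_shutdown_intercept_test)
 * have a virtual address space available before run_svm_tests() walks the
 * table above.
 */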
int main(int ac, char **av)
{
	setup_vm();
	return run_svm_tests(ac, av, svm_tests);
}