/* kvm-unit-tests: x86/svm_tests.c (revision f3f338619e4938c2509f5c691adc1f331b07c203) */
#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"
#include "delay.h"
#include "util.h"
#include "x86/usermode.h"
#include "vmalloc.h"

#define SVM_EXIT_MAX_DR_INTERCEPT 0x3f

#define LATENCY_RUNS 1000000

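/*
 * Measurement state shared by the latency tests below: running sums plus
 * min/max cycle counts for VMRUN, #VMEXIT, VMLOAD, VMSAVE, STGI and CLGI.
 */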
u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

static void null_test(struct svm_test *test)
{
}

static bool null_check(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

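/*
 * Per the APM, the VMRUN intercept bit must be set in the VMCB; with the
 * bit cleared, VMRUN of this VMCB fails immediately with SVM_EXIT_ERR
 * (VMEXIT_INVALID) instead of entering the guest.
 */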
static void prepare_no_vmrun_int(struct svm_test *test)
{
	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct svm_test *test)
{
	asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb)));
}

static bool check_vmrun(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

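/*
 * RSM intercept: with the intercept set, the guest's RSM should exit with
 * SVM_EXIT_RSM (stage 0); once the intercept is cleared, RSM outside of
 * SMM raises #UD, which is caught via the exception intercept (stage 1).
 */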
static void prepare_rsm_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept |= 1 << INTERCEPT_RSM;
	vmcb->control.intercept_exceptions |= (1ULL << UD_VECTOR);
}

static void test_rsm_intercept(struct svm_test *test)
{
	asm volatile ("rsm" : : : "memory");
}

static bool check_rsm_intercept(struct svm_test *test)
{
	return get_test_stage(test) == 2;
}

static bool finished_rsm_intercept(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_RSM) {
			report_fail("VMEXIT not due to rsm. Exit reason 0x%x",
				    vmcb->control.exit_code);
			return true;
		}
		vmcb->control.intercept &= ~(1 << INTERCEPT_RSM);
		inc_test_stage(test);
		break;

	case 1:
		if (vmcb->control.exit_code != SVM_EXIT_EXCP_BASE + UD_VECTOR) {
			report_fail("VMEXIT not due to #UD. Exit reason 0x%x",
				    vmcb->control.exit_code);
			return true;
		}
		vmcb->save.rip += 2;
		inc_test_stage(test);
		break;

	default:
		return true;
	}
	return get_test_stage(test) == 2;
}

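/* Bit 3 of intercept_cr_read intercepts guest reads of CR3. */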
static void prepare_cr3_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct svm_test *test)
{
	asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct svm_test *test)
{
	return null_check(test) && test->scratch == read_cr3();
}

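/*
 * From another CPU, patch the instruction at mmio_insn from an MMIO store
 * into "mov %cr3, %rax; nop" while the guest spins, checking that a stale
 * instruction fetch cannot bypass the CR3 read intercept.
 */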
static void corrupt_cr3_intercept_bypass(void *_test)
{
	struct svm_test *test = _test;
	extern volatile u32 mmio_insn;

	while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
		pause();
	pause();
	pause();
	pause();
	mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_cr_read |= 1 << 3;
	on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct svm_test *test)
{
	ulong a = 0xa0000;

	test->scratch = 1;
	while (test->scratch != 2)
		barrier();

	asm volatile ("mmio_insn: mov %0, (%0); nop"
		      : "+a"(a) : : "memory");
	test->scratch = a;
}

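/*
 * DR intercept: intercept all reads and writes of DR0-DR7.  Every access
 * should #VMEXIT; dr_intercept_finished() stores the DR number in scratch
 * and skips the MOV, so a scratch mismatch means a missed intercept.
 */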
static void prepare_dr_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_dr_read = 0xff;
	vmcb->control.intercept_dr_write = 0xff;
}

static void test_dr_intercept(struct svm_test *test)
{
	unsigned int i, failcnt = 0;

	/* Loop testing debug register reads */
	for (i = 0; i < 8; i++) {

		switch (i) {
		case 0:
			asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 1:
			asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 2:
			asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 3:
			asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 4:
			asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 5:
			asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 6:
			asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 7:
			asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
			break;
		}

		if (test->scratch != i) {
			report_fail("dr%u read intercept", i);
			failcnt++;
		}
	}

	/* Loop testing debug register writes */
	for (i = 0; i < 8; i++) {

		switch (i) {
		case 0:
			asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
			break;
		case 1:
			asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
			break;
		case 2:
			asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
			break;
		case 3:
			asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
			break;
		case 4:
			asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
			break;
		case 5:
			asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
			break;
		case 6:
			asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
			break;
		case 7:
			asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
			break;
		}

		if (test->scratch != i) {
			report_fail("dr%u write intercept", i);
			failcnt++;
		}
	}

	test->scratch = failcnt;
}

static bool dr_intercept_finished(struct svm_test *test)
{
	ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0);

	/* Only expect DR intercepts */
	if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
		return true;

	/*
	 * Compute debug register number.
	 * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
	 * Programmer's Manual Volume 2 - System Programming:
	 * http://support.amd.com/TechDocs/24593.pdf
	 * there are 16 VMEXIT codes each for DR read and write.
	 */
	test->scratch = (n % 16);

	/* Jump over MOV instruction */
	vmcb->save.rip += 3;

	return false;
}

static bool check_dr_intercept(struct svm_test *test)
{
	return !test->scratch;
}

static bool next_rip_supported(void)
{
	return this_cpu_has(X86_FEATURE_NRIPS);
}

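/*
 * Next-RIP: for an intercepted RDTSC, the VMCB next_rip field must hold
 * the address of the following instruction, i.e. the exp_next_rip label.
 */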
static void prepare_next_rip(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}


static void test_next_rip(struct svm_test *test)
{
	asm volatile ("rdtsc\n\t"
		      ".globl exp_next_rip\n\t"
		      "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct svm_test *test)
{
	extern char exp_next_rip;
	unsigned long address = (unsigned long)&exp_next_rip;

	return address == vmcb->control.next_rip;
}

extern u8 *msr_bitmap;

static bool is_x2apic;

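/*
 * MSR intercept: walk every MSR in (and just around) the three MSRPM
 * ranges.  The guest signals the host with VMMCALL before each step;
 * bits 63:32 of scratch select which MSRPM bit the host flips next, so
 * both the intercepted and non-intercepted paths are exercised per MSR.
 */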
static void prepare_msr_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);

	memset(msr_bitmap, 0, MSR_BITMAP_SIZE);

	is_x2apic = is_x2apic_enabled();
}

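/*
 * Each 2 KiB MSRPM range covers 8192 MSRs at two bits per MSR: the even
 * bit intercepts reads, the odd bit intercepts writes.  For example, EFER
 * (0xc0000080) lands in range 1 at bit 2048 * 8 + 0x80 * 2 = 16640.
 */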
#define SVM_MSRPM_BYTES_PER_RANGE 2048
#define SVM_BITS_PER_MSR 2
#define SVM_MSRS_PER_BYTE 4
#define SVM_MSRS_PER_RANGE 8192
#define SVM_MSRPM_OFFSET_MASK (SVM_MSRS_PER_RANGE - 1)

static int get_msrpm_bit_nr(u32 msr)
{
	int range_nr;

	switch (msr & ~SVM_MSRPM_OFFSET_MASK) {
	case 0:
		range_nr = 0;
		break;
	case 0xc0000000:
		range_nr = 1;
		break;
	case 0xc0010000:
		range_nr = 2;
		break;
	default:
		return -1;
	}

	return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +
	       (msr & SVM_MSRPM_OFFSET_MASK) * SVM_BITS_PER_MSR;
}

static void __test_msr_intercept(struct svm_test *test)
{
	u64 val, exp, arb_val = 0xef8056791234abcd; /* Arbitrary value */
	int vector;
	u32 msr;

	for (msr = 0; msr <= 0xc0012000; msr++) {
		if (msr == 0xC0010131 /* MSR_SEV_STATUS */) {
			/*
			 * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
			 * Programmer's Manual volume 2 - System Programming:
			 * http://support.amd.com/TechDocs/24593.pdf
			 * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
			 */
			continue;
		}

		/*
		 * Test one MSR just before and after each range, but otherwise
		 * skip the gaps between supported MSR ranges.
		 */
		if (msr == 0x2000 + 1)
			msr = 0xc0000000 - 1;
		else if (msr == 0xc0002000 + 1)
			msr = 0xc0010000 - 1;

		test->scratch = msr;
		vmmcall();

		test->scratch = -1;

		vector = rdmsr_safe(msr, &val);
		if (vector)
			report_fail("Expected RDMSR(0x%x) to #VMEXIT, got exception '%u'",
				    msr, vector);
		else if (test->scratch != msr)
			report_fail("Expected RDMSR(0x%x) to #VMEXIT, got scratch '%ld'",
				    msr, test->scratch);

		test->scratch = BIT_ULL(32) | msr;
		vmmcall();

		/*
		 * Poor man's approach to generate a value that
		 * seems arbitrary each time around the loop.
		 */
		arb_val += (arb_val << 1);

		test->scratch = -1;

		vector = wrmsr_safe(msr, arb_val);
		if (vector)
			report_fail("Expected WRMSR(0x%x) to #VMEXIT, got exception '%u'",
				    msr, vector);
		else if (test->scratch != arb_val)
			report_fail("Expected WRMSR(0x%x) to #VMEXIT, got scratch '%ld' (wanted %ld)",
				    msr, test->scratch, arb_val);

		test->scratch = BIT_ULL(33) | msr;
		vmmcall();

		if (get_msrpm_bit_nr(msr) < 0) {
			report(msr == 0x2000 ||
			       msr == 0xc0000000 - 1 || msr == 0xc0002000 ||
			       msr == 0xc0010000 - 1 || msr == 0xc0012000,
			       "MSR 0x%x not covered by an MSRPM range", msr);
			continue;
		}

		exp = test->scratch;

		/*
		 * Verify that disabling interception for MSRs within an MSRPM
		 * range behaves as expected.  Simply eat exceptions, the goal
		 * is to verify interception, not MSR emulation/virtualization.
		 */
		test->scratch = -1;
		(void)rdmsr_safe(msr, &val);
		if (test->scratch != -1)
			report_fail("RDMSR 0x%x, Wanted -1 (no intercept), got 0x%lx",
				    msr, test->scratch);

		/*
		 * Verify L1 and L2 see the same MSR value.  Skip TSC to avoid
		 * false failures, as it's constantly changing.
		 */
		if (val != exp && msr != MSR_IA32_TSC)
			report_fail("RDMSR 0x%x, wanted val '0x%lx', got val '0x%lx'",
				    msr, exp, val);

		test->scratch = BIT_ULL(34) | msr;
		vmmcall();

		test->scratch = -1;
		(void)wrmsr_safe(msr, val);
		if (test->scratch != -1)
			report_fail("WRMSR 0x%x, Wanted -1 (no intercept), got 0x%lx",
				    msr, test->scratch);

		test->scratch = BIT_ULL(35) | msr;
		vmmcall();
	}
}

static void test_msr_intercept(struct svm_test *test)
{
	__test_msr_intercept(test);

	test->scratch = -2;
	vmmcall();

	__test_msr_intercept(test);

	test->scratch = -3;
}

static void restore_msrpm_bit(int bit_nr, bool set)
{
	if (set)
		__set_bit(bit_nr, msr_bitmap);
	else
		__clear_bit(bit_nr, msr_bitmap);
}

static bool msr_intercept_finished(struct svm_test *test)
{
	u32 exit_code = vmcb->control.exit_code;
	bool all_set = false;
	int bit_nr;

	if (exit_code == SVM_EXIT_VMMCALL) {
		u32 msr = test->scratch & -1u;

		vmcb->save.rip += 3;

		if (test->scratch == -3)
			return true;

		if (test->scratch == -2) {
			all_set = true;
			memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
			return false;
		}

		bit_nr = get_msrpm_bit_nr(msr);
		if (bit_nr < 0)
			return false;

		switch (test->scratch >> 32) {
		case 0:
			__set_bit(bit_nr, msr_bitmap);
			return false;
		case 1:
			restore_msrpm_bit(bit_nr, all_set);
			__set_bit(bit_nr + 1, msr_bitmap);
			return false;
		case 2:
			restore_msrpm_bit(bit_nr + 1, all_set);
			__clear_bit(bit_nr, msr_bitmap);
			(void)rdmsr_safe(msr, &test->scratch);
			return false;
		case 4:
			restore_msrpm_bit(bit_nr, all_set);
			__clear_bit(bit_nr + 1, msr_bitmap);
			/*
			 * Disable x2APIC so that WRMSR faults instead of doing
			 * random things, e.g. sending IPIs.
			 */
			if (is_x2apic && msr >= 0x800 && msr <= 0x8ff)
				reset_apic();
			return false;
		case 8:
			restore_msrpm_bit(bit_nr + 1, all_set);
			if (is_x2apic && msr >= 0x800 && msr <= 0x8ff)
				enable_x2apic();
			return false;
		default:
			return true;
		}
	}

	if (exit_code != SVM_EXIT_MSR) {
		report_fail("Wanted MSR VM-Exit, got reason 0x%x", exit_code);
		return true;
	}

	/* Jump over RDMSR/WRMSR instruction */
	vmcb->save.rip += 2;

	/*
	 * Test whether the intercept was for RDMSR/WRMSR.
	 * For RDMSR, test->scratch is set to the MSR index;
	 *      RCX holds the MSR index.
	 * For WRMSR, test->scratch is set to the MSR value;
	 *      RDX holds the upper 32 bits of the MSR value,
	 *      while RAX holds its lower 32 bits.
	 */
	if (vmcb->control.exit_info_1)
		test->scratch = ((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff));
	else
		test->scratch = get_regs().rcx;

	return false;
}

static bool check_msr_intercept(struct svm_test *test)
{
	return (test->scratch == -3);
}

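/*
 * Mode switch: the guest steps from 64-bit long mode down through 32-bit
 * and 16-bit protected mode to real mode, does VMMCALL, then climbs back
 * up to long mode and does VMMCALL again; the host sanity-checks CR0, CR4
 * and EFER at each VMMCALL exit.
 */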
static void prepare_mode_switch(struct svm_test *test)
{
	vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
		|  (1ULL << UD_VECTOR)
		|  (1ULL << DF_VECTOR)
		|  (1ULL << PF_VECTOR);
	test->scratch = 0;
}

static void test_mode_switch(struct svm_test *test)
{
	asm volatile("	cli\n"
		     "	ljmp *1f\n" /* jump to 32-bit code segment */
		     "1:\n"
		     "	.long 2f\n"
		     "	.long " xstr(KERNEL_CS32) "\n"
		     ".code32\n"
		     "2:\n"
		     "	movl %%cr0, %%eax\n"
		     "	btcl  $31, %%eax\n" /* clear PG */
		     "	movl %%eax, %%cr0\n"
		     "	movl $0xc0000080, %%ecx\n" /* EFER */
		     "	rdmsr\n"
		     "	btcl $8, %%eax\n" /* clear LME */
		     "	wrmsr\n"
		     "	movl %%cr4, %%eax\n"
		     "	btcl $5, %%eax\n" /* clear PAE */
		     "	movl %%eax, %%cr4\n"
		     "	movw %[ds16], %%ax\n"
		     "	movw %%ax, %%ds\n"
		     "	ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
		     ".code16\n"
		     "3:\n"
		     "	movl %%cr0, %%eax\n"
		     "	btcl $0, %%eax\n" /* clear PE  */
		     "	movl %%eax, %%cr0\n"
		     "	ljmpl $0, $4f\n"   /* jump to real mode */
		     "4:\n"
		     "	vmmcall\n"
		     "	movl %%cr0, %%eax\n"
		     "	btsl $0, %%eax\n" /* set PE  */
		     "	movl %%eax, %%cr0\n"
		     "	ljmpl %[cs32], $5f\n" /* back to protected mode */
		     ".code32\n"
		     "5:\n"
		     "	movl %%cr4, %%eax\n"
		     "	btsl $5, %%eax\n" /* set PAE */
		     "	movl %%eax, %%cr4\n"
		     "	movl $0xc0000080, %%ecx\n" /* EFER */
		     "	rdmsr\n"
		     "	btsl $8, %%eax\n" /* set LME */
		     "	wrmsr\n"
		     "	movl %%cr0, %%eax\n"
		     "	btsl  $31, %%eax\n" /* set PG */
		     "	movl %%eax, %%cr0\n"
		     "	ljmpl %[cs64], $6f\n"    /* back to long mode */
		     ".code64\n\t"
		     "6:\n"
		     "	vmmcall\n"
		     :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
		      [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		     : "rax", "rbx", "rcx", "rdx", "memory");
}

static bool mode_switch_finished(struct svm_test *test)
{
	u64 cr0, cr4, efer;

	cr0  = vmcb->save.cr0;
	cr4  = vmcb->save.cr4;
	efer = vmcb->save.efer;

	/* Only expect VMMCALL intercepts */
	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
		return true;

	/* Jump over VMMCALL instruction */
	vmcb->save.rip += 3;

	/* Do sanity checks */
	switch (test->scratch) {
	case 0:
		/* Test should be in real mode now - check for this */
		if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
		    (cr4  & 0x00000020) || /* CR4.PAE */
		    (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
			return true;
		break;
	case 2:
		/* Test should be back in long-mode now - check for this */
		if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
		    ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
		    ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
			return true;
		break;
	}

	/* one step forward */
	test->scratch += 1;

	return test->scratch == 2;
}

static bool check_mode_switch(struct svm_test *test)
{
	return test->scratch == 2;
}

extern u8 *io_bitmap;

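/*
 * IOIO intercept: each bit in the 8 KiB IOPM maps one port, and the pad
 * byte past the 8 KiB boundary covers multi-byte accesses that extend
 * beyond port 0xFFFF.  On each intercept, ioio_finished() clears the
 * bit(s) for the faulting access and advances the stage before retrying.
 */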
static void prepare_ioio(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
	test->scratch = 0;
	memset(io_bitmap, 0, 8192);
	io_bitmap[8192] = 0xFF;
}

static void test_ioio(struct svm_test *test)
{
	// stage 0, test IO pass
	inb(0x5000);
	outb(0x0, 0x5000);
	if (get_test_stage(test) != 0)
		goto fail;

	// test IO width, in/out
	io_bitmap[0] = 0xFF;
	inc_test_stage(test);
	inb(0x0);
	if (get_test_stage(test) != 2)
		goto fail;

	outw(0x0, 0x0);
	if (get_test_stage(test) != 3)
		goto fail;

	inl(0x0);
	if (get_test_stage(test) != 4)
		goto fail;

	// test low/high IO port
	io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (get_test_stage(test) != 5)
		goto fail;

	io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
	inw(0x9000);
	if (get_test_stage(test) != 6)
		goto fail;

	// test partial pass
	io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
	inl(0x4FFF);
	if (get_test_stage(test) != 7)
		goto fail;

	// test across pages
	inc_test_stage(test);
	inl(0x7FFF);
	if (get_test_stage(test) != 8)
		goto fail;

	inc_test_stage(test);
	io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
	inl(0x7FFF);
	if (get_test_stage(test) != 10)
		goto fail;

	io_bitmap[0] = 0;
	inl(0xFFFF);
	if (get_test_stage(test) != 11)
		goto fail;

	io_bitmap[0] = 0xFF;
	io_bitmap[8192] = 0;
	inl(0xFFFF);
	inc_test_stage(test);
	if (get_test_stage(test) != 12)
		goto fail;

	return;

fail:
	report_fail("stage %d", get_test_stage(test));
	test->scratch = -1;
}

static bool ioio_finished(struct svm_test *test)
{
	unsigned port, size;

	/* Only expect IOIO intercepts */
	if (vmcb->control.exit_code == SVM_EXIT_VMMCALL)
		return true;

	if (vmcb->control.exit_code != SVM_EXIT_IOIO)
		return true;

	/* one step forward */
	test->scratch += 1;

	port = vmcb->control.exit_info_1 >> 16;
	size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

	while (size--) {
		io_bitmap[port / 8] &= ~(1 << (port & 7));
		port++;
	}

	return false;
}

static bool check_ioio(struct svm_test *test)
{
	memset(io_bitmap, 0, 8193);
	return test->scratch != -1;
}

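/* ASID 0 is reserved for the host, so VMRUN with ASID 0 must fail with SVM_EXIT_ERR. */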
static void prepare_asid_zero(struct svm_test *test)
{
	vmcb->control.asid = 0;
}

static void test_asid_zero(struct svm_test *test)
{
	asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct svm_test *test)
{
	return true;
}

static void sel_cr0_bug_test(struct svm_test *test)
{
	unsigned long cr0;

	/* read cr0, set CD, and write back */
	cr0  = read_cr0();
	cr0 |= (1UL << 30);
	write_cr0(cr0);

	/*
	 * If we are here the test failed, not sure what to do now because we
	 * are not in guest-mode anymore so we can't trigger an intercept.
	 * Trigger a triple fault for now.
	 */
	report_fail("sel_cr0 test. Cannot recover from this - exiting");
	exit(report_summary());
}

static bool sel_cr0_bug_check(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

#define TSC_ADJUST_VALUE    (1ll << 32)
#define TSC_OFFSET_VALUE    (~0ull << 48)
static bool ok;

static bool tsc_adjust_supported(void)
{
	return this_cpu_has(X86_FEATURE_TSC_ADJUST);
}

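/*
 * IA32_TSC_ADJUST: a write to IA32_TSC is expected to shift
 * IA32_TSC_ADJUST by the same delta, and both must read consistently in
 * the guest while the VMCB TSC offset is in effect.
 */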
static void tsc_adjust_prepare(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.tsc_offset = TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok = adjust == -TSC_ADJUST_VALUE;
}

static void tsc_adjust_test(struct svm_test *test)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok &= adjust == -TSC_ADJUST_VALUE;

	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok &= adjust <= -2 * TSC_ADJUST_VALUE;

	uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
	ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;

	uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
	ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
}

static bool tsc_adjust_check(struct svm_test *test)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);

	wrmsr(MSR_IA32_TSC_ADJUST, 0);
	return ok && adjust <= -2 * TSC_ADJUST_VALUE;
}

static u64 guest_tsc_delay_value;
/* number of bits to shift tsc right for stable result */
#define TSC_SHIFT 24
#define TSC_SCALE_ITERATIONS 10

static void svm_tsc_scale_guest(struct svm_test *test)
{
	u64 start_tsc = rdtsc();

	while (rdtsc() - start_tsc < guest_tsc_delay_value)
		cpu_relax();
}

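/*
 * Program MSR_AMD64_TSC_RATIO with the scale as a fixed-point value with
 * 32 fractional bits (e.g. a ratio of 2.0 becomes 0x200000000), run the
 * guest delay loop, and verify the measured host-side duration matches
 * the requested one despite the scaled guest TSC.
 */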
static void svm_tsc_scale_run_testcase(u64 duration,
				       double tsc_scale, u64 tsc_offset)
{
	u64 start_tsc, actual_duration;

	guest_tsc_delay_value = (duration << TSC_SHIFT) * tsc_scale;

	test_set_guest(svm_tsc_scale_guest);
	vmcb->control.tsc_offset = tsc_offset;
	wrmsr(MSR_AMD64_TSC_RATIO, (u64)(tsc_scale * (1ULL << 32)));

	start_tsc = rdtsc();

	if (svm_vmrun() != SVM_EXIT_VMMCALL)
		report_fail("unexpected vm exit code 0x%x", vmcb->control.exit_code);

	actual_duration = (rdtsc() - start_tsc) >> TSC_SHIFT;

	report(duration == actual_duration, "tsc delay (expected: %lu, actual: %lu)",
	       duration, actual_duration);
}

static void svm_tsc_scale_test(void)
{
	int i;

	if (!tsc_scale_supported()) {
		report_skip("TSC scale not supported in the guest");
		return;
	}

	report(rdmsr(MSR_AMD64_TSC_RATIO) == TSC_RATIO_DEFAULT,
	       "initial TSC scale ratio");

	for (i = 0; i < TSC_SCALE_ITERATIONS; i++) {

		double tsc_scale = (double)(rdrand() % 100 + 1) / 10;
		int duration = rdrand() % 50 + 1;
		u64 tsc_offset = rdrand();

		report_info("duration=%d, tsc_scale=%d, tsc_offset=%ld",
			    duration, (int)(tsc_scale * 100), tsc_offset);

		svm_tsc_scale_run_testcase(duration, tsc_scale, tsc_offset);
	}

	svm_tsc_scale_run_testcase(50, 255, rdrand());
	svm_tsc_scale_run_testcase(50, 0.0001, rdrand());
}

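/*
 * VMRUN/#VMEXIT latency: the guest VMMCALLs in a tight loop while host
 * and guest timestamp their halves of the round trip, accumulating
 * min/max/sum over LATENCY_RUNS iterations.
 */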
static void latency_prepare(struct svm_test *test)
{
	default_prepare(test);
	runs = LATENCY_RUNS;
	latvmrun_min = latvmexit_min = -1ULL;
	latvmrun_max = latvmexit_max = 0;
	vmrun_sum = vmexit_sum = 0;
	tsc_start = rdtsc();
}

static void latency_test(struct svm_test *test)
{
	u64 cycles;

start:
	tsc_end = rdtsc();

	cycles = tsc_end - tsc_start;

	if (cycles > latvmrun_max)
		latvmrun_max = cycles;

	if (cycles < latvmrun_min)
		latvmrun_min = cycles;

	vmrun_sum += cycles;

	tsc_start = rdtsc();

	asm volatile ("vmmcall" : : : "memory");
	goto start;
}

static bool latency_finished(struct svm_test *test)
{
	u64 cycles;

	tsc_end = rdtsc();

	cycles = tsc_end - tsc_start;

	if (cycles > latvmexit_max)
		latvmexit_max = cycles;

	if (cycles < latvmexit_min)
		latvmexit_min = cycles;

	vmexit_sum += cycles;

	vmcb->save.rip += 3;

	runs -= 1;

	tsc_end = rdtsc();

	return runs == 0;
}

static bool latency_finished_clean(struct svm_test *test)
{
	vmcb->control.clean = VMCB_CLEAN_ALL;
	return latency_finished(test);
}

static bool latency_check(struct svm_test *test)
{
	printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
	       latvmrun_min, vmrun_sum / LATENCY_RUNS);
	printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
	       latvmexit_min, vmexit_sum / LATENCY_RUNS);
	return true;
}

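/*
 * Host-side latency of VMLOAD, VMSAVE, STGI and CLGI, each timed
 * back-to-back with RDTSC over LATENCY_RUNS iterations.
 */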
static void lat_svm_insn_prepare(struct svm_test *test)
{
	default_prepare(test);
	runs = LATENCY_RUNS;
	latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
	latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
	vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

lat_svm_insn_finished(struct svm_test * test)1020ad879127SKrish Sadhukhan static bool lat_svm_insn_finished(struct svm_test *test)
1021ad879127SKrish Sadhukhan {
1022096cf7feSPaolo Bonzini 	u64 vmcb_phys = virt_to_phys(vmcb);
1023ad879127SKrish Sadhukhan 	u64 cycles;
1024ad879127SKrish Sadhukhan 
1025ad879127SKrish Sadhukhan 	for ( ; runs != 0; runs--) {
1026ad879127SKrish Sadhukhan 		tsc_start = rdtsc();
1027ad879127SKrish Sadhukhan 		asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
1028ad879127SKrish Sadhukhan 		cycles = rdtsc() - tsc_start;
1029ad879127SKrish Sadhukhan 		if (cycles > latvmload_max)
1030ad879127SKrish Sadhukhan 			latvmload_max = cycles;
1031ad879127SKrish Sadhukhan 		if (cycles < latvmload_min)
1032ad879127SKrish Sadhukhan 			latvmload_min = cycles;
1033ad879127SKrish Sadhukhan 		vmload_sum += cycles;
1034ad879127SKrish Sadhukhan 
1035ad879127SKrish Sadhukhan 		tsc_start = rdtsc();
1036ad879127SKrish Sadhukhan 		asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
1037ad879127SKrish Sadhukhan 		cycles = rdtsc() - tsc_start;
1038ad879127SKrish Sadhukhan 		if (cycles > latvmsave_max)
1039ad879127SKrish Sadhukhan 			latvmsave_max = cycles;
1040ad879127SKrish Sadhukhan 		if (cycles < latvmsave_min)
1041ad879127SKrish Sadhukhan 			latvmsave_min = cycles;
1042ad879127SKrish Sadhukhan 		vmsave_sum += cycles;
1043ad879127SKrish Sadhukhan 
1044ad879127SKrish Sadhukhan 		tsc_start = rdtsc();
1045ad879127SKrish Sadhukhan 		asm volatile("stgi\n\t");
1046ad879127SKrish Sadhukhan 		cycles = rdtsc() - tsc_start;
1047ad879127SKrish Sadhukhan 		if (cycles > latstgi_max)
1048ad879127SKrish Sadhukhan 			latstgi_max = cycles;
1049ad879127SKrish Sadhukhan 		if (cycles < latstgi_min)
1050ad879127SKrish Sadhukhan 			latstgi_min = cycles;
1051ad879127SKrish Sadhukhan 		stgi_sum += cycles;
1052ad879127SKrish Sadhukhan 
1053ad879127SKrish Sadhukhan 		tsc_start = rdtsc();
1054ad879127SKrish Sadhukhan 		asm volatile("clgi\n\t");
1055ad879127SKrish Sadhukhan 		cycles = rdtsc() - tsc_start;
1056ad879127SKrish Sadhukhan 		if (cycles > latclgi_max)
1057ad879127SKrish Sadhukhan 			latclgi_max = cycles;
1058ad879127SKrish Sadhukhan 		if (cycles < latclgi_min)
1059ad879127SKrish Sadhukhan 			latclgi_min = cycles;
1060ad879127SKrish Sadhukhan 		clgi_sum += cycles;
1061ad879127SKrish Sadhukhan 	}
1062ad879127SKrish Sadhukhan 
1063ad879127SKrish Sadhukhan 	tsc_end = rdtsc();
1064ad879127SKrish Sadhukhan 
1065ad879127SKrish Sadhukhan 	return true;
1066ad879127SKrish Sadhukhan }
1067ad879127SKrish Sadhukhan 
lat_svm_insn_check(struct svm_test * test)1068ad879127SKrish Sadhukhan static bool lat_svm_insn_check(struct svm_test *test)
1069ad879127SKrish Sadhukhan {
1070ad879127SKrish Sadhukhan 	printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
1071ad879127SKrish Sadhukhan 	       latvmload_min, vmload_sum / LATENCY_RUNS);
1072ad879127SKrish Sadhukhan 	printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
1073ad879127SKrish Sadhukhan 	       latvmsave_min, vmsave_sum / LATENCY_RUNS);
1074ad879127SKrish Sadhukhan 	printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
1075ad879127SKrish Sadhukhan 	       latstgi_min, stgi_sum / LATENCY_RUNS);
1076ad879127SKrish Sadhukhan 	printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
1077ad879127SKrish Sadhukhan 	       latclgi_min, clgi_sum / LATENCY_RUNS);
1078ad879127SKrish Sadhukhan 	return true;
1079ad879127SKrish Sadhukhan }
1080ad879127SKrish Sadhukhan 
1081493d27d4SSean Christopherson /*
1082493d27d4SSean Christopherson  * Report failures from SVM guest code, and on failure, set the stage to -1 and
1083493d27d4SSean Christopherson  * do VMMCALL to terminate the test (host side must treat -1 as "finished").
1084493d27d4SSean Christopherson  * TODO: fix the tests that don't play nice with a straight report, e.g. the
1085493d27d4SSean Christopherson  * V_TPR test fails if report() is invoked.
1086493d27d4SSean Christopherson  */
1087493d27d4SSean Christopherson #define report_svm_guest(cond, test, fmt, args...)	\
1088493d27d4SSean Christopherson do {							\
1089493d27d4SSean Christopherson 	if (!(cond)) {					\
1090493d27d4SSean Christopherson 		report_fail(fmt, ##args);		\
1091493d27d4SSean Christopherson 		set_test_stage(test, -1);		\
1092493d27d4SSean Christopherson 		vmmcall();				\
1093493d27d4SSean Christopherson 	}						\
1094493d27d4SSean Christopherson } while (0)
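
/*
 * Illustrative usage of report_svm_guest() — a hypothetical guest body, not
 * one of the registered tests below (example_guest, this_cpu_has() and
 * X86_FEATURE_SVM are only meant to sketch the pattern):
 *
 *	static void example_guest(struct svm_test *test)
 *	{
 *		report_svm_guest(this_cpu_has(X86_FEATURE_SVM), test,
 *				 "SVM feature visible to L2");
 *		vmmcall();
 *	}
 *
 * On failure the macro reports, parks the stage at -1 and VMMCALLs out, so
 * the host-side finished() handler can stop the test immediately.
 */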
1095493d27d4SSean Christopherson 
1096ad879127SKrish Sadhukhan bool pending_event_ipi_fired;
1097ad879127SKrish Sadhukhan bool pending_event_guest_run;
1098ad879127SKrish Sadhukhan 
pending_event_ipi_isr(isr_regs_t * regs)1099ad879127SKrish Sadhukhan static void pending_event_ipi_isr(isr_regs_t *regs)
1100ad879127SKrish Sadhukhan {
1101ad879127SKrish Sadhukhan 	pending_event_ipi_fired = true;
1102ad879127SKrish Sadhukhan 	eoi();
1103ad879127SKrish Sadhukhan }
1104ad879127SKrish Sadhukhan 
pending_event_prepare(struct svm_test * test)1105ad879127SKrish Sadhukhan static void pending_event_prepare(struct svm_test *test)
1106ad879127SKrish Sadhukhan {
1107ad879127SKrish Sadhukhan 	int ipi_vector = 0xf1;
1108ad879127SKrish Sadhukhan 
1109ad879127SKrish Sadhukhan 	default_prepare(test);
1110ad879127SKrish Sadhukhan 
1111ad879127SKrish Sadhukhan 	pending_event_ipi_fired = false;
1112ad879127SKrish Sadhukhan 
1113ad879127SKrish Sadhukhan 	handle_irq(ipi_vector, pending_event_ipi_isr);
1114ad879127SKrish Sadhukhan 
1115ad879127SKrish Sadhukhan 	pending_event_guest_run = false;
1116ad879127SKrish Sadhukhan 
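	/*
	 * Intercept physical interrupts and set V_INTR_MASKING so the
	 * self-IPI sent below is not delivered to the guest: the INTR
	 * intercept turns the pending interrupt into an immediate #VMEXIT,
	 * and V_INTR_MASKING keeps it pending for the host (host EFLAGS.IF,
	 * not the guest's, gates physical interrupt delivery).
	 */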
1117096cf7feSPaolo Bonzini 	vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1118096cf7feSPaolo Bonzini 	vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1119ad879127SKrish Sadhukhan 
1120ad879127SKrish Sadhukhan 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1121ad879127SKrish Sadhukhan 		       APIC_DM_FIXED | ipi_vector, 0);
1122ad879127SKrish Sadhukhan 
1123ad879127SKrish Sadhukhan 	set_test_stage(test, 0);
1124ad879127SKrish Sadhukhan }
1125ad879127SKrish Sadhukhan 
pending_event_test(struct svm_test * test)1126ad879127SKrish Sadhukhan static void pending_event_test(struct svm_test *test)
1127ad879127SKrish Sadhukhan {
1128ad879127SKrish Sadhukhan 	pending_event_guest_run = true;
1129ad879127SKrish Sadhukhan }
1130ad879127SKrish Sadhukhan 
pending_event_finished(struct svm_test * test)1131ad879127SKrish Sadhukhan static bool pending_event_finished(struct svm_test *test)
1132ad879127SKrish Sadhukhan {
1133ad879127SKrish Sadhukhan 	switch (get_test_stage(test)) {
1134ad879127SKrish Sadhukhan 	case 0:
1135096cf7feSPaolo Bonzini 		if (vmcb->control.exit_code != SVM_EXIT_INTR) {
1136198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to pending interrupt. Exit reason 0x%x",
1137096cf7feSPaolo Bonzini 				    vmcb->control.exit_code);
1138ad879127SKrish Sadhukhan 			return true;
1139ad879127SKrish Sadhukhan 		}
1140ad879127SKrish Sadhukhan 
1141096cf7feSPaolo Bonzini 		vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1142096cf7feSPaolo Bonzini 		vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1143ad879127SKrish Sadhukhan 
1144ad879127SKrish Sadhukhan 		if (pending_event_guest_run) {
1145198dfd0eSJanis Schoetterl-Glausch 			report_fail("Guest ran before host received IPI");
1146ad879127SKrish Sadhukhan 			return true;
1147ad879127SKrish Sadhukhan 		}
1148ad879127SKrish Sadhukhan 
1149e4007e62SMaxim Levitsky 		sti_nop_cli();
1150ad879127SKrish Sadhukhan 
1151ad879127SKrish Sadhukhan 		if (!pending_event_ipi_fired) {
1152198dfd0eSJanis Schoetterl-Glausch 			report_fail("Pending interrupt not dispatched after IRQ enabled");
1153ad879127SKrish Sadhukhan 			return true;
1154ad879127SKrish Sadhukhan 		}
1155ad879127SKrish Sadhukhan 		break;
1156ad879127SKrish Sadhukhan 
1157ad879127SKrish Sadhukhan 	case 1:
1158ad879127SKrish Sadhukhan 		if (!pending_event_guest_run) {
1159198dfd0eSJanis Schoetterl-Glausch 			report_fail("Guest did not resume when no interrupt was pending");
1160ad879127SKrish Sadhukhan 			return true;
1161ad879127SKrish Sadhukhan 		}
1162ad879127SKrish Sadhukhan 		break;
1163ad879127SKrish Sadhukhan 	}
1164ad879127SKrish Sadhukhan 
1165ad879127SKrish Sadhukhan 	inc_test_stage(test);
1166ad879127SKrish Sadhukhan 
1167ad879127SKrish Sadhukhan 	return get_test_stage(test) == 2;
1168ad879127SKrish Sadhukhan }
1169ad879127SKrish Sadhukhan 
pending_event_check(struct svm_test * test)1170ad879127SKrish Sadhukhan static bool pending_event_check(struct svm_test *test)
1171ad879127SKrish Sadhukhan {
1172ad879127SKrish Sadhukhan 	return get_test_stage(test) == 2;
1173ad879127SKrish Sadhukhan }
1174ad879127SKrish Sadhukhan 
pending_event_cli_prepare(struct svm_test * test)117585dc2aceSPaolo Bonzini static void pending_event_cli_prepare(struct svm_test *test)
1176ad879127SKrish Sadhukhan {
1177ad879127SKrish Sadhukhan 	default_prepare(test);
1178ad879127SKrish Sadhukhan 
1179ad879127SKrish Sadhukhan 	pending_event_ipi_fired = false;
1180ad879127SKrish Sadhukhan 
1181ad879127SKrish Sadhukhan 	handle_irq(0xf1, pending_event_ipi_isr);
1182ad879127SKrish Sadhukhan 
1183ad879127SKrish Sadhukhan 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1184ad879127SKrish Sadhukhan 		       APIC_DM_FIXED | 0xf1, 0);
1185ad879127SKrish Sadhukhan 
1186ad879127SKrish Sadhukhan 	set_test_stage(test, 0);
1187ad879127SKrish Sadhukhan }
1188ad879127SKrish Sadhukhan 
pending_event_cli_prepare_gif_clear(struct svm_test * test)118985dc2aceSPaolo Bonzini static void pending_event_cli_prepare_gif_clear(struct svm_test *test)
1190ad879127SKrish Sadhukhan {
1191ad879127SKrish Sadhukhan 	cli();
1192ad879127SKrish Sadhukhan }
1193ad879127SKrish Sadhukhan 
pending_event_cli_test(struct svm_test * test)119485dc2aceSPaolo Bonzini static void pending_event_cli_test(struct svm_test *test)
1195ad879127SKrish Sadhukhan {
1196493d27d4SSean Christopherson 	report_svm_guest(!pending_event_ipi_fired, test,
1197493d27d4SSean Christopherson 			 "IRQ should NOT be delivered while IRQs disabled");
1198ad879127SKrish Sadhukhan 
119985dc2aceSPaolo Bonzini 	/* VINTR_MASKING is zero.  This should cause the IPI to fire.  */
1200e4007e62SMaxim Levitsky 	sti_nop_cli();
1201ad879127SKrish Sadhukhan 
1202493d27d4SSean Christopherson 	report_svm_guest(pending_event_ipi_fired, test,
1203493d27d4SSean Christopherson 			 "IRQ should be delivered after enabling IRQs");
1204ad879127SKrish Sadhukhan 	vmmcall();
1205ad879127SKrish Sadhukhan 
120685dc2aceSPaolo Bonzini 	/*
120785dc2aceSPaolo Bonzini 	 * Now VINTR_MASKING=1, but no interrupt is pending so
120885dc2aceSPaolo Bonzini 	 * the VINTR interception should be clear in VMCB02.  Check
120985dc2aceSPaolo Bonzini 	 * that L0 did not leave a stale VINTR in the VMCB.
121085dc2aceSPaolo Bonzini 	 */
1211e4007e62SMaxim Levitsky 	sti_nop_cli();
1212ad879127SKrish Sadhukhan }
1213ad879127SKrish Sadhukhan 
pending_event_cli_finished(struct svm_test * test)121485dc2aceSPaolo Bonzini static bool pending_event_cli_finished(struct svm_test *test)
1215ad879127SKrish Sadhukhan {
1216493d27d4SSean Christopherson 	report_svm_guest(vmcb->control.exit_code == SVM_EXIT_VMMCALL, test,
1217493d27d4SSean Christopherson 			 "Wanted VMMCALL VM-Exit, got exit reason 0x%x",
1218096cf7feSPaolo Bonzini 			 vmcb->control.exit_code);
1219ad879127SKrish Sadhukhan 
1220ad879127SKrish Sadhukhan 	switch (get_test_stage(test)) {
1221ad879127SKrish Sadhukhan 	case 0:
1222096cf7feSPaolo Bonzini 		vmcb->save.rip += 3;
1223ad879127SKrish Sadhukhan 
1224ad879127SKrish Sadhukhan 		pending_event_ipi_fired = false;
1225ad879127SKrish Sadhukhan 
1226096cf7feSPaolo Bonzini 		vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1227ad879127SKrish Sadhukhan 
122885dc2aceSPaolo Bonzini 		/* Now entering again with VINTR_MASKING=1.  */
1229ad879127SKrish Sadhukhan 		apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1230ad879127SKrish Sadhukhan 			       APIC_DM_FIXED | 0xf1, 0);
1231ad879127SKrish Sadhukhan 
1232ad879127SKrish Sadhukhan 		break;
1233ad879127SKrish Sadhukhan 
1234ad879127SKrish Sadhukhan 	case 1:
1235ad879127SKrish Sadhukhan 		if (pending_event_ipi_fired) {
1236198dfd0eSJanis Schoetterl-Glausch 			report_fail("Interrupt triggered by guest");
1237ad879127SKrish Sadhukhan 			return true;
1238ad879127SKrish Sadhukhan 		}
1239ad879127SKrish Sadhukhan 
1240e4007e62SMaxim Levitsky 		sti_nop_cli();
1241ad879127SKrish Sadhukhan 
1242ad879127SKrish Sadhukhan 		if (!pending_event_ipi_fired) {
1243198dfd0eSJanis Schoetterl-Glausch 			report_fail("Interrupt not triggered by host");
1244ad879127SKrish Sadhukhan 			return true;
1245ad879127SKrish Sadhukhan 		}
1246ad879127SKrish Sadhukhan 
1247ad879127SKrish Sadhukhan 		break;
1248ad879127SKrish Sadhukhan 
1249ad879127SKrish Sadhukhan 	default:
1250ad879127SKrish Sadhukhan 		return true;
1251ad879127SKrish Sadhukhan 	}
1252ad879127SKrish Sadhukhan 
1253ad879127SKrish Sadhukhan 	inc_test_stage(test);
1254ad879127SKrish Sadhukhan 
1255ad879127SKrish Sadhukhan 	return get_test_stage(test) == 2;
1256ad879127SKrish Sadhukhan }
1257ad879127SKrish Sadhukhan 
pending_event_cli_check(struct svm_test * test)125885dc2aceSPaolo Bonzini static bool pending_event_cli_check(struct svm_test *test)
1259ad879127SKrish Sadhukhan {
1260ad879127SKrish Sadhukhan 	return get_test_stage(test) == 2;
1261ad879127SKrish Sadhukhan }
1262ad879127SKrish Sadhukhan 
126385dc2aceSPaolo Bonzini #define TIMER_VECTOR    222
126485dc2aceSPaolo Bonzini 
126585dc2aceSPaolo Bonzini static volatile bool timer_fired;
126685dc2aceSPaolo Bonzini 
timer_isr(isr_regs_t * regs)126785dc2aceSPaolo Bonzini static void timer_isr(isr_regs_t *regs)
126885dc2aceSPaolo Bonzini {
126985dc2aceSPaolo Bonzini 	timer_fired = true;
127085dc2aceSPaolo Bonzini 	apic_write(APIC_EOI, 0);
127185dc2aceSPaolo Bonzini }
127285dc2aceSPaolo Bonzini 
interrupt_prepare(struct svm_test * test)127385dc2aceSPaolo Bonzini static void interrupt_prepare(struct svm_test *test)
127485dc2aceSPaolo Bonzini {
127585dc2aceSPaolo Bonzini 	default_prepare(test);
127685dc2aceSPaolo Bonzini 	handle_irq(TIMER_VECTOR, timer_isr);
127785dc2aceSPaolo Bonzini 	timer_fired = false;
127885dc2aceSPaolo Bonzini 	set_test_stage(test, 0);
127985dc2aceSPaolo Bonzini }
128085dc2aceSPaolo Bonzini 
interrupt_test(struct svm_test * test)128185dc2aceSPaolo Bonzini static void interrupt_test(struct svm_test *test)
128285dc2aceSPaolo Bonzini {
128385dc2aceSPaolo Bonzini 	long long start, loops;
128485dc2aceSPaolo Bonzini 
1285a2c7dff7SMaxim Levitsky 	apic_setup_timer(TIMER_VECTOR, APIC_LVT_TIMER_PERIODIC);
1286787f0aebSMaxim Levitsky 	sti();
128798eb2a39SPaolo Bonzini 	apic_start_timer(1000);
1288a2c7dff7SMaxim Levitsky 
128985dc2aceSPaolo Bonzini 	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
129085dc2aceSPaolo Bonzini 		asm volatile ("nop");
129185dc2aceSPaolo Bonzini 
1292493d27d4SSean Christopherson 	report_svm_guest(timer_fired, test,
1293493d27d4SSean Christopherson 			 "direct interrupt while running guest");
129485dc2aceSPaolo Bonzini 
1295a2c7dff7SMaxim Levitsky 	apic_stop_timer();
1296787f0aebSMaxim Levitsky 	cli();
129785dc2aceSPaolo Bonzini 	vmmcall();
129885dc2aceSPaolo Bonzini 
129985dc2aceSPaolo Bonzini 	timer_fired = false;
130098eb2a39SPaolo Bonzini 	apic_start_timer(1000);
130185dc2aceSPaolo Bonzini 	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
130285dc2aceSPaolo Bonzini 		asm volatile ("nop");
130385dc2aceSPaolo Bonzini 
1304493d27d4SSean Christopherson 	report_svm_guest(timer_fired, test,
1305493d27d4SSean Christopherson 			 "intercepted interrupt while running guest");
130685dc2aceSPaolo Bonzini 
1307787f0aebSMaxim Levitsky 	sti();
1308a2c7dff7SMaxim Levitsky 	apic_stop_timer();
1309787f0aebSMaxim Levitsky 	cli();
131085dc2aceSPaolo Bonzini 
131185dc2aceSPaolo Bonzini 	timer_fired = false;
131285dc2aceSPaolo Bonzini 	start = rdtsc();
1313a2c7dff7SMaxim Levitsky 	apic_start_timer(1000000);
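	/*
	 * safe_halt() executes STI immediately followed by HLT; the STI
	 * interrupt shadow makes the pair atomic, so the timer IRQ cannot
	 * sneak in between enabling IRQs and halting.
	 */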
1314a3001422SOliver Upton 	safe_halt();
131585dc2aceSPaolo Bonzini 
1316493d27d4SSean Christopherson 	report_svm_guest(timer_fired, test, "direct interrupt + hlt");
1317493d27d4SSean Christopherson 	report(rdtsc() - start > 10000, "IRQ arrived after expected delay");
131885dc2aceSPaolo Bonzini 
1319a2c7dff7SMaxim Levitsky 	apic_stop_timer();
1320787f0aebSMaxim Levitsky 	cli();
132185dc2aceSPaolo Bonzini 	vmmcall();
132285dc2aceSPaolo Bonzini 
132385dc2aceSPaolo Bonzini 	timer_fired = false;
132485dc2aceSPaolo Bonzini 	start = rdtsc();
1325a2c7dff7SMaxim Levitsky 	apic_start_timer(1000000);
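	/*
	 * Here IRQs stay disabled in the guest; the host re-enabled the INTR
	 * intercept and V_INTR_MASKING after the vmmcall above, so the timer
	 * interrupt causes an INTR #VMEXIT that takes the vCPU out of HLT
	 * and is serviced host-side in interrupt_finished().
	 */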
132685dc2aceSPaolo Bonzini 	asm volatile ("hlt");
132785dc2aceSPaolo Bonzini 
1328493d27d4SSean Christopherson 	report_svm_guest(timer_fired, test, "intercepted interrupt + hlt");
1329493d27d4SSean Christopherson 	report(rdtsc() - start > 10000, "IRQ arrived after expected delay");
133085dc2aceSPaolo Bonzini 
1331a2c7dff7SMaxim Levitsky 	apic_cleanup_timer();
133285dc2aceSPaolo Bonzini }
133385dc2aceSPaolo Bonzini 
interrupt_finished(struct svm_test * test)133485dc2aceSPaolo Bonzini static bool interrupt_finished(struct svm_test *test)
133585dc2aceSPaolo Bonzini {
133685dc2aceSPaolo Bonzini 	switch (get_test_stage(test)) {
133785dc2aceSPaolo Bonzini 	case 0:
133885dc2aceSPaolo Bonzini 	case 2:
1339096cf7feSPaolo Bonzini 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1340198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1341096cf7feSPaolo Bonzini 				    vmcb->control.exit_code);
134285dc2aceSPaolo Bonzini 			return true;
134385dc2aceSPaolo Bonzini 		}
1344096cf7feSPaolo Bonzini 		vmcb->save.rip += 3;
134585dc2aceSPaolo Bonzini 
1346096cf7feSPaolo Bonzini 		vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1347096cf7feSPaolo Bonzini 		vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
134885dc2aceSPaolo Bonzini 		break;
134985dc2aceSPaolo Bonzini 
135085dc2aceSPaolo Bonzini 	case 1:
135185dc2aceSPaolo Bonzini 	case 3:
1352096cf7feSPaolo Bonzini 		if (vmcb->control.exit_code != SVM_EXIT_INTR) {
1353198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to intr intercept. Exit reason 0x%x",
1354096cf7feSPaolo Bonzini 				    vmcb->control.exit_code);
135585dc2aceSPaolo Bonzini 			return true;
135685dc2aceSPaolo Bonzini 		}
135785dc2aceSPaolo Bonzini 
1358e4007e62SMaxim Levitsky 		sti_nop_cli();
135985dc2aceSPaolo Bonzini 
1360096cf7feSPaolo Bonzini 		vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1361096cf7feSPaolo Bonzini 		vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
136285dc2aceSPaolo Bonzini 		break;
136385dc2aceSPaolo Bonzini 
136485dc2aceSPaolo Bonzini 	case 4:
136585dc2aceSPaolo Bonzini 		break;
136685dc2aceSPaolo Bonzini 
136785dc2aceSPaolo Bonzini 	default:
136885dc2aceSPaolo Bonzini 		return true;
136985dc2aceSPaolo Bonzini 	}
137085dc2aceSPaolo Bonzini 
137185dc2aceSPaolo Bonzini 	inc_test_stage(test);
137285dc2aceSPaolo Bonzini 
137385dc2aceSPaolo Bonzini 	return get_test_stage(test) == 5;
137485dc2aceSPaolo Bonzini }
137585dc2aceSPaolo Bonzini 
interrupt_check(struct svm_test * test)137685dc2aceSPaolo Bonzini static bool interrupt_check(struct svm_test *test)
137785dc2aceSPaolo Bonzini {
137885dc2aceSPaolo Bonzini 	return get_test_stage(test) == 5;
137985dc2aceSPaolo Bonzini }
138085dc2aceSPaolo Bonzini 
1381d4db486bSCathy Avery static volatile bool nmi_fired;
1382d4db486bSCathy Avery 
nmi_handler(struct ex_regs * regs)13834a1207f6SMaxim Levitsky static void nmi_handler(struct ex_regs *regs)
1384d4db486bSCathy Avery {
1385d4db486bSCathy Avery 	nmi_fired = true;
1386d4db486bSCathy Avery }
1387d4db486bSCathy Avery 
nmi_prepare(struct svm_test * test)1388d4db486bSCathy Avery static void nmi_prepare(struct svm_test *test)
1389d4db486bSCathy Avery {
1390d4db486bSCathy Avery 	default_prepare(test);
1391d4db486bSCathy Avery 	nmi_fired = false;
13924a1207f6SMaxim Levitsky 	handle_exception(NMI_VECTOR, nmi_handler);
1393d4db486bSCathy Avery 	set_test_stage(test, 0);
1394d4db486bSCathy Avery }
1395d4db486bSCathy Avery 
nmi_test(struct svm_test * test)1396d4db486bSCathy Avery static void nmi_test(struct svm_test *test)
1397d4db486bSCathy Avery {
1398d4db486bSCathy Avery 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
1399d4db486bSCathy Avery 
1400493d27d4SSean Christopherson 	report_svm_guest(nmi_fired, test, "direct NMI while running guest");
1401d4db486bSCathy Avery 
1402d4db486bSCathy Avery 	vmmcall();
1403d4db486bSCathy Avery 
1404d4db486bSCathy Avery 	nmi_fired = false;
1405d4db486bSCathy Avery 
1406d4db486bSCathy Avery 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);
1407d4db486bSCathy Avery 
1408493d27d4SSean Christopherson 	report_svm_guest(nmi_fired, test, "intercepted pending NMI delivered to guest");
1409d4db486bSCathy Avery }
1410d4db486bSCathy Avery 
nmi_finished(struct svm_test * test)1411d4db486bSCathy Avery static bool nmi_finished(struct svm_test *test)
1412d4db486bSCathy Avery {
1413d4db486bSCathy Avery 	switch (get_test_stage(test)) {
1414d4db486bSCathy Avery 	case 0:
1415d4db486bSCathy Avery 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1416198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
1417d4db486bSCathy Avery 				    vmcb->control.exit_code);
1418d4db486bSCathy Avery 			return true;
1419d4db486bSCathy Avery 		}
1420d4db486bSCathy Avery 		vmcb->save.rip += 3;
1421d4db486bSCathy Avery 
1422d4db486bSCathy Avery 		vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
1423d4db486bSCathy Avery 		break;
1424d4db486bSCathy Avery 
1425d4db486bSCathy Avery 	case 1:
1426d4db486bSCathy Avery 		if (vmcb->control.exit_code != SVM_EXIT_NMI) {
1427198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x",
1428d4db486bSCathy Avery 				    vmcb->control.exit_code);
1429d4db486bSCathy Avery 			return true;
1430d4db486bSCathy Avery 		}
1431d4db486bSCathy Avery 
14325c3582f0SJanis Schoetterl-Glausch 		report_pass("NMI intercept while running guest");
1433d4db486bSCathy Avery 		break;
1434d4db486bSCathy Avery 
1435d4db486bSCathy Avery 	case 2:
1436d4db486bSCathy Avery 		break;
1437d4db486bSCathy Avery 
1438d4db486bSCathy Avery 	default:
1439d4db486bSCathy Avery 		return true;
1440d4db486bSCathy Avery 	}
1441d4db486bSCathy Avery 
1442d4db486bSCathy Avery 	inc_test_stage(test);
1443d4db486bSCathy Avery 
1444d4db486bSCathy Avery 	return get_test_stage(test) == 3;
1445d4db486bSCathy Avery }
1446d4db486bSCathy Avery 
nmi_check(struct svm_test * test)1447d4db486bSCathy Avery static bool nmi_check(struct svm_test *test)
1448d4db486bSCathy Avery {
1449d4db486bSCathy Avery 	return get_test_stage(test) == 3;
1450d4db486bSCathy Avery }
1451d4db486bSCathy Avery 
14529da1f4d8SCathy Avery #define NMI_DELAY 100000000ULL
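/*
 * Delay, in TSC ticks, that the remote CPU waits before sending each NMI;
 * long enough that CPU 0 has reliably reached HLT, and also the minimum
 * time nmi_hlt_test() expects to have spent halted.
 */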
14539da1f4d8SCathy Avery 
nmi_message_thread(void * _test)14549da1f4d8SCathy Avery static void nmi_message_thread(void *_test)
14559da1f4d8SCathy Avery {
14569da1f4d8SCathy Avery 	struct svm_test *test = _test;
14579da1f4d8SCathy Avery 
14589da1f4d8SCathy Avery 	while (get_test_stage(test) != 1)
14599da1f4d8SCathy Avery 		pause();
14609da1f4d8SCathy Avery 
14619da1f4d8SCathy Avery 	delay(NMI_DELAY);
14629da1f4d8SCathy Avery 
14639da1f4d8SCathy Avery 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
14649da1f4d8SCathy Avery 
14659da1f4d8SCathy Avery 	while (get_test_stage(test) != 2)
14669da1f4d8SCathy Avery 		pause();
14679da1f4d8SCathy Avery 
14689da1f4d8SCathy Avery 	delay(NMI_DELAY);
14699da1f4d8SCathy Avery 
14709da1f4d8SCathy Avery 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
14719da1f4d8SCathy Avery }
14729da1f4d8SCathy Avery 
nmi_hlt_test(struct svm_test * test)14739da1f4d8SCathy Avery static void nmi_hlt_test(struct svm_test *test)
14749da1f4d8SCathy Avery {
14759da1f4d8SCathy Avery 	long long start;
14769da1f4d8SCathy Avery 
14779da1f4d8SCathy Avery 	on_cpu_async(1, nmi_message_thread, test);
14789da1f4d8SCathy Avery 
14799da1f4d8SCathy Avery 	start = rdtsc();
14809da1f4d8SCathy Avery 
14819da1f4d8SCathy Avery 	set_test_stage(test, 1);
14829da1f4d8SCathy Avery 
14839da1f4d8SCathy Avery 	asm volatile ("hlt");
14849da1f4d8SCathy Avery 
1485493d27d4SSean Christopherson 	report_svm_guest(nmi_fired, test, "direct NMI + hlt");
1486493d27d4SSean Christopherson 	report(rdtsc() - start > NMI_DELAY, "direct NMI after expected delay");
14879da1f4d8SCathy Avery 
14889da1f4d8SCathy Avery 	nmi_fired = false;
14899da1f4d8SCathy Avery 
14909da1f4d8SCathy Avery 	vmmcall();
14919da1f4d8SCathy Avery 
14929da1f4d8SCathy Avery 	start = rdtsc();
14939da1f4d8SCathy Avery 
14949da1f4d8SCathy Avery 	set_test_stage(test, 2);
14959da1f4d8SCathy Avery 
14969da1f4d8SCathy Avery 	asm volatile ("hlt");
14979da1f4d8SCathy Avery 
1498493d27d4SSean Christopherson 	report_svm_guest(nmi_fired, test, "intercepted NMI + hlt");
1499493d27d4SSean Christopherson 	report(rdtsc() - start > NMI_DELAY, "intercepted NMI after expected delay");
15009da1f4d8SCathy Avery 
15019da1f4d8SCathy Avery 	set_test_stage(test, 3);
15029da1f4d8SCathy Avery }
15039da1f4d8SCathy Avery 
nmi_hlt_finished(struct svm_test * test)15049da1f4d8SCathy Avery static bool nmi_hlt_finished(struct svm_test *test)
15059da1f4d8SCathy Avery {
15069da1f4d8SCathy Avery 	switch (get_test_stage(test)) {
15079da1f4d8SCathy Avery 	case 1:
15089da1f4d8SCathy Avery 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1509198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
15109da1f4d8SCathy Avery 				    vmcb->control.exit_code);
15119da1f4d8SCathy Avery 			return true;
15129da1f4d8SCathy Avery 		}
15139da1f4d8SCathy Avery 		vmcb->save.rip += 3;
15149da1f4d8SCathy Avery 
15159da1f4d8SCathy Avery 		vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
15169da1f4d8SCathy Avery 		break;
15179da1f4d8SCathy Avery 
15189da1f4d8SCathy Avery 	case 2:
15199da1f4d8SCathy Avery 		if (vmcb->control.exit_code != SVM_EXIT_NMI) {
1520198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x",
15219da1f4d8SCathy Avery 				    vmcb->control.exit_code);
15229da1f4d8SCathy Avery 			return true;
15239da1f4d8SCathy Avery 		}
15249da1f4d8SCathy Avery 
15255c3582f0SJanis Schoetterl-Glausch 		report_pass("NMI intercept while running guest");
15269da1f4d8SCathy Avery 		break;
15279da1f4d8SCathy Avery 
15289da1f4d8SCathy Avery 	case 3:
15299da1f4d8SCathy Avery 		break;
15309da1f4d8SCathy Avery 
15319da1f4d8SCathy Avery 	default:
15329da1f4d8SCathy Avery 		return true;
15339da1f4d8SCathy Avery 	}
15349da1f4d8SCathy Avery 
15359da1f4d8SCathy Avery 	return get_test_stage(test) == 3;
15369da1f4d8SCathy Avery }
15379da1f4d8SCathy Avery 
nmi_hlt_check(struct svm_test * test)15389da1f4d8SCathy Avery static bool nmi_hlt_check(struct svm_test *test)
15399da1f4d8SCathy Avery {
15409da1f4d8SCathy Avery 	return get_test_stage(test) == 3;
15419da1f4d8SCathy Avery }
15429da1f4d8SCathy Avery 
vnmi_prepare(struct svm_test * test)154308200397SSantosh Shukla static void vnmi_prepare(struct svm_test *test)
154408200397SSantosh Shukla {
154508200397SSantosh Shukla 	nmi_prepare(test);
154608200397SSantosh Shukla 
154708200397SSantosh Shukla 	/*
154808200397SSantosh Shukla 	 * Disable NMI interception to start.  Enabling vNMI without
154908200397SSantosh Shukla 	 * intercepting "real" NMIs should result in an ERR VM-Exit.
155008200397SSantosh Shukla 	 */
155108200397SSantosh Shukla 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_NMI);
155208200397SSantosh Shukla 	vmcb->control.int_ctl = V_NMI_ENABLE_MASK;
155308200397SSantosh Shukla 	vmcb->control.int_vector = NMI_VECTOR;
155408200397SSantosh Shukla }
155508200397SSantosh Shukla 
vnmi_test(struct svm_test * test)155608200397SSantosh Shukla static void vnmi_test(struct svm_test *test)
155708200397SSantosh Shukla {
155808200397SSantosh Shukla 	report_svm_guest(!nmi_fired, test, "No vNMI before injection");
155908200397SSantosh Shukla 	vmmcall();
156008200397SSantosh Shukla 
156108200397SSantosh Shukla 	report_svm_guest(nmi_fired, test, "vNMI delivered after injection");
156208200397SSantosh Shukla 	vmmcall();
156308200397SSantosh Shukla }
156408200397SSantosh Shukla 
vnmi_finished(struct svm_test * test)156508200397SSantosh Shukla static bool vnmi_finished(struct svm_test *test)
156608200397SSantosh Shukla {
156708200397SSantosh Shukla 	switch (get_test_stage(test)) {
156808200397SSantosh Shukla 	case 0:
156908200397SSantosh Shukla 		if (vmcb->control.exit_code != SVM_EXIT_ERR) {
157008200397SSantosh Shukla 			report_fail("Wanted ERR VM-Exit, got 0x%x",
157108200397SSantosh Shukla 				    vmcb->control.exit_code);
157208200397SSantosh Shukla 			return true;
157308200397SSantosh Shukla 		}
157408200397SSantosh Shukla 		report(!nmi_fired, "vNMI enabled but NMI_INTERCEPT unset!");
157508200397SSantosh Shukla 		vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
157608200397SSantosh Shukla 		vmcb->save.rip += 3;
157708200397SSantosh Shukla 		break;
157808200397SSantosh Shukla 
157908200397SSantosh Shukla 	case 1:
158008200397SSantosh Shukla 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
158108200397SSantosh Shukla 			report_fail("Wanted VMMCALL VM-Exit, got 0x%x",
158208200397SSantosh Shukla 				    vmcb->control.exit_code);
158308200397SSantosh Shukla 			return true;
158408200397SSantosh Shukla 		}
158508200397SSantosh Shukla 		report(!nmi_fired, "vNMI with vector 2 not injected");
158608200397SSantosh Shukla 		vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
158708200397SSantosh Shukla 		vmcb->save.rip += 3;
158808200397SSantosh Shukla 		break;
158908200397SSantosh Shukla 
159008200397SSantosh Shukla 	case 2:
159108200397SSantosh Shukla 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
159208200397SSantosh Shukla 			report_fail("Wanted VMMCALL VM-Exit, got 0x%x",
159308200397SSantosh Shukla 				    vmcb->control.exit_code);
159408200397SSantosh Shukla 			return true;
159508200397SSantosh Shukla 		}
159608200397SSantosh Shukla 		if (vmcb->control.int_ctl & V_NMI_BLOCKING_MASK) {
159708200397SSantosh Shukla 			report_fail("V_NMI_BLOCKING_MASK not cleared on VMEXIT");
159808200397SSantosh Shukla 			return true;
159908200397SSantosh Shukla 		}
160008200397SSantosh Shukla 		report_pass("VNMI serviced");
160108200397SSantosh Shukla 		vmcb->save.rip += 3;
160208200397SSantosh Shukla 		break;
160308200397SSantosh Shukla 
160408200397SSantosh Shukla 	default:
160508200397SSantosh Shukla 		return true;
160608200397SSantosh Shukla 	}
160708200397SSantosh Shukla 
160808200397SSantosh Shukla 	inc_test_stage(test);
160908200397SSantosh Shukla 
161008200397SSantosh Shukla 	return get_test_stage(test) == 3;
161108200397SSantosh Shukla }
161208200397SSantosh Shukla 
vnmi_check(struct svm_test * test)161308200397SSantosh Shukla static bool vnmi_check(struct svm_test *test)
161408200397SSantosh Shukla {
161508200397SSantosh Shukla 	return get_test_stage(test) == 3;
161608200397SSantosh Shukla }
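
/*
 * For reference, the vNMI plumbing exercised above in one place (this is a
 * recap of what vnmi_prepare()/vnmi_finished() do, not a separate test):
 *
 *	vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;        // enable vNMI
 *	vmcb->control.intercept |= 1ULL << INTERCEPT_NMI;  // required by vNMI
 *	vmcb->control.int_ctl |= V_NMI_PENDING_MASK;       // request one vNMI
 */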
161708200397SSantosh Shukla 
16184b4fb247SPaolo Bonzini static volatile int count_exc = 0;
16194b4fb247SPaolo Bonzini 
my_isr(struct ex_regs * r)16204b4fb247SPaolo Bonzini static void my_isr(struct ex_regs *r)
16214b4fb247SPaolo Bonzini {
16224b4fb247SPaolo Bonzini 	count_exc++;
16234b4fb247SPaolo Bonzini }
16244b4fb247SPaolo Bonzini 
exc_inject_prepare(struct svm_test * test)16254b4fb247SPaolo Bonzini static void exc_inject_prepare(struct svm_test *test)
16264b4fb247SPaolo Bonzini {
16278634a266SPaolo Bonzini 	default_prepare(test);
16284b4fb247SPaolo Bonzini 	handle_exception(DE_VECTOR, my_isr);
16294b4fb247SPaolo Bonzini 	handle_exception(NMI_VECTOR, my_isr);
16304b4fb247SPaolo Bonzini }
16314b4fb247SPaolo Bonzini 
16324b4fb247SPaolo Bonzini 
exc_inject_test(struct svm_test * test)16334b4fb247SPaolo Bonzini static void exc_inject_test(struct svm_test *test)
16344b4fb247SPaolo Bonzini {
16354b4fb247SPaolo Bonzini 	asm volatile ("vmmcall\n\tvmmcall\n\t");
16364b4fb247SPaolo Bonzini }
16374b4fb247SPaolo Bonzini 
exc_inject_finished(struct svm_test * test)16384b4fb247SPaolo Bonzini static bool exc_inject_finished(struct svm_test *test)
16394b4fb247SPaolo Bonzini {
16404b4fb247SPaolo Bonzini 	switch (get_test_stage(test)) {
16414b4fb247SPaolo Bonzini 	case 0:
16424b4fb247SPaolo Bonzini 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1643198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
16444b4fb247SPaolo Bonzini 				    vmcb->control.exit_code);
16454b4fb247SPaolo Bonzini 			return true;
16464b4fb247SPaolo Bonzini 		}
16472c1ca866SNadav Amit 		vmcb->save.rip += 3;
16484b4fb247SPaolo Bonzini 		vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
16494b4fb247SPaolo Bonzini 		break;
16504b4fb247SPaolo Bonzini 
16514b4fb247SPaolo Bonzini 	case 1:
16524b4fb247SPaolo Bonzini 		if (vmcb->control.exit_code != SVM_EXIT_ERR) {
1653198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to error. Exit reason 0x%x",
16544b4fb247SPaolo Bonzini 				    vmcb->control.exit_code);
16554b4fb247SPaolo Bonzini 			return true;
16564b4fb247SPaolo Bonzini 		}
16574b4fb247SPaolo Bonzini 		report(count_exc == 0, "exception with vector 2 not injected");
16584b4fb247SPaolo Bonzini 		vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
16594b4fb247SPaolo Bonzini 		break;
16604b4fb247SPaolo Bonzini 
16614b4fb247SPaolo Bonzini 	case 2:
16624b4fb247SPaolo Bonzini 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1663198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
16644b4fb247SPaolo Bonzini 				    vmcb->control.exit_code);
16654b4fb247SPaolo Bonzini 			return true;
16664b4fb247SPaolo Bonzini 		}
16672c1ca866SNadav Amit 		vmcb->save.rip += 3;
16684b4fb247SPaolo Bonzini 		report(count_exc == 1, "divide overflow exception injected");
16694b4fb247SPaolo Bonzini 		report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared");
16704b4fb247SPaolo Bonzini 		break;
16714b4fb247SPaolo Bonzini 
16724b4fb247SPaolo Bonzini 	default:
16734b4fb247SPaolo Bonzini 		return true;
16744b4fb247SPaolo Bonzini 	}
16754b4fb247SPaolo Bonzini 
16764b4fb247SPaolo Bonzini 	inc_test_stage(test);
16774b4fb247SPaolo Bonzini 
16784b4fb247SPaolo Bonzini 	return get_test_stage(test) == 3;
16794b4fb247SPaolo Bonzini }
16804b4fb247SPaolo Bonzini 
exc_inject_check(struct svm_test * test)16814b4fb247SPaolo Bonzini static bool exc_inject_check(struct svm_test *test)
16824b4fb247SPaolo Bonzini {
16834b4fb247SPaolo Bonzini 	return count_exc == 1 && get_test_stage(test) == 3;
16844b4fb247SPaolo Bonzini }
16854b4fb247SPaolo Bonzini 
16869c838954SCathy Avery static volatile bool virq_fired;
16874b3c6114SPaolo Bonzini static volatile unsigned long virq_rip;
16889c838954SCathy Avery 
virq_isr(isr_regs_t * regs)16899c838954SCathy Avery static void virq_isr(isr_regs_t *regs)
16909c838954SCathy Avery {
16919c838954SCathy Avery 	virq_fired = true;
16924b3c6114SPaolo Bonzini 	virq_rip = regs->rip;
16939c838954SCathy Avery }
16949c838954SCathy Avery 
virq_inject_prepare(struct svm_test * test)16959c838954SCathy Avery static void virq_inject_prepare(struct svm_test *test)
16969c838954SCathy Avery {
16979c838954SCathy Avery 	handle_irq(0xf1, virq_isr);
16989c838954SCathy Avery 	default_prepare(test);
16999c838954SCathy Avery 	vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
17009c838954SCathy Avery 		(0x0f << V_INTR_PRIO_SHIFT); // Set to the highest priority
17019c838954SCathy Avery 	vmcb->control.int_vector = 0xf1;
17029c838954SCathy Avery 	virq_fired = false;
17034b3c6114SPaolo Bonzini 	virq_rip = -1;
17049c838954SCathy Avery 	set_test_stage(test, 0);
17059c838954SCathy Avery }
17069c838954SCathy Avery 
virq_inject_test(struct svm_test * test)17079c838954SCathy Avery static void virq_inject_test(struct svm_test *test)
17089c838954SCathy Avery {
1709493d27d4SSean Christopherson 	report_svm_guest(!virq_fired, test, "virtual IRQ blocked after L2 cli");
17109c838954SCathy Avery 
1711e4007e62SMaxim Levitsky 	sti_nop_cli();
17129c838954SCathy Avery 
1713493d27d4SSean Christopherson 	report_svm_guest(virq_fired, test, "virtual IRQ fired after L2 sti");
17149c838954SCathy Avery 
17159c838954SCathy Avery 	vmmcall();
17169c838954SCathy Avery 
1717493d27d4SSean Christopherson 	report_svm_guest(!virq_fired, test, "intercepted VINTR blocked after L2 cli");
17189c838954SCathy Avery 
1719e4007e62SMaxim Levitsky 	sti_nop_cli();
17209c838954SCathy Avery 
1721493d27d4SSean Christopherson 	report_svm_guest(virq_fired, test, "intercepted VINTR fired after L2 sti");
17229c838954SCathy Avery 
17239c838954SCathy Avery 	vmmcall();
17249c838954SCathy Avery 
1725e4007e62SMaxim Levitsky 	sti_nop_cli();
17269c838954SCathy Avery 
1727493d27d4SSean Christopherson 	report_svm_guest(!virq_fired, test,
1728493d27d4SSean Christopherson 			 "virtual IRQ blocked when V_INTR_PRIO is less than V_TPR");
17299c838954SCathy Avery 
17309c838954SCathy Avery 	vmmcall();
17319c838954SCathy Avery 	vmmcall();
17329c838954SCathy Avery }
17339c838954SCathy Avery 
virq_inject_finished(struct svm_test * test)17349c838954SCathy Avery static bool virq_inject_finished(struct svm_test *test)
17359c838954SCathy Avery {
17369c838954SCathy Avery 	vmcb->save.rip += 3;
17379c838954SCathy Avery 
17389c838954SCathy Avery 	switch (get_test_stage(test)) {
17399c838954SCathy Avery 	case 0:
17409c838954SCathy Avery 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1741198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
17429c838954SCathy Avery 				    vmcb->control.exit_code);
17439c838954SCathy Avery 			return true;
17449c838954SCathy Avery 		}
17459c838954SCathy Avery 		if (vmcb->control.int_ctl & V_IRQ_MASK) {
1746198dfd0eSJanis Schoetterl-Glausch 			report_fail("V_IRQ not cleared on VMEXIT after firing");
17479c838954SCathy Avery 			return true;
17489c838954SCathy Avery 		}
17499c838954SCathy Avery 		virq_fired = false;
17509c838954SCathy Avery 		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
17519c838954SCathy Avery 		vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
17529c838954SCathy Avery 			(0x0f << V_INTR_PRIO_SHIFT);
17539c838954SCathy Avery 		break;
17549c838954SCathy Avery 
17559c838954SCathy Avery 	case 1:
17569c838954SCathy Avery 		if (vmcb->control.exit_code != SVM_EXIT_VINTR) {
1757198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to vintr. Exit reason 0x%x",
17589c838954SCathy Avery 				    vmcb->control.exit_code);
17599c838954SCathy Avery 			return true;
17609c838954SCathy Avery 		}
17619c838954SCathy Avery 		if (virq_fired) {
1762198dfd0eSJanis Schoetterl-Glausch 			report_fail("V_IRQ fired before SVM_EXIT_VINTR");
17639c838954SCathy Avery 			return true;
17649c838954SCathy Avery 		}
17659c838954SCathy Avery 		vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
17669c838954SCathy Avery 		break;
17679c838954SCathy Avery 
17689c838954SCathy Avery 	case 2:
17699c838954SCathy Avery 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1770198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
17719c838954SCathy Avery 				    vmcb->control.exit_code);
17729c838954SCathy Avery 			return true;
17739c838954SCathy Avery 		}
17749c838954SCathy Avery 		virq_fired = false;
17759c838954SCathy Avery 		// Set irq to lower priority
17769c838954SCathy Avery 		vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
17779c838954SCathy Avery 			(0x08 << V_INTR_PRIO_SHIFT);
17789c838954SCathy Avery 		// Raise guest TPR
17799c838954SCathy Avery 		vmcb->control.int_ctl |= 0x0a & V_TPR_MASK;
17809c838954SCathy Avery 		break;
17819c838954SCathy Avery 
17829c838954SCathy Avery 	case 3:
17839c838954SCathy Avery 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1784198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
17859c838954SCathy Avery 				    vmcb->control.exit_code);
17869c838954SCathy Avery 			return true;
17879c838954SCathy Avery 		}
17889c838954SCathy Avery 		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
17899c838954SCathy Avery 		break;
17909c838954SCathy Avery 
17919c838954SCathy Avery 	case 4:
17929c838954SCathy Avery 		// INTERCEPT_VINTR should be ignored because V_INTR_PRIO < V_TPR
17939c838954SCathy Avery 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1794198dfd0eSJanis Schoetterl-Glausch 			report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
17959c838954SCathy Avery 				    vmcb->control.exit_code);
17969c838954SCathy Avery 			return true;
17979c838954SCathy Avery 		}
17989c838954SCathy Avery 		break;
17999c838954SCathy Avery 
18009c838954SCathy Avery 	default:
18019c838954SCathy Avery 		return true;
18029c838954SCathy Avery 	}
18039c838954SCathy Avery 
18049c838954SCathy Avery 	inc_test_stage(test);
18059c838954SCathy Avery 
18069c838954SCathy Avery 	return get_test_stage(test) == 5;
18079c838954SCathy Avery }
18089c838954SCathy Avery 
virq_inject_check(struct svm_test * test)18099c838954SCathy Avery static bool virq_inject_check(struct svm_test *test)
18109c838954SCathy Avery {
18119c838954SCathy Avery 	return get_test_stage(test) == 5;
18129c838954SCathy Avery }
18139c838954SCathy Avery 
virq_inject_within_shadow_prepare(struct svm_test * test)18144b3c6114SPaolo Bonzini static void virq_inject_within_shadow_prepare(struct svm_test *test)
18154b3c6114SPaolo Bonzini {
18164b3c6114SPaolo Bonzini 	virq_inject_prepare(test);
18174b3c6114SPaolo Bonzini 	vmcb->control.int_state = SVM_INTERRUPT_SHADOW_MASK;
18184b3c6114SPaolo Bonzini 	vmcb->save.rflags |= X86_EFLAGS_IF;
18194b3c6114SPaolo Bonzini }
18204b3c6114SPaolo Bonzini 
18214b3c6114SPaolo Bonzini extern void virq_inject_within_shadow_test(struct svm_test *test);
18224b3c6114SPaolo Bonzini asm("virq_inject_within_shadow_test: nop; nop; vmmcall");
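/*
 * The guest starts in an interrupt shadow with RFLAGS.IF already set; the
 * shadow holds off the injected V_IRQ for exactly one instruction, so the
 * ISR should observe RIP just past the first nop (label + 1).
 */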
18234b3c6114SPaolo Bonzini 
virq_inject_within_shadow_prepare_gif_clear(struct svm_test * test)18244b3c6114SPaolo Bonzini static void virq_inject_within_shadow_prepare_gif_clear(struct svm_test *test)
18254b3c6114SPaolo Bonzini {
18264b3c6114SPaolo Bonzini 	vmcb->save.rip = (unsigned long) test->guest_func;
18274b3c6114SPaolo Bonzini }
18284b3c6114SPaolo Bonzini 
virq_inject_within_shadow_finished(struct svm_test * test)18294b3c6114SPaolo Bonzini static bool virq_inject_within_shadow_finished(struct svm_test *test)
18304b3c6114SPaolo Bonzini {
18314b3c6114SPaolo Bonzini 	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
18324b3c6114SPaolo Bonzini 		report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x",
18334b3c6114SPaolo Bonzini 			    vmcb->control.exit_code);
18344b3c6114SPaolo Bonzini 	if (!virq_fired)
18354b3c6114SPaolo Bonzini 		report_fail("V_IRQ did not fire");
18364b3c6114SPaolo Bonzini 	else if (virq_rip != (unsigned long) virq_inject_within_shadow_test + 1)
18374b3c6114SPaolo Bonzini 		report_fail("Unexpected RIP for interrupt handler");
18384b3c6114SPaolo Bonzini 	else if (vmcb->control.int_ctl & V_IRQ_MASK)
18394b3c6114SPaolo Bonzini 		report_fail("V_IRQ not cleared on VMEXIT after firing");
18404b3c6114SPaolo Bonzini 	else if (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
18414b3c6114SPaolo Bonzini 		report_fail("Interrupt shadow not cleared");
18424b3c6114SPaolo Bonzini 	else
18434b3c6114SPaolo Bonzini 		inc_test_stage(test);
18444b3c6114SPaolo Bonzini 
18454b3c6114SPaolo Bonzini 	return true;
18464b3c6114SPaolo Bonzini }
18474b3c6114SPaolo Bonzini 
virq_inject_within_shadow_check(struct svm_test * test)18484b3c6114SPaolo Bonzini static bool virq_inject_within_shadow_check(struct svm_test *test)
18494b3c6114SPaolo Bonzini {
18504b3c6114SPaolo Bonzini 	return get_test_stage(test) == 1;
18514b3c6114SPaolo Bonzini }
18524b3c6114SPaolo Bonzini 
1853da338a31SMaxim Levitsky /*
1854da338a31SMaxim Levitsky  * Detect nested guest RIP corruption as explained in kernel commit
1855da338a31SMaxim Levitsky  * b6162e82aef19fee9c32cb3fe9ac30d9116a8c73
1856da338a31SMaxim Levitsky  *
1857da338a31SMaxim Levitsky  * In the assembly loop below 'ins' is executed while IO instructions
1858da338a31SMaxim Levitsky  * are not intercepted; the instruction is emulated by L0.
1859da338a31SMaxim Levitsky  *
1860da338a31SMaxim Levitsky  * At the same time we are getting interrupts from the local APIC timer,
1861da338a31SMaxim Levitsky  * and we do intercept them in L1
1862da338a31SMaxim Levitsky  *
1863da338a31SMaxim Levitsky  * If the interrupt happens on the insb instruction, L0 will VMexit, emulate
1864da338a31SMaxim Levitsky  * the insb instruction and then it will inject the interrupt to L1 through
1865da338a31SMaxim Levitsky  * a nested VMexit.  Due to a bug, it would leave pre-emulation values of RIP,
1866da338a31SMaxim Levitsky  * RAX and RSP in the VMCB.
1867da338a31SMaxim Levitsky  *
1868da338a31SMaxim Levitsky  * In our intercept handler we detect the bug by checking that RIP is that of
1869da338a31SMaxim Levitsky  * the insb instruction, but its memory operand has already been written.
1870da338a31SMaxim Levitsky  * This means that insb was already executed.
1871da338a31SMaxim Levitsky  */
1872da338a31SMaxim Levitsky 
1873da338a31SMaxim Levitsky static volatile int isr_cnt = 0;
1874da338a31SMaxim Levitsky static volatile uint8_t io_port_var = 0xAA;
1875da338a31SMaxim Levitsky extern const char insb_instruction_label[];
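/*
 * insb_instruction_label marks the insb in the guest loop below; the host
 * compares the RIP saved in the VMCB against it, and a match combined with
 * an already-updated io_port_var is the signature of the RIP corruption.
 */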
1876da338a31SMaxim Levitsky 
reg_corruption_isr(isr_regs_t * regs)1877da338a31SMaxim Levitsky static void reg_corruption_isr(isr_regs_t *regs)
1878da338a31SMaxim Levitsky {
1879da338a31SMaxim Levitsky 	isr_cnt++;
1880da338a31SMaxim Levitsky 	apic_write(APIC_EOI, 0);
1881da338a31SMaxim Levitsky }
1882da338a31SMaxim Levitsky 
reg_corruption_prepare(struct svm_test * test)1883da338a31SMaxim Levitsky static void reg_corruption_prepare(struct svm_test *test)
1884da338a31SMaxim Levitsky {
1885da338a31SMaxim Levitsky 	default_prepare(test);
1886da338a31SMaxim Levitsky 	set_test_stage(test, 0);
1887da338a31SMaxim Levitsky 
1888da338a31SMaxim Levitsky 	vmcb->control.int_ctl = V_INTR_MASKING_MASK;
1889da338a31SMaxim Levitsky 	vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1890da338a31SMaxim Levitsky 
1891da338a31SMaxim Levitsky 	handle_irq(TIMER_VECTOR, reg_corruption_isr);
1892da338a31SMaxim Levitsky 
1893da338a31SMaxim Levitsky 	/* set local APIC to inject external interrupts */
1894a2c7dff7SMaxim Levitsky 	apic_setup_timer(TIMER_VECTOR, APIC_LVT_TIMER_PERIODIC);
1895a2c7dff7SMaxim Levitsky 	apic_start_timer(1000);
1896da338a31SMaxim Levitsky }
1897da338a31SMaxim Levitsky 
reg_corruption_test(struct svm_test * test)1898da338a31SMaxim Levitsky static void reg_corruption_test(struct svm_test *test)
1899da338a31SMaxim Levitsky {
1900da338a31SMaxim Levitsky 	/* this is an endless loop, interrupted by the timer interrupt */
1901da338a31SMaxim Levitsky 	asm volatile (
1902da338a31SMaxim Levitsky 		      "1:\n\t"
1903da338a31SMaxim Levitsky 		      "movw $0x4d0, %%dx\n\t" // IO port
1904da338a31SMaxim Levitsky 		      "lea %[io_port_var], %%rdi\n\t"
1905da338a31SMaxim Levitsky 		      "movb $0xAA, %[io_port_var]\n\t"
1906da338a31SMaxim Levitsky 		      "insb_instruction_label:\n\t"
1907da338a31SMaxim Levitsky 		      "insb\n\t"
1908da338a31SMaxim Levitsky 		      "jmp 1b\n\t"
1909da338a31SMaxim Levitsky 
1910da338a31SMaxim Levitsky 		      : [io_port_var] "=m" (io_port_var)
1911da338a31SMaxim Levitsky 		      : /* no inputs */
1912da338a31SMaxim Levitsky 		      : "rdx", "rdi"
1913da338a31SMaxim Levitsky 		      );
1914da338a31SMaxim Levitsky }
1915da338a31SMaxim Levitsky 
reg_corruption_finished(struct svm_test * test)1916da338a31SMaxim Levitsky static bool reg_corruption_finished(struct svm_test *test)
1917da338a31SMaxim Levitsky {
1918da338a31SMaxim Levitsky 	if (isr_cnt == 10000) {
19195c3582f0SJanis Schoetterl-Glausch 		report_pass("No RIP corruption detected after %d timer interrupts",
1920da338a31SMaxim Levitsky 			    isr_cnt);
1921da338a31SMaxim Levitsky 		set_test_stage(test, 1);
1922491bbc64SMaxim Levitsky 		goto cleanup;
1923da338a31SMaxim Levitsky 	}
1924da338a31SMaxim Levitsky 
1925da338a31SMaxim Levitsky 	if (vmcb->control.exit_code == SVM_EXIT_INTR) {
1926da338a31SMaxim Levitsky 
1927da338a31SMaxim Levitsky 		void* guest_rip = (void*)vmcb->save.rip;
1928da338a31SMaxim Levitsky 
1929e4007e62SMaxim Levitsky 		sti_nop_cli();
1930da338a31SMaxim Levitsky 
1931da338a31SMaxim Levitsky 		if (guest_rip == insb_instruction_label && io_port_var != 0xAA) {
1932198dfd0eSJanis Schoetterl-Glausch 			report_fail("RIP corruption detected after %d timer interrupts",
1933da338a31SMaxim Levitsky 				    isr_cnt);
1934491bbc64SMaxim Levitsky 			goto cleanup;
1935da338a31SMaxim Levitsky 		}
1936da338a31SMaxim Levitsky 
1937da338a31SMaxim Levitsky 	}
1938da338a31SMaxim Levitsky 	return false;
1939491bbc64SMaxim Levitsky cleanup:
1940a2c7dff7SMaxim Levitsky 	apic_cleanup_timer();
1941491bbc64SMaxim Levitsky 	return true;
1943da338a31SMaxim Levitsky }
1944da338a31SMaxim Levitsky 
reg_corruption_check(struct svm_test * test)1945da338a31SMaxim Levitsky static bool reg_corruption_check(struct svm_test *test)
1946da338a31SMaxim Levitsky {
1947da338a31SMaxim Levitsky 	return get_test_stage(test) == 1;
1948da338a31SMaxim Levitsky }
1949da338a31SMaxim Levitsky 
get_tss_entry(void * data)19504770e9c8SCathy Avery static void get_tss_entry(void *data)
19514770e9c8SCathy Avery {
1952a7f32d87SPaolo Bonzini 	*((gdt_entry_t **)data) = get_tss_descr();
19534770e9c8SCathy Avery }
19544770e9c8SCathy Avery 
19554770e9c8SCathy Avery static int orig_cpu_count;
19564770e9c8SCathy Avery 
init_startup_prepare(struct svm_test * test)19574770e9c8SCathy Avery static void init_startup_prepare(struct svm_test *test)
19584770e9c8SCathy Avery {
1959a7f32d87SPaolo Bonzini 	gdt_entry_t *tss_entry;
19604770e9c8SCathy Avery 	int i;
19614770e9c8SCathy Avery 
19624770e9c8SCathy Avery 	on_cpu(1, get_tss_entry, &tss_entry);
19634770e9c8SCathy Avery 
1964d36b378fSVarad Gautam 	orig_cpu_count = atomic_read(&cpu_online_count);
19654770e9c8SCathy Avery 
19664770e9c8SCathy Avery 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT,
19674770e9c8SCathy Avery 		       id_map[1]);
19684770e9c8SCathy Avery 
19694770e9c8SCathy Avery 	delay(100000000ULL);
19704770e9c8SCathy Avery 
1971d36b378fSVarad Gautam 	atomic_dec(&cpu_online_count);
19724770e9c8SCathy Avery 
1973a7f32d87SPaolo Bonzini 	tss_entry->type &= ~DESC_BUSY;
19744770e9c8SCathy Avery 
19754770e9c8SCathy Avery 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_STARTUP, id_map[1]);
19764770e9c8SCathy Avery 
1977d36b378fSVarad Gautam 	for (i = 0; i < 5 && atomic_read(&cpu_online_count) < orig_cpu_count; i++)
19784770e9c8SCathy Avery 		delay(100000000ULL);
19794770e9c8SCathy Avery }
19804770e9c8SCathy Avery 
init_startup_finished(struct svm_test * test)19814770e9c8SCathy Avery static bool init_startup_finished(struct svm_test *test)
19824770e9c8SCathy Avery {
19834770e9c8SCathy Avery 	return true;
19844770e9c8SCathy Avery }
19854770e9c8SCathy Avery 
init_startup_check(struct svm_test * test)19864770e9c8SCathy Avery static bool init_startup_check(struct svm_test *test)
19874770e9c8SCathy Avery {
1988d36b378fSVarad Gautam 	return atomic_read(&cpu_online_count) == orig_cpu_count;
19894770e9c8SCathy Avery }
19904770e9c8SCathy Avery 
1991d5da6dfeSCathy Avery static volatile bool init_intercept;
1992d5da6dfeSCathy Avery 
init_intercept_prepare(struct svm_test * test)1993d5da6dfeSCathy Avery static void init_intercept_prepare(struct svm_test *test)
1994d5da6dfeSCathy Avery {
1995d5da6dfeSCathy Avery 	init_intercept = false;
1996d5da6dfeSCathy Avery 	vmcb->control.intercept |= (1ULL << INTERCEPT_INIT);
1997d5da6dfeSCathy Avery }
1998d5da6dfeSCathy Avery 
init_intercept_test(struct svm_test * test)1999d5da6dfeSCathy Avery static void init_intercept_test(struct svm_test *test)
2000d5da6dfeSCathy Avery {
2001d5da6dfeSCathy Avery 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 0);
2002d5da6dfeSCathy Avery }
2003d5da6dfeSCathy Avery 
init_intercept_finished(struct svm_test * test)2004d5da6dfeSCathy Avery static bool init_intercept_finished(struct svm_test *test)
2005d5da6dfeSCathy Avery {
2006d5da6dfeSCathy Avery 	vmcb->save.rip += 3;
2007d5da6dfeSCathy Avery 
2008d5da6dfeSCathy Avery 	if (vmcb->control.exit_code != SVM_EXIT_INIT) {
2009198dfd0eSJanis Schoetterl-Glausch 		report_fail("VMEXIT not due to init intercept. Exit reason 0x%x",
2010d5da6dfeSCathy Avery 			    vmcb->control.exit_code);
2011d5da6dfeSCathy Avery 
2012d5da6dfeSCathy Avery 		return true;
2013d5da6dfeSCathy Avery 	}
2014d5da6dfeSCathy Avery 
2015d5da6dfeSCathy Avery 	init_intercept = true;
2016d5da6dfeSCathy Avery 
20175c3582f0SJanis Schoetterl-Glausch 	report_pass("INIT to vcpu intercepted");
2018d5da6dfeSCathy Avery 
2019d5da6dfeSCathy Avery 	return true;
2020d5da6dfeSCathy Avery }
2021d5da6dfeSCathy Avery 
init_intercept_check(struct svm_test * test)2022d5da6dfeSCathy Avery static bool init_intercept_check(struct svm_test *test)
2023d5da6dfeSCathy Avery {
2024d5da6dfeSCathy Avery 	return init_intercept;
2025d5da6dfeSCathy Avery }
2026d5da6dfeSCathy Avery 
20277839b0ecSKrish Sadhukhan /*
20287839b0ecSKrish Sadhukhan  * Setting host EFLAGS.TF causes a #DB trap after the VMRUN completes on the
20297839b0ecSKrish Sadhukhan  * host side (i.e., after the #VMEXIT from the guest).
20307839b0ecSKrish Sadhukhan  *
20310689a980SKrish Sadhukhan  * Setting host EFLAGS.RF suppresses any potential instruction breakpoint
20320689a980SKrish Sadhukhan  * match on the VMRUN and completion of the VMRUN instruction clears the
20330689a980SKrish Sadhukhan  * host EFLAGS.RF bit.
20340689a980SKrish Sadhukhan  *
20357839b0ecSKrish Sadhukhan  * [AMD APM]
20367839b0ecSKrish Sadhukhan  */
20377839b0ecSKrish Sadhukhan static volatile u8 host_rflags_guest_main_flag = 0;
20387839b0ecSKrish Sadhukhan static volatile u8 host_rflags_db_handler_flag = 0;
20397839b0ecSKrish Sadhukhan static volatile bool host_rflags_ss_on_vmrun = false;
20407839b0ecSKrish Sadhukhan static volatile bool host_rflags_vmrun_reached = false;
20417839b0ecSKrish Sadhukhan static volatile bool host_rflags_set_tf = false;
20420689a980SKrish Sadhukhan static volatile bool host_rflags_set_rf = false;
20430689a980SKrish Sadhukhan static u64 rip_detected;
20447839b0ecSKrish Sadhukhan 
20457839b0ecSKrish Sadhukhan extern u64 *vmrun_rip;
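/*
 * vmrun_rip labels the VMRUN instruction in the SVM test harness's VMRUN
 * asm blob, letting the #DB handler recognize when single-stepping has
 * reached the VMRUN itself.
 */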
20467839b0ecSKrish Sadhukhan 
host_rflags_db_handler(struct ex_regs * r)20477839b0ecSKrish Sadhukhan static void host_rflags_db_handler(struct ex_regs *r)
20487839b0ecSKrish Sadhukhan {
20497839b0ecSKrish Sadhukhan 	if (host_rflags_ss_on_vmrun) {
20507839b0ecSKrish Sadhukhan 		if (host_rflags_vmrun_reached) {
20510689a980SKrish Sadhukhan 			if (!host_rflags_set_rf) {
20527839b0ecSKrish Sadhukhan 				r->rflags &= ~X86_EFLAGS_TF;
20530689a980SKrish Sadhukhan 				rip_detected = r->rip;
20547839b0ecSKrish Sadhukhan 			} else {
20550689a980SKrish Sadhukhan 				r->rflags |= X86_EFLAGS_RF;
20560689a980SKrish Sadhukhan 				++host_rflags_db_handler_flag;
20570689a980SKrish Sadhukhan 			}
20580689a980SKrish Sadhukhan 		} else {
20590689a980SKrish Sadhukhan 			if (r->rip == (u64)&vmrun_rip) {
20607839b0ecSKrish Sadhukhan 				host_rflags_vmrun_reached = true;
20610689a980SKrish Sadhukhan 
20620689a980SKrish Sadhukhan 				if (host_rflags_set_rf) {
20630689a980SKrish Sadhukhan 					host_rflags_guest_main_flag = 0;
20640689a980SKrish Sadhukhan 					rip_detected = r->rip;
20650689a980SKrish Sadhukhan 					r->rflags &= ~X86_EFLAGS_TF;
20660689a980SKrish Sadhukhan 
20670689a980SKrish Sadhukhan 					/* Trigger #DB via debug registers */
20680689a980SKrish Sadhukhan 					write_dr0((void *)&vmrun_rip);
20690689a980SKrish Sadhukhan 					write_dr7(0x403);
20700689a980SKrish Sadhukhan 				}
20710689a980SKrish Sadhukhan 			}
20727839b0ecSKrish Sadhukhan 		}
20737839b0ecSKrish Sadhukhan 	} else {
20747839b0ecSKrish Sadhukhan 		r->rflags &= ~X86_EFLAGS_TF;
20757839b0ecSKrish Sadhukhan 	}
20767839b0ecSKrish Sadhukhan }
20777839b0ecSKrish Sadhukhan 
host_rflags_prepare(struct svm_test * test)20787839b0ecSKrish Sadhukhan static void host_rflags_prepare(struct svm_test *test)
20797839b0ecSKrish Sadhukhan {
20807839b0ecSKrish Sadhukhan 	default_prepare(test);
20817839b0ecSKrish Sadhukhan 	handle_exception(DB_VECTOR, host_rflags_db_handler);
20827839b0ecSKrish Sadhukhan 	set_test_stage(test, 0);
20837839b0ecSKrish Sadhukhan }
20847839b0ecSKrish Sadhukhan 
host_rflags_prepare_gif_clear(struct svm_test * test)20857839b0ecSKrish Sadhukhan static void host_rflags_prepare_gif_clear(struct svm_test *test)
20867839b0ecSKrish Sadhukhan {
20877839b0ecSKrish Sadhukhan 	if (host_rflags_set_tf)
20887839b0ecSKrish Sadhukhan 		write_rflags(read_rflags() | X86_EFLAGS_TF);
20897839b0ecSKrish Sadhukhan }
20907839b0ecSKrish Sadhukhan 
host_rflags_test(struct svm_test * test)20917839b0ecSKrish Sadhukhan static void host_rflags_test(struct svm_test *test)
20927839b0ecSKrish Sadhukhan {
20937839b0ecSKrish Sadhukhan 	while (1) {
20940689a980SKrish Sadhukhan 		if (get_test_stage(test) > 0) {
20950689a980SKrish Sadhukhan 			if ((host_rflags_set_tf && !host_rflags_ss_on_vmrun && !host_rflags_db_handler_flag) ||
20960689a980SKrish Sadhukhan 			    (host_rflags_set_rf && host_rflags_db_handler_flag == 1))
20977839b0ecSKrish Sadhukhan 				host_rflags_guest_main_flag = 1;
20980689a980SKrish Sadhukhan 		}
20990689a980SKrish Sadhukhan 
21000689a980SKrish Sadhukhan 		if (get_test_stage(test) == 4)
21017839b0ecSKrish Sadhukhan 			break;
21027839b0ecSKrish Sadhukhan 		vmmcall();
21037839b0ecSKrish Sadhukhan 	}
21047839b0ecSKrish Sadhukhan }
21057839b0ecSKrish Sadhukhan 
host_rflags_finished(struct svm_test * test)21067839b0ecSKrish Sadhukhan static bool host_rflags_finished(struct svm_test *test)
21077839b0ecSKrish Sadhukhan {
21087839b0ecSKrish Sadhukhan 	switch (get_test_stage(test)) {
21097839b0ecSKrish Sadhukhan 	case 0:
21107839b0ecSKrish Sadhukhan 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
2111198dfd0eSJanis Schoetterl-Glausch 			report_fail("Unexpected VMEXIT. Exit reason 0x%x",
21127839b0ecSKrish Sadhukhan 				    vmcb->control.exit_code);
21137839b0ecSKrish Sadhukhan 			return true;
21147839b0ecSKrish Sadhukhan 		}
21157839b0ecSKrish Sadhukhan 		vmcb->save.rip += 3;
21167839b0ecSKrish Sadhukhan 		/*
21177839b0ecSKrish Sadhukhan 		 * Setting host EFLAGS.TF not immediately before VMRUN causes a
21187839b0ecSKrish Sadhukhan 		 * #DB trap before the first guest instruction is executed.
21197839b0ecSKrish Sadhukhan 		 */
21207839b0ecSKrish Sadhukhan 		host_rflags_set_tf = true;
21217839b0ecSKrish Sadhukhan 		break;
21227839b0ecSKrish Sadhukhan 	case 1:
21237839b0ecSKrish Sadhukhan 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL ||
21240689a980SKrish Sadhukhan 		    host_rflags_guest_main_flag != 1) {
2125198dfd0eSJanis Schoetterl-Glausch 			report_fail("Unexpected VMEXIT or #DB handler"
21267839b0ecSKrish Sadhukhan 				    " invoked before guest main. Exit reason 0x%x",
21277839b0ecSKrish Sadhukhan 				    vmcb->control.exit_code);
21287839b0ecSKrish Sadhukhan 			return true;
21297839b0ecSKrish Sadhukhan 		}
21307839b0ecSKrish Sadhukhan 		vmcb->save.rip += 3;
21317839b0ecSKrish Sadhukhan 		/*
21327839b0ecSKrish Sadhukhan 		 * Setting host EFLAGS.TF immediately before VMRUN, causes #DB
21337839b0ecSKrish Sadhukhan 		 * trap after VMRUN completes on the host side (i.e., after
21347839b0ecSKrish Sadhukhan 		 * VMEXIT from guest).
21357839b0ecSKrish Sadhukhan 		 */
21367839b0ecSKrish Sadhukhan 		host_rflags_ss_on_vmrun = true;
21377839b0ecSKrish Sadhukhan 		break;
21387839b0ecSKrish Sadhukhan 	case 2:
21397839b0ecSKrish Sadhukhan 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL ||
21400c22fd44SPaolo Bonzini 		    rip_detected != (u64)&vmrun_rip + 3) {
2141198dfd0eSJanis Schoetterl-Glausch 			report_fail("Unexpected VMEXIT or RIP mismatch."
21420689a980SKrish Sadhukhan 				    " Exit reason 0x%x, RIP expected: %lx, RIP actual: "
21430689a980SKrish Sadhukhan 				    "%lx", vmcb->control.exit_code,
21440c22fd44SPaolo Bonzini 				    (u64)&vmrun_rip + 3, rip_detected);
21450689a980SKrish Sadhukhan 			return true;
21460689a980SKrish Sadhukhan 		}
21470689a980SKrish Sadhukhan 		host_rflags_set_rf = true;
21480689a980SKrish Sadhukhan 		host_rflags_guest_main_flag = 0;
21490689a980SKrish Sadhukhan 		host_rflags_vmrun_reached = false;
21500689a980SKrish Sadhukhan 		vmcb->save.rip += 3;
21510689a980SKrish Sadhukhan 		break;
21520689a980SKrish Sadhukhan 	case 3:
21530689a980SKrish Sadhukhan 		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL ||
21540689a980SKrish Sadhukhan 		    rip_detected != (u64)&vmrun_rip ||
21550689a980SKrish Sadhukhan 		    host_rflags_guest_main_flag != 1 ||
21560689a980SKrish Sadhukhan 		    host_rflags_db_handler_flag > 1 ||
21570689a980SKrish Sadhukhan 		    read_rflags() & X86_EFLAGS_RF) {
2158198dfd0eSJanis Schoetterl-Glausch 			report_fail("Unexpected VMEXIT or RIP mismatch or "
21590689a980SKrish Sadhukhan 				    "EFLAGS.RF not cleared."
21600689a980SKrish Sadhukhan 				    " Exit reason 0x%x, RIP expected: %lx, RIP actual: "
21610689a980SKrish Sadhukhan 				    "%lx", vmcb->control.exit_code,
21620689a980SKrish Sadhukhan 				    (u64)&vmrun_rip, rip_detected);
21637839b0ecSKrish Sadhukhan 			return true;
21647839b0ecSKrish Sadhukhan 		}
21657839b0ecSKrish Sadhukhan 		host_rflags_set_tf = false;
21660689a980SKrish Sadhukhan 		host_rflags_set_rf = false;
21677839b0ecSKrish Sadhukhan 		vmcb->save.rip += 3;
21687839b0ecSKrish Sadhukhan 		break;
21697839b0ecSKrish Sadhukhan 	default:
21707839b0ecSKrish Sadhukhan 		return true;
21717839b0ecSKrish Sadhukhan 	}
21727839b0ecSKrish Sadhukhan 	inc_test_stage(test);
21730689a980SKrish Sadhukhan 	return get_test_stage(test) == 5;
21747839b0ecSKrish Sadhukhan }
21757839b0ecSKrish Sadhukhan 
host_rflags_check(struct svm_test * test)21767839b0ecSKrish Sadhukhan static bool host_rflags_check(struct svm_test *test)
21777839b0ecSKrish Sadhukhan {
21780689a980SKrish Sadhukhan 	return get_test_stage(test) == 4;
21797839b0ecSKrish Sadhukhan }
21807839b0ecSKrish Sadhukhan 
21818660d1b5SKrish Sadhukhan #define TEST(name) { #name, .v2 = name }
21828660d1b5SKrish Sadhukhan 
2183ba29942cSKrish Sadhukhan /*
2184ba29942cSKrish Sadhukhan  * v2 tests
2185ba29942cSKrish Sadhukhan  */
2186ba29942cSKrish Sadhukhan 
2187f32183f5SJim Mattson /*
2188f32183f5SJim Mattson  * Ensure that kvm recalculates the L1 guest's CPUID.01H:ECX.OSXSAVE
2189f32183f5SJim Mattson  * after VM-exit from an L2 guest that sets CR4.OSXSAVE to a different
2190f32183f5SJim Mattson  * value than in L1.
2191f32183f5SJim Mattson  */
2192f32183f5SJim Mattson 
svm_cr4_osxsave_test_guest(struct svm_test * test)2193f32183f5SJim Mattson static void svm_cr4_osxsave_test_guest(struct svm_test *test)
2194f32183f5SJim Mattson {
2195f32183f5SJim Mattson 	write_cr4(read_cr4() & ~X86_CR4_OSXSAVE);
2196f32183f5SJim Mattson }
2197f32183f5SJim Mattson 
svm_cr4_osxsave_test(void)2198f32183f5SJim Mattson static void svm_cr4_osxsave_test(void)
2199f32183f5SJim Mattson {
2200f32183f5SJim Mattson 	if (!this_cpu_has(X86_FEATURE_XSAVE)) {
2201f32183f5SJim Mattson 		report_skip("XSAVE not detected");
2202f32183f5SJim Mattson 		return;
2203f32183f5SJim Mattson 	}
2204f32183f5SJim Mattson 
2205f32183f5SJim Mattson 	if (!(read_cr4() & X86_CR4_OSXSAVE)) {
2206f32183f5SJim Mattson 		unsigned long cr4 = read_cr4() | X86_CR4_OSXSAVE;
2207f32183f5SJim Mattson 
2208f32183f5SJim Mattson 		write_cr4(cr4);
2209f32183f5SJim Mattson 		vmcb->save.cr4 = cr4;
2210f32183f5SJim Mattson 	}
2211f32183f5SJim Mattson 
2212816c0359SSean Christopherson 	report(this_cpu_has(X86_FEATURE_OSXSAVE), "CPUID.01H:ECX.OSXSAVE set before VMRUN");
2213f32183f5SJim Mattson 
2214f32183f5SJim Mattson 	test_set_guest(svm_cr4_osxsave_test_guest);
2215f32183f5SJim Mattson 	report(svm_vmrun() == SVM_EXIT_VMMCALL,
2216f32183f5SJim Mattson 	       "svm_cr4_osxsave_test_guest finished with VMMCALL");
2217f32183f5SJim Mattson 
2218816c0359SSean Christopherson 	report(this_cpu_has(X86_FEATURE_OSXSAVE), "CPUID.01H:ECX.OSXSAVE set after VMRUN");
2219f32183f5SJim Mattson }
2220f32183f5SJim Mattson 
basic_guest_main(struct svm_test * test)2221ba29942cSKrish Sadhukhan static void basic_guest_main(struct svm_test *test)
2222ba29942cSKrish Sadhukhan {
2223ba29942cSKrish Sadhukhan }
2224ba29942cSKrish Sadhukhan 
2225eae10e8fSKrish Sadhukhan 
2226eae10e8fSKrish Sadhukhan #define SVM_TEST_REG_RESERVED_BITS(start, end, inc, str_name, reg, val,	\
2227eae10e8fSKrish Sadhukhan 				   resv_mask)				\
2228eae10e8fSKrish Sadhukhan {									\
2229eae10e8fSKrish Sadhukhan 	u64 tmp, mask;							\
2230eae10e8fSKrish Sadhukhan 	int i;								\
2231eae10e8fSKrish Sadhukhan 									\
2232eae10e8fSKrish Sadhukhan 	for (i = start; i <= end; i = i + inc) {			\
2233eae10e8fSKrish Sadhukhan 		mask = 1ull << i;					\
2234eae10e8fSKrish Sadhukhan 		if (!(mask & resv_mask))				\
2235eae10e8fSKrish Sadhukhan 			continue;					\
2236eae10e8fSKrish Sadhukhan 		tmp = val | mask;					\
2237eae10e8fSKrish Sadhukhan 		reg = tmp;						\
2238eae10e8fSKrish Sadhukhan 		report(svm_vmrun() == SVM_EXIT_ERR, "Test %s %d:%d: %lx", \
2239eae10e8fSKrish Sadhukhan 		       str_name, end, start, tmp);			\
2240eae10e8fSKrish Sadhukhan 	}								\
2241eae10e8fSKrish Sadhukhan }
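/*
 * Illustrative hand-expansion of one loop iteration of the macro above,
 * assuming the EFER usage in test_efer() below with i == 9:
 *
 *	vmcb->save.efer = efer_saved | (1ull << 9);
 *	report(svm_vmrun() == SVM_EXIT_ERR, "Test EFER 9:8: %lx",
 *	       efer_saved | (1ull << 9));
 *
 * i.e. VMRUN with any reserved (MBZ) bit set is expected to fail with
 * SVM_EXIT_ERR.
 */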
2242eae10e8fSKrish Sadhukhan 
22436d0ecbf6SKrish Sadhukhan #define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask,	\
2244cb6524f3SPaolo Bonzini 				  exit_code, test_name)			\
2245a79c9495SKrish Sadhukhan {									\
2246a79c9495SKrish Sadhukhan 	u64 tmp, mask;							\
22478ae6d77fSSean Christopherson 	u32 r;								\
2248a79c9495SKrish Sadhukhan 	int i;								\
2249a79c9495SKrish Sadhukhan 									\
2250a79c9495SKrish Sadhukhan 	for (i = start; i <= end; i = i + inc) {			\
2251a79c9495SKrish Sadhukhan 		mask = 1ull << i;					\
2252a79c9495SKrish Sadhukhan 		if (!(mask & resv_mask))				\
2253a79c9495SKrish Sadhukhan 			continue;					\
2254a79c9495SKrish Sadhukhan 		tmp = val | mask;					\
2255a79c9495SKrish Sadhukhan 		switch (cr) {						\
2256a79c9495SKrish Sadhukhan 		case 0:							\
2257a79c9495SKrish Sadhukhan 			vmcb->save.cr0 = tmp;				\
2258a79c9495SKrish Sadhukhan 			break;						\
2259a79c9495SKrish Sadhukhan 		case 3:							\
2260a79c9495SKrish Sadhukhan 			vmcb->save.cr3 = tmp;				\
2261a79c9495SKrish Sadhukhan 			break;						\
2262a79c9495SKrish Sadhukhan 		case 4:							\
2263a79c9495SKrish Sadhukhan 			vmcb->save.cr4 = tmp;				\
2264a79c9495SKrish Sadhukhan 		}							\
22658ae6d77fSSean Christopherson 		r = svm_vmrun();					\
22668ae6d77fSSean Christopherson 		report(r == exit_code, "Test CR%d %s%d:%d: %lx, wanted exit 0x%x, got 0x%x", \
22678ae6d77fSSean Christopherson 		       cr, test_name, end, start, tmp, exit_code, r);	\
2268a79c9495SKrish Sadhukhan 	}								\
2269a79c9495SKrish Sadhukhan }
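/*
 * Unlike SVM_TEST_REG_RESERVED_BITS above, the expected exit code is a
 * parameter here: CR0/CR4 reserved-bit violations fail VMRUN with
 * SVM_EXIT_ERR, whereas the CR3 cases below complete VMRUN and surface
 * either as SVM_EXIT_VMMCALL or, with a non-present NPT entry, as
 * SVM_EXIT_NPF.
 */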
2270e8d7a8f6SKrish Sadhukhan 
test_efer(void)2271a79c9495SKrish Sadhukhan static void test_efer(void)
2272a79c9495SKrish Sadhukhan {
2273e8d7a8f6SKrish Sadhukhan 	/*
2274e8d7a8f6SKrish Sadhukhan 	 * Clearing EFER.SVME is illegal
2275e8d7a8f6SKrish Sadhukhan 	 */
2276ba29942cSKrish Sadhukhan 	u64 efer_saved = vmcb->save.efer;
2277ba29942cSKrish Sadhukhan 	u64 efer = efer_saved;
2278ba29942cSKrish Sadhukhan 
2279ba29942cSKrish Sadhukhan 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer);
2280ba29942cSKrish Sadhukhan 	efer &= ~EFER_SVME;
2281ba29942cSKrish Sadhukhan 	vmcb->save.efer = efer;
2282ba29942cSKrish Sadhukhan 	report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer);
2283ba29942cSKrish Sadhukhan 	vmcb->save.efer = efer_saved;
2284e8d7a8f6SKrish Sadhukhan 
2285e8d7a8f6SKrish Sadhukhan 	/*
2286a79c9495SKrish Sadhukhan 	 * EFER MBZ bits: 63:16, 9
2287a79c9495SKrish Sadhukhan 	 */
2288a79c9495SKrish Sadhukhan 	efer_saved = vmcb->save.efer;
2289a79c9495SKrish Sadhukhan 
2290a79c9495SKrish Sadhukhan 	SVM_TEST_REG_RESERVED_BITS(8, 9, 1, "EFER", vmcb->save.efer,
2291a79c9495SKrish Sadhukhan 				   efer_saved, SVM_EFER_RESERVED_MASK);
2292a79c9495SKrish Sadhukhan 	SVM_TEST_REG_RESERVED_BITS(16, 63, 4, "EFER", vmcb->save.efer,
2293a79c9495SKrish Sadhukhan 				   efer_saved, SVM_EFER_RESERVED_MASK);
2294a79c9495SKrish Sadhukhan 
22951d7bde08SKrish Sadhukhan 	/*
22961d7bde08SKrish Sadhukhan 	 * EFER.LME and CR0.PG are both set and CR4.PAE is zero.
22971d7bde08SKrish Sadhukhan 	 */
22981d7bde08SKrish Sadhukhan 	u64 cr0_saved = vmcb->save.cr0;
22991d7bde08SKrish Sadhukhan 	u64 cr0;
23001d7bde08SKrish Sadhukhan 	u64 cr4_saved = vmcb->save.cr4;
23011d7bde08SKrish Sadhukhan 	u64 cr4;
23021d7bde08SKrish Sadhukhan 
23031d7bde08SKrish Sadhukhan 	efer = efer_saved | EFER_LME;
23041d7bde08SKrish Sadhukhan 	vmcb->save.efer = efer;
23051d7bde08SKrish Sadhukhan 	cr0 = cr0_saved | X86_CR0_PG | X86_CR0_PE;
23061d7bde08SKrish Sadhukhan 	vmcb->save.cr0 = cr0;
23071d7bde08SKrish Sadhukhan 	cr4 = cr4_saved & ~X86_CR4_PAE;
23081d7bde08SKrish Sadhukhan 	vmcb->save.cr4 = cr4;
23091d7bde08SKrish Sadhukhan 	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
23101d7bde08SKrish Sadhukhan 	       "CR0.PG=1 (%lx) and CR4.PAE=0 (%lx)", efer, cr0, cr4);
23111d7bde08SKrish Sadhukhan 
23121d7bde08SKrish Sadhukhan 	/*
23131d7bde08SKrish Sadhukhan 	 * EFER.LME and CR0.PG are both set and CR0.PE is zero.
2314fc050452SLara Lazier 	 * CR4.PAE needs to be set as we otherwise cannot
2315fc050452SLara Lazier 	 * determine if CR4.PAE=0 or CR0.PE=0 triggered the
2316fc050452SLara Lazier 	 * SVM_EXIT_ERR.
23171d7bde08SKrish Sadhukhan 	 */
2318fc050452SLara Lazier 	cr4 = cr4_saved | X86_CR4_PAE;
2319fc050452SLara Lazier 	vmcb->save.cr4 = cr4;
23201d7bde08SKrish Sadhukhan 	cr0 &= ~X86_CR0_PE;
23211d7bde08SKrish Sadhukhan 	vmcb->save.cr0 = cr0;
23221d7bde08SKrish Sadhukhan 	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
23231d7bde08SKrish Sadhukhan 	       "CR0.PG=1 and CR0.PE=0 (%lx)", efer, cr0);
23241d7bde08SKrish Sadhukhan 
23251d7bde08SKrish Sadhukhan 	/*
23261d7bde08SKrish Sadhukhan 	 * EFER.LME, CR0.PG, CR4.PAE, CS.L, and CS.D are all non-zero.
23271d7bde08SKrish Sadhukhan 	 */
23281d7bde08SKrish Sadhukhan 	u32 cs_attrib_saved = vmcb->save.cs.attrib;
23291d7bde08SKrish Sadhukhan 	u32 cs_attrib;
23301d7bde08SKrish Sadhukhan 
23311d7bde08SKrish Sadhukhan 	cr0 |= X86_CR0_PE;
23321d7bde08SKrish Sadhukhan 	vmcb->save.cr0 = cr0;
23331d7bde08SKrish Sadhukhan 	cs_attrib = cs_attrib_saved | SVM_SELECTOR_L_MASK |
23341d7bde08SKrish Sadhukhan 		SVM_SELECTOR_DB_MASK;
23351d7bde08SKrish Sadhukhan 	vmcb->save.cs.attrib = cs_attrib;
23361d7bde08SKrish Sadhukhan 	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), "
23371d7bde08SKrish Sadhukhan 	       "CR0.PG=1 (%lx), CR4.PAE=1 (%lx), CS.L=1 and CS.D=1 (%x)",
23381d7bde08SKrish Sadhukhan 	       efer, cr0, cr4, cs_attrib);
23391d7bde08SKrish Sadhukhan 
23401d7bde08SKrish Sadhukhan 	vmcb->save.cr0 = cr0_saved;
23411d7bde08SKrish Sadhukhan 	vmcb->save.cr4 = cr4_saved;
2342a79c9495SKrish Sadhukhan 	vmcb->save.efer = efer_saved;
23431d7bde08SKrish Sadhukhan 	vmcb->save.cs.attrib = cs_attrib_saved;
2344a79c9495SKrish Sadhukhan }
2345a79c9495SKrish Sadhukhan 
test_cr0(void)2346a79c9495SKrish Sadhukhan static void test_cr0(void)
2347a79c9495SKrish Sadhukhan {
2348a79c9495SKrish Sadhukhan 	/*
2349e8d7a8f6SKrish Sadhukhan 	 * Clearing CR0.CD while setting CR0.NW is an illegal combination
2350e8d7a8f6SKrish Sadhukhan 	 */
2351e8d7a8f6SKrish Sadhukhan 	u64 cr0_saved = vmcb->save.cr0;
2352e8d7a8f6SKrish Sadhukhan 	u64 cr0 = cr0_saved;
2353e8d7a8f6SKrish Sadhukhan 
2354e8d7a8f6SKrish Sadhukhan 	cr0 |= X86_CR0_CD;
2355e8d7a8f6SKrish Sadhukhan 	cr0 &= ~X86_CR0_NW;
2356e8d7a8f6SKrish Sadhukhan 	vmcb->save.cr0 = cr0;
2357a79c9495SKrish Sadhukhan 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=0: %lx",
2358a79c9495SKrish Sadhukhan 		cr0);
2359e8d7a8f6SKrish Sadhukhan 	cr0 |= X86_CR0_NW;
2360e8d7a8f6SKrish Sadhukhan 	vmcb->save.cr0 = cr0;
2361a79c9495SKrish Sadhukhan 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=1: %lx",
2362a79c9495SKrish Sadhukhan 		cr0);
2363e8d7a8f6SKrish Sadhukhan 	cr0 &= ~X86_CR0_NW;
2364e8d7a8f6SKrish Sadhukhan 	cr0 &= ~X86_CR0_CD;
2365e8d7a8f6SKrish Sadhukhan 	vmcb->save.cr0 = cr0;
2366a79c9495SKrish Sadhukhan 	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=0: %lx",
2367a79c9495SKrish Sadhukhan 		cr0);
2368e8d7a8f6SKrish Sadhukhan 	cr0 |= X86_CR0_NW;
2369e8d7a8f6SKrish Sadhukhan 	vmcb->save.cr0 = cr0;
2370a79c9495SKrish Sadhukhan 	report (svm_vmrun() == SVM_EXIT_ERR, "Test CR0 CD=0,NW=1: %lx",
2371a79c9495SKrish Sadhukhan 		cr0);
2372e8d7a8f6SKrish Sadhukhan 	vmcb->save.cr0 = cr0_saved;
23735c052c90SKrish Sadhukhan 
23745c052c90SKrish Sadhukhan 	/*
23755c052c90SKrish Sadhukhan 	 * CR0[63:32] are not zero
23765c052c90SKrish Sadhukhan 	 */
23775c052c90SKrish Sadhukhan 	cr0 = cr0_saved;
2378eae10e8fSKrish Sadhukhan 
2379eae10e8fSKrish Sadhukhan 	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "CR0", vmcb->save.cr0, cr0_saved,
2380eae10e8fSKrish Sadhukhan 				   SVM_CR0_RESERVED_MASK);
23815c052c90SKrish Sadhukhan 	vmcb->save.cr0 = cr0_saved;
2382a79c9495SKrish Sadhukhan }
2383eae10e8fSKrish Sadhukhan 
test_cr3(void)2384a79c9495SKrish Sadhukhan static void test_cr3(void)
2385a79c9495SKrish Sadhukhan {
2386a79c9495SKrish Sadhukhan 	/*
2387a79c9495SKrish Sadhukhan 	 * CR3 MBZ bits based on different modes:
238829a01803SNadav Amit 	 *   [63:52] - long mode
2389a79c9495SKrish Sadhukhan 	 */
2390a79c9495SKrish Sadhukhan 	u64 cr3_saved = vmcb->save.cr3;
2391a79c9495SKrish Sadhukhan 
2392a79c9495SKrish Sadhukhan 	SVM_TEST_CR_RESERVED_BITS(0, 63, 1, 3, cr3_saved,
2393cb6524f3SPaolo Bonzini 				  SVM_CR3_LONG_MBZ_MASK, SVM_EXIT_ERR, "");
23946d0ecbf6SKrish Sadhukhan 
23956d0ecbf6SKrish Sadhukhan 	vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_MBZ_MASK;
23966d0ecbf6SKrish Sadhukhan 	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
23976d0ecbf6SKrish Sadhukhan 	       vmcb->save.cr3);
23986d0ecbf6SKrish Sadhukhan 
23996d0ecbf6SKrish Sadhukhan 	/*
24006d0ecbf6SKrish Sadhukhan 	 * CR3 non-MBZ reserved bits based on different modes:
2401cb6524f3SPaolo Bonzini 	 *   [11:5] [2:0] - long mode (PCIDE=0)
24026d0ecbf6SKrish Sadhukhan 	 *          [2:0] - PAE legacy mode
24036d0ecbf6SKrish Sadhukhan 	 */
24046d0ecbf6SKrish Sadhukhan 	u64 cr4_saved = vmcb->save.cr4;
24056d0ecbf6SKrish Sadhukhan 	u64 *pdpe = npt_get_pml4e();
24066d0ecbf6SKrish Sadhukhan 
24076d0ecbf6SKrish Sadhukhan 	/*
24086d0ecbf6SKrish Sadhukhan 	 * Long mode
24096d0ecbf6SKrish Sadhukhan 	 */
24106d0ecbf6SKrish Sadhukhan 	if (this_cpu_has(X86_FEATURE_PCID)) {
24116d0ecbf6SKrish Sadhukhan 		vmcb->save.cr4 = cr4_saved | X86_CR4_PCIDE;
24126d0ecbf6SKrish Sadhukhan 		SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved,
2413cb6524f3SPaolo Bonzini 					  SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_VMMCALL, "(PCIDE=1) ");
24146d0ecbf6SKrish Sadhukhan 
24156d0ecbf6SKrish Sadhukhan 		vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK;
24166d0ecbf6SKrish Sadhukhan 		report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
24176d0ecbf6SKrish Sadhukhan 		       vmcb->save.cr3);
2418cb6524f3SPaolo Bonzini 	}
24196d0ecbf6SKrish Sadhukhan 
24206d0ecbf6SKrish Sadhukhan 	vmcb->save.cr4 = cr4_saved & ~X86_CR4_PCIDE;
24216d0ecbf6SKrish Sadhukhan 
2422993749ffSSean Christopherson 	if (!npt_supported())
2423993749ffSSean Christopherson 		goto skip_npt_only;
2424993749ffSSean Christopherson 
24256d0ecbf6SKrish Sadhukhan 	/* Clear P (Present) bit in NPT in order to trigger #NPF */
24266d0ecbf6SKrish Sadhukhan 	pdpe[0] &= ~1ULL;
24276d0ecbf6SKrish Sadhukhan 
24286d0ecbf6SKrish Sadhukhan 	SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved,
2429cb6524f3SPaolo Bonzini 				  SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_NPF, "(PCIDE=0) ");
24306d0ecbf6SKrish Sadhukhan 
24316d0ecbf6SKrish Sadhukhan 	pdpe[0] |= 1ULL;
2432cb6524f3SPaolo Bonzini 	vmcb->save.cr3 = cr3_saved;
24336d0ecbf6SKrish Sadhukhan 
24346d0ecbf6SKrish Sadhukhan 	/*
24356d0ecbf6SKrish Sadhukhan 	 * PAE legacy
24366d0ecbf6SKrish Sadhukhan 	 */
24376d0ecbf6SKrish Sadhukhan 	pdpe[0] &= ~1ULL;
24386d0ecbf6SKrish Sadhukhan 	vmcb->save.cr4 = cr4_saved | X86_CR4_PAE;
24396d0ecbf6SKrish Sadhukhan 	SVM_TEST_CR_RESERVED_BITS(0, 2, 1, 3, cr3_saved,
2440cb6524f3SPaolo Bonzini 				  SVM_CR3_PAE_LEGACY_RESERVED_MASK, SVM_EXIT_NPF, "(PAE) ");
24416d0ecbf6SKrish Sadhukhan 
24426d0ecbf6SKrish Sadhukhan 	pdpe[0] |= 1ULL;
2443993749ffSSean Christopherson 
2444993749ffSSean Christopherson skip_npt_only:
2445a79c9495SKrish Sadhukhan 	vmcb->save.cr3 = cr3_saved;
24466d0ecbf6SKrish Sadhukhan 	vmcb->save.cr4 = cr4_saved;
2447a79c9495SKrish Sadhukhan }
2448a79c9495SKrish Sadhukhan 
2449d30973c3SWei Huang /* Test CR4 MBZ bits based on legacy or long modes */
test_cr4(void)2450a79c9495SKrish Sadhukhan static void test_cr4(void)
2451a79c9495SKrish Sadhukhan {
2452a79c9495SKrish Sadhukhan 	u64 cr4_saved = vmcb->save.cr4;
2453a79c9495SKrish Sadhukhan 	u64 efer_saved = vmcb->save.efer;
2454a79c9495SKrish Sadhukhan 	u64 efer = efer_saved;
2455a79c9495SKrish Sadhukhan 
2456a79c9495SKrish Sadhukhan 	efer &= ~EFER_LME;
2457a79c9495SKrish Sadhukhan 	vmcb->save.efer = efer;
2458a79c9495SKrish Sadhukhan 	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
2459cb6524f3SPaolo Bonzini 				  SVM_CR4_LEGACY_RESERVED_MASK, SVM_EXIT_ERR, "");
2460a79c9495SKrish Sadhukhan 
2461a79c9495SKrish Sadhukhan 	efer |= EFER_LME;
2462a79c9495SKrish Sadhukhan 	vmcb->save.efer = efer;
2463a79c9495SKrish Sadhukhan 	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
2464cb6524f3SPaolo Bonzini 				  SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, "");
2465a79c9495SKrish Sadhukhan 	SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved,
2466cb6524f3SPaolo Bonzini 				  SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, "");
2467a79c9495SKrish Sadhukhan 
2468a79c9495SKrish Sadhukhan 	vmcb->save.cr4 = cr4_saved;
2469a79c9495SKrish Sadhukhan 	vmcb->save.efer = efer_saved;
2470a79c9495SKrish Sadhukhan }
2471a79c9495SKrish Sadhukhan 
test_dr(void)2472a79c9495SKrish Sadhukhan static void test_dr(void)
2473a79c9495SKrish Sadhukhan {
2474eae10e8fSKrish Sadhukhan 	/*
2475eae10e8fSKrish Sadhukhan 	 * DR6[63:32] and DR7[63:32] are MBZ
2476eae10e8fSKrish Sadhukhan 	 */
2477eae10e8fSKrish Sadhukhan 	u64 dr_saved = vmcb->save.dr6;
2478eae10e8fSKrish Sadhukhan 
2479eae10e8fSKrish Sadhukhan 	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR6", vmcb->save.dr6, dr_saved,
2480eae10e8fSKrish Sadhukhan 				   SVM_DR6_RESERVED_MASK);
2481eae10e8fSKrish Sadhukhan 	vmcb->save.dr6 = dr_saved;
2482eae10e8fSKrish Sadhukhan 
2483eae10e8fSKrish Sadhukhan 	dr_saved = vmcb->save.dr7;
2484eae10e8fSKrish Sadhukhan 	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR7", vmcb->save.dr7, dr_saved,
2485eae10e8fSKrish Sadhukhan 				   SVM_DR7_RESERVED_MASK);
2486eae10e8fSKrish Sadhukhan 
2487eae10e8fSKrish Sadhukhan 	vmcb->save.dr7 = dr_saved;
2488a79c9495SKrish Sadhukhan }
2489eae10e8fSKrish Sadhukhan 
2490abe82380SKrish Sadhukhan /* TODO: verify if high 32-bits are sign- or zero-extended on bare metal */
2491abe82380SKrish Sadhukhan #define	TEST_BITMAP_ADDR(save_intercept, type, addr, exit_code,		\
2492abe82380SKrish Sadhukhan 			 msg) {						\
2493abe82380SKrish Sadhukhan 		vmcb->control.intercept = saved_intercept | 1ULL << type; \
2494abe82380SKrish Sadhukhan 		if (type == INTERCEPT_MSR_PROT)				\
2495abe82380SKrish Sadhukhan 			vmcb->control.msrpm_base_pa = addr;		\
2496abe82380SKrish Sadhukhan 		else							\
2497abe82380SKrish Sadhukhan 			vmcb->control.iopm_base_pa = addr;		\
2498abe82380SKrish Sadhukhan 		report(svm_vmrun() == exit_code,			\
2499abe82380SKrish Sadhukhan 		       "Test %s address: %lx", msg, addr);		\
2500abe82380SKrish Sadhukhan 	}
2501abe82380SKrish Sadhukhan 
2502abe82380SKrish Sadhukhan /*
2503abe82380SKrish Sadhukhan  * If the MSR or IOIO intercept table extends to a physical address that
2504abe82380SKrish Sadhukhan  * is greater than or equal to the maximum supported physical address, the
2505abe82380SKrish Sadhukhan  * guest state is illegal.
2506abe82380SKrish Sadhukhan  *
2507abe82380SKrish Sadhukhan  * The VMRUN instruction ignores the lower 12 bits of the address specified
2508abe82380SKrish Sadhukhan  * in the VMCB.
2509abe82380SKrish Sadhukhan  *
2510abe82380SKrish Sadhukhan  * MSRPM spans 2 contiguous 4KB pages while IOPM spans 2 contiguous 4KB
2511abe82380SKrish Sadhukhan  * pages + 1 byte.
2512abe82380SKrish Sadhukhan  *
2513abe82380SKrish Sadhukhan  * [APM vol 2]
2514abe82380SKrish Sadhukhan  *
2515abe82380SKrish Sadhukhan  * Note: Unallocated MSRPM addresses that pass the consistency checks generate
2516abe82380SKrish Sadhukhan  * #NPF.
2517abe82380SKrish Sadhukhan  */
test_msrpm_iopm_bitmap_addrs(void)2518abe82380SKrish Sadhukhan static void test_msrpm_iopm_bitmap_addrs(void)
2519abe82380SKrish Sadhukhan {
2520abe82380SKrish Sadhukhan 	u64 saved_intercept = vmcb->control.intercept;
2521abe82380SKrish Sadhukhan 	u64 addr_beyond_limit = 1ull << cpuid_maxphyaddr();
2522abe82380SKrish Sadhukhan 	u64 addr = virt_to_phys(msr_bitmap) & (~((1ull << 12) - 1));
2523abe82380SKrish Sadhukhan 
2524abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT,
2525abe82380SKrish Sadhukhan 			 addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR,
2526abe82380SKrish Sadhukhan 			 "MSRPM");
2527abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT,
2528abe82380SKrish Sadhukhan 			 addr_beyond_limit - 2 * PAGE_SIZE + 1, SVM_EXIT_ERR,
2529abe82380SKrish Sadhukhan 			 "MSRPM");
2530abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT,
2531abe82380SKrish Sadhukhan 			 addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR,
2532abe82380SKrish Sadhukhan 			 "MSRPM");
2533abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr,
2534abe82380SKrish Sadhukhan 			 SVM_EXIT_VMMCALL, "MSRPM");
2535abe82380SKrish Sadhukhan 	addr |= (1ull << 12) - 1;
2536abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr,
2537abe82380SKrish Sadhukhan 			 SVM_EXIT_VMMCALL, "MSRPM");
2538abe82380SKrish Sadhukhan 
2539abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2540abe82380SKrish Sadhukhan 			 addr_beyond_limit - 4 * PAGE_SIZE, SVM_EXIT_VMMCALL,
2541abe82380SKrish Sadhukhan 			 "IOPM");
2542abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2543abe82380SKrish Sadhukhan 			 addr_beyond_limit - 3 * PAGE_SIZE, SVM_EXIT_VMMCALL,
2544abe82380SKrish Sadhukhan 			 "IOPM");
2545abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2546abe82380SKrish Sadhukhan 			 addr_beyond_limit - 2 * PAGE_SIZE - 2, SVM_EXIT_VMMCALL,
2547abe82380SKrish Sadhukhan 			 "IOPM");
2548abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2549abe82380SKrish Sadhukhan 			 addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR,
2550abe82380SKrish Sadhukhan 			 "IOPM");
2551abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT,
2552abe82380SKrish Sadhukhan 			 addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR,
2553abe82380SKrish Sadhukhan 			 "IOPM");
2554abe82380SKrish Sadhukhan 	addr = virt_to_phys(io_bitmap) & (~((1ull << 11) - 1));
2555abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr,
2556abe82380SKrish Sadhukhan 			 SVM_EXIT_VMMCALL, "IOPM");
2557abe82380SKrish Sadhukhan 	addr |= (1ull << 12) - 1;
2558abe82380SKrish Sadhukhan 	TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr,
2559abe82380SKrish Sadhukhan 			 SVM_EXIT_VMMCALL, "IOPM");
2560abe82380SKrish Sadhukhan 
2561abe82380SKrish Sadhukhan 	vmcb->control.intercept = saved_intercept;
2562abe82380SKrish Sadhukhan }
2563abe82380SKrish Sadhukhan 
2564ba3c9773SLara Lazier /*
2565ba3c9773SLara Lazier  * Unlike VMSAVE, VMRUN seems not to update the value of noncanonical
2566ba3c9773SLara Lazier  * segment bases in the VMCB.  However, VMENTRY succeeds as documented.
2567ba3c9773SLara Lazier  */
2568ba3c9773SLara Lazier #define TEST_CANONICAL_VMRUN(seg_base, msg)				\
2569a99070ebSKrish Sadhukhan 	saved_addr = seg_base;						\
2570a99070ebSKrish Sadhukhan 	seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \
2571ba3c9773SLara Lazier 	return_value = svm_vmrun();					\
2572ba3c9773SLara Lazier 	report(return_value == SVM_EXIT_VMMCALL,			\
2573ba3c9773SLara Lazier 	       "Successful VMRUN with noncanonical %s.base", msg);	\
2574a99070ebSKrish Sadhukhan 	seg_base = saved_addr;
2575a99070ebSKrish Sadhukhan 
2576ba3c9773SLara Lazier 
2577ba3c9773SLara Lazier #define TEST_CANONICAL_VMLOAD(seg_base, msg)				\
2578ba3c9773SLara Lazier 	saved_addr = seg_base;						\
2579ba3c9773SLara Lazier 	seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \
2580ba3c9773SLara Lazier 	asm volatile ("vmload %0" : : "a"(vmcb_phys) : "memory");	\
2581ba3c9773SLara Lazier 	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");	\
2582ba3c9773SLara Lazier 	report(is_canonical(seg_base),					\
2583ba3c9773SLara Lazier 	       "Test %s.base for canonical form: %lx", msg, seg_base);	\
2584ba3c9773SLara Lazier 	seg_base = saved_addr;
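/*
 * The two macros above probe the same property from opposite directions:
 * TEST_CANONICAL_VMRUN checks that VMRUN succeeds despite a noncanonical
 * segment base in the VMCB, while TEST_CANONICAL_VMLOAD round-trips the
 * field through VMLOAD/VMSAVE and checks that the value written back is
 * canonical.
 */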
2585ba3c9773SLara Lazier 
test_canonicalization(void)2586ba3c9773SLara Lazier static void test_canonicalization(void)
2587a99070ebSKrish Sadhukhan {
2588a99070ebSKrish Sadhukhan 	u64 saved_addr;
2589ba3c9773SLara Lazier 	u64 return_value;
2590ba3c9773SLara Lazier 	u64 addr_limit;
2591ba3c9773SLara Lazier 	u64 vmcb_phys = virt_to_phys(vmcb);
2592ba3c9773SLara Lazier 
2593ba3c9773SLara Lazier 	addr_limit = (this_cpu_has(X86_FEATURE_LA57)) ? 57 : 48;
2594a99070ebSKrish Sadhukhan 	u64 noncanonical_mask = NONCANONICAL & ~((1ul << addr_limit) - 1);
2595a99070ebSKrish Sadhukhan 
2596ba3c9773SLara Lazier 	TEST_CANONICAL_VMLOAD(vmcb->save.fs.base, "FS");
2597ba3c9773SLara Lazier 	TEST_CANONICAL_VMLOAD(vmcb->save.gs.base, "GS");
2598ba3c9773SLara Lazier 	TEST_CANONICAL_VMLOAD(vmcb->save.ldtr.base, "LDTR");
2599ba3c9773SLara Lazier 	TEST_CANONICAL_VMLOAD(vmcb->save.tr.base, "TR");
2600ba3c9773SLara Lazier 	TEST_CANONICAL_VMLOAD(vmcb->save.kernel_gs_base, "KERNEL GS");
2601ba3c9773SLara Lazier 	TEST_CANONICAL_VMRUN(vmcb->save.es.base, "ES");
2602ba3c9773SLara Lazier 	TEST_CANONICAL_VMRUN(vmcb->save.cs.base, "CS");
2603ba3c9773SLara Lazier 	TEST_CANONICAL_VMRUN(vmcb->save.ss.base, "SS");
2604ba3c9773SLara Lazier 	TEST_CANONICAL_VMRUN(vmcb->save.ds.base, "DS");
2605ba3c9773SLara Lazier 	TEST_CANONICAL_VMRUN(vmcb->save.gdtr.base, "GDTR");
2606ba3c9773SLara Lazier 	TEST_CANONICAL_VMRUN(vmcb->save.idtr.base, "IDTR");
2607a99070ebSKrish Sadhukhan }
2608a99070ebSKrish Sadhukhan 
2609665f5677SKrish Sadhukhan /*
2610665f5677SKrish Sadhukhan  * When VMRUN loads a guest value of 1 in EFLAGS.TF, that value does not
2611665f5677SKrish Sadhukhan  * cause a trace trap between the VMRUN and the first guest instruction, but
2612665f5677SKrish Sadhukhan  * rather after completion of the first guest instruction.
2613665f5677SKrish Sadhukhan  *
2614665f5677SKrish Sadhukhan  * [APM vol 2]
2615665f5677SKrish Sadhukhan  */
2616665f5677SKrish Sadhukhan u64 guest_rflags_test_trap_rip;
2617665f5677SKrish Sadhukhan 
guest_rflags_test_db_handler(struct ex_regs * r)2618665f5677SKrish Sadhukhan static void guest_rflags_test_db_handler(struct ex_regs *r)
2619665f5677SKrish Sadhukhan {
2620665f5677SKrish Sadhukhan 	guest_rflags_test_trap_rip = r->rip;
2621665f5677SKrish Sadhukhan 	r->rflags &= ~X86_EFLAGS_TF;
2622665f5677SKrish Sadhukhan }
2623665f5677SKrish Sadhukhan 
svm_guest_state_test(void)2624a79c9495SKrish Sadhukhan static void svm_guest_state_test(void)
2625a79c9495SKrish Sadhukhan {
2626a79c9495SKrish Sadhukhan 	test_set_guest(basic_guest_main);
2627a79c9495SKrish Sadhukhan 	test_efer();
2628a79c9495SKrish Sadhukhan 	test_cr0();
2629a79c9495SKrish Sadhukhan 	test_cr3();
2630a79c9495SKrish Sadhukhan 	test_cr4();
2631a79c9495SKrish Sadhukhan 	test_dr();
2632abe82380SKrish Sadhukhan 	test_msrpm_iopm_bitmap_addrs();
2633ba3c9773SLara Lazier 	test_canonicalization();
2634ba29942cSKrish Sadhukhan }
2635ba29942cSKrish Sadhukhan 
2636665f5677SKrish Sadhukhan extern void guest_rflags_test_guest(struct svm_test *test);
2637665f5677SKrish Sadhukhan extern u64 *insn2;
2638665f5677SKrish Sadhukhan extern u64 *guest_end;
2639665f5677SKrish Sadhukhan 
2640665f5677SKrish Sadhukhan asm("guest_rflags_test_guest:\n\t"
2641665f5677SKrish Sadhukhan     "push %rbp\n\t"
2642665f5677SKrish Sadhukhan     ".global insn2\n\t"
2643665f5677SKrish Sadhukhan     "insn2:\n\t"
2644665f5677SKrish Sadhukhan     "mov %rsp,%rbp\n\t"
2645665f5677SKrish Sadhukhan     "vmmcall\n\t"
2646665f5677SKrish Sadhukhan     "vmmcall\n\t"
2647665f5677SKrish Sadhukhan     ".global guest_end\n\t"
2648665f5677SKrish Sadhukhan     "guest_end:\n\t"
2649665f5677SKrish Sadhukhan     "vmmcall\n\t"
2650665f5677SKrish Sadhukhan     "pop %rbp\n\t"
2651665f5677SKrish Sadhukhan     "ret");
2652665f5677SKrish Sadhukhan 
svm_test_singlestep(void)2653665f5677SKrish Sadhukhan static void svm_test_singlestep(void)
2654665f5677SKrish Sadhukhan {
2655665f5677SKrish Sadhukhan 	handle_exception(DB_VECTOR, guest_rflags_test_db_handler);
2656665f5677SKrish Sadhukhan 
2657665f5677SKrish Sadhukhan 	/*
2658665f5677SKrish Sadhukhan 	 * Trap expected after completion of first guest instruction
2659665f5677SKrish Sadhukhan 	 */
2660665f5677SKrish Sadhukhan 	vmcb->save.rflags |= X86_EFLAGS_TF;
2661665f5677SKrish Sadhukhan 	report (__svm_vmrun((u64)guest_rflags_test_guest) == SVM_EXIT_VMMCALL &&
2662665f5677SKrish Sadhukhan 		guest_rflags_test_trap_rip == (u64)&insn2,
2663665f5677SKrish Sadhukhan 		"Test EFLAGS.TF on VMRUN: trap expected after completion of first guest instruction");
2664665f5677SKrish Sadhukhan 	/*
2665665f5677SKrish Sadhukhan 	 * No trap expected
2666665f5677SKrish Sadhukhan 	 */
2667665f5677SKrish Sadhukhan 	guest_rflags_test_trap_rip = 0;
2668665f5677SKrish Sadhukhan 	vmcb->save.rip += 3;
2669665f5677SKrish Sadhukhan 	vmcb->save.rflags |= X86_EFLAGS_TF;
2670665f5677SKrish Sadhukhan 	report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL &&
2671665f5677SKrish Sadhukhan 		guest_rflags_test_trap_rip == 0, "Test EFLAGS.TF on VMRUN: trap not expected");
2672665f5677SKrish Sadhukhan 
2673665f5677SKrish Sadhukhan 	/*
2674665f5677SKrish Sadhukhan 	 * Let guest finish execution
2675665f5677SKrish Sadhukhan 	 */
2676665f5677SKrish Sadhukhan 	vmcb->save.rip += 3;
2677665f5677SKrish Sadhukhan 	report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL &&
2678665f5677SKrish Sadhukhan 		vmcb->save.rip == (u64)&guest_end, "Test EFLAGS.TF on VMRUN: guest execution completion");
2679665f5677SKrish Sadhukhan }
2680665f5677SKrish Sadhukhan 
26817a57ef5dSMaxim Levitsky static bool volatile svm_errata_reproduced = false;
26827a57ef5dSMaxim Levitsky static unsigned long volatile physical = 0;
26837a57ef5dSMaxim Levitsky 
26847a57ef5dSMaxim Levitsky 
26857a57ef5dSMaxim Levitsky /*
26867a57ef5dSMaxim Levitsky  *
26877a57ef5dSMaxim Levitsky  * Test the following erratum:
26887a57ef5dSMaxim Levitsky  * When VMRUN/VMSAVE/VMLOAD is attempted by the nested guest, the CPU
26897a57ef5dSMaxim Levitsky  * first checks EAX against host reserved memory regions (so far only
26907a57ef5dSMaxim Levitsky  * SMM_ADDR/SMM_MASK are known to cause it) and only then signals the
26917a57ef5dSMaxim Levitsky  * #VMEXIT.
26927a57ef5dSMaxim Levitsky  *
26937a57ef5dSMaxim Levitsky  * Try to reproduce this by trying vmsave on each possible 4K aligned memory
26947a57ef5dSMaxim Levitsky  * address in the low 4G where the SMM area has to reside.
26957a57ef5dSMaxim Levitsky  */
26967a57ef5dSMaxim Levitsky 
gp_isr(struct ex_regs * r)26977a57ef5dSMaxim Levitsky static void gp_isr(struct ex_regs *r)
26987a57ef5dSMaxim Levitsky {
26997a57ef5dSMaxim Levitsky 	svm_errata_reproduced = true;
27007a57ef5dSMaxim Levitsky 	/* Skip over the vmsave instruction */
27017a57ef5dSMaxim Levitsky 	r->rip += 3;
27027a57ef5dSMaxim Levitsky }
27037a57ef5dSMaxim Levitsky 
svm_vmrun_errata_test(void)27047a57ef5dSMaxim Levitsky static void svm_vmrun_errata_test(void)
27057a57ef5dSMaxim Levitsky {
27067a57ef5dSMaxim Levitsky 	unsigned long *last_page = NULL;
27077a57ef5dSMaxim Levitsky 
27087a57ef5dSMaxim Levitsky 	handle_exception(GP_VECTOR, gp_isr);
27097a57ef5dSMaxim Levitsky 
27107a57ef5dSMaxim Levitsky 	while (!svm_errata_reproduced) {
27117a57ef5dSMaxim Levitsky 
27127a57ef5dSMaxim Levitsky 		unsigned long *page = alloc_pages(1);
27137a57ef5dSMaxim Levitsky 
27147a57ef5dSMaxim Levitsky 		if (!page) {
27155c3582f0SJanis Schoetterl-Glausch 			report_pass("All guest memory tested, no bug found");
27167a57ef5dSMaxim Levitsky 			break;
27177a57ef5dSMaxim Levitsky 		}
27187a57ef5dSMaxim Levitsky 
27197a57ef5dSMaxim Levitsky 		physical = virt_to_phys(page);
27207a57ef5dSMaxim Levitsky 
27217a57ef5dSMaxim Levitsky 		asm volatile (
27227a57ef5dSMaxim Levitsky 			      "mov %[_physical], %%rax\n\t"
27237a57ef5dSMaxim Levitsky 			      "vmsave %%rax\n\t"
27247a57ef5dSMaxim Levitsky 
27257a57ef5dSMaxim Levitsky 			      : /* no outputs */
27267a57ef5dSMaxim Levitsky 			      : [_physical] "m" (physical) /* read-only operand */
27277a57ef5dSMaxim Levitsky 			      : "rax", "memory" /* clobbers; vmsave stores to the page */
27287a57ef5dSMaxim Levitsky 			      );
27297a57ef5dSMaxim Levitsky 
27307a57ef5dSMaxim Levitsky 		if (svm_errata_reproduced) {
2731198dfd0eSJanis Schoetterl-Glausch 			report_fail("Got #GP exception - svm errata reproduced at 0x%lx",
27327a57ef5dSMaxim Levitsky 				    physical);
27337a57ef5dSMaxim Levitsky 			break;
27347a57ef5dSMaxim Levitsky 		}
27357a57ef5dSMaxim Levitsky 
27367a57ef5dSMaxim Levitsky 		*page = (unsigned long)last_page;
27377a57ef5dSMaxim Levitsky 		last_page = page;
27387a57ef5dSMaxim Levitsky 	}
27397a57ef5dSMaxim Levitsky 
27407a57ef5dSMaxim Levitsky 	while (last_page) {
27417a57ef5dSMaxim Levitsky 		unsigned long *page = last_page;
27427a57ef5dSMaxim Levitsky 		last_page = (unsigned long *)*last_page;
27437a57ef5dSMaxim Levitsky 		free_pages_by_order(page, 1);
27447a57ef5dSMaxim Levitsky 	}
27457a57ef5dSMaxim Levitsky }
27467a57ef5dSMaxim Levitsky 
vmload_vmsave_guest_main(struct svm_test * test)27470b6f6cedSKrish Sadhukhan static void vmload_vmsave_guest_main(struct svm_test *test)
27480b6f6cedSKrish Sadhukhan {
27490b6f6cedSKrish Sadhukhan 	u64 vmcb_phys = virt_to_phys(vmcb);
27500b6f6cedSKrish Sadhukhan 
27510b6f6cedSKrish Sadhukhan 	asm volatile ("vmload %0" : : "a"(vmcb_phys));
27520b6f6cedSKrish Sadhukhan 	asm volatile ("vmsave %0" : : "a"(vmcb_phys));
27530b6f6cedSKrish Sadhukhan }
27540b6f6cedSKrish Sadhukhan 
svm_vmload_vmsave(void)27550b6f6cedSKrish Sadhukhan static void svm_vmload_vmsave(void)
27560b6f6cedSKrish Sadhukhan {
27570b6f6cedSKrish Sadhukhan 	u64 intercept_saved = vmcb->control.intercept;
27580b6f6cedSKrish Sadhukhan 
27590b6f6cedSKrish Sadhukhan 	test_set_guest(vmload_vmsave_guest_main);
27600b6f6cedSKrish Sadhukhan 
27610b6f6cedSKrish Sadhukhan 	/*
27620b6f6cedSKrish Sadhukhan 	 * Disabling intercept for VMLOAD and VMSAVE doesn't cause
27630b6f6cedSKrish Sadhukhan 	 * respective #VMEXIT to host
27640b6f6cedSKrish Sadhukhan 	 */
27650b6f6cedSKrish Sadhukhan 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
27660b6f6cedSKrish Sadhukhan 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
27670b6f6cedSKrish Sadhukhan 	svm_vmrun();
27680b6f6cedSKrish Sadhukhan 	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
27690b6f6cedSKrish Sadhukhan 	       "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
27700b6f6cedSKrish Sadhukhan 
27710b6f6cedSKrish Sadhukhan 	/*
27720b6f6cedSKrish Sadhukhan 	 * Enabling intercept for VMLOAD and VMSAVE causes respective
27730b6f6cedSKrish Sadhukhan 	 * #VMEXIT to host
27740b6f6cedSKrish Sadhukhan 	 */
27750b6f6cedSKrish Sadhukhan 	vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD);
27760b6f6cedSKrish Sadhukhan 	svm_vmrun();
27770b6f6cedSKrish Sadhukhan 	report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test "
27780b6f6cedSKrish Sadhukhan 	       "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT");
27790b6f6cedSKrish Sadhukhan 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
27800b6f6cedSKrish Sadhukhan 	vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE);
27810b6f6cedSKrish Sadhukhan 	svm_vmrun();
27820b6f6cedSKrish Sadhukhan 	report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test "
27830b6f6cedSKrish Sadhukhan 	       "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT");
27840b6f6cedSKrish Sadhukhan 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
27850b6f6cedSKrish Sadhukhan 	svm_vmrun();
27860b6f6cedSKrish Sadhukhan 	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
27870b6f6cedSKrish Sadhukhan 	       "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
27880b6f6cedSKrish Sadhukhan 
27890b6f6cedSKrish Sadhukhan 	vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD);
27900b6f6cedSKrish Sadhukhan 	svm_vmrun();
27910b6f6cedSKrish Sadhukhan 	report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test "
27920b6f6cedSKrish Sadhukhan 	       "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT");
27930b6f6cedSKrish Sadhukhan 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
27940b6f6cedSKrish Sadhukhan 	svm_vmrun();
27950b6f6cedSKrish Sadhukhan 	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
27960b6f6cedSKrish Sadhukhan 	       "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
27970b6f6cedSKrish Sadhukhan 
27980b6f6cedSKrish Sadhukhan 	vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE);
27990b6f6cedSKrish Sadhukhan 	svm_vmrun();
28000b6f6cedSKrish Sadhukhan 	report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test "
28010b6f6cedSKrish Sadhukhan 	       "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT");
28020b6f6cedSKrish Sadhukhan 	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
28030b6f6cedSKrish Sadhukhan 	svm_vmrun();
28040b6f6cedSKrish Sadhukhan 	report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
28050b6f6cedSKrish Sadhukhan 	       "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
28060b6f6cedSKrish Sadhukhan 
28070b6f6cedSKrish Sadhukhan 	vmcb->control.intercept = intercept_saved;
28080b6f6cedSKrish Sadhukhan }
28090b6f6cedSKrish Sadhukhan 
prepare_vgif_enabled(struct svm_test * test)2810f6972bd6SLara Lazier static void prepare_vgif_enabled(struct svm_test *test)
2811f6972bd6SLara Lazier {
2812f6972bd6SLara Lazier 	default_prepare(test);
2813f6972bd6SLara Lazier }
2814f6972bd6SLara Lazier 
test_vgif(struct svm_test * test)2815f6972bd6SLara Lazier static void test_vgif(struct svm_test *test)
2816f6972bd6SLara Lazier {
2817f6972bd6SLara Lazier 	asm volatile ("vmmcall\n\tstgi\n\tvmmcall\n\tclgi\n\tvmmcall\n\t");
2818f6972bd6SLara Lazier }
2819f6972bd6SLara Lazier 
vgif_finished(struct svm_test * test)2820f6972bd6SLara Lazier static bool vgif_finished(struct svm_test *test)
2821f6972bd6SLara Lazier {
2822f6972bd6SLara Lazier 	switch (get_test_stage(test))
2823f6972bd6SLara Lazier 		{
2824f6972bd6SLara Lazier 		case 0:
2825f6972bd6SLara Lazier 			if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
2826198dfd0eSJanis Schoetterl-Glausch 				report_fail("VMEXIT not due to vmmcall.");
2827f6972bd6SLara Lazier 				return true;
2828f6972bd6SLara Lazier 			}
2829f6972bd6SLara Lazier 			vmcb->control.int_ctl |= V_GIF_ENABLED_MASK;
2830f6972bd6SLara Lazier 			vmcb->save.rip += 3;
2831f6972bd6SLara Lazier 			inc_test_stage(test);
2832f6972bd6SLara Lazier 			break;
2833f6972bd6SLara Lazier 		case 1:
2834f6972bd6SLara Lazier 			if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
2835198dfd0eSJanis Schoetterl-Glausch 				report_fail("VMEXIT not due to vmmcall.");
2836f6972bd6SLara Lazier 				return true;
2837f6972bd6SLara Lazier 			}
2838f6972bd6SLara Lazier 			if (!(vmcb->control.int_ctl & V_GIF_MASK)) {
2839198dfd0eSJanis Schoetterl-Glausch 				report_fail("Failed to set VGIF when executing STGI.");
2840f6972bd6SLara Lazier 				vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK;
2841f6972bd6SLara Lazier 				return true;
2842f6972bd6SLara Lazier 			}
28435c3582f0SJanis Schoetterl-Glausch 			report_pass("STGI set VGIF bit.");
2844f6972bd6SLara Lazier 			vmcb->save.rip += 3;
2845f6972bd6SLara Lazier 			inc_test_stage(test);
2846f6972bd6SLara Lazier 			break;
2847f6972bd6SLara Lazier 		case 2:
2848f6972bd6SLara Lazier 			if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
2849198dfd0eSJanis Schoetterl-Glausch 				report_fail("VMEXIT not due to vmmcall.");
2850f6972bd6SLara Lazier 				return true;
2851f6972bd6SLara Lazier 			}
2852f6972bd6SLara Lazier 			if (vmcb->control.int_ctl & V_GIF_MASK) {
2853198dfd0eSJanis Schoetterl-Glausch 				report_fail("Failed to clear VGIF when executing CLGI.");
2854f6972bd6SLara Lazier 				vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK;
2855f6972bd6SLara Lazier 				return true;
2856f6972bd6SLara Lazier 			}
28575c3582f0SJanis Schoetterl-Glausch 			report_pass("CLGI cleared VGIF bit.");
2858f6972bd6SLara Lazier 			vmcb->save.rip += 3;
2859f6972bd6SLara Lazier 			inc_test_stage(test);
2860f6972bd6SLara Lazier 			vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK;
2861f6972bd6SLara Lazier 			break;
2862f6972bd6SLara Lazier 		default:
2863f6972bd6SLara Lazier 			return true;
2865f6972bd6SLara Lazier 		}
2866f6972bd6SLara Lazier 
2867f6972bd6SLara Lazier 	return get_test_stage(test) == 3;
2868f6972bd6SLara Lazier }
2869f6972bd6SLara Lazier 
vgif_check(struct svm_test * test)2870f6972bd6SLara Lazier static bool vgif_check(struct svm_test *test)
2871f6972bd6SLara Lazier {
2872f6972bd6SLara Lazier 	return get_test_stage(test) == 3;
2873f6972bd6SLara Lazier }
2874f6972bd6SLara Lazier 
28758650dffeSMaxim Levitsky 
28768650dffeSMaxim Levitsky static int pause_test_counter;
28778650dffeSMaxim Levitsky static int wait_counter;
28788650dffeSMaxim Levitsky 
pause_filter_test_guest_main(struct svm_test * test)28798650dffeSMaxim Levitsky static void pause_filter_test_guest_main(struct svm_test *test)
28808650dffeSMaxim Levitsky {
28818650dffeSMaxim Levitsky 	int i;
28828650dffeSMaxim Levitsky 	for (i = 0 ; i < pause_test_counter ; i++)
28838650dffeSMaxim Levitsky 		pause();
28848650dffeSMaxim Levitsky 
28858650dffeSMaxim Levitsky 	if (!wait_counter)
28868650dffeSMaxim Levitsky 		return;
28878650dffeSMaxim Levitsky 
28888650dffeSMaxim Levitsky 	for (i = 0; i < wait_counter; i++)
28898650dffeSMaxim Levitsky 		;
28908650dffeSMaxim Levitsky 
28918650dffeSMaxim Levitsky 	for (i = 0 ; i < pause_test_counter ; i++)
28928650dffeSMaxim Levitsky 		pause();
28938650dffeSMaxim Levitsky 
28948650dffeSMaxim Levitsky }
28958650dffeSMaxim Levitsky 
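/*
 * PAUSE filter semantics assumed by these tests (APM vol 2): the filter
 * count decrements on every guest PAUSE, and a #VMEXIT(PAUSE) is taken when
 * it reaches zero; with a non-zero threshold, a gap between PAUSEs longer
 * than the threshold resets the count.  Hence the expectation below: an
 * exit when filter_value <= pause_iterations, or when the wait between the
 * two PAUSE bursts is shorter than the threshold.
 */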
pause_filter_run_test(int pause_iterations,int filter_value,int wait_iterations,int threshold)28968650dffeSMaxim Levitsky static void pause_filter_run_test(int pause_iterations, int filter_value, int wait_iterations, int threshold)
28978650dffeSMaxim Levitsky {
28988650dffeSMaxim Levitsky 	test_set_guest(pause_filter_test_guest_main);
28998650dffeSMaxim Levitsky 
29008650dffeSMaxim Levitsky 	pause_test_counter = pause_iterations;
29018650dffeSMaxim Levitsky 	wait_counter = wait_iterations;
29028650dffeSMaxim Levitsky 
29038650dffeSMaxim Levitsky 	vmcb->control.pause_filter_count = filter_value;
29048650dffeSMaxim Levitsky 	vmcb->control.pause_filter_thresh = threshold;
29058650dffeSMaxim Levitsky 	svm_vmrun();
29068650dffeSMaxim Levitsky 
29078650dffeSMaxim Levitsky 	if (filter_value <= pause_iterations || wait_iterations < threshold)
29088650dffeSMaxim Levitsky 		report(vmcb->control.exit_code == SVM_EXIT_PAUSE, "expected PAUSE vmexit");
29098650dffeSMaxim Levitsky 	else
29108650dffeSMaxim Levitsky 		report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "no expected PAUSE vmexit");
29118650dffeSMaxim Levitsky }
29128650dffeSMaxim Levitsky 
pause_filter_test(void)29138650dffeSMaxim Levitsky static void pause_filter_test(void)
29148650dffeSMaxim Levitsky {
29158650dffeSMaxim Levitsky 	if (!pause_filter_supported()) {
29168650dffeSMaxim Levitsky 		report_skip("PAUSE filter not supported in the guest");
29178650dffeSMaxim Levitsky 		return;
29188650dffeSMaxim Levitsky 	}
29198650dffeSMaxim Levitsky 
29208650dffeSMaxim Levitsky 	vmcb->control.intercept |= (1 << INTERCEPT_PAUSE);
29218650dffeSMaxim Levitsky 
29228650dffeSMaxim Levitsky 	// filter count smaller than pause count - expect PAUSE vmexit
29238650dffeSMaxim Levitsky 	pause_filter_run_test(10, 9, 0, 0);
29248650dffeSMaxim Levitsky 
29258650dffeSMaxim Levitsky 	// filter count larger than pause count - no VMexit
29268650dffeSMaxim Levitsky 	pause_filter_run_test(20, 21, 0, 0);
29278650dffeSMaxim Levitsky 
29288650dffeSMaxim Levitsky 
29298650dffeSMaxim Levitsky 	if (pause_threshold_supported()) {
29308650dffeSMaxim Levitsky 		// filter count larger than pause count, and the wait between the
29318650dffeSMaxim Levitsky 		// PAUSE bursts exceeds the threshold so the filter counter resets - no VMexit
29328650dffeSMaxim Levitsky 		pause_filter_run_test(20, 21, 1000, 10);
29338650dffeSMaxim Levitsky 
29348650dffeSMaxim Levitsky 		// filter count larger than pause count, but the wait between the
29358650dffeSMaxim Levitsky 		// PAUSE bursts is below the threshold so the counter doesn't reset - expect PAUSE vmexit
29368650dffeSMaxim Levitsky 		pause_filter_run_test(20, 21, 10, 1000);
29378650dffeSMaxim Levitsky 	} else {
29388650dffeSMaxim Levitsky 		report_skip("PAUSE threshold not supported in the guest");
29398650dffeSMaxim Levitsky 		return;
29408650dffeSMaxim Levitsky 	}
29418650dffeSMaxim Levitsky }
29428650dffeSMaxim Levitsky 
2943694e59baSManali Shukla /* If CR0.TS and CR0.EM are cleared in L2, no #NM is generated. */
svm_no_nm_test(void)2944694e59baSManali Shukla static void svm_no_nm_test(void)
29455c92f156SManali Shukla {
29465c92f156SManali Shukla 	write_cr0(read_cr0() & ~X86_CR0_TS);
2947694e59baSManali Shukla 	test_set_guest((test_guest_func)fnop);
29485c92f156SManali Shukla 
29495c92f156SManali Shukla 	vmcb->save.cr0 = vmcb->save.cr0 & ~(X86_CR0_TS | X86_CR0_EM);
2950694e59baSManali Shukla 	report(svm_vmrun() == SVM_EXIT_VMMCALL,
2951d4ae0a71SThomas Huth 	       "fnop with CR0.TS and CR0.EM unset, no #NM exception");
29525c92f156SManali Shukla }
2953f6972bd6SLara Lazier 
amd_get_lbr_rip(u32 msr)2954ddb85855SSean Christopherson static u64 amd_get_lbr_rip(u32 msr)
2955537d39dfSMaxim Levitsky {
2956ddb85855SSean Christopherson 	return rdmsr(msr) & ~AMD_LBR_RECORD_MISPREDICT;
2957537d39dfSMaxim Levitsky }
2958537d39dfSMaxim Levitsky 
2959ddb85855SSean Christopherson #define HOST_CHECK_LBR(from_expected, to_expected)					\
2960ddb85855SSean Christopherson do {											\
2961ddb85855SSean Christopherson 	TEST_EXPECT_EQ((u64)from_expected, amd_get_lbr_rip(MSR_IA32_LASTBRANCHFROMIP));	\
2962ddb85855SSean Christopherson 	TEST_EXPECT_EQ((u64)to_expected, amd_get_lbr_rip(MSR_IA32_LASTBRANCHTOIP));	\
2963ddb85855SSean Christopherson } while (0)
2964537d39dfSMaxim Levitsky 
2965ddb85855SSean Christopherson /*
2966ddb85855SSean Christopherson  * FIXME: Do something other than generate an exception to communicate failure.
2967ddb85855SSean Christopherson  * Debugging without expected vs. actual is an absolute nightmare.
2968ddb85855SSean Christopherson  */
2969ddb85855SSean Christopherson #define GUEST_CHECK_LBR(from_expected, to_expected)				\
2970ddb85855SSean Christopherson do {										\
2971ddb85855SSean Christopherson 	if ((u64)(from_expected) != amd_get_lbr_rip(MSR_IA32_LASTBRANCHFROMIP))	\
2972ddb85855SSean Christopherson 		asm volatile("ud2");						\
2973ddb85855SSean Christopherson 	if ((u64)(to_expected) != amd_get_lbr_rip(MSR_IA32_LASTBRANCHTOIP))	\
2974ddb85855SSean Christopherson 		asm volatile("ud2");						\
2975ddb85855SSean Christopherson } while (0)
2976537d39dfSMaxim Levitsky 
297792098120SSean Christopherson #define REPORT_GUEST_LBR_ERROR(vmcb)						\
297892098120SSean Christopherson 	report(false, "LBR guest test failed.  Exit reason 0x%x, RIP = %lx, from = %lx, to = %lx, ex from = %lx, ex to = %lx", \
297992098120SSean Christopherson 		       vmcb->control.exit_code, vmcb->save.rip,			\
298092098120SSean Christopherson 		       vmcb->save.br_from, vmcb->save.br_to,			\
298192098120SSean Christopherson 		       vmcb->save.last_excp_from, vmcb->save.last_excp_to)
298292098120SSean Christopherson 
2983537d39dfSMaxim Levitsky #define DO_BRANCH(branch_name)				\
2984537d39dfSMaxim Levitsky 	asm volatile (					\
2985537d39dfSMaxim Levitsky 		      # branch_name "_from:"		\
2986537d39dfSMaxim Levitsky 		      "jmp " # branch_name  "_to\n"	\
2987537d39dfSMaxim Levitsky 		      "nop\n"				\
2988537d39dfSMaxim Levitsky 		      "nop\n"				\
2989537d39dfSMaxim Levitsky 		      # branch_name  "_to:"		\
2990537d39dfSMaxim Levitsky 		      "nop\n"				\
2991537d39dfSMaxim Levitsky 		       )
2992537d39dfSMaxim Levitsky 
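/*
 * DO_BRANCH() emits a taken jmp between two named labels; the label
 * addresses (exported below) are the expected LBR "from"/"to" values that
 * HOST_CHECK_LBR()/GUEST_CHECK_LBR() compare against.
 */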
2993537d39dfSMaxim Levitsky 
2994537d39dfSMaxim Levitsky extern u64 guest_branch0_from, guest_branch0_to;
2995537d39dfSMaxim Levitsky extern u64 guest_branch2_from, guest_branch2_to;
2996537d39dfSMaxim Levitsky 
2997537d39dfSMaxim Levitsky extern u64 host_branch0_from, host_branch0_to;
2998537d39dfSMaxim Levitsky extern u64 host_branch2_from, host_branch2_to;
2999537d39dfSMaxim Levitsky extern u64 host_branch3_from, host_branch3_to;
3000537d39dfSMaxim Levitsky extern u64 host_branch4_from, host_branch4_to;
3001537d39dfSMaxim Levitsky 
3002537d39dfSMaxim Levitsky u64 dbgctl;
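/*
 * Scratch DEBUGCTL value, shared between the host-side tests and the
 * nested guests (which run in the same address space and cannot call
 * report(), hence the ud2-on-mismatch checks above).
 */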
3003537d39dfSMaxim Levitsky 
svm_lbrv_test_guest1(void)3004537d39dfSMaxim Levitsky static void svm_lbrv_test_guest1(void)
3005537d39dfSMaxim Levitsky {
3006537d39dfSMaxim Levitsky 	/*
3007537d39dfSMaxim Levitsky 	 * This guest expects the LBR to be enabled when it starts; it does
3008537d39dfSMaxim Levitsky 	 * a branch, then disables the LBR and checks the recorded values.
3009537d39dfSMaxim Levitsky 	 */
3010537d39dfSMaxim Levitsky 
3011537d39dfSMaxim Levitsky 	DO_BRANCH(guest_branch0);
3012537d39dfSMaxim Levitsky 
3013537d39dfSMaxim Levitsky 	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3014537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3015537d39dfSMaxim Levitsky 
3016537d39dfSMaxim Levitsky 	if (dbgctl != DEBUGCTLMSR_LBR)
3017537d39dfSMaxim Levitsky 		asm volatile("ud2\n");
3018537d39dfSMaxim Levitsky 	if (rdmsr(MSR_IA32_DEBUGCTLMSR) != 0)
3019537d39dfSMaxim Levitsky 		asm volatile("ud2\n");
3020537d39dfSMaxim Levitsky 
3021ddb85855SSean Christopherson 	GUEST_CHECK_LBR(&guest_branch0_from, &guest_branch0_to);
3022537d39dfSMaxim Levitsky 	asm volatile ("vmmcall\n");
3023537d39dfSMaxim Levitsky }
3024537d39dfSMaxim Levitsky 
svm_lbrv_test_guest2(void)3025537d39dfSMaxim Levitsky static void svm_lbrv_test_guest2(void)
3026537d39dfSMaxim Levitsky {
3027537d39dfSMaxim Levitsky 	/*
3028537d39dfSMaxim Levitsky 	 * This guest expects the LBR to be disabled when it starts,
3029537d39dfSMaxim Levitsky 	 * enables it, does a branch, disables it and then checks.
3030537d39dfSMaxim Levitsky 	 */
3031537d39dfSMaxim Levitsky 
3032537d39dfSMaxim Levitsky 	DO_BRANCH(guest_branch1);
3033537d39dfSMaxim Levitsky 	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3034537d39dfSMaxim Levitsky 
3035537d39dfSMaxim Levitsky 	if (dbgctl != 0)
3036537d39dfSMaxim Levitsky 		asm volatile("ud2\n");
3037537d39dfSMaxim Levitsky 
3038ddb85855SSean Christopherson 	GUEST_CHECK_LBR(&host_branch2_from, &host_branch2_to);
3039537d39dfSMaxim Levitsky 
3040537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3041537d39dfSMaxim Levitsky 	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3042537d39dfSMaxim Levitsky 	DO_BRANCH(guest_branch2);
3043537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3044537d39dfSMaxim Levitsky 
3045537d39dfSMaxim Levitsky 	if (dbgctl != DEBUGCTLMSR_LBR)
3046537d39dfSMaxim Levitsky 		asm volatile("ud2\n");
3047ddb85855SSean Christopherson 	GUEST_CHECK_LBR(&guest_branch2_from, &guest_branch2_to);
3048537d39dfSMaxim Levitsky 
3049537d39dfSMaxim Levitsky 	asm volatile ("vmmcall\n");
3050537d39dfSMaxim Levitsky }
3051537d39dfSMaxim Levitsky 
svm_lbrv_test0(void)3052537d39dfSMaxim Levitsky static void svm_lbrv_test0(void)
3053537d39dfSMaxim Levitsky {
3054537d39dfSMaxim Levitsky 	report(true, "Basic LBR test");
3055537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3056537d39dfSMaxim Levitsky 	DO_BRANCH(host_branch0);
3057537d39dfSMaxim Levitsky 	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3058537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3059537d39dfSMaxim Levitsky 
3060554fa461SSean Christopherson 	TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR);
3061537d39dfSMaxim Levitsky 	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3062554fa461SSean Christopherson 	TEST_EXPECT_EQ(dbgctl, 0);
3063537d39dfSMaxim Levitsky 
3064ddb85855SSean Christopherson 	HOST_CHECK_LBR(&host_branch0_from, &host_branch0_to);
3065537d39dfSMaxim Levitsky }
3066537d39dfSMaxim Levitsky 
svm_lbrv_test1(void)3067537d39dfSMaxim Levitsky static void svm_lbrv_test1(void)
3068537d39dfSMaxim Levitsky {
3069537d39dfSMaxim Levitsky 	report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host (1)");
3070537d39dfSMaxim Levitsky 
30715200c1f1SSean Christopherson 	svm_setup_vmrun((u64)svm_lbrv_test_guest1);
3072537d39dfSMaxim Levitsky 	vmcb->control.virt_ext = 0;
3073537d39dfSMaxim Levitsky 
3074537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3075537d39dfSMaxim Levitsky 	DO_BRANCH(host_branch1);
3076537d39dfSMaxim Levitsky 	SVM_BARE_VMRUN;
3077537d39dfSMaxim Levitsky 	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3078537d39dfSMaxim Levitsky 
3079537d39dfSMaxim Levitsky 	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
308092098120SSean Christopherson 		REPORT_GUEST_LBR_ERROR(vmcb);
3081537d39dfSMaxim Levitsky 		return;
3082537d39dfSMaxim Levitsky 	}
3083537d39dfSMaxim Levitsky 
3084554fa461SSean Christopherson 	TEST_EXPECT_EQ(dbgctl, 0);
3085ddb85855SSean Christopherson 	HOST_CHECK_LBR(&guest_branch0_from, &guest_branch0_to);
3086537d39dfSMaxim Levitsky }
3087537d39dfSMaxim Levitsky 
svm_lbrv_test2(void)3088537d39dfSMaxim Levitsky static void svm_lbrv_test2(void)
3089537d39dfSMaxim Levitsky {
3090537d39dfSMaxim Levitsky 	report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host (2)");
3091537d39dfSMaxim Levitsky 
30925200c1f1SSean Christopherson 	svm_setup_vmrun((u64)svm_lbrv_test_guest2);
3093537d39dfSMaxim Levitsky 	vmcb->control.virt_ext = 0;
3094537d39dfSMaxim Levitsky 
3095537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3096537d39dfSMaxim Levitsky 	DO_BRANCH(host_branch2);
3097537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3098537d39dfSMaxim Levitsky 	SVM_BARE_VMRUN;
3099537d39dfSMaxim Levitsky 	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3100537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3101537d39dfSMaxim Levitsky 
3102537d39dfSMaxim Levitsky 	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
310392098120SSean Christopherson 		REPORT_GUEST_LBR_ERROR(vmcb);
3104537d39dfSMaxim Levitsky 		return;
3105537d39dfSMaxim Levitsky 	}
3106537d39dfSMaxim Levitsky 
3107554fa461SSean Christopherson 	TEST_EXPECT_EQ(dbgctl, 0);
3108ddb85855SSean Christopherson 	HOST_CHECK_LBR(&guest_branch2_from, &guest_branch2_to);
3109537d39dfSMaxim Levitsky }
3110537d39dfSMaxim Levitsky 
svm_lbrv_nested_test1(void)3111537d39dfSMaxim Levitsky static void svm_lbrv_nested_test1(void)
3112537d39dfSMaxim Levitsky {
3113537d39dfSMaxim Levitsky 	if (!lbrv_supported()) {
3114537d39dfSMaxim Levitsky 		report_skip("LBRV not supported in the guest");
3115537d39dfSMaxim Levitsky 		return;
3116537d39dfSMaxim Levitsky 	}
3117537d39dfSMaxim Levitsky 
3118537d39dfSMaxim Levitsky 	report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (1)");
31195200c1f1SSean Christopherson 	svm_setup_vmrun((u64)svm_lbrv_test_guest1);
3120537d39dfSMaxim Levitsky 	vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK;
3121537d39dfSMaxim Levitsky 	vmcb->save.dbgctl = DEBUGCTLMSR_LBR;
3122537d39dfSMaxim Levitsky 
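	/*
	 * With LBRV enabled, VMRUN loads the guest's DEBUGCTL/LBR state from
	 * the VMCB and #VMEXIT restores the host's, so the host branch
	 * recorded below must survive the guest run intact.
	 */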
3123537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3124537d39dfSMaxim Levitsky 	DO_BRANCH(host_branch3);
3125537d39dfSMaxim Levitsky 	SVM_BARE_VMRUN;
3126537d39dfSMaxim Levitsky 	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3127537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3128537d39dfSMaxim Levitsky 
3129537d39dfSMaxim Levitsky 	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
313092098120SSean Christopherson 		REPORT_GUEST_LBR_ERROR(vmcb);
3131537d39dfSMaxim Levitsky 		return;
3132537d39dfSMaxim Levitsky 	}
3133537d39dfSMaxim Levitsky 
3134537d39dfSMaxim Levitsky 	if (vmcb->save.dbgctl != 0) {
3135537d39dfSMaxim Levitsky 		report(false, "unexpected virtual guest MSR_IA32_DEBUGCTLMSR value 0x%lx", vmcb->save.dbgctl);
3136537d39dfSMaxim Levitsky 		return;
3137537d39dfSMaxim Levitsky 	}
3138537d39dfSMaxim Levitsky 
3139554fa461SSean Christopherson 	TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR);
3140ddb85855SSean Christopherson 	HOST_CHECK_LBR(&host_branch3_from, &host_branch3_to);
3141537d39dfSMaxim Levitsky }
31423f27d772SManali Shukla 
svm_lbrv_nested_test2(void)3143537d39dfSMaxim Levitsky static void svm_lbrv_nested_test2(void)
3144537d39dfSMaxim Levitsky {
3145537d39dfSMaxim Levitsky 	if (!lbrv_supported()) {
3146537d39dfSMaxim Levitsky 		report_skip("LBRV not supported in the guest");
3147537d39dfSMaxim Levitsky 		return;
3148537d39dfSMaxim Levitsky 	}
3149537d39dfSMaxim Levitsky 
3150537d39dfSMaxim Levitsky 	report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (2)");
31515200c1f1SSean Christopherson 	svm_setup_vmrun((u64)svm_lbrv_test_guest2);
3152537d39dfSMaxim Levitsky 	vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK;
3153537d39dfSMaxim Levitsky 
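	/*
	 * Seed the guest's virtualized DEBUGCTL/LBR state; with LBRV enabled
	 * these values should stay confined to the VMCB and must not bleed
	 * into the host's real LBR MSRs.
	 */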
3154537d39dfSMaxim Levitsky 	vmcb->save.dbgctl = 0;
3155537d39dfSMaxim Levitsky 	vmcb->save.br_from = (u64)&host_branch2_from;
3156537d39dfSMaxim Levitsky 	vmcb->save.br_to = (u64)&host_branch2_to;
3157537d39dfSMaxim Levitsky 
3158537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
3159537d39dfSMaxim Levitsky 	DO_BRANCH(host_branch4);
3160537d39dfSMaxim Levitsky 	SVM_BARE_VMRUN;
3161537d39dfSMaxim Levitsky 	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
3162537d39dfSMaxim Levitsky 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
3163537d39dfSMaxim Levitsky 
3164537d39dfSMaxim Levitsky 	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
316592098120SSean Christopherson 		REPORT_GUEST_LBR_ERROR(vmcb);
3166537d39dfSMaxim Levitsky 		return;
3167537d39dfSMaxim Levitsky 	}
3168537d39dfSMaxim Levitsky 
3169554fa461SSean Christopherson 	TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR);
3170ddb85855SSean Christopherson 	HOST_CHECK_LBR(&host_branch4_from, &host_branch4_to);
3171537d39dfSMaxim Levitsky }
3172537d39dfSMaxim Levitsky 
3174c45bccfcSMaxim Levitsky // test that a nested guest which enables INTR interception, but does
3175c45bccfcSMaxim Levitsky // not enable virtual interrupt masking, works correctly
3176c45bccfcSMaxim Levitsky 
3177c45bccfcSMaxim Levitsky static volatile int dummy_isr_received;
dummy_isr(isr_regs_t * regs)3178c45bccfcSMaxim Levitsky static void dummy_isr(isr_regs_t *regs)
3179c45bccfcSMaxim Levitsky {
3180c45bccfcSMaxim Levitsky 	dummy_isr_received++;
3181c45bccfcSMaxim Levitsky 	eoi();
3182c45bccfcSMaxim Levitsky }
3183c45bccfcSMaxim Levitsky 
3185c45bccfcSMaxim Levitsky static volatile int nmi_received;
dummy_nmi_handler(struct ex_regs * regs)3186c45bccfcSMaxim Levitsky static void dummy_nmi_handler(struct ex_regs *regs)
3187c45bccfcSMaxim Levitsky {
3188c45bccfcSMaxim Levitsky 	nmi_received++;
3189c45bccfcSMaxim Levitsky }
3190c45bccfcSMaxim Levitsky 
svm_intr_intercept_mix_run_guest(volatile int * counter,int expected_vmexit)3192c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_run_guest(volatile int *counter, int expected_vmexit)
3193c45bccfcSMaxim Levitsky {
3194c45bccfcSMaxim Levitsky 	if (counter)
3195c45bccfcSMaxim Levitsky 		*counter = 0;
3196c45bccfcSMaxim Levitsky 
3197c45bccfcSMaxim Levitsky 	sti();  // host IF value should not matter
319827eeac46SSean Christopherson 	clgi(); // vmrun will set GIF back to 1
3199c45bccfcSMaxim Levitsky 
3200c45bccfcSMaxim Levitsky 	svm_vmrun();
3201c45bccfcSMaxim Levitsky 
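	/*
	 * #VMEXIT leaves GIF clear, so even with host EFLAGS.IF set the
	 * pending interrupt cannot be taken until the stgi() below.
	 */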
3202c45bccfcSMaxim Levitsky 	if (counter)
3203c45bccfcSMaxim Levitsky 		report(!*counter, "No interrupt expected");
3204c45bccfcSMaxim Levitsky 
3205c45bccfcSMaxim Levitsky 	stgi();
3206c45bccfcSMaxim Levitsky 
3207c45bccfcSMaxim Levitsky 	if (counter)
3208c45bccfcSMaxim Levitsky 		report(*counter == 1, "Interrupt is expected");
3209c45bccfcSMaxim Levitsky 
321027eeac46SSean Christopherson 	report(vmcb->control.exit_code == expected_vmexit,
321127eeac46SSean Christopherson 	       "Wanted VM-Exit reason 0x%x, got 0x%x",
321227eeac46SSean Christopherson 	       expected_vmexit, vmcb->control.exit_code);
3213c45bccfcSMaxim Levitsky 	report(vmcb->save.rflags & X86_EFLAGS_IF, "Guest should have EFLAGS.IF set now");
3214c45bccfcSMaxim Levitsky 	cli();
3215c45bccfcSMaxim Levitsky }
3216c45bccfcSMaxim Levitsky 
3218d0458710SMaxim Levitsky // subtest: test that enabling EFLAGS.IF is enough to trigger an interrupt
svm_intr_intercept_mix_if_guest(struct svm_test * test)3219c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_if_guest(struct svm_test *test)
3220c45bccfcSMaxim Levitsky {
3221c45bccfcSMaxim Levitsky 	asm volatile("nop;nop;nop;nop");
3222c45bccfcSMaxim Levitsky 	report(!dummy_isr_received, "No interrupt expected");
3223e4007e62SMaxim Levitsky 	sti_nop();
3224c45bccfcSMaxim Levitsky 	report(0, "must not reach here");
3225c45bccfcSMaxim Levitsky }
3226c45bccfcSMaxim Levitsky 
svm_intr_intercept_mix_if(void)3227c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_if(void)
3228c45bccfcSMaxim Levitsky {
3229c45bccfcSMaxim Levitsky 	// make a physical interrupt pending
3230c45bccfcSMaxim Levitsky 	handle_irq(0x55, dummy_isr);
3231c45bccfcSMaxim Levitsky 
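	/*
	 * Intercept INTR and disable virtual interrupt masking, so the
	 * guest's real EFLAGS.IF gates delivery of the physical interrupt.
	 */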
3232c45bccfcSMaxim Levitsky 	vmcb->control.intercept |= (1 << INTERCEPT_INTR);
3233c45bccfcSMaxim Levitsky 	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3234c45bccfcSMaxim Levitsky 	vmcb->save.rflags &= ~X86_EFLAGS_IF;
3235c45bccfcSMaxim Levitsky 
3236c45bccfcSMaxim Levitsky 	test_set_guest(svm_intr_intercept_mix_if_guest);
32372602a896SMaxim Levitsky 	cli();
3238c45bccfcSMaxim Levitsky 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0);
3239c45bccfcSMaxim Levitsky 	svm_intr_intercept_mix_run_guest(&dummy_isr_received, SVM_EXIT_INTR);
3240c45bccfcSMaxim Levitsky }
3241c45bccfcSMaxim Levitsky 
3243c45bccfcSMaxim Levitsky // subtest: test that a clever guest can trigger an interrupt by setting GIF
3244c45bccfcSMaxim Levitsky // if GIF is not intercepted
svm_intr_intercept_mix_gif_guest(struct svm_test * test)3245c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif_guest(struct svm_test *test)
3246c45bccfcSMaxim Levitsky {
3248c45bccfcSMaxim Levitsky 	asm volatile("nop;nop;nop;nop");
3249c45bccfcSMaxim Levitsky 	report(!dummy_isr_received, "No interrupt expected");
3250c45bccfcSMaxim Levitsky 
3251c45bccfcSMaxim Levitsky 	// clear GIF and enable IF;
3252c45bccfcSMaxim Levitsky 	// that should still not cause a VM exit
3253c45bccfcSMaxim Levitsky 	clgi();
3254e4007e62SMaxim Levitsky 	sti_nop();
3255c45bccfcSMaxim Levitsky 	report(!dummy_isr_received, "No interrupt expected");
3256c45bccfcSMaxim Levitsky 
3257c45bccfcSMaxim Levitsky 	stgi();
3258c45bccfcSMaxim Levitsky 	report(0, "must not reach here");
3259c45bccfcSMaxim Levitsky }
3260c45bccfcSMaxim Levitsky 
svm_intr_intercept_mix_gif(void)3261c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif(void)
3262c45bccfcSMaxim Levitsky {
3263c45bccfcSMaxim Levitsky 	handle_irq(0x55, dummy_isr);
3264c45bccfcSMaxim Levitsky 
3265c45bccfcSMaxim Levitsky 	vmcb->control.intercept |= (1 << INTERCEPT_INTR);
3266c45bccfcSMaxim Levitsky 	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3267c45bccfcSMaxim Levitsky 	vmcb->save.rflags &= ~X86_EFLAGS_IF;
3268c45bccfcSMaxim Levitsky 
3269c45bccfcSMaxim Levitsky 	test_set_guest(svm_intr_intercept_mix_gif_guest);
32702602a896SMaxim Levitsky 	cli();
3271c45bccfcSMaxim Levitsky 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0);
3272c45bccfcSMaxim Levitsky 	svm_intr_intercept_mix_run_guest(&dummy_isr_received, SVM_EXIT_INTR);
3273c45bccfcSMaxim Levitsky }
3274c45bccfcSMaxim Levitsky 
3275c45bccfcSMaxim Levitsky // subtest: test that a clever guest can trigger an interrupt by setting GIF
3276c45bccfcSMaxim Levitsky // if GIF is not intercepted and the interrupt arrives after the guest
3277c45bccfcSMaxim Levitsky // has started running
svm_intr_intercept_mix_gif_guest2(struct svm_test * test)3278c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif_guest2(struct svm_test *test)
3279c45bccfcSMaxim Levitsky {
3280c45bccfcSMaxim Levitsky 	asm volatile("nop;nop;nop;nop");
3281c45bccfcSMaxim Levitsky 	report(!dummy_isr_received, "No interrupt expected");
3282c45bccfcSMaxim Levitsky 
3283c45bccfcSMaxim Levitsky 	clgi();
3284c45bccfcSMaxim Levitsky 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0);
3285c45bccfcSMaxim Levitsky 	report(!dummy_isr_received, "No interrupt expected");
3286c45bccfcSMaxim Levitsky 
3287c45bccfcSMaxim Levitsky 	stgi();
3288c45bccfcSMaxim Levitsky 	report(0, "must not reach here");
3289c45bccfcSMaxim Levitsky }
3290c45bccfcSMaxim Levitsky 
svm_intr_intercept_mix_gif2(void)3291c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif2(void)
3292c45bccfcSMaxim Levitsky {
3293c45bccfcSMaxim Levitsky 	handle_irq(0x55, dummy_isr);
3294c45bccfcSMaxim Levitsky 
3295c45bccfcSMaxim Levitsky 	vmcb->control.intercept |= (1 << INTERCEPT_INTR);
3296c45bccfcSMaxim Levitsky 	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3297c45bccfcSMaxim Levitsky 	vmcb->save.rflags |= X86_EFLAGS_IF;
3298c45bccfcSMaxim Levitsky 
3299c45bccfcSMaxim Levitsky 	test_set_guest(svm_intr_intercept_mix_gif_guest2);
3300c45bccfcSMaxim Levitsky 	svm_intr_intercept_mix_run_guest(&dummy_isr_received, SVM_EXIT_INTR);
3301c45bccfcSMaxim Levitsky }
3302c45bccfcSMaxim Levitsky 
3304c45bccfcSMaxim Levitsky // subtest: test that a pending NMI will be handled when the guest enables GIF
svm_intr_intercept_mix_nmi_guest(struct svm_test * test)3305c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_nmi_guest(struct svm_test *test)
3306c45bccfcSMaxim Levitsky {
3307c45bccfcSMaxim Levitsky 	asm volatile("nop;nop;nop;nop");
3308c45bccfcSMaxim Levitsky 	report(!nmi_received, "No NMI expected");
3309c45bccfcSMaxim Levitsky 	cli(); // should have no effect
3310c45bccfcSMaxim Levitsky 
3311c45bccfcSMaxim Levitsky 	clgi();
3312c45bccfcSMaxim Levitsky 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI, 0);
3313e4007e62SMaxim Levitsky 	sti_nop(); // should have no effect
3314c45bccfcSMaxim Levitsky 	report(!nmi_received, "No NMI expected");
3315c45bccfcSMaxim Levitsky 
3316c45bccfcSMaxim Levitsky 	stgi();
3317c45bccfcSMaxim Levitsky 	report(0, "must not reach here");
3318c45bccfcSMaxim Levitsky }
3319c45bccfcSMaxim Levitsky 
svm_intr_intercept_mix_nmi(void)3320c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_nmi(void)
3321c45bccfcSMaxim Levitsky {
3322c45bccfcSMaxim Levitsky 	handle_exception(2, dummy_nmi_handler);
3323c45bccfcSMaxim Levitsky 
3324c45bccfcSMaxim Levitsky 	vmcb->control.intercept |= (1 << INTERCEPT_NMI);
3325c45bccfcSMaxim Levitsky 	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3326c45bccfcSMaxim Levitsky 	vmcb->save.rflags |= X86_EFLAGS_IF;
3327c45bccfcSMaxim Levitsky 
3328c45bccfcSMaxim Levitsky 	test_set_guest(svm_intr_intercept_mix_nmi_guest);
3329c45bccfcSMaxim Levitsky 	svm_intr_intercept_mix_run_guest(&nmi_received, SVM_EXIT_NMI);
3330c45bccfcSMaxim Levitsky }
3331c45bccfcSMaxim Levitsky 
3332c45bccfcSMaxim Levitsky // test that a pending SMI will be handled when the guest enables GIF
3333c45bccfcSMaxim Levitsky // TODO: SMIs can't really be counted, so just test that the guest doesn't
3334c45bccfcSMaxim Levitsky // hang and that a VM exit on SMI occurs
svm_intr_intercept_mix_smi_guest(struct svm_test * test)3335c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_smi_guest(struct svm_test *test)
3336c45bccfcSMaxim Levitsky {
3337c45bccfcSMaxim Levitsky 	asm volatile("nop;nop;nop;nop");
3338c45bccfcSMaxim Levitsky 
3339c45bccfcSMaxim Levitsky 	clgi();
3340c45bccfcSMaxim Levitsky 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_SMI, 0);
3341e4007e62SMaxim Levitsky 	sti_nop(); // should have no effect
3342c45bccfcSMaxim Levitsky 	stgi();
3343c45bccfcSMaxim Levitsky 	report(0, "must not reach here");
3344c45bccfcSMaxim Levitsky }
3345c45bccfcSMaxim Levitsky 
svm_intr_intercept_mix_smi(void)3346c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_smi(void)
3347c45bccfcSMaxim Levitsky {
3348c45bccfcSMaxim Levitsky 	vmcb->control.intercept |= (1 << INTERCEPT_SMI);
3349c45bccfcSMaxim Levitsky 	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3350c45bccfcSMaxim Levitsky 	test_set_guest(svm_intr_intercept_mix_smi_guest);
3351c45bccfcSMaxim Levitsky 	svm_intr_intercept_mix_run_guest(NULL, SVM_EXIT_SMI);
3352c45bccfcSMaxim Levitsky }
3353c45bccfcSMaxim Levitsky 
svm_l2_ac_test(void)33548177dc62SManali Shukla static void svm_l2_ac_test(void)
33558177dc62SManali Shukla {
33568177dc62SManali Shukla 	bool hit_ac = false;
33578177dc62SManali Shukla 
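	/*
	 * CR0.AM together with EFLAGS.AC arms alignment checking at CPL 3,
	 * so the usermode helper below should take a #AC.
	 */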
33588177dc62SManali Shukla 	write_cr0(read_cr0() | X86_CR0_AM);
33598177dc62SManali Shukla 	write_rflags(read_rflags() | X86_EFLAGS_AC);
33608177dc62SManali Shukla 
33618177dc62SManali Shukla 	run_in_user(generate_usermode_ac, AC_VECTOR, 0, 0, 0, 0, &hit_ac);
33628177dc62SManali Shukla 	report(hit_ac, "Usermode #AC handled in L2");
33638177dc62SManali Shukla 	vmmcall();
33648177dc62SManali Shukla }
33658177dc62SManali Shukla 
33668177dc62SManali Shukla struct svm_exception_test {
33678177dc62SManali Shukla 	u8 vector;
33688177dc62SManali Shukla 	void (*guest_code)(void);
33698177dc62SManali Shukla };
33708177dc62SManali Shukla 
33718177dc62SManali Shukla struct svm_exception_test svm_exception_tests[] = {
33728177dc62SManali Shukla 	{ GP_VECTOR, generate_non_canonical_gp },
33738177dc62SManali Shukla 	{ UD_VECTOR, generate_ud },
33748177dc62SManali Shukla 	{ DE_VECTOR, generate_de },
33758177dc62SManali Shukla 	{ DB_VECTOR, generate_single_step_db },
337644550f53SManali Shukla 	{ BP_VECTOR, generate_bp },
33778177dc62SManali Shukla 	{ AC_VECTOR, svm_l2_ac_test },
33780851b7f7SManali Shukla 	{ OF_VECTOR, generate_of },
3379694e59baSManali Shukla 	{ NM_VECTOR, generate_cr0_ts_nm },
3380694e59baSManali Shukla 	{ NM_VECTOR, generate_cr0_em_nm },
33818177dc62SManali Shukla };
33828177dc62SManali Shukla 
33838177dc62SManali Shukla static u8 svm_exception_test_vector;
33848177dc62SManali Shukla 
svm_exception_handler(struct ex_regs * regs)33858177dc62SManali Shukla static void svm_exception_handler(struct ex_regs *regs)
33868177dc62SManali Shukla {
33878177dc62SManali Shukla 	report(regs->vector == svm_exception_test_vector,
33888177dc62SManali Shukla 		"Handling %s in L2's exception handler",
33898177dc62SManali Shukla 		exception_mnemonic(svm_exception_test_vector));
33908177dc62SManali Shukla 	vmmcall();
33918177dc62SManali Shukla }
33928177dc62SManali Shukla 
handle_exception_in_l2(u8 vector)33938177dc62SManali Shukla static void handle_exception_in_l2(u8 vector)
33948177dc62SManali Shukla {
33958177dc62SManali Shukla 	handler old_handler = handle_exception(vector, svm_exception_handler);
33968177dc62SManali Shukla 	svm_exception_test_vector = vector;
33978177dc62SManali Shukla 
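	/*
	 * The fault should be delivered to the L2 handler installed above,
	 * which then exits to L1 via VMMCALL.
	 */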
33988177dc62SManali Shukla 	report(svm_vmrun() == SVM_EXIT_VMMCALL,
33998177dc62SManali Shukla 		"%s handled by L2", exception_mnemonic(vector));
34008177dc62SManali Shukla 
34018177dc62SManali Shukla 	handle_exception(vector, old_handler);
34028177dc62SManali Shukla }
34038177dc62SManali Shukla 
handle_exception_in_l1(u32 vector)34048177dc62SManali Shukla static void handle_exception_in_l1(u32 vector)
34058177dc62SManali Shukla {
34068177dc62SManali Shukla 	u32 old_ie = vmcb->control.intercept_exceptions;
34078177dc62SManali Shukla 
34088177dc62SManali Shukla 	vmcb->control.intercept_exceptions |= (1ULL << vector);
34098177dc62SManali Shukla 
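	/*
	 * With the vector intercepted, the fault in L2 should cause a
	 * #VMEXIT to L1 instead of invoking any guest handler.
	 */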
34108177dc62SManali Shukla 	report(svm_vmrun() == (SVM_EXIT_EXCP_BASE + vector),
34118177dc62SManali Shukla 		"%s handled by L1",  exception_mnemonic(vector));
34128177dc62SManali Shukla 
34138177dc62SManali Shukla 	vmcb->control.intercept_exceptions = old_ie;
34148177dc62SManali Shukla }
34158177dc62SManali Shukla 
svm_exception_test(void)34168177dc62SManali Shukla static void svm_exception_test(void)
34178177dc62SManali Shukla {
34188177dc62SManali Shukla 	struct svm_exception_test *t;
34198177dc62SManali Shukla 	int i;
34208177dc62SManali Shukla 
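	/*
	 * For each vector, verify delivery first to L2's own handler and
	 * then, with the intercept set, to L1, resetting the VMCB between
	 * the two runs.
	 */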
34218177dc62SManali Shukla 	for (i = 0; i < ARRAY_SIZE(svm_exception_tests); i++) {
34228177dc62SManali Shukla 		t = &svm_exception_tests[i];
34238177dc62SManali Shukla 		test_set_guest((test_guest_func)t->guest_code);
34248177dc62SManali Shukla 
34258177dc62SManali Shukla 		handle_exception_in_l2(t->vector);
34268177dc62SManali Shukla 		vmcb_ident(vmcb);
34278177dc62SManali Shukla 
34288177dc62SManali Shukla 		handle_exception_in_l1(t->vector);
34298177dc62SManali Shukla 		vmcb_ident(vmcb);
34308177dc62SManali Shukla 	}
34318177dc62SManali Shukla }
34328177dc62SManali Shukla 
shutdown_intercept_test_guest(struct svm_test * test)3433c64f24fdSMaxim Levitsky static void shutdown_intercept_test_guest(struct svm_test *test)
3434c64f24fdSMaxim Levitsky {
3435c64f24fdSMaxim Levitsky 	asm volatile ("ud2");
3436c64f24fdSMaxim Levitsky 	report_fail("should not reach here");
3438c64f24fdSMaxim Levitsky }
3439c64f24fdSMaxim Levitsky 
svm_shutdown_intercept_test(void)3440c64f24fdSMaxim Levitsky static void svm_shutdown_intercept_test(void)
3441c64f24fdSMaxim Levitsky {
3442c64f24fdSMaxim Levitsky 	test_set_guest(shutdown_intercept_test_guest);
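	/*
	 * Point the guest's IDT at an unmapped page: delivering the #UD then
	 * faults recursively until a triple fault, which, with the intercept
	 * set, should exit to the host as SVM_EXIT_SHUTDOWN.
	 */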
3443c64f24fdSMaxim Levitsky 	vmcb->save.idtr.base = (u64)alloc_vpage();
3444c64f24fdSMaxim Levitsky 	vmcb->control.intercept |= (1ULL << INTERCEPT_SHUTDOWN);
3445c64f24fdSMaxim Levitsky 	svm_vmrun();
3446c64f24fdSMaxim Levitsky 	report(vmcb->control.exit_code == SVM_EXIT_SHUTDOWN, "shutdown test passed");
3447c64f24fdSMaxim Levitsky }
3448c64f24fdSMaxim Levitsky 
34493f27d772SManali Shukla struct svm_test svm_tests[] = {
3450ad879127SKrish Sadhukhan 	{ "null", default_supported, default_prepare,
3451ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, null_test,
3452ad879127SKrish Sadhukhan 	  default_finished, null_check },
3453ad879127SKrish Sadhukhan 	{ "vmrun", default_supported, default_prepare,
3454ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, test_vmrun,
3455ad879127SKrish Sadhukhan 	  default_finished, check_vmrun },
3456ad879127SKrish Sadhukhan 	{ "ioio", default_supported, prepare_ioio,
3457ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, test_ioio,
3458ad879127SKrish Sadhukhan 	  ioio_finished, check_ioio },
3459ad879127SKrish Sadhukhan 	{ "vmrun intercept check", default_supported, prepare_no_vmrun_int,
3460ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, null_test, default_finished,
3461ad879127SKrish Sadhukhan 	  check_no_vmrun_int },
3462401299a5SPaolo Bonzini 	{ "rsm", default_supported,
3463401299a5SPaolo Bonzini 	  prepare_rsm_intercept, default_prepare_gif_clear,
3464401299a5SPaolo Bonzini 	  test_rsm_intercept, finished_rsm_intercept, check_rsm_intercept },
3465ad879127SKrish Sadhukhan 	{ "cr3 read intercept", default_supported,
3466ad879127SKrish Sadhukhan 	  prepare_cr3_intercept, default_prepare_gif_clear,
3467ad879127SKrish Sadhukhan 	  test_cr3_intercept, default_finished, check_cr3_intercept },
3468ad879127SKrish Sadhukhan 	{ "cr3 read nointercept", default_supported, default_prepare,
3469ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, test_cr3_intercept, default_finished,
3470ad879127SKrish Sadhukhan 	  check_cr3_nointercept },
3471ad879127SKrish Sadhukhan 	{ "cr3 read intercept emulate", smp_supported,
3472ad879127SKrish Sadhukhan 	  prepare_cr3_intercept_bypass, default_prepare_gif_clear,
3473ad879127SKrish Sadhukhan 	  test_cr3_intercept_bypass, default_finished, check_cr3_intercept },
3474ad879127SKrish Sadhukhan 	{ "dr intercept check", default_supported, prepare_dr_intercept,
3475ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished,
3476ad879127SKrish Sadhukhan 	  check_dr_intercept },
3477ad879127SKrish Sadhukhan 	{ "next_rip", next_rip_supported, prepare_next_rip,
3478ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, test_next_rip,
3479ad879127SKrish Sadhukhan 	  default_finished, check_next_rip },
3480ad879127SKrish Sadhukhan 	{ "msr intercept check", default_supported, prepare_msr_intercept,
3481ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, test_msr_intercept,
3482ad879127SKrish Sadhukhan 	  msr_intercept_finished, check_msr_intercept },
3483ad879127SKrish Sadhukhan 	{ "mode_switch", default_supported, prepare_mode_switch,
3484ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, test_mode_switch,
3485ad879127SKrish Sadhukhan 	  mode_switch_finished, check_mode_switch },
3486ad879127SKrish Sadhukhan 	{ "asid_zero", default_supported, prepare_asid_zero,
3487ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, test_asid_zero,
3488ad879127SKrish Sadhukhan 	  default_finished, check_asid_zero },
3489ad879127SKrish Sadhukhan 	{ "sel_cr0_bug", default_supported, sel_cr0_bug_prepare,
3490ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, sel_cr0_bug_test,
3491ad879127SKrish Sadhukhan 	  sel_cr0_bug_finished, sel_cr0_bug_check },
349210a65fc4SNadav Amit 	{ "tsc_adjust", tsc_adjust_supported, tsc_adjust_prepare,
3493ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, tsc_adjust_test,
3494ad879127SKrish Sadhukhan 	  default_finished, tsc_adjust_check },
3495ad879127SKrish Sadhukhan 	{ "latency_run_exit", default_supported, latency_prepare,
3496ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, latency_test,
3497ad879127SKrish Sadhukhan 	  latency_finished, latency_check },
3498f7fa53dcSPaolo Bonzini 	{ "latency_run_exit_clean", default_supported, latency_prepare,
3499f7fa53dcSPaolo Bonzini 	  default_prepare_gif_clear, latency_test,
3500f7fa53dcSPaolo Bonzini 	  latency_finished_clean, latency_check },
3501ad879127SKrish Sadhukhan 	{ "latency_svm_insn", default_supported, lat_svm_insn_prepare,
3502ad879127SKrish Sadhukhan 	  default_prepare_gif_clear, null_test,
3503ad879127SKrish Sadhukhan 	  lat_svm_insn_finished, lat_svm_insn_check },
35044b4fb247SPaolo Bonzini 	{ "exc_inject", default_supported, exc_inject_prepare,
35054b4fb247SPaolo Bonzini 	  default_prepare_gif_clear, exc_inject_test,
35064b4fb247SPaolo Bonzini 	  exc_inject_finished, exc_inject_check },
3507ad879127SKrish Sadhukhan 	{ "pending_event", default_supported, pending_event_prepare,
3508ad879127SKrish Sadhukhan 	  default_prepare_gif_clear,
3509ad879127SKrish Sadhukhan 	  pending_event_test, pending_event_finished, pending_event_check },
351085dc2aceSPaolo Bonzini 	{ "pending_event_cli", default_supported, pending_event_cli_prepare,
351185dc2aceSPaolo Bonzini 	  pending_event_cli_prepare_gif_clear,
351285dc2aceSPaolo Bonzini 	  pending_event_cli_test, pending_event_cli_finished,
351385dc2aceSPaolo Bonzini 	  pending_event_cli_check },
351485dc2aceSPaolo Bonzini 	{ "interrupt", default_supported, interrupt_prepare,
351585dc2aceSPaolo Bonzini 	  default_prepare_gif_clear, interrupt_test,
351685dc2aceSPaolo Bonzini 	  interrupt_finished, interrupt_check },
3517d4db486bSCathy Avery 	{ "nmi", default_supported, nmi_prepare,
3518d4db486bSCathy Avery 	  default_prepare_gif_clear, nmi_test,
3519d4db486bSCathy Avery 	  nmi_finished, nmi_check },
35209da1f4d8SCathy Avery 	{ "nmi_hlt", smp_supported, nmi_prepare,
35219da1f4d8SCathy Avery 	  default_prepare_gif_clear, nmi_hlt_test,
35229da1f4d8SCathy Avery 	  nmi_hlt_finished, nmi_hlt_check },
352308200397SSantosh Shukla 	{ "vnmi", vnmi_supported, vnmi_prepare,
352408200397SSantosh Shukla 	  default_prepare_gif_clear, vnmi_test,
352508200397SSantosh Shukla 	  vnmi_finished, vnmi_check },
35269c838954SCathy Avery 	{ "virq_inject", default_supported, virq_inject_prepare,
35279c838954SCathy Avery 	  default_prepare_gif_clear, virq_inject_test,
35289c838954SCathy Avery 	  virq_inject_finished, virq_inject_check },
35294b3c6114SPaolo Bonzini 	{ "virq_inject_within_shadow", default_supported, virq_inject_within_shadow_prepare,
35304b3c6114SPaolo Bonzini 	  virq_inject_within_shadow_prepare_gif_clear, virq_inject_within_shadow_test,
35314b3c6114SPaolo Bonzini 	  virq_inject_within_shadow_finished, virq_inject_within_shadow_check },
3532da338a31SMaxim Levitsky 	{ "reg_corruption", default_supported, reg_corruption_prepare,
3533da338a31SMaxim Levitsky 	  default_prepare_gif_clear, reg_corruption_test,
3534da338a31SMaxim Levitsky 	  reg_corruption_finished, reg_corruption_check },
35354770e9c8SCathy Avery 	{ "svm_init_startup_test", smp_supported, init_startup_prepare,
35364770e9c8SCathy Avery 	  default_prepare_gif_clear, null_test,
35374770e9c8SCathy Avery 	  init_startup_finished, init_startup_check },
3538d5da6dfeSCathy Avery 	{ "svm_init_intercept_test", smp_supported, init_intercept_prepare,
3539d5da6dfeSCathy Avery 	  default_prepare_gif_clear, init_intercept_test,
3540d5da6dfeSCathy Avery 	  init_intercept_finished, init_intercept_check, .on_vcpu = 2 },
35417839b0ecSKrish Sadhukhan 	{ "host_rflags", default_supported, host_rflags_prepare,
35427839b0ecSKrish Sadhukhan 	  host_rflags_prepare_gif_clear, host_rflags_test,
35437839b0ecSKrish Sadhukhan 	  host_rflags_finished, host_rflags_check },
3544f6972bd6SLara Lazier 	{ "vgif", vgif_supported, prepare_vgif_enabled,
3545f6972bd6SLara Lazier 	  default_prepare_gif_clear, test_vgif, vgif_finished,
3546f6972bd6SLara Lazier 	  vgif_check },
3547f32183f5SJim Mattson 	TEST(svm_cr4_osxsave_test),
3548ba29942cSKrish Sadhukhan 	TEST(svm_guest_state_test),
35497a57ef5dSMaxim Levitsky 	TEST(svm_vmrun_errata_test),
35500b6f6cedSKrish Sadhukhan 	TEST(svm_vmload_vmsave),
3551665f5677SKrish Sadhukhan 	TEST(svm_test_singlestep),
3552694e59baSManali Shukla 	TEST(svm_no_nm_test),
35538177dc62SManali Shukla 	TEST(svm_exception_test),
3554537d39dfSMaxim Levitsky 	TEST(svm_lbrv_test0),
3555537d39dfSMaxim Levitsky 	TEST(svm_lbrv_test1),
3556537d39dfSMaxim Levitsky 	TEST(svm_lbrv_test2),
3557537d39dfSMaxim Levitsky 	TEST(svm_lbrv_nested_test1),
3558537d39dfSMaxim Levitsky 	TEST(svm_lbrv_nested_test2),
3559c45bccfcSMaxim Levitsky 	TEST(svm_intr_intercept_mix_if),
3560c45bccfcSMaxim Levitsky 	TEST(svm_intr_intercept_mix_gif),
3561c45bccfcSMaxim Levitsky 	TEST(svm_intr_intercept_mix_gif2),
3562c45bccfcSMaxim Levitsky 	TEST(svm_intr_intercept_mix_nmi),
3563c45bccfcSMaxim Levitsky 	TEST(svm_intr_intercept_mix_smi),
3564a8503d50SMaxim Levitsky 	TEST(svm_tsc_scale_test),
35658650dffeSMaxim Levitsky 	TEST(pause_filter_test),
3566c64f24fdSMaxim Levitsky 	TEST(svm_shutdown_intercept_test),
3567ad879127SKrish Sadhukhan 	{ NULL, NULL, NULL, NULL, NULL, NULL, NULL }
3568ad879127SKrish Sadhukhan };
3569712840d5SManali Shukla 
main(int ac,char ** av)3570712840d5SManali Shukla int main(int ac, char **av)
3571712840d5SManali Shukla {
3572ade7601dSSean Christopherson 	setup_vm();
3573712840d5SManali Shukla 	return run_svm_tests(ac, av, svm_tests);
3574712840d5SManali Shukla }
3575