#include "libcflat.h"
#include "apic.h"
#include "processor.h"
#include "msr.h"
#include "x86/vm.h"
#include "asm/setup.h"

#ifdef __x86_64__
enum TEST_REGISTER {
	TEST_REGISTER_GDTR_BASE,
	TEST_REGISTER_IDTR_BASE,
	TEST_REGISTER_TR_BASE,
	TEST_REGISTER_LDT_BASE,
	TEST_REGISTER_MSR /* upper 32 bits = MSR address */
};

static u64 get_test_register_value(u64 test_register)
{
	struct descriptor_table_ptr dt_ptr;
	u32 msr = test_register >> 32;

	/*
	 * Note: the values reported for the LDT and TSS bases might not
	 * reflect the bases the CPU currently uses, because the (hidden)
	 * base values can't be read back directly.
	 */
	switch ((u32)test_register) {
	case TEST_REGISTER_GDTR_BASE:
		sgdt(&dt_ptr);
		return dt_ptr.base;
	case TEST_REGISTER_IDTR_BASE:
		sidt(&dt_ptr);
		return dt_ptr.base;
	case TEST_REGISTER_TR_BASE:
		return get_gdt_entry_base(get_tss_descr());
	case TEST_REGISTER_LDT_BASE:
		return get_gdt_entry_base(get_ldt_descr());
	case TEST_REGISTER_MSR:
		return rdmsr(msr);
	default:
		assert(0);
		return 0;
	}
}

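/*
 * How a register write is attempted: UNSAFE executes the plain instruction
 * (an unexpected fault crashes the test), SAFE uses the exception-catching
 * *_safe() wrappers, and FEP additionally prepends KVM's force-emulation
 * prefix so that the write is handled by KVM's instruction emulator.
 */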
enum SET_REGISTER_MODE {
	SET_REGISTER_MODE_UNSAFE,
	SET_REGISTER_MODE_SAFE,
	SET_REGISTER_MODE_FEP,
};

static bool set_test_register_value(u64 test_register, int test_mode, u64 value)
{
	struct descriptor_table_ptr dt_ptr;
	u32 msr = test_register >> 32;
	u16 sel;

	switch ((u32)test_register) {
	case TEST_REGISTER_GDTR_BASE:
		sgdt(&dt_ptr);
		dt_ptr.base = value;

		switch (test_mode) {
		case SET_REGISTER_MODE_UNSAFE:
			lgdt(&dt_ptr);
			return true;
		case SET_REGISTER_MODE_SAFE:
			return lgdt_safe(&dt_ptr) == 0;
		case SET_REGISTER_MODE_FEP:
			return lgdt_fep_safe(&dt_ptr) == 0;
		}
	case TEST_REGISTER_IDTR_BASE:
		sidt(&dt_ptr);
		dt_ptr.base = value;

		switch (test_mode) {
		case SET_REGISTER_MODE_UNSAFE:
			lidt(&dt_ptr);
			return true;
		case SET_REGISTER_MODE_SAFE:
			return lidt_safe(&dt_ptr) == 0;
		case SET_REGISTER_MODE_FEP:
			return lidt_fep_safe(&dt_ptr) == 0;
		}
	case TEST_REGISTER_TR_BASE:
		sel = str();
		set_gdt_entry_base(sel, value);
		clear_tss_busy(sel);

		switch (test_mode) {
		case SET_REGISTER_MODE_UNSAFE:
			ltr(sel);
			return true;
		case SET_REGISTER_MODE_SAFE:
			return ltr_safe(sel) == 0;
		case SET_REGISTER_MODE_FEP:
			return ltr_fep_safe(sel) == 0;
		}

	case TEST_REGISTER_LDT_BASE:
		sel = sldt();
		set_gdt_entry_base(sel, value);

		switch (test_mode) {
		case SET_REGISTER_MODE_UNSAFE:
			lldt(sel);
			return true;
		case SET_REGISTER_MODE_SAFE:
			return lldt_safe(sel) == 0;
		case SET_REGISTER_MODE_FEP:
			return lldt_fep_safe(sel) == 0;
		}
	case TEST_REGISTER_MSR:
		switch (test_mode) {
		case SET_REGISTER_MODE_UNSAFE:
			wrmsr(msr, value);
			return true;
		case SET_REGISTER_MODE_SAFE:
			return wrmsr_safe(msr, value) == 0;
		case SET_REGISTER_MODE_FEP:
			return wrmsr_fep_safe(msr, value) == 0;
		}
	default:
		assert(false);
		return false;
	}
}

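/*
 * Write test_value to the target register/MSR, check that the write faults
 * or succeeds as expected, read the value back where that is possible, and
 * finally restore the old value.
 */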
static void test_register_write(const char *register_name, u64 test_register,
				bool force_emulation, u64 test_value,
				bool expect_success)
{
	int test_mode = (force_emulation ? SET_REGISTER_MODE_FEP : SET_REGISTER_MODE_SAFE);
	u64 old_value, expected_value;
	bool success;

	old_value = get_test_register_value(test_register);
	expected_value = expect_success ? test_value : old_value;

	/*
	 * TODO: A successful write to MSR_GS_BASE corrupts it, and that
	 * breaks the wrmsr_safe macro (it uses GS for per-CPU data).
	 */
	if ((test_register >> 32) == MSR_GS_BASE && expect_success)
		test_mode = SET_REGISTER_MODE_UNSAFE;

	/* Write the test value */
	success = set_test_register_value(test_register, test_mode, test_value);
	report(success == expect_success,
	       "Write to %s with value %lx did %s%s as expected",
	       register_name, test_value,
	       success == expect_success ? "" : "NOT ",
	       (expect_success ? "succeed" : "fail"));

	/*
	 * Check that the value was really written.  Don't check TR and LDTR,
	 * because their (hidden) bases can't be read back directly.
	 */
	if (success == expect_success &&
	    test_register != TEST_REGISTER_TR_BASE &&
	    test_register != TEST_REGISTER_LDT_BASE) {
		u64 new_value = get_test_register_value(test_register);

		report(new_value == expected_value,
		       "%s set to %lx as expected (actual value %lx)",
		       register_name, expected_value, new_value);
	}

	/*
	 * Restore the old value directly, without the safety wrapper, to
	 * avoid test crashes caused by temporarily clobbered GDT/IDT/etc.
	 * bases.
	 */
	set_test_register_value(test_register, SET_REGISTER_MODE_UNSAFE, old_value);
}

static void test_register(const char *register_name, u64 test_register,
			  bool force_emulation)
{
	/* A canonical 48-bit value should always succeed */
	test_register_write(register_name, test_register, force_emulation,
			    CANONICAL_48_VAL, true);

	/* A 57-bit-canonical value works on CPUs that *support* LA57 */
	test_register_write(register_name, test_register, force_emulation,
			    CANONICAL_57_VAL, this_cpu_has(X86_FEATURE_LA57));

	/* A value that isn't even 57-bit canonical should never work */
	test_register_write(register_name, test_register, force_emulation,
			    NONCANONICAL, false);
}
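/*
 * Convenience wrappers: the low 32 bits of the test_register token select
 * the TEST_REGISTER_* case; for MSRs, the MSR index is packed into the
 * upper 32 bits (see TEST_REGISTER_MSR above).
 */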
#define TEST_REGISTER(name, force_emulation) \
	test_register(#name, TEST_REGISTER_ ##name, force_emulation)

#define __TEST_MSR(msr_name, address, force_emulation) \
	test_register(msr_name, ((u64)TEST_REGISTER_MSR | \
				 ((u64)(address) << 32)), force_emulation)

#define TEST_MSR(msr_name, force_emulation) \
	__TEST_MSR(#msr_name, msr_name, force_emulation)

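/*
 * INVPCID type 0 (individual-address invalidation) #GPs if the supplied
 * linear address is non-canonical, which is what this helper exercises.
 */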
static void __test_invpcid(u64 test_value, bool expect_success)
{
	struct invpcid_desc desc;
	bool success;

	memset(&desc, 0, sizeof(desc));
	desc.addr = test_value;
	desc.pcid = 10; /* Arbitrary number */

	success = invpcid_safe(0, &desc) == 0;

	report(success == expect_success,
	       "Tested invpcid type 0 with 0x%lx value - %s",
	       test_value, success ? "success" : "failure");
}

static void test_invpcid(void)
{
	/*
	 * Note that this tests KVM's behavior only when ept=0, because
	 * otherwise INVPCID is not intercepted.
	 *
	 * Also, KVM's x86 emulator doesn't support INVPCID, so testing
	 * INVPCID with FEP is pointless.
	 */
	assert(write_cr4_safe(read_cr4() | X86_CR4_PCIDE) == 0);

	__test_invpcid(CANONICAL_48_VAL, true);
	__test_invpcid(CANONICAL_57_VAL, this_cpu_has(X86_FEATURE_LA57));
	__test_invpcid(NONCANONICAL, false);
}

static void __test_canonical_checks(bool force_emulation)
{
	printf("\nRunning canonical test %s forced emulation:\n",
	       force_emulation ? "with" : "without");

	/* Direct DT addresses */
	TEST_REGISTER(GDTR_BASE, force_emulation);
	TEST_REGISTER(IDTR_BASE, force_emulation);

	/* Indirect DT addresses */
	TEST_REGISTER(TR_BASE, force_emulation);
	TEST_REGISTER(LDT_BASE, force_emulation);

	/* x86_64 extended segment bases */
	TEST_MSR(MSR_FS_BASE, force_emulation);
	TEST_MSR(MSR_GS_BASE, force_emulation);
	TEST_MSR(MSR_KERNEL_GS_BASE, force_emulation);

	/*
	 * The SYSENTER ESP/EIP MSRs have canonical checks only on Intel,
	 * because only Intel extended these instructions to 64 bit.
	 *
	 * KVM's emulation, however, ignores canonical checks for these MSRs,
	 * even on Intel, to support cross-vendor migration.  This includes
	 * nested virtualization.
	 *
	 * Thus, the checks only work when run on bare metal, without forced
	 * emulation.  Unfortunately, there is no foolproof way to detect bare
	 * metal from within this test.  E.g. checking HYPERVISOR in CPUID is
	 * useless because that only detects if _this_ code is running in a VM,
	 * it doesn't detect if the "host" is itself a VM.
	 *
	 * TODO: Enable testing of SYSENTER MSRs on bare metal.
	 */
	if (false && is_intel() && !force_emulation) {
		TEST_MSR(MSR_IA32_SYSENTER_ESP, force_emulation);
		TEST_MSR(MSR_IA32_SYSENTER_EIP, force_emulation);
	} else {
		report_skip("skipping MSR_IA32_SYSENTER_ESP/MSR_IA32_SYSENTER_EIP %s",
			    is_intel() ? "due to known errata in KVM" : "due to AMD host");
	}

	/* SYSCALL target MSRs */
	TEST_MSR(MSR_CSTAR, force_emulation);
	TEST_MSR(MSR_LSTAR, force_emulation);

	/* PEBS DS area */
	if (this_cpu_has(X86_FEATURE_DS))
		TEST_MSR(MSR_IA32_DS_AREA, force_emulation);
	else
		report_skip("Skipping MSR_IA32_DS_AREA - PEBS not supported");

	/* PT filter ranges */
	if (this_cpu_has(X86_FEATURE_INTEL_PT)) {
		int n_ranges = cpuid_indexed(0x14, 0x1).a & 0x7;
		int i;

		for (i = 0; i < n_ranges; i++) {
			wrmsr(MSR_IA32_RTIT_CTL, 1ull << (RTIT_CTL_ADDR0_OFFSET + i * 4));
			__TEST_MSR("MSR_IA32_RTIT_ADDR_A",
				   MSR_IA32_RTIT_ADDR0_A + i * 2, force_emulation);
			__TEST_MSR("MSR_IA32_RTIT_ADDR_B",
				   MSR_IA32_RTIT_ADDR0_B + i * 2, force_emulation);
		}
	} else {
		report_skip("Skipping MSR_IA32_RTIT_ADDR* - Intel PT is not supported");
	}

	/* Test that INVPCID type 0 #GPs correctly */
	if (this_cpu_has(X86_FEATURE_INVPCID))
		test_invpcid();
	else
		report_skip("Skipping INVPCID - not supported");
}

static void test_canonical_checks(void)
{
	__test_canonical_checks(false);

	if (is_fep_available())
		__test_canonical_checks(true);
	else
		report_skip("Force emulation prefix not enabled");
}
#endif

int main(int ac, char **av)
{
	int vector = write_cr4_safe(read_cr4() | X86_CR4_LA57);
	bool is_64bit = rdmsr(MSR_EFER) & EFER_LMA;
	int expected = !is_64bit && this_cpu_has(X86_FEATURE_LA57) ? 0 : GP_VECTOR;

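	/*
	 * Toggling CR4.LA57 is expected to succeed only outside of 64-bit
	 * (long) mode and only if the CPU supports LA57; in all other cases
	 * the write must #GP.
	 */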
	report(vector == expected, "%s when CR4.LA57 %ssupported (in %u-bit mode)",
	       expected ? "#GP" : "No fault",
	       this_cpu_has(X86_FEATURE_LA57) ? "un" : "", is_64bit ? 64 : 32);

#ifdef __x86_64__
	/* Set a dummy LDTR so the LDT base tests have a descriptor to modify */
	set_gdt_entry(FIRST_SPARE_SEL, 0xffaabb, 0xffff, 0x82, 0);
	lldt(FIRST_SPARE_SEL);

	test_canonical_checks();

	if (is_64bit && this_cpu_has(X86_FEATURE_LA57)) {
		printf("Switching to 5 level paging mode and rerunning canonical tests.\n");
		setup_5level_page_table();
		test_canonical_checks();
	}
#endif

	return report_summary();
}