/* msr tests */

#include "libcflat.h"
#include "apic.h"
#include "processor.h"
#include "msr.h"
#include <stdlib.h>

/*
 * This test supports two modes:
 * 1. Default: the msr_info array below holds the default test configurations.
 * 2. Custom: passing command line arguments makes it possible to test any MSR
 *    with any value.
 * Parameter order:
 * 1. MSR index, as a base 16 number
 * 2. value, as a base 16 number
 */
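/*
 * Example (assuming the standard kvm-unit-tests runner; the exact command
 * line below is illustrative, not prescriptive):
 *
 *   ./x86/run x86/msr.flat -append '0x174 0xfff'
 *
 * tests MSR 0x174 (IA32_SYSENTER_CS) with the value 0xfff.  Both arguments
 * are parsed with base 16, so the "0x" prefixes are optional.
 */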

struct msr_info {
	int index;
	bool is_64bit_only;
	const char *name;
	unsigned long long value;
	unsigned long long keep;
};


#define addr_64 0x0000123456789abcULL
#define addr_ul (unsigned long)addr_64

#define MSR_TEST(msr, val, ro) \
	{ .index = msr, .name = #msr, .value = val, .is_64bit_only = false, .keep = ro }
#define MSR_TEST_ONLY64(msr, val, ro) \
	{ .index = msr, .name = #msr, .value = val, .is_64bit_only = true, .keep = ro }

struct msr_info msr_info[] =
{
	MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0),
	MSR_TEST(MSR_IA32_SYSENTER_ESP, addr_ul, 0),
	MSR_TEST(MSR_IA32_SYSENTER_EIP, addr_ul, 0),
	// reserved: 1:2, 4:6, 8:10, 13:15, 17, 19:21, 24:33, 35:63
	// read-only: 7, 11, 12
	MSR_TEST(MSR_IA32_MISC_ENABLE, 0x400c50809, 0x1880),
	MSR_TEST(MSR_IA32_CR_PAT, 0x07070707, 0),
	MSR_TEST_ONLY64(MSR_FS_BASE, addr_64, 0),
	MSR_TEST_ONLY64(MSR_GS_BASE, addr_64, 0),
	MSR_TEST_ONLY64(MSR_KERNEL_GS_BASE, addr_64, 0),
	MSR_TEST(MSR_EFER, EFER_SCE, 0),
	MSR_TEST_ONLY64(MSR_LSTAR, addr_64, 0),
	MSR_TEST_ONLY64(MSR_CSTAR, addr_64, 0),
	MSR_TEST_ONLY64(MSR_SYSCALL_MASK, 0xffffffff, 0),
	// MSR_IA32_DEBUGCTLMSR needs svm feature LBRV
	// MSR_VM_HSAVE_PA only AMD host
};

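/*
 * Write 'val' to the MSR, read it back, and restore the MSR's original
 * value.  Bits set in 'keep_mask' are preserved from the original value,
 * e.g. so that the read-only and reserved bits of MSR_IA32_MISC_ENABLE
 * are never toggled.
 */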
static void __test_msr_rw(u32 msr, const char *name, unsigned long long val,
			  unsigned long long keep_mask)
{
	unsigned long long r, orig;

	orig = rdmsr(msr);
	/*
	 * Special case EFER, since clearing LME/LMA is not allowed in 64-bit
	 * mode, and conversely setting those bits on 32-bit CPUs is not
	 * allowed.  Treat the desired value as extra bits to set.
	 */
	if (msr == MSR_EFER)
		val |= orig;
	else
		val = (val & ~keep_mask) | (orig & keep_mask);

	wrmsr(msr, val);
	r = rdmsr(msr);
	wrmsr(msr, orig);

	if (r != val) {
		printf("testing %s: output = %#" PRIx32 ":%#" PRIx32
		       " expected = %#" PRIx32 ":%#" PRIx32 "\n", name,
		       (u32)(r >> 32), (u32)r, (u32)(val >> 32), (u32)val);
	}
	report(val == r, "%s", name);
}

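/* Test an MSR with no bits that need to be preserved, i.e. keep_mask = 0. */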
static void test_msr_rw(u32 msr, const char *name, unsigned long long val)
{
	__test_msr_rw(msr, name, val, 0);
}

static void test_wrmsr(u32 msr, const char *name, unsigned long long val)
{
	unsigned char vector = wrmsr_safe(msr, val);

	report(!vector,
	       "Expected success on WRMSR(%s, 0x%llx), got vector %d",
	       name, val, vector);
}

static void test_wrmsr_fault(u32 msr, const char *name, unsigned long long val)
{
	unsigned char vector = wrmsr_safe(msr, val);

	report(vector == GP_VECTOR,
	       "Expected #GP on WRMSR(%s, 0x%llx), got vector %d",
	       name, val, vector);
}

static void test_rdmsr_fault(u32 msr, const char *name)
{
	uint64_t ignored;
	unsigned char vector = rdmsr_safe(msr, &ignored);

	report(vector == GP_VECTOR,
	       "Expected #GP on RDMSR(%s), got vector %d", name, vector);
}

static void test_wrmsr_fep_fault(u32 msr, const char *name,
				 unsigned long long val)
{
	unsigned char vector = wrmsr_fep_safe(msr, val);

	report(vector == GP_VECTOR,
	       "Expected #GP on emulated WRMSR(%s, 0x%llx), got vector %d",
	       name, val, vector);
}

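/*
 * Run the read/write test if the MSR is supported in the current mode.
 * Otherwise the MSR is 64-bit only and the host is 32-bit, in which case
 * both RDMSR and WRMSR must #GP.
 */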
static void test_msr(struct msr_info *msr, bool is_64bit_host)
{
	if (is_64bit_host || !msr->is_64bit_only) {
		__test_msr_rw(msr->index, msr->name, msr->value, msr->keep);

		/*
		 * The 64-bit only MSRs that take an address always perform
		 * canonical checks on both Intel and AMD.
		 */
		if (msr->is_64bit_only && msr->value == addr_64)
			test_wrmsr_fault(msr->index, msr->name, NONCANONICAL);
	} else {
		test_wrmsr_fault(msr->index, msr->name, msr->value);
		test_rdmsr_fault(msr->index, msr->name);
	}
}

static void test_custom_msr(int ac, char **av)
{
	bool is_64bit_host = this_cpu_has(X86_FEATURE_LM);
	char msr_name[32];
	int index = strtoul(av[1], NULL, 16);
	snprintf(msr_name, sizeof(msr_name), "MSR:0x%x", index);

	struct msr_info msr = {
		.index = index,
		.name = msr_name,
		.value = strtoull(av[2], NULL, 16)
	};
	test_msr(&msr, is_64bit_host);
}

static void test_misc_msrs(void)
{
	bool is_64bit_host = this_cpu_has(X86_FEATURE_LM);
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_info); i++)
		test_msr(&msr_info[i], is_64bit_host);
}

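/*
 * Exercise the per-bank machine check MSRs for every bank enumerated by
 * MCG_CAP[7:0], then verify that accesses to the remaining architecturally
 * possible banks (up to 32) fault.
 */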
static void test_mce_msrs(void)
{
	bool is_64bit_host = this_cpu_has(X86_FEATURE_LM);
	unsigned int nr_mce_banks;
	char msr_name[32];
	int i;

	nr_mce_banks = rdmsr(MSR_IA32_MCG_CAP) & 0xff;
	for (i = 0; i < nr_mce_banks; i++) {
		snprintf(msr_name, sizeof(msr_name), "MSR_IA32_MC%u_CTL", i);
		test_msr_rw(MSR_IA32_MCx_CTL(i), msr_name, 0);
		test_msr_rw(MSR_IA32_MCx_CTL(i), msr_name, -1ull);
		test_wrmsr_fault(MSR_IA32_MCx_CTL(i), msr_name, NONCANONICAL);

		snprintf(msr_name, sizeof(msr_name), "MSR_IA32_MC%u_STATUS", i);
		test_msr_rw(MSR_IA32_MCx_STATUS(i), msr_name, 0);
		/*
		 * STATUS MSRs can only be written with '0' (to clear the MSR),
		 * except on AMD-based systems with bit 18 set in MSR_K7_HWCR.
		 * That bit is not architectural and should not be set by
		 * default by KVM or by the VMM (though this might fail if run
		 * on bare metal).
		 */
		test_wrmsr_fault(MSR_IA32_MCx_STATUS(i), msr_name, 1);

		snprintf(msr_name, sizeof(msr_name), "MSR_IA32_MC%u_ADDR", i);
		test_msr_rw(MSR_IA32_MCx_ADDR(i), msr_name, 0);
		test_msr_rw(MSR_IA32_MCx_ADDR(i), msr_name, -1ull);
		/*
		 * The ADDR is a physical address, and all bits are writable on
		 * 64-bit hosts.  Don't test the negative case, as KVM doesn't
		 * enforce checks on bits 63:36 for 32-bit hosts.  The behavior
		 * depends on the underlying hardware, e.g. a 32-bit guest on a
		 * 64-bit host may observe 64-bit values in the ADDR MSRs.
		 */
		if (is_64bit_host)
			test_msr_rw(MSR_IA32_MCx_ADDR(i), msr_name, NONCANONICAL);

		snprintf(msr_name, sizeof(msr_name), "MSR_IA32_MC%u_MISC", i);
		test_msr_rw(MSR_IA32_MCx_MISC(i), msr_name, 0);
		test_msr_rw(MSR_IA32_MCx_MISC(i), msr_name, -1ull);
		test_msr_rw(MSR_IA32_MCx_MISC(i), msr_name, NONCANONICAL);
	}

	/*
	 * The theoretical maximum number of MCE banks is 32 (on Intel CPUs,
	 * without jumping to a new base address), as the last unclaimed MSR is
	 * 0x479; 0x480 begins the VMX MSRs.  Verify that accesses to
	 * theoretically legal, unsupported MSRs fault.
	 */
	for (i = nr_mce_banks; i < 32; i++) {
		snprintf(msr_name, sizeof(msr_name), "MSR_IA32_MC%u_CTL", i);
		test_rdmsr_fault(MSR_IA32_MCx_CTL(i), msr_name);
		test_wrmsr_fault(MSR_IA32_MCx_CTL(i), msr_name, 0);

		snprintf(msr_name, sizeof(msr_name), "MSR_IA32_MC%u_STATUS", i);
		test_rdmsr_fault(MSR_IA32_MCx_STATUS(i), msr_name);
		test_wrmsr_fault(MSR_IA32_MCx_STATUS(i), msr_name, 0);

		snprintf(msr_name, sizeof(msr_name), "MSR_IA32_MC%u_ADDR", i);
		test_rdmsr_fault(MSR_IA32_MCx_ADDR(i), msr_name);
		test_wrmsr_fault(MSR_IA32_MCx_ADDR(i), msr_name, 0);

		snprintf(msr_name, sizeof(msr_name), "MSR_IA32_MC%u_MISC", i);
		test_rdmsr_fault(MSR_IA32_MCx_MISC(i), msr_name);
		test_wrmsr_fault(MSR_IA32_MCx_MISC(i), msr_name, 0);
	}
}

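/*
 * Sweep the x2APIC register space (MSRs 0x800 - 0x8ff) and verify that
 * reads and writes either succeed or #GP according to each register's
 * semantics.  With x2APIC disabled, every access must #GP.
 */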
static void __test_x2apic_msrs(bool x2apic_enabled)
{
	enum x2apic_reg_semantics semantics;
	unsigned int index, i;
	char msr_name[32];

	for (i = 0; i < 0x1000; i += 0x10) {
		index = x2apic_msr(i);
		snprintf(msr_name, sizeof(msr_name), "x2APIC MSR 0x%x", index);

		if (x2apic_enabled)
			semantics = get_x2apic_reg_semantics(i);
		else
			semantics = X2APIC_INVALID;

		if (!(semantics & X2APIC_WRITABLE))
			test_wrmsr_fault(index, msr_name, 0);

		if (!(semantics & X2APIC_READABLE))
			test_rdmsr_fault(index, msr_name);

		/*
		 * Except for ICR, the only 64-bit x2APIC register, bits 63:32
		 * are reserved.  ICR is testable if x2APIC is disabled.
		 */
		if (!x2apic_enabled || i != APIC_ICR)
			test_wrmsr_fault(index, msr_name, -1ull);

		/* Bits 31:8 of self-IPI are reserved. */
		if (i == APIC_SELF_IPI) {
			test_wrmsr_fault(index, "x2APIC Self-IPI", 0x100);
			test_wrmsr_fault(index, "x2APIC Self-IPI", 0xff00);
			test_wrmsr_fault(index, "x2APIC Self-IPI", 0xff000000ull);
		}

		if (semantics == X2APIC_RW)
			__test_msr_rw(index, msr_name, 0, -1ull);
		else if (semantics == X2APIC_WO)
			wrmsr(index, 0);
		else if (semantics == X2APIC_RO)
			report(!(rdmsr(index) >> 32),
			       "Expected bits 63:32 == 0 for '%s'", msr_name);
	}
}

static void test_x2apic_msrs(void)
{
	reset_apic();

	__test_x2apic_msrs(false);

	if (!enable_x2apic())
		return;

	__test_x2apic_msrs(true);
}

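/*
 * PRED_CMD and FLUSH_CMD are write-only command MSRs: reads must always
 * #GP, and writes must #GP unless the corresponding CPUID feature is
 * enumerated.
 */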
static void test_cmd_msrs(void)
{
	int i;

	test_rdmsr_fault(MSR_IA32_PRED_CMD, "PRED_CMD");
	if (this_cpu_has(X86_FEATURE_SPEC_CTRL) ||
	    this_cpu_has(X86_FEATURE_AMD_IBPB)) {
		test_wrmsr(MSR_IA32_PRED_CMD, "PRED_CMD", 0);
		test_wrmsr(MSR_IA32_PRED_CMD, "PRED_CMD", PRED_CMD_IBPB);
	} else {
		test_wrmsr_fault(MSR_IA32_PRED_CMD, "PRED_CMD", 0);
		test_wrmsr_fault(MSR_IA32_PRED_CMD, "PRED_CMD", PRED_CMD_IBPB);
	}

	test_rdmsr_fault(MSR_IA32_FLUSH_CMD, "FLUSH_CMD");
	if (this_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		test_wrmsr(MSR_IA32_FLUSH_CMD, "FLUSH_CMD", 0);
		test_wrmsr(MSR_IA32_FLUSH_CMD, "FLUSH_CMD", L1D_FLUSH);
	} else {
		test_wrmsr_fault(MSR_IA32_FLUSH_CMD, "FLUSH_CMD", 0);
		test_wrmsr_fault(MSR_IA32_FLUSH_CMD, "FLUSH_CMD", L1D_FLUSH);
	}

	/* Setting any reserved bit (63:1) of FLUSH_CMD must #GP when emulated. */
	if (is_fep_available()) {
		for (i = 1; i < 64; i++)
			test_wrmsr_fep_fault(MSR_IA32_FLUSH_CMD, "FLUSH_CMD", BIT_ULL(i));
	}
}

int main(int ac, char **av)
{
	/*
	 * If the user provided an MSR+value, test exactly that and skip all
	 * built-in testcases.
	 */
	if (ac == 3) {
		test_custom_msr(ac, av);
	} else {
		test_misc_msrs();
		test_mce_msrs();
		test_x2apic_msrs();
		test_cmd_msrs();
	}

	return report_summary();
}