/* msr tests */

#include "libcflat.h"
#include "processor.h"
#include "msr.h"

struct msr_info {
	int index;
	bool is_64bit_only;
	const char *name;
	unsigned long long value;
};


#define addr_64 0x0000123456789abcULL
#define addr_ul (unsigned long)addr_64

#define MSR_TEST(msr, val, only64)	\
	{ .index = msr, .name = #msr, .value = val, .is_64bit_only = only64 }

struct msr_info msr_info[] =
{
	MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, false),
	MSR_TEST(MSR_IA32_SYSENTER_ESP, addr_ul, false),
	MSR_TEST(MSR_IA32_SYSENTER_EIP, addr_ul, false),
	// reserved: 1:2, 4:6, 8:10, 13:15, 17, 19:21, 24:33, 35:63
	MSR_TEST(MSR_IA32_MISC_ENABLE, 0x400c51889, false),
	MSR_TEST(MSR_IA32_CR_PAT, 0x07070707, false),
	MSR_TEST(MSR_FS_BASE, addr_64, true),
	MSR_TEST(MSR_GS_BASE, addr_64, true),
	MSR_TEST(MSR_KERNEL_GS_BASE, addr_64, true),
	MSR_TEST(MSR_EFER, EFER_SCE, false),
	MSR_TEST(MSR_LSTAR, addr_64, true),
	MSR_TEST(MSR_CSTAR, addr_64, true),
	MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, true),
	// MSR_IA32_DEBUGCTLMSR needs svm feature LBRV
	// MSR_VM_HSAVE_PA only AMD host
};

/* Write 'val' to the MSR, verify it reads back, then restore the original value. */
static void test_msr_rw(struct msr_info *msr, unsigned long long val)
{
	unsigned long long r, orig;

	orig = rdmsr(msr->index);
	/*
	 * Special case EFER since clearing LME/LMA is not allowed in 64-bit mode,
	 * and conversely setting those bits on 32-bit CPUs is not allowed.  Treat
	 * the desired value as extra bits to set.
	 */
	if (msr->index == MSR_EFER)
		val |= orig;
	wrmsr(msr->index, val);
	r = rdmsr(msr->index);
	wrmsr(msr->index, orig);
	if (r != val) {
		printf("testing %s: output = %#" PRIx32 ":%#" PRIx32
		       " expected = %#" PRIx32 ":%#" PRIx32 "\n", msr->name,
		       (u32)(r >> 32), (u32)r, (u32)(val >> 32), (u32)val);
	}
	report(val == r, "%s", msr->name);
}

static void test_wrmsr_fault(struct msr_info *msr, unsigned long long val)
{
	unsigned char vector = wrmsr_checking(msr->index, val);

	report(vector == GP_VECTOR,
	       "Expected #GP on WRMSR(%s, 0x%llx), got vector %d",
	       msr->name, val, vector);
}

static void test_rdmsr_fault(struct msr_info *msr)
{
	unsigned char vector = rdmsr_checking(msr->index);

	report(vector == GP_VECTOR,
	       "Expected #GP on RDMSR(%s), got vector %d", msr->name, vector);
}

int main(int ac, char **av)
{
	bool is_64bit_host = this_cpu_has(X86_FEATURE_LM);
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_info); i++) {
		if (is_64bit_host || !msr_info[i].is_64bit_only) {
			test_msr_rw(&msr_info[i], msr_info[i].value);

			/*
			 * The 64-bit only MSRs that take an address always perform
			 * canonical checks on both Intel and AMD.
			 */
			if (msr_info[i].is_64bit_only &&
			    msr_info[i].value == addr_64)
				test_wrmsr_fault(&msr_info[i], NONCANONICAL);
		} else {
			test_wrmsr_fault(&msr_info[i], msr_info[i].value);
			test_rdmsr_fault(&msr_info[i]);
		}
	}

	return report_summary();
}