#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"
#include "delay.h"
#include "util.h"
#include "x86/usermode.h"
#include "vmalloc.h"

#define SVM_EXIT_MAX_DR_INTERCEPT	0x3f

#define LATENCY_RUNS   1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

static void null_test(struct svm_test *test)
{
}

static bool null_check(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

static void prepare_no_vmrun_int(struct svm_test *test)
{
	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct svm_test *test)
{
	asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb)));
}

static bool check_vmrun(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_rsm_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept |= 1 << INTERCEPT_RSM;
	vmcb->control.intercept_exceptions |= (1ULL << UD_VECTOR);
}

static void test_rsm_intercept(struct svm_test *test)
{
	asm volatile ("rsm" : : : "memory");
}

static bool check_rsm_intercept(struct svm_test *test)
{
	return get_test_stage(test) == 2;
}

static bool finished_rsm_intercept(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_RSM) {
			report_fail("VMEXIT not due to rsm. Exit reason 0x%x",
				    vmcb->control.exit_code);
			return true;
		}
		vmcb->control.intercept &= ~(1 << INTERCEPT_RSM);
		inc_test_stage(test);
		break;

	case 1:
		if (vmcb->control.exit_code != SVM_EXIT_EXCP_BASE + UD_VECTOR) {
			report_fail("VMEXIT not due to #UD. Exit reason 0x%x",
				    vmcb->control.exit_code);
			return true;
		}
		vmcb->save.rip += 2;
		inc_test_stage(test);
		break;

	default:
		return true;
	}
	return get_test_stage(test) == 2;
}

static void prepare_cr3_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct svm_test *test)
{
	asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct svm_test *test)
{
	return null_check(test) && test->scratch == read_cr3();
}

static void corrupt_cr3_intercept_bypass(void *_test)
{
	struct svm_test *test = _test;
	extern volatile u32 mmio_insn;

	while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
		pause();
	pause();
	pause();
	pause();
	mmio_insn = 0x90d8200f; // mov %cr3, %rax; nop
}
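
/*
 * Rough sketch of what the next three functions exercise (my reading of the
 * code, not an authoritative description): CPU 1 runs
 * corrupt_cr3_intercept_bypass() and patches the guest's "mmio_insn" site
 * from a harmless memory access into "mov %cr3, %rax; nop" while CPU 0 is
 * already executing the guest.  The CR3 read intercept is enabled, so the
 * freshly patched MOV-from-CR3 is presumably still expected to trigger a
 * SVM_EXIT_READ_CR3 vmexit; test->scratch is the handshake between the two
 * CPUs (1 = guest waiting, 2 = patch applied).
 */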

static void prepare_cr3_intercept_bypass(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_cr_read |= 1 << 3;
	on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct svm_test *test)
{
	ulong a = 0xa0000;

	test->scratch = 1;
	while (test->scratch != 2)
		barrier();

	asm volatile ("mmio_insn: mov %0, (%0); nop"
		      : "+a"(a) : : "memory");
	test->scratch = a;
}

static void prepare_dr_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_dr_read = 0xff;
	vmcb->control.intercept_dr_write = 0xff;
}

static void test_dr_intercept(struct svm_test *test)
{
	unsigned int i, failcnt = 0;

	/* Loop testing debug register reads */
	for (i = 0; i < 8; i++) {

		switch (i) {
		case 0:
			asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 1:
			asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 2:
			asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 3:
			asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 4:
			asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 5:
			asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 6:
			asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 7:
			asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
			break;
		}

		if (test->scratch != i) {
			report_fail("dr%u read intercept", i);
			failcnt++;
		}
	}

	/* Loop testing debug register writes */
	for (i = 0; i < 8; i++) {

		switch (i) {
		case 0:
			asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
			break;
		case 1:
			asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
			break;
		case 2:
			asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
			break;
		case 3:
			asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
			break;
		case 4:
			asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
			break;
		case 5:
			asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
			break;
		case 6:
			asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
			break;
		case 7:
			asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
			break;
		}

		if (test->scratch != i) {
			report_fail("dr%u write intercept", i);
			failcnt++;
		}
	}

	test->scratch = failcnt;
}

static bool dr_intercept_finished(struct svm_test *test)
{
	ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0);

	/* Only expect DR intercepts */
	if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
		return true;

	/*
	 * Compute debug register number.
	 * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
	 * Programmer's Manual Volume 2 - System Programming:
	 * http://support.amd.com/TechDocs/24593.pdf
	 * there are 16 VMEXIT codes each for DR read and write.
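	 *
	 * As a worked example (exit code values per the same appendix):
	 * SVM_EXIT_READ_DR0 is 0x20 and SVM_EXIT_WRITE_DR0 is 0x30, so a
	 * write to DR3 exits with code 0x33, giving n = 0x13 and n % 16 = 3,
	 * i.e. the same DR number for the read and the write case.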
	 */
	test->scratch = (n % 16);

	/* Jump over MOV instruction */
	vmcb->save.rip += 3;

	return false;
}

static bool check_dr_intercept(struct svm_test *test)
{
	return !test->scratch;
}

static bool next_rip_supported(void)
{
	return this_cpu_has(X86_FEATURE_NRIPS);
}

static void prepare_next_rip(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}


static void test_next_rip(struct svm_test *test)
{
	asm volatile ("rdtsc\n\t"
		      ".globl exp_next_rip\n\t"
		      "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct svm_test *test)
{
	extern char exp_next_rip;
	unsigned long address = (unsigned long)&exp_next_rip;

	return address == vmcb->control.next_rip;
}

extern u8 *msr_bitmap;

static bool is_x2apic;

static void prepare_msr_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);

	memset(msr_bitmap, 0, MSR_BITMAP_SIZE);

	is_x2apic = is_x2apic_enabled();
}

#define SVM_MSRPM_BYTES_PER_RANGE	2048
#define SVM_BITS_PER_MSR		2
#define SVM_MSRS_PER_BYTE		4
#define SVM_MSRS_PER_RANGE		8192
#define SVM_MSRPM_OFFSET_MASK		(SVM_MSRS_PER_RANGE - 1)

static int get_msrpm_bit_nr(u32 msr)
{
	int range_nr;

	switch (msr & ~SVM_MSRPM_OFFSET_MASK) {
	case 0:
		range_nr = 0;
		break;
	case 0xc0000000:
		range_nr = 1;
		break;
	case 0xc0010000:
		range_nr = 2;
		break;
	default:
		return -1;
	}

	return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +
		(msr & SVM_MSRPM_OFFSET_MASK) * SVM_BITS_PER_MSR;
}
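
/*
 * A worked example of the mapping implemented above: the MSRPM packs 2 bits
 * per MSR (read intercept, then write intercept), 8192 MSRs per 2 KiB range,
 * with ranges for MSRs 0x0-0x1fff, 0xc0000000-0xc0001fff and
 * 0xc0010000-0xc0011fff.  So for EFER (0xc0000080): range 1, offset 0x80,
 * bit number 1 * 2048 * 8 + 0x80 * 2 = 16640 for the read-intercept bit and
 * 16641 for the write-intercept bit.
 */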

static void __test_msr_intercept(struct svm_test *test)
{
	u64 val, arb_val = 0xef8056791234abcd; /* Arbitrary value */
	int vector;
	u32 msr;

	for (msr = 0; msr <= 0xc0012000; msr++) {
		if (msr == 0xC0010131 /* MSR_SEV_STATUS */) {
			/*
			 * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
			 * Programmer's Manual volume 2 - System Programming:
			 * http://support.amd.com/TechDocs/24593.pdf
			 * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
			 */
			continue;
		}

		/*
		 * Test one MSR just before and after each range, but otherwise
		 * skip the gaps between supported MSR ranges.
		 */
		if (msr == 0x2000 + 1)
			msr = 0xc0000000 - 1;
		else if (msr == 0xc0002000 + 1)
			msr = 0xc0010000 - 1;

		test->scratch = msr;
		vmmcall();

		test->scratch = -1;

		vector = rdmsr_safe(msr, &val);
		if (vector)
			report_fail("Expected RDMSR(0x%x) to #VMEXIT, got exception '%u'",
				    msr, vector);
		else if (test->scratch != msr)
			report_fail("Expected RDMSR(0x%x) to #VMEXIT, got scratch '%ld'",
				    msr, test->scratch);

		test->scratch = BIT_ULL(32) | msr;
		vmmcall();

		/*
		 * Poor man's approach to generate a value that
		 * seems arbitrary each time around the loop.
		 */
		arb_val += (arb_val << 1);

		test->scratch = -1;

		vector = wrmsr_safe(msr, arb_val);
		if (vector)
			report_fail("Expected WRMSR(0x%x) to #VMEXIT, got exception '%u'",
				    msr, vector);
		else if (test->scratch != arb_val)
			report_fail("Expected WRMSR(0x%x) to #VMEXIT, got scratch '%ld' (wanted %ld)",
				    msr, test->scratch, arb_val);

		test->scratch = BIT_ULL(33) | msr;
		vmmcall();

		if (get_msrpm_bit_nr(msr) < 0) {
			report(msr == 0x2000 ||
			       msr == 0xc0000000 - 1 || msr == 0xc0002000 ||
			       msr == 0xc0010000 - 1 || msr == 0xc0012000,
			       "MSR 0x%x not covered by an MSRPM range", msr);
			continue;
		}

		/*
		 * Verify that disabling interception for MSRs within an MSRPM
		 * range behaves as expected.  Simply eat exceptions, the goal
		 * is to verify interception, not MSR emulation/virtualization.
		 */
		test->scratch = -1;
		(void)rdmsr_safe(msr, &val);
		if (test->scratch != -1)
			report_fail("RDMSR 0x%x, Wanted -1 (no intercept), got 0x%lx",
				    msr, test->scratch);

		test->scratch = BIT_ULL(34) | msr;
		vmmcall();

		test->scratch = -1;
		(void)wrmsr_safe(msr, val);
		if (test->scratch != -1)
			report_fail("WRMSR 0x%x, Wanted -1 (no intercept), got 0x%lx",
				    msr, test->scratch);

		test->scratch = BIT_ULL(35) | msr;
		vmmcall();
	}
}

static void test_msr_intercept(struct svm_test *test)
{
	__test_msr_intercept(test);

	test->scratch = -2;
	vmmcall();

	__test_msr_intercept(test);

	test->scratch = -3;
}
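
/*
 * Rough sketch of the guest/host handshake used above (inferred from the
 * code, see msr_intercept_finished() below): the guest encodes the MSR under
 * test in the low 32 bits of test->scratch and a step number in bits 32 and
 * up, then VMMCALLs so the host can flip the matching MSRPM bits:
 *   step 0           - set the read-intercept bit before the intercepted RDMSR
 *   step 1 (BIT 32)  - restore it and set the write-intercept bit
 *   step 2 (BIT 33)  - restore that and clear the read bit for the
 *                      pass-through RDMSR
 *   step 4 (BIT 34)  - clear the write bit (and quiesce x2APIC MSRs)
 *   step 8 (BIT 35)  - restore state before moving to the next MSR
 * scratch == -2 asks the host to redo the pass with an all-ones bitmap, and
 * scratch == -3 terminates the test.
 */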

static void restore_msrpm_bit(int bit_nr, bool set)
{
	if (set)
		__set_bit(bit_nr, msr_bitmap);
	else
		__clear_bit(bit_nr, msr_bitmap);
}

static bool msr_intercept_finished(struct svm_test *test)
{
	u32 exit_code = vmcb->control.exit_code;
	bool all_set = false;
	int bit_nr;

	if (exit_code == SVM_EXIT_VMMCALL) {
		u32 msr = test->scratch & -1u;

		vmcb->save.rip += 3;

		if (test->scratch == -3)
			return true;

		if (test->scratch == -2) {
			all_set = true;
			memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
			return false;
		}

		bit_nr = get_msrpm_bit_nr(msr);
		if (bit_nr < 0)
			return false;

		switch (test->scratch >> 32) {
		case 0:
			__set_bit(bit_nr, msr_bitmap);
			return false;
		case 1:
			restore_msrpm_bit(bit_nr, all_set);
			__set_bit(bit_nr + 1, msr_bitmap);
			return false;
		case 2:
			restore_msrpm_bit(bit_nr + 1, all_set);
			__clear_bit(bit_nr, msr_bitmap);
			return false;
		case 4:
			restore_msrpm_bit(bit_nr, all_set);
			__clear_bit(bit_nr + 1, msr_bitmap);
			/*
			 * Disable x2APIC so that WRMSR faults instead of doing
			 * random things, e.g. sending IPIs.
			 */
			if (is_x2apic && msr >= 0x800 && msr <= 0x8ff)
				reset_apic();
			return false;
		case 8:
			restore_msrpm_bit(bit_nr + 1, all_set);
			if (is_x2apic && msr >= 0x800 && msr <= 0x8ff)
				enable_x2apic();
			return false;
		default:
			return true;
		}
	}

	if (exit_code != SVM_EXIT_MSR) {
		report_fail("Wanted MSR VM-Exit, got reason 0x%x", exit_code);
		return true;
	}

	/* Jump over RDMSR/WRMSR instruction */
	vmcb->save.rip += 2;

	/*
	 * Test whether the intercept was for RDMSR/WRMSR.
	 * For RDMSR, test->scratch is set to the MSR index;
	 *      RCX holds the MSR index.
	 * For WRMSR, test->scratch is set to the MSR value;
	 *      RDX holds the upper 32 bits of the MSR value,
	 *      while RAX hold its lower 32 bits.
	 */
	if (vmcb->control.exit_info_1)
		test->scratch = ((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff));
	else
		test->scratch = get_regs().rcx;

	return false;
}

static bool check_msr_intercept(struct svm_test *test)
{
	return (test->scratch == -3);
}

static void prepare_mode_switch(struct svm_test *test)
{
	vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
		| (1ULL << UD_VECTOR)
		| (1ULL << DF_VECTOR)
		| (1ULL << PF_VECTOR);
	test->scratch = 0;
}

static void test_mode_switch(struct svm_test *test)
{
	asm volatile("	cli\n"
		     "	ljmp *1f\n" /* jump to 32-bit code segment */
		     "1:\n"
		     "	.long 2f\n"
		     "	.long " xstr(KERNEL_CS32) "\n"
		     ".code32\n"
		     "2:\n"
		     "	movl %%cr0, %%eax\n"
		     "	btcl $31, %%eax\n" /* clear PG */
		     "	movl %%eax, %%cr0\n"
		     "	movl $0xc0000080, %%ecx\n" /* EFER */
		     "	rdmsr\n"
		     "	btcl $8, %%eax\n" /* clear LME */
		     "	wrmsr\n"
		     "	movl %%cr4, %%eax\n"
		     "	btcl $5, %%eax\n" /* clear PAE */
		     "	movl %%eax, %%cr4\n"
		     "	movw %[ds16], %%ax\n"
		     "	movw %%ax, %%ds\n"
		     "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
		     ".code16\n"
		     "3:\n"
		     "	movl %%cr0, %%eax\n"
		     "	btcl $0, %%eax\n" /* clear PE */
		     "	movl %%eax, %%cr0\n"
		     "	ljmpl $0, $4f\n" /* jump to real-mode */
		     "4:\n"
		     "	vmmcall\n"
		     "	movl %%cr0, %%eax\n"
		     "	btsl $0, %%eax\n" /* set PE */
		     "	movl %%eax, %%cr0\n"
		     "	ljmpl %[cs32], $5f\n" /* back to protected mode */
		     ".code32\n"
		     "5:\n"
		     "	movl %%cr4, %%eax\n"
		     "	btsl $5, %%eax\n" /* set PAE */
		     "	movl %%eax, %%cr4\n"
		     "	movl $0xc0000080, %%ecx\n" /* EFER */
		     "	rdmsr\n"
		     "	btsl $8, %%eax\n" /* set LME */
		     "	wrmsr\n"
		     "	movl %%cr0, %%eax\n"
		     "	btsl $31, %%eax\n" /* set PG */
		     "	movl %%eax, %%cr0\n"
		     "	ljmpl %[cs64], $6f\n" /* back to long mode */
		     ".code64\n\t"
		     "6:\n"
		     "	vmmcall\n"
		     :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
		        [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		     : "rax", "rbx", "rcx", "rdx", "memory");
}

static bool mode_switch_finished(struct svm_test *test)
{
	u64 cr0, cr4, efer;

	cr0  = vmcb->save.cr0;
	cr4  = vmcb->save.cr4;
	efer = vmcb->save.efer;

	/* Only expect VMMCALL intercepts */
	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
		return true;

	/* Jump over VMMCALL instruction */
	vmcb->save.rip += 3;

	/* Do sanity checks */
	switch (test->scratch) {
	case 0:
		/* Test should be in real mode now - check for this */
		if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
		    (cr4  & 0x00000020) || /* CR4.PAE */
		    (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
			return true;
		break;
	case 2:
		/* Test should be back in long-mode now - check for this */
		if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
		    ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
		    ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
			return true;
		break;
	}

	/* one step forward */
	test->scratch += 1;

	return test->scratch == 2;
}

static bool check_mode_switch(struct svm_test *test)
{
	return test->scratch == 2;
}

extern u8 *io_bitmap;

static void prepare_ioio(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
	test->scratch = 0;
	memset(io_bitmap, 0, 8192);
	io_bitmap[8192] = 0xFF;
}
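
/*
 * Note on the layout assumed above (per the APM's description of the I/O
 * permissions map): the IOPM holds one bit per port for ports 0x0-0xffff
 * (8 KiB), plus extra bytes so that a multi-byte access at port 0xffff
 * consults bits past the 8 KiB boundary instead of wrapping around to port 0.
 * That is why prepare_ioio() also sets io_bitmap[8192] and why test_ioio()
 * below pokes ports that straddle page and map boundaries.
 */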

static void test_ioio(struct svm_test *test)
{
	// stage 0, test IO pass
	inb(0x5000);
	outb(0x0, 0x5000);
	if (get_test_stage(test) != 0)
		goto fail;

	// test IO width, in/out
	io_bitmap[0] = 0xFF;
	inc_test_stage(test);
	inb(0x0);
	if (get_test_stage(test) != 2)
		goto fail;

	outw(0x0, 0x0);
	if (get_test_stage(test) != 3)
		goto fail;

	inl(0x0);
	if (get_test_stage(test) != 4)
		goto fail;

	// test low/high IO port
	io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (get_test_stage(test) != 5)
		goto fail;

	io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
	inw(0x9000);
	if (get_test_stage(test) != 6)
		goto fail;

	// test partial pass
	io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
	inl(0x4FFF);
	if (get_test_stage(test) != 7)
		goto fail;

	// test across pages
	inc_test_stage(test);
	inl(0x7FFF);
	if (get_test_stage(test) != 8)
		goto fail;

	inc_test_stage(test);
	io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
	inl(0x7FFF);
	if (get_test_stage(test) != 10)
		goto fail;

	io_bitmap[0] = 0;
	inl(0xFFFF);
	if (get_test_stage(test) != 11)
		goto fail;

	io_bitmap[0] = 0xFF;
	io_bitmap[8192] = 0;
	inl(0xFFFF);
	inc_test_stage(test);
	if (get_test_stage(test) != 12)
		goto fail;

	return;

fail:
	report_fail("stage %d", get_test_stage(test));
	test->scratch = -1;
}

static bool ioio_finished(struct svm_test *test)
{
	unsigned port, size;

	/* Only expect IOIO intercepts */
	if (vmcb->control.exit_code == SVM_EXIT_VMMCALL)
		return true;

	if (vmcb->control.exit_code != SVM_EXIT_IOIO)
		return true;

	/* one step forward */
	test->scratch += 1;

	port = vmcb->control.exit_info_1 >> 16;
	size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

	while (size--) {
		io_bitmap[port / 8] &= ~(1 << (port & 7));
		port++;
	}

	return false;
}

static bool check_ioio(struct svm_test *test)
{
	memset(io_bitmap, 0, 8193);
	return test->scratch != -1;
}

static void prepare_asid_zero(struct svm_test *test)
{
	vmcb->control.asid = 0;
}

static void test_asid_zero(struct svm_test *test)
{
	asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct svm_test *test)
{
	return true;
}

static void sel_cr0_bug_test(struct svm_test *test)
{
	unsigned long cr0;

	/* read cr0, set CD, and write back */
	cr0 = read_cr0();
	cr0 |= (1UL << 30);
	write_cr0(cr0);

	/*
	 * If we are here the test failed, not sure what to do now because we
	 * are not in guest-mode anymore so we can't trigger an intercept.
	 * Trigger a triple-fault for now.
	 */
	report_fail("sel_cr0 test. Can not recover from this - exiting");
	exit(report_summary());
}

static bool sel_cr0_bug_check(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

#define TSC_ADJUST_VALUE    (1ll << 32)
#define TSC_OFFSET_VALUE    (~0ull << 48)
static bool ok;

static bool tsc_adjust_supported(void)
{
	return this_cpu_has(X86_FEATURE_TSC_ADJUST);
}

static void tsc_adjust_prepare(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.tsc_offset = TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok = adjust == -TSC_ADJUST_VALUE;
}

static void tsc_adjust_test(struct svm_test *test)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok &= adjust == -TSC_ADJUST_VALUE;

	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok &= adjust <= -2 * TSC_ADJUST_VALUE;

	uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
	ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;

	uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
	ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
}

static bool tsc_adjust_check(struct svm_test *test)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);

	wrmsr(MSR_IA32_TSC_ADJUST, 0);
	return ok && adjust <= -2 * TSC_ADJUST_VALUE;
}


static u64 guest_tsc_delay_value;
/* number of bits to shift tsc right for stable result */
#define TSC_SHIFT 24
#define TSC_SCALE_ITERATIONS 10

static void svm_tsc_scale_guest(struct svm_test *test)
{
	u64 start_tsc = rdtsc();

	while (rdtsc() - start_tsc < guest_tsc_delay_value)
		cpu_relax();
}
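
/*
 * Sketch of the scaling math used below (the 8.32 fixed-point format of
 * MSR_AMD64_TSC_RATIO is per the APM; the rest follows from the code): the
 * guest busy-waits for "duration << TSC_SHIFT" guest cycles scaled by
 * tsc_scale, while the host measures the same interval in unscaled host
 * cycles, so after shifting right by TSC_SHIFT both should agree.  A ratio
 * of 1.5, for example, is programmed as 1.5 * 2^32 = 0x1_8000_0000.
 */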

static void svm_tsc_scale_run_testcase(u64 duration,
				       double tsc_scale, u64 tsc_offset)
{
	u64 start_tsc, actual_duration;

	guest_tsc_delay_value = (duration << TSC_SHIFT) * tsc_scale;

	test_set_guest(svm_tsc_scale_guest);
	vmcb->control.tsc_offset = tsc_offset;
	wrmsr(MSR_AMD64_TSC_RATIO, (u64)(tsc_scale * (1ULL << 32)));

	start_tsc = rdtsc();

	if (svm_vmrun() != SVM_EXIT_VMMCALL)
		report_fail("unexpected vm exit code 0x%x", vmcb->control.exit_code);

	actual_duration = (rdtsc() - start_tsc) >> TSC_SHIFT;

	report(duration == actual_duration, "tsc delay (expected: %lu, actual: %lu)",
	       duration, actual_duration);
}

static void svm_tsc_scale_test(void)
{
	int i;

	if (!tsc_scale_supported()) {
		report_skip("TSC scale not supported in the guest");
		return;
	}

	report(rdmsr(MSR_AMD64_TSC_RATIO) == TSC_RATIO_DEFAULT,
	       "initial TSC scale ratio");

	for (i = 0 ; i < TSC_SCALE_ITERATIONS; i++) {

		double tsc_scale = (double)(rdrand() % 100 + 1) / 10;
		int duration = rdrand() % 50 + 1;
		u64 tsc_offset = rdrand();

		report_info("duration=%d, tsc_scale=%d, tsc_offset=%ld",
			    duration, (int)(tsc_scale * 100), tsc_offset);

		svm_tsc_scale_run_testcase(duration, tsc_scale, tsc_offset);
	}

	svm_tsc_scale_run_testcase(50, 255, rdrand());
	svm_tsc_scale_run_testcase(50, 0.0001, rdrand());
}

static void latency_prepare(struct svm_test *test)
{
	default_prepare(test);
	runs = LATENCY_RUNS;
	latvmrun_min = latvmexit_min = -1ULL;
	latvmrun_max = latvmexit_max = 0;
	vmrun_sum = vmexit_sum = 0;
	tsc_start = rdtsc();
}

static void latency_test(struct svm_test *test)
{
	u64 cycles;

start:
	tsc_end = rdtsc();

	cycles = tsc_end - tsc_start;

	if (cycles > latvmrun_max)
		latvmrun_max = cycles;

	if (cycles < latvmrun_min)
		latvmrun_min = cycles;

	vmrun_sum += cycles;

	tsc_start = rdtsc();

	asm volatile ("vmmcall" : : : "memory");
	goto start;
}

static bool latency_finished(struct svm_test *test)
{
	u64 cycles;

	tsc_end = rdtsc();

	cycles = tsc_end - tsc_start;

	if (cycles > latvmexit_max)
		latvmexit_max = cycles;

	if (cycles < latvmexit_min)
		latvmexit_min = cycles;

	vmexit_sum += cycles;

	vmcb->save.rip += 3;

	runs -= 1;

	tsc_end = rdtsc();

	return runs == 0;
}

static bool latency_finished_clean(struct svm_test *test)
{
	vmcb->control.clean = VMCB_CLEAN_ALL;
	return latency_finished(test);
}

static bool latency_check(struct svm_test *test)
{
	printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
	       latvmrun_min, vmrun_sum / LATENCY_RUNS);
	printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
	       latvmexit_min, vmexit_sum / LATENCY_RUNS);
	return true;
}

static void lat_svm_insn_prepare(struct svm_test *test)
{
	default_prepare(test);
	runs = LATENCY_RUNS;
	latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
	latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
	vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	u64 cycles;

	for ( ; runs != 0; runs--) {
		tsc_start = rdtsc();
		asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
		cycles = rdtsc() - tsc_start;
		if (cycles > latvmload_max)
			latvmload_max = cycles;
		if (cycles < latvmload_min)
			latvmload_min = cycles;
		vmload_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
		cycles = rdtsc() - tsc_start;
		if (cycles > latvmsave_max)
			latvmsave_max = cycles;
		if (cycles < latvmsave_min)
			latvmsave_min = cycles;
		vmsave_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("stgi\n\t");
		cycles = rdtsc() - tsc_start;
		if (cycles > latstgi_max)
			latstgi_max = cycles;
		if (cycles < latstgi_min)
			latstgi_min = cycles;
		stgi_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("clgi\n\t");
		cycles = rdtsc() - tsc_start;
		if (cycles > latclgi_max)
			latclgi_max = cycles;
		if (cycles < latclgi_min)
			latclgi_min = cycles;
		clgi_sum += cycles;
	}

	tsc_end = rdtsc();

	return true;
}

static bool lat_svm_insn_check(struct svm_test *test)
{
	printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
	       latvmload_min, vmload_sum / LATENCY_RUNS);
	printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
	       latvmsave_min, vmsave_sum / LATENCY_RUNS);
	printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
	       latstgi_min, stgi_sum / LATENCY_RUNS);
	printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
	       latclgi_min, clgi_sum / LATENCY_RUNS);
	return true;
}

/*
 * Report failures from SVM guest code, and on failure, set the stage to -1 and
 * do VMMCALL to terminate the test (host side must treat -1 as "finished").
 * TODO: fix the tests that don't play nice with a straight report, e.g. the
 * V_TPR test fails if report() is invoked.
 */
#define report_svm_guest(cond, test, fmt, args...)	\
do {							\
	if (!(cond)) {					\
		report_fail(fmt, ##args);		\
		set_test_stage(test, -1);		\
		vmmcall();				\
	}						\
} while (0)
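
/*
 * Typical use (mirrors the calls further down in this file): from guest code,
 *   report_svm_guest(timer_fired, test, "direct interrupt while running guest");
 * On success it is a no-op; on failure it logs the message, marks the stage
 * as -1 and VMMCALLs back to the host so the "finished" callback can end the
 * test.
 */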

bool pending_event_ipi_fired;
bool pending_event_guest_run;

static void pending_event_ipi_isr(isr_regs_t *regs)
{
	pending_event_ipi_fired = true;
	eoi();
}

static void pending_event_prepare(struct svm_test *test)
{
	int ipi_vector = 0xf1;

	default_prepare(test);

	pending_event_ipi_fired = false;

	handle_irq(ipi_vector, pending_event_ipi_isr);

	pending_event_guest_run = false;

	vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
	vmcb->control.int_ctl |= V_INTR_MASKING_MASK;

	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
		       APIC_DM_FIXED | ipi_vector, 0);

	set_test_stage(test, 0);
}

static void pending_event_test(struct svm_test *test)
{
	pending_event_guest_run = true;
}

static bool pending_event_finished(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_INTR) {
Exit reason 0x%x", 1126096cf7feSPaolo Bonzini vmcb->control.exit_code); 1127ad879127SKrish Sadhukhan return true; 1128ad879127SKrish Sadhukhan } 1129ad879127SKrish Sadhukhan 1130096cf7feSPaolo Bonzini vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR); 1131096cf7feSPaolo Bonzini vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 1132ad879127SKrish Sadhukhan 1133ad879127SKrish Sadhukhan if (pending_event_guest_run) { 1134198dfd0eSJanis Schoetterl-Glausch report_fail("Guest ran before host received IPI\n"); 1135ad879127SKrish Sadhukhan return true; 1136ad879127SKrish Sadhukhan } 1137ad879127SKrish Sadhukhan 1138e4007e62SMaxim Levitsky sti_nop_cli(); 1139ad879127SKrish Sadhukhan 1140ad879127SKrish Sadhukhan if (!pending_event_ipi_fired) { 1141198dfd0eSJanis Schoetterl-Glausch report_fail("Pending interrupt not dispatched after IRQ enabled\n"); 1142ad879127SKrish Sadhukhan return true; 1143ad879127SKrish Sadhukhan } 1144ad879127SKrish Sadhukhan break; 1145ad879127SKrish Sadhukhan 1146ad879127SKrish Sadhukhan case 1: 1147ad879127SKrish Sadhukhan if (!pending_event_guest_run) { 1148198dfd0eSJanis Schoetterl-Glausch report_fail("Guest did not resume when no interrupt\n"); 1149ad879127SKrish Sadhukhan return true; 1150ad879127SKrish Sadhukhan } 1151ad879127SKrish Sadhukhan break; 1152ad879127SKrish Sadhukhan } 1153ad879127SKrish Sadhukhan 1154ad879127SKrish Sadhukhan inc_test_stage(test); 1155ad879127SKrish Sadhukhan 1156ad879127SKrish Sadhukhan return get_test_stage(test) == 2; 1157ad879127SKrish Sadhukhan } 1158ad879127SKrish Sadhukhan 1159ad879127SKrish Sadhukhan static bool pending_event_check(struct svm_test *test) 1160ad879127SKrish Sadhukhan { 1161ad879127SKrish Sadhukhan return get_test_stage(test) == 2; 1162ad879127SKrish Sadhukhan } 1163ad879127SKrish Sadhukhan 116485dc2aceSPaolo Bonzini static void pending_event_cli_prepare(struct svm_test *test) 1165ad879127SKrish Sadhukhan { 1166ad879127SKrish Sadhukhan default_prepare(test); 1167ad879127SKrish Sadhukhan 1168ad879127SKrish Sadhukhan pending_event_ipi_fired = false; 1169ad879127SKrish Sadhukhan 1170ad879127SKrish Sadhukhan handle_irq(0xf1, pending_event_ipi_isr); 1171ad879127SKrish Sadhukhan 1172ad879127SKrish Sadhukhan apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 1173ad879127SKrish Sadhukhan APIC_DM_FIXED | 0xf1, 0); 1174ad879127SKrish Sadhukhan 1175ad879127SKrish Sadhukhan set_test_stage(test, 0); 1176ad879127SKrish Sadhukhan } 1177ad879127SKrish Sadhukhan 117885dc2aceSPaolo Bonzini static void pending_event_cli_prepare_gif_clear(struct svm_test *test) 1179ad879127SKrish Sadhukhan { 1180ad879127SKrish Sadhukhan asm("cli"); 1181ad879127SKrish Sadhukhan } 1182ad879127SKrish Sadhukhan 118385dc2aceSPaolo Bonzini static void pending_event_cli_test(struct svm_test *test) 1184ad879127SKrish Sadhukhan { 1185493d27d4SSean Christopherson report_svm_guest(!pending_event_ipi_fired, test, 1186493d27d4SSean Christopherson "IRQ should NOT be delivered while IRQs disabled"); 1187ad879127SKrish Sadhukhan 118885dc2aceSPaolo Bonzini /* VINTR_MASKING is zero. This should cause the IPI to fire. 
*/ 1189e4007e62SMaxim Levitsky sti_nop_cli(); 1190ad879127SKrish Sadhukhan 1191493d27d4SSean Christopherson report_svm_guest(pending_event_ipi_fired, test, 1192493d27d4SSean Christopherson "IRQ should be delivered after enabling IRQs"); 1193ad879127SKrish Sadhukhan vmmcall(); 1194ad879127SKrish Sadhukhan 119585dc2aceSPaolo Bonzini /* 119685dc2aceSPaolo Bonzini * Now VINTR_MASKING=1, but no interrupt is pending so 119785dc2aceSPaolo Bonzini * the VINTR interception should be clear in VMCB02. Check 119885dc2aceSPaolo Bonzini * that L0 did not leave a stale VINTR in the VMCB. 119985dc2aceSPaolo Bonzini */ 1200e4007e62SMaxim Levitsky sti_nop_cli(); 1201ad879127SKrish Sadhukhan } 1202ad879127SKrish Sadhukhan 120385dc2aceSPaolo Bonzini static bool pending_event_cli_finished(struct svm_test *test) 1204ad879127SKrish Sadhukhan { 1205493d27d4SSean Christopherson report_svm_guest(vmcb->control.exit_code == SVM_EXIT_VMMCALL, test, 1206493d27d4SSean Christopherson "Wanted VMMCALL VM-Exit, got exit reason 0x%x", 1207096cf7feSPaolo Bonzini vmcb->control.exit_code); 1208ad879127SKrish Sadhukhan 1209ad879127SKrish Sadhukhan switch (get_test_stage(test)) { 1210ad879127SKrish Sadhukhan case 0: 1211096cf7feSPaolo Bonzini vmcb->save.rip += 3; 1212ad879127SKrish Sadhukhan 1213ad879127SKrish Sadhukhan pending_event_ipi_fired = false; 1214ad879127SKrish Sadhukhan 1215096cf7feSPaolo Bonzini vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 1216ad879127SKrish Sadhukhan 121785dc2aceSPaolo Bonzini /* Now entering again with VINTR_MASKING=1. */ 1218ad879127SKrish Sadhukhan apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 1219ad879127SKrish Sadhukhan APIC_DM_FIXED | 0xf1, 0); 1220ad879127SKrish Sadhukhan 1221ad879127SKrish Sadhukhan break; 1222ad879127SKrish Sadhukhan 1223ad879127SKrish Sadhukhan case 1: 1224ad879127SKrish Sadhukhan if (pending_event_ipi_fired == true) { 1225198dfd0eSJanis Schoetterl-Glausch report_fail("Interrupt triggered by guest"); 1226ad879127SKrish Sadhukhan return true; 1227ad879127SKrish Sadhukhan } 1228ad879127SKrish Sadhukhan 1229e4007e62SMaxim Levitsky sti_nop_cli(); 1230ad879127SKrish Sadhukhan 1231ad879127SKrish Sadhukhan if (pending_event_ipi_fired != true) { 1232198dfd0eSJanis Schoetterl-Glausch report_fail("Interrupt not triggered by host"); 1233ad879127SKrish Sadhukhan return true; 1234ad879127SKrish Sadhukhan } 1235ad879127SKrish Sadhukhan 1236ad879127SKrish Sadhukhan break; 1237ad879127SKrish Sadhukhan 1238ad879127SKrish Sadhukhan default: 1239ad879127SKrish Sadhukhan return true; 1240ad879127SKrish Sadhukhan } 1241ad879127SKrish Sadhukhan 1242ad879127SKrish Sadhukhan inc_test_stage(test); 1243ad879127SKrish Sadhukhan 1244ad879127SKrish Sadhukhan return get_test_stage(test) == 2; 1245ad879127SKrish Sadhukhan } 1246ad879127SKrish Sadhukhan 124785dc2aceSPaolo Bonzini static bool pending_event_cli_check(struct svm_test *test) 1248ad879127SKrish Sadhukhan { 1249ad879127SKrish Sadhukhan return get_test_stage(test) == 2; 1250ad879127SKrish Sadhukhan } 1251ad879127SKrish Sadhukhan 125285dc2aceSPaolo Bonzini #define TIMER_VECTOR 222 125385dc2aceSPaolo Bonzini 125485dc2aceSPaolo Bonzini static volatile bool timer_fired; 125585dc2aceSPaolo Bonzini 125685dc2aceSPaolo Bonzini static void timer_isr(isr_regs_t *regs) 125785dc2aceSPaolo Bonzini { 125885dc2aceSPaolo Bonzini timer_fired = true; 125985dc2aceSPaolo Bonzini apic_write(APIC_EOI, 0); 126085dc2aceSPaolo Bonzini } 126185dc2aceSPaolo Bonzini 126285dc2aceSPaolo Bonzini static void interrupt_prepare(struct svm_test *test) 126385dc2aceSPaolo 
Bonzini { 126485dc2aceSPaolo Bonzini default_prepare(test); 126585dc2aceSPaolo Bonzini handle_irq(TIMER_VECTOR, timer_isr); 126685dc2aceSPaolo Bonzini timer_fired = false; 126785dc2aceSPaolo Bonzini set_test_stage(test, 0); 126885dc2aceSPaolo Bonzini } 126985dc2aceSPaolo Bonzini 127085dc2aceSPaolo Bonzini static void interrupt_test(struct svm_test *test) 127185dc2aceSPaolo Bonzini { 127285dc2aceSPaolo Bonzini long long start, loops; 127385dc2aceSPaolo Bonzini 1274a2c7dff7SMaxim Levitsky apic_setup_timer(TIMER_VECTOR, APIC_LVT_TIMER_PERIODIC); 1275787f0aebSMaxim Levitsky sti(); 127698eb2a39SPaolo Bonzini apic_start_timer(1000); 1277a2c7dff7SMaxim Levitsky 127885dc2aceSPaolo Bonzini for (loops = 0; loops < 10000000 && !timer_fired; loops++) 127985dc2aceSPaolo Bonzini asm volatile ("nop"); 128085dc2aceSPaolo Bonzini 1281493d27d4SSean Christopherson report_svm_guest(timer_fired, test, 1282493d27d4SSean Christopherson "direct interrupt while running guest"); 128385dc2aceSPaolo Bonzini 1284a2c7dff7SMaxim Levitsky apic_stop_timer(); 1285787f0aebSMaxim Levitsky cli(); 128685dc2aceSPaolo Bonzini vmmcall(); 128785dc2aceSPaolo Bonzini 128885dc2aceSPaolo Bonzini timer_fired = false; 128998eb2a39SPaolo Bonzini apic_start_timer(1000); 129085dc2aceSPaolo Bonzini for (loops = 0; loops < 10000000 && !timer_fired; loops++) 129185dc2aceSPaolo Bonzini asm volatile ("nop"); 129285dc2aceSPaolo Bonzini 1293493d27d4SSean Christopherson report_svm_guest(timer_fired, test, 1294493d27d4SSean Christopherson "intercepted interrupt while running guest"); 129585dc2aceSPaolo Bonzini 1296787f0aebSMaxim Levitsky sti(); 1297a2c7dff7SMaxim Levitsky apic_stop_timer(); 1298787f0aebSMaxim Levitsky cli(); 129985dc2aceSPaolo Bonzini 130085dc2aceSPaolo Bonzini timer_fired = false; 130185dc2aceSPaolo Bonzini start = rdtsc(); 1302a2c7dff7SMaxim Levitsky apic_start_timer(1000000); 1303a3001422SOliver Upton safe_halt(); 130485dc2aceSPaolo Bonzini 1305493d27d4SSean Christopherson report_svm_guest(timer_fired, test, "direct interrupt + hlt"); 1306493d27d4SSean Christopherson report(rdtsc() - start > 10000, "IRQ arrived after expected delay"); 130785dc2aceSPaolo Bonzini 1308a2c7dff7SMaxim Levitsky apic_stop_timer(); 1309787f0aebSMaxim Levitsky cli(); 131085dc2aceSPaolo Bonzini vmmcall(); 131185dc2aceSPaolo Bonzini 131285dc2aceSPaolo Bonzini timer_fired = false; 131385dc2aceSPaolo Bonzini start = rdtsc(); 1314a2c7dff7SMaxim Levitsky apic_start_timer(1000000); 131585dc2aceSPaolo Bonzini asm volatile ("hlt"); 131685dc2aceSPaolo Bonzini 1317493d27d4SSean Christopherson report_svm_guest(timer_fired, test, "intercepted interrupt + hlt"); 1318493d27d4SSean Christopherson report(rdtsc() - start > 10000, "IRQ arrived after expected delay"); 131985dc2aceSPaolo Bonzini 1320a2c7dff7SMaxim Levitsky apic_cleanup_timer(); 132185dc2aceSPaolo Bonzini } 132285dc2aceSPaolo Bonzini 132385dc2aceSPaolo Bonzini static bool interrupt_finished(struct svm_test *test) 132485dc2aceSPaolo Bonzini { 132585dc2aceSPaolo Bonzini switch (get_test_stage(test)) { 132685dc2aceSPaolo Bonzini case 0: 132785dc2aceSPaolo Bonzini case 2: 1328096cf7feSPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1329198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. 
Exit reason 0x%x", 1330096cf7feSPaolo Bonzini vmcb->control.exit_code); 133185dc2aceSPaolo Bonzini return true; 133285dc2aceSPaolo Bonzini } 1333096cf7feSPaolo Bonzini vmcb->save.rip += 3; 133485dc2aceSPaolo Bonzini 1335096cf7feSPaolo Bonzini vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1336096cf7feSPaolo Bonzini vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 133785dc2aceSPaolo Bonzini break; 133885dc2aceSPaolo Bonzini 133985dc2aceSPaolo Bonzini case 1: 134085dc2aceSPaolo Bonzini case 3: 1341096cf7feSPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_INTR) { 1342198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to intr intercept. Exit reason 0x%x", 1343096cf7feSPaolo Bonzini vmcb->control.exit_code); 134485dc2aceSPaolo Bonzini return true; 134585dc2aceSPaolo Bonzini } 134685dc2aceSPaolo Bonzini 1347e4007e62SMaxim Levitsky sti_nop_cli(); 134885dc2aceSPaolo Bonzini 1349096cf7feSPaolo Bonzini vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR); 1350096cf7feSPaolo Bonzini vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 135185dc2aceSPaolo Bonzini break; 135285dc2aceSPaolo Bonzini 135385dc2aceSPaolo Bonzini case 4: 135485dc2aceSPaolo Bonzini break; 135585dc2aceSPaolo Bonzini 135685dc2aceSPaolo Bonzini default: 135785dc2aceSPaolo Bonzini return true; 135885dc2aceSPaolo Bonzini } 135985dc2aceSPaolo Bonzini 136085dc2aceSPaolo Bonzini inc_test_stage(test); 136185dc2aceSPaolo Bonzini 136285dc2aceSPaolo Bonzini return get_test_stage(test) == 5; 136385dc2aceSPaolo Bonzini } 136485dc2aceSPaolo Bonzini 136585dc2aceSPaolo Bonzini static bool interrupt_check(struct svm_test *test) 136685dc2aceSPaolo Bonzini { 136785dc2aceSPaolo Bonzini return get_test_stage(test) == 5; 136885dc2aceSPaolo Bonzini } 136985dc2aceSPaolo Bonzini 1370d4db486bSCathy Avery static volatile bool nmi_fired; 1371d4db486bSCathy Avery 13724a1207f6SMaxim Levitsky static void nmi_handler(struct ex_regs *regs) 1373d4db486bSCathy Avery { 1374d4db486bSCathy Avery nmi_fired = true; 1375d4db486bSCathy Avery } 1376d4db486bSCathy Avery 1377d4db486bSCathy Avery static void nmi_prepare(struct svm_test *test) 1378d4db486bSCathy Avery { 1379d4db486bSCathy Avery default_prepare(test); 1380d4db486bSCathy Avery nmi_fired = false; 13814a1207f6SMaxim Levitsky handle_exception(NMI_VECTOR, nmi_handler); 1382d4db486bSCathy Avery set_test_stage(test, 0); 1383d4db486bSCathy Avery } 1384d4db486bSCathy Avery 1385d4db486bSCathy Avery static void nmi_test(struct svm_test *test) 1386d4db486bSCathy Avery { 1387d4db486bSCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 1388d4db486bSCathy Avery 1389493d27d4SSean Christopherson report_svm_guest(nmi_fired, test, "direct NMI while running guest"); 1390d4db486bSCathy Avery 1391d4db486bSCathy Avery vmmcall(); 1392d4db486bSCathy Avery 1393d4db486bSCathy Avery nmi_fired = false; 1394d4db486bSCathy Avery 1395d4db486bSCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 1396d4db486bSCathy Avery 1397493d27d4SSean Christopherson report_svm_guest(nmi_fired, test, "intercepted pending NMI delivered to guest"); 1398d4db486bSCathy Avery } 1399d4db486bSCathy Avery 1400d4db486bSCathy Avery static bool nmi_finished(struct svm_test *test) 1401d4db486bSCathy Avery { 1402d4db486bSCathy Avery switch (get_test_stage(test)) { 1403d4db486bSCathy Avery case 0: 1404d4db486bSCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1405198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. 
Exit reason 0x%x", 1406d4db486bSCathy Avery vmcb->control.exit_code); 1407d4db486bSCathy Avery return true; 1408d4db486bSCathy Avery } 1409d4db486bSCathy Avery vmcb->save.rip += 3; 1410d4db486bSCathy Avery 1411d4db486bSCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 1412d4db486bSCathy Avery break; 1413d4db486bSCathy Avery 1414d4db486bSCathy Avery case 1: 1415d4db486bSCathy Avery if (vmcb->control.exit_code != SVM_EXIT_NMI) { 1416198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x", 1417d4db486bSCathy Avery vmcb->control.exit_code); 1418d4db486bSCathy Avery return true; 1419d4db486bSCathy Avery } 1420d4db486bSCathy Avery 14215c3582f0SJanis Schoetterl-Glausch report_pass("NMI intercept while running guest"); 1422d4db486bSCathy Avery break; 1423d4db486bSCathy Avery 1424d4db486bSCathy Avery case 2: 1425d4db486bSCathy Avery break; 1426d4db486bSCathy Avery 1427d4db486bSCathy Avery default: 1428d4db486bSCathy Avery return true; 1429d4db486bSCathy Avery } 1430d4db486bSCathy Avery 1431d4db486bSCathy Avery inc_test_stage(test); 1432d4db486bSCathy Avery 1433d4db486bSCathy Avery return get_test_stage(test) == 3; 1434d4db486bSCathy Avery } 1435d4db486bSCathy Avery 1436d4db486bSCathy Avery static bool nmi_check(struct svm_test *test) 1437d4db486bSCathy Avery { 1438d4db486bSCathy Avery return get_test_stage(test) == 3; 1439d4db486bSCathy Avery } 1440d4db486bSCathy Avery 14419da1f4d8SCathy Avery #define NMI_DELAY 100000000ULL 14429da1f4d8SCathy Avery 14439da1f4d8SCathy Avery static void nmi_message_thread(void *_test) 14449da1f4d8SCathy Avery { 14459da1f4d8SCathy Avery struct svm_test *test = _test; 14469da1f4d8SCathy Avery 14479da1f4d8SCathy Avery while (get_test_stage(test) != 1) 14489da1f4d8SCathy Avery pause(); 14499da1f4d8SCathy Avery 14509da1f4d8SCathy Avery delay(NMI_DELAY); 14519da1f4d8SCathy Avery 14529da1f4d8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); 14539da1f4d8SCathy Avery 14549da1f4d8SCathy Avery while (get_test_stage(test) != 2) 14559da1f4d8SCathy Avery pause(); 14569da1f4d8SCathy Avery 14579da1f4d8SCathy Avery delay(NMI_DELAY); 14589da1f4d8SCathy Avery 14599da1f4d8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); 14609da1f4d8SCathy Avery } 14619da1f4d8SCathy Avery 14629da1f4d8SCathy Avery static void nmi_hlt_test(struct svm_test *test) 14639da1f4d8SCathy Avery { 14649da1f4d8SCathy Avery long long start; 14659da1f4d8SCathy Avery 14669da1f4d8SCathy Avery on_cpu_async(1, nmi_message_thread, test); 14679da1f4d8SCathy Avery 14689da1f4d8SCathy Avery start = rdtsc(); 14699da1f4d8SCathy Avery 14709da1f4d8SCathy Avery set_test_stage(test, 1); 14719da1f4d8SCathy Avery 14729da1f4d8SCathy Avery asm volatile ("hlt"); 14739da1f4d8SCathy Avery 1474493d27d4SSean Christopherson report_svm_guest(nmi_fired, test, "direct NMI + hlt"); 1475493d27d4SSean Christopherson report(rdtsc() - start > NMI_DELAY, "direct NMI after expected delay"); 14769da1f4d8SCathy Avery 14779da1f4d8SCathy Avery nmi_fired = false; 14789da1f4d8SCathy Avery 14799da1f4d8SCathy Avery vmmcall(); 14809da1f4d8SCathy Avery 14819da1f4d8SCathy Avery start = rdtsc(); 14829da1f4d8SCathy Avery 14839da1f4d8SCathy Avery set_test_stage(test, 2); 14849da1f4d8SCathy Avery 14859da1f4d8SCathy Avery asm volatile ("hlt"); 14869da1f4d8SCathy Avery 1487493d27d4SSean Christopherson report_svm_guest(nmi_fired, test, "intercepted NMI + hlt"); 1488493d27d4SSean Christopherson report(rdtsc() - start > NMI_DELAY, 
"intercepted NMI after expected delay"); 14899da1f4d8SCathy Avery 14909da1f4d8SCathy Avery set_test_stage(test, 3); 14919da1f4d8SCathy Avery } 14929da1f4d8SCathy Avery 14939da1f4d8SCathy Avery static bool nmi_hlt_finished(struct svm_test *test) 14949da1f4d8SCathy Avery { 14959da1f4d8SCathy Avery switch (get_test_stage(test)) { 14969da1f4d8SCathy Avery case 1: 14979da1f4d8SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1498198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 14999da1f4d8SCathy Avery vmcb->control.exit_code); 15009da1f4d8SCathy Avery return true; 15019da1f4d8SCathy Avery } 15029da1f4d8SCathy Avery vmcb->save.rip += 3; 15039da1f4d8SCathy Avery 15049da1f4d8SCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 15059da1f4d8SCathy Avery break; 15069da1f4d8SCathy Avery 15079da1f4d8SCathy Avery case 2: 15089da1f4d8SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_NMI) { 1509198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x", 15109da1f4d8SCathy Avery vmcb->control.exit_code); 15119da1f4d8SCathy Avery return true; 15129da1f4d8SCathy Avery } 15139da1f4d8SCathy Avery 15145c3582f0SJanis Schoetterl-Glausch report_pass("NMI intercept while running guest"); 15159da1f4d8SCathy Avery break; 15169da1f4d8SCathy Avery 15179da1f4d8SCathy Avery case 3: 15189da1f4d8SCathy Avery break; 15199da1f4d8SCathy Avery 15209da1f4d8SCathy Avery default: 15219da1f4d8SCathy Avery return true; 15229da1f4d8SCathy Avery } 15239da1f4d8SCathy Avery 15249da1f4d8SCathy Avery return get_test_stage(test) == 3; 15259da1f4d8SCathy Avery } 15269da1f4d8SCathy Avery 15279da1f4d8SCathy Avery static bool nmi_hlt_check(struct svm_test *test) 15289da1f4d8SCathy Avery { 15299da1f4d8SCathy Avery return get_test_stage(test) == 3; 15309da1f4d8SCathy Avery } 15319da1f4d8SCathy Avery 153208200397SSantosh Shukla static void vnmi_prepare(struct svm_test *test) 153308200397SSantosh Shukla { 153408200397SSantosh Shukla nmi_prepare(test); 153508200397SSantosh Shukla 153608200397SSantosh Shukla /* 153708200397SSantosh Shukla * Disable NMI interception to start. Enabling vNMI without 153808200397SSantosh Shukla * intercepting "real" NMIs should result in an ERR VM-Exit. 
153908200397SSantosh Shukla */ 154008200397SSantosh Shukla vmcb->control.intercept &= ~(1ULL << INTERCEPT_NMI); 154108200397SSantosh Shukla vmcb->control.int_ctl = V_NMI_ENABLE_MASK; 154208200397SSantosh Shukla vmcb->control.int_vector = NMI_VECTOR; 154308200397SSantosh Shukla } 154408200397SSantosh Shukla 154508200397SSantosh Shukla static void vnmi_test(struct svm_test *test) 154608200397SSantosh Shukla { 154708200397SSantosh Shukla report_svm_guest(!nmi_fired, test, "No vNMI before injection"); 154808200397SSantosh Shukla vmmcall(); 154908200397SSantosh Shukla 155008200397SSantosh Shukla report_svm_guest(nmi_fired, test, "vNMI delivered after injection"); 155108200397SSantosh Shukla vmmcall(); 155208200397SSantosh Shukla } 155308200397SSantosh Shukla 155408200397SSantosh Shukla static bool vnmi_finished(struct svm_test *test) 155508200397SSantosh Shukla { 155608200397SSantosh Shukla switch (get_test_stage(test)) { 155708200397SSantosh Shukla case 0: 155808200397SSantosh Shukla if (vmcb->control.exit_code != SVM_EXIT_ERR) { 155908200397SSantosh Shukla report_fail("Wanted ERR VM-Exit, got 0x%x", 156008200397SSantosh Shukla vmcb->control.exit_code); 156108200397SSantosh Shukla return true; 156208200397SSantosh Shukla } 156308200397SSantosh Shukla report(!nmi_fired, "vNMI enabled but NMI_INTERCEPT unset!"); 156408200397SSantosh Shukla vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 156508200397SSantosh Shukla vmcb->save.rip += 3; 156608200397SSantosh Shukla break; 156708200397SSantosh Shukla 156808200397SSantosh Shukla case 1: 156908200397SSantosh Shukla if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 157008200397SSantosh Shukla report_fail("Wanted VMMCALL VM-Exit, got 0x%x", 157108200397SSantosh Shukla vmcb->control.exit_code); 157208200397SSantosh Shukla return true; 157308200397SSantosh Shukla } 157408200397SSantosh Shukla report(!nmi_fired, "vNMI with vector 2 not injected"); 157508200397SSantosh Shukla vmcb->control.int_ctl |= V_NMI_PENDING_MASK; 157608200397SSantosh Shukla vmcb->save.rip += 3; 157708200397SSantosh Shukla break; 157808200397SSantosh Shukla 157908200397SSantosh Shukla case 2: 158008200397SSantosh Shukla if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 158108200397SSantosh Shukla report_fail("Wanted VMMCALL VM-Exit, got 0x%x", 158208200397SSantosh Shukla vmcb->control.exit_code); 158308200397SSantosh Shukla return true; 158408200397SSantosh Shukla } 158508200397SSantosh Shukla if (vmcb->control.int_ctl & V_NMI_BLOCKING_MASK) { 158608200397SSantosh Shukla report_fail("V_NMI_BLOCKING_MASK not cleared on VMEXIT"); 158708200397SSantosh Shukla return true; 158808200397SSantosh Shukla } 158908200397SSantosh Shukla report_pass("VNMI serviced"); 159008200397SSantosh Shukla vmcb->save.rip += 3; 159108200397SSantosh Shukla break; 159208200397SSantosh Shukla 159308200397SSantosh Shukla default: 159408200397SSantosh Shukla return true; 159508200397SSantosh Shukla } 159608200397SSantosh Shukla 159708200397SSantosh Shukla inc_test_stage(test); 159808200397SSantosh Shukla 159908200397SSantosh Shukla return get_test_stage(test) == 3; 160008200397SSantosh Shukla } 160108200397SSantosh Shukla 160208200397SSantosh Shukla static bool vnmi_check(struct svm_test *test) 160308200397SSantosh Shukla { 160408200397SSantosh Shukla return get_test_stage(test) == 3; 160508200397SSantosh Shukla } 160608200397SSantosh Shukla 16074b4fb247SPaolo Bonzini static volatile int count_exc = 0; 16084b4fb247SPaolo Bonzini 16094b4fb247SPaolo Bonzini static void my_isr(struct ex_regs *r) 
16104b4fb247SPaolo Bonzini { 16114b4fb247SPaolo Bonzini count_exc++; 16124b4fb247SPaolo Bonzini } 16134b4fb247SPaolo Bonzini 16144b4fb247SPaolo Bonzini static void exc_inject_prepare(struct svm_test *test) 16154b4fb247SPaolo Bonzini { 16168634a266SPaolo Bonzini default_prepare(test); 16174b4fb247SPaolo Bonzini handle_exception(DE_VECTOR, my_isr); 16184b4fb247SPaolo Bonzini handle_exception(NMI_VECTOR, my_isr); 16194b4fb247SPaolo Bonzini } 16204b4fb247SPaolo Bonzini 16214b4fb247SPaolo Bonzini 16224b4fb247SPaolo Bonzini static void exc_inject_test(struct svm_test *test) 16234b4fb247SPaolo Bonzini { 16244b4fb247SPaolo Bonzini asm volatile ("vmmcall\n\tvmmcall\n\t"); 16254b4fb247SPaolo Bonzini } 16264b4fb247SPaolo Bonzini 16274b4fb247SPaolo Bonzini static bool exc_inject_finished(struct svm_test *test) 16284b4fb247SPaolo Bonzini { 16294b4fb247SPaolo Bonzini switch (get_test_stage(test)) { 16304b4fb247SPaolo Bonzini case 0: 16314b4fb247SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1632198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 16334b4fb247SPaolo Bonzini vmcb->control.exit_code); 16344b4fb247SPaolo Bonzini return true; 16354b4fb247SPaolo Bonzini } 16362c1ca866SNadav Amit vmcb->save.rip += 3; 16374b4fb247SPaolo Bonzini vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID; 16384b4fb247SPaolo Bonzini break; 16394b4fb247SPaolo Bonzini 16404b4fb247SPaolo Bonzini case 1: 16414b4fb247SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_ERR) { 1642198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to error. Exit reason 0x%x", 16434b4fb247SPaolo Bonzini vmcb->control.exit_code); 16444b4fb247SPaolo Bonzini return true; 16454b4fb247SPaolo Bonzini } 16464b4fb247SPaolo Bonzini report(count_exc == 0, "exception with vector 2 not injected"); 16474b4fb247SPaolo Bonzini vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID; 16484b4fb247SPaolo Bonzini break; 16494b4fb247SPaolo Bonzini 16504b4fb247SPaolo Bonzini case 2: 16514b4fb247SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1652198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. 
Exit reason 0x%x", 16534b4fb247SPaolo Bonzini vmcb->control.exit_code); 16544b4fb247SPaolo Bonzini return true; 16554b4fb247SPaolo Bonzini } 16562c1ca866SNadav Amit vmcb->save.rip += 3; 16574b4fb247SPaolo Bonzini report(count_exc == 1, "divide overflow exception injected"); 16584b4fb247SPaolo Bonzini report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared"); 16594b4fb247SPaolo Bonzini break; 16604b4fb247SPaolo Bonzini 16614b4fb247SPaolo Bonzini default: 16624b4fb247SPaolo Bonzini return true; 16634b4fb247SPaolo Bonzini } 16644b4fb247SPaolo Bonzini 16654b4fb247SPaolo Bonzini inc_test_stage(test); 16664b4fb247SPaolo Bonzini 16674b4fb247SPaolo Bonzini return get_test_stage(test) == 3; 16684b4fb247SPaolo Bonzini } 16694b4fb247SPaolo Bonzini 16704b4fb247SPaolo Bonzini static bool exc_inject_check(struct svm_test *test) 16714b4fb247SPaolo Bonzini { 16724b4fb247SPaolo Bonzini return count_exc == 1 && get_test_stage(test) == 3; 16734b4fb247SPaolo Bonzini } 16744b4fb247SPaolo Bonzini 16759c838954SCathy Avery static volatile bool virq_fired; 16764b3c6114SPaolo Bonzini static volatile unsigned long virq_rip; 16779c838954SCathy Avery 16789c838954SCathy Avery static void virq_isr(isr_regs_t *regs) 16799c838954SCathy Avery { 16809c838954SCathy Avery virq_fired = true; 16814b3c6114SPaolo Bonzini virq_rip = regs->rip; 16829c838954SCathy Avery } 16839c838954SCathy Avery 16849c838954SCathy Avery static void virq_inject_prepare(struct svm_test *test) 16859c838954SCathy Avery { 16869c838954SCathy Avery handle_irq(0xf1, virq_isr); 16879c838954SCathy Avery default_prepare(test); 16889c838954SCathy Avery vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 16899c838954SCathy Avery (0x0f << V_INTR_PRIO_SHIFT); // Set to the highest priority 16909c838954SCathy Avery vmcb->control.int_vector = 0xf1; 16919c838954SCathy Avery virq_fired = false; 16924b3c6114SPaolo Bonzini virq_rip = -1; 16939c838954SCathy Avery set_test_stage(test, 0); 16949c838954SCathy Avery } 16959c838954SCathy Avery 16969c838954SCathy Avery static void virq_inject_test(struct svm_test *test) 16979c838954SCathy Avery { 1698493d27d4SSean Christopherson report_svm_guest(!virq_fired, test, "virtual IRQ blocked after L2 cli"); 16999c838954SCathy Avery 1700e4007e62SMaxim Levitsky sti_nop_cli(); 17019c838954SCathy Avery 1702493d27d4SSean Christopherson report_svm_guest(virq_fired, test, "virtual IRQ fired after L2 sti"); 17039c838954SCathy Avery 17049c838954SCathy Avery vmmcall(); 17059c838954SCathy Avery 1706493d27d4SSean Christopherson report_svm_guest(!virq_fired, test, "intercepted VINTR blocked after L2 cli"); 17079c838954SCathy Avery 1708e4007e62SMaxim Levitsky sti_nop_cli(); 17099c838954SCathy Avery 1710493d27d4SSean Christopherson report_svm_guest(virq_fired, test, "intercepted VINTR fired after L2 sti"); 17119c838954SCathy Avery 17129c838954SCathy Avery vmmcall(); 17139c838954SCathy Avery 1714e4007e62SMaxim Levitsky sti_nop_cli(); 17159c838954SCathy Avery 1716493d27d4SSean Christopherson report_svm_guest(!virq_fired, test, 1717493d27d4SSean Christopherson "virtual IRQ blocked V_IRQ_PRIO less than V_TPR"); 17189c838954SCathy Avery 17199c838954SCathy Avery vmmcall(); 17209c838954SCathy Avery vmmcall(); 17219c838954SCathy Avery } 17229c838954SCathy Avery 17239c838954SCathy Avery static bool virq_inject_finished(struct svm_test *test) 17249c838954SCathy Avery { 17259c838954SCathy Avery vmcb->save.rip += 3; 17269c838954SCathy Avery 17279c838954SCathy Avery switch (get_test_stage(test)) { 17289c838954SCathy Avery case 0: 
17299c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1730198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 17319c838954SCathy Avery vmcb->control.exit_code); 17329c838954SCathy Avery return true; 17339c838954SCathy Avery } 17349c838954SCathy Avery if (vmcb->control.int_ctl & V_IRQ_MASK) { 1735198dfd0eSJanis Schoetterl-Glausch report_fail("V_IRQ not cleared on VMEXIT after firing"); 17369c838954SCathy Avery return true; 17379c838954SCathy Avery } 17389c838954SCathy Avery virq_fired = false; 17399c838954SCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 17409c838954SCathy Avery vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 17419c838954SCathy Avery (0x0f << V_INTR_PRIO_SHIFT); 17429c838954SCathy Avery break; 17439c838954SCathy Avery 17449c838954SCathy Avery case 1: 17459c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VINTR) { 1746198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vintr. Exit reason 0x%x", 17479c838954SCathy Avery vmcb->control.exit_code); 17489c838954SCathy Avery return true; 17499c838954SCathy Avery } 17509c838954SCathy Avery if (virq_fired) { 1751198dfd0eSJanis Schoetterl-Glausch report_fail("V_IRQ fired before SVM_EXIT_VINTR"); 17529c838954SCathy Avery return true; 17539c838954SCathy Avery } 17549c838954SCathy Avery vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); 17559c838954SCathy Avery break; 17569c838954SCathy Avery 17579c838954SCathy Avery case 2: 17589c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1759198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 17609c838954SCathy Avery vmcb->control.exit_code); 17619c838954SCathy Avery return true; 17629c838954SCathy Avery } 17639c838954SCathy Avery virq_fired = false; 17649c838954SCathy Avery // Set irq to lower priority 17659c838954SCathy Avery vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 17669c838954SCathy Avery (0x08 << V_INTR_PRIO_SHIFT); 17679c838954SCathy Avery // Raise guest TPR 17689c838954SCathy Avery vmcb->control.int_ctl |= 0x0a & V_TPR_MASK; 17699c838954SCathy Avery break; 17709c838954SCathy Avery 17719c838954SCathy Avery case 3: 17729c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1773198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 17749c838954SCathy Avery vmcb->control.exit_code); 17759c838954SCathy Avery return true; 17769c838954SCathy Avery } 17779c838954SCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 17789c838954SCathy Avery break; 17799c838954SCathy Avery 17809c838954SCathy Avery case 4: 17819c838954SCathy Avery // INTERCEPT_VINTR should be ignored because V_INTR_PRIO < V_TPR 17829c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1783198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. 
Exit reason 0x%x", 17849c838954SCathy Avery vmcb->control.exit_code); 17859c838954SCathy Avery return true; 17869c838954SCathy Avery } 17879c838954SCathy Avery break; 17889c838954SCathy Avery 17899c838954SCathy Avery default: 17909c838954SCathy Avery return true; 17919c838954SCathy Avery } 17929c838954SCathy Avery 17939c838954SCathy Avery inc_test_stage(test); 17949c838954SCathy Avery 17959c838954SCathy Avery return get_test_stage(test) == 5; 17969c838954SCathy Avery } 17979c838954SCathy Avery 17989c838954SCathy Avery static bool virq_inject_check(struct svm_test *test) 17999c838954SCathy Avery { 18009c838954SCathy Avery return get_test_stage(test) == 5; 18019c838954SCathy Avery } 18029c838954SCathy Avery 18034b3c6114SPaolo Bonzini static void virq_inject_within_shadow_prepare(struct svm_test *test) 18044b3c6114SPaolo Bonzini { 18054b3c6114SPaolo Bonzini virq_inject_prepare(test); 18064b3c6114SPaolo Bonzini vmcb->control.int_state = SVM_INTERRUPT_SHADOW_MASK; 18074b3c6114SPaolo Bonzini vmcb->save.rflags |= X86_EFLAGS_IF; 18084b3c6114SPaolo Bonzini } 18094b3c6114SPaolo Bonzini 18104b3c6114SPaolo Bonzini extern void virq_inject_within_shadow_test(struct svm_test *test); 18114b3c6114SPaolo Bonzini asm("virq_inject_within_shadow_test: nop; nop; vmmcall"); 18124b3c6114SPaolo Bonzini 18134b3c6114SPaolo Bonzini static void virq_inject_within_shadow_prepare_gif_clear(struct svm_test *test) 18144b3c6114SPaolo Bonzini { 18154b3c6114SPaolo Bonzini vmcb->save.rip = (unsigned long) test->guest_func; 18164b3c6114SPaolo Bonzini } 18174b3c6114SPaolo Bonzini 18184b3c6114SPaolo Bonzini static bool virq_inject_within_shadow_finished(struct svm_test *test) 18194b3c6114SPaolo Bonzini { 18204b3c6114SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) 18214b3c6114SPaolo Bonzini report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 18224b3c6114SPaolo Bonzini vmcb->control.exit_code); 18234b3c6114SPaolo Bonzini if (!virq_fired) 18244b3c6114SPaolo Bonzini report_fail("V_IRQ did not fire"); 18254b3c6114SPaolo Bonzini else if (virq_rip != (unsigned long) virq_inject_within_shadow_test + 1) 18264b3c6114SPaolo Bonzini report_fail("Unexpected RIP for interrupt handler"); 18274b3c6114SPaolo Bonzini else if (vmcb->control.int_ctl & V_IRQ_MASK) 18284b3c6114SPaolo Bonzini report_fail("V_IRQ not cleared on VMEXIT after firing"); 18294b3c6114SPaolo Bonzini else if (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) 18304b3c6114SPaolo Bonzini report_fail("Interrupt shadow not cleared"); 18314b3c6114SPaolo Bonzini else 18324b3c6114SPaolo Bonzini inc_test_stage(test); 18334b3c6114SPaolo Bonzini 18344b3c6114SPaolo Bonzini return true; 18354b3c6114SPaolo Bonzini } 18364b3c6114SPaolo Bonzini 18374b3c6114SPaolo Bonzini static bool virq_inject_within_shadow_check(struct svm_test *test) 18384b3c6114SPaolo Bonzini { 18394b3c6114SPaolo Bonzini return get_test_stage(test) == 1; 18404b3c6114SPaolo Bonzini } 18414b3c6114SPaolo Bonzini 1842da338a31SMaxim Levitsky /* 1843da338a31SMaxim Levitsky * Detect nested guest RIP corruption as explained in kernel commit 1844da338a31SMaxim Levitsky * b6162e82aef19fee9c32cb3fe9ac30d9116a8c73 1845da338a31SMaxim Levitsky * 1846da338a31SMaxim Levitsky * In the assembly loop below 'ins' is executed while IO instructions 1847da338a31SMaxim Levitsky * are not intercepted; the instruction is emulated by L0. 
1848da338a31SMaxim Levitsky * 1849da338a31SMaxim Levitsky * At the same time we are getting interrupts from the local APIC timer, 1850da338a31SMaxim Levitsky * and we do intercept them in L1 1851da338a31SMaxim Levitsky * 1852da338a31SMaxim Levitsky * If the interrupt happens on the insb instruction, L0 will VMexit, emulate 1853da338a31SMaxim Levitsky * the insb instruction and then it will inject the interrupt to L1 through 1854da338a31SMaxim Levitsky * a nested VMexit. Due to a bug, it would leave pre-emulation values of RIP, 1855da338a31SMaxim Levitsky * RAX and RSP in the VMCB. 1856da338a31SMaxim Levitsky * 1857da338a31SMaxim Levitsky * In our intercept handler we detect the bug by checking that RIP is that of 1858da338a31SMaxim Levitsky * the insb instruction, but its memory operand has already been written. 1859da338a31SMaxim Levitsky * This means that insb was already executed. 1860da338a31SMaxim Levitsky */ 1861da338a31SMaxim Levitsky 1862da338a31SMaxim Levitsky static volatile int isr_cnt = 0; 1863da338a31SMaxim Levitsky static volatile uint8_t io_port_var = 0xAA; 1864da338a31SMaxim Levitsky extern const char insb_instruction_label[]; 1865da338a31SMaxim Levitsky 1866da338a31SMaxim Levitsky static void reg_corruption_isr(isr_regs_t *regs) 1867da338a31SMaxim Levitsky { 1868da338a31SMaxim Levitsky isr_cnt++; 1869da338a31SMaxim Levitsky apic_write(APIC_EOI, 0); 1870da338a31SMaxim Levitsky } 1871da338a31SMaxim Levitsky 1872da338a31SMaxim Levitsky static void reg_corruption_prepare(struct svm_test *test) 1873da338a31SMaxim Levitsky { 1874da338a31SMaxim Levitsky default_prepare(test); 1875da338a31SMaxim Levitsky set_test_stage(test, 0); 1876da338a31SMaxim Levitsky 1877da338a31SMaxim Levitsky vmcb->control.int_ctl = V_INTR_MASKING_MASK; 1878da338a31SMaxim Levitsky vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1879da338a31SMaxim Levitsky 1880da338a31SMaxim Levitsky handle_irq(TIMER_VECTOR, reg_corruption_isr); 1881da338a31SMaxim Levitsky 1882da338a31SMaxim Levitsky /* set local APIC to inject external interrupts */ 1883a2c7dff7SMaxim Levitsky apic_setup_timer(TIMER_VECTOR, APIC_LVT_TIMER_PERIODIC); 1884a2c7dff7SMaxim Levitsky apic_start_timer(1000); 1885da338a31SMaxim Levitsky } 1886da338a31SMaxim Levitsky 1887da338a31SMaxim Levitsky static void reg_corruption_test(struct svm_test *test) 1888da338a31SMaxim Levitsky { 1889da338a31SMaxim Levitsky /* this is endless loop, which is interrupted by the timer interrupt */ 1890da338a31SMaxim Levitsky asm volatile ( 1891da338a31SMaxim Levitsky "1:\n\t" 1892da338a31SMaxim Levitsky "movw $0x4d0, %%dx\n\t" // IO port 1893da338a31SMaxim Levitsky "lea %[io_port_var], %%rdi\n\t" 1894da338a31SMaxim Levitsky "movb $0xAA, %[io_port_var]\n\t" 1895da338a31SMaxim Levitsky "insb_instruction_label:\n\t" 1896da338a31SMaxim Levitsky "insb\n\t" 1897da338a31SMaxim Levitsky "jmp 1b\n\t" 1898da338a31SMaxim Levitsky 1899da338a31SMaxim Levitsky : [io_port_var] "=m" (io_port_var) 1900da338a31SMaxim Levitsky : /* no inputs*/ 1901da338a31SMaxim Levitsky : "rdx", "rdi" 1902da338a31SMaxim Levitsky ); 1903da338a31SMaxim Levitsky } 1904da338a31SMaxim Levitsky 1905da338a31SMaxim Levitsky static bool reg_corruption_finished(struct svm_test *test) 1906da338a31SMaxim Levitsky { 1907da338a31SMaxim Levitsky if (isr_cnt == 10000) { 19085c3582f0SJanis Schoetterl-Glausch report_pass("No RIP corruption detected after %d timer interrupts", 1909da338a31SMaxim Levitsky isr_cnt); 1910da338a31SMaxim Levitsky set_test_stage(test, 1); 1911491bbc64SMaxim Levitsky goto cleanup; 
1912da338a31SMaxim Levitsky } 1913da338a31SMaxim Levitsky 1914da338a31SMaxim Levitsky if (vmcb->control.exit_code == SVM_EXIT_INTR) { 1915da338a31SMaxim Levitsky 1916da338a31SMaxim Levitsky void* guest_rip = (void*)vmcb->save.rip; 1917da338a31SMaxim Levitsky 1918e4007e62SMaxim Levitsky sti_nop_cli(); 1919da338a31SMaxim Levitsky 1920da338a31SMaxim Levitsky if (guest_rip == insb_instruction_label && io_port_var != 0xAA) { 1921198dfd0eSJanis Schoetterl-Glausch report_fail("RIP corruption detected after %d timer interrupts", 1922da338a31SMaxim Levitsky isr_cnt); 1923491bbc64SMaxim Levitsky goto cleanup; 1924da338a31SMaxim Levitsky } 1925da338a31SMaxim Levitsky 1926da338a31SMaxim Levitsky } 1927da338a31SMaxim Levitsky return false; 1928491bbc64SMaxim Levitsky cleanup: 1929a2c7dff7SMaxim Levitsky apic_cleanup_timer(); 1930491bbc64SMaxim Levitsky return true; 1931491bbc64SMaxim Levitsky 1932da338a31SMaxim Levitsky } 1933da338a31SMaxim Levitsky 1934da338a31SMaxim Levitsky static bool reg_corruption_check(struct svm_test *test) 1935da338a31SMaxim Levitsky { 1936da338a31SMaxim Levitsky return get_test_stage(test) == 1; 1937da338a31SMaxim Levitsky } 1938da338a31SMaxim Levitsky 19394770e9c8SCathy Avery static void get_tss_entry(void *data) 19404770e9c8SCathy Avery { 1941a7f32d87SPaolo Bonzini *((gdt_entry_t **)data) = get_tss_descr(); 19424770e9c8SCathy Avery } 19434770e9c8SCathy Avery 19444770e9c8SCathy Avery static int orig_cpu_count; 19454770e9c8SCathy Avery 19464770e9c8SCathy Avery static void init_startup_prepare(struct svm_test *test) 19474770e9c8SCathy Avery { 1948a7f32d87SPaolo Bonzini gdt_entry_t *tss_entry; 19494770e9c8SCathy Avery int i; 19504770e9c8SCathy Avery 19514770e9c8SCathy Avery on_cpu(1, get_tss_entry, &tss_entry); 19524770e9c8SCathy Avery 1953d36b378fSVarad Gautam orig_cpu_count = atomic_read(&cpu_online_count); 19544770e9c8SCathy Avery 19554770e9c8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 19564770e9c8SCathy Avery id_map[1]); 19574770e9c8SCathy Avery 19584770e9c8SCathy Avery delay(100000000ULL); 19594770e9c8SCathy Avery 1960d36b378fSVarad Gautam atomic_dec(&cpu_online_count); 19614770e9c8SCathy Avery 1962a7f32d87SPaolo Bonzini tss_entry->type &= ~DESC_BUSY; 19634770e9c8SCathy Avery 19644770e9c8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_STARTUP, id_map[1]); 19654770e9c8SCathy Avery 1966d36b378fSVarad Gautam for (i = 0; i < 5 && atomic_read(&cpu_online_count) < orig_cpu_count; i++) 19674770e9c8SCathy Avery delay(100000000ULL); 19684770e9c8SCathy Avery } 19694770e9c8SCathy Avery 19704770e9c8SCathy Avery static bool init_startup_finished(struct svm_test *test) 19714770e9c8SCathy Avery { 19724770e9c8SCathy Avery return true; 19734770e9c8SCathy Avery } 19744770e9c8SCathy Avery 19754770e9c8SCathy Avery static bool init_startup_check(struct svm_test *test) 19764770e9c8SCathy Avery { 1977d36b378fSVarad Gautam return atomic_read(&cpu_online_count) == orig_cpu_count; 19784770e9c8SCathy Avery } 19794770e9c8SCathy Avery 1980d5da6dfeSCathy Avery static volatile bool init_intercept; 1981d5da6dfeSCathy Avery 1982d5da6dfeSCathy Avery static void init_intercept_prepare(struct svm_test *test) 1983d5da6dfeSCathy Avery { 1984d5da6dfeSCathy Avery init_intercept = false; 1985d5da6dfeSCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_INIT); 1986d5da6dfeSCathy Avery } 1987d5da6dfeSCathy Avery 1988d5da6dfeSCathy Avery static void init_intercept_test(struct svm_test *test) 1989d5da6dfeSCathy Avery { 1990d5da6dfeSCathy Avery 
apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 0); 1991d5da6dfeSCathy Avery } 1992d5da6dfeSCathy Avery 1993d5da6dfeSCathy Avery static bool init_intercept_finished(struct svm_test *test) 1994d5da6dfeSCathy Avery { 1995d5da6dfeSCathy Avery vmcb->save.rip += 3; 1996d5da6dfeSCathy Avery 1997d5da6dfeSCathy Avery if (vmcb->control.exit_code != SVM_EXIT_INIT) { 1998198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to init intercept. Exit reason 0x%x", 1999d5da6dfeSCathy Avery vmcb->control.exit_code); 2000d5da6dfeSCathy Avery 2001d5da6dfeSCathy Avery return true; 2002d5da6dfeSCathy Avery } 2003d5da6dfeSCathy Avery 2004d5da6dfeSCathy Avery init_intercept = true; 2005d5da6dfeSCathy Avery 20065c3582f0SJanis Schoetterl-Glausch report_pass("INIT to vcpu intercepted"); 2007d5da6dfeSCathy Avery 2008d5da6dfeSCathy Avery return true; 2009d5da6dfeSCathy Avery } 2010d5da6dfeSCathy Avery 2011d5da6dfeSCathy Avery static bool init_intercept_check(struct svm_test *test) 2012d5da6dfeSCathy Avery { 2013d5da6dfeSCathy Avery return init_intercept; 2014d5da6dfeSCathy Avery } 2015d5da6dfeSCathy Avery 20167839b0ecSKrish Sadhukhan /* 20177839b0ecSKrish Sadhukhan * Setting host EFLAGS.TF causes a #DB trap after the VMRUN completes on the 20187839b0ecSKrish Sadhukhan * host side (i.e., after the #VMEXIT from the guest). 20197839b0ecSKrish Sadhukhan * 20200689a980SKrish Sadhukhan * Setting host EFLAGS.RF suppresses any potential instruction breakpoint 20210689a980SKrish Sadhukhan * match on the VMRUN and completion of the VMRUN instruction clears the 20220689a980SKrish Sadhukhan * host EFLAGS.RF bit. 20230689a980SKrish Sadhukhan * 20247839b0ecSKrish Sadhukhan * [AMD APM] 20257839b0ecSKrish Sadhukhan */ 20267839b0ecSKrish Sadhukhan static volatile u8 host_rflags_guest_main_flag = 0; 20277839b0ecSKrish Sadhukhan static volatile u8 host_rflags_db_handler_flag = 0; 20287839b0ecSKrish Sadhukhan static volatile bool host_rflags_ss_on_vmrun = false; 20297839b0ecSKrish Sadhukhan static volatile bool host_rflags_vmrun_reached = false; 20307839b0ecSKrish Sadhukhan static volatile bool host_rflags_set_tf = false; 20310689a980SKrish Sadhukhan static volatile bool host_rflags_set_rf = false; 20320689a980SKrish Sadhukhan static u64 rip_detected; 20337839b0ecSKrish Sadhukhan 20347839b0ecSKrish Sadhukhan extern u64 *vmrun_rip; 20357839b0ecSKrish Sadhukhan 20367839b0ecSKrish Sadhukhan static void host_rflags_db_handler(struct ex_regs *r) 20377839b0ecSKrish Sadhukhan { 20387839b0ecSKrish Sadhukhan if (host_rflags_ss_on_vmrun) { 20397839b0ecSKrish Sadhukhan if (host_rflags_vmrun_reached) { 20400689a980SKrish Sadhukhan if (!host_rflags_set_rf) { 20417839b0ecSKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 20420689a980SKrish Sadhukhan rip_detected = r->rip; 20437839b0ecSKrish Sadhukhan } else { 20440689a980SKrish Sadhukhan r->rflags |= X86_EFLAGS_RF; 20450689a980SKrish Sadhukhan ++host_rflags_db_handler_flag; 20460689a980SKrish Sadhukhan } 20470689a980SKrish Sadhukhan } else { 20480689a980SKrish Sadhukhan if (r->rip == (u64)&vmrun_rip) { 20497839b0ecSKrish Sadhukhan host_rflags_vmrun_reached = true; 20500689a980SKrish Sadhukhan 20510689a980SKrish Sadhukhan if (host_rflags_set_rf) { 20520689a980SKrish Sadhukhan host_rflags_guest_main_flag = 0; 20530689a980SKrish Sadhukhan rip_detected = r->rip; 20540689a980SKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 20550689a980SKrish Sadhukhan 20560689a980SKrish Sadhukhan /* Trigger #DB via debug registers */ 20570689a980SKrish Sadhukhan write_dr0((void 
*)&vmrun_rip); 20580689a980SKrish Sadhukhan write_dr7(0x403); 20590689a980SKrish Sadhukhan } 20600689a980SKrish Sadhukhan } 20617839b0ecSKrish Sadhukhan } 20627839b0ecSKrish Sadhukhan } else { 20637839b0ecSKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 20647839b0ecSKrish Sadhukhan } 20657839b0ecSKrish Sadhukhan } 20667839b0ecSKrish Sadhukhan 20677839b0ecSKrish Sadhukhan static void host_rflags_prepare(struct svm_test *test) 20687839b0ecSKrish Sadhukhan { 20697839b0ecSKrish Sadhukhan default_prepare(test); 20707839b0ecSKrish Sadhukhan handle_exception(DB_VECTOR, host_rflags_db_handler); 20717839b0ecSKrish Sadhukhan set_test_stage(test, 0); 20727839b0ecSKrish Sadhukhan } 20737839b0ecSKrish Sadhukhan 20747839b0ecSKrish Sadhukhan static void host_rflags_prepare_gif_clear(struct svm_test *test) 20757839b0ecSKrish Sadhukhan { 20767839b0ecSKrish Sadhukhan if (host_rflags_set_tf) 20777839b0ecSKrish Sadhukhan write_rflags(read_rflags() | X86_EFLAGS_TF); 20787839b0ecSKrish Sadhukhan } 20797839b0ecSKrish Sadhukhan 20807839b0ecSKrish Sadhukhan static void host_rflags_test(struct svm_test *test) 20817839b0ecSKrish Sadhukhan { 20827839b0ecSKrish Sadhukhan while (1) { 20830689a980SKrish Sadhukhan if (get_test_stage(test) > 0) { 20840689a980SKrish Sadhukhan if ((host_rflags_set_tf && !host_rflags_ss_on_vmrun && !host_rflags_db_handler_flag) || 20850689a980SKrish Sadhukhan (host_rflags_set_rf && host_rflags_db_handler_flag == 1)) 20867839b0ecSKrish Sadhukhan host_rflags_guest_main_flag = 1; 20870689a980SKrish Sadhukhan } 20880689a980SKrish Sadhukhan 20890689a980SKrish Sadhukhan if (get_test_stage(test) == 4) 20907839b0ecSKrish Sadhukhan break; 20917839b0ecSKrish Sadhukhan vmmcall(); 20927839b0ecSKrish Sadhukhan } 20937839b0ecSKrish Sadhukhan } 20947839b0ecSKrish Sadhukhan 20957839b0ecSKrish Sadhukhan static bool host_rflags_finished(struct svm_test *test) 20967839b0ecSKrish Sadhukhan { 20977839b0ecSKrish Sadhukhan switch (get_test_stage(test)) { 20987839b0ecSKrish Sadhukhan case 0: 20997839b0ecSKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 2100198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT. Exit reason 0x%x", 21017839b0ecSKrish Sadhukhan vmcb->control.exit_code); 21027839b0ecSKrish Sadhukhan return true; 21037839b0ecSKrish Sadhukhan } 21047839b0ecSKrish Sadhukhan vmcb->save.rip += 3; 21057839b0ecSKrish Sadhukhan /* 21067839b0ecSKrish Sadhukhan * Setting host EFLAGS.TF not immediately before VMRUN, causes 21077839b0ecSKrish Sadhukhan * #DB trap before first guest instruction is executed 21087839b0ecSKrish Sadhukhan */ 21097839b0ecSKrish Sadhukhan host_rflags_set_tf = true; 21107839b0ecSKrish Sadhukhan break; 21117839b0ecSKrish Sadhukhan case 1: 21127839b0ecSKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 21130689a980SKrish Sadhukhan host_rflags_guest_main_flag != 1) { 2114198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT or #DB handler" 21157839b0ecSKrish Sadhukhan " invoked before guest main. Exit reason 0x%x", 21167839b0ecSKrish Sadhukhan vmcb->control.exit_code); 21177839b0ecSKrish Sadhukhan return true; 21187839b0ecSKrish Sadhukhan } 21197839b0ecSKrish Sadhukhan vmcb->save.rip += 3; 21207839b0ecSKrish Sadhukhan /* 21217839b0ecSKrish Sadhukhan * Setting host EFLAGS.TF immediately before VMRUN, causes #DB 21227839b0ecSKrish Sadhukhan * trap after VMRUN completes on the host side (i.e., after 21237839b0ecSKrish Sadhukhan * VMEXIT from guest). 
21247839b0ecSKrish Sadhukhan */ 21257839b0ecSKrish Sadhukhan host_rflags_ss_on_vmrun = true; 21267839b0ecSKrish Sadhukhan break; 21277839b0ecSKrish Sadhukhan case 2: 21287839b0ecSKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 21290c22fd44SPaolo Bonzini rip_detected != (u64)&vmrun_rip + 3) { 2130198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT or RIP mismatch." 21310689a980SKrish Sadhukhan " Exit reason 0x%x, RIP actual: %lx, RIP expected: " 21320689a980SKrish Sadhukhan "%lx", vmcb->control.exit_code, 21330c22fd44SPaolo Bonzini (u64)&vmrun_rip + 3, rip_detected); 21340689a980SKrish Sadhukhan return true; 21350689a980SKrish Sadhukhan } 21360689a980SKrish Sadhukhan host_rflags_set_rf = true; 21370689a980SKrish Sadhukhan host_rflags_guest_main_flag = 0; 21380689a980SKrish Sadhukhan host_rflags_vmrun_reached = false; 21390689a980SKrish Sadhukhan vmcb->save.rip += 3; 21400689a980SKrish Sadhukhan break; 21410689a980SKrish Sadhukhan case 3: 21420689a980SKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 21430689a980SKrish Sadhukhan rip_detected != (u64)&vmrun_rip || 21440689a980SKrish Sadhukhan host_rflags_guest_main_flag != 1 || 21450689a980SKrish Sadhukhan host_rflags_db_handler_flag > 1 || 21460689a980SKrish Sadhukhan read_rflags() & X86_EFLAGS_RF) { 2147198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT or RIP mismatch or " 21480689a980SKrish Sadhukhan "EFLAGS.RF not cleared." 21490689a980SKrish Sadhukhan " Exit reason 0x%x, RIP actual: %lx, RIP expected: " 21500689a980SKrish Sadhukhan "%lx", vmcb->control.exit_code, 21510689a980SKrish Sadhukhan (u64)&vmrun_rip, rip_detected); 21527839b0ecSKrish Sadhukhan return true; 21537839b0ecSKrish Sadhukhan } 21547839b0ecSKrish Sadhukhan host_rflags_set_tf = false; 21550689a980SKrish Sadhukhan host_rflags_set_rf = false; 21567839b0ecSKrish Sadhukhan vmcb->save.rip += 3; 21577839b0ecSKrish Sadhukhan break; 21587839b0ecSKrish Sadhukhan default: 21597839b0ecSKrish Sadhukhan return true; 21607839b0ecSKrish Sadhukhan } 21617839b0ecSKrish Sadhukhan inc_test_stage(test); 21620689a980SKrish Sadhukhan return get_test_stage(test) == 5; 21637839b0ecSKrish Sadhukhan } 21647839b0ecSKrish Sadhukhan 21657839b0ecSKrish Sadhukhan static bool host_rflags_check(struct svm_test *test) 21667839b0ecSKrish Sadhukhan { 21670689a980SKrish Sadhukhan return get_test_stage(test) == 4; 21687839b0ecSKrish Sadhukhan } 21697839b0ecSKrish Sadhukhan 21708660d1b5SKrish Sadhukhan #define TEST(name) { #name, .v2 = name } 21718660d1b5SKrish Sadhukhan 2172ba29942cSKrish Sadhukhan /* 2173ba29942cSKrish Sadhukhan * v2 tests 2174ba29942cSKrish Sadhukhan */ 2175ba29942cSKrish Sadhukhan 2176f32183f5SJim Mattson /* 2177f32183f5SJim Mattson * Ensure that kvm recalculates the L1 guest's CPUID.01H:ECX.OSXSAVE 2178f32183f5SJim Mattson * after VM-exit from an L2 guest that sets CR4.OSXSAVE to a different 2179f32183f5SJim Mattson * value than in L1. 
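* (CPUID.01H:ECX.OSXSAVE simply mirrors CR4.OSXSAVE, so KVM must refresh
* its cached CPUID state on the nested #VMEXIT).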
2180f32183f5SJim Mattson */ 2181f32183f5SJim Mattson 2182f32183f5SJim Mattson static void svm_cr4_osxsave_test_guest(struct svm_test *test) 2183f32183f5SJim Mattson { 2184f32183f5SJim Mattson write_cr4(read_cr4() & ~X86_CR4_OSXSAVE); 2185f32183f5SJim Mattson } 2186f32183f5SJim Mattson 2187f32183f5SJim Mattson static void svm_cr4_osxsave_test(void) 2188f32183f5SJim Mattson { 2189f32183f5SJim Mattson if (!this_cpu_has(X86_FEATURE_XSAVE)) { 2190f32183f5SJim Mattson report_skip("XSAVE not detected"); 2191f32183f5SJim Mattson return; 2192f32183f5SJim Mattson } 2193f32183f5SJim Mattson 2194f32183f5SJim Mattson if (!(read_cr4() & X86_CR4_OSXSAVE)) { 2195f32183f5SJim Mattson unsigned long cr4 = read_cr4() | X86_CR4_OSXSAVE; 2196f32183f5SJim Mattson 2197f32183f5SJim Mattson write_cr4(cr4); 2198f32183f5SJim Mattson vmcb->save.cr4 = cr4; 2199f32183f5SJim Mattson } 2200f32183f5SJim Mattson 2201816c0359SSean Christopherson report(this_cpu_has(X86_FEATURE_OSXSAVE), "CPUID.01H:ECX.XSAVE set before VMRUN"); 2202f32183f5SJim Mattson 2203f32183f5SJim Mattson test_set_guest(svm_cr4_osxsave_test_guest); 2204f32183f5SJim Mattson report(svm_vmrun() == SVM_EXIT_VMMCALL, 2205f32183f5SJim Mattson "svm_cr4_osxsave_test_guest finished with VMMCALL"); 2206f32183f5SJim Mattson 2207816c0359SSean Christopherson report(this_cpu_has(X86_FEATURE_OSXSAVE), "CPUID.01H:ECX.XSAVE set after VMRUN"); 2208f32183f5SJim Mattson } 2209f32183f5SJim Mattson 2210ba29942cSKrish Sadhukhan static void basic_guest_main(struct svm_test *test) 2211ba29942cSKrish Sadhukhan { 2212ba29942cSKrish Sadhukhan } 2213ba29942cSKrish Sadhukhan 2214eae10e8fSKrish Sadhukhan 2215eae10e8fSKrish Sadhukhan #define SVM_TEST_REG_RESERVED_BITS(start, end, inc, str_name, reg, val, \ 2216eae10e8fSKrish Sadhukhan resv_mask) \ 2217eae10e8fSKrish Sadhukhan { \ 2218eae10e8fSKrish Sadhukhan u64 tmp, mask; \ 2219eae10e8fSKrish Sadhukhan int i; \ 2220eae10e8fSKrish Sadhukhan \ 2221eae10e8fSKrish Sadhukhan for (i = start; i <= end; i = i + inc) { \ 2222eae10e8fSKrish Sadhukhan mask = 1ull << i; \ 2223eae10e8fSKrish Sadhukhan if (!(mask & resv_mask)) \ 2224eae10e8fSKrish Sadhukhan continue; \ 2225eae10e8fSKrish Sadhukhan tmp = val | mask; \ 2226eae10e8fSKrish Sadhukhan reg = tmp; \ 2227eae10e8fSKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "Test %s %d:%d: %lx", \ 2228eae10e8fSKrish Sadhukhan str_name, end, start, tmp); \ 2229eae10e8fSKrish Sadhukhan } \ 2230eae10e8fSKrish Sadhukhan } 2231eae10e8fSKrish Sadhukhan 22326d0ecbf6SKrish Sadhukhan #define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask, \ 2233cb6524f3SPaolo Bonzini exit_code, test_name) \ 2234a79c9495SKrish Sadhukhan { \ 2235a79c9495SKrish Sadhukhan u64 tmp, mask; \ 22368ae6d77fSSean Christopherson u32 r; \ 2237a79c9495SKrish Sadhukhan int i; \ 2238a79c9495SKrish Sadhukhan \ 2239a79c9495SKrish Sadhukhan for (i = start; i <= end; i = i + inc) { \ 2240a79c9495SKrish Sadhukhan mask = 1ull << i; \ 2241a79c9495SKrish Sadhukhan if (!(mask & resv_mask)) \ 2242a79c9495SKrish Sadhukhan continue; \ 2243a79c9495SKrish Sadhukhan tmp = val | mask; \ 2244a79c9495SKrish Sadhukhan switch (cr) { \ 2245a79c9495SKrish Sadhukhan case 0: \ 2246a79c9495SKrish Sadhukhan vmcb->save.cr0 = tmp; \ 2247a79c9495SKrish Sadhukhan break; \ 2248a79c9495SKrish Sadhukhan case 3: \ 2249a79c9495SKrish Sadhukhan vmcb->save.cr3 = tmp; \ 2250a79c9495SKrish Sadhukhan break; \ 2251a79c9495SKrish Sadhukhan case 4: \ 2252a79c9495SKrish Sadhukhan vmcb->save.cr4 = tmp; \ 2253a79c9495SKrish Sadhukhan } \ 22548ae6d77fSSean Christopherson r 
= svm_vmrun(); \ 22558ae6d77fSSean Christopherson report(r == exit_code, "Test CR%d %s%d:%d: %lx, wanted exit 0x%x, got 0x%x", \ 22568ae6d77fSSean Christopherson cr, test_name, end, start, tmp, exit_code, r); \ 2257a79c9495SKrish Sadhukhan } \ 2258a79c9495SKrish Sadhukhan } 2259e8d7a8f6SKrish Sadhukhan 2260a79c9495SKrish Sadhukhan static void test_efer(void) 2261a79c9495SKrish Sadhukhan { 2262e8d7a8f6SKrish Sadhukhan /* 2263e8d7a8f6SKrish Sadhukhan * Un-setting EFER.SVME is illegal 2264e8d7a8f6SKrish Sadhukhan */ 2265ba29942cSKrish Sadhukhan u64 efer_saved = vmcb->save.efer; 2266ba29942cSKrish Sadhukhan u64 efer = efer_saved; 2267ba29942cSKrish Sadhukhan 2268ba29942cSKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer); 2269ba29942cSKrish Sadhukhan efer &= ~EFER_SVME; 2270ba29942cSKrish Sadhukhan vmcb->save.efer = efer; 2271ba29942cSKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer); 2272ba29942cSKrish Sadhukhan vmcb->save.efer = efer_saved; 2273e8d7a8f6SKrish Sadhukhan 2274e8d7a8f6SKrish Sadhukhan /* 2275a79c9495SKrish Sadhukhan * EFER MBZ bits: 63:16, 9 2276a79c9495SKrish Sadhukhan */ 2277a79c9495SKrish Sadhukhan efer_saved = vmcb->save.efer; 2278a79c9495SKrish Sadhukhan 2279a79c9495SKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(8, 9, 1, "EFER", vmcb->save.efer, 2280a79c9495SKrish Sadhukhan efer_saved, SVM_EFER_RESERVED_MASK); 2281a79c9495SKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(16, 63, 4, "EFER", vmcb->save.efer, 2282a79c9495SKrish Sadhukhan efer_saved, SVM_EFER_RESERVED_MASK); 2283a79c9495SKrish Sadhukhan 22841d7bde08SKrish Sadhukhan /* 22851d7bde08SKrish Sadhukhan * EFER.LME and CR0.PG are both set and CR4.PAE is zero. 22861d7bde08SKrish Sadhukhan */ 22871d7bde08SKrish Sadhukhan u64 cr0_saved = vmcb->save.cr0; 22881d7bde08SKrish Sadhukhan u64 cr0; 22891d7bde08SKrish Sadhukhan u64 cr4_saved = vmcb->save.cr4; 22901d7bde08SKrish Sadhukhan u64 cr4; 22911d7bde08SKrish Sadhukhan 22921d7bde08SKrish Sadhukhan efer = efer_saved | EFER_LME; 22931d7bde08SKrish Sadhukhan vmcb->save.efer = efer; 22941d7bde08SKrish Sadhukhan cr0 = cr0_saved | X86_CR0_PG | X86_CR0_PE; 22951d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0; 22961d7bde08SKrish Sadhukhan cr4 = cr4_saved & ~X86_CR4_PAE; 22971d7bde08SKrish Sadhukhan vmcb->save.cr4 = cr4; 22981d7bde08SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 22991d7bde08SKrish Sadhukhan "CR0.PG=1 (%lx) and CR4.PAE=0 (%lx)", efer, cr0, cr4); 23001d7bde08SKrish Sadhukhan 23011d7bde08SKrish Sadhukhan /* 23021d7bde08SKrish Sadhukhan * EFER.LME and CR0.PG are both set and CR0.PE is zero. 2303fc050452SLara Lazier * CR4.PAE needs to be set as we otherwise cannot 2304fc050452SLara Lazier * determine if CR4.PAE=0 or CR0.PE=0 triggered the 2305fc050452SLara Lazier * SVM_EXIT_ERR. 23061d7bde08SKrish Sadhukhan */ 2307fc050452SLara Lazier cr4 = cr4_saved | X86_CR4_PAE; 2308fc050452SLara Lazier vmcb->save.cr4 = cr4; 23091d7bde08SKrish Sadhukhan cr0 &= ~X86_CR0_PE; 23101d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0; 23111d7bde08SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 23121d7bde08SKrish Sadhukhan "CR0.PG=1 and CR0.PE=0 (%lx)", efer, cr0); 23131d7bde08SKrish Sadhukhan 23141d7bde08SKrish Sadhukhan /* 23151d7bde08SKrish Sadhukhan * EFER.LME, CR0.PG, CR4.PAE, CS.L, and CS.D are all non-zero. 
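* (CS.L=1 together with CS.D=1 is an architecturally illegal combination,
* so VMRUN must fail with SVM_EXIT_ERR).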
23161d7bde08SKrish Sadhukhan */ 23171d7bde08SKrish Sadhukhan u32 cs_attrib_saved = vmcb->save.cs.attrib; 23181d7bde08SKrish Sadhukhan u32 cs_attrib; 23191d7bde08SKrish Sadhukhan 23201d7bde08SKrish Sadhukhan cr0 |= X86_CR0_PE; 23211d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0; 23221d7bde08SKrish Sadhukhan cs_attrib = cs_attrib_saved | SVM_SELECTOR_L_MASK | 23231d7bde08SKrish Sadhukhan SVM_SELECTOR_DB_MASK; 23241d7bde08SKrish Sadhukhan vmcb->save.cs.attrib = cs_attrib; 23251d7bde08SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 23261d7bde08SKrish Sadhukhan "CR0.PG=1 (%lx), CR4.PAE=1 (%lx), CS.L=1 and CS.D=1 (%x)", 23271d7bde08SKrish Sadhukhan efer, cr0, cr4, cs_attrib); 23281d7bde08SKrish Sadhukhan 23291d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0_saved; 23301d7bde08SKrish Sadhukhan vmcb->save.cr4 = cr4_saved; 2331a79c9495SKrish Sadhukhan vmcb->save.efer = efer_saved; 23321d7bde08SKrish Sadhukhan vmcb->save.cs.attrib = cs_attrib_saved; 2333a79c9495SKrish Sadhukhan } 2334a79c9495SKrish Sadhukhan 2335a79c9495SKrish Sadhukhan static void test_cr0(void) 2336a79c9495SKrish Sadhukhan { 2337a79c9495SKrish Sadhukhan /* 2338e8d7a8f6SKrish Sadhukhan * Un-setting CR0.CD and setting CR0.NW is illegal combination 2339e8d7a8f6SKrish Sadhukhan */ 2340e8d7a8f6SKrish Sadhukhan u64 cr0_saved = vmcb->save.cr0; 2341e8d7a8f6SKrish Sadhukhan u64 cr0 = cr0_saved; 2342e8d7a8f6SKrish Sadhukhan 2343e8d7a8f6SKrish Sadhukhan cr0 |= X86_CR0_CD; 2344e8d7a8f6SKrish Sadhukhan cr0 &= ~X86_CR0_NW; 2345e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2346a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=0: %lx", 2347a79c9495SKrish Sadhukhan cr0); 2348e8d7a8f6SKrish Sadhukhan cr0 |= X86_CR0_NW; 2349e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2350a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=1: %lx", 2351a79c9495SKrish Sadhukhan cr0); 2352e8d7a8f6SKrish Sadhukhan cr0 &= ~X86_CR0_NW; 2353e8d7a8f6SKrish Sadhukhan cr0 &= ~X86_CR0_CD; 2354e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2355a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=0: %lx", 2356a79c9495SKrish Sadhukhan cr0); 2357e8d7a8f6SKrish Sadhukhan cr0 |= X86_CR0_NW; 2358e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2359a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_ERR, "Test CR0 CD=0,NW=1: %lx", 2360a79c9495SKrish Sadhukhan cr0); 2361e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0_saved; 23625c052c90SKrish Sadhukhan 23635c052c90SKrish Sadhukhan /* 23645c052c90SKrish Sadhukhan * CR0[63:32] are not zero 23655c052c90SKrish Sadhukhan */ 23665c052c90SKrish Sadhukhan cr0 = cr0_saved; 2367eae10e8fSKrish Sadhukhan 2368eae10e8fSKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "CR0", vmcb->save.cr0, cr0_saved, 2369eae10e8fSKrish Sadhukhan SVM_CR0_RESERVED_MASK); 23705c052c90SKrish Sadhukhan vmcb->save.cr0 = cr0_saved; 2371a79c9495SKrish Sadhukhan } 2372eae10e8fSKrish Sadhukhan 2373a79c9495SKrish Sadhukhan static void test_cr3(void) 2374a79c9495SKrish Sadhukhan { 2375a79c9495SKrish Sadhukhan /* 2376a79c9495SKrish Sadhukhan * CR3 MBZ bits based on different modes: 237729a01803SNadav Amit * [63:52] - long mode 2378a79c9495SKrish Sadhukhan */ 2379a79c9495SKrish Sadhukhan u64 cr3_saved = vmcb->save.cr3; 2380a79c9495SKrish Sadhukhan 2381a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 63, 1, 3, cr3_saved, 2382cb6524f3SPaolo Bonzini SVM_CR3_LONG_MBZ_MASK, SVM_EXIT_ERR, ""); 23836d0ecbf6SKrish Sadhukhan 23846d0ecbf6SKrish Sadhukhan 
vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_MBZ_MASK; 23856d0ecbf6SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx", 23866d0ecbf6SKrish Sadhukhan vmcb->save.cr3); 23876d0ecbf6SKrish Sadhukhan 23886d0ecbf6SKrish Sadhukhan /* 23896d0ecbf6SKrish Sadhukhan * CR3 non-MBZ reserved bits based on different modes: 2390cb6524f3SPaolo Bonzini * [11:5] [2:0] - long mode (PCIDE=0) 23916d0ecbf6SKrish Sadhukhan * [2:0] - PAE legacy mode 23926d0ecbf6SKrish Sadhukhan */ 23936d0ecbf6SKrish Sadhukhan u64 cr4_saved = vmcb->save.cr4; 23946d0ecbf6SKrish Sadhukhan u64 *pdpe = npt_get_pml4e(); 23956d0ecbf6SKrish Sadhukhan 23966d0ecbf6SKrish Sadhukhan /* 23976d0ecbf6SKrish Sadhukhan * Long mode 23986d0ecbf6SKrish Sadhukhan */ 23996d0ecbf6SKrish Sadhukhan if (this_cpu_has(X86_FEATURE_PCID)) { 24006d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved | X86_CR4_PCIDE; 24016d0ecbf6SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved, 2402cb6524f3SPaolo Bonzini SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_VMMCALL, "(PCIDE=1) "); 24036d0ecbf6SKrish Sadhukhan 24046d0ecbf6SKrish Sadhukhan vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK; 24056d0ecbf6SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx", 24066d0ecbf6SKrish Sadhukhan vmcb->save.cr3); 2407cb6524f3SPaolo Bonzini } 24086d0ecbf6SKrish Sadhukhan 24096d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved & ~X86_CR4_PCIDE; 24106d0ecbf6SKrish Sadhukhan 2411993749ffSSean Christopherson if (!npt_supported()) 2412993749ffSSean Christopherson goto skip_npt_only; 2413993749ffSSean Christopherson 24146d0ecbf6SKrish Sadhukhan /* Clear P (Present) bit in NPT in order to trigger #NPF */ 24156d0ecbf6SKrish Sadhukhan pdpe[0] &= ~1ULL; 24166d0ecbf6SKrish Sadhukhan 24176d0ecbf6SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved, 2418cb6524f3SPaolo Bonzini SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_NPF, "(PCIDE=0) "); 24196d0ecbf6SKrish Sadhukhan 24206d0ecbf6SKrish Sadhukhan pdpe[0] |= 1ULL; 2421cb6524f3SPaolo Bonzini vmcb->save.cr3 = cr3_saved; 24226d0ecbf6SKrish Sadhukhan 24236d0ecbf6SKrish Sadhukhan /* 24246d0ecbf6SKrish Sadhukhan * PAE legacy 24256d0ecbf6SKrish Sadhukhan */ 24266d0ecbf6SKrish Sadhukhan pdpe[0] &= ~1ULL; 24276d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved | X86_CR4_PAE; 24286d0ecbf6SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 2, 1, 3, cr3_saved, 2429cb6524f3SPaolo Bonzini SVM_CR3_PAE_LEGACY_RESERVED_MASK, SVM_EXIT_NPF, "(PAE) "); 24306d0ecbf6SKrish Sadhukhan 24316d0ecbf6SKrish Sadhukhan pdpe[0] |= 1ULL; 2432993749ffSSean Christopherson 2433993749ffSSean Christopherson skip_npt_only: 2434a79c9495SKrish Sadhukhan vmcb->save.cr3 = cr3_saved; 24356d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved; 2436a79c9495SKrish Sadhukhan } 2437a79c9495SKrish Sadhukhan 2438d30973c3SWei Huang /* Test CR4 MBZ bits based on legacy or long modes */ 2439a79c9495SKrish Sadhukhan static void test_cr4(void) 2440a79c9495SKrish Sadhukhan { 2441a79c9495SKrish Sadhukhan u64 cr4_saved = vmcb->save.cr4; 2442a79c9495SKrish Sadhukhan u64 efer_saved = vmcb->save.efer; 2443a79c9495SKrish Sadhukhan u64 efer = efer_saved; 2444a79c9495SKrish Sadhukhan 2445a79c9495SKrish Sadhukhan efer &= ~EFER_LME; 2446a79c9495SKrish Sadhukhan vmcb->save.efer = efer; 2447a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved, 2448cb6524f3SPaolo Bonzini SVM_CR4_LEGACY_RESERVED_MASK, SVM_EXIT_ERR, ""); 2449a79c9495SKrish Sadhukhan 2450a79c9495SKrish Sadhukhan efer |= EFER_LME; 2451a79c9495SKrish Sadhukhan vmcb->save.efer = 
efer; 2452a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved, 2453cb6524f3SPaolo Bonzini SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, ""); 2454a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved, 2455cb6524f3SPaolo Bonzini SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, ""); 2456a79c9495SKrish Sadhukhan 2457a79c9495SKrish Sadhukhan vmcb->save.cr4 = cr4_saved; 2458a79c9495SKrish Sadhukhan vmcb->save.efer = efer_saved; 2459a79c9495SKrish Sadhukhan } 2460a79c9495SKrish Sadhukhan 2461a79c9495SKrish Sadhukhan static void test_dr(void) 2462a79c9495SKrish Sadhukhan { 2463eae10e8fSKrish Sadhukhan /* 2464eae10e8fSKrish Sadhukhan * DR6[63:32] and DR7[63:32] are MBZ 2465eae10e8fSKrish Sadhukhan */ 2466eae10e8fSKrish Sadhukhan u64 dr_saved = vmcb->save.dr6; 2467eae10e8fSKrish Sadhukhan 2468eae10e8fSKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR6", vmcb->save.dr6, dr_saved, 2469eae10e8fSKrish Sadhukhan SVM_DR6_RESERVED_MASK); 2470eae10e8fSKrish Sadhukhan vmcb->save.dr6 = dr_saved; 2471eae10e8fSKrish Sadhukhan 2472eae10e8fSKrish Sadhukhan dr_saved = vmcb->save.dr7; 2473eae10e8fSKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR7", vmcb->save.dr7, dr_saved, 2474eae10e8fSKrish Sadhukhan SVM_DR7_RESERVED_MASK); 2475eae10e8fSKrish Sadhukhan 2476eae10e8fSKrish Sadhukhan vmcb->save.dr7 = dr_saved; 2477a79c9495SKrish Sadhukhan } 2478eae10e8fSKrish Sadhukhan 2479abe82380SKrish Sadhukhan /* TODO: verify if high 32-bits are sign- or zero-extended on bare metal */ 2480abe82380SKrish Sadhukhan #define TEST_BITMAP_ADDR(save_intercept, type, addr, exit_code, \ 2481abe82380SKrish Sadhukhan msg) { \ 2482abe82380SKrish Sadhukhan vmcb->control.intercept = save_intercept | 1ULL << type; \ 2483abe82380SKrish Sadhukhan if (type == INTERCEPT_MSR_PROT) \ 2484abe82380SKrish Sadhukhan vmcb->control.msrpm_base_pa = addr; \ 2485abe82380SKrish Sadhukhan else \ 2486abe82380SKrish Sadhukhan vmcb->control.iopm_base_pa = addr; \ 2487abe82380SKrish Sadhukhan report(svm_vmrun() == exit_code, \ 2488abe82380SKrish Sadhukhan "Test %s address: %lx", msg, addr); \ 2489abe82380SKrish Sadhukhan } 2490abe82380SKrish Sadhukhan 2491abe82380SKrish Sadhukhan /* 2492abe82380SKrish Sadhukhan * If the MSR or IOIO intercept table extends to a physical address that 2493abe82380SKrish Sadhukhan * is greater than or equal to the maximum supported physical address, the 2494abe82380SKrish Sadhukhan * guest state is illegal. 2495abe82380SKrish Sadhukhan * 2496abe82380SKrish Sadhukhan * The VMRUN instruction ignores the lower 12 bits of the address specified 2497abe82380SKrish Sadhukhan * in the VMCB. 2498abe82380SKrish Sadhukhan * 2499abe82380SKrish Sadhukhan * MSRPM spans 2 contiguous 4KB pages while IOPM spans 2 contiguous 4KB 2500abe82380SKrish Sadhukhan * pages + 1 byte. 2501abe82380SKrish Sadhukhan * 2502abe82380SKrish Sadhukhan * [APM vol 2] 2503abe82380SKrish Sadhukhan * 2504abe82380SKrish Sadhukhan * Note: Unallocated MSRPM addresses conforming to consistency checks generate 2505abe82380SKrish Sadhukhan * #NPF.
2506abe82380SKrish Sadhukhan */ 2507abe82380SKrish Sadhukhan static void test_msrpm_iopm_bitmap_addrs(void) 2508abe82380SKrish Sadhukhan { 2509abe82380SKrish Sadhukhan u64 saved_intercept = vmcb->control.intercept; 2510abe82380SKrish Sadhukhan u64 addr_beyond_limit = 1ull << cpuid_maxphyaddr(); 2511abe82380SKrish Sadhukhan u64 addr = virt_to_phys(msr_bitmap) & (~((1ull << 12) - 1)); 2512abe82380SKrish Sadhukhan 2513abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2514abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR, 2515abe82380SKrish Sadhukhan "MSRPM"); 2516abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2517abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE + 1, SVM_EXIT_ERR, 2518abe82380SKrish Sadhukhan "MSRPM"); 2519abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2520abe82380SKrish Sadhukhan addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR, 2521abe82380SKrish Sadhukhan "MSRPM"); 2522abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr, 2523abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "MSRPM"); 2524abe82380SKrish Sadhukhan addr |= (1ull << 12) - 1; 2525abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr, 2526abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "MSRPM"); 2527abe82380SKrish Sadhukhan 2528abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2529abe82380SKrish Sadhukhan addr_beyond_limit - 4 * PAGE_SIZE, SVM_EXIT_VMMCALL, 2530abe82380SKrish Sadhukhan "IOPM"); 2531abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2532abe82380SKrish Sadhukhan addr_beyond_limit - 3 * PAGE_SIZE, SVM_EXIT_VMMCALL, 2533abe82380SKrish Sadhukhan "IOPM"); 2534abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2535abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE - 2, SVM_EXIT_VMMCALL, 2536abe82380SKrish Sadhukhan "IOPM"); 2537abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2538abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR, 2539abe82380SKrish Sadhukhan "IOPM"); 2540abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2541abe82380SKrish Sadhukhan addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR, 2542abe82380SKrish Sadhukhan "IOPM"); 2543abe82380SKrish Sadhukhan addr = virt_to_phys(io_bitmap) & (~((1ull << 11) - 1)); 2544abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr, 2545abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "IOPM"); 2546abe82380SKrish Sadhukhan addr |= (1ull << 12) - 1; 2547abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr, 2548abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "IOPM"); 2549abe82380SKrish Sadhukhan 2550abe82380SKrish Sadhukhan vmcb->control.intercept = saved_intercept; 2551abe82380SKrish Sadhukhan } 2552abe82380SKrish Sadhukhan 2553ba3c9773SLara Lazier /* 2554ba3c9773SLara Lazier * Unlike VMSAVE, VMRUN seems not to update the value of noncanonical 2555ba3c9773SLara Lazier * segment bases in the VMCB. However, VMENTRY succeeds as documented. 
2556ba3c9773SLara Lazier */ 2557ba3c9773SLara Lazier #define TEST_CANONICAL_VMRUN(seg_base, msg) \ 2558a99070ebSKrish Sadhukhan saved_addr = seg_base; \ 2559a99070ebSKrish Sadhukhan seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \ 2560ba3c9773SLara Lazier return_value = svm_vmrun(); \ 2561ba3c9773SLara Lazier report(return_value == SVM_EXIT_VMMCALL, \ 2562ba3c9773SLara Lazier "Successful VMRUN with noncanonical %s.base", msg); \ 2563a99070ebSKrish Sadhukhan seg_base = saved_addr; 2564a99070ebSKrish Sadhukhan 2565ba3c9773SLara Lazier 2566ba3c9773SLara Lazier #define TEST_CANONICAL_VMLOAD(seg_base, msg) \ 2567ba3c9773SLara Lazier saved_addr = seg_base; \ 2568ba3c9773SLara Lazier seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \ 2569ba3c9773SLara Lazier asm volatile ("vmload %0" : : "a"(vmcb_phys) : "memory"); \ 2570ba3c9773SLara Lazier asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory"); \ 2571ba3c9773SLara Lazier report(is_canonical(seg_base), \ 2572ba3c9773SLara Lazier "Test %s.base for canonical form: %lx", msg, seg_base); \ 2573ba3c9773SLara Lazier seg_base = saved_addr; 2574ba3c9773SLara Lazier 2575ba3c9773SLara Lazier static void test_canonicalization(void) 2576a99070ebSKrish Sadhukhan { 2577a99070ebSKrish Sadhukhan u64 saved_addr; 2578ba3c9773SLara Lazier u64 return_value; 2579ba3c9773SLara Lazier u64 addr_limit; 2580ba3c9773SLara Lazier u64 vmcb_phys = virt_to_phys(vmcb); 2581ba3c9773SLara Lazier 2582ba3c9773SLara Lazier addr_limit = (this_cpu_has(X86_FEATURE_LA57)) ? 57 : 48; 2583a99070ebSKrish Sadhukhan u64 noncanonical_mask = NONCANONICAL & ~((1ul << addr_limit) - 1); 2584a99070ebSKrish Sadhukhan 2585ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.fs.base, "FS"); 2586ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.gs.base, "GS"); 2587ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.ldtr.base, "LDTR"); 2588ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.tr.base, "TR"); 2589ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.kernel_gs_base, "KERNEL GS"); 2590ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.es.base, "ES"); 2591ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.cs.base, "CS"); 2592ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.ss.base, "SS"); 2593ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.ds.base, "DS"); 2594ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.gdtr.base, "GDTR"); 2595ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.idtr.base, "IDTR"); 2596a99070ebSKrish Sadhukhan } 2597a99070ebSKrish Sadhukhan 2598665f5677SKrish Sadhukhan /* 2599665f5677SKrish Sadhukhan * When VMRUN loads a guest value of 1 in EFLAGS.TF, that value does not 2600665f5677SKrish Sadhukhan * cause a trace trap between the VMRUN and the first guest instruction, but 2601665f5677SKrish Sadhukhan * rather after completion of the first guest instruction. 
2602665f5677SKrish Sadhukhan * 2603665f5677SKrish Sadhukhan * [APM vol 2] 2604665f5677SKrish Sadhukhan */ 2605665f5677SKrish Sadhukhan u64 guest_rflags_test_trap_rip; 2606665f5677SKrish Sadhukhan 2607665f5677SKrish Sadhukhan static void guest_rflags_test_db_handler(struct ex_regs *r) 2608665f5677SKrish Sadhukhan { 2609665f5677SKrish Sadhukhan guest_rflags_test_trap_rip = r->rip; 2610665f5677SKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 2611665f5677SKrish Sadhukhan } 2612665f5677SKrish Sadhukhan 2613a79c9495SKrish Sadhukhan static void svm_guest_state_test(void) 2614a79c9495SKrish Sadhukhan { 2615a79c9495SKrish Sadhukhan test_set_guest(basic_guest_main); 2616a79c9495SKrish Sadhukhan test_efer(); 2617a79c9495SKrish Sadhukhan test_cr0(); 2618a79c9495SKrish Sadhukhan test_cr3(); 2619a79c9495SKrish Sadhukhan test_cr4(); 2620a79c9495SKrish Sadhukhan test_dr(); 2621abe82380SKrish Sadhukhan test_msrpm_iopm_bitmap_addrs(); 2622ba3c9773SLara Lazier test_canonicalization(); 2623ba29942cSKrish Sadhukhan } 2624ba29942cSKrish Sadhukhan 2625665f5677SKrish Sadhukhan extern void guest_rflags_test_guest(struct svm_test *test); 2626665f5677SKrish Sadhukhan extern u64 *insn2; 2627665f5677SKrish Sadhukhan extern u64 *guest_end; 2628665f5677SKrish Sadhukhan 2629665f5677SKrish Sadhukhan asm("guest_rflags_test_guest:\n\t" 2630665f5677SKrish Sadhukhan "push %rbp\n\t" 2631665f5677SKrish Sadhukhan ".global insn2\n\t" 2632665f5677SKrish Sadhukhan "insn2:\n\t" 2633665f5677SKrish Sadhukhan "mov %rsp,%rbp\n\t" 2634665f5677SKrish Sadhukhan "vmmcall\n\t" 2635665f5677SKrish Sadhukhan "vmmcall\n\t" 2636665f5677SKrish Sadhukhan ".global guest_end\n\t" 2637665f5677SKrish Sadhukhan "guest_end:\n\t" 2638665f5677SKrish Sadhukhan "vmmcall\n\t" 2639665f5677SKrish Sadhukhan "pop %rbp\n\t" 2640665f5677SKrish Sadhukhan "ret"); 2641665f5677SKrish Sadhukhan 2642665f5677SKrish Sadhukhan static void svm_test_singlestep(void) 2643665f5677SKrish Sadhukhan { 2644665f5677SKrish Sadhukhan handle_exception(DB_VECTOR, guest_rflags_test_db_handler); 2645665f5677SKrish Sadhukhan 2646665f5677SKrish Sadhukhan /* 2647665f5677SKrish Sadhukhan * Trap expected after completion of first guest instruction 2648665f5677SKrish Sadhukhan */ 2649665f5677SKrish Sadhukhan vmcb->save.rflags |= X86_EFLAGS_TF; 2650665f5677SKrish Sadhukhan report (__svm_vmrun((u64)guest_rflags_test_guest) == SVM_EXIT_VMMCALL && 2651665f5677SKrish Sadhukhan guest_rflags_test_trap_rip == (u64)&insn2, 2652665f5677SKrish Sadhukhan "Test EFLAGS.TF on VMRUN: trap expected after completion of first guest instruction"); 2653665f5677SKrish Sadhukhan /* 2654665f5677SKrish Sadhukhan * No trap expected 2655665f5677SKrish Sadhukhan */ 2656665f5677SKrish Sadhukhan guest_rflags_test_trap_rip = 0; 2657665f5677SKrish Sadhukhan vmcb->save.rip += 3; 2658665f5677SKrish Sadhukhan vmcb->save.rflags |= X86_EFLAGS_TF; 2659665f5677SKrish Sadhukhan report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL && 2660665f5677SKrish Sadhukhan guest_rflags_test_trap_rip == 0, "Test EFLAGS.TF on VMRUN: trap not expected"); 2661665f5677SKrish Sadhukhan 2662665f5677SKrish Sadhukhan /* 2663665f5677SKrish Sadhukhan * Let guest finish execution 2664665f5677SKrish Sadhukhan */ 2665665f5677SKrish Sadhukhan vmcb->save.rip += 3; 2666665f5677SKrish Sadhukhan report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL && 2667665f5677SKrish Sadhukhan vmcb->save.rip == (u64)&guest_end, "Test EFLAGS.TF on VMRUN: guest execution completion"); 2668665f5677SKrish Sadhukhan } 2669665f5677SKrish Sadhukhan 26707a57ef5dSMaxim Levitsky static 
bool volatile svm_errata_reproduced = false; 26717a57ef5dSMaxim Levitsky static unsigned long volatile physical = 0; 26727a57ef5dSMaxim Levitsky 26737a57ef5dSMaxim Levitsky 26747a57ef5dSMaxim Levitsky /* 26757a57ef5dSMaxim Levitsky * 26767a57ef5dSMaxim Levitsky * Test the following errata: 26777a57ef5dSMaxim Levitsky * If VMRUN/VMSAVE/VMLOAD is attempted by the nested guest, 26787a57ef5dSMaxim Levitsky * the CPU first checks the physical address in RAX against host reserved memory 26797a57ef5dSMaxim Levitsky * regions (so far only SMM_ADDR/SMM_MASK are known to cause it), 26807a57ef5dSMaxim Levitsky * and only then signals the #VMEXIT. 26817a57ef5dSMaxim Levitsky * 26827a57ef5dSMaxim Levitsky * Try to reproduce this by running vmsave on each possible 4K-aligned memory 26837a57ef5dSMaxim Levitsky * address in the low 4G where the SMM area has to reside. 26847a57ef5dSMaxim Levitsky */ 26857a57ef5dSMaxim Levitsky 26867a57ef5dSMaxim Levitsky static void gp_isr(struct ex_regs *r) 26877a57ef5dSMaxim Levitsky { 26887a57ef5dSMaxim Levitsky svm_errata_reproduced = true; 26897a57ef5dSMaxim Levitsky /* skip over the vmsave instruction */ 26907a57ef5dSMaxim Levitsky r->rip += 3; 26917a57ef5dSMaxim Levitsky } 26927a57ef5dSMaxim Levitsky 26937a57ef5dSMaxim Levitsky static void svm_vmrun_errata_test(void) 26947a57ef5dSMaxim Levitsky { 26957a57ef5dSMaxim Levitsky unsigned long *last_page = NULL; 26967a57ef5dSMaxim Levitsky 26977a57ef5dSMaxim Levitsky handle_exception(GP_VECTOR, gp_isr); 26987a57ef5dSMaxim Levitsky 26997a57ef5dSMaxim Levitsky while (!svm_errata_reproduced) { 27007a57ef5dSMaxim Levitsky 27017a57ef5dSMaxim Levitsky unsigned long *page = alloc_pages(1); 27027a57ef5dSMaxim Levitsky 27037a57ef5dSMaxim Levitsky if (!page) { 27045c3582f0SJanis Schoetterl-Glausch report_pass("All guest memory tested, no bug found"); 27057a57ef5dSMaxim Levitsky break; 27067a57ef5dSMaxim Levitsky } 27077a57ef5dSMaxim Levitsky 27087a57ef5dSMaxim Levitsky physical = virt_to_phys(page); 27097a57ef5dSMaxim Levitsky 27107a57ef5dSMaxim Levitsky asm volatile ( 27117a57ef5dSMaxim Levitsky "mov %[_physical], %%rax\n\t" 27127a57ef5dSMaxim Levitsky "vmsave %%rax\n\t" 27137a57ef5dSMaxim Levitsky 27147a57ef5dSMaxim Levitsky : /* no outputs */ 27157a57ef5dSMaxim Levitsky : [_physical] "m" (physical) 27167a57ef5dSMaxim Levitsky : "rax" /* clobbers */ 27177a57ef5dSMaxim Levitsky ); 27187a57ef5dSMaxim Levitsky 27197a57ef5dSMaxim Levitsky if (svm_errata_reproduced) { 2720198dfd0eSJanis Schoetterl-Glausch report_fail("Got #GP exception - svm errata reproduced at 0x%lx", 27217a57ef5dSMaxim Levitsky physical); 27227a57ef5dSMaxim Levitsky break; 27237a57ef5dSMaxim Levitsky } 27247a57ef5dSMaxim Levitsky 27257a57ef5dSMaxim Levitsky *page = (unsigned long)last_page; 27267a57ef5dSMaxim Levitsky last_page = page; 27277a57ef5dSMaxim Levitsky } 27287a57ef5dSMaxim Levitsky 27297a57ef5dSMaxim Levitsky while (last_page) { 27307a57ef5dSMaxim Levitsky unsigned long *page = last_page; 27317a57ef5dSMaxim Levitsky last_page = (unsigned long *)*last_page; 27327a57ef5dSMaxim Levitsky free_pages_by_order(page, 1); 27337a57ef5dSMaxim Levitsky } 27347a57ef5dSMaxim Levitsky } 27357a57ef5dSMaxim Levitsky 27360b6f6cedSKrish Sadhukhan static void vmload_vmsave_guest_main(struct svm_test *test) 27370b6f6cedSKrish Sadhukhan { 27380b6f6cedSKrish Sadhukhan u64 vmcb_phys = virt_to_phys(vmcb); 27390b6f6cedSKrish Sadhukhan 27400b6f6cedSKrish Sadhukhan asm volatile ("vmload %0" : : "a"(vmcb_phys)); 27410b6f6cedSKrish Sadhukhan asm volatile ("vmsave %0" : : "a"(vmcb_phys));
27420b6f6cedSKrish Sadhukhan } 27430b6f6cedSKrish Sadhukhan 27440b6f6cedSKrish Sadhukhan static void svm_vmload_vmsave(void) 27450b6f6cedSKrish Sadhukhan { 27460b6f6cedSKrish Sadhukhan u32 intercept_saved = vmcb->control.intercept; 27470b6f6cedSKrish Sadhukhan 27480b6f6cedSKrish Sadhukhan test_set_guest(vmload_vmsave_guest_main); 27490b6f6cedSKrish Sadhukhan 27500b6f6cedSKrish Sadhukhan /* 27510b6f6cedSKrish Sadhukhan * Disabling intercept for VMLOAD and VMSAVE doesn't cause 27520b6f6cedSKrish Sadhukhan * respective #VMEXIT to host 27530b6f6cedSKrish Sadhukhan */ 27540b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD); 27550b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE); 27560b6f6cedSKrish Sadhukhan svm_vmrun(); 27570b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 27580b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 27590b6f6cedSKrish Sadhukhan 27600b6f6cedSKrish Sadhukhan /* 27610b6f6cedSKrish Sadhukhan * Enabling intercept for VMLOAD and VMSAVE causes respective 27620b6f6cedSKrish Sadhukhan * #VMEXIT to host 27630b6f6cedSKrish Sadhukhan */ 27640b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD); 27650b6f6cedSKrish Sadhukhan svm_vmrun(); 27660b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test " 27670b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT"); 27680b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD); 27690b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE); 27700b6f6cedSKrish Sadhukhan svm_vmrun(); 27710b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test " 27720b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT"); 27730b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE); 27740b6f6cedSKrish Sadhukhan svm_vmrun(); 27750b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 27760b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 27770b6f6cedSKrish Sadhukhan 27780b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD); 27790b6f6cedSKrish Sadhukhan svm_vmrun(); 27800b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test " 27810b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT"); 27820b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD); 27830b6f6cedSKrish Sadhukhan svm_vmrun(); 27840b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 27850b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 27860b6f6cedSKrish Sadhukhan 27870b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE); 27880b6f6cedSKrish Sadhukhan svm_vmrun(); 27890b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test " 27900b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT"); 27910b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE); 27920b6f6cedSKrish Sadhukhan svm_vmrun(); 27930b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 27940b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 27950b6f6cedSKrish Sadhukhan 27960b6f6cedSKrish Sadhukhan vmcb->control.intercept = intercept_saved; 27970b6f6cedSKrish Sadhukhan } 
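/*
 * A minimal sketch, not wired into svm_tests[], of how the repeated
 * toggle-intercept/run/check sequences in svm_vmload_vmsave() above could be
 * factored.  The helper name is hypothetical and the function is marked
 * unused so it does not change what the existing tests execute.
 */
static inline void __attribute__((unused))
svm_vmload_vmsave_run_once(bool intercept_vmload, bool intercept_vmsave,
			   u32 expected_exit_code)
{
	vmcb->control.intercept &= ~((1ULL << INTERCEPT_VMLOAD) |
				     (1ULL << INTERCEPT_VMSAVE));
	if (intercept_vmload)
		vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD);
	if (intercept_vmsave)
		vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE);

	svm_vmrun();
	report(vmcb->control.exit_code == expected_exit_code,
	       "VMLOAD/VMSAVE intercept: wanted exit 0x%x, got 0x%x",
	       expected_exit_code, vmcb->control.exit_code);
}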
27980b6f6cedSKrish Sadhukhan 2799f6972bd6SLara Lazier static void prepare_vgif_enabled(struct svm_test *test) 2800f6972bd6SLara Lazier { 2801f6972bd6SLara Lazier default_prepare(test); 2802f6972bd6SLara Lazier } 2803f6972bd6SLara Lazier 2804f6972bd6SLara Lazier static void test_vgif(struct svm_test *test) 2805f6972bd6SLara Lazier { 2806f6972bd6SLara Lazier asm volatile ("vmmcall\n\tstgi\n\tvmmcall\n\tclgi\n\tvmmcall\n\t"); 2807f6972bd6SLara Lazier } 2808f6972bd6SLara Lazier 2809f6972bd6SLara Lazier static bool vgif_finished(struct svm_test *test) 2810f6972bd6SLara Lazier { 2811f6972bd6SLara Lazier switch (get_test_stage(test)) 2812f6972bd6SLara Lazier { 2813f6972bd6SLara Lazier case 0: 2814f6972bd6SLara Lazier if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 2815198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall."); 2816f6972bd6SLara Lazier return true; 2817f6972bd6SLara Lazier } 2818f6972bd6SLara Lazier vmcb->control.int_ctl |= V_GIF_ENABLED_MASK; 2819f6972bd6SLara Lazier vmcb->save.rip += 3; 2820f6972bd6SLara Lazier inc_test_stage(test); 2821f6972bd6SLara Lazier break; 2822f6972bd6SLara Lazier case 1: 2823f6972bd6SLara Lazier if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 2824198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall."); 2825f6972bd6SLara Lazier return true; 2826f6972bd6SLara Lazier } 2827f6972bd6SLara Lazier if (!(vmcb->control.int_ctl & V_GIF_MASK)) { 2828198dfd0eSJanis Schoetterl-Glausch report_fail("Failed to set VGIF when executing STGI."); 2829f6972bd6SLara Lazier vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK; 2830f6972bd6SLara Lazier return true; 2831f6972bd6SLara Lazier } 28325c3582f0SJanis Schoetterl-Glausch report_pass("STGI set VGIF bit."); 2833f6972bd6SLara Lazier vmcb->save.rip += 3; 2834f6972bd6SLara Lazier inc_test_stage(test); 2835f6972bd6SLara Lazier break; 2836f6972bd6SLara Lazier case 2: 2837f6972bd6SLara Lazier if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 2838198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall."); 2839f6972bd6SLara Lazier return true; 2840f6972bd6SLara Lazier } 2841f6972bd6SLara Lazier if (vmcb->control.int_ctl & V_GIF_MASK) { 2842198dfd0eSJanis Schoetterl-Glausch report_fail("Failed to clear VGIF when executing CLGI."); 2843f6972bd6SLara Lazier vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK; 2844f6972bd6SLara Lazier return true; 2845f6972bd6SLara Lazier } 28465c3582f0SJanis Schoetterl-Glausch report_pass("CLGI cleared VGIF bit."); 2847f6972bd6SLara Lazier vmcb->save.rip += 3; 2848f6972bd6SLara Lazier inc_test_stage(test); 2849f6972bd6SLara Lazier vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK; 2850f6972bd6SLara Lazier break; 2851f6972bd6SLara Lazier default: 2852f6972bd6SLara Lazier return true; 2853f6972bd6SLara Lazier break; 2854f6972bd6SLara Lazier } 2855f6972bd6SLara Lazier 2856f6972bd6SLara Lazier return get_test_stage(test) == 3; 2857f6972bd6SLara Lazier } 2858f6972bd6SLara Lazier 2859f6972bd6SLara Lazier static bool vgif_check(struct svm_test *test) 2860f6972bd6SLara Lazier { 2861f6972bd6SLara Lazier return get_test_stage(test) == 3; 2862f6972bd6SLara Lazier } 2863f6972bd6SLara Lazier 28648650dffeSMaxim Levitsky 28658650dffeSMaxim Levitsky static int pause_test_counter; 28668650dffeSMaxim Levitsky static int wait_counter; 28678650dffeSMaxim Levitsky 28688650dffeSMaxim Levitsky static void pause_filter_test_guest_main(struct svm_test *test) 28698650dffeSMaxim Levitsky { 28708650dffeSMaxim Levitsky int i; 28718650dffeSMaxim Levitsky for (i = 0 ; i < 
pause_test_counter ; i++) 28728650dffeSMaxim Levitsky pause(); 28738650dffeSMaxim Levitsky 28748650dffeSMaxim Levitsky if (!wait_counter) 28758650dffeSMaxim Levitsky return; 28768650dffeSMaxim Levitsky 28778650dffeSMaxim Levitsky for (i = 0; i < wait_counter; i++) 28788650dffeSMaxim Levitsky ; 28798650dffeSMaxim Levitsky 28808650dffeSMaxim Levitsky for (i = 0 ; i < pause_test_counter ; i++) 28818650dffeSMaxim Levitsky pause(); 28828650dffeSMaxim Levitsky 28838650dffeSMaxim Levitsky } 28848650dffeSMaxim Levitsky 28858650dffeSMaxim Levitsky static void pause_filter_run_test(int pause_iterations, int filter_value, int wait_iterations, int threshold) 28868650dffeSMaxim Levitsky { 28878650dffeSMaxim Levitsky test_set_guest(pause_filter_test_guest_main); 28888650dffeSMaxim Levitsky 28898650dffeSMaxim Levitsky pause_test_counter = pause_iterations; 28908650dffeSMaxim Levitsky wait_counter = wait_iterations; 28918650dffeSMaxim Levitsky 28928650dffeSMaxim Levitsky vmcb->control.pause_filter_count = filter_value; 28938650dffeSMaxim Levitsky vmcb->control.pause_filter_thresh = threshold; 28948650dffeSMaxim Levitsky svm_vmrun(); 28958650dffeSMaxim Levitsky 28968650dffeSMaxim Levitsky if (filter_value <= pause_iterations || wait_iterations < threshold) 28978650dffeSMaxim Levitsky report(vmcb->control.exit_code == SVM_EXIT_PAUSE, "expected PAUSE vmexit"); 28988650dffeSMaxim Levitsky else 28998650dffeSMaxim Levitsky report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "no PAUSE vmexit expected"); 29008650dffeSMaxim Levitsky } 29018650dffeSMaxim Levitsky 29028650dffeSMaxim Levitsky static void pause_filter_test(void) 29038650dffeSMaxim Levitsky { 29048650dffeSMaxim Levitsky if (!pause_filter_supported()) { 29058650dffeSMaxim Levitsky report_skip("PAUSE filter not supported in the guest"); 29068650dffeSMaxim Levitsky return; 29078650dffeSMaxim Levitsky } 29088650dffeSMaxim Levitsky 29098650dffeSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_PAUSE); 29108650dffeSMaxim Levitsky 29118650dffeSMaxim Levitsky // filter count smaller than pause count - expect PAUSE vmexit 29128650dffeSMaxim Levitsky pause_filter_run_test(10, 9, 0, 0); 29138650dffeSMaxim Levitsky 29148650dffeSMaxim Levitsky // filter count larger than pause count - no PAUSE vmexit expected 29158650dffeSMaxim Levitsky pause_filter_run_test(20, 21, 0, 0); 29168650dffeSMaxim Levitsky 29178650dffeSMaxim Levitsky 29188650dffeSMaxim Levitsky if (pause_threshold_supported()) { 29198650dffeSMaxim Levitsky // filter count larger than pause count per burst, and the wait between bursts 29208650dffeSMaxim Levitsky // exceeds the threshold so the filter counter resets - no PAUSE vmexit expected 29218650dffeSMaxim Levitsky pause_filter_run_test(20, 21, 1000, 10); 29228650dffeSMaxim Levitsky 29238650dffeSMaxim Levitsky // wait between bursts is below the threshold so the filter counter does not reset 29248650dffeSMaxim Levitsky // and the two bursts together exceed the filter count - expect PAUSE vmexit 29258650dffeSMaxim Levitsky pause_filter_run_test(20, 21, 10, 1000); 29268650dffeSMaxim Levitsky } else { 29278650dffeSMaxim Levitsky report_skip("PAUSE threshold not supported in the guest"); 29288650dffeSMaxim Levitsky return; 29298650dffeSMaxim Levitsky } 29308650dffeSMaxim Levitsky } 29318650dffeSMaxim Levitsky 2932694e59baSManali Shukla /* If CR0.TS and CR0.EM are cleared in L2, no #NM is generated.
*/ 2933694e59baSManali Shukla static void svm_no_nm_test(void) 29345c92f156SManali Shukla { 29355c92f156SManali Shukla write_cr0(read_cr0() & ~X86_CR0_TS); 2936694e59baSManali Shukla test_set_guest((test_guest_func)fnop); 29375c92f156SManali Shukla 29385c92f156SManali Shukla vmcb->save.cr0 = vmcb->save.cr0 & ~(X86_CR0_TS | X86_CR0_EM); 2939694e59baSManali Shukla report(svm_vmrun() == SVM_EXIT_VMMCALL, 2940d4ae0a71SThomas Huth "fnop with CR0.TS and CR0.EM unset no #NM exception"); 29415c92f156SManali Shukla } 2942f6972bd6SLara Lazier 2943ddb85855SSean Christopherson static u64 amd_get_lbr_rip(u32 msr) 2944537d39dfSMaxim Levitsky { 2945ddb85855SSean Christopherson return rdmsr(msr) & ~AMD_LBR_RECORD_MISPREDICT; 2946537d39dfSMaxim Levitsky } 2947537d39dfSMaxim Levitsky 2948ddb85855SSean Christopherson #define HOST_CHECK_LBR(from_expected, to_expected) \ 2949ddb85855SSean Christopherson do { \ 2950ddb85855SSean Christopherson TEST_EXPECT_EQ((u64)from_expected, amd_get_lbr_rip(MSR_IA32_LASTBRANCHFROMIP)); \ 2951ddb85855SSean Christopherson TEST_EXPECT_EQ((u64)to_expected, amd_get_lbr_rip(MSR_IA32_LASTBRANCHTOIP)); \ 2952ddb85855SSean Christopherson } while (0) 2953537d39dfSMaxim Levitsky 2954ddb85855SSean Christopherson /* 2955ddb85855SSean Christopherson * FIXME: Do something other than generate an exception to communicate failure. 2956ddb85855SSean Christopherson * Debugging without expected vs. actual is an absolute nightmare. 2957ddb85855SSean Christopherson */ 2958ddb85855SSean Christopherson #define GUEST_CHECK_LBR(from_expected, to_expected) \ 2959ddb85855SSean Christopherson do { \ 2960ddb85855SSean Christopherson if ((u64)(from_expected) != amd_get_lbr_rip(MSR_IA32_LASTBRANCHFROMIP)) \ 2961ddb85855SSean Christopherson asm volatile("ud2"); \ 2962ddb85855SSean Christopherson if ((u64)(to_expected) != amd_get_lbr_rip(MSR_IA32_LASTBRANCHTOIP)) \ 2963ddb85855SSean Christopherson asm volatile("ud2"); \ 2964ddb85855SSean Christopherson } while (0) 2965537d39dfSMaxim Levitsky 296692098120SSean Christopherson #define REPORT_GUEST_LBR_ERROR(vmcb) \ 296792098120SSean Christopherson report(false, "LBR guest test failed. 
Exit reason 0x%x, RIP = %lx, from = %lx, to = %lx, ex from = %lx, ex to = %lx", \ 296892098120SSean Christopherson vmcb->control.exit_code, vmcb->save.rip, \ 296992098120SSean Christopherson vmcb->save.br_from, vmcb->save.br_to, \ 297092098120SSean Christopherson vmcb->save.last_excp_from, vmcb->save.last_excp_to) 297192098120SSean Christopherson 2972537d39dfSMaxim Levitsky #define DO_BRANCH(branch_name) \ 2973537d39dfSMaxim Levitsky asm volatile ( \ 2974537d39dfSMaxim Levitsky # branch_name "_from:" \ 2975537d39dfSMaxim Levitsky "jmp " # branch_name "_to\n" \ 2976537d39dfSMaxim Levitsky "nop\n" \ 2977537d39dfSMaxim Levitsky "nop\n" \ 2978537d39dfSMaxim Levitsky # branch_name "_to:" \ 2979537d39dfSMaxim Levitsky "nop\n" \ 2980537d39dfSMaxim Levitsky ) 2981537d39dfSMaxim Levitsky 2982537d39dfSMaxim Levitsky 2983537d39dfSMaxim Levitsky extern u64 guest_branch0_from, guest_branch0_to; 2984537d39dfSMaxim Levitsky extern u64 guest_branch2_from, guest_branch2_to; 2985537d39dfSMaxim Levitsky 2986537d39dfSMaxim Levitsky extern u64 host_branch0_from, host_branch0_to; 2987537d39dfSMaxim Levitsky extern u64 host_branch2_from, host_branch2_to; 2988537d39dfSMaxim Levitsky extern u64 host_branch3_from, host_branch3_to; 2989537d39dfSMaxim Levitsky extern u64 host_branch4_from, host_branch4_to; 2990537d39dfSMaxim Levitsky 2991537d39dfSMaxim Levitsky u64 dbgctl; 2992537d39dfSMaxim Levitsky 2993537d39dfSMaxim Levitsky static void svm_lbrv_test_guest1(void) 2994537d39dfSMaxim Levitsky { 2995537d39dfSMaxim Levitsky /* 2996537d39dfSMaxim Levitsky * This guest expects the LBR to be already enabled when it starts, 2997537d39dfSMaxim Levitsky * it does a branch, and then disables the LBR and then checks. 2998537d39dfSMaxim Levitsky */ 2999537d39dfSMaxim Levitsky 3000537d39dfSMaxim Levitsky DO_BRANCH(guest_branch0); 3001537d39dfSMaxim Levitsky 3002537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3003537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3004537d39dfSMaxim Levitsky 3005537d39dfSMaxim Levitsky if (dbgctl != DEBUGCTLMSR_LBR) 3006537d39dfSMaxim Levitsky asm volatile("ud2\n"); 3007537d39dfSMaxim Levitsky if (rdmsr(MSR_IA32_DEBUGCTLMSR) != 0) 3008537d39dfSMaxim Levitsky asm volatile("ud2\n"); 3009537d39dfSMaxim Levitsky 3010ddb85855SSean Christopherson GUEST_CHECK_LBR(&guest_branch0_from, &guest_branch0_to); 3011537d39dfSMaxim Levitsky asm volatile ("vmmcall\n"); 3012537d39dfSMaxim Levitsky } 3013537d39dfSMaxim Levitsky 3014537d39dfSMaxim Levitsky static void svm_lbrv_test_guest2(void) 3015537d39dfSMaxim Levitsky { 3016537d39dfSMaxim Levitsky /* 3017537d39dfSMaxim Levitsky * This guest expects the LBR to be disabled when it starts, 3018537d39dfSMaxim Levitsky * enables it, does a branch, disables it and then checks. 
3019537d39dfSMaxim Levitsky */ 3020537d39dfSMaxim Levitsky 3021537d39dfSMaxim Levitsky DO_BRANCH(guest_branch1); 3022537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3023537d39dfSMaxim Levitsky 3024537d39dfSMaxim Levitsky if (dbgctl != 0) 3025537d39dfSMaxim Levitsky asm volatile("ud2\n"); 3026537d39dfSMaxim Levitsky 3027ddb85855SSean Christopherson GUEST_CHECK_LBR(&host_branch2_from, &host_branch2_to); 3028537d39dfSMaxim Levitsky 3029537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3030537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3031537d39dfSMaxim Levitsky DO_BRANCH(guest_branch2); 3032537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3033537d39dfSMaxim Levitsky 3034537d39dfSMaxim Levitsky if (dbgctl != DEBUGCTLMSR_LBR) 3035537d39dfSMaxim Levitsky asm volatile("ud2\n"); 3036ddb85855SSean Christopherson GUEST_CHECK_LBR(&guest_branch2_from, &guest_branch2_to); 3037537d39dfSMaxim Levitsky 3038537d39dfSMaxim Levitsky asm volatile ("vmmcall\n"); 3039537d39dfSMaxim Levitsky } 3040537d39dfSMaxim Levitsky 3041537d39dfSMaxim Levitsky static void svm_lbrv_test0(void) 3042537d39dfSMaxim Levitsky { 3043537d39dfSMaxim Levitsky report(true, "Basic LBR test"); 3044537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3045537d39dfSMaxim Levitsky DO_BRANCH(host_branch0); 3046537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3047537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3048537d39dfSMaxim Levitsky 3049554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR); 3050537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3051554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, 0); 3052537d39dfSMaxim Levitsky 3053ddb85855SSean Christopherson HOST_CHECK_LBR(&host_branch0_from, &host_branch0_to); 3054537d39dfSMaxim Levitsky } 3055537d39dfSMaxim Levitsky 3056537d39dfSMaxim Levitsky static void svm_lbrv_test1(void) 3057537d39dfSMaxim Levitsky { 3058537d39dfSMaxim Levitsky report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host(1)"); 3059537d39dfSMaxim Levitsky 30605200c1f1SSean Christopherson svm_setup_vmrun((u64)svm_lbrv_test_guest1); 3061537d39dfSMaxim Levitsky vmcb->control.virt_ext = 0; 3062537d39dfSMaxim Levitsky 3063537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3064537d39dfSMaxim Levitsky DO_BRANCH(host_branch1); 3065537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 3066537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3067537d39dfSMaxim Levitsky 3068537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 306992098120SSean Christopherson REPORT_GUEST_LBR_ERROR(vmcb); 3070537d39dfSMaxim Levitsky return; 3071537d39dfSMaxim Levitsky } 3072537d39dfSMaxim Levitsky 3073554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, 0); 3074ddb85855SSean Christopherson HOST_CHECK_LBR(&guest_branch0_from, &guest_branch0_to); 3075537d39dfSMaxim Levitsky } 3076537d39dfSMaxim Levitsky 3077537d39dfSMaxim Levitsky static void svm_lbrv_test2(void) 3078537d39dfSMaxim Levitsky { 3079537d39dfSMaxim Levitsky report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host(2)"); 3080537d39dfSMaxim Levitsky 30815200c1f1SSean Christopherson svm_setup_vmrun((u64)svm_lbrv_test_guest2); 3082537d39dfSMaxim Levitsky vmcb->control.virt_ext = 0; 3083537d39dfSMaxim Levitsky 3084537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3085537d39dfSMaxim Levitsky DO_BRANCH(host_branch2); 3086537d39dfSMaxim Levitsky 
wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3087537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 3088537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3089537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3090537d39dfSMaxim Levitsky 3091537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 309292098120SSean Christopherson REPORT_GUEST_LBR_ERROR(vmcb); 3093537d39dfSMaxim Levitsky return; 3094537d39dfSMaxim Levitsky } 3095537d39dfSMaxim Levitsky 3096554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, 0); 3097ddb85855SSean Christopherson HOST_CHECK_LBR(&guest_branch2_from, &guest_branch2_to); 3098537d39dfSMaxim Levitsky } 3099537d39dfSMaxim Levitsky 3100537d39dfSMaxim Levitsky static void svm_lbrv_nested_test1(void) 3101537d39dfSMaxim Levitsky { 3102537d39dfSMaxim Levitsky if (!lbrv_supported()) { 3103537d39dfSMaxim Levitsky report_skip("LBRV not supported in the guest"); 3104537d39dfSMaxim Levitsky return; 3105537d39dfSMaxim Levitsky } 3106537d39dfSMaxim Levitsky 3107537d39dfSMaxim Levitsky report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (1)"); 31085200c1f1SSean Christopherson svm_setup_vmrun((u64)svm_lbrv_test_guest1); 3109537d39dfSMaxim Levitsky vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK; 3110537d39dfSMaxim Levitsky vmcb->save.dbgctl = DEBUGCTLMSR_LBR; 3111537d39dfSMaxim Levitsky 3112537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3113537d39dfSMaxim Levitsky DO_BRANCH(host_branch3); 3114537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 3115537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3116537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3117537d39dfSMaxim Levitsky 3118537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 311992098120SSean Christopherson REPORT_GUEST_LBR_ERROR(vmcb); 3120537d39dfSMaxim Levitsky return; 3121537d39dfSMaxim Levitsky } 3122537d39dfSMaxim Levitsky 3123537d39dfSMaxim Levitsky if (vmcb->save.dbgctl != 0) { 3124537d39dfSMaxim Levitsky report(false, "unexpected virtual guest MSR_IA32_DEBUGCTLMSR value 0x%lx", vmcb->save.dbgctl); 3125537d39dfSMaxim Levitsky return; 3126537d39dfSMaxim Levitsky } 3127537d39dfSMaxim Levitsky 3128554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR); 3129ddb85855SSean Christopherson HOST_CHECK_LBR(&host_branch3_from, &host_branch3_to); 3130537d39dfSMaxim Levitsky } 31313f27d772SManali Shukla 3132537d39dfSMaxim Levitsky static void svm_lbrv_nested_test2(void) 3133537d39dfSMaxim Levitsky { 3134537d39dfSMaxim Levitsky if (!lbrv_supported()) { 3135537d39dfSMaxim Levitsky report_skip("LBRV not supported in the guest"); 3136537d39dfSMaxim Levitsky return; 3137537d39dfSMaxim Levitsky } 3138537d39dfSMaxim Levitsky 3139537d39dfSMaxim Levitsky report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (2)"); 31405200c1f1SSean Christopherson svm_setup_vmrun((u64)svm_lbrv_test_guest2); 3141537d39dfSMaxim Levitsky vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK; 3142537d39dfSMaxim Levitsky 3143537d39dfSMaxim Levitsky vmcb->save.dbgctl = 0; 3144537d39dfSMaxim Levitsky vmcb->save.br_from = (u64)&host_branch2_from; 3145537d39dfSMaxim Levitsky vmcb->save.br_to = (u64)&host_branch2_to; 3146537d39dfSMaxim Levitsky 3147537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3148537d39dfSMaxim Levitsky DO_BRANCH(host_branch4); 3149537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 3150537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3151537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3152537d39dfSMaxim Levitsky 
3153537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 315492098120SSean Christopherson REPORT_GUEST_LBR_ERROR(vmcb); 3155537d39dfSMaxim Levitsky return; 3156537d39dfSMaxim Levitsky } 3157537d39dfSMaxim Levitsky 3158554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR); 3159ddb85855SSean Christopherson HOST_CHECK_LBR(&host_branch4_from, &host_branch4_to); 3160537d39dfSMaxim Levitsky } 3161537d39dfSMaxim Levitsky 3162c45bccfcSMaxim Levitsky 3163c45bccfcSMaxim Levitsky // test that a nested guest which does enable INTR interception 3164c45bccfcSMaxim Levitsky // but doesn't enable virtual interrupt masking works 3165c45bccfcSMaxim Levitsky 3166c45bccfcSMaxim Levitsky static volatile int dummy_isr_recevied; 3167c45bccfcSMaxim Levitsky static void dummy_isr(isr_regs_t *regs) 3168c45bccfcSMaxim Levitsky { 3169c45bccfcSMaxim Levitsky dummy_isr_recevied++; 3170c45bccfcSMaxim Levitsky eoi(); 3171c45bccfcSMaxim Levitsky } 3172c45bccfcSMaxim Levitsky 3173c45bccfcSMaxim Levitsky 3174c45bccfcSMaxim Levitsky static volatile int nmi_recevied; 3175c45bccfcSMaxim Levitsky static void dummy_nmi_handler(struct ex_regs *regs) 3176c45bccfcSMaxim Levitsky { 3177c45bccfcSMaxim Levitsky nmi_recevied++; 3178c45bccfcSMaxim Levitsky } 3179c45bccfcSMaxim Levitsky 3180c45bccfcSMaxim Levitsky 3181c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_run_guest(volatile int *counter, int expected_vmexit) 3182c45bccfcSMaxim Levitsky { 3183c45bccfcSMaxim Levitsky if (counter) 3184c45bccfcSMaxim Levitsky *counter = 0; 3185c45bccfcSMaxim Levitsky 3186c45bccfcSMaxim Levitsky sti(); // host IF value should not matter 318727eeac46SSean Christopherson clgi(); // vmrun will set back GIF to 1 3188c45bccfcSMaxim Levitsky 3189c45bccfcSMaxim Levitsky svm_vmrun(); 3190c45bccfcSMaxim Levitsky 3191c45bccfcSMaxim Levitsky if (counter) 3192c45bccfcSMaxim Levitsky report(!*counter, "No interrupt expected"); 3193c45bccfcSMaxim Levitsky 3194c45bccfcSMaxim Levitsky stgi(); 3195c45bccfcSMaxim Levitsky 3196c45bccfcSMaxim Levitsky if (counter) 3197c45bccfcSMaxim Levitsky report(*counter == 1, "Interrupt is expected"); 3198c45bccfcSMaxim Levitsky 319927eeac46SSean Christopherson report(vmcb->control.exit_code == expected_vmexit, 320027eeac46SSean Christopherson "Wanted VM-Exit reason 0x%x, got 0x%x", 320127eeac46SSean Christopherson expected_vmexit, vmcb->control.exit_code); 3202c45bccfcSMaxim Levitsky report(vmcb->save.rflags & X86_EFLAGS_IF, "Guest should have EFLAGS.IF set now"); 3203c45bccfcSMaxim Levitsky cli(); 3204c45bccfcSMaxim Levitsky } 3205c45bccfcSMaxim Levitsky 3206c45bccfcSMaxim Levitsky 3207d0458710SMaxim Levitsky // subtest: test that enabling EFLAGS.IF is enough to trigger an interrupt 3208c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_if_guest(struct svm_test *test) 3209c45bccfcSMaxim Levitsky { 3210c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3211c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3212e4007e62SMaxim Levitsky sti_nop(); 3213c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3214c45bccfcSMaxim Levitsky } 3215c45bccfcSMaxim Levitsky 3216c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_if(void) 3217c45bccfcSMaxim Levitsky { 3218c45bccfcSMaxim Levitsky // make a physical interrupt to be pending 3219c45bccfcSMaxim Levitsky handle_irq(0x55, dummy_isr); 3220c45bccfcSMaxim Levitsky 3221c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3222c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= 
~V_INTR_MASKING_MASK; 3223c45bccfcSMaxim Levitsky vmcb->save.rflags &= ~X86_EFLAGS_IF; 3224c45bccfcSMaxim Levitsky 3225c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_if_guest); 32262602a896SMaxim Levitsky cli(); 3227c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3228c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3229c45bccfcSMaxim Levitsky } 3230c45bccfcSMaxim Levitsky 3231c45bccfcSMaxim Levitsky 3232c45bccfcSMaxim Levitsky // subtest: test that a clever guest can trigger an interrupt by setting GIF 3233c45bccfcSMaxim Levitsky // if GIF is not intercepted 3234c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif_guest(struct svm_test *test) 3235c45bccfcSMaxim Levitsky { 3236c45bccfcSMaxim Levitsky 3237c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3238c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3239c45bccfcSMaxim Levitsky 3240c45bccfcSMaxim Levitsky // clear GIF and enable IF 3241c45bccfcSMaxim Levitsky // that should still not cause VM exit 3242c45bccfcSMaxim Levitsky clgi(); 3243e4007e62SMaxim Levitsky sti_nop(); 3244c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3245c45bccfcSMaxim Levitsky 3246c45bccfcSMaxim Levitsky stgi(); 3247c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3248c45bccfcSMaxim Levitsky } 3249c45bccfcSMaxim Levitsky 3250c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif(void) 3251c45bccfcSMaxim Levitsky { 3252c45bccfcSMaxim Levitsky handle_irq(0x55, dummy_isr); 3253c45bccfcSMaxim Levitsky 3254c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3255c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3256c45bccfcSMaxim Levitsky vmcb->save.rflags &= ~X86_EFLAGS_IF; 3257c45bccfcSMaxim Levitsky 3258c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_gif_guest); 32592602a896SMaxim Levitsky cli(); 3260c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3261c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3262c45bccfcSMaxim Levitsky } 3263c45bccfcSMaxim Levitsky 3264c45bccfcSMaxim Levitsky // subtest: test that a clever guest can trigger an interrupt by setting GIF 3265c45bccfcSMaxim Levitsky // if GIF is not intercepted and interrupt comes after guest 3266c45bccfcSMaxim Levitsky // started running 3267c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif_guest2(struct svm_test *test) 3268c45bccfcSMaxim Levitsky { 3269c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3270c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3271c45bccfcSMaxim Levitsky 3272c45bccfcSMaxim Levitsky clgi(); 3273c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3274c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3275c45bccfcSMaxim Levitsky 3276c45bccfcSMaxim Levitsky stgi(); 3277c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3278c45bccfcSMaxim Levitsky } 3279c45bccfcSMaxim Levitsky 3280c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif2(void) 3281c45bccfcSMaxim Levitsky { 3282c45bccfcSMaxim Levitsky handle_irq(0x55, dummy_isr); 3283c45bccfcSMaxim Levitsky 3284c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3285c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= 
~V_INTR_MASKING_MASK; 3286c45bccfcSMaxim Levitsky vmcb->save.rflags |= X86_EFLAGS_IF; 3287c45bccfcSMaxim Levitsky 3288c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_gif_guest2); 3289c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3290c45bccfcSMaxim Levitsky } 3291c45bccfcSMaxim Levitsky 3292c45bccfcSMaxim Levitsky 3293c45bccfcSMaxim Levitsky // subtest: test that pending NMI will be handled when guest enables GIF 3294c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_nmi_guest(struct svm_test *test) 3295c45bccfcSMaxim Levitsky { 3296c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3297c45bccfcSMaxim Levitsky report(!nmi_recevied, "No NMI expected"); 3298c45bccfcSMaxim Levitsky cli(); // should have no effect 3299c45bccfcSMaxim Levitsky 3300c45bccfcSMaxim Levitsky clgi(); 3301c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI, 0); 3302e4007e62SMaxim Levitsky sti_nop(); // should have no effect 3303c45bccfcSMaxim Levitsky report(!nmi_recevied, "No NMI expected"); 3304c45bccfcSMaxim Levitsky 3305c45bccfcSMaxim Levitsky stgi(); 3306c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3307c45bccfcSMaxim Levitsky } 3308c45bccfcSMaxim Levitsky 3309c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_nmi(void) 3310c45bccfcSMaxim Levitsky { 3311c45bccfcSMaxim Levitsky handle_exception(2, dummy_nmi_handler); 3312c45bccfcSMaxim Levitsky 3313c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_NMI); 3314c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3315c45bccfcSMaxim Levitsky vmcb->save.rflags |= X86_EFLAGS_IF; 3316c45bccfcSMaxim Levitsky 3317c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_nmi_guest); 3318c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&nmi_recevied, SVM_EXIT_NMI); 3319c45bccfcSMaxim Levitsky } 3320c45bccfcSMaxim Levitsky 3321c45bccfcSMaxim Levitsky // test that pending SMI will be handled when guest enables GIF 3322c45bccfcSMaxim Levitsky // TODO: can't really count #SMIs so just test that guest doesn't hang 3323c45bccfcSMaxim Levitsky // and VMexits on SMI 3324c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_smi_guest(struct svm_test *test) 3325c45bccfcSMaxim Levitsky { 3326c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3327c45bccfcSMaxim Levitsky 3328c45bccfcSMaxim Levitsky clgi(); 3329c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_SMI, 0); 3330e4007e62SMaxim Levitsky sti_nop(); // should have no effect 3331c45bccfcSMaxim Levitsky stgi(); 3332c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3333c45bccfcSMaxim Levitsky } 3334c45bccfcSMaxim Levitsky 3335c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_smi(void) 3336c45bccfcSMaxim Levitsky { 3337c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_SMI); 3338c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3339c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_smi_guest); 3340c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(NULL, SVM_EXIT_SMI); 3341c45bccfcSMaxim Levitsky } 3342c45bccfcSMaxim Levitsky 33438177dc62SManali Shukla static void svm_l2_ac_test(void) 33448177dc62SManali Shukla { 33458177dc62SManali Shukla bool hit_ac = false; 33468177dc62SManali Shukla 33478177dc62SManali Shukla write_cr0(read_cr0() | X86_CR0_AM); 33488177dc62SManali Shukla write_rflags(read_rflags() | X86_EFLAGS_AC); 33498177dc62SManali Shukla 
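	/*
	 * Alignment-check faults (#AC) are raised only at CPL 3, and only when
	 * both CR0.AM and EFLAGS.AC are set (as arranged above), which is why
	 * the unaligned access is generated through the usermode helper below.
	 */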
33508177dc62SManali Shukla run_in_user(generate_usermode_ac, AC_VECTOR, 0, 0, 0, 0, &hit_ac); 33518177dc62SManali Shukla report(hit_ac, "Usermode #AC handled in L2"); 33528177dc62SManali Shukla vmmcall(); 33538177dc62SManali Shukla } 33548177dc62SManali Shukla 33558177dc62SManali Shukla struct svm_exception_test { 33568177dc62SManali Shukla u8 vector; 33578177dc62SManali Shukla void (*guest_code)(void); 33588177dc62SManali Shukla }; 33598177dc62SManali Shukla 33608177dc62SManali Shukla struct svm_exception_test svm_exception_tests[] = { 33618177dc62SManali Shukla { GP_VECTOR, generate_non_canonical_gp }, 33628177dc62SManali Shukla { UD_VECTOR, generate_ud }, 33638177dc62SManali Shukla { DE_VECTOR, generate_de }, 33648177dc62SManali Shukla { DB_VECTOR, generate_single_step_db }, 336544550f53SManali Shukla { BP_VECTOR, generate_bp }, 33668177dc62SManali Shukla { AC_VECTOR, svm_l2_ac_test }, 33670851b7f7SManali Shukla { OF_VECTOR, generate_of }, 3368694e59baSManali Shukla { NM_VECTOR, generate_cr0_ts_nm }, 3369694e59baSManali Shukla { NM_VECTOR, generate_cr0_em_nm }, 33708177dc62SManali Shukla }; 33718177dc62SManali Shukla 33728177dc62SManali Shukla static u8 svm_exception_test_vector; 33738177dc62SManali Shukla 33748177dc62SManali Shukla static void svm_exception_handler(struct ex_regs *regs) 33758177dc62SManali Shukla { 33768177dc62SManali Shukla report(regs->vector == svm_exception_test_vector, 33778177dc62SManali Shukla "Handling %s in L2's exception handler", 33788177dc62SManali Shukla exception_mnemonic(svm_exception_test_vector)); 33798177dc62SManali Shukla vmmcall(); 33808177dc62SManali Shukla } 33818177dc62SManali Shukla 33828177dc62SManali Shukla static void handle_exception_in_l2(u8 vector) 33838177dc62SManali Shukla { 33848177dc62SManali Shukla handler old_handler = handle_exception(vector, svm_exception_handler); 33858177dc62SManali Shukla svm_exception_test_vector = vector; 33868177dc62SManali Shukla 33878177dc62SManali Shukla report(svm_vmrun() == SVM_EXIT_VMMCALL, 33888177dc62SManali Shukla "%s handled by L2", exception_mnemonic(vector)); 33898177dc62SManali Shukla 33908177dc62SManali Shukla handle_exception(vector, old_handler); 33918177dc62SManali Shukla } 33928177dc62SManali Shukla 33938177dc62SManali Shukla static void handle_exception_in_l1(u32 vector) 33948177dc62SManali Shukla { 33958177dc62SManali Shukla u32 old_ie = vmcb->control.intercept_exceptions; 33968177dc62SManali Shukla 33978177dc62SManali Shukla vmcb->control.intercept_exceptions |= (1ULL << vector); 33988177dc62SManali Shukla 33998177dc62SManali Shukla report(svm_vmrun() == (SVM_EXIT_EXCP_BASE + vector), 34008177dc62SManali Shukla "%s handled by L1", exception_mnemonic(vector)); 34018177dc62SManali Shukla 34028177dc62SManali Shukla vmcb->control.intercept_exceptions = old_ie; 34038177dc62SManali Shukla } 34048177dc62SManali Shukla 34058177dc62SManali Shukla static void svm_exception_test(void) 34068177dc62SManali Shukla { 34078177dc62SManali Shukla struct svm_exception_test *t; 34088177dc62SManali Shukla int i; 34098177dc62SManali Shukla 34108177dc62SManali Shukla for (i = 0; i < ARRAY_SIZE(svm_exception_tests); i++) { 34118177dc62SManali Shukla t = &svm_exception_tests[i]; 34128177dc62SManali Shukla test_set_guest((test_guest_func)t->guest_code); 34138177dc62SManali Shukla 34148177dc62SManali Shukla handle_exception_in_l2(t->vector); 34158177dc62SManali Shukla vmcb_ident(vmcb); 34168177dc62SManali Shukla 34178177dc62SManali Shukla handle_exception_in_l1(t->vector); 34188177dc62SManali Shukla 
vmcb_ident(vmcb); 34198177dc62SManali Shukla } 34208177dc62SManali Shukla } 34218177dc62SManali Shukla 3422c64f24fdSMaxim Levitsky static void shutdown_intercept_test_guest(struct svm_test *test) 3423c64f24fdSMaxim Levitsky { 3424c64f24fdSMaxim Levitsky asm volatile ("ud2"); 3425c64f24fdSMaxim Levitsky report_fail("should not reach here\n"); 3426c64f24fdSMaxim Levitsky 3427c64f24fdSMaxim Levitsky } 3428c64f24fdSMaxim Levitsky 3429c64f24fdSMaxim Levitsky static void svm_shutdown_intercept_test(void) 3430c64f24fdSMaxim Levitsky { 3431c64f24fdSMaxim Levitsky test_set_guest(shutdown_intercept_test_guest); 3432c64f24fdSMaxim Levitsky vmcb->save.idtr.base = (u64)alloc_vpage(); 3433c64f24fdSMaxim Levitsky vmcb->control.intercept |= (1ULL << INTERCEPT_SHUTDOWN); 3434c64f24fdSMaxim Levitsky svm_vmrun(); 3435c64f24fdSMaxim Levitsky report(vmcb->control.exit_code == SVM_EXIT_SHUTDOWN, "shutdown test passed"); 3436c64f24fdSMaxim Levitsky } 3437c64f24fdSMaxim Levitsky 34383f27d772SManali Shukla struct svm_test svm_tests[] = { 3439ad879127SKrish Sadhukhan { "null", default_supported, default_prepare, 3440ad879127SKrish Sadhukhan default_prepare_gif_clear, null_test, 3441ad879127SKrish Sadhukhan default_finished, null_check }, 3442ad879127SKrish Sadhukhan { "vmrun", default_supported, default_prepare, 3443ad879127SKrish Sadhukhan default_prepare_gif_clear, test_vmrun, 3444ad879127SKrish Sadhukhan default_finished, check_vmrun }, 3445ad879127SKrish Sadhukhan { "ioio", default_supported, prepare_ioio, 3446ad879127SKrish Sadhukhan default_prepare_gif_clear, test_ioio, 3447ad879127SKrish Sadhukhan ioio_finished, check_ioio }, 3448ad879127SKrish Sadhukhan { "vmrun intercept check", default_supported, prepare_no_vmrun_int, 3449ad879127SKrish Sadhukhan default_prepare_gif_clear, null_test, default_finished, 3450ad879127SKrish Sadhukhan check_no_vmrun_int }, 3451401299a5SPaolo Bonzini { "rsm", default_supported, 3452401299a5SPaolo Bonzini prepare_rsm_intercept, default_prepare_gif_clear, 3453401299a5SPaolo Bonzini test_rsm_intercept, finished_rsm_intercept, check_rsm_intercept }, 3454ad879127SKrish Sadhukhan { "cr3 read intercept", default_supported, 3455ad879127SKrish Sadhukhan prepare_cr3_intercept, default_prepare_gif_clear, 3456ad879127SKrish Sadhukhan test_cr3_intercept, default_finished, check_cr3_intercept }, 3457ad879127SKrish Sadhukhan { "cr3 read nointercept", default_supported, default_prepare, 3458ad879127SKrish Sadhukhan default_prepare_gif_clear, test_cr3_intercept, default_finished, 3459ad879127SKrish Sadhukhan check_cr3_nointercept }, 3460ad879127SKrish Sadhukhan { "cr3 read intercept emulate", smp_supported, 3461ad879127SKrish Sadhukhan prepare_cr3_intercept_bypass, default_prepare_gif_clear, 3462ad879127SKrish Sadhukhan test_cr3_intercept_bypass, default_finished, check_cr3_intercept }, 3463ad879127SKrish Sadhukhan { "dr intercept check", default_supported, prepare_dr_intercept, 3464ad879127SKrish Sadhukhan default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished, 3465ad879127SKrish Sadhukhan check_dr_intercept }, 3466ad879127SKrish Sadhukhan { "next_rip", next_rip_supported, prepare_next_rip, 3467ad879127SKrish Sadhukhan default_prepare_gif_clear, test_next_rip, 3468ad879127SKrish Sadhukhan default_finished, check_next_rip }, 3469ad879127SKrish Sadhukhan { "msr intercept check", default_supported, prepare_msr_intercept, 3470ad879127SKrish Sadhukhan default_prepare_gif_clear, test_msr_intercept, 3471ad879127SKrish Sadhukhan msr_intercept_finished, check_msr_intercept }, 
	{ "mode_switch", default_supported, prepare_mode_switch,
	  default_prepare_gif_clear, test_mode_switch,
	  mode_switch_finished, check_mode_switch },
	{ "asid_zero", default_supported, prepare_asid_zero,
	  default_prepare_gif_clear, test_asid_zero,
	  default_finished, check_asid_zero },
	{ "sel_cr0_bug", default_supported, sel_cr0_bug_prepare,
	  default_prepare_gif_clear, sel_cr0_bug_test,
	  sel_cr0_bug_finished, sel_cr0_bug_check },
	{ "tsc_adjust", tsc_adjust_supported, tsc_adjust_prepare,
	  default_prepare_gif_clear, tsc_adjust_test,
	  default_finished, tsc_adjust_check },
	{ "latency_run_exit", default_supported, latency_prepare,
	  default_prepare_gif_clear, latency_test,
	  latency_finished, latency_check },
	{ "latency_run_exit_clean", default_supported, latency_prepare,
	  default_prepare_gif_clear, latency_test,
	  latency_finished_clean, latency_check },
	{ "latency_svm_insn", default_supported, lat_svm_insn_prepare,
	  default_prepare_gif_clear, null_test,
	  lat_svm_insn_finished, lat_svm_insn_check },
	{ "exc_inject", default_supported, exc_inject_prepare,
	  default_prepare_gif_clear, exc_inject_test,
	  exc_inject_finished, exc_inject_check },
	{ "pending_event", default_supported, pending_event_prepare,
	  default_prepare_gif_clear,
	  pending_event_test, pending_event_finished, pending_event_check },
	{ "pending_event_cli", default_supported, pending_event_cli_prepare,
	  pending_event_cli_prepare_gif_clear,
	  pending_event_cli_test, pending_event_cli_finished,
	  pending_event_cli_check },
	{ "interrupt", default_supported, interrupt_prepare,
	  default_prepare_gif_clear, interrupt_test,
	  interrupt_finished, interrupt_check },
	{ "nmi", default_supported, nmi_prepare,
	  default_prepare_gif_clear, nmi_test,
	  nmi_finished, nmi_check },
	{ "nmi_hlt", smp_supported, nmi_prepare,
	  default_prepare_gif_clear, nmi_hlt_test,
	  nmi_hlt_finished, nmi_hlt_check },
	{ "vnmi", vnmi_supported, vnmi_prepare,
	  default_prepare_gif_clear, vnmi_test,
	  vnmi_finished, vnmi_check },
	{ "virq_inject", default_supported, virq_inject_prepare,
	  default_prepare_gif_clear, virq_inject_test,
	  virq_inject_finished, virq_inject_check },
	{ "virq_inject_within_shadow", default_supported, virq_inject_within_shadow_prepare,
	  virq_inject_within_shadow_prepare_gif_clear,
	  virq_inject_within_shadow_test,
	  virq_inject_within_shadow_finished, virq_inject_within_shadow_check },
	{ "reg_corruption", default_supported, reg_corruption_prepare,
	  default_prepare_gif_clear, reg_corruption_test,
	  reg_corruption_finished, reg_corruption_check },
	{ "svm_init_startup_test", smp_supported, init_startup_prepare,
	  default_prepare_gif_clear, null_test,
	  init_startup_finished, init_startup_check },
	{ "svm_init_intercept_test", smp_supported, init_intercept_prepare,
	  default_prepare_gif_clear, init_intercept_test,
	  init_intercept_finished, init_intercept_check, .on_vcpu = 2 },
	{ "host_rflags", default_supported, host_rflags_prepare,
	  host_rflags_prepare_gif_clear, host_rflags_test,
	  host_rflags_finished, host_rflags_check },
	{ "vgif", vgif_supported, prepare_vgif_enabled,
	  default_prepare_gif_clear, test_vgif, vgif_finished,
	  vgif_check },
	TEST(svm_cr4_osxsave_test),
	TEST(svm_guest_state_test),
	TEST(svm_vmrun_errata_test),
	TEST(svm_vmload_vmsave),
	TEST(svm_test_singlestep),
	TEST(svm_no_nm_test),
	TEST(svm_exception_test),
	TEST(svm_lbrv_test0),
	TEST(svm_lbrv_test1),
	TEST(svm_lbrv_test2),
	TEST(svm_lbrv_nested_test1),
	TEST(svm_lbrv_nested_test2),
	TEST(svm_intr_intercept_mix_if),
	TEST(svm_intr_intercept_mix_gif),
	TEST(svm_intr_intercept_mix_gif2),
	TEST(svm_intr_intercept_mix_nmi),
	TEST(svm_intr_intercept_mix_smi),
	TEST(svm_tsc_scale_test),
	TEST(pause_filter_test),
	TEST(svm_shutdown_intercept_test),
	{ NULL, NULL, NULL, NULL, NULL, NULL, NULL }
};

int main(int ac, char **av)
{
	setup_vm();
	return run_svm_tests(ac, av, svm_tests);
}
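/*
 * A minimal sketch of how a further test could be registered via the
 * TEST() entries above.  svm_dummy_guest()/svm_dummy_test() are hypothetical
 * names used only to illustrate the shape of such a test: run one guest that
 * exits with VMMCALL, then check the exit code from L1.
 *
 *	static void svm_dummy_guest(struct svm_test *test)
 *	{
 *		vmmcall();
 *	}
 *
 *	static void svm_dummy_test(void)
 *	{
 *		test_set_guest(svm_dummy_guest);
 *		report(svm_vmrun() == SVM_EXIT_VMMCALL, "dummy guest ran to VMMCALL");
 *	}
 *
 * The function would then be added to svm_tests[] as TEST(svm_dummy_test),
 * before the NULL terminator entry.
 */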