1ad879127SKrish Sadhukhan #include "svm.h" 2ad879127SKrish Sadhukhan #include "libcflat.h" 3ad879127SKrish Sadhukhan #include "processor.h" 4ad879127SKrish Sadhukhan #include "desc.h" 5ad879127SKrish Sadhukhan #include "msr.h" 6ad879127SKrish Sadhukhan #include "vm.h" 7ad879127SKrish Sadhukhan #include "smp.h" 8ad879127SKrish Sadhukhan #include "types.h" 9ad879127SKrish Sadhukhan #include "alloc_page.h" 10ad879127SKrish Sadhukhan #include "isr.h" 11ad879127SKrish Sadhukhan #include "apic.h" 129da1f4d8SCathy Avery #include "delay.h" 13ddb85855SSean Christopherson #include "util.h" 148177dc62SManali Shukla #include "x86/usermode.h" 15*c64f24fdSMaxim Levitsky #include "vmalloc.h" 16ad879127SKrish Sadhukhan 17ad879127SKrish Sadhukhan #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f 18ad879127SKrish Sadhukhan 19ad879127SKrish Sadhukhan #define LATENCY_RUNS 1000000 20ad879127SKrish Sadhukhan 21ad879127SKrish Sadhukhan u64 tsc_start; 22ad879127SKrish Sadhukhan u64 tsc_end; 23ad879127SKrish Sadhukhan 24ad879127SKrish Sadhukhan u64 vmrun_sum, vmexit_sum; 25ad879127SKrish Sadhukhan u64 vmsave_sum, vmload_sum; 26ad879127SKrish Sadhukhan u64 stgi_sum, clgi_sum; 27ad879127SKrish Sadhukhan u64 latvmrun_max; 28ad879127SKrish Sadhukhan u64 latvmrun_min; 29ad879127SKrish Sadhukhan u64 latvmexit_max; 30ad879127SKrish Sadhukhan u64 latvmexit_min; 31ad879127SKrish Sadhukhan u64 latvmload_max; 32ad879127SKrish Sadhukhan u64 latvmload_min; 33ad879127SKrish Sadhukhan u64 latvmsave_max; 34ad879127SKrish Sadhukhan u64 latvmsave_min; 35ad879127SKrish Sadhukhan u64 latstgi_max; 36ad879127SKrish Sadhukhan u64 latstgi_min; 37ad879127SKrish Sadhukhan u64 latclgi_max; 38ad879127SKrish Sadhukhan u64 latclgi_min; 39ad879127SKrish Sadhukhan u64 runs; 40ad879127SKrish Sadhukhan 41ad879127SKrish Sadhukhan static void null_test(struct svm_test *test) 42ad879127SKrish Sadhukhan { 43ad879127SKrish Sadhukhan } 44ad879127SKrish Sadhukhan 45ad879127SKrish Sadhukhan static bool null_check(struct svm_test *test) 46ad879127SKrish Sadhukhan { 47096cf7feSPaolo Bonzini return vmcb->control.exit_code == SVM_EXIT_VMMCALL; 48ad879127SKrish Sadhukhan } 49ad879127SKrish Sadhukhan 50ad879127SKrish Sadhukhan static void prepare_no_vmrun_int(struct svm_test *test) 51ad879127SKrish Sadhukhan { 52096cf7feSPaolo Bonzini vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN); 53ad879127SKrish Sadhukhan } 54ad879127SKrish Sadhukhan 55ad879127SKrish Sadhukhan static bool check_no_vmrun_int(struct svm_test *test) 56ad879127SKrish Sadhukhan { 57096cf7feSPaolo Bonzini return vmcb->control.exit_code == SVM_EXIT_ERR; 58ad879127SKrish Sadhukhan } 59ad879127SKrish Sadhukhan 60ad879127SKrish Sadhukhan static void test_vmrun(struct svm_test *test) 61ad879127SKrish Sadhukhan { 62096cf7feSPaolo Bonzini asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb))); 63ad879127SKrish Sadhukhan } 64ad879127SKrish Sadhukhan 65ad879127SKrish Sadhukhan static bool check_vmrun(struct svm_test *test) 66ad879127SKrish Sadhukhan { 67096cf7feSPaolo Bonzini return vmcb->control.exit_code == SVM_EXIT_VMRUN; 68ad879127SKrish Sadhukhan } 69ad879127SKrish Sadhukhan 70401299a5SPaolo Bonzini static void prepare_rsm_intercept(struct svm_test *test) 71401299a5SPaolo Bonzini { 72401299a5SPaolo Bonzini default_prepare(test); 73401299a5SPaolo Bonzini vmcb->control.intercept |= 1 << INTERCEPT_RSM; 74401299a5SPaolo Bonzini vmcb->control.intercept_exceptions |= (1ULL << UD_VECTOR); 75401299a5SPaolo Bonzini } 76401299a5SPaolo Bonzini 77401299a5SPaolo Bonzini static void test_rsm_intercept(struct svm_test *test) 78401299a5SPaolo Bonzini { 79401299a5SPaolo Bonzini asm volatile ("rsm" : : : "memory"); 80401299a5SPaolo Bonzini } 81401299a5SPaolo Bonzini 82401299a5SPaolo Bonzini static bool check_rsm_intercept(struct svm_test *test) 83401299a5SPaolo Bonzini { 84401299a5SPaolo Bonzini return get_test_stage(test) == 2; 85401299a5SPaolo Bonzini } 86401299a5SPaolo Bonzini 87401299a5SPaolo Bonzini static bool finished_rsm_intercept(struct svm_test *test) 88401299a5SPaolo Bonzini { 89401299a5SPaolo Bonzini switch (get_test_stage(test)) { 90401299a5SPaolo Bonzini case 0: 91401299a5SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_RSM) { 92198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to rsm. Exit reason 0x%x", 93401299a5SPaolo Bonzini vmcb->control.exit_code); 94401299a5SPaolo Bonzini return true; 95401299a5SPaolo Bonzini } 96401299a5SPaolo Bonzini vmcb->control.intercept &= ~(1 << INTERCEPT_RSM); 97401299a5SPaolo Bonzini inc_test_stage(test); 98401299a5SPaolo Bonzini break; 99401299a5SPaolo Bonzini 100401299a5SPaolo Bonzini case 1: 101401299a5SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_EXCP_BASE + UD_VECTOR) { 102198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to #UD. Exit reason 0x%x", 103401299a5SPaolo Bonzini vmcb->control.exit_code); 104401299a5SPaolo Bonzini return true; 105401299a5SPaolo Bonzini } 106401299a5SPaolo Bonzini vmcb->save.rip += 2; 107401299a5SPaolo Bonzini inc_test_stage(test); 108401299a5SPaolo Bonzini break; 109401299a5SPaolo Bonzini 110401299a5SPaolo Bonzini default: 111401299a5SPaolo Bonzini return true; 112401299a5SPaolo Bonzini } 113401299a5SPaolo Bonzini return get_test_stage(test) == 2; 114401299a5SPaolo Bonzini } 115401299a5SPaolo Bonzini 116ad879127SKrish Sadhukhan static void prepare_cr3_intercept(struct svm_test *test) 117ad879127SKrish Sadhukhan { 118ad879127SKrish Sadhukhan default_prepare(test); 119096cf7feSPaolo Bonzini vmcb->control.intercept_cr_read |= 1 << 3; 120ad879127SKrish Sadhukhan } 121ad879127SKrish Sadhukhan 122ad879127SKrish Sadhukhan static void test_cr3_intercept(struct svm_test *test) 123ad879127SKrish Sadhukhan { 124ad879127SKrish Sadhukhan asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory"); 125ad879127SKrish Sadhukhan } 126ad879127SKrish Sadhukhan 127ad879127SKrish Sadhukhan static bool check_cr3_intercept(struct svm_test *test) 128ad879127SKrish Sadhukhan { 129096cf7feSPaolo Bonzini return vmcb->control.exit_code == SVM_EXIT_READ_CR3; 130ad879127SKrish Sadhukhan } 131ad879127SKrish Sadhukhan 132ad879127SKrish Sadhukhan static bool check_cr3_nointercept(struct svm_test *test) 133ad879127SKrish Sadhukhan { 134ad879127SKrish Sadhukhan return null_check(test) && test->scratch == read_cr3(); 135ad879127SKrish Sadhukhan } 136ad879127SKrish Sadhukhan 137ad879127SKrish Sadhukhan static void corrupt_cr3_intercept_bypass(void *_test) 138ad879127SKrish Sadhukhan { 139ad879127SKrish Sadhukhan struct svm_test *test = _test; 140ad879127SKrish Sadhukhan extern volatile u32 mmio_insn; 141ad879127SKrish Sadhukhan 142ad879127SKrish Sadhukhan while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2)) 143ad879127SKrish Sadhukhan pause(); 144ad879127SKrish Sadhukhan pause(); 145ad879127SKrish Sadhukhan pause(); 146ad879127SKrish Sadhukhan pause(); 147ad879127SKrish Sadhukhan mmio_insn = 0x90d8200f; // mov %cr3, %rax; nop 148ad879127SKrish Sadhukhan } 149ad879127SKrish Sadhukhan 150ad879127SKrish Sadhukhan static void prepare_cr3_intercept_bypass(struct svm_test *test) 151ad879127SKrish Sadhukhan { 152ad879127SKrish Sadhukhan default_prepare(test); 153096cf7feSPaolo Bonzini vmcb->control.intercept_cr_read |= 1 << 3; 154ad879127SKrish Sadhukhan on_cpu_async(1, corrupt_cr3_intercept_bypass, test); 155ad879127SKrish Sadhukhan } 156ad879127SKrish Sadhukhan 157ad879127SKrish Sadhukhan static void test_cr3_intercept_bypass(struct svm_test *test) 158ad879127SKrish Sadhukhan { 159ad879127SKrish Sadhukhan ulong a = 0xa0000; 160ad879127SKrish Sadhukhan 161ad879127SKrish Sadhukhan test->scratch = 1; 162ad879127SKrish Sadhukhan while (test->scratch != 2) 163ad879127SKrish Sadhukhan barrier(); 164ad879127SKrish Sadhukhan 165ad879127SKrish Sadhukhan asm volatile ("mmio_insn: mov %0, (%0); nop" 166ad879127SKrish Sadhukhan : "+a"(a) : : "memory"); 167ad879127SKrish Sadhukhan test->scratch = a; 168ad879127SKrish Sadhukhan } 169ad879127SKrish Sadhukhan 170ad879127SKrish Sadhukhan static void prepare_dr_intercept(struct svm_test *test) 171ad879127SKrish Sadhukhan { 172ad879127SKrish Sadhukhan default_prepare(test); 173096cf7feSPaolo Bonzini vmcb->control.intercept_dr_read = 0xff; 174096cf7feSPaolo Bonzini vmcb->control.intercept_dr_write = 0xff; 175ad879127SKrish Sadhukhan } 176ad879127SKrish Sadhukhan 177ad879127SKrish Sadhukhan static void test_dr_intercept(struct svm_test *test) 178ad879127SKrish Sadhukhan { 179ad879127SKrish Sadhukhan unsigned int i, failcnt = 0; 180ad879127SKrish Sadhukhan 181ad879127SKrish Sadhukhan /* Loop testing debug register reads */ 182ad879127SKrish Sadhukhan for (i = 0; i < 8; i++) { 183ad879127SKrish Sadhukhan 184ad879127SKrish Sadhukhan switch (i) { 185ad879127SKrish Sadhukhan case 0: 186ad879127SKrish Sadhukhan asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory"); 187ad879127SKrish Sadhukhan break; 188ad879127SKrish Sadhukhan case 1: 189ad879127SKrish Sadhukhan asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory"); 190ad879127SKrish Sadhukhan break; 191ad879127SKrish Sadhukhan case 2: 192ad879127SKrish Sadhukhan asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory"); 193ad879127SKrish Sadhukhan break; 194ad879127SKrish Sadhukhan case 3: 195ad879127SKrish Sadhukhan asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory"); 196ad879127SKrish Sadhukhan break; 197ad879127SKrish Sadhukhan case 4: 198ad879127SKrish Sadhukhan asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory"); 199ad879127SKrish Sadhukhan break; 200ad879127SKrish Sadhukhan case 5: 201ad879127SKrish Sadhukhan asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory"); 202ad879127SKrish Sadhukhan break; 203ad879127SKrish Sadhukhan case 6: 204ad879127SKrish Sadhukhan asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory"); 205ad879127SKrish Sadhukhan break; 206ad879127SKrish Sadhukhan case 7: 207ad879127SKrish Sadhukhan asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory"); 208ad879127SKrish Sadhukhan break; 209ad879127SKrish Sadhukhan } 210ad879127SKrish Sadhukhan 211ad879127SKrish Sadhukhan if (test->scratch != i) { 212198dfd0eSJanis Schoetterl-Glausch report_fail("dr%u read intercept", i); 213ad879127SKrish Sadhukhan failcnt++; 214ad879127SKrish Sadhukhan } 215ad879127SKrish Sadhukhan } 216ad879127SKrish Sadhukhan 217ad879127SKrish Sadhukhan /* Loop testing debug register writes */ 218ad879127SKrish Sadhukhan for (i = 0; i < 8; i++) { 219ad879127SKrish Sadhukhan 220ad879127SKrish Sadhukhan switch (i) { 221ad879127SKrish Sadhukhan case 0: 222ad879127SKrish Sadhukhan asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory"); 223ad879127SKrish Sadhukhan break; 224ad879127SKrish Sadhukhan case 1: 225ad879127SKrish Sadhukhan asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory"); 226ad879127SKrish Sadhukhan break; 227ad879127SKrish Sadhukhan case 2: 228ad879127SKrish Sadhukhan asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory"); 229ad879127SKrish Sadhukhan break; 230ad879127SKrish Sadhukhan case 3: 231ad879127SKrish Sadhukhan asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory"); 232ad879127SKrish Sadhukhan break; 233ad879127SKrish Sadhukhan case 4: 234ad879127SKrish Sadhukhan asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory"); 235ad879127SKrish Sadhukhan break; 236ad879127SKrish Sadhukhan case 5: 237ad879127SKrish Sadhukhan asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory"); 238ad879127SKrish Sadhukhan break; 239ad879127SKrish Sadhukhan case 6: 240ad879127SKrish Sadhukhan asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory"); 241ad879127SKrish Sadhukhan break; 242ad879127SKrish Sadhukhan case 7: 243ad879127SKrish Sadhukhan asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory"); 244ad879127SKrish Sadhukhan break; 245ad879127SKrish Sadhukhan } 246ad879127SKrish Sadhukhan 247ad879127SKrish Sadhukhan if (test->scratch != i) { 248198dfd0eSJanis Schoetterl-Glausch report_fail("dr%u write intercept", i); 249ad879127SKrish Sadhukhan failcnt++; 250ad879127SKrish Sadhukhan } 251ad879127SKrish Sadhukhan } 252ad879127SKrish Sadhukhan 253ad879127SKrish Sadhukhan test->scratch = failcnt; 254ad879127SKrish Sadhukhan } 255ad879127SKrish Sadhukhan 256ad879127SKrish Sadhukhan static bool dr_intercept_finished(struct svm_test *test) 257ad879127SKrish Sadhukhan { 258096cf7feSPaolo Bonzini ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0); 259ad879127SKrish Sadhukhan 260ad879127SKrish Sadhukhan /* Only expect DR intercepts */ 261ad879127SKrish Sadhukhan if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0)) 262ad879127SKrish Sadhukhan return true; 263ad879127SKrish Sadhukhan 264ad879127SKrish Sadhukhan /* 265ad879127SKrish Sadhukhan * Compute debug register number. 266ad879127SKrish Sadhukhan * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture 267ad879127SKrish Sadhukhan * Programmer's Manual Volume 2 - System Programming: 268ad879127SKrish Sadhukhan * http://support.amd.com/TechDocs/24593.pdf 269ad879127SKrish Sadhukhan * there are 16 VMEXIT codes each for DR read and write. 270ad879127SKrish Sadhukhan */ 271ad879127SKrish Sadhukhan test->scratch = (n % 16); 272ad879127SKrish Sadhukhan 273ad879127SKrish Sadhukhan /* Jump over MOV instruction */ 274096cf7feSPaolo Bonzini vmcb->save.rip += 3; 275ad879127SKrish Sadhukhan 276ad879127SKrish Sadhukhan return false; 277ad879127SKrish Sadhukhan } 278ad879127SKrish Sadhukhan 279ad879127SKrish Sadhukhan static bool check_dr_intercept(struct svm_test *test) 280ad879127SKrish Sadhukhan { 281ad879127SKrish Sadhukhan return !test->scratch; 282ad879127SKrish Sadhukhan } 283ad879127SKrish Sadhukhan 284ad879127SKrish Sadhukhan static bool next_rip_supported(void) 285ad879127SKrish Sadhukhan { 286ad879127SKrish Sadhukhan return this_cpu_has(X86_FEATURE_NRIPS); 287ad879127SKrish Sadhukhan } 288ad879127SKrish Sadhukhan 289ad879127SKrish Sadhukhan static void prepare_next_rip(struct svm_test *test) 290ad879127SKrish Sadhukhan { 291096cf7feSPaolo Bonzini vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC); 292ad879127SKrish Sadhukhan } 293ad879127SKrish Sadhukhan 294ad879127SKrish Sadhukhan 295ad879127SKrish Sadhukhan static void test_next_rip(struct svm_test *test) 296ad879127SKrish Sadhukhan { 297ad879127SKrish Sadhukhan asm volatile ("rdtsc\n\t" 298ad879127SKrish Sadhukhan ".globl exp_next_rip\n\t" 299ad879127SKrish Sadhukhan "exp_next_rip:\n\t" ::: "eax", "edx"); 300ad879127SKrish Sadhukhan } 301ad879127SKrish Sadhukhan 302ad879127SKrish Sadhukhan static bool check_next_rip(struct svm_test *test) 303ad879127SKrish Sadhukhan { 304ad879127SKrish Sadhukhan extern char exp_next_rip; 305ad879127SKrish Sadhukhan unsigned long address = (unsigned long)&exp_next_rip; 306ad879127SKrish Sadhukhan 307096cf7feSPaolo Bonzini return address == vmcb->control.next_rip; 308ad879127SKrish Sadhukhan } 309ad879127SKrish Sadhukhan 310ad879127SKrish Sadhukhan extern u8 *msr_bitmap; 311ad879127SKrish Sadhukhan 312ad879127SKrish Sadhukhan static void prepare_msr_intercept(struct svm_test *test) 313ad879127SKrish Sadhukhan { 314ad879127SKrish Sadhukhan default_prepare(test); 315096cf7feSPaolo Bonzini vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT); 316096cf7feSPaolo Bonzini vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR); 317ad879127SKrish Sadhukhan memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE); 318ad879127SKrish Sadhukhan } 319ad879127SKrish Sadhukhan 320ad879127SKrish Sadhukhan static void test_msr_intercept(struct svm_test *test) 321ad879127SKrish Sadhukhan { 322ad879127SKrish Sadhukhan unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */ 323ad879127SKrish Sadhukhan unsigned long msr_index; 324ad879127SKrish Sadhukhan 325ad879127SKrish Sadhukhan for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) { 326ad879127SKrish Sadhukhan if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) { 327ad879127SKrish Sadhukhan /* 328ad879127SKrish Sadhukhan * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture 329ad879127SKrish Sadhukhan * Programmer's Manual volume 2 - System Programming: 330ad879127SKrish Sadhukhan * http://support.amd.com/TechDocs/24593.pdf 331ad879127SKrish Sadhukhan * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR. 332ad879127SKrish Sadhukhan */ 333ad879127SKrish Sadhukhan continue; 334ad879127SKrish Sadhukhan } 335ad879127SKrish Sadhukhan 336ad879127SKrish Sadhukhan /* Skips gaps between supported MSR ranges */ 337ad879127SKrish Sadhukhan if (msr_index == 0x2000) 338ad879127SKrish Sadhukhan msr_index = 0xc0000000; 339ad879127SKrish Sadhukhan else if (msr_index == 0xc0002000) 340ad879127SKrish Sadhukhan msr_index = 0xc0010000; 341ad879127SKrish Sadhukhan 342ad879127SKrish Sadhukhan test->scratch = -1; 343ad879127SKrish Sadhukhan 344ad879127SKrish Sadhukhan rdmsr(msr_index); 345ad879127SKrish Sadhukhan 346ad879127SKrish Sadhukhan /* Check that a read intercept occurred for MSR at msr_index */ 347ad879127SKrish Sadhukhan if (test->scratch != msr_index) 348198dfd0eSJanis Schoetterl-Glausch report_fail("MSR 0x%lx read intercept", msr_index); 349ad879127SKrish Sadhukhan 350ad879127SKrish Sadhukhan /* 351ad879127SKrish Sadhukhan * Poor man approach to generate a value that 352ad879127SKrish Sadhukhan * seems arbitrary each time around the loop. 353ad879127SKrish Sadhukhan */ 354ad879127SKrish Sadhukhan msr_value += (msr_value << 1); 355ad879127SKrish Sadhukhan 356ad879127SKrish Sadhukhan wrmsr(msr_index, msr_value); 357ad879127SKrish Sadhukhan 358ad879127SKrish Sadhukhan /* Check that a write intercept occurred for MSR with msr_value */ 359ad879127SKrish Sadhukhan if (test->scratch != msr_value) 360198dfd0eSJanis Schoetterl-Glausch report_fail("MSR 0x%lx write intercept", msr_index); 361ad879127SKrish Sadhukhan } 362ad879127SKrish Sadhukhan 363ad879127SKrish Sadhukhan test->scratch = -2; 364ad879127SKrish Sadhukhan } 365ad879127SKrish Sadhukhan 366ad879127SKrish Sadhukhan static bool msr_intercept_finished(struct svm_test *test) 367ad879127SKrish Sadhukhan { 368096cf7feSPaolo Bonzini u32 exit_code = vmcb->control.exit_code; 369ad879127SKrish Sadhukhan u64 exit_info_1; 370ad879127SKrish Sadhukhan u8 *opcode; 371ad879127SKrish Sadhukhan 372ad879127SKrish Sadhukhan if (exit_code == SVM_EXIT_MSR) { 373096cf7feSPaolo Bonzini exit_info_1 = vmcb->control.exit_info_1; 374ad879127SKrish Sadhukhan } else { 375ad879127SKrish Sadhukhan /* 376ad879127SKrish Sadhukhan * If #GP exception occurs instead, check that it was 377ad879127SKrish Sadhukhan * for RDMSR/WRMSR and set exit_info_1 accordingly. 378ad879127SKrish Sadhukhan */ 379ad879127SKrish Sadhukhan 380ad879127SKrish Sadhukhan if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR)) 381ad879127SKrish Sadhukhan return true; 382ad879127SKrish Sadhukhan 383096cf7feSPaolo Bonzini opcode = (u8 *)vmcb->save.rip; 384ad879127SKrish Sadhukhan if (opcode[0] != 0x0f) 385ad879127SKrish Sadhukhan return true; 386ad879127SKrish Sadhukhan 387ad879127SKrish Sadhukhan switch (opcode[1]) { 388ad879127SKrish Sadhukhan case 0x30: /* WRMSR */ 389ad879127SKrish Sadhukhan exit_info_1 = 1; 390ad879127SKrish Sadhukhan break; 391ad879127SKrish Sadhukhan case 0x32: /* RDMSR */ 392ad879127SKrish Sadhukhan exit_info_1 = 0; 393ad879127SKrish Sadhukhan break; 394ad879127SKrish Sadhukhan default: 395ad879127SKrish Sadhukhan return true; 396ad879127SKrish Sadhukhan } 397ad879127SKrish Sadhukhan 398ad879127SKrish Sadhukhan /* 3993f27d772SManali Shukla * Warn that #GP exception occured instead. 400ad879127SKrish Sadhukhan * RCX holds the MSR index. 401ad879127SKrish Sadhukhan */ 402ad879127SKrish Sadhukhan printf("%s 0x%lx #GP exception\n", 403ad879127SKrish Sadhukhan exit_info_1 ? "WRMSR" : "RDMSR", get_regs().rcx); 404ad879127SKrish Sadhukhan } 405ad879127SKrish Sadhukhan 406ad879127SKrish Sadhukhan /* Jump over RDMSR/WRMSR instruction */ 407096cf7feSPaolo Bonzini vmcb->save.rip += 2; 408ad879127SKrish Sadhukhan 409ad879127SKrish Sadhukhan /* 410ad879127SKrish Sadhukhan * Test whether the intercept was for RDMSR/WRMSR. 411ad879127SKrish Sadhukhan * For RDMSR, test->scratch is set to the MSR index; 412ad879127SKrish Sadhukhan * RCX holds the MSR index. 413ad879127SKrish Sadhukhan * For WRMSR, test->scratch is set to the MSR value; 414ad879127SKrish Sadhukhan * RDX holds the upper 32 bits of the MSR value, 415ad879127SKrish Sadhukhan * while RAX hold its lower 32 bits. 416ad879127SKrish Sadhukhan */ 417ad879127SKrish Sadhukhan if (exit_info_1) 418ad879127SKrish Sadhukhan test->scratch = 419096cf7feSPaolo Bonzini ((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff)); 420ad879127SKrish Sadhukhan else 421ad879127SKrish Sadhukhan test->scratch = get_regs().rcx; 422ad879127SKrish Sadhukhan 423ad879127SKrish Sadhukhan return false; 424ad879127SKrish Sadhukhan } 425ad879127SKrish Sadhukhan 426ad879127SKrish Sadhukhan static bool check_msr_intercept(struct svm_test *test) 427ad879127SKrish Sadhukhan { 428ad879127SKrish Sadhukhan memset(msr_bitmap, 0, MSR_BITMAP_SIZE); 429ad879127SKrish Sadhukhan return (test->scratch == -2); 430ad879127SKrish Sadhukhan } 431ad879127SKrish Sadhukhan 432ad879127SKrish Sadhukhan static void prepare_mode_switch(struct svm_test *test) 433ad879127SKrish Sadhukhan { 434096cf7feSPaolo Bonzini vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR) 435ad879127SKrish Sadhukhan | (1ULL << UD_VECTOR) 436ad879127SKrish Sadhukhan | (1ULL << DF_VECTOR) 437ad879127SKrish Sadhukhan | (1ULL << PF_VECTOR); 438ad879127SKrish Sadhukhan test->scratch = 0; 439ad879127SKrish Sadhukhan } 440ad879127SKrish Sadhukhan 441ad879127SKrish Sadhukhan static void test_mode_switch(struct svm_test *test) 442ad879127SKrish Sadhukhan { 443ad879127SKrish Sadhukhan asm volatile(" cli\n" 444ad879127SKrish Sadhukhan " ljmp *1f\n" /* jump to 32-bit code segment */ 445ad879127SKrish Sadhukhan "1:\n" 446ad879127SKrish Sadhukhan " .long 2f\n" 447ad879127SKrish Sadhukhan " .long " xstr(KERNEL_CS32) "\n" 448ad879127SKrish Sadhukhan ".code32\n" 449ad879127SKrish Sadhukhan "2:\n" 450ad879127SKrish Sadhukhan " movl %%cr0, %%eax\n" 451ad879127SKrish Sadhukhan " btcl $31, %%eax\n" /* clear PG */ 452ad879127SKrish Sadhukhan " movl %%eax, %%cr0\n" 453ad879127SKrish Sadhukhan " movl $0xc0000080, %%ecx\n" /* EFER */ 454ad879127SKrish Sadhukhan " rdmsr\n" 455ad879127SKrish Sadhukhan " btcl $8, %%eax\n" /* clear LME */ 456ad879127SKrish Sadhukhan " wrmsr\n" 457ad879127SKrish Sadhukhan " movl %%cr4, %%eax\n" 458ad879127SKrish Sadhukhan " btcl $5, %%eax\n" /* clear PAE */ 459ad879127SKrish Sadhukhan " movl %%eax, %%cr4\n" 460ad879127SKrish Sadhukhan " movw %[ds16], %%ax\n" 461ad879127SKrish Sadhukhan " movw %%ax, %%ds\n" 462ad879127SKrish Sadhukhan " ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */ 463ad879127SKrish Sadhukhan ".code16\n" 464ad879127SKrish Sadhukhan "3:\n" 465ad879127SKrish Sadhukhan " movl %%cr0, %%eax\n" 466ad879127SKrish Sadhukhan " btcl $0, %%eax\n" /* clear PE */ 467ad879127SKrish Sadhukhan " movl %%eax, %%cr0\n" 468ad879127SKrish Sadhukhan " ljmpl $0, $4f\n" /* jump to real-mode */ 469ad879127SKrish Sadhukhan "4:\n" 470ad879127SKrish Sadhukhan " vmmcall\n" 471ad879127SKrish Sadhukhan " movl %%cr0, %%eax\n" 472ad879127SKrish Sadhukhan " btsl $0, %%eax\n" /* set PE */ 473ad879127SKrish Sadhukhan " movl %%eax, %%cr0\n" 474ad879127SKrish Sadhukhan " ljmpl %[cs32], $5f\n" /* back to protected mode */ 475ad879127SKrish Sadhukhan ".code32\n" 476ad879127SKrish Sadhukhan "5:\n" 477ad879127SKrish Sadhukhan " movl %%cr4, %%eax\n" 478ad879127SKrish Sadhukhan " btsl $5, %%eax\n" /* set PAE */ 479ad879127SKrish Sadhukhan " movl %%eax, %%cr4\n" 480ad879127SKrish Sadhukhan " movl $0xc0000080, %%ecx\n" /* EFER */ 481ad879127SKrish Sadhukhan " rdmsr\n" 482ad879127SKrish Sadhukhan " btsl $8, %%eax\n" /* set LME */ 483ad879127SKrish Sadhukhan " wrmsr\n" 484ad879127SKrish Sadhukhan " movl %%cr0, %%eax\n" 485ad879127SKrish Sadhukhan " btsl $31, %%eax\n" /* set PG */ 486ad879127SKrish Sadhukhan " movl %%eax, %%cr0\n" 487ad879127SKrish Sadhukhan " ljmpl %[cs64], $6f\n" /* back to long mode */ 488ad879127SKrish Sadhukhan ".code64\n\t" 489ad879127SKrish Sadhukhan "6:\n" 490ad879127SKrish Sadhukhan " vmmcall\n" 491ad879127SKrish Sadhukhan :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16), 492ad879127SKrish Sadhukhan [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64) 493ad879127SKrish Sadhukhan : "rax", "rbx", "rcx", "rdx", "memory"); 494ad879127SKrish Sadhukhan } 495ad879127SKrish Sadhukhan 496ad879127SKrish Sadhukhan static bool mode_switch_finished(struct svm_test *test) 497ad879127SKrish Sadhukhan { 498ad879127SKrish Sadhukhan u64 cr0, cr4, efer; 499ad879127SKrish Sadhukhan 500096cf7feSPaolo Bonzini cr0 = vmcb->save.cr0; 501096cf7feSPaolo Bonzini cr4 = vmcb->save.cr4; 502096cf7feSPaolo Bonzini efer = vmcb->save.efer; 503ad879127SKrish Sadhukhan 504ad879127SKrish Sadhukhan /* Only expect VMMCALL intercepts */ 505096cf7feSPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) 506ad879127SKrish Sadhukhan return true; 507ad879127SKrish Sadhukhan 508ad879127SKrish Sadhukhan /* Jump over VMMCALL instruction */ 509096cf7feSPaolo Bonzini vmcb->save.rip += 3; 510ad879127SKrish Sadhukhan 511ad879127SKrish Sadhukhan /* Do sanity checks */ 512ad879127SKrish Sadhukhan switch (test->scratch) { 513ad879127SKrish Sadhukhan case 0: 514ad879127SKrish Sadhukhan /* Test should be in real mode now - check for this */ 515ad879127SKrish Sadhukhan if ((cr0 & 0x80000001) || /* CR0.PG, CR0.PE */ 516ad879127SKrish Sadhukhan (cr4 & 0x00000020) || /* CR4.PAE */ 517ad879127SKrish Sadhukhan (efer & 0x00000500)) /* EFER.LMA, EFER.LME */ 518ad879127SKrish Sadhukhan return true; 519ad879127SKrish Sadhukhan break; 520ad879127SKrish Sadhukhan case 2: 521ad879127SKrish Sadhukhan /* Test should be back in long-mode now - check for this */ 522ad879127SKrish Sadhukhan if (((cr0 & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */ 523ad879127SKrish Sadhukhan ((cr4 & 0x00000020) != 0x00000020) || /* CR4.PAE */ 524ad879127SKrish Sadhukhan ((efer & 0x00000500) != 0x00000500)) /* EFER.LMA, EFER.LME */ 525ad879127SKrish Sadhukhan return true; 526ad879127SKrish Sadhukhan break; 527ad879127SKrish Sadhukhan } 528ad879127SKrish Sadhukhan 529ad879127SKrish Sadhukhan /* one step forward */ 530ad879127SKrish Sadhukhan test->scratch += 1; 531ad879127SKrish Sadhukhan 532ad879127SKrish Sadhukhan return test->scratch == 2; 533ad879127SKrish Sadhukhan } 534ad879127SKrish Sadhukhan 535ad879127SKrish Sadhukhan static bool check_mode_switch(struct svm_test *test) 536ad879127SKrish Sadhukhan { 537ad879127SKrish Sadhukhan return test->scratch == 2; 538ad879127SKrish Sadhukhan } 539ad879127SKrish Sadhukhan 540ad879127SKrish Sadhukhan extern u8 *io_bitmap; 541ad879127SKrish Sadhukhan 542ad879127SKrish Sadhukhan static void prepare_ioio(struct svm_test *test) 543ad879127SKrish Sadhukhan { 544096cf7feSPaolo Bonzini vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT); 545ad879127SKrish Sadhukhan test->scratch = 0; 546ad879127SKrish Sadhukhan memset(io_bitmap, 0, 8192); 547ad879127SKrish Sadhukhan io_bitmap[8192] = 0xFF; 548ad879127SKrish Sadhukhan } 549ad879127SKrish Sadhukhan 550ad879127SKrish Sadhukhan static void test_ioio(struct svm_test *test) 551ad879127SKrish Sadhukhan { 552ad879127SKrish Sadhukhan // stage 0, test IO pass 553ad879127SKrish Sadhukhan inb(0x5000); 554ad879127SKrish Sadhukhan outb(0x0, 0x5000); 555ad879127SKrish Sadhukhan if (get_test_stage(test) != 0) 556ad879127SKrish Sadhukhan goto fail; 557ad879127SKrish Sadhukhan 558ad879127SKrish Sadhukhan // test IO width, in/out 559ad879127SKrish Sadhukhan io_bitmap[0] = 0xFF; 560ad879127SKrish Sadhukhan inc_test_stage(test); 561ad879127SKrish Sadhukhan inb(0x0); 562ad879127SKrish Sadhukhan if (get_test_stage(test) != 2) 563ad879127SKrish Sadhukhan goto fail; 564ad879127SKrish Sadhukhan 565ad879127SKrish Sadhukhan outw(0x0, 0x0); 566ad879127SKrish Sadhukhan if (get_test_stage(test) != 3) 567ad879127SKrish Sadhukhan goto fail; 568ad879127SKrish Sadhukhan 569ad879127SKrish Sadhukhan inl(0x0); 570ad879127SKrish Sadhukhan if (get_test_stage(test) != 4) 571ad879127SKrish Sadhukhan goto fail; 572ad879127SKrish Sadhukhan 573ad879127SKrish Sadhukhan // test low/high IO port 574ad879127SKrish Sadhukhan io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8)); 575ad879127SKrish Sadhukhan inb(0x5000); 576ad879127SKrish Sadhukhan if (get_test_stage(test) != 5) 577ad879127SKrish Sadhukhan goto fail; 578ad879127SKrish Sadhukhan 579ad879127SKrish Sadhukhan io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8)); 580ad879127SKrish Sadhukhan inw(0x9000); 581ad879127SKrish Sadhukhan if (get_test_stage(test) != 6) 582ad879127SKrish Sadhukhan goto fail; 583ad879127SKrish Sadhukhan 584ad879127SKrish Sadhukhan // test partial pass 585ad879127SKrish Sadhukhan io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8)); 586ad879127SKrish Sadhukhan inl(0x4FFF); 587ad879127SKrish Sadhukhan if (get_test_stage(test) != 7) 588ad879127SKrish Sadhukhan goto fail; 589ad879127SKrish Sadhukhan 590ad879127SKrish Sadhukhan // test across pages 591ad879127SKrish Sadhukhan inc_test_stage(test); 592ad879127SKrish Sadhukhan inl(0x7FFF); 593ad879127SKrish Sadhukhan if (get_test_stage(test) != 8) 594ad879127SKrish Sadhukhan goto fail; 595ad879127SKrish Sadhukhan 596ad879127SKrish Sadhukhan inc_test_stage(test); 597ad879127SKrish Sadhukhan io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8); 598ad879127SKrish Sadhukhan inl(0x7FFF); 599ad879127SKrish Sadhukhan if (get_test_stage(test) != 10) 600ad879127SKrish Sadhukhan goto fail; 601ad879127SKrish Sadhukhan 602ad879127SKrish Sadhukhan io_bitmap[0] = 0; 603ad879127SKrish Sadhukhan inl(0xFFFF); 604ad879127SKrish Sadhukhan if (get_test_stage(test) != 11) 605ad879127SKrish Sadhukhan goto fail; 606ad879127SKrish Sadhukhan 607ad879127SKrish Sadhukhan io_bitmap[0] = 0xFF; 608ad879127SKrish Sadhukhan io_bitmap[8192] = 0; 609ad879127SKrish Sadhukhan inl(0xFFFF); 610ad879127SKrish Sadhukhan inc_test_stage(test); 611ad879127SKrish Sadhukhan if (get_test_stage(test) != 12) 612ad879127SKrish Sadhukhan goto fail; 613ad879127SKrish Sadhukhan 614ad879127SKrish Sadhukhan return; 615ad879127SKrish Sadhukhan 616ad879127SKrish Sadhukhan fail: 617198dfd0eSJanis Schoetterl-Glausch report_fail("stage %d", get_test_stage(test)); 618ad879127SKrish Sadhukhan test->scratch = -1; 619ad879127SKrish Sadhukhan } 620ad879127SKrish Sadhukhan 621ad879127SKrish Sadhukhan static bool ioio_finished(struct svm_test *test) 622ad879127SKrish Sadhukhan { 623ad879127SKrish Sadhukhan unsigned port, size; 624ad879127SKrish Sadhukhan 625ad879127SKrish Sadhukhan /* Only expect IOIO intercepts */ 626096cf7feSPaolo Bonzini if (vmcb->control.exit_code == SVM_EXIT_VMMCALL) 627ad879127SKrish Sadhukhan return true; 628ad879127SKrish Sadhukhan 629096cf7feSPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_IOIO) 630ad879127SKrish Sadhukhan return true; 631ad879127SKrish Sadhukhan 632ad879127SKrish Sadhukhan /* one step forward */ 633ad879127SKrish Sadhukhan test->scratch += 1; 634ad879127SKrish Sadhukhan 635096cf7feSPaolo Bonzini port = vmcb->control.exit_info_1 >> 16; 636096cf7feSPaolo Bonzini size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7; 637ad879127SKrish Sadhukhan 638ad879127SKrish Sadhukhan while (size--) { 639ad879127SKrish Sadhukhan io_bitmap[port / 8] &= ~(1 << (port & 7)); 640ad879127SKrish Sadhukhan port++; 641ad879127SKrish Sadhukhan } 642ad879127SKrish Sadhukhan 643ad879127SKrish Sadhukhan return false; 644ad879127SKrish Sadhukhan } 645ad879127SKrish Sadhukhan 646ad879127SKrish Sadhukhan static bool check_ioio(struct svm_test *test) 647ad879127SKrish Sadhukhan { 648ad879127SKrish Sadhukhan memset(io_bitmap, 0, 8193); 649ad879127SKrish Sadhukhan return test->scratch != -1; 650ad879127SKrish Sadhukhan } 651ad879127SKrish Sadhukhan 652ad879127SKrish Sadhukhan static void prepare_asid_zero(struct svm_test *test) 653ad879127SKrish Sadhukhan { 654096cf7feSPaolo Bonzini vmcb->control.asid = 0; 655ad879127SKrish Sadhukhan } 656ad879127SKrish Sadhukhan 657ad879127SKrish Sadhukhan static void test_asid_zero(struct svm_test *test) 658ad879127SKrish Sadhukhan { 659ad879127SKrish Sadhukhan asm volatile ("vmmcall\n\t"); 660ad879127SKrish Sadhukhan } 661ad879127SKrish Sadhukhan 662ad879127SKrish Sadhukhan static bool check_asid_zero(struct svm_test *test) 663ad879127SKrish Sadhukhan { 664096cf7feSPaolo Bonzini return vmcb->control.exit_code == SVM_EXIT_ERR; 665ad879127SKrish Sadhukhan } 666ad879127SKrish Sadhukhan 667ad879127SKrish Sadhukhan static void sel_cr0_bug_prepare(struct svm_test *test) 668ad879127SKrish Sadhukhan { 669096cf7feSPaolo Bonzini vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0); 670ad879127SKrish Sadhukhan } 671ad879127SKrish Sadhukhan 672ad879127SKrish Sadhukhan static bool sel_cr0_bug_finished(struct svm_test *test) 673ad879127SKrish Sadhukhan { 674ad879127SKrish Sadhukhan return true; 675ad879127SKrish Sadhukhan } 676ad879127SKrish Sadhukhan 677ad879127SKrish Sadhukhan static void sel_cr0_bug_test(struct svm_test *test) 678ad879127SKrish Sadhukhan { 679ad879127SKrish Sadhukhan unsigned long cr0; 680ad879127SKrish Sadhukhan 681ad879127SKrish Sadhukhan /* read cr0, clear CD, and write back */ 682ad879127SKrish Sadhukhan cr0 = read_cr0(); 683ad879127SKrish Sadhukhan cr0 |= (1UL << 30); 684ad879127SKrish Sadhukhan write_cr0(cr0); 685ad879127SKrish Sadhukhan 686ad879127SKrish Sadhukhan /* 687ad879127SKrish Sadhukhan * If we are here the test failed, not sure what to do now because we 688ad879127SKrish Sadhukhan * are not in guest-mode anymore so we can't trigger an intercept. 689ad879127SKrish Sadhukhan * Trigger a tripple-fault for now. 690ad879127SKrish Sadhukhan */ 691198dfd0eSJanis Schoetterl-Glausch report_fail("sel_cr0 test. Can not recover from this - exiting"); 692ad879127SKrish Sadhukhan exit(report_summary()); 693ad879127SKrish Sadhukhan } 694ad879127SKrish Sadhukhan 695ad879127SKrish Sadhukhan static bool sel_cr0_bug_check(struct svm_test *test) 696ad879127SKrish Sadhukhan { 697096cf7feSPaolo Bonzini return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE; 698ad879127SKrish Sadhukhan } 699ad879127SKrish Sadhukhan 700ad879127SKrish Sadhukhan #define TSC_ADJUST_VALUE (1ll << 32) 701f3154609SBill Wendling #define TSC_OFFSET_VALUE (~0ull << 48) 702ad879127SKrish Sadhukhan static bool ok; 703ad879127SKrish Sadhukhan 70410a65fc4SNadav Amit static bool tsc_adjust_supported(void) 70510a65fc4SNadav Amit { 70610a65fc4SNadav Amit return this_cpu_has(X86_FEATURE_TSC_ADJUST); 70710a65fc4SNadav Amit } 70810a65fc4SNadav Amit 709ad879127SKrish Sadhukhan static void tsc_adjust_prepare(struct svm_test *test) 710ad879127SKrish Sadhukhan { 711ad879127SKrish Sadhukhan default_prepare(test); 712096cf7feSPaolo Bonzini vmcb->control.tsc_offset = TSC_OFFSET_VALUE; 713ad879127SKrish Sadhukhan 714ad879127SKrish Sadhukhan wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE); 715ad879127SKrish Sadhukhan int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST); 716ad879127SKrish Sadhukhan ok = adjust == -TSC_ADJUST_VALUE; 717ad879127SKrish Sadhukhan } 718ad879127SKrish Sadhukhan 719ad879127SKrish Sadhukhan static void tsc_adjust_test(struct svm_test *test) 720ad879127SKrish Sadhukhan { 721ad879127SKrish Sadhukhan int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST); 722ad879127SKrish Sadhukhan ok &= adjust == -TSC_ADJUST_VALUE; 723ad879127SKrish Sadhukhan 724ad879127SKrish Sadhukhan uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; 725ad879127SKrish Sadhukhan wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); 726ad879127SKrish Sadhukhan 727ad879127SKrish Sadhukhan adjust = rdmsr(MSR_IA32_TSC_ADJUST); 728ad879127SKrish Sadhukhan ok &= adjust <= -2 * TSC_ADJUST_VALUE; 729ad879127SKrish Sadhukhan 730ad879127SKrish Sadhukhan uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE; 731ad879127SKrish Sadhukhan ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE; 732ad879127SKrish Sadhukhan 733ad879127SKrish Sadhukhan uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE; 734ad879127SKrish Sadhukhan ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE; 735ad879127SKrish Sadhukhan } 736ad879127SKrish Sadhukhan 737ad879127SKrish Sadhukhan static bool tsc_adjust_check(struct svm_test *test) 738ad879127SKrish Sadhukhan { 739ad879127SKrish Sadhukhan int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST); 740ad879127SKrish Sadhukhan 741ad879127SKrish Sadhukhan wrmsr(MSR_IA32_TSC_ADJUST, 0); 742ad879127SKrish Sadhukhan return ok && adjust <= -2 * TSC_ADJUST_VALUE; 743ad879127SKrish Sadhukhan } 744ad879127SKrish Sadhukhan 745a8503d50SMaxim Levitsky 746a8503d50SMaxim Levitsky static u64 guest_tsc_delay_value; 747a8503d50SMaxim Levitsky /* number of bits to shift tsc right for stable result */ 748a8503d50SMaxim Levitsky #define TSC_SHIFT 24 749a8503d50SMaxim Levitsky #define TSC_SCALE_ITERATIONS 10 750a8503d50SMaxim Levitsky 751a8503d50SMaxim Levitsky static void svm_tsc_scale_guest(struct svm_test *test) 752a8503d50SMaxim Levitsky { 753a8503d50SMaxim Levitsky u64 start_tsc = rdtsc(); 754a8503d50SMaxim Levitsky 755a8503d50SMaxim Levitsky while (rdtsc() - start_tsc < guest_tsc_delay_value) 756a8503d50SMaxim Levitsky cpu_relax(); 757a8503d50SMaxim Levitsky } 758a8503d50SMaxim Levitsky 759a8503d50SMaxim Levitsky static void svm_tsc_scale_run_testcase(u64 duration, 760a8503d50SMaxim Levitsky double tsc_scale, u64 tsc_offset) 761a8503d50SMaxim Levitsky { 762a8503d50SMaxim Levitsky u64 start_tsc, actual_duration; 763a8503d50SMaxim Levitsky 764a8503d50SMaxim Levitsky guest_tsc_delay_value = (duration << TSC_SHIFT) * tsc_scale; 765a8503d50SMaxim Levitsky 766a8503d50SMaxim Levitsky test_set_guest(svm_tsc_scale_guest); 767a8503d50SMaxim Levitsky vmcb->control.tsc_offset = tsc_offset; 768a8503d50SMaxim Levitsky wrmsr(MSR_AMD64_TSC_RATIO, (u64)(tsc_scale * (1ULL << 32))); 769a8503d50SMaxim Levitsky 770a8503d50SMaxim Levitsky start_tsc = rdtsc(); 771a8503d50SMaxim Levitsky 772a8503d50SMaxim Levitsky if (svm_vmrun() != SVM_EXIT_VMMCALL) 773a8503d50SMaxim Levitsky report_fail("unexpected vm exit code 0x%x", vmcb->control.exit_code); 774a8503d50SMaxim Levitsky 775a8503d50SMaxim Levitsky actual_duration = (rdtsc() - start_tsc) >> TSC_SHIFT; 776a8503d50SMaxim Levitsky 777a8503d50SMaxim Levitsky report(duration == actual_duration, "tsc delay (expected: %lu, actual: %lu)", 778a8503d50SMaxim Levitsky duration, actual_duration); 779a8503d50SMaxim Levitsky } 780a8503d50SMaxim Levitsky 781a8503d50SMaxim Levitsky static void svm_tsc_scale_test(void) 782a8503d50SMaxim Levitsky { 783a8503d50SMaxim Levitsky int i; 784a8503d50SMaxim Levitsky 785a8503d50SMaxim Levitsky if (!tsc_scale_supported()) { 786a8503d50SMaxim Levitsky report_skip("TSC scale not supported in the guest"); 787a8503d50SMaxim Levitsky return; 788a8503d50SMaxim Levitsky } 789a8503d50SMaxim Levitsky 790a8503d50SMaxim Levitsky report(rdmsr(MSR_AMD64_TSC_RATIO) == TSC_RATIO_DEFAULT, 791a8503d50SMaxim Levitsky "initial TSC scale ratio"); 792a8503d50SMaxim Levitsky 793a8503d50SMaxim Levitsky for (i = 0 ; i < TSC_SCALE_ITERATIONS; i++) { 794a8503d50SMaxim Levitsky 795a8503d50SMaxim Levitsky double tsc_scale = (double)(rdrand() % 100 + 1) / 10; 796a8503d50SMaxim Levitsky int duration = rdrand() % 50 + 1; 797a8503d50SMaxim Levitsky u64 tsc_offset = rdrand(); 798a8503d50SMaxim Levitsky 799a8503d50SMaxim Levitsky report_info("duration=%d, tsc_scale=%d, tsc_offset=%ld", 800a8503d50SMaxim Levitsky duration, (int)(tsc_scale * 100), tsc_offset); 801a8503d50SMaxim Levitsky 802a8503d50SMaxim Levitsky svm_tsc_scale_run_testcase(duration, tsc_scale, tsc_offset); 803a8503d50SMaxim Levitsky } 804a8503d50SMaxim Levitsky 805a8503d50SMaxim Levitsky svm_tsc_scale_run_testcase(50, 255, rdrand()); 806a8503d50SMaxim Levitsky svm_tsc_scale_run_testcase(50, 0.0001, rdrand()); 807a8503d50SMaxim Levitsky } 808a8503d50SMaxim Levitsky 809ad879127SKrish Sadhukhan static void latency_prepare(struct svm_test *test) 810ad879127SKrish Sadhukhan { 811ad879127SKrish Sadhukhan default_prepare(test); 812ad879127SKrish Sadhukhan runs = LATENCY_RUNS; 813ad879127SKrish Sadhukhan latvmrun_min = latvmexit_min = -1ULL; 814ad879127SKrish Sadhukhan latvmrun_max = latvmexit_max = 0; 815ad879127SKrish Sadhukhan vmrun_sum = vmexit_sum = 0; 816ad879127SKrish Sadhukhan tsc_start = rdtsc(); 817ad879127SKrish Sadhukhan } 818ad879127SKrish Sadhukhan 819ad879127SKrish Sadhukhan static void latency_test(struct svm_test *test) 820ad879127SKrish Sadhukhan { 821ad879127SKrish Sadhukhan u64 cycles; 822ad879127SKrish Sadhukhan 823ad879127SKrish Sadhukhan start: 824ad879127SKrish Sadhukhan tsc_end = rdtsc(); 825ad879127SKrish Sadhukhan 826ad879127SKrish Sadhukhan cycles = tsc_end - tsc_start; 827ad879127SKrish Sadhukhan 828ad879127SKrish Sadhukhan if (cycles > latvmrun_max) 829ad879127SKrish Sadhukhan latvmrun_max = cycles; 830ad879127SKrish Sadhukhan 831ad879127SKrish Sadhukhan if (cycles < latvmrun_min) 832ad879127SKrish Sadhukhan latvmrun_min = cycles; 833ad879127SKrish Sadhukhan 834ad879127SKrish Sadhukhan vmrun_sum += cycles; 835ad879127SKrish Sadhukhan 836ad879127SKrish Sadhukhan tsc_start = rdtsc(); 837ad879127SKrish Sadhukhan 838ad879127SKrish Sadhukhan asm volatile ("vmmcall" : : : "memory"); 839ad879127SKrish Sadhukhan goto start; 840ad879127SKrish Sadhukhan } 841ad879127SKrish Sadhukhan 842ad879127SKrish Sadhukhan static bool latency_finished(struct svm_test *test) 843ad879127SKrish Sadhukhan { 844ad879127SKrish Sadhukhan u64 cycles; 845ad879127SKrish Sadhukhan 846ad879127SKrish Sadhukhan tsc_end = rdtsc(); 847ad879127SKrish Sadhukhan 848ad879127SKrish Sadhukhan cycles = tsc_end - tsc_start; 849ad879127SKrish Sadhukhan 850ad879127SKrish Sadhukhan if (cycles > latvmexit_max) 851ad879127SKrish Sadhukhan latvmexit_max = cycles; 852ad879127SKrish Sadhukhan 853ad879127SKrish Sadhukhan if (cycles < latvmexit_min) 854ad879127SKrish Sadhukhan latvmexit_min = cycles; 855ad879127SKrish Sadhukhan 856ad879127SKrish Sadhukhan vmexit_sum += cycles; 857ad879127SKrish Sadhukhan 858096cf7feSPaolo Bonzini vmcb->save.rip += 3; 859ad879127SKrish Sadhukhan 860ad879127SKrish Sadhukhan runs -= 1; 861ad879127SKrish Sadhukhan 862ad879127SKrish Sadhukhan tsc_end = rdtsc(); 863ad879127SKrish Sadhukhan 864ad879127SKrish Sadhukhan return runs == 0; 865ad879127SKrish Sadhukhan } 866ad879127SKrish Sadhukhan 867f7fa53dcSPaolo Bonzini static bool latency_finished_clean(struct svm_test *test) 868f7fa53dcSPaolo Bonzini { 869f7fa53dcSPaolo Bonzini vmcb->control.clean = VMCB_CLEAN_ALL; 870f7fa53dcSPaolo Bonzini return latency_finished(test); 871f7fa53dcSPaolo Bonzini } 872f7fa53dcSPaolo Bonzini 873ad879127SKrish Sadhukhan static bool latency_check(struct svm_test *test) 874ad879127SKrish Sadhukhan { 875ad879127SKrish Sadhukhan printf(" Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max, 876ad879127SKrish Sadhukhan latvmrun_min, vmrun_sum / LATENCY_RUNS); 877ad879127SKrish Sadhukhan printf(" Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max, 878ad879127SKrish Sadhukhan latvmexit_min, vmexit_sum / LATENCY_RUNS); 879ad879127SKrish Sadhukhan return true; 880ad879127SKrish Sadhukhan } 881ad879127SKrish Sadhukhan 882ad879127SKrish Sadhukhan static void lat_svm_insn_prepare(struct svm_test *test) 883ad879127SKrish Sadhukhan { 884ad879127SKrish Sadhukhan default_prepare(test); 885ad879127SKrish Sadhukhan runs = LATENCY_RUNS; 886ad879127SKrish Sadhukhan latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL; 887ad879127SKrish Sadhukhan latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0; 888ad879127SKrish Sadhukhan vmload_sum = vmsave_sum = stgi_sum = clgi_sum; 889ad879127SKrish Sadhukhan } 890ad879127SKrish Sadhukhan 891ad879127SKrish Sadhukhan static bool lat_svm_insn_finished(struct svm_test *test) 892ad879127SKrish Sadhukhan { 893096cf7feSPaolo Bonzini u64 vmcb_phys = virt_to_phys(vmcb); 894ad879127SKrish Sadhukhan u64 cycles; 895ad879127SKrish Sadhukhan 896ad879127SKrish Sadhukhan for ( ; runs != 0; runs--) { 897ad879127SKrish Sadhukhan tsc_start = rdtsc(); 898ad879127SKrish Sadhukhan asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory"); 899ad879127SKrish Sadhukhan cycles = rdtsc() - tsc_start; 900ad879127SKrish Sadhukhan if (cycles > latvmload_max) 901ad879127SKrish Sadhukhan latvmload_max = cycles; 902ad879127SKrish Sadhukhan if (cycles < latvmload_min) 903ad879127SKrish Sadhukhan latvmload_min = cycles; 904ad879127SKrish Sadhukhan vmload_sum += cycles; 905ad879127SKrish Sadhukhan 906ad879127SKrish Sadhukhan tsc_start = rdtsc(); 907ad879127SKrish Sadhukhan asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory"); 908ad879127SKrish Sadhukhan cycles = rdtsc() - tsc_start; 909ad879127SKrish Sadhukhan if (cycles > latvmsave_max) 910ad879127SKrish Sadhukhan latvmsave_max = cycles; 911ad879127SKrish Sadhukhan if (cycles < latvmsave_min) 912ad879127SKrish Sadhukhan latvmsave_min = cycles; 913ad879127SKrish Sadhukhan vmsave_sum += cycles; 914ad879127SKrish Sadhukhan 915ad879127SKrish Sadhukhan tsc_start = rdtsc(); 916ad879127SKrish Sadhukhan asm volatile("stgi\n\t"); 917ad879127SKrish Sadhukhan cycles = rdtsc() - tsc_start; 918ad879127SKrish Sadhukhan if (cycles > latstgi_max) 919ad879127SKrish Sadhukhan latstgi_max = cycles; 920ad879127SKrish Sadhukhan if (cycles < latstgi_min) 921ad879127SKrish Sadhukhan latstgi_min = cycles; 922ad879127SKrish Sadhukhan stgi_sum += cycles; 923ad879127SKrish Sadhukhan 924ad879127SKrish Sadhukhan tsc_start = rdtsc(); 925ad879127SKrish Sadhukhan asm volatile("clgi\n\t"); 926ad879127SKrish Sadhukhan cycles = rdtsc() - tsc_start; 927ad879127SKrish Sadhukhan if (cycles > latclgi_max) 928ad879127SKrish Sadhukhan latclgi_max = cycles; 929ad879127SKrish Sadhukhan if (cycles < latclgi_min) 930ad879127SKrish Sadhukhan latclgi_min = cycles; 931ad879127SKrish Sadhukhan clgi_sum += cycles; 932ad879127SKrish Sadhukhan } 933ad879127SKrish Sadhukhan 934ad879127SKrish Sadhukhan tsc_end = rdtsc(); 935ad879127SKrish Sadhukhan 936ad879127SKrish Sadhukhan return true; 937ad879127SKrish Sadhukhan } 938ad879127SKrish Sadhukhan 939ad879127SKrish Sadhukhan static bool lat_svm_insn_check(struct svm_test *test) 940ad879127SKrish Sadhukhan { 941ad879127SKrish Sadhukhan printf(" Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max, 942ad879127SKrish Sadhukhan latvmload_min, vmload_sum / LATENCY_RUNS); 943ad879127SKrish Sadhukhan printf(" Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max, 944ad879127SKrish Sadhukhan latvmsave_min, vmsave_sum / LATENCY_RUNS); 945ad879127SKrish Sadhukhan printf(" Latency STGI: max: %ld min: %ld avg: %ld\n", latstgi_max, 946ad879127SKrish Sadhukhan latstgi_min, stgi_sum / LATENCY_RUNS); 947ad879127SKrish Sadhukhan printf(" Latency CLGI: max: %ld min: %ld avg: %ld\n", latclgi_max, 948ad879127SKrish Sadhukhan latclgi_min, clgi_sum / LATENCY_RUNS); 949ad879127SKrish Sadhukhan return true; 950ad879127SKrish Sadhukhan } 951ad879127SKrish Sadhukhan 952493d27d4SSean Christopherson /* 953493d27d4SSean Christopherson * Report failures from SVM guest code, and on failure, set the stage to -1 and 954493d27d4SSean Christopherson * do VMMCALL to terminate the test (host side must treat -1 as "finished"). 955493d27d4SSean Christopherson * TODO: fix the tests that don't play nice with a straight report, e.g. the 956493d27d4SSean Christopherson * V_TPR test fails if report() is invoked. 957493d27d4SSean Christopherson */ 958493d27d4SSean Christopherson #define report_svm_guest(cond, test, fmt, args...) \ 959493d27d4SSean Christopherson do { \ 960493d27d4SSean Christopherson if (!(cond)) { \ 961493d27d4SSean Christopherson report_fail(fmt, ##args); \ 962493d27d4SSean Christopherson set_test_stage(test, -1); \ 963493d27d4SSean Christopherson vmmcall(); \ 964493d27d4SSean Christopherson } \ 965493d27d4SSean Christopherson } while (0) 966493d27d4SSean Christopherson 967ad879127SKrish Sadhukhan bool pending_event_ipi_fired; 968ad879127SKrish Sadhukhan bool pending_event_guest_run; 969ad879127SKrish Sadhukhan 970ad879127SKrish Sadhukhan static void pending_event_ipi_isr(isr_regs_t *regs) 971ad879127SKrish Sadhukhan { 972ad879127SKrish Sadhukhan pending_event_ipi_fired = true; 973ad879127SKrish Sadhukhan eoi(); 974ad879127SKrish Sadhukhan } 975ad879127SKrish Sadhukhan 976ad879127SKrish Sadhukhan static void pending_event_prepare(struct svm_test *test) 977ad879127SKrish Sadhukhan { 978ad879127SKrish Sadhukhan int ipi_vector = 0xf1; 979ad879127SKrish Sadhukhan 980ad879127SKrish Sadhukhan default_prepare(test); 981ad879127SKrish Sadhukhan 982ad879127SKrish Sadhukhan pending_event_ipi_fired = false; 983ad879127SKrish Sadhukhan 984ad879127SKrish Sadhukhan handle_irq(ipi_vector, pending_event_ipi_isr); 985ad879127SKrish Sadhukhan 986ad879127SKrish Sadhukhan pending_event_guest_run = false; 987ad879127SKrish Sadhukhan 988096cf7feSPaolo Bonzini vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 989096cf7feSPaolo Bonzini vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 990ad879127SKrish Sadhukhan 991ad879127SKrish Sadhukhan apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 992ad879127SKrish Sadhukhan APIC_DM_FIXED | ipi_vector, 0); 993ad879127SKrish Sadhukhan 994ad879127SKrish Sadhukhan set_test_stage(test, 0); 995ad879127SKrish Sadhukhan } 996ad879127SKrish Sadhukhan 997ad879127SKrish Sadhukhan static void pending_event_test(struct svm_test *test) 998ad879127SKrish Sadhukhan { 999ad879127SKrish Sadhukhan pending_event_guest_run = true; 1000ad879127SKrish Sadhukhan } 1001ad879127SKrish Sadhukhan 1002ad879127SKrish Sadhukhan static bool pending_event_finished(struct svm_test *test) 1003ad879127SKrish Sadhukhan { 1004ad879127SKrish Sadhukhan switch (get_test_stage(test)) { 1005ad879127SKrish Sadhukhan case 0: 1006096cf7feSPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_INTR) { 1007198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to pending interrupt. Exit reason 0x%x", 1008096cf7feSPaolo Bonzini vmcb->control.exit_code); 1009ad879127SKrish Sadhukhan return true; 1010ad879127SKrish Sadhukhan } 1011ad879127SKrish Sadhukhan 1012096cf7feSPaolo Bonzini vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR); 1013096cf7feSPaolo Bonzini vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 1014ad879127SKrish Sadhukhan 1015ad879127SKrish Sadhukhan if (pending_event_guest_run) { 1016198dfd0eSJanis Schoetterl-Glausch report_fail("Guest ran before host received IPI\n"); 1017ad879127SKrish Sadhukhan return true; 1018ad879127SKrish Sadhukhan } 1019ad879127SKrish Sadhukhan 1020e4007e62SMaxim Levitsky sti_nop_cli(); 1021ad879127SKrish Sadhukhan 1022ad879127SKrish Sadhukhan if (!pending_event_ipi_fired) { 1023198dfd0eSJanis Schoetterl-Glausch report_fail("Pending interrupt not dispatched after IRQ enabled\n"); 1024ad879127SKrish Sadhukhan return true; 1025ad879127SKrish Sadhukhan } 1026ad879127SKrish Sadhukhan break; 1027ad879127SKrish Sadhukhan 1028ad879127SKrish Sadhukhan case 1: 1029ad879127SKrish Sadhukhan if (!pending_event_guest_run) { 1030198dfd0eSJanis Schoetterl-Glausch report_fail("Guest did not resume when no interrupt\n"); 1031ad879127SKrish Sadhukhan return true; 1032ad879127SKrish Sadhukhan } 1033ad879127SKrish Sadhukhan break; 1034ad879127SKrish Sadhukhan } 1035ad879127SKrish Sadhukhan 1036ad879127SKrish Sadhukhan inc_test_stage(test); 1037ad879127SKrish Sadhukhan 1038ad879127SKrish Sadhukhan return get_test_stage(test) == 2; 1039ad879127SKrish Sadhukhan } 1040ad879127SKrish Sadhukhan 1041ad879127SKrish Sadhukhan static bool pending_event_check(struct svm_test *test) 1042ad879127SKrish Sadhukhan { 1043ad879127SKrish Sadhukhan return get_test_stage(test) == 2; 1044ad879127SKrish Sadhukhan } 1045ad879127SKrish Sadhukhan 104685dc2aceSPaolo Bonzini static void pending_event_cli_prepare(struct svm_test *test) 1047ad879127SKrish Sadhukhan { 1048ad879127SKrish Sadhukhan default_prepare(test); 1049ad879127SKrish Sadhukhan 1050ad879127SKrish Sadhukhan pending_event_ipi_fired = false; 1051ad879127SKrish Sadhukhan 1052ad879127SKrish Sadhukhan handle_irq(0xf1, pending_event_ipi_isr); 1053ad879127SKrish Sadhukhan 1054ad879127SKrish Sadhukhan apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 1055ad879127SKrish Sadhukhan APIC_DM_FIXED | 0xf1, 0); 1056ad879127SKrish Sadhukhan 1057ad879127SKrish Sadhukhan set_test_stage(test, 0); 1058ad879127SKrish Sadhukhan } 1059ad879127SKrish Sadhukhan 106085dc2aceSPaolo Bonzini static void pending_event_cli_prepare_gif_clear(struct svm_test *test) 1061ad879127SKrish Sadhukhan { 1062ad879127SKrish Sadhukhan asm("cli"); 1063ad879127SKrish Sadhukhan } 1064ad879127SKrish Sadhukhan 106585dc2aceSPaolo Bonzini static void pending_event_cli_test(struct svm_test *test) 1066ad879127SKrish Sadhukhan { 1067493d27d4SSean Christopherson report_svm_guest(!pending_event_ipi_fired, test, 1068493d27d4SSean Christopherson "IRQ should NOT be delivered while IRQs disabled"); 1069ad879127SKrish Sadhukhan 107085dc2aceSPaolo Bonzini /* VINTR_MASKING is zero. This should cause the IPI to fire. */ 1071e4007e62SMaxim Levitsky sti_nop_cli(); 1072ad879127SKrish Sadhukhan 1073493d27d4SSean Christopherson report_svm_guest(pending_event_ipi_fired, test, 1074493d27d4SSean Christopherson "IRQ should be delivered after enabling IRQs"); 1075ad879127SKrish Sadhukhan vmmcall(); 1076ad879127SKrish Sadhukhan 107785dc2aceSPaolo Bonzini /* 107885dc2aceSPaolo Bonzini * Now VINTR_MASKING=1, but no interrupt is pending so 107985dc2aceSPaolo Bonzini * the VINTR interception should be clear in VMCB02. Check 108085dc2aceSPaolo Bonzini * that L0 did not leave a stale VINTR in the VMCB. 108185dc2aceSPaolo Bonzini */ 1082e4007e62SMaxim Levitsky sti_nop_cli(); 1083ad879127SKrish Sadhukhan } 1084ad879127SKrish Sadhukhan 108585dc2aceSPaolo Bonzini static bool pending_event_cli_finished(struct svm_test *test) 1086ad879127SKrish Sadhukhan { 1087493d27d4SSean Christopherson report_svm_guest(vmcb->control.exit_code == SVM_EXIT_VMMCALL, test, 1088493d27d4SSean Christopherson "Wanted VMMCALL VM-Exit, got exit reason 0x%x", 1089096cf7feSPaolo Bonzini vmcb->control.exit_code); 1090ad879127SKrish Sadhukhan 1091ad879127SKrish Sadhukhan switch (get_test_stage(test)) { 1092ad879127SKrish Sadhukhan case 0: 1093096cf7feSPaolo Bonzini vmcb->save.rip += 3; 1094ad879127SKrish Sadhukhan 1095ad879127SKrish Sadhukhan pending_event_ipi_fired = false; 1096ad879127SKrish Sadhukhan 1097096cf7feSPaolo Bonzini vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 1098ad879127SKrish Sadhukhan 109985dc2aceSPaolo Bonzini /* Now entering again with VINTR_MASKING=1. */ 1100ad879127SKrish Sadhukhan apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 1101ad879127SKrish Sadhukhan APIC_DM_FIXED | 0xf1, 0); 1102ad879127SKrish Sadhukhan 1103ad879127SKrish Sadhukhan break; 1104ad879127SKrish Sadhukhan 1105ad879127SKrish Sadhukhan case 1: 1106ad879127SKrish Sadhukhan if (pending_event_ipi_fired == true) { 1107198dfd0eSJanis Schoetterl-Glausch report_fail("Interrupt triggered by guest"); 1108ad879127SKrish Sadhukhan return true; 1109ad879127SKrish Sadhukhan } 1110ad879127SKrish Sadhukhan 1111e4007e62SMaxim Levitsky sti_nop_cli(); 1112ad879127SKrish Sadhukhan 1113ad879127SKrish Sadhukhan if (pending_event_ipi_fired != true) { 1114198dfd0eSJanis Schoetterl-Glausch report_fail("Interrupt not triggered by host"); 1115ad879127SKrish Sadhukhan return true; 1116ad879127SKrish Sadhukhan } 1117ad879127SKrish Sadhukhan 1118ad879127SKrish Sadhukhan break; 1119ad879127SKrish Sadhukhan 1120ad879127SKrish Sadhukhan default: 1121ad879127SKrish Sadhukhan return true; 1122ad879127SKrish Sadhukhan } 1123ad879127SKrish Sadhukhan 1124ad879127SKrish Sadhukhan inc_test_stage(test); 1125ad879127SKrish Sadhukhan 1126ad879127SKrish Sadhukhan return get_test_stage(test) == 2; 1127ad879127SKrish Sadhukhan } 1128ad879127SKrish Sadhukhan 112985dc2aceSPaolo Bonzini static bool pending_event_cli_check(struct svm_test *test) 1130ad879127SKrish Sadhukhan { 1131ad879127SKrish Sadhukhan return get_test_stage(test) == 2; 1132ad879127SKrish Sadhukhan } 1133ad879127SKrish Sadhukhan 113485dc2aceSPaolo Bonzini #define TIMER_VECTOR 222 113585dc2aceSPaolo Bonzini 113685dc2aceSPaolo Bonzini static volatile bool timer_fired; 113785dc2aceSPaolo Bonzini 113885dc2aceSPaolo Bonzini static void timer_isr(isr_regs_t *regs) 113985dc2aceSPaolo Bonzini { 114085dc2aceSPaolo Bonzini timer_fired = true; 114185dc2aceSPaolo Bonzini apic_write(APIC_EOI, 0); 114285dc2aceSPaolo Bonzini } 114385dc2aceSPaolo Bonzini 114485dc2aceSPaolo Bonzini static void interrupt_prepare(struct svm_test *test) 114585dc2aceSPaolo Bonzini { 114685dc2aceSPaolo Bonzini default_prepare(test); 114785dc2aceSPaolo Bonzini handle_irq(TIMER_VECTOR, timer_isr); 114885dc2aceSPaolo Bonzini timer_fired = false; 114985dc2aceSPaolo Bonzini set_test_stage(test, 0); 115085dc2aceSPaolo Bonzini } 115185dc2aceSPaolo Bonzini 115285dc2aceSPaolo Bonzini static void interrupt_test(struct svm_test *test) 115385dc2aceSPaolo Bonzini { 115485dc2aceSPaolo Bonzini long long start, loops; 115585dc2aceSPaolo Bonzini 1156a2c7dff7SMaxim Levitsky apic_setup_timer(TIMER_VECTOR, APIC_LVT_TIMER_PERIODIC); 1157787f0aebSMaxim Levitsky sti(); 1158a2c7dff7SMaxim Levitsky apic_start_timer(1); 1159a2c7dff7SMaxim Levitsky 116085dc2aceSPaolo Bonzini for (loops = 0; loops < 10000000 && !timer_fired; loops++) 116185dc2aceSPaolo Bonzini asm volatile ("nop"); 116285dc2aceSPaolo Bonzini 1163493d27d4SSean Christopherson report_svm_guest(timer_fired, test, 1164493d27d4SSean Christopherson "direct interrupt while running guest"); 116585dc2aceSPaolo Bonzini 1166a2c7dff7SMaxim Levitsky apic_stop_timer(); 1167787f0aebSMaxim Levitsky cli(); 116885dc2aceSPaolo Bonzini vmmcall(); 116985dc2aceSPaolo Bonzini 117085dc2aceSPaolo Bonzini timer_fired = false; 1171a2c7dff7SMaxim Levitsky apic_start_timer(1); 117285dc2aceSPaolo Bonzini for (loops = 0; loops < 10000000 && !timer_fired; loops++) 117385dc2aceSPaolo Bonzini asm volatile ("nop"); 117485dc2aceSPaolo Bonzini 1175493d27d4SSean Christopherson report_svm_guest(timer_fired, test, 1176493d27d4SSean Christopherson "intercepted interrupt while running guest"); 117785dc2aceSPaolo Bonzini 1178787f0aebSMaxim Levitsky sti(); 1179a2c7dff7SMaxim Levitsky apic_stop_timer(); 1180787f0aebSMaxim Levitsky cli(); 118185dc2aceSPaolo Bonzini 118285dc2aceSPaolo Bonzini timer_fired = false; 118385dc2aceSPaolo Bonzini start = rdtsc(); 1184a2c7dff7SMaxim Levitsky apic_start_timer(1000000); 1185a3001422SOliver Upton safe_halt(); 118685dc2aceSPaolo Bonzini 1187493d27d4SSean Christopherson report_svm_guest(timer_fired, test, "direct interrupt + hlt"); 1188493d27d4SSean Christopherson report(rdtsc() - start > 10000, "IRQ arrived after expected delay"); 118985dc2aceSPaolo Bonzini 1190a2c7dff7SMaxim Levitsky apic_stop_timer(); 1191787f0aebSMaxim Levitsky cli(); 119285dc2aceSPaolo Bonzini vmmcall(); 119385dc2aceSPaolo Bonzini 119485dc2aceSPaolo Bonzini timer_fired = false; 119585dc2aceSPaolo Bonzini start = rdtsc(); 1196a2c7dff7SMaxim Levitsky apic_start_timer(1000000); 119785dc2aceSPaolo Bonzini asm volatile ("hlt"); 119885dc2aceSPaolo Bonzini 1199493d27d4SSean Christopherson report_svm_guest(timer_fired, test, "intercepted interrupt + hlt"); 1200493d27d4SSean Christopherson report(rdtsc() - start > 10000, "IRQ arrived after expected delay"); 120185dc2aceSPaolo Bonzini 1202a2c7dff7SMaxim Levitsky apic_cleanup_timer(); 120385dc2aceSPaolo Bonzini } 120485dc2aceSPaolo Bonzini 120585dc2aceSPaolo Bonzini static bool interrupt_finished(struct svm_test *test) 120685dc2aceSPaolo Bonzini { 120785dc2aceSPaolo Bonzini switch (get_test_stage(test)) { 120885dc2aceSPaolo Bonzini case 0: 120985dc2aceSPaolo Bonzini case 2: 1210096cf7feSPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1211198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1212096cf7feSPaolo Bonzini vmcb->control.exit_code); 121385dc2aceSPaolo Bonzini return true; 121485dc2aceSPaolo Bonzini } 1215096cf7feSPaolo Bonzini vmcb->save.rip += 3; 121685dc2aceSPaolo Bonzini 1217096cf7feSPaolo Bonzini vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1218096cf7feSPaolo Bonzini vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 121985dc2aceSPaolo Bonzini break; 122085dc2aceSPaolo Bonzini 122185dc2aceSPaolo Bonzini case 1: 122285dc2aceSPaolo Bonzini case 3: 1223096cf7feSPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_INTR) { 1224198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to intr intercept. Exit reason 0x%x", 1225096cf7feSPaolo Bonzini vmcb->control.exit_code); 122685dc2aceSPaolo Bonzini return true; 122785dc2aceSPaolo Bonzini } 122885dc2aceSPaolo Bonzini 1229e4007e62SMaxim Levitsky sti_nop_cli(); 123085dc2aceSPaolo Bonzini 1231096cf7feSPaolo Bonzini vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR); 1232096cf7feSPaolo Bonzini vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 123385dc2aceSPaolo Bonzini break; 123485dc2aceSPaolo Bonzini 123585dc2aceSPaolo Bonzini case 4: 123685dc2aceSPaolo Bonzini break; 123785dc2aceSPaolo Bonzini 123885dc2aceSPaolo Bonzini default: 123985dc2aceSPaolo Bonzini return true; 124085dc2aceSPaolo Bonzini } 124185dc2aceSPaolo Bonzini 124285dc2aceSPaolo Bonzini inc_test_stage(test); 124385dc2aceSPaolo Bonzini 124485dc2aceSPaolo Bonzini return get_test_stage(test) == 5; 124585dc2aceSPaolo Bonzini } 124685dc2aceSPaolo Bonzini 124785dc2aceSPaolo Bonzini static bool interrupt_check(struct svm_test *test) 124885dc2aceSPaolo Bonzini { 124985dc2aceSPaolo Bonzini return get_test_stage(test) == 5; 125085dc2aceSPaolo Bonzini } 125185dc2aceSPaolo Bonzini 1252d4db486bSCathy Avery static volatile bool nmi_fired; 1253d4db486bSCathy Avery 12544a1207f6SMaxim Levitsky static void nmi_handler(struct ex_regs *regs) 1255d4db486bSCathy Avery { 1256d4db486bSCathy Avery nmi_fired = true; 1257d4db486bSCathy Avery } 1258d4db486bSCathy Avery 1259d4db486bSCathy Avery static void nmi_prepare(struct svm_test *test) 1260d4db486bSCathy Avery { 1261d4db486bSCathy Avery default_prepare(test); 1262d4db486bSCathy Avery nmi_fired = false; 12634a1207f6SMaxim Levitsky handle_exception(NMI_VECTOR, nmi_handler); 1264d4db486bSCathy Avery set_test_stage(test, 0); 1265d4db486bSCathy Avery } 1266d4db486bSCathy Avery 1267d4db486bSCathy Avery static void nmi_test(struct svm_test *test) 1268d4db486bSCathy Avery { 1269d4db486bSCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 1270d4db486bSCathy Avery 1271493d27d4SSean Christopherson report_svm_guest(nmi_fired, test, "direct NMI while running guest"); 1272d4db486bSCathy Avery 1273d4db486bSCathy Avery vmmcall(); 1274d4db486bSCathy Avery 1275d4db486bSCathy Avery nmi_fired = false; 1276d4db486bSCathy Avery 1277d4db486bSCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 1278d4db486bSCathy Avery 1279493d27d4SSean Christopherson report_svm_guest(nmi_fired, test, "intercepted pending NMI delivered to guest"); 1280d4db486bSCathy Avery } 1281d4db486bSCathy Avery 1282d4db486bSCathy Avery static bool nmi_finished(struct svm_test *test) 1283d4db486bSCathy Avery { 1284d4db486bSCathy Avery switch (get_test_stage(test)) { 1285d4db486bSCathy Avery case 0: 1286d4db486bSCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1287198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1288d4db486bSCathy Avery vmcb->control.exit_code); 1289d4db486bSCathy Avery return true; 1290d4db486bSCathy Avery } 1291d4db486bSCathy Avery vmcb->save.rip += 3; 1292d4db486bSCathy Avery 1293d4db486bSCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 1294d4db486bSCathy Avery break; 1295d4db486bSCathy Avery 1296d4db486bSCathy Avery case 1: 1297d4db486bSCathy Avery if (vmcb->control.exit_code != SVM_EXIT_NMI) { 1298198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x", 1299d4db486bSCathy Avery vmcb->control.exit_code); 1300d4db486bSCathy Avery return true; 1301d4db486bSCathy Avery } 1302d4db486bSCathy Avery 13035c3582f0SJanis Schoetterl-Glausch report_pass("NMI intercept while running guest"); 1304d4db486bSCathy Avery break; 1305d4db486bSCathy Avery 1306d4db486bSCathy Avery case 2: 1307d4db486bSCathy Avery break; 1308d4db486bSCathy Avery 1309d4db486bSCathy Avery default: 1310d4db486bSCathy Avery return true; 1311d4db486bSCathy Avery } 1312d4db486bSCathy Avery 1313d4db486bSCathy Avery inc_test_stage(test); 1314d4db486bSCathy Avery 1315d4db486bSCathy Avery return get_test_stage(test) == 3; 1316d4db486bSCathy Avery } 1317d4db486bSCathy Avery 1318d4db486bSCathy Avery static bool nmi_check(struct svm_test *test) 1319d4db486bSCathy Avery { 1320d4db486bSCathy Avery return get_test_stage(test) == 3; 1321d4db486bSCathy Avery } 1322d4db486bSCathy Avery 13239da1f4d8SCathy Avery #define NMI_DELAY 100000000ULL 13249da1f4d8SCathy Avery 13259da1f4d8SCathy Avery static void nmi_message_thread(void *_test) 13269da1f4d8SCathy Avery { 13279da1f4d8SCathy Avery struct svm_test *test = _test; 13289da1f4d8SCathy Avery 13299da1f4d8SCathy Avery while (get_test_stage(test) != 1) 13309da1f4d8SCathy Avery pause(); 13319da1f4d8SCathy Avery 13329da1f4d8SCathy Avery delay(NMI_DELAY); 13339da1f4d8SCathy Avery 13349da1f4d8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); 13359da1f4d8SCathy Avery 13369da1f4d8SCathy Avery while (get_test_stage(test) != 2) 13379da1f4d8SCathy Avery pause(); 13389da1f4d8SCathy Avery 13399da1f4d8SCathy Avery delay(NMI_DELAY); 13409da1f4d8SCathy Avery 13419da1f4d8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); 13429da1f4d8SCathy Avery } 13439da1f4d8SCathy Avery 13449da1f4d8SCathy Avery static void nmi_hlt_test(struct svm_test *test) 13459da1f4d8SCathy Avery { 13469da1f4d8SCathy Avery long long start; 13479da1f4d8SCathy Avery 13489da1f4d8SCathy Avery on_cpu_async(1, nmi_message_thread, test); 13499da1f4d8SCathy Avery 13509da1f4d8SCathy Avery start = rdtsc(); 13519da1f4d8SCathy Avery 13529da1f4d8SCathy Avery set_test_stage(test, 1); 13539da1f4d8SCathy Avery 13549da1f4d8SCathy Avery asm volatile ("hlt"); 13559da1f4d8SCathy Avery 1356493d27d4SSean Christopherson report_svm_guest(nmi_fired, test, "direct NMI + hlt"); 1357493d27d4SSean Christopherson report(rdtsc() - start > NMI_DELAY, "direct NMI after expected delay"); 13589da1f4d8SCathy Avery 13599da1f4d8SCathy Avery nmi_fired = false; 13609da1f4d8SCathy Avery 13619da1f4d8SCathy Avery vmmcall(); 13629da1f4d8SCathy Avery 13639da1f4d8SCathy Avery start = rdtsc(); 13649da1f4d8SCathy Avery 13659da1f4d8SCathy Avery set_test_stage(test, 2); 13669da1f4d8SCathy Avery 13679da1f4d8SCathy Avery asm volatile ("hlt"); 13689da1f4d8SCathy Avery 1369493d27d4SSean Christopherson report_svm_guest(nmi_fired, test, "intercepted NMI + hlt"); 1370493d27d4SSean Christopherson report(rdtsc() - start > NMI_DELAY, "intercepted NMI after expected delay"); 13719da1f4d8SCathy Avery 13729da1f4d8SCathy Avery set_test_stage(test, 3); 13739da1f4d8SCathy Avery } 13749da1f4d8SCathy Avery 13759da1f4d8SCathy Avery static bool nmi_hlt_finished(struct svm_test *test) 13769da1f4d8SCathy Avery { 13779da1f4d8SCathy Avery switch (get_test_stage(test)) { 13789da1f4d8SCathy Avery case 1: 13799da1f4d8SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1380198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 13819da1f4d8SCathy Avery vmcb->control.exit_code); 13829da1f4d8SCathy Avery return true; 13839da1f4d8SCathy Avery } 13849da1f4d8SCathy Avery vmcb->save.rip += 3; 13859da1f4d8SCathy Avery 13869da1f4d8SCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 13879da1f4d8SCathy Avery break; 13889da1f4d8SCathy Avery 13899da1f4d8SCathy Avery case 2: 13909da1f4d8SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_NMI) { 1391198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x", 13929da1f4d8SCathy Avery vmcb->control.exit_code); 13939da1f4d8SCathy Avery return true; 13949da1f4d8SCathy Avery } 13959da1f4d8SCathy Avery 13965c3582f0SJanis Schoetterl-Glausch report_pass("NMI intercept while running guest"); 13979da1f4d8SCathy Avery break; 13989da1f4d8SCathy Avery 13999da1f4d8SCathy Avery case 3: 14009da1f4d8SCathy Avery break; 14019da1f4d8SCathy Avery 14029da1f4d8SCathy Avery default: 14039da1f4d8SCathy Avery return true; 14049da1f4d8SCathy Avery } 14059da1f4d8SCathy Avery 14069da1f4d8SCathy Avery return get_test_stage(test) == 3; 14079da1f4d8SCathy Avery } 14089da1f4d8SCathy Avery 14099da1f4d8SCathy Avery static bool nmi_hlt_check(struct svm_test *test) 14109da1f4d8SCathy Avery { 14119da1f4d8SCathy Avery return get_test_stage(test) == 3; 14129da1f4d8SCathy Avery } 14139da1f4d8SCathy Avery 141408200397SSantosh Shukla static void vnmi_prepare(struct svm_test *test) 141508200397SSantosh Shukla { 141608200397SSantosh Shukla nmi_prepare(test); 141708200397SSantosh Shukla 141808200397SSantosh Shukla /* 141908200397SSantosh Shukla * Disable NMI interception to start. Enabling vNMI without 142008200397SSantosh Shukla * intercepting "real" NMIs should result in an ERR VM-Exit. 142108200397SSantosh Shukla */ 142208200397SSantosh Shukla vmcb->control.intercept &= ~(1ULL << INTERCEPT_NMI); 142308200397SSantosh Shukla vmcb->control.int_ctl = V_NMI_ENABLE_MASK; 142408200397SSantosh Shukla vmcb->control.int_vector = NMI_VECTOR; 142508200397SSantosh Shukla } 142608200397SSantosh Shukla 142708200397SSantosh Shukla static void vnmi_test(struct svm_test *test) 142808200397SSantosh Shukla { 142908200397SSantosh Shukla report_svm_guest(!nmi_fired, test, "No vNMI before injection"); 143008200397SSantosh Shukla vmmcall(); 143108200397SSantosh Shukla 143208200397SSantosh Shukla report_svm_guest(nmi_fired, test, "vNMI delivered after injection"); 143308200397SSantosh Shukla vmmcall(); 143408200397SSantosh Shukla } 143508200397SSantosh Shukla 143608200397SSantosh Shukla static bool vnmi_finished(struct svm_test *test) 143708200397SSantosh Shukla { 143808200397SSantosh Shukla switch (get_test_stage(test)) { 143908200397SSantosh Shukla case 0: 144008200397SSantosh Shukla if (vmcb->control.exit_code != SVM_EXIT_ERR) { 144108200397SSantosh Shukla report_fail("Wanted ERR VM-Exit, got 0x%x", 144208200397SSantosh Shukla vmcb->control.exit_code); 144308200397SSantosh Shukla return true; 144408200397SSantosh Shukla } 144508200397SSantosh Shukla report(!nmi_fired, "vNMI enabled but NMI_INTERCEPT unset!"); 144608200397SSantosh Shukla vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 144708200397SSantosh Shukla vmcb->save.rip += 3; 144808200397SSantosh Shukla break; 144908200397SSantosh Shukla 145008200397SSantosh Shukla case 1: 145108200397SSantosh Shukla if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 145208200397SSantosh Shukla report_fail("Wanted VMMCALL VM-Exit, got 0x%x", 145308200397SSantosh Shukla vmcb->control.exit_code); 145408200397SSantosh Shukla return true; 145508200397SSantosh Shukla } 145608200397SSantosh Shukla report(!nmi_fired, "vNMI with vector 2 not injected"); 145708200397SSantosh Shukla vmcb->control.int_ctl |= V_NMI_PENDING_MASK; 145808200397SSantosh Shukla vmcb->save.rip += 3; 145908200397SSantosh Shukla break; 146008200397SSantosh Shukla 146108200397SSantosh Shukla case 2: 146208200397SSantosh Shukla if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 146308200397SSantosh Shukla report_fail("Wanted VMMCALL VM-Exit, got 0x%x", 146408200397SSantosh Shukla vmcb->control.exit_code); 146508200397SSantosh Shukla return true; 146608200397SSantosh Shukla } 146708200397SSantosh Shukla if (vmcb->control.int_ctl & V_NMI_BLOCKING_MASK) { 146808200397SSantosh Shukla report_fail("V_NMI_BLOCKING_MASK not cleared on VMEXIT"); 146908200397SSantosh Shukla return true; 147008200397SSantosh Shukla } 147108200397SSantosh Shukla report_pass("VNMI serviced"); 147208200397SSantosh Shukla vmcb->save.rip += 3; 147308200397SSantosh Shukla break; 147408200397SSantosh Shukla 147508200397SSantosh Shukla default: 147608200397SSantosh Shukla return true; 147708200397SSantosh Shukla } 147808200397SSantosh Shukla 147908200397SSantosh Shukla inc_test_stage(test); 148008200397SSantosh Shukla 148108200397SSantosh Shukla return get_test_stage(test) == 3; 148208200397SSantosh Shukla } 148308200397SSantosh Shukla 148408200397SSantosh Shukla static bool vnmi_check(struct svm_test *test) 148508200397SSantosh Shukla { 148608200397SSantosh Shukla return get_test_stage(test) == 3; 148708200397SSantosh Shukla } 148808200397SSantosh Shukla 14894b4fb247SPaolo Bonzini static volatile int count_exc = 0; 14904b4fb247SPaolo Bonzini 14914b4fb247SPaolo Bonzini static void my_isr(struct ex_regs *r) 14924b4fb247SPaolo Bonzini { 14934b4fb247SPaolo Bonzini count_exc++; 14944b4fb247SPaolo Bonzini } 14954b4fb247SPaolo Bonzini 14964b4fb247SPaolo Bonzini static void exc_inject_prepare(struct svm_test *test) 14974b4fb247SPaolo Bonzini { 14988634a266SPaolo Bonzini default_prepare(test); 14994b4fb247SPaolo Bonzini handle_exception(DE_VECTOR, my_isr); 15004b4fb247SPaolo Bonzini handle_exception(NMI_VECTOR, my_isr); 15014b4fb247SPaolo Bonzini } 15024b4fb247SPaolo Bonzini 15034b4fb247SPaolo Bonzini 15044b4fb247SPaolo Bonzini static void exc_inject_test(struct svm_test *test) 15054b4fb247SPaolo Bonzini { 15064b4fb247SPaolo Bonzini asm volatile ("vmmcall\n\tvmmcall\n\t"); 15074b4fb247SPaolo Bonzini } 15084b4fb247SPaolo Bonzini 15094b4fb247SPaolo Bonzini static bool exc_inject_finished(struct svm_test *test) 15104b4fb247SPaolo Bonzini { 15114b4fb247SPaolo Bonzini switch (get_test_stage(test)) { 15124b4fb247SPaolo Bonzini case 0: 15134b4fb247SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1514198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 15154b4fb247SPaolo Bonzini vmcb->control.exit_code); 15164b4fb247SPaolo Bonzini return true; 15174b4fb247SPaolo Bonzini } 15182c1ca866SNadav Amit vmcb->save.rip += 3; 15194b4fb247SPaolo Bonzini vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID; 15204b4fb247SPaolo Bonzini break; 15214b4fb247SPaolo Bonzini 15224b4fb247SPaolo Bonzini case 1: 15234b4fb247SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_ERR) { 1524198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to error. Exit reason 0x%x", 15254b4fb247SPaolo Bonzini vmcb->control.exit_code); 15264b4fb247SPaolo Bonzini return true; 15274b4fb247SPaolo Bonzini } 15284b4fb247SPaolo Bonzini report(count_exc == 0, "exception with vector 2 not injected"); 15294b4fb247SPaolo Bonzini vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID; 15304b4fb247SPaolo Bonzini break; 15314b4fb247SPaolo Bonzini 15324b4fb247SPaolo Bonzini case 2: 15334b4fb247SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1534198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 15354b4fb247SPaolo Bonzini vmcb->control.exit_code); 15364b4fb247SPaolo Bonzini return true; 15374b4fb247SPaolo Bonzini } 15382c1ca866SNadav Amit vmcb->save.rip += 3; 15394b4fb247SPaolo Bonzini report(count_exc == 1, "divide overflow exception injected"); 15404b4fb247SPaolo Bonzini report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared"); 15414b4fb247SPaolo Bonzini break; 15424b4fb247SPaolo Bonzini 15434b4fb247SPaolo Bonzini default: 15444b4fb247SPaolo Bonzini return true; 15454b4fb247SPaolo Bonzini } 15464b4fb247SPaolo Bonzini 15474b4fb247SPaolo Bonzini inc_test_stage(test); 15484b4fb247SPaolo Bonzini 15494b4fb247SPaolo Bonzini return get_test_stage(test) == 3; 15504b4fb247SPaolo Bonzini } 15514b4fb247SPaolo Bonzini 15524b4fb247SPaolo Bonzini static bool exc_inject_check(struct svm_test *test) 15534b4fb247SPaolo Bonzini { 15544b4fb247SPaolo Bonzini return count_exc == 1 && get_test_stage(test) == 3; 15554b4fb247SPaolo Bonzini } 15564b4fb247SPaolo Bonzini 15579c838954SCathy Avery static volatile bool virq_fired; 15589c838954SCathy Avery 15599c838954SCathy Avery static void virq_isr(isr_regs_t *regs) 15609c838954SCathy Avery { 15619c838954SCathy Avery virq_fired = true; 15629c838954SCathy Avery } 15639c838954SCathy Avery 15649c838954SCathy Avery static void virq_inject_prepare(struct svm_test *test) 15659c838954SCathy Avery { 15669c838954SCathy Avery handle_irq(0xf1, virq_isr); 15679c838954SCathy Avery default_prepare(test); 15689c838954SCathy Avery vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 15699c838954SCathy Avery (0x0f << V_INTR_PRIO_SHIFT); // Set to the highest priority 15709c838954SCathy Avery vmcb->control.int_vector = 0xf1; 15719c838954SCathy Avery virq_fired = false; 15729c838954SCathy Avery set_test_stage(test, 0); 15739c838954SCathy Avery } 15749c838954SCathy Avery 15759c838954SCathy Avery static void virq_inject_test(struct svm_test *test) 15769c838954SCathy Avery { 1577493d27d4SSean Christopherson report_svm_guest(!virq_fired, test, "virtual IRQ blocked after L2 cli"); 15789c838954SCathy Avery 1579e4007e62SMaxim Levitsky sti_nop_cli(); 15809c838954SCathy Avery 1581493d27d4SSean Christopherson report_svm_guest(virq_fired, test, "virtual IRQ fired after L2 sti"); 15829c838954SCathy Avery 15839c838954SCathy Avery vmmcall(); 15849c838954SCathy Avery 1585493d27d4SSean Christopherson report_svm_guest(!virq_fired, test, "intercepted VINTR blocked after L2 cli"); 15869c838954SCathy Avery 1587e4007e62SMaxim Levitsky sti_nop_cli(); 15889c838954SCathy Avery 1589493d27d4SSean Christopherson report_svm_guest(virq_fired, test, "intercepted VINTR fired after L2 sti"); 15909c838954SCathy Avery 15919c838954SCathy Avery vmmcall(); 15929c838954SCathy Avery 1593e4007e62SMaxim Levitsky sti_nop_cli(); 15949c838954SCathy Avery 1595493d27d4SSean Christopherson report_svm_guest(!virq_fired, test, 1596493d27d4SSean Christopherson "virtual IRQ blocked V_IRQ_PRIO less than V_TPR"); 15979c838954SCathy Avery 15989c838954SCathy Avery vmmcall(); 15999c838954SCathy Avery vmmcall(); 16009c838954SCathy Avery } 16019c838954SCathy Avery 16029c838954SCathy Avery static bool virq_inject_finished(struct svm_test *test) 16039c838954SCathy Avery { 16049c838954SCathy Avery vmcb->save.rip += 3; 16059c838954SCathy Avery 16069c838954SCathy Avery switch (get_test_stage(test)) { 16079c838954SCathy Avery case 0: 16089c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1609198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 16109c838954SCathy Avery vmcb->control.exit_code); 16119c838954SCathy Avery return true; 16129c838954SCathy Avery } 16139c838954SCathy Avery if (vmcb->control.int_ctl & V_IRQ_MASK) { 1614198dfd0eSJanis Schoetterl-Glausch report_fail("V_IRQ not cleared on VMEXIT after firing"); 16159c838954SCathy Avery return true; 16169c838954SCathy Avery } 16179c838954SCathy Avery virq_fired = false; 16189c838954SCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 16199c838954SCathy Avery vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 16209c838954SCathy Avery (0x0f << V_INTR_PRIO_SHIFT); 16219c838954SCathy Avery break; 16229c838954SCathy Avery 16239c838954SCathy Avery case 1: 16249c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VINTR) { 1625198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vintr. Exit reason 0x%x", 16269c838954SCathy Avery vmcb->control.exit_code); 16279c838954SCathy Avery return true; 16289c838954SCathy Avery } 16299c838954SCathy Avery if (virq_fired) { 1630198dfd0eSJanis Schoetterl-Glausch report_fail("V_IRQ fired before SVM_EXIT_VINTR"); 16319c838954SCathy Avery return true; 16329c838954SCathy Avery } 16339c838954SCathy Avery vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); 16349c838954SCathy Avery break; 16359c838954SCathy Avery 16369c838954SCathy Avery case 2: 16379c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1638198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 16399c838954SCathy Avery vmcb->control.exit_code); 16409c838954SCathy Avery return true; 16419c838954SCathy Avery } 16429c838954SCathy Avery virq_fired = false; 16439c838954SCathy Avery // Set irq to lower priority 16449c838954SCathy Avery vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 16459c838954SCathy Avery (0x08 << V_INTR_PRIO_SHIFT); 16469c838954SCathy Avery // Raise guest TPR 16479c838954SCathy Avery vmcb->control.int_ctl |= 0x0a & V_TPR_MASK; 16489c838954SCathy Avery break; 16499c838954SCathy Avery 16509c838954SCathy Avery case 3: 16519c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1652198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 16539c838954SCathy Avery vmcb->control.exit_code); 16549c838954SCathy Avery return true; 16559c838954SCathy Avery } 16569c838954SCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 16579c838954SCathy Avery break; 16589c838954SCathy Avery 16599c838954SCathy Avery case 4: 16609c838954SCathy Avery // INTERCEPT_VINTR should be ignored because V_INTR_PRIO < V_TPR 16619c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1662198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 16639c838954SCathy Avery vmcb->control.exit_code); 16649c838954SCathy Avery return true; 16659c838954SCathy Avery } 16669c838954SCathy Avery break; 16679c838954SCathy Avery 16689c838954SCathy Avery default: 16699c838954SCathy Avery return true; 16709c838954SCathy Avery } 16719c838954SCathy Avery 16729c838954SCathy Avery inc_test_stage(test); 16739c838954SCathy Avery 16749c838954SCathy Avery return get_test_stage(test) == 5; 16759c838954SCathy Avery } 16769c838954SCathy Avery 16779c838954SCathy Avery static bool virq_inject_check(struct svm_test *test) 16789c838954SCathy Avery { 16799c838954SCathy Avery return get_test_stage(test) == 5; 16809c838954SCathy Avery } 16819c838954SCathy Avery 1682da338a31SMaxim Levitsky /* 1683da338a31SMaxim Levitsky * Detect nested guest RIP corruption as explained in kernel commit 1684da338a31SMaxim Levitsky * b6162e82aef19fee9c32cb3fe9ac30d9116a8c73 1685da338a31SMaxim Levitsky * 1686da338a31SMaxim Levitsky * In the assembly loop below 'ins' is executed while IO instructions 1687da338a31SMaxim Levitsky * are not intercepted; the instruction is emulated by L0. 1688da338a31SMaxim Levitsky * 1689da338a31SMaxim Levitsky * At the same time we are getting interrupts from the local APIC timer, 1690da338a31SMaxim Levitsky * and we do intercept them in L1 1691da338a31SMaxim Levitsky * 1692da338a31SMaxim Levitsky * If the interrupt happens on the insb instruction, L0 will VMexit, emulate 1693da338a31SMaxim Levitsky * the insb instruction and then it will inject the interrupt to L1 through 1694da338a31SMaxim Levitsky * a nested VMexit. Due to a bug, it would leave pre-emulation values of RIP, 1695da338a31SMaxim Levitsky * RAX and RSP in the VMCB. 1696da338a31SMaxim Levitsky * 1697da338a31SMaxim Levitsky * In our intercept handler we detect the bug by checking that RIP is that of 1698da338a31SMaxim Levitsky * the insb instruction, but its memory operand has already been written. 1699da338a31SMaxim Levitsky * This means that insb was already executed. 1700da338a31SMaxim Levitsky */ 1701da338a31SMaxim Levitsky 1702da338a31SMaxim Levitsky static volatile int isr_cnt = 0; 1703da338a31SMaxim Levitsky static volatile uint8_t io_port_var = 0xAA; 1704da338a31SMaxim Levitsky extern const char insb_instruction_label[]; 1705da338a31SMaxim Levitsky 1706da338a31SMaxim Levitsky static void reg_corruption_isr(isr_regs_t *regs) 1707da338a31SMaxim Levitsky { 1708da338a31SMaxim Levitsky isr_cnt++; 1709da338a31SMaxim Levitsky apic_write(APIC_EOI, 0); 1710da338a31SMaxim Levitsky } 1711da338a31SMaxim Levitsky 1712da338a31SMaxim Levitsky static void reg_corruption_prepare(struct svm_test *test) 1713da338a31SMaxim Levitsky { 1714da338a31SMaxim Levitsky default_prepare(test); 1715da338a31SMaxim Levitsky set_test_stage(test, 0); 1716da338a31SMaxim Levitsky 1717da338a31SMaxim Levitsky vmcb->control.int_ctl = V_INTR_MASKING_MASK; 1718da338a31SMaxim Levitsky vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1719da338a31SMaxim Levitsky 1720da338a31SMaxim Levitsky handle_irq(TIMER_VECTOR, reg_corruption_isr); 1721da338a31SMaxim Levitsky 1722da338a31SMaxim Levitsky /* set local APIC to inject external interrupts */ 1723a2c7dff7SMaxim Levitsky apic_setup_timer(TIMER_VECTOR, APIC_LVT_TIMER_PERIODIC); 1724a2c7dff7SMaxim Levitsky apic_start_timer(1000); 1725da338a31SMaxim Levitsky } 1726da338a31SMaxim Levitsky 1727da338a31SMaxim Levitsky static void reg_corruption_test(struct svm_test *test) 1728da338a31SMaxim Levitsky { 1729da338a31SMaxim Levitsky /* this is endless loop, which is interrupted by the timer interrupt */ 1730da338a31SMaxim Levitsky asm volatile ( 1731da338a31SMaxim Levitsky "1:\n\t" 1732da338a31SMaxim Levitsky "movw $0x4d0, %%dx\n\t" // IO port 1733da338a31SMaxim Levitsky "lea %[io_port_var], %%rdi\n\t" 1734da338a31SMaxim Levitsky "movb $0xAA, %[io_port_var]\n\t" 1735da338a31SMaxim Levitsky "insb_instruction_label:\n\t" 1736da338a31SMaxim Levitsky "insb\n\t" 1737da338a31SMaxim Levitsky "jmp 1b\n\t" 1738da338a31SMaxim Levitsky 1739da338a31SMaxim Levitsky : [io_port_var] "=m" (io_port_var) 1740da338a31SMaxim Levitsky : /* no inputs*/ 1741da338a31SMaxim Levitsky : "rdx", "rdi" 1742da338a31SMaxim Levitsky ); 1743da338a31SMaxim Levitsky } 1744da338a31SMaxim Levitsky 1745da338a31SMaxim Levitsky static bool reg_corruption_finished(struct svm_test *test) 1746da338a31SMaxim Levitsky { 1747da338a31SMaxim Levitsky if (isr_cnt == 10000) { 17485c3582f0SJanis Schoetterl-Glausch report_pass("No RIP corruption detected after %d timer interrupts", 1749da338a31SMaxim Levitsky isr_cnt); 1750da338a31SMaxim Levitsky set_test_stage(test, 1); 1751491bbc64SMaxim Levitsky goto cleanup; 1752da338a31SMaxim Levitsky } 1753da338a31SMaxim Levitsky 1754da338a31SMaxim Levitsky if (vmcb->control.exit_code == SVM_EXIT_INTR) { 1755da338a31SMaxim Levitsky 1756da338a31SMaxim Levitsky void* guest_rip = (void*)vmcb->save.rip; 1757da338a31SMaxim Levitsky 1758e4007e62SMaxim Levitsky sti_nop_cli(); 1759da338a31SMaxim Levitsky 1760da338a31SMaxim Levitsky if (guest_rip == insb_instruction_label && io_port_var != 0xAA) { 1761198dfd0eSJanis Schoetterl-Glausch report_fail("RIP corruption detected after %d timer interrupts", 1762da338a31SMaxim Levitsky isr_cnt); 1763491bbc64SMaxim Levitsky goto cleanup; 1764da338a31SMaxim Levitsky } 1765da338a31SMaxim Levitsky 1766da338a31SMaxim Levitsky } 1767da338a31SMaxim Levitsky return false; 1768491bbc64SMaxim Levitsky cleanup: 1769a2c7dff7SMaxim Levitsky apic_cleanup_timer(); 1770491bbc64SMaxim Levitsky return true; 1771491bbc64SMaxim Levitsky 1772da338a31SMaxim Levitsky } 1773da338a31SMaxim Levitsky 1774da338a31SMaxim Levitsky static bool reg_corruption_check(struct svm_test *test) 1775da338a31SMaxim Levitsky { 1776da338a31SMaxim Levitsky return get_test_stage(test) == 1; 1777da338a31SMaxim Levitsky } 1778da338a31SMaxim Levitsky 17794770e9c8SCathy Avery static void get_tss_entry(void *data) 17804770e9c8SCathy Avery { 1781a7f32d87SPaolo Bonzini *((gdt_entry_t **)data) = get_tss_descr(); 17824770e9c8SCathy Avery } 17834770e9c8SCathy Avery 17844770e9c8SCathy Avery static int orig_cpu_count; 17854770e9c8SCathy Avery 17864770e9c8SCathy Avery static void init_startup_prepare(struct svm_test *test) 17874770e9c8SCathy Avery { 1788a7f32d87SPaolo Bonzini gdt_entry_t *tss_entry; 17894770e9c8SCathy Avery int i; 17904770e9c8SCathy Avery 17914770e9c8SCathy Avery on_cpu(1, get_tss_entry, &tss_entry); 17924770e9c8SCathy Avery 1793d36b378fSVarad Gautam orig_cpu_count = atomic_read(&cpu_online_count); 17944770e9c8SCathy Avery 17954770e9c8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 17964770e9c8SCathy Avery id_map[1]); 17974770e9c8SCathy Avery 17984770e9c8SCathy Avery delay(100000000ULL); 17994770e9c8SCathy Avery 1800d36b378fSVarad Gautam atomic_dec(&cpu_online_count); 18014770e9c8SCathy Avery 1802a7f32d87SPaolo Bonzini tss_entry->type &= ~DESC_BUSY; 18034770e9c8SCathy Avery 18044770e9c8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_STARTUP, id_map[1]); 18054770e9c8SCathy Avery 1806d36b378fSVarad Gautam for (i = 0; i < 5 && atomic_read(&cpu_online_count) < orig_cpu_count; i++) 18074770e9c8SCathy Avery delay(100000000ULL); 18084770e9c8SCathy Avery } 18094770e9c8SCathy Avery 18104770e9c8SCathy Avery static bool init_startup_finished(struct svm_test *test) 18114770e9c8SCathy Avery { 18124770e9c8SCathy Avery return true; 18134770e9c8SCathy Avery } 18144770e9c8SCathy Avery 18154770e9c8SCathy Avery static bool init_startup_check(struct svm_test *test) 18164770e9c8SCathy Avery { 1817d36b378fSVarad Gautam return atomic_read(&cpu_online_count) == orig_cpu_count; 18184770e9c8SCathy Avery } 18194770e9c8SCathy Avery 1820d5da6dfeSCathy Avery static volatile bool init_intercept; 1821d5da6dfeSCathy Avery 1822d5da6dfeSCathy Avery static void init_intercept_prepare(struct svm_test *test) 1823d5da6dfeSCathy Avery { 1824d5da6dfeSCathy Avery init_intercept = false; 1825d5da6dfeSCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_INIT); 1826d5da6dfeSCathy Avery } 1827d5da6dfeSCathy Avery 1828d5da6dfeSCathy Avery static void init_intercept_test(struct svm_test *test) 1829d5da6dfeSCathy Avery { 1830d5da6dfeSCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 0); 1831d5da6dfeSCathy Avery } 1832d5da6dfeSCathy Avery 1833d5da6dfeSCathy Avery static bool init_intercept_finished(struct svm_test *test) 1834d5da6dfeSCathy Avery { 1835d5da6dfeSCathy Avery vmcb->save.rip += 3; 1836d5da6dfeSCathy Avery 1837d5da6dfeSCathy Avery if (vmcb->control.exit_code != SVM_EXIT_INIT) { 1838198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to init intercept. Exit reason 0x%x", 1839d5da6dfeSCathy Avery vmcb->control.exit_code); 1840d5da6dfeSCathy Avery 1841d5da6dfeSCathy Avery return true; 1842d5da6dfeSCathy Avery } 1843d5da6dfeSCathy Avery 1844d5da6dfeSCathy Avery init_intercept = true; 1845d5da6dfeSCathy Avery 18465c3582f0SJanis Schoetterl-Glausch report_pass("INIT to vcpu intercepted"); 1847d5da6dfeSCathy Avery 1848d5da6dfeSCathy Avery return true; 1849d5da6dfeSCathy Avery } 1850d5da6dfeSCathy Avery 1851d5da6dfeSCathy Avery static bool init_intercept_check(struct svm_test *test) 1852d5da6dfeSCathy Avery { 1853d5da6dfeSCathy Avery return init_intercept; 1854d5da6dfeSCathy Avery } 1855d5da6dfeSCathy Avery 18567839b0ecSKrish Sadhukhan /* 18577839b0ecSKrish Sadhukhan * Setting host EFLAGS.TF causes a #DB trap after the VMRUN completes on the 18587839b0ecSKrish Sadhukhan * host side (i.e., after the #VMEXIT from the guest). 18597839b0ecSKrish Sadhukhan * 18600689a980SKrish Sadhukhan * Setting host EFLAGS.RF suppresses any potential instruction breakpoint 18610689a980SKrish Sadhukhan * match on the VMRUN and completion of the VMRUN instruction clears the 18620689a980SKrish Sadhukhan * host EFLAGS.RF bit. 18630689a980SKrish Sadhukhan * 18647839b0ecSKrish Sadhukhan * [AMD APM] 18657839b0ecSKrish Sadhukhan */ 18667839b0ecSKrish Sadhukhan static volatile u8 host_rflags_guest_main_flag = 0; 18677839b0ecSKrish Sadhukhan static volatile u8 host_rflags_db_handler_flag = 0; 18687839b0ecSKrish Sadhukhan static volatile bool host_rflags_ss_on_vmrun = false; 18697839b0ecSKrish Sadhukhan static volatile bool host_rflags_vmrun_reached = false; 18707839b0ecSKrish Sadhukhan static volatile bool host_rflags_set_tf = false; 18710689a980SKrish Sadhukhan static volatile bool host_rflags_set_rf = false; 18720689a980SKrish Sadhukhan static u64 rip_detected; 18737839b0ecSKrish Sadhukhan 18747839b0ecSKrish Sadhukhan extern u64 *vmrun_rip; 18757839b0ecSKrish Sadhukhan 18767839b0ecSKrish Sadhukhan static void host_rflags_db_handler(struct ex_regs *r) 18777839b0ecSKrish Sadhukhan { 18787839b0ecSKrish Sadhukhan if (host_rflags_ss_on_vmrun) { 18797839b0ecSKrish Sadhukhan if (host_rflags_vmrun_reached) { 18800689a980SKrish Sadhukhan if (!host_rflags_set_rf) { 18817839b0ecSKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 18820689a980SKrish Sadhukhan rip_detected = r->rip; 18837839b0ecSKrish Sadhukhan } else { 18840689a980SKrish Sadhukhan r->rflags |= X86_EFLAGS_RF; 18850689a980SKrish Sadhukhan ++host_rflags_db_handler_flag; 18860689a980SKrish Sadhukhan } 18870689a980SKrish Sadhukhan } else { 18880689a980SKrish Sadhukhan if (r->rip == (u64)&vmrun_rip) { 18897839b0ecSKrish Sadhukhan host_rflags_vmrun_reached = true; 18900689a980SKrish Sadhukhan 18910689a980SKrish Sadhukhan if (host_rflags_set_rf) { 18920689a980SKrish Sadhukhan host_rflags_guest_main_flag = 0; 18930689a980SKrish Sadhukhan rip_detected = r->rip; 18940689a980SKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 18950689a980SKrish Sadhukhan 18960689a980SKrish Sadhukhan /* Trigger #DB via debug registers */ 18970689a980SKrish Sadhukhan write_dr0((void *)&vmrun_rip); 18980689a980SKrish Sadhukhan write_dr7(0x403); 18990689a980SKrish Sadhukhan } 19000689a980SKrish Sadhukhan } 19017839b0ecSKrish Sadhukhan } 19027839b0ecSKrish Sadhukhan } else { 19037839b0ecSKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 19047839b0ecSKrish Sadhukhan } 19057839b0ecSKrish Sadhukhan } 19067839b0ecSKrish Sadhukhan 19077839b0ecSKrish Sadhukhan static void host_rflags_prepare(struct svm_test *test) 19087839b0ecSKrish Sadhukhan { 19097839b0ecSKrish Sadhukhan default_prepare(test); 19107839b0ecSKrish Sadhukhan handle_exception(DB_VECTOR, host_rflags_db_handler); 19117839b0ecSKrish Sadhukhan set_test_stage(test, 0); 19127839b0ecSKrish Sadhukhan } 19137839b0ecSKrish Sadhukhan 19147839b0ecSKrish Sadhukhan static void host_rflags_prepare_gif_clear(struct svm_test *test) 19157839b0ecSKrish Sadhukhan { 19167839b0ecSKrish Sadhukhan if (host_rflags_set_tf) 19177839b0ecSKrish Sadhukhan write_rflags(read_rflags() | X86_EFLAGS_TF); 19187839b0ecSKrish Sadhukhan } 19197839b0ecSKrish Sadhukhan 19207839b0ecSKrish Sadhukhan static void host_rflags_test(struct svm_test *test) 19217839b0ecSKrish Sadhukhan { 19227839b0ecSKrish Sadhukhan while (1) { 19230689a980SKrish Sadhukhan if (get_test_stage(test) > 0) { 19240689a980SKrish Sadhukhan if ((host_rflags_set_tf && !host_rflags_ss_on_vmrun && !host_rflags_db_handler_flag) || 19250689a980SKrish Sadhukhan (host_rflags_set_rf && host_rflags_db_handler_flag == 1)) 19267839b0ecSKrish Sadhukhan host_rflags_guest_main_flag = 1; 19270689a980SKrish Sadhukhan } 19280689a980SKrish Sadhukhan 19290689a980SKrish Sadhukhan if (get_test_stage(test) == 4) 19307839b0ecSKrish Sadhukhan break; 19317839b0ecSKrish Sadhukhan vmmcall(); 19327839b0ecSKrish Sadhukhan } 19337839b0ecSKrish Sadhukhan } 19347839b0ecSKrish Sadhukhan 19357839b0ecSKrish Sadhukhan static bool host_rflags_finished(struct svm_test *test) 19367839b0ecSKrish Sadhukhan { 19377839b0ecSKrish Sadhukhan switch (get_test_stage(test)) { 19387839b0ecSKrish Sadhukhan case 0: 19397839b0ecSKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1940198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT. Exit reason 0x%x", 19417839b0ecSKrish Sadhukhan vmcb->control.exit_code); 19427839b0ecSKrish Sadhukhan return true; 19437839b0ecSKrish Sadhukhan } 19447839b0ecSKrish Sadhukhan vmcb->save.rip += 3; 19457839b0ecSKrish Sadhukhan /* 19467839b0ecSKrish Sadhukhan * Setting host EFLAGS.TF not immediately before VMRUN, causes 19477839b0ecSKrish Sadhukhan * #DB trap before first guest instruction is executed 19487839b0ecSKrish Sadhukhan */ 19497839b0ecSKrish Sadhukhan host_rflags_set_tf = true; 19507839b0ecSKrish Sadhukhan break; 19517839b0ecSKrish Sadhukhan case 1: 19527839b0ecSKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 19530689a980SKrish Sadhukhan host_rflags_guest_main_flag != 1) { 1954198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT or #DB handler" 19557839b0ecSKrish Sadhukhan " invoked before guest main. Exit reason 0x%x", 19567839b0ecSKrish Sadhukhan vmcb->control.exit_code); 19577839b0ecSKrish Sadhukhan return true; 19587839b0ecSKrish Sadhukhan } 19597839b0ecSKrish Sadhukhan vmcb->save.rip += 3; 19607839b0ecSKrish Sadhukhan /* 19617839b0ecSKrish Sadhukhan * Setting host EFLAGS.TF immediately before VMRUN, causes #DB 19627839b0ecSKrish Sadhukhan * trap after VMRUN completes on the host side (i.e., after 19637839b0ecSKrish Sadhukhan * VMEXIT from guest). 19647839b0ecSKrish Sadhukhan */ 19657839b0ecSKrish Sadhukhan host_rflags_ss_on_vmrun = true; 19667839b0ecSKrish Sadhukhan break; 19677839b0ecSKrish Sadhukhan case 2: 19687839b0ecSKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 19690c22fd44SPaolo Bonzini rip_detected != (u64)&vmrun_rip + 3) { 1970198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT or RIP mismatch." 19710689a980SKrish Sadhukhan " Exit reason 0x%x, RIP actual: %lx, RIP expected: " 19720689a980SKrish Sadhukhan "%lx", vmcb->control.exit_code, 19730c22fd44SPaolo Bonzini (u64)&vmrun_rip + 3, rip_detected); 19740689a980SKrish Sadhukhan return true; 19750689a980SKrish Sadhukhan } 19760689a980SKrish Sadhukhan host_rflags_set_rf = true; 19770689a980SKrish Sadhukhan host_rflags_guest_main_flag = 0; 19780689a980SKrish Sadhukhan host_rflags_vmrun_reached = false; 19790689a980SKrish Sadhukhan vmcb->save.rip += 3; 19800689a980SKrish Sadhukhan break; 19810689a980SKrish Sadhukhan case 3: 19820689a980SKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 19830689a980SKrish Sadhukhan rip_detected != (u64)&vmrun_rip || 19840689a980SKrish Sadhukhan host_rflags_guest_main_flag != 1 || 19850689a980SKrish Sadhukhan host_rflags_db_handler_flag > 1 || 19860689a980SKrish Sadhukhan read_rflags() & X86_EFLAGS_RF) { 1987198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT or RIP mismatch or " 19880689a980SKrish Sadhukhan "EFLAGS.RF not cleared." 19890689a980SKrish Sadhukhan " Exit reason 0x%x, RIP actual: %lx, RIP expected: " 19900689a980SKrish Sadhukhan "%lx", vmcb->control.exit_code, 19910689a980SKrish Sadhukhan (u64)&vmrun_rip, rip_detected); 19927839b0ecSKrish Sadhukhan return true; 19937839b0ecSKrish Sadhukhan } 19947839b0ecSKrish Sadhukhan host_rflags_set_tf = false; 19950689a980SKrish Sadhukhan host_rflags_set_rf = false; 19967839b0ecSKrish Sadhukhan vmcb->save.rip += 3; 19977839b0ecSKrish Sadhukhan break; 19987839b0ecSKrish Sadhukhan default: 19997839b0ecSKrish Sadhukhan return true; 20007839b0ecSKrish Sadhukhan } 20017839b0ecSKrish Sadhukhan inc_test_stage(test); 20020689a980SKrish Sadhukhan return get_test_stage(test) == 5; 20037839b0ecSKrish Sadhukhan } 20047839b0ecSKrish Sadhukhan 20057839b0ecSKrish Sadhukhan static bool host_rflags_check(struct svm_test *test) 20067839b0ecSKrish Sadhukhan { 20070689a980SKrish Sadhukhan return get_test_stage(test) == 4; 20087839b0ecSKrish Sadhukhan } 20097839b0ecSKrish Sadhukhan 20108660d1b5SKrish Sadhukhan #define TEST(name) { #name, .v2 = name } 20118660d1b5SKrish Sadhukhan 2012ba29942cSKrish Sadhukhan /* 2013ba29942cSKrish Sadhukhan * v2 tests 2014ba29942cSKrish Sadhukhan */ 2015ba29942cSKrish Sadhukhan 2016f32183f5SJim Mattson /* 2017f32183f5SJim Mattson * Ensure that kvm recalculates the L1 guest's CPUID.01H:ECX.OSXSAVE 2018f32183f5SJim Mattson * after VM-exit from an L2 guest that sets CR4.OSXSAVE to a different 2019f32183f5SJim Mattson * value than in L1. 2020f32183f5SJim Mattson */ 2021f32183f5SJim Mattson 2022f32183f5SJim Mattson static void svm_cr4_osxsave_test_guest(struct svm_test *test) 2023f32183f5SJim Mattson { 2024f32183f5SJim Mattson write_cr4(read_cr4() & ~X86_CR4_OSXSAVE); 2025f32183f5SJim Mattson } 2026f32183f5SJim Mattson 2027f32183f5SJim Mattson static void svm_cr4_osxsave_test(void) 2028f32183f5SJim Mattson { 2029f32183f5SJim Mattson if (!this_cpu_has(X86_FEATURE_XSAVE)) { 2030f32183f5SJim Mattson report_skip("XSAVE not detected"); 2031f32183f5SJim Mattson return; 2032f32183f5SJim Mattson } 2033f32183f5SJim Mattson 2034f32183f5SJim Mattson if (!(read_cr4() & X86_CR4_OSXSAVE)) { 2035f32183f5SJim Mattson unsigned long cr4 = read_cr4() | X86_CR4_OSXSAVE; 2036f32183f5SJim Mattson 2037f32183f5SJim Mattson write_cr4(cr4); 2038f32183f5SJim Mattson vmcb->save.cr4 = cr4; 2039f32183f5SJim Mattson } 2040f32183f5SJim Mattson 2041816c0359SSean Christopherson report(this_cpu_has(X86_FEATURE_OSXSAVE), "CPUID.01H:ECX.XSAVE set before VMRUN"); 2042f32183f5SJim Mattson 2043f32183f5SJim Mattson test_set_guest(svm_cr4_osxsave_test_guest); 2044f32183f5SJim Mattson report(svm_vmrun() == SVM_EXIT_VMMCALL, 2045f32183f5SJim Mattson "svm_cr4_osxsave_test_guest finished with VMMCALL"); 2046f32183f5SJim Mattson 2047816c0359SSean Christopherson report(this_cpu_has(X86_FEATURE_OSXSAVE), "CPUID.01H:ECX.XSAVE set after VMRUN"); 2048f32183f5SJim Mattson } 2049f32183f5SJim Mattson 2050ba29942cSKrish Sadhukhan static void basic_guest_main(struct svm_test *test) 2051ba29942cSKrish Sadhukhan { 2052ba29942cSKrish Sadhukhan } 2053ba29942cSKrish Sadhukhan 2054eae10e8fSKrish Sadhukhan 2055eae10e8fSKrish Sadhukhan #define SVM_TEST_REG_RESERVED_BITS(start, end, inc, str_name, reg, val, \ 2056eae10e8fSKrish Sadhukhan resv_mask) \ 2057eae10e8fSKrish Sadhukhan { \ 2058eae10e8fSKrish Sadhukhan u64 tmp, mask; \ 2059eae10e8fSKrish Sadhukhan int i; \ 2060eae10e8fSKrish Sadhukhan \ 2061eae10e8fSKrish Sadhukhan for (i = start; i <= end; i = i + inc) { \ 2062eae10e8fSKrish Sadhukhan mask = 1ull << i; \ 2063eae10e8fSKrish Sadhukhan if (!(mask & resv_mask)) \ 2064eae10e8fSKrish Sadhukhan continue; \ 2065eae10e8fSKrish Sadhukhan tmp = val | mask; \ 2066eae10e8fSKrish Sadhukhan reg = tmp; \ 2067eae10e8fSKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "Test %s %d:%d: %lx", \ 2068eae10e8fSKrish Sadhukhan str_name, end, start, tmp); \ 2069eae10e8fSKrish Sadhukhan } \ 2070eae10e8fSKrish Sadhukhan } 2071eae10e8fSKrish Sadhukhan 20726d0ecbf6SKrish Sadhukhan #define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask, \ 2073cb6524f3SPaolo Bonzini exit_code, test_name) \ 2074a79c9495SKrish Sadhukhan { \ 2075a79c9495SKrish Sadhukhan u64 tmp, mask; \ 20768ae6d77fSSean Christopherson u32 r; \ 2077a79c9495SKrish Sadhukhan int i; \ 2078a79c9495SKrish Sadhukhan \ 2079a79c9495SKrish Sadhukhan for (i = start; i <= end; i = i + inc) { \ 2080a79c9495SKrish Sadhukhan mask = 1ull << i; \ 2081a79c9495SKrish Sadhukhan if (!(mask & resv_mask)) \ 2082a79c9495SKrish Sadhukhan continue; \ 2083a79c9495SKrish Sadhukhan tmp = val | mask; \ 2084a79c9495SKrish Sadhukhan switch (cr) { \ 2085a79c9495SKrish Sadhukhan case 0: \ 2086a79c9495SKrish Sadhukhan vmcb->save.cr0 = tmp; \ 2087a79c9495SKrish Sadhukhan break; \ 2088a79c9495SKrish Sadhukhan case 3: \ 2089a79c9495SKrish Sadhukhan vmcb->save.cr3 = tmp; \ 2090a79c9495SKrish Sadhukhan break; \ 2091a79c9495SKrish Sadhukhan case 4: \ 2092a79c9495SKrish Sadhukhan vmcb->save.cr4 = tmp; \ 2093a79c9495SKrish Sadhukhan } \ 20948ae6d77fSSean Christopherson r = svm_vmrun(); \ 20958ae6d77fSSean Christopherson report(r == exit_code, "Test CR%d %s%d:%d: %lx, wanted exit 0x%x, got 0x%x", \ 20968ae6d77fSSean Christopherson cr, test_name, end, start, tmp, exit_code, r); \ 2097a79c9495SKrish Sadhukhan } \ 2098a79c9495SKrish Sadhukhan } 2099e8d7a8f6SKrish Sadhukhan 2100a79c9495SKrish Sadhukhan static void test_efer(void) 2101a79c9495SKrish Sadhukhan { 2102e8d7a8f6SKrish Sadhukhan /* 2103e8d7a8f6SKrish Sadhukhan * Un-setting EFER.SVME is illegal 2104e8d7a8f6SKrish Sadhukhan */ 2105ba29942cSKrish Sadhukhan u64 efer_saved = vmcb->save.efer; 2106ba29942cSKrish Sadhukhan u64 efer = efer_saved; 2107ba29942cSKrish Sadhukhan 2108ba29942cSKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer); 2109ba29942cSKrish Sadhukhan efer &= ~EFER_SVME; 2110ba29942cSKrish Sadhukhan vmcb->save.efer = efer; 2111ba29942cSKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer); 2112ba29942cSKrish Sadhukhan vmcb->save.efer = efer_saved; 2113e8d7a8f6SKrish Sadhukhan 2114e8d7a8f6SKrish Sadhukhan /* 2115a79c9495SKrish Sadhukhan * EFER MBZ bits: 63:16, 9 2116a79c9495SKrish Sadhukhan */ 2117a79c9495SKrish Sadhukhan efer_saved = vmcb->save.efer; 2118a79c9495SKrish Sadhukhan 2119a79c9495SKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(8, 9, 1, "EFER", vmcb->save.efer, 2120a79c9495SKrish Sadhukhan efer_saved, SVM_EFER_RESERVED_MASK); 2121a79c9495SKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(16, 63, 4, "EFER", vmcb->save.efer, 2122a79c9495SKrish Sadhukhan efer_saved, SVM_EFER_RESERVED_MASK); 2123a79c9495SKrish Sadhukhan 21241d7bde08SKrish Sadhukhan /* 21251d7bde08SKrish Sadhukhan * EFER.LME and CR0.PG are both set and CR4.PAE is zero. 21261d7bde08SKrish Sadhukhan */ 21271d7bde08SKrish Sadhukhan u64 cr0_saved = vmcb->save.cr0; 21281d7bde08SKrish Sadhukhan u64 cr0; 21291d7bde08SKrish Sadhukhan u64 cr4_saved = vmcb->save.cr4; 21301d7bde08SKrish Sadhukhan u64 cr4; 21311d7bde08SKrish Sadhukhan 21321d7bde08SKrish Sadhukhan efer = efer_saved | EFER_LME; 21331d7bde08SKrish Sadhukhan vmcb->save.efer = efer; 21341d7bde08SKrish Sadhukhan cr0 = cr0_saved | X86_CR0_PG | X86_CR0_PE; 21351d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0; 21361d7bde08SKrish Sadhukhan cr4 = cr4_saved & ~X86_CR4_PAE; 21371d7bde08SKrish Sadhukhan vmcb->save.cr4 = cr4; 21381d7bde08SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 21391d7bde08SKrish Sadhukhan "CR0.PG=1 (%lx) and CR4.PAE=0 (%lx)", efer, cr0, cr4); 21401d7bde08SKrish Sadhukhan 21411d7bde08SKrish Sadhukhan /* 21421d7bde08SKrish Sadhukhan * EFER.LME and CR0.PG are both set and CR0.PE is zero. 2143fc050452SLara Lazier * CR4.PAE needs to be set as we otherwise cannot 2144fc050452SLara Lazier * determine if CR4.PAE=0 or CR0.PE=0 triggered the 2145fc050452SLara Lazier * SVM_EXIT_ERR. 21461d7bde08SKrish Sadhukhan */ 2147fc050452SLara Lazier cr4 = cr4_saved | X86_CR4_PAE; 2148fc050452SLara Lazier vmcb->save.cr4 = cr4; 21491d7bde08SKrish Sadhukhan cr0 &= ~X86_CR0_PE; 21501d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0; 21511d7bde08SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 21521d7bde08SKrish Sadhukhan "CR0.PG=1 and CR0.PE=0 (%lx)", efer, cr0); 21531d7bde08SKrish Sadhukhan 21541d7bde08SKrish Sadhukhan /* 21551d7bde08SKrish Sadhukhan * EFER.LME, CR0.PG, CR4.PAE, CS.L, and CS.D are all non-zero. 21561d7bde08SKrish Sadhukhan */ 21571d7bde08SKrish Sadhukhan u32 cs_attrib_saved = vmcb->save.cs.attrib; 21581d7bde08SKrish Sadhukhan u32 cs_attrib; 21591d7bde08SKrish Sadhukhan 21601d7bde08SKrish Sadhukhan cr0 |= X86_CR0_PE; 21611d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0; 21621d7bde08SKrish Sadhukhan cs_attrib = cs_attrib_saved | SVM_SELECTOR_L_MASK | 21631d7bde08SKrish Sadhukhan SVM_SELECTOR_DB_MASK; 21641d7bde08SKrish Sadhukhan vmcb->save.cs.attrib = cs_attrib; 21651d7bde08SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 21661d7bde08SKrish Sadhukhan "CR0.PG=1 (%lx), CR4.PAE=1 (%lx), CS.L=1 and CS.D=1 (%x)", 21671d7bde08SKrish Sadhukhan efer, cr0, cr4, cs_attrib); 21681d7bde08SKrish Sadhukhan 21691d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0_saved; 21701d7bde08SKrish Sadhukhan vmcb->save.cr4 = cr4_saved; 2171a79c9495SKrish Sadhukhan vmcb->save.efer = efer_saved; 21721d7bde08SKrish Sadhukhan vmcb->save.cs.attrib = cs_attrib_saved; 2173a79c9495SKrish Sadhukhan } 2174a79c9495SKrish Sadhukhan 2175a79c9495SKrish Sadhukhan static void test_cr0(void) 2176a79c9495SKrish Sadhukhan { 2177a79c9495SKrish Sadhukhan /* 2178e8d7a8f6SKrish Sadhukhan * Un-setting CR0.CD and setting CR0.NW is illegal combination 2179e8d7a8f6SKrish Sadhukhan */ 2180e8d7a8f6SKrish Sadhukhan u64 cr0_saved = vmcb->save.cr0; 2181e8d7a8f6SKrish Sadhukhan u64 cr0 = cr0_saved; 2182e8d7a8f6SKrish Sadhukhan 2183e8d7a8f6SKrish Sadhukhan cr0 |= X86_CR0_CD; 2184e8d7a8f6SKrish Sadhukhan cr0 &= ~X86_CR0_NW; 2185e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2186a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=0: %lx", 2187a79c9495SKrish Sadhukhan cr0); 2188e8d7a8f6SKrish Sadhukhan cr0 |= X86_CR0_NW; 2189e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2190a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=1: %lx", 2191a79c9495SKrish Sadhukhan cr0); 2192e8d7a8f6SKrish Sadhukhan cr0 &= ~X86_CR0_NW; 2193e8d7a8f6SKrish Sadhukhan cr0 &= ~X86_CR0_CD; 2194e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2195a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=0: %lx", 2196a79c9495SKrish Sadhukhan cr0); 2197e8d7a8f6SKrish Sadhukhan cr0 |= X86_CR0_NW; 2198e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2199a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_ERR, "Test CR0 CD=0,NW=1: %lx", 2200a79c9495SKrish Sadhukhan cr0); 2201e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0_saved; 22025c052c90SKrish Sadhukhan 22035c052c90SKrish Sadhukhan /* 22045c052c90SKrish Sadhukhan * CR0[63:32] are not zero 22055c052c90SKrish Sadhukhan */ 22065c052c90SKrish Sadhukhan cr0 = cr0_saved; 2207eae10e8fSKrish Sadhukhan 2208eae10e8fSKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "CR0", vmcb->save.cr0, cr0_saved, 2209eae10e8fSKrish Sadhukhan SVM_CR0_RESERVED_MASK); 22105c052c90SKrish Sadhukhan vmcb->save.cr0 = cr0_saved; 2211a79c9495SKrish Sadhukhan } 2212eae10e8fSKrish Sadhukhan 2213a79c9495SKrish Sadhukhan static void test_cr3(void) 2214a79c9495SKrish Sadhukhan { 2215a79c9495SKrish Sadhukhan /* 2216a79c9495SKrish Sadhukhan * CR3 MBZ bits based on different modes: 221729a01803SNadav Amit * [63:52] - long mode 2218a79c9495SKrish Sadhukhan */ 2219a79c9495SKrish Sadhukhan u64 cr3_saved = vmcb->save.cr3; 2220a79c9495SKrish Sadhukhan 2221a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 63, 1, 3, cr3_saved, 2222cb6524f3SPaolo Bonzini SVM_CR3_LONG_MBZ_MASK, SVM_EXIT_ERR, ""); 22236d0ecbf6SKrish Sadhukhan 22246d0ecbf6SKrish Sadhukhan vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_MBZ_MASK; 22256d0ecbf6SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx", 22266d0ecbf6SKrish Sadhukhan vmcb->save.cr3); 22276d0ecbf6SKrish Sadhukhan 22286d0ecbf6SKrish Sadhukhan /* 22296d0ecbf6SKrish Sadhukhan * CR3 non-MBZ reserved bits based on different modes: 2230cb6524f3SPaolo Bonzini * [11:5] [2:0] - long mode (PCIDE=0) 22316d0ecbf6SKrish Sadhukhan * [2:0] - PAE legacy mode 22326d0ecbf6SKrish Sadhukhan */ 22336d0ecbf6SKrish Sadhukhan u64 cr4_saved = vmcb->save.cr4; 22346d0ecbf6SKrish Sadhukhan u64 *pdpe = npt_get_pml4e(); 22356d0ecbf6SKrish Sadhukhan 22366d0ecbf6SKrish Sadhukhan /* 22376d0ecbf6SKrish Sadhukhan * Long mode 22386d0ecbf6SKrish Sadhukhan */ 22396d0ecbf6SKrish Sadhukhan if (this_cpu_has(X86_FEATURE_PCID)) { 22406d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved | X86_CR4_PCIDE; 22416d0ecbf6SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved, 2242cb6524f3SPaolo Bonzini SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_VMMCALL, "(PCIDE=1) "); 22436d0ecbf6SKrish Sadhukhan 22446d0ecbf6SKrish Sadhukhan vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK; 22456d0ecbf6SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx", 22466d0ecbf6SKrish Sadhukhan vmcb->save.cr3); 2247cb6524f3SPaolo Bonzini } 22486d0ecbf6SKrish Sadhukhan 22496d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved & ~X86_CR4_PCIDE; 22506d0ecbf6SKrish Sadhukhan 2251993749ffSSean Christopherson if (!npt_supported()) 2252993749ffSSean Christopherson goto skip_npt_only; 2253993749ffSSean Christopherson 22546d0ecbf6SKrish Sadhukhan /* Clear P (Present) bit in NPT in order to trigger #NPF */ 22556d0ecbf6SKrish Sadhukhan pdpe[0] &= ~1ULL; 22566d0ecbf6SKrish Sadhukhan 22576d0ecbf6SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved, 2258cb6524f3SPaolo Bonzini SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_NPF, "(PCIDE=0) "); 22596d0ecbf6SKrish Sadhukhan 22606d0ecbf6SKrish Sadhukhan pdpe[0] |= 1ULL; 2261cb6524f3SPaolo Bonzini vmcb->save.cr3 = cr3_saved; 22626d0ecbf6SKrish Sadhukhan 22636d0ecbf6SKrish Sadhukhan /* 22646d0ecbf6SKrish Sadhukhan * PAE legacy 22656d0ecbf6SKrish Sadhukhan */ 22666d0ecbf6SKrish Sadhukhan pdpe[0] &= ~1ULL; 22676d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved | X86_CR4_PAE; 22686d0ecbf6SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 2, 1, 3, cr3_saved, 2269cb6524f3SPaolo Bonzini SVM_CR3_PAE_LEGACY_RESERVED_MASK, SVM_EXIT_NPF, "(PAE) "); 22706d0ecbf6SKrish Sadhukhan 22716d0ecbf6SKrish Sadhukhan pdpe[0] |= 1ULL; 2272993749ffSSean Christopherson 2273993749ffSSean Christopherson skip_npt_only: 2274a79c9495SKrish Sadhukhan vmcb->save.cr3 = cr3_saved; 22756d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved; 2276a79c9495SKrish Sadhukhan } 2277a79c9495SKrish Sadhukhan 2278d30973c3SWei Huang /* Test CR4 MBZ bits based on legacy or long modes */ 2279a79c9495SKrish Sadhukhan static void test_cr4(void) 2280a79c9495SKrish Sadhukhan { 2281a79c9495SKrish Sadhukhan u64 cr4_saved = vmcb->save.cr4; 2282a79c9495SKrish Sadhukhan u64 efer_saved = vmcb->save.efer; 2283a79c9495SKrish Sadhukhan u64 efer = efer_saved; 2284a79c9495SKrish Sadhukhan 2285a79c9495SKrish Sadhukhan efer &= ~EFER_LME; 2286a79c9495SKrish Sadhukhan vmcb->save.efer = efer; 2287a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved, 2288cb6524f3SPaolo Bonzini SVM_CR4_LEGACY_RESERVED_MASK, SVM_EXIT_ERR, ""); 2289a79c9495SKrish Sadhukhan 2290a79c9495SKrish Sadhukhan efer |= EFER_LME; 2291a79c9495SKrish Sadhukhan vmcb->save.efer = efer; 2292a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved, 2293cb6524f3SPaolo Bonzini SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, ""); 2294a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved, 2295cb6524f3SPaolo Bonzini SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, ""); 2296a79c9495SKrish Sadhukhan 2297a79c9495SKrish Sadhukhan vmcb->save.cr4 = cr4_saved; 2298a79c9495SKrish Sadhukhan vmcb->save.efer = efer_saved; 2299a79c9495SKrish Sadhukhan } 2300a79c9495SKrish Sadhukhan 2301a79c9495SKrish Sadhukhan static void test_dr(void) 2302a79c9495SKrish Sadhukhan { 2303eae10e8fSKrish Sadhukhan /* 2304eae10e8fSKrish Sadhukhan * DR6[63:32] and DR7[63:32] are MBZ 2305eae10e8fSKrish Sadhukhan */ 2306eae10e8fSKrish Sadhukhan u64 dr_saved = vmcb->save.dr6; 2307eae10e8fSKrish Sadhukhan 2308eae10e8fSKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR6", vmcb->save.dr6, dr_saved, 2309eae10e8fSKrish Sadhukhan SVM_DR6_RESERVED_MASK); 2310eae10e8fSKrish Sadhukhan vmcb->save.dr6 = dr_saved; 2311eae10e8fSKrish Sadhukhan 2312eae10e8fSKrish Sadhukhan dr_saved = vmcb->save.dr7; 2313eae10e8fSKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR7", vmcb->save.dr7, dr_saved, 2314eae10e8fSKrish Sadhukhan SVM_DR7_RESERVED_MASK); 2315eae10e8fSKrish Sadhukhan 2316eae10e8fSKrish Sadhukhan vmcb->save.dr7 = dr_saved; 2317a79c9495SKrish Sadhukhan } 2318eae10e8fSKrish Sadhukhan 2319abe82380SKrish Sadhukhan /* TODO: verify if high 32-bits are sign- or zero-extended on bare metal */ 2320abe82380SKrish Sadhukhan #define TEST_BITMAP_ADDR(save_intercept, type, addr, exit_code, \ 2321abe82380SKrish Sadhukhan msg) { \ 2322abe82380SKrish Sadhukhan vmcb->control.intercept = saved_intercept | 1ULL << type; \ 2323abe82380SKrish Sadhukhan if (type == INTERCEPT_MSR_PROT) \ 2324abe82380SKrish Sadhukhan vmcb->control.msrpm_base_pa = addr; \ 2325abe82380SKrish Sadhukhan else \ 2326abe82380SKrish Sadhukhan vmcb->control.iopm_base_pa = addr; \ 2327abe82380SKrish Sadhukhan report(svm_vmrun() == exit_code, \ 2328abe82380SKrish Sadhukhan "Test %s address: %lx", msg, addr); \ 2329abe82380SKrish Sadhukhan } 2330abe82380SKrish Sadhukhan 2331abe82380SKrish Sadhukhan /* 2332abe82380SKrish Sadhukhan * If the MSR or IOIO intercept table extends to a physical address that 2333abe82380SKrish Sadhukhan * is greater than or equal to the maximum supported physical address, the 2334abe82380SKrish Sadhukhan * guest state is illegal. 2335abe82380SKrish Sadhukhan * 2336abe82380SKrish Sadhukhan * The VMRUN instruction ignores the lower 12 bits of the address specified 2337abe82380SKrish Sadhukhan * in the VMCB. 2338abe82380SKrish Sadhukhan * 2339abe82380SKrish Sadhukhan * MSRPM spans 2 contiguous 4KB pages while IOPM spans 2 contiguous 4KB 2340abe82380SKrish Sadhukhan * pages + 1 byte. 2341abe82380SKrish Sadhukhan * 2342abe82380SKrish Sadhukhan * [APM vol 2] 2343abe82380SKrish Sadhukhan * 2344abe82380SKrish Sadhukhan * Note: Unallocated MSRPM addresses conforming to consistency checks, generate 2345abe82380SKrish Sadhukhan * #NPF. 2346abe82380SKrish Sadhukhan */ 2347abe82380SKrish Sadhukhan static void test_msrpm_iopm_bitmap_addrs(void) 2348abe82380SKrish Sadhukhan { 2349abe82380SKrish Sadhukhan u64 saved_intercept = vmcb->control.intercept; 2350abe82380SKrish Sadhukhan u64 addr_beyond_limit = 1ull << cpuid_maxphyaddr(); 2351abe82380SKrish Sadhukhan u64 addr = virt_to_phys(msr_bitmap) & (~((1ull << 12) - 1)); 2352abe82380SKrish Sadhukhan 2353abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2354abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR, 2355abe82380SKrish Sadhukhan "MSRPM"); 2356abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2357abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE + 1, SVM_EXIT_ERR, 2358abe82380SKrish Sadhukhan "MSRPM"); 2359abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2360abe82380SKrish Sadhukhan addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR, 2361abe82380SKrish Sadhukhan "MSRPM"); 2362abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr, 2363abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "MSRPM"); 2364abe82380SKrish Sadhukhan addr |= (1ull << 12) - 1; 2365abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr, 2366abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "MSRPM"); 2367abe82380SKrish Sadhukhan 2368abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2369abe82380SKrish Sadhukhan addr_beyond_limit - 4 * PAGE_SIZE, SVM_EXIT_VMMCALL, 2370abe82380SKrish Sadhukhan "IOPM"); 2371abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2372abe82380SKrish Sadhukhan addr_beyond_limit - 3 * PAGE_SIZE, SVM_EXIT_VMMCALL, 2373abe82380SKrish Sadhukhan "IOPM"); 2374abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2375abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE - 2, SVM_EXIT_VMMCALL, 2376abe82380SKrish Sadhukhan "IOPM"); 2377abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2378abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR, 2379abe82380SKrish Sadhukhan "IOPM"); 2380abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2381abe82380SKrish Sadhukhan addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR, 2382abe82380SKrish Sadhukhan "IOPM"); 2383abe82380SKrish Sadhukhan addr = virt_to_phys(io_bitmap) & (~((1ull << 11) - 1)); 2384abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr, 2385abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "IOPM"); 2386abe82380SKrish Sadhukhan addr |= (1ull << 12) - 1; 2387abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr, 2388abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "IOPM"); 2389abe82380SKrish Sadhukhan 2390abe82380SKrish Sadhukhan vmcb->control.intercept = saved_intercept; 2391abe82380SKrish Sadhukhan } 2392abe82380SKrish Sadhukhan 2393ba3c9773SLara Lazier /* 2394ba3c9773SLara Lazier * Unlike VMSAVE, VMRUN seems not to update the value of noncanonical 2395ba3c9773SLara Lazier * segment bases in the VMCB. However, VMENTRY succeeds as documented. 2396ba3c9773SLara Lazier */ 2397ba3c9773SLara Lazier #define TEST_CANONICAL_VMRUN(seg_base, msg) \ 2398a99070ebSKrish Sadhukhan saved_addr = seg_base; \ 2399a99070ebSKrish Sadhukhan seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \ 2400ba3c9773SLara Lazier return_value = svm_vmrun(); \ 2401ba3c9773SLara Lazier report(return_value == SVM_EXIT_VMMCALL, \ 2402ba3c9773SLara Lazier "Successful VMRUN with noncanonical %s.base", msg); \ 2403a99070ebSKrish Sadhukhan seg_base = saved_addr; 2404a99070ebSKrish Sadhukhan 2405ba3c9773SLara Lazier 2406ba3c9773SLara Lazier #define TEST_CANONICAL_VMLOAD(seg_base, msg) \ 2407ba3c9773SLara Lazier saved_addr = seg_base; \ 2408ba3c9773SLara Lazier seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \ 2409ba3c9773SLara Lazier asm volatile ("vmload %0" : : "a"(vmcb_phys) : "memory"); \ 2410ba3c9773SLara Lazier asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory"); \ 2411ba3c9773SLara Lazier report(is_canonical(seg_base), \ 2412ba3c9773SLara Lazier "Test %s.base for canonical form: %lx", msg, seg_base); \ 2413ba3c9773SLara Lazier seg_base = saved_addr; 2414ba3c9773SLara Lazier 2415ba3c9773SLara Lazier static void test_canonicalization(void) 2416a99070ebSKrish Sadhukhan { 2417a99070ebSKrish Sadhukhan u64 saved_addr; 2418ba3c9773SLara Lazier u64 return_value; 2419ba3c9773SLara Lazier u64 addr_limit; 2420ba3c9773SLara Lazier u64 vmcb_phys = virt_to_phys(vmcb); 2421ba3c9773SLara Lazier 2422ba3c9773SLara Lazier addr_limit = (this_cpu_has(X86_FEATURE_LA57)) ? 57 : 48; 2423a99070ebSKrish Sadhukhan u64 noncanonical_mask = NONCANONICAL & ~((1ul << addr_limit) - 1); 2424a99070ebSKrish Sadhukhan 2425ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.fs.base, "FS"); 2426ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.gs.base, "GS"); 2427ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.ldtr.base, "LDTR"); 2428ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.tr.base, "TR"); 2429ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.kernel_gs_base, "KERNEL GS"); 2430ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.es.base, "ES"); 2431ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.cs.base, "CS"); 2432ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.ss.base, "SS"); 2433ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.ds.base, "DS"); 2434ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.gdtr.base, "GDTR"); 2435ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.idtr.base, "IDTR"); 2436a99070ebSKrish Sadhukhan } 2437a99070ebSKrish Sadhukhan 2438665f5677SKrish Sadhukhan /* 2439665f5677SKrish Sadhukhan * When VMRUN loads a guest value of 1 in EFLAGS.TF, that value does not 2440665f5677SKrish Sadhukhan * cause a trace trap between the VMRUN and the first guest instruction, but 2441665f5677SKrish Sadhukhan * rather after completion of the first guest instruction. 2442665f5677SKrish Sadhukhan * 2443665f5677SKrish Sadhukhan * [APM vol 2] 2444665f5677SKrish Sadhukhan */ 2445665f5677SKrish Sadhukhan u64 guest_rflags_test_trap_rip; 2446665f5677SKrish Sadhukhan 2447665f5677SKrish Sadhukhan static void guest_rflags_test_db_handler(struct ex_regs *r) 2448665f5677SKrish Sadhukhan { 2449665f5677SKrish Sadhukhan guest_rflags_test_trap_rip = r->rip; 2450665f5677SKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 2451665f5677SKrish Sadhukhan } 2452665f5677SKrish Sadhukhan 2453a79c9495SKrish Sadhukhan static void svm_guest_state_test(void) 2454a79c9495SKrish Sadhukhan { 2455a79c9495SKrish Sadhukhan test_set_guest(basic_guest_main); 2456a79c9495SKrish Sadhukhan test_efer(); 2457a79c9495SKrish Sadhukhan test_cr0(); 2458a79c9495SKrish Sadhukhan test_cr3(); 2459a79c9495SKrish Sadhukhan test_cr4(); 2460a79c9495SKrish Sadhukhan test_dr(); 2461abe82380SKrish Sadhukhan test_msrpm_iopm_bitmap_addrs(); 2462ba3c9773SLara Lazier test_canonicalization(); 2463ba29942cSKrish Sadhukhan } 2464ba29942cSKrish Sadhukhan 2465665f5677SKrish Sadhukhan extern void guest_rflags_test_guest(struct svm_test *test); 2466665f5677SKrish Sadhukhan extern u64 *insn2; 2467665f5677SKrish Sadhukhan extern u64 *guest_end; 2468665f5677SKrish Sadhukhan 2469665f5677SKrish Sadhukhan asm("guest_rflags_test_guest:\n\t" 2470665f5677SKrish Sadhukhan "push %rbp\n\t" 2471665f5677SKrish Sadhukhan ".global insn2\n\t" 2472665f5677SKrish Sadhukhan "insn2:\n\t" 2473665f5677SKrish Sadhukhan "mov %rsp,%rbp\n\t" 2474665f5677SKrish Sadhukhan "vmmcall\n\t" 2475665f5677SKrish Sadhukhan "vmmcall\n\t" 2476665f5677SKrish Sadhukhan ".global guest_end\n\t" 2477665f5677SKrish Sadhukhan "guest_end:\n\t" 2478665f5677SKrish Sadhukhan "vmmcall\n\t" 2479665f5677SKrish Sadhukhan "pop %rbp\n\t" 2480665f5677SKrish Sadhukhan "ret"); 2481665f5677SKrish Sadhukhan 2482665f5677SKrish Sadhukhan static void svm_test_singlestep(void) 2483665f5677SKrish Sadhukhan { 2484665f5677SKrish Sadhukhan handle_exception(DB_VECTOR, guest_rflags_test_db_handler); 2485665f5677SKrish Sadhukhan 2486665f5677SKrish Sadhukhan /* 2487665f5677SKrish Sadhukhan * Trap expected after completion of first guest instruction 2488665f5677SKrish Sadhukhan */ 2489665f5677SKrish Sadhukhan vmcb->save.rflags |= X86_EFLAGS_TF; 2490665f5677SKrish Sadhukhan report (__svm_vmrun((u64)guest_rflags_test_guest) == SVM_EXIT_VMMCALL && 2491665f5677SKrish Sadhukhan guest_rflags_test_trap_rip == (u64)&insn2, 2492665f5677SKrish Sadhukhan "Test EFLAGS.TF on VMRUN: trap expected after completion of first guest instruction"); 2493665f5677SKrish Sadhukhan /* 2494665f5677SKrish Sadhukhan * No trap expected 2495665f5677SKrish Sadhukhan */ 2496665f5677SKrish Sadhukhan guest_rflags_test_trap_rip = 0; 2497665f5677SKrish Sadhukhan vmcb->save.rip += 3; 2498665f5677SKrish Sadhukhan vmcb->save.rflags |= X86_EFLAGS_TF; 2499665f5677SKrish Sadhukhan report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL && 2500665f5677SKrish Sadhukhan guest_rflags_test_trap_rip == 0, "Test EFLAGS.TF on VMRUN: trap not expected"); 2501665f5677SKrish Sadhukhan 2502665f5677SKrish Sadhukhan /* 2503665f5677SKrish Sadhukhan * Let guest finish execution 2504665f5677SKrish Sadhukhan */ 2505665f5677SKrish Sadhukhan vmcb->save.rip += 3; 2506665f5677SKrish Sadhukhan report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL && 2507665f5677SKrish Sadhukhan vmcb->save.rip == (u64)&guest_end, "Test EFLAGS.TF on VMRUN: guest execution completion"); 2508665f5677SKrish Sadhukhan } 2509665f5677SKrish Sadhukhan 25107a57ef5dSMaxim Levitsky static bool volatile svm_errata_reproduced = false; 25117a57ef5dSMaxim Levitsky static unsigned long volatile physical = 0; 25127a57ef5dSMaxim Levitsky 25137a57ef5dSMaxim Levitsky 25147a57ef5dSMaxim Levitsky /* 25157a57ef5dSMaxim Levitsky * 25167a57ef5dSMaxim Levitsky * Test the following errata: 25177a57ef5dSMaxim Levitsky * If the VMRUN/VMSAVE/VMLOAD are attempted by the nested guest, 25187a57ef5dSMaxim Levitsky * the CPU would first check the EAX against host reserved memory 25197a57ef5dSMaxim Levitsky * regions (so far only SMM_ADDR/SMM_MASK are known to cause it), 25207a57ef5dSMaxim Levitsky * and only then signal #VMexit 25217a57ef5dSMaxim Levitsky * 25227a57ef5dSMaxim Levitsky * Try to reproduce this by trying vmsave on each possible 4K aligned memory 25237a57ef5dSMaxim Levitsky * address in the low 4G where the SMM area has to reside. 25247a57ef5dSMaxim Levitsky */ 25257a57ef5dSMaxim Levitsky 25267a57ef5dSMaxim Levitsky static void gp_isr(struct ex_regs *r) 25277a57ef5dSMaxim Levitsky { 25287a57ef5dSMaxim Levitsky svm_errata_reproduced = true; 25297a57ef5dSMaxim Levitsky /* skip over the vmsave instruction*/ 25307a57ef5dSMaxim Levitsky r->rip += 3; 25317a57ef5dSMaxim Levitsky } 25327a57ef5dSMaxim Levitsky 25337a57ef5dSMaxim Levitsky static void svm_vmrun_errata_test(void) 25347a57ef5dSMaxim Levitsky { 25357a57ef5dSMaxim Levitsky unsigned long *last_page = NULL; 25367a57ef5dSMaxim Levitsky 25377a57ef5dSMaxim Levitsky handle_exception(GP_VECTOR, gp_isr); 25387a57ef5dSMaxim Levitsky 25397a57ef5dSMaxim Levitsky while (!svm_errata_reproduced) { 25407a57ef5dSMaxim Levitsky 25417a57ef5dSMaxim Levitsky unsigned long *page = alloc_pages(1); 25427a57ef5dSMaxim Levitsky 25437a57ef5dSMaxim Levitsky if (!page) { 25445c3582f0SJanis Schoetterl-Glausch report_pass("All guest memory tested, no bug found"); 25457a57ef5dSMaxim Levitsky break; 25467a57ef5dSMaxim Levitsky } 25477a57ef5dSMaxim Levitsky 25487a57ef5dSMaxim Levitsky physical = virt_to_phys(page); 25497a57ef5dSMaxim Levitsky 25507a57ef5dSMaxim Levitsky asm volatile ( 25517a57ef5dSMaxim Levitsky "mov %[_physical], %%rax\n\t" 25527a57ef5dSMaxim Levitsky "vmsave %%rax\n\t" 25537a57ef5dSMaxim Levitsky 25547a57ef5dSMaxim Levitsky : [_physical] "=m" (physical) 25557a57ef5dSMaxim Levitsky : /* no inputs*/ 25567a57ef5dSMaxim Levitsky : "rax" /*clobbers*/ 25577a57ef5dSMaxim Levitsky ); 25587a57ef5dSMaxim Levitsky 25597a57ef5dSMaxim Levitsky if (svm_errata_reproduced) { 2560198dfd0eSJanis Schoetterl-Glausch report_fail("Got #GP exception - svm errata reproduced at 0x%lx", 25617a57ef5dSMaxim Levitsky physical); 25627a57ef5dSMaxim Levitsky break; 25637a57ef5dSMaxim Levitsky } 25647a57ef5dSMaxim Levitsky 25657a57ef5dSMaxim Levitsky *page = (unsigned long)last_page; 25667a57ef5dSMaxim Levitsky last_page = page; 25677a57ef5dSMaxim Levitsky } 25687a57ef5dSMaxim Levitsky 25697a57ef5dSMaxim Levitsky while (last_page) { 25707a57ef5dSMaxim Levitsky unsigned long *page = last_page; 25717a57ef5dSMaxim Levitsky last_page = (unsigned long *)*last_page; 25727a57ef5dSMaxim Levitsky free_pages_by_order(page, 1); 25737a57ef5dSMaxim Levitsky } 25747a57ef5dSMaxim Levitsky } 25757a57ef5dSMaxim Levitsky 25760b6f6cedSKrish Sadhukhan static void vmload_vmsave_guest_main(struct svm_test *test) 25770b6f6cedSKrish Sadhukhan { 25780b6f6cedSKrish Sadhukhan u64 vmcb_phys = virt_to_phys(vmcb); 25790b6f6cedSKrish Sadhukhan 25800b6f6cedSKrish Sadhukhan asm volatile ("vmload %0" : : "a"(vmcb_phys)); 25810b6f6cedSKrish Sadhukhan asm volatile ("vmsave %0" : : "a"(vmcb_phys)); 25820b6f6cedSKrish Sadhukhan } 25830b6f6cedSKrish Sadhukhan 25840b6f6cedSKrish Sadhukhan static void svm_vmload_vmsave(void) 25850b6f6cedSKrish Sadhukhan { 25860b6f6cedSKrish Sadhukhan u32 intercept_saved = vmcb->control.intercept; 25870b6f6cedSKrish Sadhukhan 25880b6f6cedSKrish Sadhukhan test_set_guest(vmload_vmsave_guest_main); 25890b6f6cedSKrish Sadhukhan 25900b6f6cedSKrish Sadhukhan /* 25910b6f6cedSKrish Sadhukhan * Disabling intercept for VMLOAD and VMSAVE doesn't cause 25920b6f6cedSKrish Sadhukhan * respective #VMEXIT to host 25930b6f6cedSKrish Sadhukhan */ 25940b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD); 25950b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE); 25960b6f6cedSKrish Sadhukhan svm_vmrun(); 25970b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 25980b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 25990b6f6cedSKrish Sadhukhan 26000b6f6cedSKrish Sadhukhan /* 26010b6f6cedSKrish Sadhukhan * Enabling intercept for VMLOAD and VMSAVE causes respective 26020b6f6cedSKrish Sadhukhan * #VMEXIT to host 26030b6f6cedSKrish Sadhukhan */ 26040b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD); 26050b6f6cedSKrish Sadhukhan svm_vmrun(); 26060b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test " 26070b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT"); 26080b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD); 26090b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE); 26100b6f6cedSKrish Sadhukhan svm_vmrun(); 26110b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test " 26120b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT"); 26130b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE); 26140b6f6cedSKrish Sadhukhan svm_vmrun(); 26150b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 26160b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 26170b6f6cedSKrish Sadhukhan 26180b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD); 26190b6f6cedSKrish Sadhukhan svm_vmrun(); 26200b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test " 26210b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT"); 26220b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD); 26230b6f6cedSKrish Sadhukhan svm_vmrun(); 26240b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 26250b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 26260b6f6cedSKrish Sadhukhan 26270b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE); 26280b6f6cedSKrish Sadhukhan svm_vmrun(); 26290b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test " 26300b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT"); 26310b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE); 26320b6f6cedSKrish Sadhukhan svm_vmrun(); 26330b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 26340b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 26350b6f6cedSKrish Sadhukhan 26360b6f6cedSKrish Sadhukhan vmcb->control.intercept = intercept_saved; 26370b6f6cedSKrish Sadhukhan } 26380b6f6cedSKrish Sadhukhan 2639f6972bd6SLara Lazier static void prepare_vgif_enabled(struct svm_test *test) 2640f6972bd6SLara Lazier { 2641f6972bd6SLara Lazier default_prepare(test); 2642f6972bd6SLara Lazier } 2643f6972bd6SLara Lazier 2644f6972bd6SLara Lazier static void test_vgif(struct svm_test *test) 2645f6972bd6SLara Lazier { 2646f6972bd6SLara Lazier asm volatile ("vmmcall\n\tstgi\n\tvmmcall\n\tclgi\n\tvmmcall\n\t"); 2647f6972bd6SLara Lazier } 2648f6972bd6SLara Lazier 2649f6972bd6SLara Lazier static bool vgif_finished(struct svm_test *test) 2650f6972bd6SLara Lazier { 2651f6972bd6SLara Lazier switch (get_test_stage(test)) 2652f6972bd6SLara Lazier { 2653f6972bd6SLara Lazier case 0: 2654f6972bd6SLara Lazier if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 2655198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall."); 2656f6972bd6SLara Lazier return true; 2657f6972bd6SLara Lazier } 2658f6972bd6SLara Lazier vmcb->control.int_ctl |= V_GIF_ENABLED_MASK; 2659f6972bd6SLara Lazier vmcb->save.rip += 3; 2660f6972bd6SLara Lazier inc_test_stage(test); 2661f6972bd6SLara Lazier break; 2662f6972bd6SLara Lazier case 1: 2663f6972bd6SLara Lazier if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 2664198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall."); 2665f6972bd6SLara Lazier return true; 2666f6972bd6SLara Lazier } 2667f6972bd6SLara Lazier if (!(vmcb->control.int_ctl & V_GIF_MASK)) { 2668198dfd0eSJanis Schoetterl-Glausch report_fail("Failed to set VGIF when executing STGI."); 2669f6972bd6SLara Lazier vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK; 2670f6972bd6SLara Lazier return true; 2671f6972bd6SLara Lazier } 26725c3582f0SJanis Schoetterl-Glausch report_pass("STGI set VGIF bit."); 2673f6972bd6SLara Lazier vmcb->save.rip += 3; 2674f6972bd6SLara Lazier inc_test_stage(test); 2675f6972bd6SLara Lazier break; 2676f6972bd6SLara Lazier case 2: 2677f6972bd6SLara Lazier if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 2678198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall."); 2679f6972bd6SLara Lazier return true; 2680f6972bd6SLara Lazier } 2681f6972bd6SLara Lazier if (vmcb->control.int_ctl & V_GIF_MASK) { 2682198dfd0eSJanis Schoetterl-Glausch report_fail("Failed to clear VGIF when executing CLGI."); 2683f6972bd6SLara Lazier vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK; 2684f6972bd6SLara Lazier return true; 2685f6972bd6SLara Lazier } 26865c3582f0SJanis Schoetterl-Glausch report_pass("CLGI cleared VGIF bit."); 2687f6972bd6SLara Lazier vmcb->save.rip += 3; 2688f6972bd6SLara Lazier inc_test_stage(test); 2689f6972bd6SLara Lazier vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK; 2690f6972bd6SLara Lazier break; 2691f6972bd6SLara Lazier default: 2692f6972bd6SLara Lazier return true; 2693f6972bd6SLara Lazier break; 2694f6972bd6SLara Lazier } 2695f6972bd6SLara Lazier 2696f6972bd6SLara Lazier return get_test_stage(test) == 3; 2697f6972bd6SLara Lazier } 2698f6972bd6SLara Lazier 2699f6972bd6SLara Lazier static bool vgif_check(struct svm_test *test) 2700f6972bd6SLara Lazier { 2701f6972bd6SLara Lazier return get_test_stage(test) == 3; 2702f6972bd6SLara Lazier } 2703f6972bd6SLara Lazier 27048650dffeSMaxim Levitsky 27058650dffeSMaxim Levitsky static int pause_test_counter; 27068650dffeSMaxim Levitsky static int wait_counter; 27078650dffeSMaxim Levitsky 27088650dffeSMaxim Levitsky static void pause_filter_test_guest_main(struct svm_test *test) 27098650dffeSMaxim Levitsky { 27108650dffeSMaxim Levitsky int i; 27118650dffeSMaxim Levitsky for (i = 0 ; i < pause_test_counter ; i++) 27128650dffeSMaxim Levitsky pause(); 27138650dffeSMaxim Levitsky 27148650dffeSMaxim Levitsky if (!wait_counter) 27158650dffeSMaxim Levitsky return; 27168650dffeSMaxim Levitsky 27178650dffeSMaxim Levitsky for (i = 0; i < wait_counter; i++) 27188650dffeSMaxim Levitsky ; 27198650dffeSMaxim Levitsky 27208650dffeSMaxim Levitsky for (i = 0 ; i < pause_test_counter ; i++) 27218650dffeSMaxim Levitsky pause(); 27228650dffeSMaxim Levitsky 27238650dffeSMaxim Levitsky } 27248650dffeSMaxim Levitsky 27258650dffeSMaxim Levitsky static void pause_filter_run_test(int pause_iterations, int filter_value, int wait_iterations, int threshold) 27268650dffeSMaxim Levitsky { 27278650dffeSMaxim Levitsky test_set_guest(pause_filter_test_guest_main); 27288650dffeSMaxim Levitsky 27298650dffeSMaxim Levitsky pause_test_counter = pause_iterations; 27308650dffeSMaxim Levitsky wait_counter = wait_iterations; 27318650dffeSMaxim Levitsky 27328650dffeSMaxim Levitsky vmcb->control.pause_filter_count = filter_value; 27338650dffeSMaxim Levitsky vmcb->control.pause_filter_thresh = threshold; 27348650dffeSMaxim Levitsky svm_vmrun(); 27358650dffeSMaxim Levitsky 27368650dffeSMaxim Levitsky if (filter_value <= pause_iterations || wait_iterations < threshold) 27378650dffeSMaxim Levitsky report(vmcb->control.exit_code == SVM_EXIT_PAUSE, "expected PAUSE vmexit"); 27388650dffeSMaxim Levitsky else 27398650dffeSMaxim Levitsky report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "no expected PAUSE vmexit"); 27408650dffeSMaxim Levitsky } 27418650dffeSMaxim Levitsky 27428650dffeSMaxim Levitsky static void pause_filter_test(void) 27438650dffeSMaxim Levitsky { 27448650dffeSMaxim Levitsky if (!pause_filter_supported()) { 27458650dffeSMaxim Levitsky report_skip("PAUSE filter not supported in the guest"); 27468650dffeSMaxim Levitsky return; 27478650dffeSMaxim Levitsky } 27488650dffeSMaxim Levitsky 27498650dffeSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_PAUSE); 27508650dffeSMaxim Levitsky 27518650dffeSMaxim Levitsky // filter count more that pause count - no VMexit 27528650dffeSMaxim Levitsky pause_filter_run_test(10, 9, 0, 0); 27538650dffeSMaxim Levitsky 27548650dffeSMaxim Levitsky // filter count smaller pause count - no VMexit 27558650dffeSMaxim Levitsky pause_filter_run_test(20, 21, 0, 0); 27568650dffeSMaxim Levitsky 27578650dffeSMaxim Levitsky 27588650dffeSMaxim Levitsky if (pause_threshold_supported()) { 27598650dffeSMaxim Levitsky // filter count smaller pause count - no VMexit + large enough threshold 27608650dffeSMaxim Levitsky // so that filter counter resets 27618650dffeSMaxim Levitsky pause_filter_run_test(20, 21, 1000, 10); 27628650dffeSMaxim Levitsky 27638650dffeSMaxim Levitsky // filter count smaller pause count - no VMexit + small threshold 27648650dffeSMaxim Levitsky // so that filter doesn't reset 27658650dffeSMaxim Levitsky pause_filter_run_test(20, 21, 10, 1000); 27668650dffeSMaxim Levitsky } else { 27678650dffeSMaxim Levitsky report_skip("PAUSE threshold not supported in the guest"); 27688650dffeSMaxim Levitsky return; 27698650dffeSMaxim Levitsky } 27708650dffeSMaxim Levitsky } 27718650dffeSMaxim Levitsky 2772694e59baSManali Shukla /* If CR0.TS and CR0.EM are cleared in L2, no #NM is generated. */ 2773694e59baSManali Shukla static void svm_no_nm_test(void) 27745c92f156SManali Shukla { 27755c92f156SManali Shukla write_cr0(read_cr0() & ~X86_CR0_TS); 2776694e59baSManali Shukla test_set_guest((test_guest_func)fnop); 27775c92f156SManali Shukla 27785c92f156SManali Shukla vmcb->save.cr0 = vmcb->save.cr0 & ~(X86_CR0_TS | X86_CR0_EM); 2779694e59baSManali Shukla report(svm_vmrun() == SVM_EXIT_VMMCALL, 27803f27d772SManali Shukla "fnop with CR0.TS and CR0.EM unset no #NM excpetion"); 27815c92f156SManali Shukla } 2782f6972bd6SLara Lazier 2783ddb85855SSean Christopherson static u64 amd_get_lbr_rip(u32 msr) 2784537d39dfSMaxim Levitsky { 2785ddb85855SSean Christopherson return rdmsr(msr) & ~AMD_LBR_RECORD_MISPREDICT; 2786537d39dfSMaxim Levitsky } 2787537d39dfSMaxim Levitsky 2788ddb85855SSean Christopherson #define HOST_CHECK_LBR(from_expected, to_expected) \ 2789ddb85855SSean Christopherson do { \ 2790ddb85855SSean Christopherson TEST_EXPECT_EQ((u64)from_expected, amd_get_lbr_rip(MSR_IA32_LASTBRANCHFROMIP)); \ 2791ddb85855SSean Christopherson TEST_EXPECT_EQ((u64)to_expected, amd_get_lbr_rip(MSR_IA32_LASTBRANCHTOIP)); \ 2792ddb85855SSean Christopherson } while (0) 2793537d39dfSMaxim Levitsky 2794ddb85855SSean Christopherson /* 2795ddb85855SSean Christopherson * FIXME: Do something other than generate an exception to communicate failure. 2796ddb85855SSean Christopherson * Debugging without expected vs. actual is an absolute nightmare. 2797ddb85855SSean Christopherson */ 2798ddb85855SSean Christopherson #define GUEST_CHECK_LBR(from_expected, to_expected) \ 2799ddb85855SSean Christopherson do { \ 2800ddb85855SSean Christopherson if ((u64)(from_expected) != amd_get_lbr_rip(MSR_IA32_LASTBRANCHFROMIP)) \ 2801ddb85855SSean Christopherson asm volatile("ud2"); \ 2802ddb85855SSean Christopherson if ((u64)(to_expected) != amd_get_lbr_rip(MSR_IA32_LASTBRANCHTOIP)) \ 2803ddb85855SSean Christopherson asm volatile("ud2"); \ 2804ddb85855SSean Christopherson } while (0) 2805537d39dfSMaxim Levitsky 280692098120SSean Christopherson #define REPORT_GUEST_LBR_ERROR(vmcb) \ 280792098120SSean Christopherson report(false, "LBR guest test failed. Exit reason 0x%x, RIP = %lx, from = %lx, to = %lx, ex from = %lx, ex to = %lx", \ 280892098120SSean Christopherson vmcb->control.exit_code, vmcb->save.rip, \ 280992098120SSean Christopherson vmcb->save.br_from, vmcb->save.br_to, \ 281092098120SSean Christopherson vmcb->save.last_excp_from, vmcb->save.last_excp_to) 281192098120SSean Christopherson 2812537d39dfSMaxim Levitsky #define DO_BRANCH(branch_name) \ 2813537d39dfSMaxim Levitsky asm volatile ( \ 2814537d39dfSMaxim Levitsky # branch_name "_from:" \ 2815537d39dfSMaxim Levitsky "jmp " # branch_name "_to\n" \ 2816537d39dfSMaxim Levitsky "nop\n" \ 2817537d39dfSMaxim Levitsky "nop\n" \ 2818537d39dfSMaxim Levitsky # branch_name "_to:" \ 2819537d39dfSMaxim Levitsky "nop\n" \ 2820537d39dfSMaxim Levitsky ) 2821537d39dfSMaxim Levitsky 2822537d39dfSMaxim Levitsky 2823537d39dfSMaxim Levitsky extern u64 guest_branch0_from, guest_branch0_to; 2824537d39dfSMaxim Levitsky extern u64 guest_branch2_from, guest_branch2_to; 2825537d39dfSMaxim Levitsky 2826537d39dfSMaxim Levitsky extern u64 host_branch0_from, host_branch0_to; 2827537d39dfSMaxim Levitsky extern u64 host_branch2_from, host_branch2_to; 2828537d39dfSMaxim Levitsky extern u64 host_branch3_from, host_branch3_to; 2829537d39dfSMaxim Levitsky extern u64 host_branch4_from, host_branch4_to; 2830537d39dfSMaxim Levitsky 2831537d39dfSMaxim Levitsky u64 dbgctl; 2832537d39dfSMaxim Levitsky 2833537d39dfSMaxim Levitsky static void svm_lbrv_test_guest1(void) 2834537d39dfSMaxim Levitsky { 2835537d39dfSMaxim Levitsky /* 2836537d39dfSMaxim Levitsky * This guest expects the LBR to be already enabled when it starts, 2837537d39dfSMaxim Levitsky * it does a branch, and then disables the LBR and then checks. 2838537d39dfSMaxim Levitsky */ 2839537d39dfSMaxim Levitsky 2840537d39dfSMaxim Levitsky DO_BRANCH(guest_branch0); 2841537d39dfSMaxim Levitsky 2842537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2843537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2844537d39dfSMaxim Levitsky 2845537d39dfSMaxim Levitsky if (dbgctl != DEBUGCTLMSR_LBR) 2846537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2847537d39dfSMaxim Levitsky if (rdmsr(MSR_IA32_DEBUGCTLMSR) != 0) 2848537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2849537d39dfSMaxim Levitsky 2850ddb85855SSean Christopherson GUEST_CHECK_LBR(&guest_branch0_from, &guest_branch0_to); 2851537d39dfSMaxim Levitsky asm volatile ("vmmcall\n"); 2852537d39dfSMaxim Levitsky } 2853537d39dfSMaxim Levitsky 2854537d39dfSMaxim Levitsky static void svm_lbrv_test_guest2(void) 2855537d39dfSMaxim Levitsky { 2856537d39dfSMaxim Levitsky /* 2857537d39dfSMaxim Levitsky * This guest expects the LBR to be disabled when it starts, 2858537d39dfSMaxim Levitsky * enables it, does a branch, disables it and then checks. 2859537d39dfSMaxim Levitsky */ 2860537d39dfSMaxim Levitsky 2861537d39dfSMaxim Levitsky DO_BRANCH(guest_branch1); 2862537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2863537d39dfSMaxim Levitsky 2864537d39dfSMaxim Levitsky if (dbgctl != 0) 2865537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2866537d39dfSMaxim Levitsky 2867ddb85855SSean Christopherson GUEST_CHECK_LBR(&host_branch2_from, &host_branch2_to); 2868537d39dfSMaxim Levitsky 2869537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 2870537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2871537d39dfSMaxim Levitsky DO_BRANCH(guest_branch2); 2872537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2873537d39dfSMaxim Levitsky 2874537d39dfSMaxim Levitsky if (dbgctl != DEBUGCTLMSR_LBR) 2875537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2876ddb85855SSean Christopherson GUEST_CHECK_LBR(&guest_branch2_from, &guest_branch2_to); 2877537d39dfSMaxim Levitsky 2878537d39dfSMaxim Levitsky asm volatile ("vmmcall\n"); 2879537d39dfSMaxim Levitsky } 2880537d39dfSMaxim Levitsky 2881537d39dfSMaxim Levitsky static void svm_lbrv_test0(void) 2882537d39dfSMaxim Levitsky { 2883537d39dfSMaxim Levitsky report(true, "Basic LBR test"); 2884537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 2885537d39dfSMaxim Levitsky DO_BRANCH(host_branch0); 2886537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2887537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2888537d39dfSMaxim Levitsky 2889554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR); 2890537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2891554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, 0); 2892537d39dfSMaxim Levitsky 2893ddb85855SSean Christopherson HOST_CHECK_LBR(&host_branch0_from, &host_branch0_to); 2894537d39dfSMaxim Levitsky } 2895537d39dfSMaxim Levitsky 2896537d39dfSMaxim Levitsky static void svm_lbrv_test1(void) 2897537d39dfSMaxim Levitsky { 2898537d39dfSMaxim Levitsky report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host(1)"); 2899537d39dfSMaxim Levitsky 29005200c1f1SSean Christopherson svm_setup_vmrun((u64)svm_lbrv_test_guest1); 2901537d39dfSMaxim Levitsky vmcb->control.virt_ext = 0; 2902537d39dfSMaxim Levitsky 2903537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 2904537d39dfSMaxim Levitsky DO_BRANCH(host_branch1); 2905537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 2906537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2907537d39dfSMaxim Levitsky 2908537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 290992098120SSean Christopherson REPORT_GUEST_LBR_ERROR(vmcb); 2910537d39dfSMaxim Levitsky return; 2911537d39dfSMaxim Levitsky } 2912537d39dfSMaxim Levitsky 2913554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, 0); 2914ddb85855SSean Christopherson HOST_CHECK_LBR(&guest_branch0_from, &guest_branch0_to); 2915537d39dfSMaxim Levitsky } 2916537d39dfSMaxim Levitsky 2917537d39dfSMaxim Levitsky static void svm_lbrv_test2(void) 2918537d39dfSMaxim Levitsky { 2919537d39dfSMaxim Levitsky report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host(2)"); 2920537d39dfSMaxim Levitsky 29215200c1f1SSean Christopherson svm_setup_vmrun((u64)svm_lbrv_test_guest2); 2922537d39dfSMaxim Levitsky vmcb->control.virt_ext = 0; 2923537d39dfSMaxim Levitsky 2924537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 2925537d39dfSMaxim Levitsky DO_BRANCH(host_branch2); 2926537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2927537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 2928537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2929537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2930537d39dfSMaxim Levitsky 2931537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 293292098120SSean Christopherson REPORT_GUEST_LBR_ERROR(vmcb); 2933537d39dfSMaxim Levitsky return; 2934537d39dfSMaxim Levitsky } 2935537d39dfSMaxim Levitsky 2936554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, 0); 2937ddb85855SSean Christopherson HOST_CHECK_LBR(&guest_branch2_from, &guest_branch2_to); 2938537d39dfSMaxim Levitsky } 2939537d39dfSMaxim Levitsky 2940537d39dfSMaxim Levitsky static void svm_lbrv_nested_test1(void) 2941537d39dfSMaxim Levitsky { 2942537d39dfSMaxim Levitsky if (!lbrv_supported()) { 2943537d39dfSMaxim Levitsky report_skip("LBRV not supported in the guest"); 2944537d39dfSMaxim Levitsky return; 2945537d39dfSMaxim Levitsky } 2946537d39dfSMaxim Levitsky 2947537d39dfSMaxim Levitsky report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (1)"); 29485200c1f1SSean Christopherson svm_setup_vmrun((u64)svm_lbrv_test_guest1); 2949537d39dfSMaxim Levitsky vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK; 2950537d39dfSMaxim Levitsky vmcb->save.dbgctl = DEBUGCTLMSR_LBR; 2951537d39dfSMaxim Levitsky 2952537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 2953537d39dfSMaxim Levitsky DO_BRANCH(host_branch3); 2954537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 2955537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2956537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2957537d39dfSMaxim Levitsky 2958537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 295992098120SSean Christopherson REPORT_GUEST_LBR_ERROR(vmcb); 2960537d39dfSMaxim Levitsky return; 2961537d39dfSMaxim Levitsky } 2962537d39dfSMaxim Levitsky 2963537d39dfSMaxim Levitsky if (vmcb->save.dbgctl != 0) { 2964537d39dfSMaxim Levitsky report(false, "unexpected virtual guest MSR_IA32_DEBUGCTLMSR value 0x%lx", vmcb->save.dbgctl); 2965537d39dfSMaxim Levitsky return; 2966537d39dfSMaxim Levitsky } 2967537d39dfSMaxim Levitsky 2968554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR); 2969ddb85855SSean Christopherson HOST_CHECK_LBR(&host_branch3_from, &host_branch3_to); 2970537d39dfSMaxim Levitsky } 29713f27d772SManali Shukla 2972537d39dfSMaxim Levitsky static void svm_lbrv_nested_test2(void) 2973537d39dfSMaxim Levitsky { 2974537d39dfSMaxim Levitsky if (!lbrv_supported()) { 2975537d39dfSMaxim Levitsky report_skip("LBRV not supported in the guest"); 2976537d39dfSMaxim Levitsky return; 2977537d39dfSMaxim Levitsky } 2978537d39dfSMaxim Levitsky 2979537d39dfSMaxim Levitsky report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (2)"); 29805200c1f1SSean Christopherson svm_setup_vmrun((u64)svm_lbrv_test_guest2); 2981537d39dfSMaxim Levitsky vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK; 2982537d39dfSMaxim Levitsky 2983537d39dfSMaxim Levitsky vmcb->save.dbgctl = 0; 2984537d39dfSMaxim Levitsky vmcb->save.br_from = (u64)&host_branch2_from; 2985537d39dfSMaxim Levitsky vmcb->save.br_to = (u64)&host_branch2_to; 2986537d39dfSMaxim Levitsky 2987537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 2988537d39dfSMaxim Levitsky DO_BRANCH(host_branch4); 2989537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 2990537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2991537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2992537d39dfSMaxim Levitsky 2993537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 299492098120SSean Christopherson REPORT_GUEST_LBR_ERROR(vmcb); 2995537d39dfSMaxim Levitsky return; 2996537d39dfSMaxim Levitsky } 2997537d39dfSMaxim Levitsky 2998554fa461SSean Christopherson TEST_EXPECT_EQ(dbgctl, DEBUGCTLMSR_LBR); 2999ddb85855SSean Christopherson HOST_CHECK_LBR(&host_branch4_from, &host_branch4_to); 3000537d39dfSMaxim Levitsky } 3001537d39dfSMaxim Levitsky 3002c45bccfcSMaxim Levitsky 3003c45bccfcSMaxim Levitsky // test that a nested guest which does enable INTR interception 3004c45bccfcSMaxim Levitsky // but doesn't enable virtual interrupt masking works 3005c45bccfcSMaxim Levitsky 3006c45bccfcSMaxim Levitsky static volatile int dummy_isr_recevied; 3007c45bccfcSMaxim Levitsky static void dummy_isr(isr_regs_t *regs) 3008c45bccfcSMaxim Levitsky { 3009c45bccfcSMaxim Levitsky dummy_isr_recevied++; 3010c45bccfcSMaxim Levitsky eoi(); 3011c45bccfcSMaxim Levitsky } 3012c45bccfcSMaxim Levitsky 3013c45bccfcSMaxim Levitsky 3014c45bccfcSMaxim Levitsky static volatile int nmi_recevied; 3015c45bccfcSMaxim Levitsky static void dummy_nmi_handler(struct ex_regs *regs) 3016c45bccfcSMaxim Levitsky { 3017c45bccfcSMaxim Levitsky nmi_recevied++; 3018c45bccfcSMaxim Levitsky } 3019c45bccfcSMaxim Levitsky 3020c45bccfcSMaxim Levitsky 3021c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_run_guest(volatile int *counter, int expected_vmexit) 3022c45bccfcSMaxim Levitsky { 3023c45bccfcSMaxim Levitsky if (counter) 3024c45bccfcSMaxim Levitsky *counter = 0; 3025c45bccfcSMaxim Levitsky 3026c45bccfcSMaxim Levitsky sti(); // host IF value should not matter 3027c45bccfcSMaxim Levitsky clgi(); // vmrun will set back GI to 1 3028c45bccfcSMaxim Levitsky 3029c45bccfcSMaxim Levitsky svm_vmrun(); 3030c45bccfcSMaxim Levitsky 3031c45bccfcSMaxim Levitsky if (counter) 3032c45bccfcSMaxim Levitsky report(!*counter, "No interrupt expected"); 3033c45bccfcSMaxim Levitsky 3034c45bccfcSMaxim Levitsky stgi(); 3035c45bccfcSMaxim Levitsky 3036c45bccfcSMaxim Levitsky if (counter) 3037c45bccfcSMaxim Levitsky report(*counter == 1, "Interrupt is expected"); 3038c45bccfcSMaxim Levitsky 3039c45bccfcSMaxim Levitsky report (vmcb->control.exit_code == expected_vmexit, "Test expected VM exit"); 3040c45bccfcSMaxim Levitsky report(vmcb->save.rflags & X86_EFLAGS_IF, "Guest should have EFLAGS.IF set now"); 3041c45bccfcSMaxim Levitsky cli(); 3042c45bccfcSMaxim Levitsky } 3043c45bccfcSMaxim Levitsky 3044c45bccfcSMaxim Levitsky 3045d0458710SMaxim Levitsky // subtest: test that enabling EFLAGS.IF is enough to trigger an interrupt 3046c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_if_guest(struct svm_test *test) 3047c45bccfcSMaxim Levitsky { 3048c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3049c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3050e4007e62SMaxim Levitsky sti_nop(); 3051c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3052c45bccfcSMaxim Levitsky } 3053c45bccfcSMaxim Levitsky 3054c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_if(void) 3055c45bccfcSMaxim Levitsky { 3056c45bccfcSMaxim Levitsky // make a physical interrupt to be pending 3057c45bccfcSMaxim Levitsky handle_irq(0x55, dummy_isr); 3058c45bccfcSMaxim Levitsky 3059c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3060c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3061c45bccfcSMaxim Levitsky vmcb->save.rflags &= ~X86_EFLAGS_IF; 3062c45bccfcSMaxim Levitsky 3063c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_if_guest); 30642602a896SMaxim Levitsky cli(); 3065c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3066c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3067c45bccfcSMaxim Levitsky } 3068c45bccfcSMaxim Levitsky 3069c45bccfcSMaxim Levitsky 3070c45bccfcSMaxim Levitsky // subtest: test that a clever guest can trigger an interrupt by setting GIF 3071c45bccfcSMaxim Levitsky // if GIF is not intercepted 3072c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif_guest(struct svm_test *test) 3073c45bccfcSMaxim Levitsky { 3074c45bccfcSMaxim Levitsky 3075c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3076c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3077c45bccfcSMaxim Levitsky 3078c45bccfcSMaxim Levitsky // clear GIF and enable IF 3079c45bccfcSMaxim Levitsky // that should still not cause VM exit 3080c45bccfcSMaxim Levitsky clgi(); 3081e4007e62SMaxim Levitsky sti_nop(); 3082c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3083c45bccfcSMaxim Levitsky 3084c45bccfcSMaxim Levitsky stgi(); 3085c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3086c45bccfcSMaxim Levitsky } 3087c45bccfcSMaxim Levitsky 3088c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif(void) 3089c45bccfcSMaxim Levitsky { 3090c45bccfcSMaxim Levitsky handle_irq(0x55, dummy_isr); 3091c45bccfcSMaxim Levitsky 3092c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3093c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3094c45bccfcSMaxim Levitsky vmcb->save.rflags &= ~X86_EFLAGS_IF; 3095c45bccfcSMaxim Levitsky 3096c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_gif_guest); 30972602a896SMaxim Levitsky cli(); 3098c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3099c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3100c45bccfcSMaxim Levitsky } 3101c45bccfcSMaxim Levitsky 3102c45bccfcSMaxim Levitsky // subtest: test that a clever guest can trigger an interrupt by setting GIF 3103c45bccfcSMaxim Levitsky // if GIF is not intercepted and interrupt comes after guest 3104c45bccfcSMaxim Levitsky // started running 3105c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif_guest2(struct svm_test *test) 3106c45bccfcSMaxim Levitsky { 3107c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3108c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3109c45bccfcSMaxim Levitsky 3110c45bccfcSMaxim Levitsky clgi(); 3111c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3112c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3113c45bccfcSMaxim Levitsky 3114c45bccfcSMaxim Levitsky stgi(); 3115c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3116c45bccfcSMaxim Levitsky } 3117c45bccfcSMaxim Levitsky 3118c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif2(void) 3119c45bccfcSMaxim Levitsky { 3120c45bccfcSMaxim Levitsky handle_irq(0x55, dummy_isr); 3121c45bccfcSMaxim Levitsky 3122c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3123c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3124c45bccfcSMaxim Levitsky vmcb->save.rflags |= X86_EFLAGS_IF; 3125c45bccfcSMaxim Levitsky 3126c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_gif_guest2); 3127c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3128c45bccfcSMaxim Levitsky } 3129c45bccfcSMaxim Levitsky 3130c45bccfcSMaxim Levitsky 3131c45bccfcSMaxim Levitsky // subtest: test that pending NMI will be handled when guest enables GIF 3132c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_nmi_guest(struct svm_test *test) 3133c45bccfcSMaxim Levitsky { 3134c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3135c45bccfcSMaxim Levitsky report(!nmi_recevied, "No NMI expected"); 3136c45bccfcSMaxim Levitsky cli(); // should have no effect 3137c45bccfcSMaxim Levitsky 3138c45bccfcSMaxim Levitsky clgi(); 3139c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI, 0); 3140e4007e62SMaxim Levitsky sti_nop(); // should have no effect 3141c45bccfcSMaxim Levitsky report(!nmi_recevied, "No NMI expected"); 3142c45bccfcSMaxim Levitsky 3143c45bccfcSMaxim Levitsky stgi(); 3144c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3145c45bccfcSMaxim Levitsky } 3146c45bccfcSMaxim Levitsky 3147c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_nmi(void) 3148c45bccfcSMaxim Levitsky { 3149c45bccfcSMaxim Levitsky handle_exception(2, dummy_nmi_handler); 3150c45bccfcSMaxim Levitsky 3151c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_NMI); 3152c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3153c45bccfcSMaxim Levitsky vmcb->save.rflags |= X86_EFLAGS_IF; 3154c45bccfcSMaxim Levitsky 3155c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_nmi_guest); 3156c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&nmi_recevied, SVM_EXIT_NMI); 3157c45bccfcSMaxim Levitsky } 3158c45bccfcSMaxim Levitsky 3159c45bccfcSMaxim Levitsky // test that pending SMI will be handled when guest enables GIF 3160c45bccfcSMaxim Levitsky // TODO: can't really count #SMIs so just test that guest doesn't hang 3161c45bccfcSMaxim Levitsky // and VMexits on SMI 3162c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_smi_guest(struct svm_test *test) 3163c45bccfcSMaxim Levitsky { 3164c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3165c45bccfcSMaxim Levitsky 3166c45bccfcSMaxim Levitsky clgi(); 3167c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_SMI, 0); 3168e4007e62SMaxim Levitsky sti_nop(); // should have no effect 3169c45bccfcSMaxim Levitsky stgi(); 3170c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3171c45bccfcSMaxim Levitsky } 3172c45bccfcSMaxim Levitsky 3173c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_smi(void) 3174c45bccfcSMaxim Levitsky { 3175c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_SMI); 3176c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3177c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_smi_guest); 3178c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(NULL, SVM_EXIT_SMI); 3179c45bccfcSMaxim Levitsky } 3180c45bccfcSMaxim Levitsky 31818177dc62SManali Shukla static void svm_l2_ac_test(void) 31828177dc62SManali Shukla { 31838177dc62SManali Shukla bool hit_ac = false; 31848177dc62SManali Shukla 31858177dc62SManali Shukla write_cr0(read_cr0() | X86_CR0_AM); 31868177dc62SManali Shukla write_rflags(read_rflags() | X86_EFLAGS_AC); 31878177dc62SManali Shukla 31888177dc62SManali Shukla run_in_user(generate_usermode_ac, AC_VECTOR, 0, 0, 0, 0, &hit_ac); 31898177dc62SManali Shukla report(hit_ac, "Usermode #AC handled in L2"); 31908177dc62SManali Shukla vmmcall(); 31918177dc62SManali Shukla } 31928177dc62SManali Shukla 31938177dc62SManali Shukla struct svm_exception_test { 31948177dc62SManali Shukla u8 vector; 31958177dc62SManali Shukla void (*guest_code)(void); 31968177dc62SManali Shukla }; 31978177dc62SManali Shukla 31988177dc62SManali Shukla struct svm_exception_test svm_exception_tests[] = { 31998177dc62SManali Shukla { GP_VECTOR, generate_non_canonical_gp }, 32008177dc62SManali Shukla { UD_VECTOR, generate_ud }, 32018177dc62SManali Shukla { DE_VECTOR, generate_de }, 32028177dc62SManali Shukla { DB_VECTOR, generate_single_step_db }, 320344550f53SManali Shukla { BP_VECTOR, generate_bp }, 32048177dc62SManali Shukla { AC_VECTOR, svm_l2_ac_test }, 32050851b7f7SManali Shukla { OF_VECTOR, generate_of }, 3206694e59baSManali Shukla { NM_VECTOR, generate_cr0_ts_nm }, 3207694e59baSManali Shukla { NM_VECTOR, generate_cr0_em_nm }, 32088177dc62SManali Shukla }; 32098177dc62SManali Shukla 32108177dc62SManali Shukla static u8 svm_exception_test_vector; 32118177dc62SManali Shukla 32128177dc62SManali Shukla static void svm_exception_handler(struct ex_regs *regs) 32138177dc62SManali Shukla { 32148177dc62SManali Shukla report(regs->vector == svm_exception_test_vector, 32158177dc62SManali Shukla "Handling %s in L2's exception handler", 32168177dc62SManali Shukla exception_mnemonic(svm_exception_test_vector)); 32178177dc62SManali Shukla vmmcall(); 32188177dc62SManali Shukla } 32198177dc62SManali Shukla 32208177dc62SManali Shukla static void handle_exception_in_l2(u8 vector) 32218177dc62SManali Shukla { 32228177dc62SManali Shukla handler old_handler = handle_exception(vector, svm_exception_handler); 32238177dc62SManali Shukla svm_exception_test_vector = vector; 32248177dc62SManali Shukla 32258177dc62SManali Shukla report(svm_vmrun() == SVM_EXIT_VMMCALL, 32268177dc62SManali Shukla "%s handled by L2", exception_mnemonic(vector)); 32278177dc62SManali Shukla 32288177dc62SManali Shukla handle_exception(vector, old_handler); 32298177dc62SManali Shukla } 32308177dc62SManali Shukla 32318177dc62SManali Shukla static void handle_exception_in_l1(u32 vector) 32328177dc62SManali Shukla { 32338177dc62SManali Shukla u32 old_ie = vmcb->control.intercept_exceptions; 32348177dc62SManali Shukla 32358177dc62SManali Shukla vmcb->control.intercept_exceptions |= (1ULL << vector); 32368177dc62SManali Shukla 32378177dc62SManali Shukla report(svm_vmrun() == (SVM_EXIT_EXCP_BASE + vector), 32388177dc62SManali Shukla "%s handled by L1", exception_mnemonic(vector)); 32398177dc62SManali Shukla 32408177dc62SManali Shukla vmcb->control.intercept_exceptions = old_ie; 32418177dc62SManali Shukla } 32428177dc62SManali Shukla 32438177dc62SManali Shukla static void svm_exception_test(void) 32448177dc62SManali Shukla { 32458177dc62SManali Shukla struct svm_exception_test *t; 32468177dc62SManali Shukla int i; 32478177dc62SManali Shukla 32488177dc62SManali Shukla for (i = 0; i < ARRAY_SIZE(svm_exception_tests); i++) { 32498177dc62SManali Shukla t = &svm_exception_tests[i]; 32508177dc62SManali Shukla test_set_guest((test_guest_func)t->guest_code); 32518177dc62SManali Shukla 32528177dc62SManali Shukla handle_exception_in_l2(t->vector); 32538177dc62SManali Shukla vmcb_ident(vmcb); 32548177dc62SManali Shukla 32558177dc62SManali Shukla handle_exception_in_l1(t->vector); 32568177dc62SManali Shukla vmcb_ident(vmcb); 32578177dc62SManali Shukla } 32588177dc62SManali Shukla } 32598177dc62SManali Shukla 3260*c64f24fdSMaxim Levitsky static void shutdown_intercept_test_guest(struct svm_test *test) 3261*c64f24fdSMaxim Levitsky { 3262*c64f24fdSMaxim Levitsky asm volatile ("ud2"); 3263*c64f24fdSMaxim Levitsky report_fail("should not reach here\n"); 3264*c64f24fdSMaxim Levitsky 3265*c64f24fdSMaxim Levitsky } 3266*c64f24fdSMaxim Levitsky 3267*c64f24fdSMaxim Levitsky static void svm_shutdown_intercept_test(void) 3268*c64f24fdSMaxim Levitsky { 3269*c64f24fdSMaxim Levitsky test_set_guest(shutdown_intercept_test_guest); 3270*c64f24fdSMaxim Levitsky vmcb->save.idtr.base = (u64)alloc_vpage(); 3271*c64f24fdSMaxim Levitsky vmcb->control.intercept |= (1ULL << INTERCEPT_SHUTDOWN); 3272*c64f24fdSMaxim Levitsky svm_vmrun(); 3273*c64f24fdSMaxim Levitsky report(vmcb->control.exit_code == SVM_EXIT_SHUTDOWN, "shutdown test passed"); 3274*c64f24fdSMaxim Levitsky } 3275*c64f24fdSMaxim Levitsky 32763f27d772SManali Shukla struct svm_test svm_tests[] = { 3277ad879127SKrish Sadhukhan { "null", default_supported, default_prepare, 3278ad879127SKrish Sadhukhan default_prepare_gif_clear, null_test, 3279ad879127SKrish Sadhukhan default_finished, null_check }, 3280ad879127SKrish Sadhukhan { "vmrun", default_supported, default_prepare, 3281ad879127SKrish Sadhukhan default_prepare_gif_clear, test_vmrun, 3282ad879127SKrish Sadhukhan default_finished, check_vmrun }, 3283ad879127SKrish Sadhukhan { "ioio", default_supported, prepare_ioio, 3284ad879127SKrish Sadhukhan default_prepare_gif_clear, test_ioio, 3285ad879127SKrish Sadhukhan ioio_finished, check_ioio }, 3286ad879127SKrish Sadhukhan { "vmrun intercept check", default_supported, prepare_no_vmrun_int, 3287ad879127SKrish Sadhukhan default_prepare_gif_clear, null_test, default_finished, 3288ad879127SKrish Sadhukhan check_no_vmrun_int }, 3289401299a5SPaolo Bonzini { "rsm", default_supported, 3290401299a5SPaolo Bonzini prepare_rsm_intercept, default_prepare_gif_clear, 3291401299a5SPaolo Bonzini test_rsm_intercept, finished_rsm_intercept, check_rsm_intercept }, 3292ad879127SKrish Sadhukhan { "cr3 read intercept", default_supported, 3293ad879127SKrish Sadhukhan prepare_cr3_intercept, default_prepare_gif_clear, 3294ad879127SKrish Sadhukhan test_cr3_intercept, default_finished, check_cr3_intercept }, 3295ad879127SKrish Sadhukhan { "cr3 read nointercept", default_supported, default_prepare, 3296ad879127SKrish Sadhukhan default_prepare_gif_clear, test_cr3_intercept, default_finished, 3297ad879127SKrish Sadhukhan check_cr3_nointercept }, 3298ad879127SKrish Sadhukhan { "cr3 read intercept emulate", smp_supported, 3299ad879127SKrish Sadhukhan prepare_cr3_intercept_bypass, default_prepare_gif_clear, 3300ad879127SKrish Sadhukhan test_cr3_intercept_bypass, default_finished, check_cr3_intercept }, 3301ad879127SKrish Sadhukhan { "dr intercept check", default_supported, prepare_dr_intercept, 3302ad879127SKrish Sadhukhan default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished, 3303ad879127SKrish Sadhukhan check_dr_intercept }, 3304ad879127SKrish Sadhukhan { "next_rip", next_rip_supported, prepare_next_rip, 3305ad879127SKrish Sadhukhan default_prepare_gif_clear, test_next_rip, 3306ad879127SKrish Sadhukhan default_finished, check_next_rip }, 3307ad879127SKrish Sadhukhan { "msr intercept check", default_supported, prepare_msr_intercept, 3308ad879127SKrish Sadhukhan default_prepare_gif_clear, test_msr_intercept, 3309ad879127SKrish Sadhukhan msr_intercept_finished, check_msr_intercept }, 3310ad879127SKrish Sadhukhan { "mode_switch", default_supported, prepare_mode_switch, 3311ad879127SKrish Sadhukhan default_prepare_gif_clear, test_mode_switch, 3312ad879127SKrish Sadhukhan mode_switch_finished, check_mode_switch }, 3313ad879127SKrish Sadhukhan { "asid_zero", default_supported, prepare_asid_zero, 3314ad879127SKrish Sadhukhan default_prepare_gif_clear, test_asid_zero, 3315ad879127SKrish Sadhukhan default_finished, check_asid_zero }, 3316ad879127SKrish Sadhukhan { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, 3317ad879127SKrish Sadhukhan default_prepare_gif_clear, sel_cr0_bug_test, 3318ad879127SKrish Sadhukhan sel_cr0_bug_finished, sel_cr0_bug_check }, 331910a65fc4SNadav Amit { "tsc_adjust", tsc_adjust_supported, tsc_adjust_prepare, 3320ad879127SKrish Sadhukhan default_prepare_gif_clear, tsc_adjust_test, 3321ad879127SKrish Sadhukhan default_finished, tsc_adjust_check }, 3322ad879127SKrish Sadhukhan { "latency_run_exit", default_supported, latency_prepare, 3323ad879127SKrish Sadhukhan default_prepare_gif_clear, latency_test, 3324ad879127SKrish Sadhukhan latency_finished, latency_check }, 3325f7fa53dcSPaolo Bonzini { "latency_run_exit_clean", default_supported, latency_prepare, 3326f7fa53dcSPaolo Bonzini default_prepare_gif_clear, latency_test, 3327f7fa53dcSPaolo Bonzini latency_finished_clean, latency_check }, 3328ad879127SKrish Sadhukhan { "latency_svm_insn", default_supported, lat_svm_insn_prepare, 3329ad879127SKrish Sadhukhan default_prepare_gif_clear, null_test, 3330ad879127SKrish Sadhukhan lat_svm_insn_finished, lat_svm_insn_check }, 33314b4fb247SPaolo Bonzini { "exc_inject", default_supported, exc_inject_prepare, 33324b4fb247SPaolo Bonzini default_prepare_gif_clear, exc_inject_test, 33334b4fb247SPaolo Bonzini exc_inject_finished, exc_inject_check }, 3334ad879127SKrish Sadhukhan { "pending_event", default_supported, pending_event_prepare, 3335ad879127SKrish Sadhukhan default_prepare_gif_clear, 3336ad879127SKrish Sadhukhan pending_event_test, pending_event_finished, pending_event_check }, 333785dc2aceSPaolo Bonzini { "pending_event_cli", default_supported, pending_event_cli_prepare, 333885dc2aceSPaolo Bonzini pending_event_cli_prepare_gif_clear, 333985dc2aceSPaolo Bonzini pending_event_cli_test, pending_event_cli_finished, 334085dc2aceSPaolo Bonzini pending_event_cli_check }, 334185dc2aceSPaolo Bonzini { "interrupt", default_supported, interrupt_prepare, 334285dc2aceSPaolo Bonzini default_prepare_gif_clear, interrupt_test, 334385dc2aceSPaolo Bonzini interrupt_finished, interrupt_check }, 3344d4db486bSCathy Avery { "nmi", default_supported, nmi_prepare, 3345d4db486bSCathy Avery default_prepare_gif_clear, nmi_test, 3346d4db486bSCathy Avery nmi_finished, nmi_check }, 33479da1f4d8SCathy Avery { "nmi_hlt", smp_supported, nmi_prepare, 33489da1f4d8SCathy Avery default_prepare_gif_clear, nmi_hlt_test, 33499da1f4d8SCathy Avery nmi_hlt_finished, nmi_hlt_check }, 335008200397SSantosh Shukla { "vnmi", vnmi_supported, vnmi_prepare, 335108200397SSantosh Shukla default_prepare_gif_clear, vnmi_test, 335208200397SSantosh Shukla vnmi_finished, vnmi_check }, 33539c838954SCathy Avery { "virq_inject", default_supported, virq_inject_prepare, 33549c838954SCathy Avery default_prepare_gif_clear, virq_inject_test, 33559c838954SCathy Avery virq_inject_finished, virq_inject_check }, 3356da338a31SMaxim Levitsky { "reg_corruption", default_supported, reg_corruption_prepare, 3357da338a31SMaxim Levitsky default_prepare_gif_clear, reg_corruption_test, 3358da338a31SMaxim Levitsky reg_corruption_finished, reg_corruption_check }, 33594770e9c8SCathy Avery { "svm_init_startup_test", smp_supported, init_startup_prepare, 33604770e9c8SCathy Avery default_prepare_gif_clear, null_test, 33614770e9c8SCathy Avery init_startup_finished, init_startup_check }, 3362d5da6dfeSCathy Avery { "svm_init_intercept_test", smp_supported, init_intercept_prepare, 3363d5da6dfeSCathy Avery default_prepare_gif_clear, init_intercept_test, 3364d5da6dfeSCathy Avery init_intercept_finished, init_intercept_check, .on_vcpu = 2 }, 33657839b0ecSKrish Sadhukhan { "host_rflags", default_supported, host_rflags_prepare, 33667839b0ecSKrish Sadhukhan host_rflags_prepare_gif_clear, host_rflags_test, 33677839b0ecSKrish Sadhukhan host_rflags_finished, host_rflags_check }, 3368f6972bd6SLara Lazier { "vgif", vgif_supported, prepare_vgif_enabled, 3369f6972bd6SLara Lazier default_prepare_gif_clear, test_vgif, vgif_finished, 3370f6972bd6SLara Lazier vgif_check }, 3371f32183f5SJim Mattson TEST(svm_cr4_osxsave_test), 3372ba29942cSKrish Sadhukhan TEST(svm_guest_state_test), 33737a57ef5dSMaxim Levitsky TEST(svm_vmrun_errata_test), 33740b6f6cedSKrish Sadhukhan TEST(svm_vmload_vmsave), 3375665f5677SKrish Sadhukhan TEST(svm_test_singlestep), 3376694e59baSManali Shukla TEST(svm_no_nm_test), 33778177dc62SManali Shukla TEST(svm_exception_test), 3378537d39dfSMaxim Levitsky TEST(svm_lbrv_test0), 3379537d39dfSMaxim Levitsky TEST(svm_lbrv_test1), 3380537d39dfSMaxim Levitsky TEST(svm_lbrv_test2), 3381537d39dfSMaxim Levitsky TEST(svm_lbrv_nested_test1), 3382537d39dfSMaxim Levitsky TEST(svm_lbrv_nested_test2), 3383c45bccfcSMaxim Levitsky TEST(svm_intr_intercept_mix_if), 3384c45bccfcSMaxim Levitsky TEST(svm_intr_intercept_mix_gif), 3385c45bccfcSMaxim Levitsky TEST(svm_intr_intercept_mix_gif2), 3386c45bccfcSMaxim Levitsky TEST(svm_intr_intercept_mix_nmi), 3387c45bccfcSMaxim Levitsky TEST(svm_intr_intercept_mix_smi), 3388a8503d50SMaxim Levitsky TEST(svm_tsc_scale_test), 33898650dffeSMaxim Levitsky TEST(pause_filter_test), 3390*c64f24fdSMaxim Levitsky TEST(svm_shutdown_intercept_test), 3391ad879127SKrish Sadhukhan { NULL, NULL, NULL, NULL, NULL, NULL, NULL } 3392ad879127SKrish Sadhukhan }; 3393712840d5SManali Shukla 3394712840d5SManali Shukla int main(int ac, char **av) 3395712840d5SManali Shukla { 3396ade7601dSSean Christopherson setup_vm(); 3397712840d5SManali Shukla return run_svm_tests(ac, av, svm_tests); 3398712840d5SManali Shukla } 3399