#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"
#include "delay.h"

#define SVM_EXIT_MAX_DR_INTERCEPT 0x3f

#define LATENCY_RUNS 1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

static void null_test(struct svm_test *test)
{
}

static bool null_check(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

static void prepare_no_vmrun_int(struct svm_test *test)
{
	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct svm_test *test)
{
	asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb)));
}

static bool check_vmrun(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_rsm_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept |= 1 << INTERCEPT_RSM;
	vmcb->control.intercept_exceptions |= (1ULL << UD_VECTOR);
}

static void test_rsm_intercept(struct svm_test *test)
{
	asm volatile ("rsm" : : : "memory");
}

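/*
 * The RSM intercept test runs in two stages: stage 0 expects the intercepted
 * RSM itself to cause a #VMEXIT, after which the intercept is cleared;
 * stage 1 then expects the re-executed RSM (outside of SMM) to raise #UD,
 * which is caught via the exception intercept installed in
 * prepare_rsm_intercept().  finished_rsm_intercept() below advances the
 * stages and skips the two-byte RSM opcode once the #UD has been observed.
 * Like the other tests in this file, these callbacks are wired up via the
 * svm_tests[] table further down.
 */
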
static bool check_rsm_intercept(struct svm_test *test)
{
	return get_test_stage(test) == 2;
}

static bool finished_rsm_intercept(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_RSM) {
			report_fail("VMEXIT not due to rsm. Exit reason 0x%x",
				    vmcb->control.exit_code);
			return true;
		}
		vmcb->control.intercept &= ~(1 << INTERCEPT_RSM);
		inc_test_stage(test);
		break;

	case 1:
		if (vmcb->control.exit_code != SVM_EXIT_EXCP_BASE + UD_VECTOR) {
			report_fail("VMEXIT not due to #UD. Exit reason 0x%x",
				    vmcb->control.exit_code);
			return true;
		}
		vmcb->save.rip += 2;
		inc_test_stage(test);
		break;

	default:
		return true;
	}
	return get_test_stage(test) == 2;
}

static void prepare_cr3_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct svm_test *test)
{
	asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct svm_test *test)
{
	return null_check(test) && test->scratch == read_cr3();
}

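/*
 * CR3 intercept bypass test: while the guest spins in
 * test_cr3_intercept_bypass(), a second CPU rewrites the instruction at the
 * mmio_insn label (initially a harmless MMIO store) into
 * "mov %cr3, %rax; nop", so that the CR3 read intercept installed in
 * prepare_cr3_intercept_bypass() is exercised by an instruction that was
 * patched after the guest already started running, i.e. a stale instruction
 * fetch must not be used to bypass the intercept.
 */
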
static void corrupt_cr3_intercept_bypass(void *_test)
{
	struct svm_test *test = _test;
	extern volatile u32 mmio_insn;

	while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
		pause();
	pause();
	pause();
	pause();
	mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_cr_read |= 1 << 3;
	on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct svm_test *test)
{
	ulong a = 0xa0000;

	test->scratch = 1;
	while (test->scratch != 2)
		barrier();

	asm volatile ("mmio_insn: mov %0, (%0); nop"
		      : "+a"(a) : : "memory");
	test->scratch = a;
}

static void prepare_dr_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_dr_read = 0xff;
	vmcb->control.intercept_dr_write = 0xff;
}

static void test_dr_intercept(struct svm_test *test)
{
	unsigned int i, failcnt = 0;

	/* Loop testing debug register reads */
	for (i = 0; i < 8; i++) {

		switch (i) {
		case 0:
			asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 1:
			asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 2:
			asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 3:
			asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 4:
			asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 5:
			asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 6:
			asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 7:
			asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
			break;
		}

		if (test->scratch != i) {
			report_fail("dr%u read intercept", i);
			failcnt++;
		}
	}

	/* Loop testing debug register writes */
	for (i = 0; i < 8; i++) {

		switch (i) {
		case 0:
			asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
			break;
		case 1:
			asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
			break;
		case 2:
			asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
			break;
		case 3:
			asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
			break;
		case 4:
			asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
			break;
		case 5:
			asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
			break;
		case 6:
			asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
			break;
		case 7:
			asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
			break;
		}

		if (test->scratch != i) {
			report_fail("dr%u write intercept", i);
			failcnt++;
		}
	}

	test->scratch = failcnt;
}

static bool dr_intercept_finished(struct svm_test *test)
{
	ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0);

	/* Only expect DR intercepts */
	if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
		return true;

	/*
	 * Compute debug register number.
	 * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
	 * Programmer's Manual Volume 2 - System Programming:
	 * http://support.amd.com/TechDocs/24593.pdf
	 * there are 16 VMEXIT codes each for DR read and write.
	 */
	test->scratch = (n % 16);

	/* Jump over MOV instruction */
	vmcb->save.rip += 3;

	return false;
}

static bool check_dr_intercept(struct svm_test *test)
{
	return !test->scratch;
}

static bool next_rip_supported(void)
{
	return this_cpu_has(X86_FEATURE_NRIPS);
}

static void prepare_next_rip(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}


static void test_next_rip(struct svm_test *test)
{
	asm volatile ("rdtsc\n\t"
		      ".globl exp_next_rip\n\t"
		      "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct svm_test *test)
{
	extern char exp_next_rip;
	unsigned long address = (unsigned long)&exp_next_rip;

	return address == vmcb->control.next_rip;
}

extern u8 *msr_bitmap;

static void prepare_msr_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
	vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
	memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
}

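/*
 * Illustrative sketch, not used by the tests: per the APM, the MSR
 * permission map consists of three 2 KiB blocks with two bits per MSR
 * (even bit = read intercept, odd bit = write intercept), covering MSRs
 * 0x0 - 0x1fff, 0xc0000000 - 0xc0001fff and 0xc0010000 - 0xc0011fff.
 * memset()ing the whole bitmap to 0xff in prepare_msr_intercept() therefore
 * intercepts every read and write in those ranges, which is also why
 * test_msr_intercept() below only walks those ranges.  The hypothetical
 * helper merely shows where a single MSR's read-intercept bit lives.
 */
static inline bool msr_read_intercepted(u8 *bitmap, u32 msr)
{
	u32 base, bit;

	if (msr <= 0x1fff)
		base = 0x0;
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		base = 0x800;
	else if (msr >= 0xc0010000 && msr <= 0xc0011fff)
		base = 0x1000;
	else
		return true;	/* MSRs outside the map always intercept */

	bit = (msr & 0x1fff) * 2;	/* two bits per MSR, read bit first */
	return bitmap[base + bit / 8] & (1 << (bit % 8));
}
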
static void test_msr_intercept(struct svm_test *test)
{
	unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
	unsigned long msr_index;

	for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
		if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
			/*
			 * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
			 * Programmer's Manual volume 2 - System Programming:
			 * http://support.amd.com/TechDocs/24593.pdf
			 * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
			 */
			continue;
		}

		/* Skip gaps between supported MSR ranges */
		if (msr_index == 0x2000)
			msr_index = 0xc0000000;
		else if (msr_index == 0xc0002000)
			msr_index = 0xc0010000;

		test->scratch = -1;

		rdmsr(msr_index);

		/* Check that a read intercept occurred for MSR at msr_index */
		if (test->scratch != msr_index)
			report_fail("MSR 0x%lx read intercept", msr_index);

		/*
		 * Poor man's approach to generate a value that
		 * seems arbitrary each time around the loop.
		 */
		msr_value += (msr_value << 1);

		wrmsr(msr_index, msr_value);

		/* Check that a write intercept occurred for MSR with msr_value */
		if (test->scratch != msr_value)
			report_fail("MSR 0x%lx write intercept", msr_index);
	}

	test->scratch = -2;
}

static bool msr_intercept_finished(struct svm_test *test)
{
	u32 exit_code = vmcb->control.exit_code;
	u64 exit_info_1;
	u8 *opcode;

	if (exit_code == SVM_EXIT_MSR) {
		exit_info_1 = vmcb->control.exit_info_1;
	} else {
		/*
		 * If #GP exception occurs instead, check that it was
		 * for RDMSR/WRMSR and set exit_info_1 accordingly.
		 */

		if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
			return true;

		opcode = (u8 *)vmcb->save.rip;
		if (opcode[0] != 0x0f)
			return true;

		switch (opcode[1]) {
		case 0x30: /* WRMSR */
			exit_info_1 = 1;
			break;
		case 0x32: /* RDMSR */
			exit_info_1 = 0;
			break;
		default:
			return true;
		}

		/*
		 * Warn that a #GP exception occurred instead.
		 * RCX holds the MSR index.
		 */
		printf("%s 0x%lx #GP exception\n",
		       exit_info_1 ? "WRMSR" : "RDMSR", get_regs().rcx);
	}

	/* Jump over RDMSR/WRMSR instruction */
	vmcb->save.rip += 2;

	/*
	 * Test whether the intercept was for RDMSR/WRMSR.
	 * For RDMSR, test->scratch is set to the MSR index;
	 * RCX holds the MSR index.
	 * For WRMSR, test->scratch is set to the MSR value;
	 * RDX holds the upper 32 bits of the MSR value,
	 * while RAX holds its lower 32 bits.
	 */
	if (exit_info_1)
		test->scratch =
			((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff));
	else
		test->scratch = get_regs().rcx;

	return false;
}

static bool check_msr_intercept(struct svm_test *test)
{
	memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
	return (test->scratch == -2);
}

static void prepare_mode_switch(struct svm_test *test)
{
	vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
						| (1ULL << UD_VECTOR)
						| (1ULL << DF_VECTOR)
						| (1ULL << PF_VECTOR);
	test->scratch = 0;
}

static void test_mode_switch(struct svm_test *test)
{
	asm volatile(" cli\n"
		     " ljmp *1f\n" /* jump to 32-bit code segment */
		     "1:\n"
		     " .long 2f\n"
		     " .long " xstr(KERNEL_CS32) "\n"
		     ".code32\n"
		     "2:\n"
		     " movl %%cr0, %%eax\n"
		     " btcl $31, %%eax\n" /* clear PG */
		     " movl %%eax, %%cr0\n"
		     " movl $0xc0000080, %%ecx\n" /* EFER */
		     " rdmsr\n"
		     " btcl $8, %%eax\n" /* clear LME */
		     " wrmsr\n"
		     " movl %%cr4, %%eax\n"
		     " btcl $5, %%eax\n" /* clear PAE */
		     " movl %%eax, %%cr4\n"
		     " movw %[ds16], %%ax\n"
		     " movw %%ax, %%ds\n"
		     " ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
		     ".code16\n"
		     "3:\n"
		     " movl %%cr0, %%eax\n"
		     " btcl $0, %%eax\n" /* clear PE */
		     " movl %%eax, %%cr0\n"
		     " ljmpl $0, $4f\n" /* jump to real-mode */
		     "4:\n"
		     " vmmcall\n"
		     " movl %%cr0, %%eax\n"
		     " btsl $0, %%eax\n" /* set PE */
		     " movl %%eax, %%cr0\n"
		     " ljmpl %[cs32], $5f\n" /* back to protected mode */
		     ".code32\n"
		     "5:\n"
		     " movl %%cr4, %%eax\n"
		     " btsl $5, %%eax\n" /* set PAE */
		     " movl %%eax, %%cr4\n"
		     " movl $0xc0000080, %%ecx\n" /* EFER */
		     " rdmsr\n"
		     " btsl $8, %%eax\n" /* set LME */
		     " wrmsr\n"
		     " movl %%cr0, %%eax\n"
		     " btsl $31, %%eax\n" /* set PG */
		     " movl %%eax, %%cr0\n"
		     " ljmpl %[cs64], $6f\n" /* back to long mode */
		     ".code64\n\t"
		     "6:\n"
		     " vmmcall\n"
		     :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
			[cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		     : "rax", "rbx", "rcx", "rdx", "memory");
}

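/*
 * The guest code above steps the CPU all the way down from 64-bit long mode
 * to 32-bit protected mode, 16-bit protected mode and finally real mode,
 * issues a VMMCALL there, and then climbs back up to long mode for a second
 * VMMCALL.  mode_switch_finished() below sanity-checks CR0/CR4/EFER at the
 * intercepted VMMCALLs to confirm the guest reached the expected mode.
 */
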
static bool mode_switch_finished(struct svm_test *test)
{
	u64 cr0, cr4, efer;

	cr0 = vmcb->save.cr0;
	cr4 = vmcb->save.cr4;
	efer = vmcb->save.efer;

	/* Only expect VMMCALL intercepts */
	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
		return true;

	/* Jump over VMMCALL instruction */
	vmcb->save.rip += 3;

	/* Do sanity checks */
	switch (test->scratch) {
	case 0:
		/* Test should be in real mode now - check for this */
		if ((cr0 & 0x80000001) || /* CR0.PG, CR0.PE */
		    (cr4 & 0x00000020) || /* CR4.PAE */
		    (efer & 0x00000500)) /* EFER.LMA, EFER.LME */
			return true;
		break;
	case 2:
		/* Test should be back in long-mode now - check for this */
		if (((cr0 & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
		    ((cr4 & 0x00000020) != 0x00000020) || /* CR4.PAE */
		    ((efer & 0x00000500) != 0x00000500)) /* EFER.LMA, EFER.LME */
			return true;
		break;
	}

	/* one step forward */
	test->scratch += 1;

	return test->scratch == 2;
}

static bool check_mode_switch(struct svm_test *test)
{
	return test->scratch == 2;
}

extern u8 *io_bitmap;

static void prepare_ioio(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
	test->scratch = 0;
	memset(io_bitmap, 0, 8192);
	io_bitmap[8192] = 0xFF;
}

static void test_ioio(struct svm_test *test)
{
	// stage 0, test IO pass
	inb(0x5000);
	outb(0x0, 0x5000);
	if (get_test_stage(test) != 0)
		goto fail;

	// test IO width, in/out
	io_bitmap[0] = 0xFF;
	inc_test_stage(test);
	inb(0x0);
	if (get_test_stage(test) != 2)
		goto fail;

	outw(0x0, 0x0);
	if (get_test_stage(test) != 3)
		goto fail;

	inl(0x0);
	if (get_test_stage(test) != 4)
		goto fail;

	// test low/high IO port
	io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (get_test_stage(test) != 5)
		goto fail;

	io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
	inw(0x9000);
	if (get_test_stage(test) != 6)
		goto fail;

	// test partial pass
	io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
	inl(0x4FFF);
	if (get_test_stage(test) != 7)
		goto fail;

	// test across pages
	inc_test_stage(test);
	inl(0x7FFF);
	if (get_test_stage(test) != 8)
		goto fail;

	inc_test_stage(test);
	io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
	inl(0x7FFF);
	if (get_test_stage(test) != 10)
		goto fail;

	io_bitmap[0] = 0;
	inl(0xFFFF);
	if (get_test_stage(test) != 11)
		goto fail;

	io_bitmap[0] = 0xFF;
	io_bitmap[8192] = 0;
	inl(0xFFFF);
	inc_test_stage(test);
	if (get_test_stage(test) != 12)
		goto fail;

	return;

fail:
	report_fail("stage %d", get_test_stage(test));
	test->scratch = -1;
}
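
/*
 * Each I/O port maps to a single bit in the 8 KiB I/O permission bitmap
 * (plus one extra byte so that multi-byte accesses ending at port 0xffff
 * still have permission bits to consult).  On an IOIO #VMEXIT, exit_info_1
 * carries the port number in bits 31:16 and the access size in the
 * SVM_IOIO_SIZE field, which ioio_finished() below uses to clear the
 * offending bits and let the retried access through.  The helper is an
 * illustrative sketch of the bit arithmetic that test_ioio() open-codes;
 * it is not used by the tests.
 */
static inline void set_io_intercept(u8 *bitmap, u16 port, bool intercept)
{
	if (intercept)
		bitmap[port / 8] |= 1 << (port % 8);
	else
		bitmap[port / 8] &= ~(1 << (port % 8));
}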

static bool ioio_finished(struct svm_test *test)
{
	unsigned port, size;

	/* Only expect IOIO intercepts */
	if (vmcb->control.exit_code == SVM_EXIT_VMMCALL)
		return true;

	if (vmcb->control.exit_code != SVM_EXIT_IOIO)
		return true;

	/* one step forward */
	test->scratch += 1;

	port = vmcb->control.exit_info_1 >> 16;
	size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

	while (size--) {
		io_bitmap[port / 8] &= ~(1 << (port & 7));
		port++;
	}

	return false;
}

static bool check_ioio(struct svm_test *test)
{
	memset(io_bitmap, 0, 8193);
	return test->scratch != -1;
}

static void prepare_asid_zero(struct svm_test *test)
{
	vmcb->control.asid = 0;
}

static void test_asid_zero(struct svm_test *test)
{
	asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct svm_test *test)
{
	return true;
}

static void sel_cr0_bug_test(struct svm_test *test)
{
	unsigned long cr0;

	/* read cr0, set CD, and write back */
	cr0 = read_cr0();
	cr0 |= (1UL << 30);
	write_cr0(cr0);

	/*
	 * If we are here the test failed, not sure what to do now because we
	 * are not in guest-mode anymore so we can't trigger an intercept.
	 * Trigger a triple-fault for now.
	 */
	report_fail("sel_cr0 test. Cannot recover from this - exiting");
	exit(report_summary());
}

static bool sel_cr0_bug_check(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE (~0ull << 48)
static bool ok;

static bool tsc_adjust_supported(void)
{
	return this_cpu_has(X86_FEATURE_TSC_ADJUST);
}

static void tsc_adjust_prepare(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.tsc_offset = TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok = adjust == -TSC_ADJUST_VALUE;
}

static void tsc_adjust_test(struct svm_test *test)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok &= adjust == -TSC_ADJUST_VALUE;

	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok &= adjust <= -2 * TSC_ADJUST_VALUE;

	uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
	ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;

	uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
	ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
}

static bool tsc_adjust_check(struct svm_test *test)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);

	wrmsr(MSR_IA32_TSC_ADJUST, 0);
	return ok && adjust <= -2 * TSC_ADJUST_VALUE;
}


static u64 guest_tsc_delay_value;
/* number of bits to shift tsc right for stable result */
#define TSC_SHIFT 24
#define TSC_SCALE_ITERATIONS 10

static void svm_tsc_scale_guest(struct svm_test *test)
{
	u64 start_tsc = rdtsc();

	while (rdtsc() - start_tsc < guest_tsc_delay_value)
		cpu_relax();
}

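/*
 * Background for the scaling tests below: MSR_AMD64_TSC_RATIO holds the
 * guest/host TSC frequency ratio as an 8.32 fixed-point value (integer part
 * in bits 39:32, fraction in bits 31:0), which is why
 * svm_tsc_scale_run_testcase() programs it with "tsc_scale * (1ULL << 32)".
 * The helper below is an illustrative encoder only and is not used by the
 * tests.
 */
static inline u64 tsc_ratio_to_msr_value(double ratio)
{
	/* e.g. ratio 1.5 -> 0x0000000180000000 */
	return (u64)(ratio * (1ULL << 32));
}
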
static void svm_tsc_scale_run_testcase(u64 duration,
				       double tsc_scale, u64 tsc_offset)
{
	u64 start_tsc, actual_duration;

	guest_tsc_delay_value = (duration << TSC_SHIFT) * tsc_scale;

	test_set_guest(svm_tsc_scale_guest);
	vmcb->control.tsc_offset = tsc_offset;
	wrmsr(MSR_AMD64_TSC_RATIO, (u64)(tsc_scale * (1ULL << 32)));

	start_tsc = rdtsc();

	if (svm_vmrun() != SVM_EXIT_VMMCALL)
		report_fail("unexpected vm exit code 0x%x", vmcb->control.exit_code);

	actual_duration = (rdtsc() - start_tsc) >> TSC_SHIFT;

	report(duration == actual_duration, "tsc delay (expected: %lu, actual: %lu)",
	       duration, actual_duration);
}

static void svm_tsc_scale_test(void)
{
	int i;

	if (!tsc_scale_supported()) {
		report_skip("TSC scale not supported in the guest");
		return;
	}

	report(rdmsr(MSR_AMD64_TSC_RATIO) == TSC_RATIO_DEFAULT,
	       "initial TSC scale ratio");

	for (i = 0 ; i < TSC_SCALE_ITERATIONS; i++) {

		double tsc_scale = (double)(rdrand() % 100 + 1) / 10;
		int duration = rdrand() % 50 + 1;
		u64 tsc_offset = rdrand();

		report_info("duration=%d, tsc_scale=%d, tsc_offset=%ld",
			    duration, (int)(tsc_scale * 100), tsc_offset);

		svm_tsc_scale_run_testcase(duration, tsc_scale, tsc_offset);
	}

	svm_tsc_scale_run_testcase(50, 255, rdrand());
	svm_tsc_scale_run_testcase(50, 0.0001, rdrand());
}

static void latency_prepare(struct svm_test *test)
{
	default_prepare(test);
	runs = LATENCY_RUNS;
	latvmrun_min = latvmexit_min = -1ULL;
	latvmrun_max = latvmexit_max = 0;
	vmrun_sum = vmexit_sum = 0;
	tsc_start = rdtsc();
}

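/*
 * The latency tests ping-pong between host and guest via VMMCALL for
 * LATENCY_RUNS iterations, timestamping with RDTSC on both sides of every
 * VMRUN/#VMEXIT; latency_check() then reports min/max/average cycle counts
 * for the two directions.
 */
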
static void latency_test(struct svm_test *test)
{
	u64 cycles;

start:
	tsc_end = rdtsc();

	cycles = tsc_end - tsc_start;

	if (cycles > latvmrun_max)
		latvmrun_max = cycles;

	if (cycles < latvmrun_min)
		latvmrun_min = cycles;

	vmrun_sum += cycles;

	tsc_start = rdtsc();

	asm volatile ("vmmcall" : : : "memory");
	goto start;
}

static bool latency_finished(struct svm_test *test)
{
	u64 cycles;

	tsc_end = rdtsc();

	cycles = tsc_end - tsc_start;

	if (cycles > latvmexit_max)
		latvmexit_max = cycles;

	if (cycles < latvmexit_min)
		latvmexit_min = cycles;

	vmexit_sum += cycles;

	vmcb->save.rip += 3;

	runs -= 1;

	tsc_end = rdtsc();

	return runs == 0;
}

static bool latency_finished_clean(struct svm_test *test)
{
	vmcb->control.clean = VMCB_CLEAN_ALL;
	return latency_finished(test);
}

static bool latency_check(struct svm_test *test)
{
	printf(" Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
	       latvmrun_min, vmrun_sum / LATENCY_RUNS);
	printf(" Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
	       latvmexit_min, vmexit_sum / LATENCY_RUNS);
	return true;
}

static void lat_svm_insn_prepare(struct svm_test *test)
{
	default_prepare(test);
	runs = LATENCY_RUNS;
	latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
	latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
	vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	u64 cycles;

	for ( ; runs != 0; runs--) {
		tsc_start = rdtsc();
		asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
		cycles = rdtsc() - tsc_start;
		if (cycles > latvmload_max)
			latvmload_max = cycles;
		if (cycles < latvmload_min)
			latvmload_min = cycles;
		vmload_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
		cycles = rdtsc() - tsc_start;
		if (cycles > latvmsave_max)
			latvmsave_max = cycles;
		if (cycles < latvmsave_min)
			latvmsave_min = cycles;
		vmsave_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("stgi\n\t");
		cycles = rdtsc() - tsc_start;
		if (cycles > latstgi_max)
			latstgi_max = cycles;
		if (cycles < latstgi_min)
			latstgi_min = cycles;
		stgi_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("clgi\n\t");
		cycles = rdtsc() - tsc_start;
		if (cycles > latclgi_max)
			latclgi_max = cycles;
		if (cycles < latclgi_min)
			latclgi_min = cycles;
		clgi_sum += cycles;
	}

	tsc_end = rdtsc();

	return true;
}

static bool lat_svm_insn_check(struct svm_test *test)
{
	printf(" Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
	       latvmload_min, vmload_sum / LATENCY_RUNS);
	printf(" Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
	       latvmsave_min, vmsave_sum / LATENCY_RUNS);
	printf(" Latency STGI: max: %ld min: %ld avg: %ld\n", latstgi_max,
	       latstgi_min, stgi_sum / LATENCY_RUNS);
	printf(" Latency CLGI: max: %ld min: %ld avg: %ld\n", latclgi_max,
	       latclgi_min, clgi_sum / LATENCY_RUNS);
	return true;
}

bool pending_event_ipi_fired;
bool pending_event_guest_run;

static void pending_event_ipi_isr(isr_regs_t *regs)
{
	pending_event_ipi_fired = true;
	eoi();
}

static void pending_event_prepare(struct svm_test *test)
{
	int ipi_vector = 0xf1;

	default_prepare(test);

	pending_event_ipi_fired = false;

	handle_irq(ipi_vector, pending_event_ipi_isr);

	pending_event_guest_run = false;

	vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
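	/*
	 * Intercepting INTR makes the physical IPI below cause a #VMEXIT
	 * rather than being delivered while the guest runs, and V_INTR_MASKING
	 * (set below) keeps the guest's RFLAGS.IF from gating physical
	 * interrupts, so the host can leave the IPI pending until it
	 * re-enables interrupts after the exit.
	 */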
	vmcb->control.int_ctl |= V_INTR_MASKING_MASK;

	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
		       APIC_DM_FIXED | ipi_vector, 0);

	set_test_stage(test, 0);
}

static void pending_event_test(struct svm_test *test)
{
	pending_event_guest_run = true;
}

static bool pending_event_finished(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_INTR) {
			report_fail("VMEXIT not due to pending interrupt. Exit reason 0x%x",
				    vmcb->control.exit_code);
			return true;
		}

		vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
		vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

		if (pending_event_guest_run) {
			report_fail("Guest ran before host received IPI\n");
			return true;
		}

		irq_enable();
		asm volatile ("nop");
		irq_disable();

		if (!pending_event_ipi_fired) {
			report_fail("Pending interrupt not dispatched after IRQ enabled\n");
			return true;
		}
		break;

	case 1:
		if (!pending_event_guest_run) {
			report_fail("Guest did not resume when no interrupt\n");
			return true;
		}
		break;
	}

	inc_test_stage(test);

	return get_test_stage(test) == 2;
}

static bool pending_event_check(struct svm_test *test)
{
	return get_test_stage(test) == 2;
}

static void pending_event_cli_prepare(struct svm_test *test)
{
	default_prepare(test);

	pending_event_ipi_fired = false;

	handle_irq(0xf1, pending_event_ipi_isr);

	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
		       APIC_DM_FIXED | 0xf1, 0);

	set_test_stage(test, 0);
}

static void pending_event_cli_prepare_gif_clear(struct svm_test *test)
{
	asm("cli");
}

static void pending_event_cli_test(struct svm_test *test)
{
	if (pending_event_ipi_fired == true) {
		set_test_stage(test, -1);
		report_fail("Interrupt preceded guest");
		vmmcall();
	}

	/* VINTR_MASKING is zero. This should cause the IPI to fire. */
	irq_enable();
	asm volatile ("nop");
	irq_disable();

	if (pending_event_ipi_fired != true) {
		set_test_stage(test, -1);
		report_fail("Interrupt not triggered by guest");
	}

	vmmcall();

	/*
	 * Now VINTR_MASKING=1, but no interrupt is pending so
	 * the VINTR interception should be clear in VMCB02. Check
	 * that L0 did not leave a stale VINTR in the VMCB.
	 */
	irq_enable();
	asm volatile ("nop");
	irq_disable();
}

static bool pending_event_cli_finished(struct svm_test *test)
{
	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
		report_fail("VM_EXIT return to host is not EXIT_VMMCALL exit reason 0x%x",
			    vmcb->control.exit_code);
		return true;
	}

	switch (get_test_stage(test)) {
	case 0:
		vmcb->save.rip += 3;

		pending_event_ipi_fired = false;

		vmcb->control.int_ctl |= V_INTR_MASKING_MASK;

		/* Now entering again with VINTR_MASKING=1. */
		apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
			       APIC_DM_FIXED | 0xf1, 0);

		break;

	case 1:
		if (pending_event_ipi_fired == true) {
			report_fail("Interrupt triggered by guest");
			return true;
		}

		irq_enable();
		asm volatile ("nop");
		irq_disable();

		if (pending_event_ipi_fired != true) {
			report_fail("Interrupt not triggered by host");
			return true;
		}

		break;

	default:
		return true;
	}

	inc_test_stage(test);

	return get_test_stage(test) == 2;
}

static bool pending_event_cli_check(struct svm_test *test)
{
	return get_test_stage(test) == 2;
}

#define TIMER_VECTOR 222

static volatile bool timer_fired;

static void timer_isr(isr_regs_t *regs)
{
	timer_fired = true;
	apic_write(APIC_EOI, 0);
}

static void interrupt_prepare(struct svm_test *test)
{
	default_prepare(test);
	handle_irq(TIMER_VECTOR, timer_isr);
	timer_fired = false;
	set_test_stage(test, 0);
}

static void interrupt_test(struct svm_test *test)
{
	long long start, loops;

	apic_write(APIC_LVTT, TIMER_VECTOR);
	irq_enable();
	apic_write(APIC_TMICT, 1); //Timer Initial Count Register 0x380 one-shot
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");

	report(timer_fired, "direct interrupt while running guest");

	if (!timer_fired) {
		set_test_stage(test, -1);
		vmmcall();
	}

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmmcall();

	timer_fired = false;
	apic_write(APIC_TMICT, 1);
0; loops < 10000000 && !timer_fired; loops++) 117485dc2aceSPaolo Bonzini asm volatile ("nop"); 117585dc2aceSPaolo Bonzini 117685dc2aceSPaolo Bonzini report(timer_fired, "intercepted interrupt while running guest"); 117785dc2aceSPaolo Bonzini 117885dc2aceSPaolo Bonzini if (!timer_fired) { 117985dc2aceSPaolo Bonzini set_test_stage(test, -1); 118085dc2aceSPaolo Bonzini vmmcall(); 118185dc2aceSPaolo Bonzini } 118285dc2aceSPaolo Bonzini 118385dc2aceSPaolo Bonzini irq_enable(); 118485dc2aceSPaolo Bonzini apic_write(APIC_TMICT, 0); 118585dc2aceSPaolo Bonzini irq_disable(); 118685dc2aceSPaolo Bonzini 118785dc2aceSPaolo Bonzini timer_fired = false; 118885dc2aceSPaolo Bonzini start = rdtsc(); 118985dc2aceSPaolo Bonzini apic_write(APIC_TMICT, 1000000); 1190a3001422SOliver Upton safe_halt(); 119185dc2aceSPaolo Bonzini 119285dc2aceSPaolo Bonzini report(rdtsc() - start > 10000 && timer_fired, 119385dc2aceSPaolo Bonzini "direct interrupt + hlt"); 119485dc2aceSPaolo Bonzini 119585dc2aceSPaolo Bonzini if (!timer_fired) { 119685dc2aceSPaolo Bonzini set_test_stage(test, -1); 119785dc2aceSPaolo Bonzini vmmcall(); 119885dc2aceSPaolo Bonzini } 119985dc2aceSPaolo Bonzini 120085dc2aceSPaolo Bonzini apic_write(APIC_TMICT, 0); 120185dc2aceSPaolo Bonzini irq_disable(); 120285dc2aceSPaolo Bonzini vmmcall(); 120385dc2aceSPaolo Bonzini 120485dc2aceSPaolo Bonzini timer_fired = false; 120585dc2aceSPaolo Bonzini start = rdtsc(); 120685dc2aceSPaolo Bonzini apic_write(APIC_TMICT, 1000000); 120785dc2aceSPaolo Bonzini asm volatile ("hlt"); 120885dc2aceSPaolo Bonzini 120985dc2aceSPaolo Bonzini report(rdtsc() - start > 10000 && timer_fired, 121085dc2aceSPaolo Bonzini "intercepted interrupt + hlt"); 121185dc2aceSPaolo Bonzini 121285dc2aceSPaolo Bonzini if (!timer_fired) { 121385dc2aceSPaolo Bonzini set_test_stage(test, -1); 121485dc2aceSPaolo Bonzini vmmcall(); 121585dc2aceSPaolo Bonzini } 121685dc2aceSPaolo Bonzini 121785dc2aceSPaolo Bonzini apic_write(APIC_TMICT, 0); 121885dc2aceSPaolo Bonzini irq_disable(); 121985dc2aceSPaolo Bonzini } 122085dc2aceSPaolo Bonzini 122185dc2aceSPaolo Bonzini static bool interrupt_finished(struct svm_test *test) 122285dc2aceSPaolo Bonzini { 122385dc2aceSPaolo Bonzini switch (get_test_stage(test)) { 122485dc2aceSPaolo Bonzini case 0: 122585dc2aceSPaolo Bonzini case 2: 1226096cf7feSPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1227198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1228096cf7feSPaolo Bonzini vmcb->control.exit_code); 122985dc2aceSPaolo Bonzini return true; 123085dc2aceSPaolo Bonzini } 1231096cf7feSPaolo Bonzini vmcb->save.rip += 3; 123285dc2aceSPaolo Bonzini 1233096cf7feSPaolo Bonzini vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1234096cf7feSPaolo Bonzini vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 123585dc2aceSPaolo Bonzini break; 123685dc2aceSPaolo Bonzini 123785dc2aceSPaolo Bonzini case 1: 123885dc2aceSPaolo Bonzini case 3: 1239096cf7feSPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_INTR) { 1240198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to intr intercept. 
Exit reason 0x%x", 1241096cf7feSPaolo Bonzini vmcb->control.exit_code); 124285dc2aceSPaolo Bonzini return true; 124385dc2aceSPaolo Bonzini } 124485dc2aceSPaolo Bonzini 124585dc2aceSPaolo Bonzini irq_enable(); 124685dc2aceSPaolo Bonzini asm volatile ("nop"); 124785dc2aceSPaolo Bonzini irq_disable(); 124885dc2aceSPaolo Bonzini 1249096cf7feSPaolo Bonzini vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR); 1250096cf7feSPaolo Bonzini vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 125185dc2aceSPaolo Bonzini break; 125285dc2aceSPaolo Bonzini 125385dc2aceSPaolo Bonzini case 4: 125485dc2aceSPaolo Bonzini break; 125585dc2aceSPaolo Bonzini 125685dc2aceSPaolo Bonzini default: 125785dc2aceSPaolo Bonzini return true; 125885dc2aceSPaolo Bonzini } 125985dc2aceSPaolo Bonzini 126085dc2aceSPaolo Bonzini inc_test_stage(test); 126185dc2aceSPaolo Bonzini 126285dc2aceSPaolo Bonzini return get_test_stage(test) == 5; 126385dc2aceSPaolo Bonzini } 126485dc2aceSPaolo Bonzini 126585dc2aceSPaolo Bonzini static bool interrupt_check(struct svm_test *test) 126685dc2aceSPaolo Bonzini { 126785dc2aceSPaolo Bonzini return get_test_stage(test) == 5; 126885dc2aceSPaolo Bonzini } 126985dc2aceSPaolo Bonzini 1270d4db486bSCathy Avery static volatile bool nmi_fired; 1271d4db486bSCathy Avery 12724a1207f6SMaxim Levitsky static void nmi_handler(struct ex_regs *regs) 1273d4db486bSCathy Avery { 1274d4db486bSCathy Avery nmi_fired = true; 1275d4db486bSCathy Avery } 1276d4db486bSCathy Avery 1277d4db486bSCathy Avery static void nmi_prepare(struct svm_test *test) 1278d4db486bSCathy Avery { 1279d4db486bSCathy Avery default_prepare(test); 1280d4db486bSCathy Avery nmi_fired = false; 12814a1207f6SMaxim Levitsky handle_exception(NMI_VECTOR, nmi_handler); 1282d4db486bSCathy Avery set_test_stage(test, 0); 1283d4db486bSCathy Avery } 1284d4db486bSCathy Avery 1285d4db486bSCathy Avery static void nmi_test(struct svm_test *test) 1286d4db486bSCathy Avery { 1287d4db486bSCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 1288d4db486bSCathy Avery 1289d4db486bSCathy Avery report(nmi_fired, "direct NMI while running guest"); 1290d4db486bSCathy Avery 1291d4db486bSCathy Avery if (!nmi_fired) 1292d4db486bSCathy Avery set_test_stage(test, -1); 1293d4db486bSCathy Avery 1294d4db486bSCathy Avery vmmcall(); 1295d4db486bSCathy Avery 1296d4db486bSCathy Avery nmi_fired = false; 1297d4db486bSCathy Avery 1298d4db486bSCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 1299d4db486bSCathy Avery 1300d4db486bSCathy Avery if (!nmi_fired) { 1301d4db486bSCathy Avery report(nmi_fired, "intercepted pending NMI not dispatched"); 1302d4db486bSCathy Avery set_test_stage(test, -1); 1303d4db486bSCathy Avery } 1304d4db486bSCathy Avery 1305d4db486bSCathy Avery } 1306d4db486bSCathy Avery 1307d4db486bSCathy Avery static bool nmi_finished(struct svm_test *test) 1308d4db486bSCathy Avery { 1309d4db486bSCathy Avery switch (get_test_stage(test)) { 1310d4db486bSCathy Avery case 0: 1311d4db486bSCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1312198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. 
Exit reason 0x%x", 1313d4db486bSCathy Avery vmcb->control.exit_code); 1314d4db486bSCathy Avery return true; 1315d4db486bSCathy Avery } 1316d4db486bSCathy Avery vmcb->save.rip += 3; 1317d4db486bSCathy Avery 1318d4db486bSCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 1319d4db486bSCathy Avery break; 1320d4db486bSCathy Avery 1321d4db486bSCathy Avery case 1: 1322d4db486bSCathy Avery if (vmcb->control.exit_code != SVM_EXIT_NMI) { 1323198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x", 1324d4db486bSCathy Avery vmcb->control.exit_code); 1325d4db486bSCathy Avery return true; 1326d4db486bSCathy Avery } 1327d4db486bSCathy Avery 13285c3582f0SJanis Schoetterl-Glausch report_pass("NMI intercept while running guest"); 1329d4db486bSCathy Avery break; 1330d4db486bSCathy Avery 1331d4db486bSCathy Avery case 2: 1332d4db486bSCathy Avery break; 1333d4db486bSCathy Avery 1334d4db486bSCathy Avery default: 1335d4db486bSCathy Avery return true; 1336d4db486bSCathy Avery } 1337d4db486bSCathy Avery 1338d4db486bSCathy Avery inc_test_stage(test); 1339d4db486bSCathy Avery 1340d4db486bSCathy Avery return get_test_stage(test) == 3; 1341d4db486bSCathy Avery } 1342d4db486bSCathy Avery 1343d4db486bSCathy Avery static bool nmi_check(struct svm_test *test) 1344d4db486bSCathy Avery { 1345d4db486bSCathy Avery return get_test_stage(test) == 3; 1346d4db486bSCathy Avery } 1347d4db486bSCathy Avery 13489da1f4d8SCathy Avery #define NMI_DELAY 100000000ULL 13499da1f4d8SCathy Avery 13509da1f4d8SCathy Avery static void nmi_message_thread(void *_test) 13519da1f4d8SCathy Avery { 13529da1f4d8SCathy Avery struct svm_test *test = _test; 13539da1f4d8SCathy Avery 13549da1f4d8SCathy Avery while (get_test_stage(test) != 1) 13559da1f4d8SCathy Avery pause(); 13569da1f4d8SCathy Avery 13579da1f4d8SCathy Avery delay(NMI_DELAY); 13589da1f4d8SCathy Avery 13599da1f4d8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); 13609da1f4d8SCathy Avery 13619da1f4d8SCathy Avery while (get_test_stage(test) != 2) 13629da1f4d8SCathy Avery pause(); 13639da1f4d8SCathy Avery 13649da1f4d8SCathy Avery delay(NMI_DELAY); 13659da1f4d8SCathy Avery 13669da1f4d8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); 13679da1f4d8SCathy Avery } 13689da1f4d8SCathy Avery 13699da1f4d8SCathy Avery static void nmi_hlt_test(struct svm_test *test) 13709da1f4d8SCathy Avery { 13719da1f4d8SCathy Avery long long start; 13729da1f4d8SCathy Avery 13739da1f4d8SCathy Avery on_cpu_async(1, nmi_message_thread, test); 13749da1f4d8SCathy Avery 13759da1f4d8SCathy Avery start = rdtsc(); 13769da1f4d8SCathy Avery 13779da1f4d8SCathy Avery set_test_stage(test, 1); 13789da1f4d8SCathy Avery 13799da1f4d8SCathy Avery asm volatile ("hlt"); 13809da1f4d8SCathy Avery 13819da1f4d8SCathy Avery report((rdtsc() - start > NMI_DELAY) && nmi_fired, 13829da1f4d8SCathy Avery "direct NMI + hlt"); 13839da1f4d8SCathy Avery 13849da1f4d8SCathy Avery if (!nmi_fired) 13859da1f4d8SCathy Avery set_test_stage(test, -1); 13869da1f4d8SCathy Avery 13879da1f4d8SCathy Avery nmi_fired = false; 13889da1f4d8SCathy Avery 13899da1f4d8SCathy Avery vmmcall(); 13909da1f4d8SCathy Avery 13919da1f4d8SCathy Avery start = rdtsc(); 13929da1f4d8SCathy Avery 13939da1f4d8SCathy Avery set_test_stage(test, 2); 13949da1f4d8SCathy Avery 13959da1f4d8SCathy Avery asm volatile ("hlt"); 13969da1f4d8SCathy Avery 13979da1f4d8SCathy Avery report((rdtsc() - start > NMI_DELAY) && nmi_fired, 13989da1f4d8SCathy Avery "intercepted NMI 
+ hlt"); 13999da1f4d8SCathy Avery 14009da1f4d8SCathy Avery if (!nmi_fired) { 14019da1f4d8SCathy Avery report(nmi_fired, "intercepted pending NMI not dispatched"); 14029da1f4d8SCathy Avery set_test_stage(test, -1); 14037e7d9357SCathy Avery vmmcall(); 14049da1f4d8SCathy Avery } 14059da1f4d8SCathy Avery 14069da1f4d8SCathy Avery set_test_stage(test, 3); 14079da1f4d8SCathy Avery } 14089da1f4d8SCathy Avery 14099da1f4d8SCathy Avery static bool nmi_hlt_finished(struct svm_test *test) 14109da1f4d8SCathy Avery { 14119da1f4d8SCathy Avery switch (get_test_stage(test)) { 14129da1f4d8SCathy Avery case 1: 14139da1f4d8SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1414198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 14159da1f4d8SCathy Avery vmcb->control.exit_code); 14169da1f4d8SCathy Avery return true; 14179da1f4d8SCathy Avery } 14189da1f4d8SCathy Avery vmcb->save.rip += 3; 14199da1f4d8SCathy Avery 14209da1f4d8SCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 14219da1f4d8SCathy Avery break; 14229da1f4d8SCathy Avery 14239da1f4d8SCathy Avery case 2: 14249da1f4d8SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_NMI) { 1425198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x", 14269da1f4d8SCathy Avery vmcb->control.exit_code); 14279da1f4d8SCathy Avery return true; 14289da1f4d8SCathy Avery } 14299da1f4d8SCathy Avery 14305c3582f0SJanis Schoetterl-Glausch report_pass("NMI intercept while running guest"); 14319da1f4d8SCathy Avery break; 14329da1f4d8SCathy Avery 14339da1f4d8SCathy Avery case 3: 14349da1f4d8SCathy Avery break; 14359da1f4d8SCathy Avery 14369da1f4d8SCathy Avery default: 14379da1f4d8SCathy Avery return true; 14389da1f4d8SCathy Avery } 14399da1f4d8SCathy Avery 14409da1f4d8SCathy Avery return get_test_stage(test) == 3; 14419da1f4d8SCathy Avery } 14429da1f4d8SCathy Avery 14439da1f4d8SCathy Avery static bool nmi_hlt_check(struct svm_test *test) 14449da1f4d8SCathy Avery { 14459da1f4d8SCathy Avery return get_test_stage(test) == 3; 14469da1f4d8SCathy Avery } 14479da1f4d8SCathy Avery 14484b4fb247SPaolo Bonzini static volatile int count_exc = 0; 14494b4fb247SPaolo Bonzini 14504b4fb247SPaolo Bonzini static void my_isr(struct ex_regs *r) 14514b4fb247SPaolo Bonzini { 14524b4fb247SPaolo Bonzini count_exc++; 14534b4fb247SPaolo Bonzini } 14544b4fb247SPaolo Bonzini 14554b4fb247SPaolo Bonzini static void exc_inject_prepare(struct svm_test *test) 14564b4fb247SPaolo Bonzini { 14578634a266SPaolo Bonzini default_prepare(test); 14584b4fb247SPaolo Bonzini handle_exception(DE_VECTOR, my_isr); 14594b4fb247SPaolo Bonzini handle_exception(NMI_VECTOR, my_isr); 14604b4fb247SPaolo Bonzini } 14614b4fb247SPaolo Bonzini 14624b4fb247SPaolo Bonzini 14634b4fb247SPaolo Bonzini static void exc_inject_test(struct svm_test *test) 14644b4fb247SPaolo Bonzini { 14654b4fb247SPaolo Bonzini asm volatile ("vmmcall\n\tvmmcall\n\t"); 14664b4fb247SPaolo Bonzini } 14674b4fb247SPaolo Bonzini 14684b4fb247SPaolo Bonzini static bool exc_inject_finished(struct svm_test *test) 14694b4fb247SPaolo Bonzini { 14704b4fb247SPaolo Bonzini switch (get_test_stage(test)) { 14714b4fb247SPaolo Bonzini case 0: 14724b4fb247SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1473198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. 
Exit reason 0x%x", 14744b4fb247SPaolo Bonzini vmcb->control.exit_code); 14754b4fb247SPaolo Bonzini return true; 14764b4fb247SPaolo Bonzini } 14772c1ca866SNadav Amit vmcb->save.rip += 3; 14784b4fb247SPaolo Bonzini vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID; 14794b4fb247SPaolo Bonzini break; 14804b4fb247SPaolo Bonzini 14814b4fb247SPaolo Bonzini case 1: 14824b4fb247SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_ERR) { 1483198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to error. Exit reason 0x%x", 14844b4fb247SPaolo Bonzini vmcb->control.exit_code); 14854b4fb247SPaolo Bonzini return true; 14864b4fb247SPaolo Bonzini } 14874b4fb247SPaolo Bonzini report(count_exc == 0, "exception with vector 2 not injected"); 14884b4fb247SPaolo Bonzini vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID; 14894b4fb247SPaolo Bonzini break; 14904b4fb247SPaolo Bonzini 14914b4fb247SPaolo Bonzini case 2: 14924b4fb247SPaolo Bonzini if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1493198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 14944b4fb247SPaolo Bonzini vmcb->control.exit_code); 14954b4fb247SPaolo Bonzini return true; 14964b4fb247SPaolo Bonzini } 14972c1ca866SNadav Amit vmcb->save.rip += 3; 14984b4fb247SPaolo Bonzini report(count_exc == 1, "divide overflow exception injected"); 14994b4fb247SPaolo Bonzini report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared"); 15004b4fb247SPaolo Bonzini break; 15014b4fb247SPaolo Bonzini 15024b4fb247SPaolo Bonzini default: 15034b4fb247SPaolo Bonzini return true; 15044b4fb247SPaolo Bonzini } 15054b4fb247SPaolo Bonzini 15064b4fb247SPaolo Bonzini inc_test_stage(test); 15074b4fb247SPaolo Bonzini 15084b4fb247SPaolo Bonzini return get_test_stage(test) == 3; 15094b4fb247SPaolo Bonzini } 15104b4fb247SPaolo Bonzini 15114b4fb247SPaolo Bonzini static bool exc_inject_check(struct svm_test *test) 15124b4fb247SPaolo Bonzini { 15134b4fb247SPaolo Bonzini return count_exc == 1 && get_test_stage(test) == 3; 15144b4fb247SPaolo Bonzini } 15154b4fb247SPaolo Bonzini 15169c838954SCathy Avery static volatile bool virq_fired; 15179c838954SCathy Avery 15189c838954SCathy Avery static void virq_isr(isr_regs_t *regs) 15199c838954SCathy Avery { 15209c838954SCathy Avery virq_fired = true; 15219c838954SCathy Avery } 15229c838954SCathy Avery 15239c838954SCathy Avery static void virq_inject_prepare(struct svm_test *test) 15249c838954SCathy Avery { 15259c838954SCathy Avery handle_irq(0xf1, virq_isr); 15269c838954SCathy Avery default_prepare(test); 15279c838954SCathy Avery vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 15289c838954SCathy Avery (0x0f << V_INTR_PRIO_SHIFT); // Set to the highest priority 15299c838954SCathy Avery vmcb->control.int_vector = 0xf1; 15309c838954SCathy Avery virq_fired = false; 15319c838954SCathy Avery set_test_stage(test, 0); 15329c838954SCathy Avery } 15339c838954SCathy Avery 15349c838954SCathy Avery static void virq_inject_test(struct svm_test *test) 15359c838954SCathy Avery { 15369c838954SCathy Avery if (virq_fired) { 1537198dfd0eSJanis Schoetterl-Glausch report_fail("virtual interrupt fired before L2 sti"); 15389c838954SCathy Avery set_test_stage(test, -1); 15399c838954SCathy Avery vmmcall(); 15409c838954SCathy Avery } 15419c838954SCathy Avery 15429c838954SCathy Avery irq_enable(); 15439c838954SCathy Avery asm volatile ("nop"); 15449c838954SCathy Avery irq_disable(); 15459c838954SCathy Avery 15469c838954SCathy Avery if 
(!virq_fired) { 1547198dfd0eSJanis Schoetterl-Glausch report_fail("virtual interrupt not fired after L2 sti"); 15489c838954SCathy Avery set_test_stage(test, -1); 15499c838954SCathy Avery } 15509c838954SCathy Avery 15519c838954SCathy Avery vmmcall(); 15529c838954SCathy Avery 15539c838954SCathy Avery if (virq_fired) { 1554198dfd0eSJanis Schoetterl-Glausch report_fail("virtual interrupt fired before L2 sti after VINTR intercept"); 15559c838954SCathy Avery set_test_stage(test, -1); 15569c838954SCathy Avery vmmcall(); 15579c838954SCathy Avery } 15589c838954SCathy Avery 15599c838954SCathy Avery irq_enable(); 15609c838954SCathy Avery asm volatile ("nop"); 15619c838954SCathy Avery irq_disable(); 15629c838954SCathy Avery 15639c838954SCathy Avery if (!virq_fired) { 1564198dfd0eSJanis Schoetterl-Glausch report_fail("virtual interrupt not fired after return from VINTR intercept"); 15659c838954SCathy Avery set_test_stage(test, -1); 15669c838954SCathy Avery } 15679c838954SCathy Avery 15689c838954SCathy Avery vmmcall(); 15699c838954SCathy Avery 15709c838954SCathy Avery irq_enable(); 15719c838954SCathy Avery asm volatile ("nop"); 15729c838954SCathy Avery irq_disable(); 15739c838954SCathy Avery 15749c838954SCathy Avery if (virq_fired) { 1575198dfd0eSJanis Schoetterl-Glausch report_fail("virtual interrupt fired when V_IRQ_PRIO less than V_TPR"); 15769c838954SCathy Avery set_test_stage(test, -1); 15779c838954SCathy Avery } 15789c838954SCathy Avery 15799c838954SCathy Avery vmmcall(); 15809c838954SCathy Avery vmmcall(); 15819c838954SCathy Avery } 15829c838954SCathy Avery 15839c838954SCathy Avery static bool virq_inject_finished(struct svm_test *test) 15849c838954SCathy Avery { 15859c838954SCathy Avery vmcb->save.rip += 3; 15869c838954SCathy Avery 15879c838954SCathy Avery switch (get_test_stage(test)) { 15889c838954SCathy Avery case 0: 15899c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1590198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 15919c838954SCathy Avery vmcb->control.exit_code); 15929c838954SCathy Avery return true; 15939c838954SCathy Avery } 15949c838954SCathy Avery if (vmcb->control.int_ctl & V_IRQ_MASK) { 1595198dfd0eSJanis Schoetterl-Glausch report_fail("V_IRQ not cleared on VMEXIT after firing"); 15969c838954SCathy Avery return true; 15979c838954SCathy Avery } 15989c838954SCathy Avery virq_fired = false; 15999c838954SCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 16009c838954SCathy Avery vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 16019c838954SCathy Avery (0x0f << V_INTR_PRIO_SHIFT); 16029c838954SCathy Avery break; 16039c838954SCathy Avery 16049c838954SCathy Avery case 1: 16059c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VINTR) { 1606198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vintr. Exit reason 0x%x", 16079c838954SCathy Avery vmcb->control.exit_code); 16089c838954SCathy Avery return true; 16099c838954SCathy Avery } 16109c838954SCathy Avery if (virq_fired) { 1611198dfd0eSJanis Schoetterl-Glausch report_fail("V_IRQ fired before SVM_EXIT_VINTR"); 16129c838954SCathy Avery return true; 16139c838954SCathy Avery } 16149c838954SCathy Avery vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); 16159c838954SCathy Avery break; 16169c838954SCathy Avery 16179c838954SCathy Avery case 2: 16189c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1619198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. 
Exit reason 0x%x", 16209c838954SCathy Avery vmcb->control.exit_code); 16219c838954SCathy Avery return true; 16229c838954SCathy Avery } 16239c838954SCathy Avery virq_fired = false; 16249c838954SCathy Avery // Set irq to lower priority 16259c838954SCathy Avery vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 16269c838954SCathy Avery (0x08 << V_INTR_PRIO_SHIFT); 16279c838954SCathy Avery // Raise guest TPR 16289c838954SCathy Avery vmcb->control.int_ctl |= 0x0a & V_TPR_MASK; 16299c838954SCathy Avery break; 16309c838954SCathy Avery 16319c838954SCathy Avery case 3: 16329c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1633198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 16349c838954SCathy Avery vmcb->control.exit_code); 16359c838954SCathy Avery return true; 16369c838954SCathy Avery } 16379c838954SCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 16389c838954SCathy Avery break; 16399c838954SCathy Avery 16409c838954SCathy Avery case 4: 16419c838954SCathy Avery // INTERCEPT_VINTR should be ignored because V_INTR_PRIO < V_TPR 16429c838954SCathy Avery if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1643198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 16449c838954SCathy Avery vmcb->control.exit_code); 16459c838954SCathy Avery return true; 16469c838954SCathy Avery } 16479c838954SCathy Avery break; 16489c838954SCathy Avery 16499c838954SCathy Avery default: 16509c838954SCathy Avery return true; 16519c838954SCathy Avery } 16529c838954SCathy Avery 16539c838954SCathy Avery inc_test_stage(test); 16549c838954SCathy Avery 16559c838954SCathy Avery return get_test_stage(test) == 5; 16569c838954SCathy Avery } 16579c838954SCathy Avery 16589c838954SCathy Avery static bool virq_inject_check(struct svm_test *test) 16599c838954SCathy Avery { 16609c838954SCathy Avery return get_test_stage(test) == 5; 16619c838954SCathy Avery } 16629c838954SCathy Avery 1663da338a31SMaxim Levitsky /* 1664da338a31SMaxim Levitsky * Detect nested guest RIP corruption as explained in kernel commit 1665da338a31SMaxim Levitsky * b6162e82aef19fee9c32cb3fe9ac30d9116a8c73 1666da338a31SMaxim Levitsky * 1667da338a31SMaxim Levitsky * In the assembly loop below 'ins' is executed while IO instructions 1668da338a31SMaxim Levitsky * are not intercepted; the instruction is emulated by L0. 1669da338a31SMaxim Levitsky * 1670da338a31SMaxim Levitsky * At the same time we are getting interrupts from the local APIC timer, 1671da338a31SMaxim Levitsky * and we do intercept them in L1 1672da338a31SMaxim Levitsky * 1673da338a31SMaxim Levitsky * If the interrupt happens on the insb instruction, L0 will VMexit, emulate 1674da338a31SMaxim Levitsky * the insb instruction and then it will inject the interrupt to L1 through 1675da338a31SMaxim Levitsky * a nested VMexit. Due to a bug, it would leave pre-emulation values of RIP, 1676da338a31SMaxim Levitsky * RAX and RSP in the VMCB. 1677da338a31SMaxim Levitsky * 1678da338a31SMaxim Levitsky * In our intercept handler we detect the bug by checking that RIP is that of 1679da338a31SMaxim Levitsky * the insb instruction, but its memory operand has already been written. 1680da338a31SMaxim Levitsky * This means that insb was already executed. 
1681da338a31SMaxim Levitsky */ 1682da338a31SMaxim Levitsky 1683da338a31SMaxim Levitsky static volatile int isr_cnt = 0; 1684da338a31SMaxim Levitsky static volatile uint8_t io_port_var = 0xAA; 1685da338a31SMaxim Levitsky extern const char insb_instruction_label[]; 1686da338a31SMaxim Levitsky 1687da338a31SMaxim Levitsky static void reg_corruption_isr(isr_regs_t *regs) 1688da338a31SMaxim Levitsky { 1689da338a31SMaxim Levitsky isr_cnt++; 1690da338a31SMaxim Levitsky apic_write(APIC_EOI, 0); 1691da338a31SMaxim Levitsky } 1692da338a31SMaxim Levitsky 1693da338a31SMaxim Levitsky static void reg_corruption_prepare(struct svm_test *test) 1694da338a31SMaxim Levitsky { 1695da338a31SMaxim Levitsky default_prepare(test); 1696da338a31SMaxim Levitsky set_test_stage(test, 0); 1697da338a31SMaxim Levitsky 1698da338a31SMaxim Levitsky vmcb->control.int_ctl = V_INTR_MASKING_MASK; 1699da338a31SMaxim Levitsky vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1700da338a31SMaxim Levitsky 1701da338a31SMaxim Levitsky handle_irq(TIMER_VECTOR, reg_corruption_isr); 1702da338a31SMaxim Levitsky 1703da338a31SMaxim Levitsky /* set local APIC to inject external interrupts */ 1704da338a31SMaxim Levitsky apic_write(APIC_TMICT, 0); 1705da338a31SMaxim Levitsky apic_write(APIC_TDCR, 0); 1706da338a31SMaxim Levitsky apic_write(APIC_LVTT, TIMER_VECTOR | APIC_LVT_TIMER_PERIODIC); 1707da338a31SMaxim Levitsky apic_write(APIC_TMICT, 1000); 1708da338a31SMaxim Levitsky } 1709da338a31SMaxim Levitsky 1710da338a31SMaxim Levitsky static void reg_corruption_test(struct svm_test *test) 1711da338a31SMaxim Levitsky { 1712da338a31SMaxim Levitsky /* this is endless loop, which is interrupted by the timer interrupt */ 1713da338a31SMaxim Levitsky asm volatile ( 1714da338a31SMaxim Levitsky "1:\n\t" 1715da338a31SMaxim Levitsky "movw $0x4d0, %%dx\n\t" // IO port 1716da338a31SMaxim Levitsky "lea %[io_port_var], %%rdi\n\t" 1717da338a31SMaxim Levitsky "movb $0xAA, %[io_port_var]\n\t" 1718da338a31SMaxim Levitsky "insb_instruction_label:\n\t" 1719da338a31SMaxim Levitsky "insb\n\t" 1720da338a31SMaxim Levitsky "jmp 1b\n\t" 1721da338a31SMaxim Levitsky 1722da338a31SMaxim Levitsky : [io_port_var] "=m" (io_port_var) 1723da338a31SMaxim Levitsky : /* no inputs*/ 1724da338a31SMaxim Levitsky : "rdx", "rdi" 1725da338a31SMaxim Levitsky ); 1726da338a31SMaxim Levitsky } 1727da338a31SMaxim Levitsky 1728da338a31SMaxim Levitsky static bool reg_corruption_finished(struct svm_test *test) 1729da338a31SMaxim Levitsky { 1730da338a31SMaxim Levitsky if (isr_cnt == 10000) { 17315c3582f0SJanis Schoetterl-Glausch report_pass("No RIP corruption detected after %d timer interrupts", 1732da338a31SMaxim Levitsky isr_cnt); 1733da338a31SMaxim Levitsky set_test_stage(test, 1); 1734491bbc64SMaxim Levitsky goto cleanup; 1735da338a31SMaxim Levitsky } 1736da338a31SMaxim Levitsky 1737da338a31SMaxim Levitsky if (vmcb->control.exit_code == SVM_EXIT_INTR) { 1738da338a31SMaxim Levitsky 1739da338a31SMaxim Levitsky void* guest_rip = (void*)vmcb->save.rip; 1740da338a31SMaxim Levitsky 1741da338a31SMaxim Levitsky irq_enable(); 1742da338a31SMaxim Levitsky asm volatile ("nop"); 1743da338a31SMaxim Levitsky irq_disable(); 1744da338a31SMaxim Levitsky 1745da338a31SMaxim Levitsky if (guest_rip == insb_instruction_label && io_port_var != 0xAA) { 1746198dfd0eSJanis Schoetterl-Glausch report_fail("RIP corruption detected after %d timer interrupts", 1747da338a31SMaxim Levitsky isr_cnt); 1748491bbc64SMaxim Levitsky goto cleanup; 1749da338a31SMaxim Levitsky } 1750da338a31SMaxim Levitsky 1751da338a31SMaxim Levitsky } 
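	/*
	 * Neither the pass threshold nor a corruption was observed on this
	 * exit: return false so the framework re-enters the guest and the
	 * insb loop keeps running until isr_cnt reaches 10000.
	 */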
1752da338a31SMaxim Levitsky return false; 1753491bbc64SMaxim Levitsky cleanup: 1754491bbc64SMaxim Levitsky apic_write(APIC_LVTT, APIC_LVT_TIMER_MASK); 1755491bbc64SMaxim Levitsky apic_write(APIC_TMICT, 0); 1756491bbc64SMaxim Levitsky return true; 1757491bbc64SMaxim Levitsky 1758da338a31SMaxim Levitsky } 1759da338a31SMaxim Levitsky 1760da338a31SMaxim Levitsky static bool reg_corruption_check(struct svm_test *test) 1761da338a31SMaxim Levitsky { 1762da338a31SMaxim Levitsky return get_test_stage(test) == 1; 1763da338a31SMaxim Levitsky } 1764da338a31SMaxim Levitsky 17654770e9c8SCathy Avery static void get_tss_entry(void *data) 17664770e9c8SCathy Avery { 1767a7f32d87SPaolo Bonzini *((gdt_entry_t **)data) = get_tss_descr(); 17684770e9c8SCathy Avery } 17694770e9c8SCathy Avery 17704770e9c8SCathy Avery static int orig_cpu_count; 17714770e9c8SCathy Avery 17724770e9c8SCathy Avery static void init_startup_prepare(struct svm_test *test) 17734770e9c8SCathy Avery { 1774a7f32d87SPaolo Bonzini gdt_entry_t *tss_entry; 17754770e9c8SCathy Avery int i; 17764770e9c8SCathy Avery 17774770e9c8SCathy Avery on_cpu(1, get_tss_entry, &tss_entry); 17784770e9c8SCathy Avery 1779*d36b378fSVarad Gautam orig_cpu_count = atomic_read(&cpu_online_count); 17804770e9c8SCathy Avery 17814770e9c8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 17824770e9c8SCathy Avery id_map[1]); 17834770e9c8SCathy Avery 17844770e9c8SCathy Avery delay(100000000ULL); 17854770e9c8SCathy Avery 1786*d36b378fSVarad Gautam atomic_dec(&cpu_online_count); 17874770e9c8SCathy Avery 1788a7f32d87SPaolo Bonzini tss_entry->type &= ~DESC_BUSY; 17894770e9c8SCathy Avery 17904770e9c8SCathy Avery apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_STARTUP, id_map[1]); 17914770e9c8SCathy Avery 1792*d36b378fSVarad Gautam for (i = 0; i < 5 && atomic_read(&cpu_online_count) < orig_cpu_count; i++) 17934770e9c8SCathy Avery delay(100000000ULL); 17944770e9c8SCathy Avery } 17954770e9c8SCathy Avery 17964770e9c8SCathy Avery static bool init_startup_finished(struct svm_test *test) 17974770e9c8SCathy Avery { 17984770e9c8SCathy Avery return true; 17994770e9c8SCathy Avery } 18004770e9c8SCathy Avery 18014770e9c8SCathy Avery static bool init_startup_check(struct svm_test *test) 18024770e9c8SCathy Avery { 1803*d36b378fSVarad Gautam return atomic_read(&cpu_online_count) == orig_cpu_count; 18044770e9c8SCathy Avery } 18054770e9c8SCathy Avery 1806d5da6dfeSCathy Avery static volatile bool init_intercept; 1807d5da6dfeSCathy Avery 1808d5da6dfeSCathy Avery static void init_intercept_prepare(struct svm_test *test) 1809d5da6dfeSCathy Avery { 1810d5da6dfeSCathy Avery init_intercept = false; 1811d5da6dfeSCathy Avery vmcb->control.intercept |= (1ULL << INTERCEPT_INIT); 1812d5da6dfeSCathy Avery } 1813d5da6dfeSCathy Avery 1814d5da6dfeSCathy Avery static void init_intercept_test(struct svm_test *test) 1815d5da6dfeSCathy Avery { 1816d5da6dfeSCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 0); 1817d5da6dfeSCathy Avery } 1818d5da6dfeSCathy Avery 1819d5da6dfeSCathy Avery static bool init_intercept_finished(struct svm_test *test) 1820d5da6dfeSCathy Avery { 1821d5da6dfeSCathy Avery vmcb->save.rip += 3; 1822d5da6dfeSCathy Avery 1823d5da6dfeSCathy Avery if (vmcb->control.exit_code != SVM_EXIT_INIT) { 1824198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to init intercept. 
Exit reason 0x%x", 1825d5da6dfeSCathy Avery vmcb->control.exit_code); 1826d5da6dfeSCathy Avery 1827d5da6dfeSCathy Avery return true; 1828d5da6dfeSCathy Avery } 1829d5da6dfeSCathy Avery 1830d5da6dfeSCathy Avery init_intercept = true; 1831d5da6dfeSCathy Avery 18325c3582f0SJanis Schoetterl-Glausch report_pass("INIT to vcpu intercepted"); 1833d5da6dfeSCathy Avery 1834d5da6dfeSCathy Avery return true; 1835d5da6dfeSCathy Avery } 1836d5da6dfeSCathy Avery 1837d5da6dfeSCathy Avery static bool init_intercept_check(struct svm_test *test) 1838d5da6dfeSCathy Avery { 1839d5da6dfeSCathy Avery return init_intercept; 1840d5da6dfeSCathy Avery } 1841d5da6dfeSCathy Avery 18427839b0ecSKrish Sadhukhan /* 18437839b0ecSKrish Sadhukhan * Setting host EFLAGS.TF causes a #DB trap after the VMRUN completes on the 18447839b0ecSKrish Sadhukhan * host side (i.e., after the #VMEXIT from the guest). 18457839b0ecSKrish Sadhukhan * 18460689a980SKrish Sadhukhan * Setting host EFLAGS.RF suppresses any potential instruction breakpoint 18470689a980SKrish Sadhukhan * match on the VMRUN and completion of the VMRUN instruction clears the 18480689a980SKrish Sadhukhan * host EFLAGS.RF bit. 18490689a980SKrish Sadhukhan * 18507839b0ecSKrish Sadhukhan * [AMD APM] 18517839b0ecSKrish Sadhukhan */ 18527839b0ecSKrish Sadhukhan static volatile u8 host_rflags_guest_main_flag = 0; 18537839b0ecSKrish Sadhukhan static volatile u8 host_rflags_db_handler_flag = 0; 18547839b0ecSKrish Sadhukhan static volatile bool host_rflags_ss_on_vmrun = false; 18557839b0ecSKrish Sadhukhan static volatile bool host_rflags_vmrun_reached = false; 18567839b0ecSKrish Sadhukhan static volatile bool host_rflags_set_tf = false; 18570689a980SKrish Sadhukhan static volatile bool host_rflags_set_rf = false; 18580689a980SKrish Sadhukhan static u64 rip_detected; 18597839b0ecSKrish Sadhukhan 18607839b0ecSKrish Sadhukhan extern u64 *vmrun_rip; 18617839b0ecSKrish Sadhukhan 18627839b0ecSKrish Sadhukhan static void host_rflags_db_handler(struct ex_regs *r) 18637839b0ecSKrish Sadhukhan { 18647839b0ecSKrish Sadhukhan if (host_rflags_ss_on_vmrun) { 18657839b0ecSKrish Sadhukhan if (host_rflags_vmrun_reached) { 18660689a980SKrish Sadhukhan if (!host_rflags_set_rf) { 18677839b0ecSKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 18680689a980SKrish Sadhukhan rip_detected = r->rip; 18697839b0ecSKrish Sadhukhan } else { 18700689a980SKrish Sadhukhan r->rflags |= X86_EFLAGS_RF; 18710689a980SKrish Sadhukhan ++host_rflags_db_handler_flag; 18720689a980SKrish Sadhukhan } 18730689a980SKrish Sadhukhan } else { 18740689a980SKrish Sadhukhan if (r->rip == (u64)&vmrun_rip) { 18757839b0ecSKrish Sadhukhan host_rflags_vmrun_reached = true; 18760689a980SKrish Sadhukhan 18770689a980SKrish Sadhukhan if (host_rflags_set_rf) { 18780689a980SKrish Sadhukhan host_rflags_guest_main_flag = 0; 18790689a980SKrish Sadhukhan rip_detected = r->rip; 18800689a980SKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 18810689a980SKrish Sadhukhan 18820689a980SKrish Sadhukhan /* Trigger #DB via debug registers */ 18830689a980SKrish Sadhukhan write_dr0((void *)&vmrun_rip); 18840689a980SKrish Sadhukhan write_dr7(0x403); 18850689a980SKrish Sadhukhan } 18860689a980SKrish Sadhukhan } 18877839b0ecSKrish Sadhukhan } 18887839b0ecSKrish Sadhukhan } else { 18897839b0ecSKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 18907839b0ecSKrish Sadhukhan } 18917839b0ecSKrish Sadhukhan } 18927839b0ecSKrish Sadhukhan 18937839b0ecSKrish Sadhukhan static void host_rflags_prepare(struct svm_test *test) 18947839b0ecSKrish Sadhukhan { 18957839b0ecSKrish Sadhukhan 
default_prepare(test); 18967839b0ecSKrish Sadhukhan handle_exception(DB_VECTOR, host_rflags_db_handler); 18977839b0ecSKrish Sadhukhan set_test_stage(test, 0); 18987839b0ecSKrish Sadhukhan } 18997839b0ecSKrish Sadhukhan 19007839b0ecSKrish Sadhukhan static void host_rflags_prepare_gif_clear(struct svm_test *test) 19017839b0ecSKrish Sadhukhan { 19027839b0ecSKrish Sadhukhan if (host_rflags_set_tf) 19037839b0ecSKrish Sadhukhan write_rflags(read_rflags() | X86_EFLAGS_TF); 19047839b0ecSKrish Sadhukhan } 19057839b0ecSKrish Sadhukhan 19067839b0ecSKrish Sadhukhan static void host_rflags_test(struct svm_test *test) 19077839b0ecSKrish Sadhukhan { 19087839b0ecSKrish Sadhukhan while (1) { 19090689a980SKrish Sadhukhan if (get_test_stage(test) > 0) { 19100689a980SKrish Sadhukhan if ((host_rflags_set_tf && !host_rflags_ss_on_vmrun && !host_rflags_db_handler_flag) || 19110689a980SKrish Sadhukhan (host_rflags_set_rf && host_rflags_db_handler_flag == 1)) 19127839b0ecSKrish Sadhukhan host_rflags_guest_main_flag = 1; 19130689a980SKrish Sadhukhan } 19140689a980SKrish Sadhukhan 19150689a980SKrish Sadhukhan if (get_test_stage(test) == 4) 19167839b0ecSKrish Sadhukhan break; 19177839b0ecSKrish Sadhukhan vmmcall(); 19187839b0ecSKrish Sadhukhan } 19197839b0ecSKrish Sadhukhan } 19207839b0ecSKrish Sadhukhan 19217839b0ecSKrish Sadhukhan static bool host_rflags_finished(struct svm_test *test) 19227839b0ecSKrish Sadhukhan { 19237839b0ecSKrish Sadhukhan switch (get_test_stage(test)) { 19247839b0ecSKrish Sadhukhan case 0: 19257839b0ecSKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1926198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT. Exit reason 0x%x", 19277839b0ecSKrish Sadhukhan vmcb->control.exit_code); 19287839b0ecSKrish Sadhukhan return true; 19297839b0ecSKrish Sadhukhan } 19307839b0ecSKrish Sadhukhan vmcb->save.rip += 3; 19317839b0ecSKrish Sadhukhan /* 19327839b0ecSKrish Sadhukhan * Setting host EFLAGS.TF not immediately before VMRUN, causes 19337839b0ecSKrish Sadhukhan * #DB trap before first guest instruction is executed 19347839b0ecSKrish Sadhukhan */ 19357839b0ecSKrish Sadhukhan host_rflags_set_tf = true; 19367839b0ecSKrish Sadhukhan break; 19377839b0ecSKrish Sadhukhan case 1: 19387839b0ecSKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 19390689a980SKrish Sadhukhan host_rflags_guest_main_flag != 1) { 1940198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT or #DB handler" 19417839b0ecSKrish Sadhukhan " invoked before guest main. Exit reason 0x%x", 19427839b0ecSKrish Sadhukhan vmcb->control.exit_code); 19437839b0ecSKrish Sadhukhan return true; 19447839b0ecSKrish Sadhukhan } 19457839b0ecSKrish Sadhukhan vmcb->save.rip += 3; 19467839b0ecSKrish Sadhukhan /* 19477839b0ecSKrish Sadhukhan * Setting host EFLAGS.TF immediately before VMRUN, causes #DB 19487839b0ecSKrish Sadhukhan * trap after VMRUN completes on the host side (i.e., after 19497839b0ecSKrish Sadhukhan * VMEXIT from guest). 19507839b0ecSKrish Sadhukhan */ 19517839b0ecSKrish Sadhukhan host_rflags_ss_on_vmrun = true; 19527839b0ecSKrish Sadhukhan break; 19537839b0ecSKrish Sadhukhan case 2: 19547839b0ecSKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 19550c22fd44SPaolo Bonzini rip_detected != (u64)&vmrun_rip + 3) { 1956198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT or RIP mismatch." 
19570689a980SKrish Sadhukhan " Exit reason 0x%x, RIP actual: %lx, RIP expected: " 19580689a980SKrish Sadhukhan "%lx", vmcb->control.exit_code, 19590c22fd44SPaolo Bonzini (u64)&vmrun_rip + 3, rip_detected); 19600689a980SKrish Sadhukhan return true; 19610689a980SKrish Sadhukhan } 19620689a980SKrish Sadhukhan host_rflags_set_rf = true; 19630689a980SKrish Sadhukhan host_rflags_guest_main_flag = 0; 19640689a980SKrish Sadhukhan host_rflags_vmrun_reached = false; 19650689a980SKrish Sadhukhan vmcb->save.rip += 3; 19660689a980SKrish Sadhukhan break; 19670689a980SKrish Sadhukhan case 3: 19680689a980SKrish Sadhukhan if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 19690689a980SKrish Sadhukhan rip_detected != (u64)&vmrun_rip || 19700689a980SKrish Sadhukhan host_rflags_guest_main_flag != 1 || 19710689a980SKrish Sadhukhan host_rflags_db_handler_flag > 1 || 19720689a980SKrish Sadhukhan read_rflags() & X86_EFLAGS_RF) { 1973198dfd0eSJanis Schoetterl-Glausch report_fail("Unexpected VMEXIT or RIP mismatch or " 19740689a980SKrish Sadhukhan "EFLAGS.RF not cleared." 19750689a980SKrish Sadhukhan " Exit reason 0x%x, RIP actual: %lx, RIP expected: " 19760689a980SKrish Sadhukhan "%lx", vmcb->control.exit_code, 19770689a980SKrish Sadhukhan (u64)&vmrun_rip, rip_detected); 19787839b0ecSKrish Sadhukhan return true; 19797839b0ecSKrish Sadhukhan } 19807839b0ecSKrish Sadhukhan host_rflags_set_tf = false; 19810689a980SKrish Sadhukhan host_rflags_set_rf = false; 19827839b0ecSKrish Sadhukhan vmcb->save.rip += 3; 19837839b0ecSKrish Sadhukhan break; 19847839b0ecSKrish Sadhukhan default: 19857839b0ecSKrish Sadhukhan return true; 19867839b0ecSKrish Sadhukhan } 19877839b0ecSKrish Sadhukhan inc_test_stage(test); 19880689a980SKrish Sadhukhan return get_test_stage(test) == 5; 19897839b0ecSKrish Sadhukhan } 19907839b0ecSKrish Sadhukhan 19917839b0ecSKrish Sadhukhan static bool host_rflags_check(struct svm_test *test) 19927839b0ecSKrish Sadhukhan { 19930689a980SKrish Sadhukhan return get_test_stage(test) == 4; 19947839b0ecSKrish Sadhukhan } 19957839b0ecSKrish Sadhukhan 19968660d1b5SKrish Sadhukhan #define TEST(name) { #name, .v2 = name } 19978660d1b5SKrish Sadhukhan 1998ba29942cSKrish Sadhukhan /* 1999ba29942cSKrish Sadhukhan * v2 tests 2000ba29942cSKrish Sadhukhan */ 2001ba29942cSKrish Sadhukhan 2002f32183f5SJim Mattson /* 2003f32183f5SJim Mattson * Ensure that kvm recalculates the L1 guest's CPUID.01H:ECX.OSXSAVE 2004f32183f5SJim Mattson * after VM-exit from an L2 guest that sets CR4.OSXSAVE to a different 2005f32183f5SJim Mattson * value than in L1. 
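 *
 * The test below therefore runs L1 with CR4.OSXSAVE set (also mirrored into
 * vmcb->save.cr4), lets the L2 guest clear the bit, and checks that
 * CPUID.01H:ECX.OSXSAVE is still reported as set in L1 both before and
 * after the VMRUN.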
2006f32183f5SJim Mattson */ 2007f32183f5SJim Mattson 2008f32183f5SJim Mattson static void svm_cr4_osxsave_test_guest(struct svm_test *test) 2009f32183f5SJim Mattson { 2010f32183f5SJim Mattson write_cr4(read_cr4() & ~X86_CR4_OSXSAVE); 2011f32183f5SJim Mattson } 2012f32183f5SJim Mattson 2013f32183f5SJim Mattson static void svm_cr4_osxsave_test(void) 2014f32183f5SJim Mattson { 2015f32183f5SJim Mattson if (!this_cpu_has(X86_FEATURE_XSAVE)) { 2016f32183f5SJim Mattson report_skip("XSAVE not detected"); 2017f32183f5SJim Mattson return; 2018f32183f5SJim Mattson } 2019f32183f5SJim Mattson 2020f32183f5SJim Mattson if (!(read_cr4() & X86_CR4_OSXSAVE)) { 2021f32183f5SJim Mattson unsigned long cr4 = read_cr4() | X86_CR4_OSXSAVE; 2022f32183f5SJim Mattson 2023f32183f5SJim Mattson write_cr4(cr4); 2024f32183f5SJim Mattson vmcb->save.cr4 = cr4; 2025f32183f5SJim Mattson } 2026f32183f5SJim Mattson 2027f32183f5SJim Mattson report(cpuid_osxsave(), "CPUID.01H:ECX.XSAVE set before VMRUN"); 2028f32183f5SJim Mattson 2029f32183f5SJim Mattson test_set_guest(svm_cr4_osxsave_test_guest); 2030f32183f5SJim Mattson report(svm_vmrun() == SVM_EXIT_VMMCALL, 2031f32183f5SJim Mattson "svm_cr4_osxsave_test_guest finished with VMMCALL"); 2032f32183f5SJim Mattson 2033f32183f5SJim Mattson report(cpuid_osxsave(), "CPUID.01H:ECX.XSAVE set after VMRUN"); 2034f32183f5SJim Mattson } 2035f32183f5SJim Mattson 2036ba29942cSKrish Sadhukhan static void basic_guest_main(struct svm_test *test) 2037ba29942cSKrish Sadhukhan { 2038ba29942cSKrish Sadhukhan } 2039ba29942cSKrish Sadhukhan 2040eae10e8fSKrish Sadhukhan 2041eae10e8fSKrish Sadhukhan #define SVM_TEST_REG_RESERVED_BITS(start, end, inc, str_name, reg, val, \ 2042eae10e8fSKrish Sadhukhan resv_mask) \ 2043eae10e8fSKrish Sadhukhan { \ 2044eae10e8fSKrish Sadhukhan u64 tmp, mask; \ 2045eae10e8fSKrish Sadhukhan int i; \ 2046eae10e8fSKrish Sadhukhan \ 2047eae10e8fSKrish Sadhukhan for (i = start; i <= end; i = i + inc) { \ 2048eae10e8fSKrish Sadhukhan mask = 1ull << i; \ 2049eae10e8fSKrish Sadhukhan if (!(mask & resv_mask)) \ 2050eae10e8fSKrish Sadhukhan continue; \ 2051eae10e8fSKrish Sadhukhan tmp = val | mask; \ 2052eae10e8fSKrish Sadhukhan reg = tmp; \ 2053eae10e8fSKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "Test %s %d:%d: %lx", \ 2054eae10e8fSKrish Sadhukhan str_name, end, start, tmp); \ 2055eae10e8fSKrish Sadhukhan } \ 2056eae10e8fSKrish Sadhukhan } 2057eae10e8fSKrish Sadhukhan 20586d0ecbf6SKrish Sadhukhan #define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask, \ 2059cb6524f3SPaolo Bonzini exit_code, test_name) \ 2060a79c9495SKrish Sadhukhan { \ 2061a79c9495SKrish Sadhukhan u64 tmp, mask; \ 20628ae6d77fSSean Christopherson u32 r; \ 2063a79c9495SKrish Sadhukhan int i; \ 2064a79c9495SKrish Sadhukhan \ 2065a79c9495SKrish Sadhukhan for (i = start; i <= end; i = i + inc) { \ 2066a79c9495SKrish Sadhukhan mask = 1ull << i; \ 2067a79c9495SKrish Sadhukhan if (!(mask & resv_mask)) \ 2068a79c9495SKrish Sadhukhan continue; \ 2069a79c9495SKrish Sadhukhan tmp = val | mask; \ 2070a79c9495SKrish Sadhukhan switch (cr) { \ 2071a79c9495SKrish Sadhukhan case 0: \ 2072a79c9495SKrish Sadhukhan vmcb->save.cr0 = tmp; \ 2073a79c9495SKrish Sadhukhan break; \ 2074a79c9495SKrish Sadhukhan case 3: \ 2075a79c9495SKrish Sadhukhan vmcb->save.cr3 = tmp; \ 2076a79c9495SKrish Sadhukhan break; \ 2077a79c9495SKrish Sadhukhan case 4: \ 2078a79c9495SKrish Sadhukhan vmcb->save.cr4 = tmp; \ 2079a79c9495SKrish Sadhukhan } \ 20808ae6d77fSSean Christopherson r = svm_vmrun(); \ 20818ae6d77fSSean Christopherson 
report(r == exit_code, "Test CR%d %s%d:%d: %lx, wanted exit 0x%x, got 0x%x", \ 20828ae6d77fSSean Christopherson cr, test_name, end, start, tmp, exit_code, r); \ 2083a79c9495SKrish Sadhukhan } \ 2084a79c9495SKrish Sadhukhan } 2085e8d7a8f6SKrish Sadhukhan 2086a79c9495SKrish Sadhukhan static void test_efer(void) 2087a79c9495SKrish Sadhukhan { 2088e8d7a8f6SKrish Sadhukhan /* 2089e8d7a8f6SKrish Sadhukhan * Un-setting EFER.SVME is illegal 2090e8d7a8f6SKrish Sadhukhan */ 2091ba29942cSKrish Sadhukhan u64 efer_saved = vmcb->save.efer; 2092ba29942cSKrish Sadhukhan u64 efer = efer_saved; 2093ba29942cSKrish Sadhukhan 2094ba29942cSKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer); 2095ba29942cSKrish Sadhukhan efer &= ~EFER_SVME; 2096ba29942cSKrish Sadhukhan vmcb->save.efer = efer; 2097ba29942cSKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer); 2098ba29942cSKrish Sadhukhan vmcb->save.efer = efer_saved; 2099e8d7a8f6SKrish Sadhukhan 2100e8d7a8f6SKrish Sadhukhan /* 2101a79c9495SKrish Sadhukhan * EFER MBZ bits: 63:16, 9 2102a79c9495SKrish Sadhukhan */ 2103a79c9495SKrish Sadhukhan efer_saved = vmcb->save.efer; 2104a79c9495SKrish Sadhukhan 2105a79c9495SKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(8, 9, 1, "EFER", vmcb->save.efer, 2106a79c9495SKrish Sadhukhan efer_saved, SVM_EFER_RESERVED_MASK); 2107a79c9495SKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(16, 63, 4, "EFER", vmcb->save.efer, 2108a79c9495SKrish Sadhukhan efer_saved, SVM_EFER_RESERVED_MASK); 2109a79c9495SKrish Sadhukhan 21101d7bde08SKrish Sadhukhan /* 21111d7bde08SKrish Sadhukhan * EFER.LME and CR0.PG are both set and CR4.PAE is zero. 21121d7bde08SKrish Sadhukhan */ 21131d7bde08SKrish Sadhukhan u64 cr0_saved = vmcb->save.cr0; 21141d7bde08SKrish Sadhukhan u64 cr0; 21151d7bde08SKrish Sadhukhan u64 cr4_saved = vmcb->save.cr4; 21161d7bde08SKrish Sadhukhan u64 cr4; 21171d7bde08SKrish Sadhukhan 21181d7bde08SKrish Sadhukhan efer = efer_saved | EFER_LME; 21191d7bde08SKrish Sadhukhan vmcb->save.efer = efer; 21201d7bde08SKrish Sadhukhan cr0 = cr0_saved | X86_CR0_PG | X86_CR0_PE; 21211d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0; 21221d7bde08SKrish Sadhukhan cr4 = cr4_saved & ~X86_CR4_PAE; 21231d7bde08SKrish Sadhukhan vmcb->save.cr4 = cr4; 21241d7bde08SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 21251d7bde08SKrish Sadhukhan "CR0.PG=1 (%lx) and CR4.PAE=0 (%lx)", efer, cr0, cr4); 21261d7bde08SKrish Sadhukhan 21271d7bde08SKrish Sadhukhan /* 21281d7bde08SKrish Sadhukhan * EFER.LME and CR0.PG are both set and CR0.PE is zero. 2129fc050452SLara Lazier * CR4.PAE needs to be set as we otherwise cannot 2130fc050452SLara Lazier * determine if CR4.PAE=0 or CR0.PE=0 triggered the 2131fc050452SLara Lazier * SVM_EXIT_ERR. 21321d7bde08SKrish Sadhukhan */ 2133fc050452SLara Lazier cr4 = cr4_saved | X86_CR4_PAE; 2134fc050452SLara Lazier vmcb->save.cr4 = cr4; 21351d7bde08SKrish Sadhukhan cr0 &= ~X86_CR0_PE; 21361d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0; 21371d7bde08SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 21381d7bde08SKrish Sadhukhan "CR0.PG=1 and CR0.PE=0 (%lx)", efer, cr0); 21391d7bde08SKrish Sadhukhan 21401d7bde08SKrish Sadhukhan /* 21411d7bde08SKrish Sadhukhan * EFER.LME, CR0.PG, CR4.PAE, CS.L, and CS.D are all non-zero. 
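	 * This combination is likewise inconsistent, so VMRUN is expected to
	 * fail with SVM_EXIT_ERR.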
21421d7bde08SKrish Sadhukhan */ 21431d7bde08SKrish Sadhukhan u32 cs_attrib_saved = vmcb->save.cs.attrib; 21441d7bde08SKrish Sadhukhan u32 cs_attrib; 21451d7bde08SKrish Sadhukhan 21461d7bde08SKrish Sadhukhan cr0 |= X86_CR0_PE; 21471d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0; 21481d7bde08SKrish Sadhukhan cs_attrib = cs_attrib_saved | SVM_SELECTOR_L_MASK | 21491d7bde08SKrish Sadhukhan SVM_SELECTOR_DB_MASK; 21501d7bde08SKrish Sadhukhan vmcb->save.cs.attrib = cs_attrib; 21511d7bde08SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 21521d7bde08SKrish Sadhukhan "CR0.PG=1 (%lx), CR4.PAE=1 (%lx), CS.L=1 and CS.D=1 (%x)", 21531d7bde08SKrish Sadhukhan efer, cr0, cr4, cs_attrib); 21541d7bde08SKrish Sadhukhan 21551d7bde08SKrish Sadhukhan vmcb->save.cr0 = cr0_saved; 21561d7bde08SKrish Sadhukhan vmcb->save.cr4 = cr4_saved; 2157a79c9495SKrish Sadhukhan vmcb->save.efer = efer_saved; 21581d7bde08SKrish Sadhukhan vmcb->save.cs.attrib = cs_attrib_saved; 2159a79c9495SKrish Sadhukhan } 2160a79c9495SKrish Sadhukhan 2161a79c9495SKrish Sadhukhan static void test_cr0(void) 2162a79c9495SKrish Sadhukhan { 2163a79c9495SKrish Sadhukhan /* 2164e8d7a8f6SKrish Sadhukhan * Un-setting CR0.CD and setting CR0.NW is illegal combination 2165e8d7a8f6SKrish Sadhukhan */ 2166e8d7a8f6SKrish Sadhukhan u64 cr0_saved = vmcb->save.cr0; 2167e8d7a8f6SKrish Sadhukhan u64 cr0 = cr0_saved; 2168e8d7a8f6SKrish Sadhukhan 2169e8d7a8f6SKrish Sadhukhan cr0 |= X86_CR0_CD; 2170e8d7a8f6SKrish Sadhukhan cr0 &= ~X86_CR0_NW; 2171e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2172a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=0: %lx", 2173a79c9495SKrish Sadhukhan cr0); 2174e8d7a8f6SKrish Sadhukhan cr0 |= X86_CR0_NW; 2175e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2176a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=1: %lx", 2177a79c9495SKrish Sadhukhan cr0); 2178e8d7a8f6SKrish Sadhukhan cr0 &= ~X86_CR0_NW; 2179e8d7a8f6SKrish Sadhukhan cr0 &= ~X86_CR0_CD; 2180e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2181a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=0: %lx", 2182a79c9495SKrish Sadhukhan cr0); 2183e8d7a8f6SKrish Sadhukhan cr0 |= X86_CR0_NW; 2184e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0; 2185a79c9495SKrish Sadhukhan report (svm_vmrun() == SVM_EXIT_ERR, "Test CR0 CD=0,NW=1: %lx", 2186a79c9495SKrish Sadhukhan cr0); 2187e8d7a8f6SKrish Sadhukhan vmcb->save.cr0 = cr0_saved; 21885c052c90SKrish Sadhukhan 21895c052c90SKrish Sadhukhan /* 21905c052c90SKrish Sadhukhan * CR0[63:32] are not zero 21915c052c90SKrish Sadhukhan */ 21925c052c90SKrish Sadhukhan cr0 = cr0_saved; 2193eae10e8fSKrish Sadhukhan 2194eae10e8fSKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "CR0", vmcb->save.cr0, cr0_saved, 2195eae10e8fSKrish Sadhukhan SVM_CR0_RESERVED_MASK); 21965c052c90SKrish Sadhukhan vmcb->save.cr0 = cr0_saved; 2197a79c9495SKrish Sadhukhan } 2198eae10e8fSKrish Sadhukhan 2199a79c9495SKrish Sadhukhan static void test_cr3(void) 2200a79c9495SKrish Sadhukhan { 2201a79c9495SKrish Sadhukhan /* 2202a79c9495SKrish Sadhukhan * CR3 MBZ bits based on different modes: 220329a01803SNadav Amit * [63:52] - long mode 2204a79c9495SKrish Sadhukhan */ 2205a79c9495SKrish Sadhukhan u64 cr3_saved = vmcb->save.cr3; 2206a79c9495SKrish Sadhukhan 2207a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 63, 1, 3, cr3_saved, 2208cb6524f3SPaolo Bonzini SVM_CR3_LONG_MBZ_MASK, SVM_EXIT_ERR, ""); 22096d0ecbf6SKrish Sadhukhan 22106d0ecbf6SKrish Sadhukhan 
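	/*
	 * With every long-mode MBZ bit cleared again, VMRUN is expected to
	 * succeed and exit with SVM_EXIT_VMMCALL.
	 */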
vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_MBZ_MASK; 22116d0ecbf6SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx", 22126d0ecbf6SKrish Sadhukhan vmcb->save.cr3); 22136d0ecbf6SKrish Sadhukhan 22146d0ecbf6SKrish Sadhukhan /* 22156d0ecbf6SKrish Sadhukhan * CR3 non-MBZ reserved bits based on different modes: 2216cb6524f3SPaolo Bonzini * [11:5] [2:0] - long mode (PCIDE=0) 22176d0ecbf6SKrish Sadhukhan * [2:0] - PAE legacy mode 22186d0ecbf6SKrish Sadhukhan */ 22196d0ecbf6SKrish Sadhukhan u64 cr4_saved = vmcb->save.cr4; 22206d0ecbf6SKrish Sadhukhan u64 *pdpe = npt_get_pml4e(); 22216d0ecbf6SKrish Sadhukhan 22226d0ecbf6SKrish Sadhukhan /* 22236d0ecbf6SKrish Sadhukhan * Long mode 22246d0ecbf6SKrish Sadhukhan */ 22256d0ecbf6SKrish Sadhukhan if (this_cpu_has(X86_FEATURE_PCID)) { 22266d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved | X86_CR4_PCIDE; 22276d0ecbf6SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved, 2228cb6524f3SPaolo Bonzini SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_VMMCALL, "(PCIDE=1) "); 22296d0ecbf6SKrish Sadhukhan 22306d0ecbf6SKrish Sadhukhan vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK; 22316d0ecbf6SKrish Sadhukhan report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx", 22326d0ecbf6SKrish Sadhukhan vmcb->save.cr3); 2233cb6524f3SPaolo Bonzini } 22346d0ecbf6SKrish Sadhukhan 22356d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved & ~X86_CR4_PCIDE; 22366d0ecbf6SKrish Sadhukhan 2237993749ffSSean Christopherson if (!npt_supported()) 2238993749ffSSean Christopherson goto skip_npt_only; 2239993749ffSSean Christopherson 22406d0ecbf6SKrish Sadhukhan /* Clear P (Present) bit in NPT in order to trigger #NPF */ 22416d0ecbf6SKrish Sadhukhan pdpe[0] &= ~1ULL; 22426d0ecbf6SKrish Sadhukhan 22436d0ecbf6SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved, 2244cb6524f3SPaolo Bonzini SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_NPF, "(PCIDE=0) "); 22456d0ecbf6SKrish Sadhukhan 22466d0ecbf6SKrish Sadhukhan pdpe[0] |= 1ULL; 2247cb6524f3SPaolo Bonzini vmcb->save.cr3 = cr3_saved; 22486d0ecbf6SKrish Sadhukhan 22496d0ecbf6SKrish Sadhukhan /* 22506d0ecbf6SKrish Sadhukhan * PAE legacy 22516d0ecbf6SKrish Sadhukhan */ 22526d0ecbf6SKrish Sadhukhan pdpe[0] &= ~1ULL; 22536d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved | X86_CR4_PAE; 22546d0ecbf6SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(0, 2, 1, 3, cr3_saved, 2255cb6524f3SPaolo Bonzini SVM_CR3_PAE_LEGACY_RESERVED_MASK, SVM_EXIT_NPF, "(PAE) "); 22566d0ecbf6SKrish Sadhukhan 22576d0ecbf6SKrish Sadhukhan pdpe[0] |= 1ULL; 2258993749ffSSean Christopherson 2259993749ffSSean Christopherson skip_npt_only: 2260a79c9495SKrish Sadhukhan vmcb->save.cr3 = cr3_saved; 22616d0ecbf6SKrish Sadhukhan vmcb->save.cr4 = cr4_saved; 2262a79c9495SKrish Sadhukhan } 2263a79c9495SKrish Sadhukhan 2264d30973c3SWei Huang /* Test CR4 MBZ bits based on legacy or long modes */ 2265a79c9495SKrish Sadhukhan static void test_cr4(void) 2266a79c9495SKrish Sadhukhan { 2267a79c9495SKrish Sadhukhan u64 cr4_saved = vmcb->save.cr4; 2268a79c9495SKrish Sadhukhan u64 efer_saved = vmcb->save.efer; 2269a79c9495SKrish Sadhukhan u64 efer = efer_saved; 2270a79c9495SKrish Sadhukhan 2271a79c9495SKrish Sadhukhan efer &= ~EFER_LME; 2272a79c9495SKrish Sadhukhan vmcb->save.efer = efer; 2273a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved, 2274cb6524f3SPaolo Bonzini SVM_CR4_LEGACY_RESERVED_MASK, SVM_EXIT_ERR, ""); 2275a79c9495SKrish Sadhukhan 2276a79c9495SKrish Sadhukhan efer |= EFER_LME; 2277a79c9495SKrish Sadhukhan vmcb->save.efer = 
efer; 2278a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved, 2279cb6524f3SPaolo Bonzini SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, ""); 2280a79c9495SKrish Sadhukhan SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved, 2281cb6524f3SPaolo Bonzini SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, ""); 2282a79c9495SKrish Sadhukhan 2283a79c9495SKrish Sadhukhan vmcb->save.cr4 = cr4_saved; 2284a79c9495SKrish Sadhukhan vmcb->save.efer = efer_saved; 2285a79c9495SKrish Sadhukhan } 2286a79c9495SKrish Sadhukhan 2287a79c9495SKrish Sadhukhan static void test_dr(void) 2288a79c9495SKrish Sadhukhan { 2289eae10e8fSKrish Sadhukhan /* 2290eae10e8fSKrish Sadhukhan * DR6[63:32] and DR7[63:32] are MBZ 2291eae10e8fSKrish Sadhukhan */ 2292eae10e8fSKrish Sadhukhan u64 dr_saved = vmcb->save.dr6; 2293eae10e8fSKrish Sadhukhan 2294eae10e8fSKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR6", vmcb->save.dr6, dr_saved, 2295eae10e8fSKrish Sadhukhan SVM_DR6_RESERVED_MASK); 2296eae10e8fSKrish Sadhukhan vmcb->save.dr6 = dr_saved; 2297eae10e8fSKrish Sadhukhan 2298eae10e8fSKrish Sadhukhan dr_saved = vmcb->save.dr7; 2299eae10e8fSKrish Sadhukhan SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR7", vmcb->save.dr7, dr_saved, 2300eae10e8fSKrish Sadhukhan SVM_DR7_RESERVED_MASK); 2301eae10e8fSKrish Sadhukhan 2302eae10e8fSKrish Sadhukhan vmcb->save.dr7 = dr_saved; 2303a79c9495SKrish Sadhukhan } 2304eae10e8fSKrish Sadhukhan 2305abe82380SKrish Sadhukhan /* TODO: verify if high 32-bits are sign- or zero-extended on bare metal */ 2306abe82380SKrish Sadhukhan #define TEST_BITMAP_ADDR(save_intercept, type, addr, exit_code, \ 2307abe82380SKrish Sadhukhan msg) { \ 2308abe82380SKrish Sadhukhan vmcb->control.intercept = saved_intercept | 1ULL << type; \ 2309abe82380SKrish Sadhukhan if (type == INTERCEPT_MSR_PROT) \ 2310abe82380SKrish Sadhukhan vmcb->control.msrpm_base_pa = addr; \ 2311abe82380SKrish Sadhukhan else \ 2312abe82380SKrish Sadhukhan vmcb->control.iopm_base_pa = addr; \ 2313abe82380SKrish Sadhukhan report(svm_vmrun() == exit_code, \ 2314abe82380SKrish Sadhukhan "Test %s address: %lx", msg, addr); \ 2315abe82380SKrish Sadhukhan } 2316abe82380SKrish Sadhukhan 2317abe82380SKrish Sadhukhan /* 2318abe82380SKrish Sadhukhan * If the MSR or IOIO intercept table extends to a physical address that 2319abe82380SKrish Sadhukhan * is greater than or equal to the maximum supported physical address, the 2320abe82380SKrish Sadhukhan * guest state is illegal. 2321abe82380SKrish Sadhukhan * 2322abe82380SKrish Sadhukhan * The VMRUN instruction ignores the lower 12 bits of the address specified 2323abe82380SKrish Sadhukhan * in the VMCB. 2324abe82380SKrish Sadhukhan * 2325abe82380SKrish Sadhukhan * MSRPM spans 2 contiguous 4KB pages while IOPM spans 2 contiguous 4KB 2326abe82380SKrish Sadhukhan * pages + 1 byte. 2327abe82380SKrish Sadhukhan * 2328abe82380SKrish Sadhukhan * [APM vol 2] 2329abe82380SKrish Sadhukhan * 2330abe82380SKrish Sadhukhan * Note: Unallocated MSRPM addresses conforming to consistency checks, generate 2331abe82380SKrish Sadhukhan * #NPF. 
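 * Hence the expectations below: an in-range MSRPM/IOPM base exits with
 * SVM_EXIT_VMMCALL, while a base that would make the bitmap reach the
 * maximum supported physical address fails VMRUN with SVM_EXIT_ERR.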
2332abe82380SKrish Sadhukhan */ 2333abe82380SKrish Sadhukhan static void test_msrpm_iopm_bitmap_addrs(void) 2334abe82380SKrish Sadhukhan { 2335abe82380SKrish Sadhukhan u64 saved_intercept = vmcb->control.intercept; 2336abe82380SKrish Sadhukhan u64 addr_beyond_limit = 1ull << cpuid_maxphyaddr(); 2337abe82380SKrish Sadhukhan u64 addr = virt_to_phys(msr_bitmap) & (~((1ull << 12) - 1)); 2338abe82380SKrish Sadhukhan 2339abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2340abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR, 2341abe82380SKrish Sadhukhan "MSRPM"); 2342abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2343abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE + 1, SVM_EXIT_ERR, 2344abe82380SKrish Sadhukhan "MSRPM"); 2345abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2346abe82380SKrish Sadhukhan addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR, 2347abe82380SKrish Sadhukhan "MSRPM"); 2348abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr, 2349abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "MSRPM"); 2350abe82380SKrish Sadhukhan addr |= (1ull << 12) - 1; 2351abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr, 2352abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "MSRPM"); 2353abe82380SKrish Sadhukhan 2354abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2355abe82380SKrish Sadhukhan addr_beyond_limit - 4 * PAGE_SIZE, SVM_EXIT_VMMCALL, 2356abe82380SKrish Sadhukhan "IOPM"); 2357abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2358abe82380SKrish Sadhukhan addr_beyond_limit - 3 * PAGE_SIZE, SVM_EXIT_VMMCALL, 2359abe82380SKrish Sadhukhan "IOPM"); 2360abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2361abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE - 2, SVM_EXIT_VMMCALL, 2362abe82380SKrish Sadhukhan "IOPM"); 2363abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2364abe82380SKrish Sadhukhan addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR, 2365abe82380SKrish Sadhukhan "IOPM"); 2366abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2367abe82380SKrish Sadhukhan addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR, 2368abe82380SKrish Sadhukhan "IOPM"); 2369abe82380SKrish Sadhukhan addr = virt_to_phys(io_bitmap) & (~((1ull << 11) - 1)); 2370abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr, 2371abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "IOPM"); 2372abe82380SKrish Sadhukhan addr |= (1ull << 12) - 1; 2373abe82380SKrish Sadhukhan TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr, 2374abe82380SKrish Sadhukhan SVM_EXIT_VMMCALL, "IOPM"); 2375abe82380SKrish Sadhukhan 2376abe82380SKrish Sadhukhan vmcb->control.intercept = saved_intercept; 2377abe82380SKrish Sadhukhan } 2378abe82380SKrish Sadhukhan 2379ba3c9773SLara Lazier /* 2380ba3c9773SLara Lazier * Unlike VMSAVE, VMRUN seems not to update the value of noncanonical 2381ba3c9773SLara Lazier * segment bases in the VMCB. However, VMENTRY succeeds as documented. 
2382ba3c9773SLara Lazier */ 2383ba3c9773SLara Lazier #define TEST_CANONICAL_VMRUN(seg_base, msg) \ 2384a99070ebSKrish Sadhukhan saved_addr = seg_base; \ 2385a99070ebSKrish Sadhukhan seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \ 2386ba3c9773SLara Lazier return_value = svm_vmrun(); \ 2387ba3c9773SLara Lazier report(return_value == SVM_EXIT_VMMCALL, \ 2388ba3c9773SLara Lazier "Successful VMRUN with noncanonical %s.base", msg); \ 2389a99070ebSKrish Sadhukhan seg_base = saved_addr; 2390a99070ebSKrish Sadhukhan 2391ba3c9773SLara Lazier 2392ba3c9773SLara Lazier #define TEST_CANONICAL_VMLOAD(seg_base, msg) \ 2393ba3c9773SLara Lazier saved_addr = seg_base; \ 2394ba3c9773SLara Lazier seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \ 2395ba3c9773SLara Lazier asm volatile ("vmload %0" : : "a"(vmcb_phys) : "memory"); \ 2396ba3c9773SLara Lazier asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory"); \ 2397ba3c9773SLara Lazier report(is_canonical(seg_base), \ 2398ba3c9773SLara Lazier "Test %s.base for canonical form: %lx", msg, seg_base); \ 2399ba3c9773SLara Lazier seg_base = saved_addr; 2400ba3c9773SLara Lazier 2401ba3c9773SLara Lazier static void test_canonicalization(void) 2402a99070ebSKrish Sadhukhan { 2403a99070ebSKrish Sadhukhan u64 saved_addr; 2404ba3c9773SLara Lazier u64 return_value; 2405ba3c9773SLara Lazier u64 addr_limit; 2406ba3c9773SLara Lazier u64 vmcb_phys = virt_to_phys(vmcb); 2407ba3c9773SLara Lazier 2408ba3c9773SLara Lazier addr_limit = (this_cpu_has(X86_FEATURE_LA57)) ? 57 : 48; 2409a99070ebSKrish Sadhukhan u64 noncanonical_mask = NONCANONICAL & ~((1ul << addr_limit) - 1); 2410a99070ebSKrish Sadhukhan 2411ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.fs.base, "FS"); 2412ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.gs.base, "GS"); 2413ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.ldtr.base, "LDTR"); 2414ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.tr.base, "TR"); 2415ba3c9773SLara Lazier TEST_CANONICAL_VMLOAD(vmcb->save.kernel_gs_base, "KERNEL GS"); 2416ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.es.base, "ES"); 2417ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.cs.base, "CS"); 2418ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.ss.base, "SS"); 2419ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.ds.base, "DS"); 2420ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.gdtr.base, "GDTR"); 2421ba3c9773SLara Lazier TEST_CANONICAL_VMRUN(vmcb->save.idtr.base, "IDTR"); 2422a99070ebSKrish Sadhukhan } 2423a99070ebSKrish Sadhukhan 2424665f5677SKrish Sadhukhan /* 2425665f5677SKrish Sadhukhan * When VMRUN loads a guest value of 1 in EFLAGS.TF, that value does not 2426665f5677SKrish Sadhukhan * cause a trace trap between the VMRUN and the first guest instruction, but 2427665f5677SKrish Sadhukhan * rather after completion of the first guest instruction. 
2428665f5677SKrish Sadhukhan * 2429665f5677SKrish Sadhukhan * [APM vol 2] 2430665f5677SKrish Sadhukhan */ 2431665f5677SKrish Sadhukhan u64 guest_rflags_test_trap_rip; 2432665f5677SKrish Sadhukhan 2433665f5677SKrish Sadhukhan static void guest_rflags_test_db_handler(struct ex_regs *r) 2434665f5677SKrish Sadhukhan { 2435665f5677SKrish Sadhukhan guest_rflags_test_trap_rip = r->rip; 2436665f5677SKrish Sadhukhan r->rflags &= ~X86_EFLAGS_TF; 2437665f5677SKrish Sadhukhan } 2438665f5677SKrish Sadhukhan 2439a79c9495SKrish Sadhukhan static void svm_guest_state_test(void) 2440a79c9495SKrish Sadhukhan { 2441a79c9495SKrish Sadhukhan test_set_guest(basic_guest_main); 2442a79c9495SKrish Sadhukhan test_efer(); 2443a79c9495SKrish Sadhukhan test_cr0(); 2444a79c9495SKrish Sadhukhan test_cr3(); 2445a79c9495SKrish Sadhukhan test_cr4(); 2446a79c9495SKrish Sadhukhan test_dr(); 2447abe82380SKrish Sadhukhan test_msrpm_iopm_bitmap_addrs(); 2448ba3c9773SLara Lazier test_canonicalization(); 2449ba29942cSKrish Sadhukhan } 2450ba29942cSKrish Sadhukhan 2451665f5677SKrish Sadhukhan extern void guest_rflags_test_guest(struct svm_test *test); 2452665f5677SKrish Sadhukhan extern u64 *insn2; 2453665f5677SKrish Sadhukhan extern u64 *guest_end; 2454665f5677SKrish Sadhukhan 2455665f5677SKrish Sadhukhan asm("guest_rflags_test_guest:\n\t" 2456665f5677SKrish Sadhukhan "push %rbp\n\t" 2457665f5677SKrish Sadhukhan ".global insn2\n\t" 2458665f5677SKrish Sadhukhan "insn2:\n\t" 2459665f5677SKrish Sadhukhan "mov %rsp,%rbp\n\t" 2460665f5677SKrish Sadhukhan "vmmcall\n\t" 2461665f5677SKrish Sadhukhan "vmmcall\n\t" 2462665f5677SKrish Sadhukhan ".global guest_end\n\t" 2463665f5677SKrish Sadhukhan "guest_end:\n\t" 2464665f5677SKrish Sadhukhan "vmmcall\n\t" 2465665f5677SKrish Sadhukhan "pop %rbp\n\t" 2466665f5677SKrish Sadhukhan "ret"); 2467665f5677SKrish Sadhukhan 2468665f5677SKrish Sadhukhan static void svm_test_singlestep(void) 2469665f5677SKrish Sadhukhan { 2470665f5677SKrish Sadhukhan handle_exception(DB_VECTOR, guest_rflags_test_db_handler); 2471665f5677SKrish Sadhukhan 2472665f5677SKrish Sadhukhan /* 2473665f5677SKrish Sadhukhan * Trap expected after completion of first guest instruction 2474665f5677SKrish Sadhukhan */ 2475665f5677SKrish Sadhukhan vmcb->save.rflags |= X86_EFLAGS_TF; 2476665f5677SKrish Sadhukhan report (__svm_vmrun((u64)guest_rflags_test_guest) == SVM_EXIT_VMMCALL && 2477665f5677SKrish Sadhukhan guest_rflags_test_trap_rip == (u64)&insn2, 2478665f5677SKrish Sadhukhan "Test EFLAGS.TF on VMRUN: trap expected after completion of first guest instruction"); 2479665f5677SKrish Sadhukhan /* 2480665f5677SKrish Sadhukhan * No trap expected 2481665f5677SKrish Sadhukhan */ 2482665f5677SKrish Sadhukhan guest_rflags_test_trap_rip = 0; 2483665f5677SKrish Sadhukhan vmcb->save.rip += 3; 2484665f5677SKrish Sadhukhan vmcb->save.rflags |= X86_EFLAGS_TF; 2485665f5677SKrish Sadhukhan report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL && 2486665f5677SKrish Sadhukhan guest_rflags_test_trap_rip == 0, "Test EFLAGS.TF on VMRUN: trap not expected"); 2487665f5677SKrish Sadhukhan 2488665f5677SKrish Sadhukhan /* 2489665f5677SKrish Sadhukhan * Let guest finish execution 2490665f5677SKrish Sadhukhan */ 2491665f5677SKrish Sadhukhan vmcb->save.rip += 3; 2492665f5677SKrish Sadhukhan report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL && 2493665f5677SKrish Sadhukhan vmcb->save.rip == (u64)&guest_end, "Test EFLAGS.TF on VMRUN: guest execution completion"); 2494665f5677SKrish Sadhukhan } 2495665f5677SKrish Sadhukhan 24967a57ef5dSMaxim Levitsky static 
bool volatile svm_errata_reproduced = false; 24977a57ef5dSMaxim Levitsky static unsigned long volatile physical = 0; 24987a57ef5dSMaxim Levitsky 24997a57ef5dSMaxim Levitsky 25007a57ef5dSMaxim Levitsky /* 25017a57ef5dSMaxim Levitsky * 25027a57ef5dSMaxim Levitsky * Test the following errata: 25037a57ef5dSMaxim Levitsky * If the VMRUN/VMSAVE/VMLOAD are attempted by the nested guest, 25047a57ef5dSMaxim Levitsky * the CPU would first check the EAX against host reserved memory 25057a57ef5dSMaxim Levitsky * regions (so far only SMM_ADDR/SMM_MASK are known to cause it), 25067a57ef5dSMaxim Levitsky * and only then signal #VMexit 25077a57ef5dSMaxim Levitsky * 25087a57ef5dSMaxim Levitsky * Try to reproduce this by trying vmsave on each possible 4K aligned memory 25097a57ef5dSMaxim Levitsky * address in the low 4G where the SMM area has to reside. 25107a57ef5dSMaxim Levitsky */ 25117a57ef5dSMaxim Levitsky 25127a57ef5dSMaxim Levitsky static void gp_isr(struct ex_regs *r) 25137a57ef5dSMaxim Levitsky { 25147a57ef5dSMaxim Levitsky svm_errata_reproduced = true; 25157a57ef5dSMaxim Levitsky /* skip over the vmsave instruction*/ 25167a57ef5dSMaxim Levitsky r->rip += 3; 25177a57ef5dSMaxim Levitsky } 25187a57ef5dSMaxim Levitsky 25197a57ef5dSMaxim Levitsky static void svm_vmrun_errata_test(void) 25207a57ef5dSMaxim Levitsky { 25217a57ef5dSMaxim Levitsky unsigned long *last_page = NULL; 25227a57ef5dSMaxim Levitsky 25237a57ef5dSMaxim Levitsky handle_exception(GP_VECTOR, gp_isr); 25247a57ef5dSMaxim Levitsky 25257a57ef5dSMaxim Levitsky while (!svm_errata_reproduced) { 25267a57ef5dSMaxim Levitsky 25277a57ef5dSMaxim Levitsky unsigned long *page = alloc_pages(1); 25287a57ef5dSMaxim Levitsky 25297a57ef5dSMaxim Levitsky if (!page) { 25305c3582f0SJanis Schoetterl-Glausch report_pass("All guest memory tested, no bug found"); 25317a57ef5dSMaxim Levitsky break; 25327a57ef5dSMaxim Levitsky } 25337a57ef5dSMaxim Levitsky 25347a57ef5dSMaxim Levitsky physical = virt_to_phys(page); 25357a57ef5dSMaxim Levitsky 25367a57ef5dSMaxim Levitsky asm volatile ( 25377a57ef5dSMaxim Levitsky "mov %[_physical], %%rax\n\t" 25387a57ef5dSMaxim Levitsky "vmsave %%rax\n\t" 25397a57ef5dSMaxim Levitsky 25407a57ef5dSMaxim Levitsky : [_physical] "=m" (physical) 25417a57ef5dSMaxim Levitsky : /* no inputs*/ 25427a57ef5dSMaxim Levitsky : "rax" /*clobbers*/ 25437a57ef5dSMaxim Levitsky ); 25447a57ef5dSMaxim Levitsky 25457a57ef5dSMaxim Levitsky if (svm_errata_reproduced) { 2546198dfd0eSJanis Schoetterl-Glausch report_fail("Got #GP exception - svm errata reproduced at 0x%lx", 25477a57ef5dSMaxim Levitsky physical); 25487a57ef5dSMaxim Levitsky break; 25497a57ef5dSMaxim Levitsky } 25507a57ef5dSMaxim Levitsky 25517a57ef5dSMaxim Levitsky *page = (unsigned long)last_page; 25527a57ef5dSMaxim Levitsky last_page = page; 25537a57ef5dSMaxim Levitsky } 25547a57ef5dSMaxim Levitsky 25557a57ef5dSMaxim Levitsky while (last_page) { 25567a57ef5dSMaxim Levitsky unsigned long *page = last_page; 25577a57ef5dSMaxim Levitsky last_page = (unsigned long *)*last_page; 25587a57ef5dSMaxim Levitsky free_pages_by_order(page, 1); 25597a57ef5dSMaxim Levitsky } 25607a57ef5dSMaxim Levitsky } 25617a57ef5dSMaxim Levitsky 25620b6f6cedSKrish Sadhukhan static void vmload_vmsave_guest_main(struct svm_test *test) 25630b6f6cedSKrish Sadhukhan { 25640b6f6cedSKrish Sadhukhan u64 vmcb_phys = virt_to_phys(vmcb); 25650b6f6cedSKrish Sadhukhan 25660b6f6cedSKrish Sadhukhan asm volatile ("vmload %0" : : "a"(vmcb_phys)); 25670b6f6cedSKrish Sadhukhan asm volatile ("vmsave %0" : : "a"(vmcb_phys)); 
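	/*
	 * Nothing else to do here: when this guest function returns, the
	 * harness-provided thunk behind test_set_guest()/svm_vmrun() issues
	 * the VMMCALL that produces the SVM_EXIT_VMMCALL the caller checks
	 * for when neither VMLOAD nor VMSAVE is intercepted.
	 */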
25680b6f6cedSKrish Sadhukhan }
25690b6f6cedSKrish Sadhukhan
25700b6f6cedSKrish Sadhukhan static void svm_vmload_vmsave(void)
25710b6f6cedSKrish Sadhukhan {
25720b6f6cedSKrish Sadhukhan u64 intercept_saved = vmcb->control.intercept;
25730b6f6cedSKrish Sadhukhan
25740b6f6cedSKrish Sadhukhan test_set_guest(vmload_vmsave_guest_main);
25750b6f6cedSKrish Sadhukhan
25760b6f6cedSKrish Sadhukhan /*
25770b6f6cedSKrish Sadhukhan * Disabling intercept for VMLOAD and VMSAVE doesn't cause
25780b6f6cedSKrish Sadhukhan * respective #VMEXIT to host
25790b6f6cedSKrish Sadhukhan */
25800b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
25810b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
25820b6f6cedSKrish Sadhukhan svm_vmrun();
25830b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
25840b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
25850b6f6cedSKrish Sadhukhan
25860b6f6cedSKrish Sadhukhan /*
25870b6f6cedSKrish Sadhukhan * Enabling intercept for VMLOAD and VMSAVE causes respective
25880b6f6cedSKrish Sadhukhan * #VMEXIT to host
25890b6f6cedSKrish Sadhukhan */
25900b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD);
25910b6f6cedSKrish Sadhukhan svm_vmrun();
25920b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test "
25930b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT");
25940b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
25950b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE);
25960b6f6cedSKrish Sadhukhan svm_vmrun();
25970b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test "
25980b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT");
25990b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
26000b6f6cedSKrish Sadhukhan svm_vmrun();
26010b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
26020b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
26030b6f6cedSKrish Sadhukhan
26040b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD);
26050b6f6cedSKrish Sadhukhan svm_vmrun();
26060b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test "
26070b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT");
26080b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD);
26090b6f6cedSKrish Sadhukhan svm_vmrun();
26100b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
26110b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
26120b6f6cedSKrish Sadhukhan
26130b6f6cedSKrish Sadhukhan vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE);
26140b6f6cedSKrish Sadhukhan svm_vmrun();
26150b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test "
26160b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT");
26170b6f6cedSKrish Sadhukhan vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE);
26180b6f6cedSKrish Sadhukhan svm_vmrun();
26190b6f6cedSKrish Sadhukhan report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test "
26200b6f6cedSKrish Sadhukhan "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT");
26210b6f6cedSKrish Sadhukhan
26220b6f6cedSKrish Sadhukhan vmcb->control.intercept = intercept_saved;
26230b6f6cedSKrish Sadhukhan }
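/*
 * A note on the bookkeeping in svm_vmload_vmsave() above: INTERCEPT_VMRUN,
 * INTERCEPT_VMMCALL, INTERCEPT_VMLOAD and INTERCEPT_VMSAVE sit in the upper
 * half of the 64-bit vmcb->control.intercept field (bits 32-35), so the saved
 * copy has to be a full u64; truncating it to 32 bits would silently drop
 * those intercepts when the field is restored.
 *
 * The toggle/run/check pattern repeated above could also be factored into a
 * small helper.  The sketch below is illustrative only; the helper name and
 * the unused attribute are additions and nothing in the existing tests calls it.
 */
static void __attribute__((unused))
vmload_vmsave_run_once(u64 set_bits, u64 clear_bits, u32 expected_exit,
		       const char *msg)
{
	vmcb->control.intercept |= set_bits;
	vmcb->control.intercept &= ~clear_bits;
	svm_vmrun();
	report(vmcb->control.exit_code == expected_exit,
	       "Test VMLOAD/VMSAVE intercept: Expected %s #VMEXIT", msg);
}
/* e.g. vmload_vmsave_run_once(1ULL << INTERCEPT_VMLOAD, 0, SVM_EXIT_VMLOAD, "VMLOAD"); */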
26240b6f6cedSKrish Sadhukhan 2625f6972bd6SLara Lazier static void prepare_vgif_enabled(struct svm_test *test) 2626f6972bd6SLara Lazier { 2627f6972bd6SLara Lazier default_prepare(test); 2628f6972bd6SLara Lazier } 2629f6972bd6SLara Lazier 2630f6972bd6SLara Lazier static void test_vgif(struct svm_test *test) 2631f6972bd6SLara Lazier { 2632f6972bd6SLara Lazier asm volatile ("vmmcall\n\tstgi\n\tvmmcall\n\tclgi\n\tvmmcall\n\t"); 2633f6972bd6SLara Lazier } 2634f6972bd6SLara Lazier 2635f6972bd6SLara Lazier static bool vgif_finished(struct svm_test *test) 2636f6972bd6SLara Lazier { 2637f6972bd6SLara Lazier switch (get_test_stage(test)) 2638f6972bd6SLara Lazier { 2639f6972bd6SLara Lazier case 0: 2640f6972bd6SLara Lazier if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 2641198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall."); 2642f6972bd6SLara Lazier return true; 2643f6972bd6SLara Lazier } 2644f6972bd6SLara Lazier vmcb->control.int_ctl |= V_GIF_ENABLED_MASK; 2645f6972bd6SLara Lazier vmcb->save.rip += 3; 2646f6972bd6SLara Lazier inc_test_stage(test); 2647f6972bd6SLara Lazier break; 2648f6972bd6SLara Lazier case 1: 2649f6972bd6SLara Lazier if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 2650198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall."); 2651f6972bd6SLara Lazier return true; 2652f6972bd6SLara Lazier } 2653f6972bd6SLara Lazier if (!(vmcb->control.int_ctl & V_GIF_MASK)) { 2654198dfd0eSJanis Schoetterl-Glausch report_fail("Failed to set VGIF when executing STGI."); 2655f6972bd6SLara Lazier vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK; 2656f6972bd6SLara Lazier return true; 2657f6972bd6SLara Lazier } 26585c3582f0SJanis Schoetterl-Glausch report_pass("STGI set VGIF bit."); 2659f6972bd6SLara Lazier vmcb->save.rip += 3; 2660f6972bd6SLara Lazier inc_test_stage(test); 2661f6972bd6SLara Lazier break; 2662f6972bd6SLara Lazier case 2: 2663f6972bd6SLara Lazier if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 2664198dfd0eSJanis Schoetterl-Glausch report_fail("VMEXIT not due to vmmcall."); 2665f6972bd6SLara Lazier return true; 2666f6972bd6SLara Lazier } 2667f6972bd6SLara Lazier if (vmcb->control.int_ctl & V_GIF_MASK) { 2668198dfd0eSJanis Schoetterl-Glausch report_fail("Failed to clear VGIF when executing CLGI."); 2669f6972bd6SLara Lazier vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK; 2670f6972bd6SLara Lazier return true; 2671f6972bd6SLara Lazier } 26725c3582f0SJanis Schoetterl-Glausch report_pass("CLGI cleared VGIF bit."); 2673f6972bd6SLara Lazier vmcb->save.rip += 3; 2674f6972bd6SLara Lazier inc_test_stage(test); 2675f6972bd6SLara Lazier vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK; 2676f6972bd6SLara Lazier break; 2677f6972bd6SLara Lazier default: 2678f6972bd6SLara Lazier return true; 2679f6972bd6SLara Lazier break; 2680f6972bd6SLara Lazier } 2681f6972bd6SLara Lazier 2682f6972bd6SLara Lazier return get_test_stage(test) == 3; 2683f6972bd6SLara Lazier } 2684f6972bd6SLara Lazier 2685f6972bd6SLara Lazier static bool vgif_check(struct svm_test *test) 2686f6972bd6SLara Lazier { 2687f6972bd6SLara Lazier return get_test_stage(test) == 3; 2688f6972bd6SLara Lazier } 2689f6972bd6SLara Lazier 26908650dffeSMaxim Levitsky 26918650dffeSMaxim Levitsky static int pause_test_counter; 26928650dffeSMaxim Levitsky static int wait_counter; 26938650dffeSMaxim Levitsky 26948650dffeSMaxim Levitsky static void pause_filter_test_guest_main(struct svm_test *test) 26958650dffeSMaxim Levitsky { 26968650dffeSMaxim Levitsky int i; 26978650dffeSMaxim Levitsky for (i = 0 ; i < 
pause_test_counter ; i++)
26988650dffeSMaxim Levitsky pause();
26998650dffeSMaxim Levitsky
27008650dffeSMaxim Levitsky if (!wait_counter)
27018650dffeSMaxim Levitsky return;
27028650dffeSMaxim Levitsky
27038650dffeSMaxim Levitsky for (i = 0; i < wait_counter; i++)
27048650dffeSMaxim Levitsky ;
27058650dffeSMaxim Levitsky
27068650dffeSMaxim Levitsky for (i = 0 ; i < pause_test_counter ; i++)
27078650dffeSMaxim Levitsky pause();
27088650dffeSMaxim Levitsky
27098650dffeSMaxim Levitsky }
27108650dffeSMaxim Levitsky
27118650dffeSMaxim Levitsky static void pause_filter_run_test(int pause_iterations, int filter_value, int wait_iterations, int threshold)
27128650dffeSMaxim Levitsky {
27138650dffeSMaxim Levitsky test_set_guest(pause_filter_test_guest_main);
27148650dffeSMaxim Levitsky
27158650dffeSMaxim Levitsky pause_test_counter = pause_iterations;
27168650dffeSMaxim Levitsky wait_counter = wait_iterations;
27178650dffeSMaxim Levitsky
27188650dffeSMaxim Levitsky vmcb->control.pause_filter_count = filter_value;
27198650dffeSMaxim Levitsky vmcb->control.pause_filter_thresh = threshold;
27208650dffeSMaxim Levitsky svm_vmrun();
27218650dffeSMaxim Levitsky
27228650dffeSMaxim Levitsky if (filter_value <= pause_iterations || wait_iterations < threshold)
27238650dffeSMaxim Levitsky report(vmcb->control.exit_code == SVM_EXIT_PAUSE, "expected PAUSE vmexit");
27248650dffeSMaxim Levitsky else
27258650dffeSMaxim Levitsky report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "no PAUSE vmexit expected");
27268650dffeSMaxim Levitsky }
27278650dffeSMaxim Levitsky
27288650dffeSMaxim Levitsky static void pause_filter_test(void)
27298650dffeSMaxim Levitsky {
27308650dffeSMaxim Levitsky if (!pause_filter_supported()) {
27318650dffeSMaxim Levitsky report_skip("PAUSE filter not supported in the guest");
27328650dffeSMaxim Levitsky return;
27338650dffeSMaxim Levitsky }
27348650dffeSMaxim Levitsky
27358650dffeSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_PAUSE);
27368650dffeSMaxim Levitsky
27378650dffeSMaxim Levitsky // pause count (10) above filter count (9) - expect a PAUSE vmexit
27388650dffeSMaxim Levitsky pause_filter_run_test(10, 9, 0, 0);
27398650dffeSMaxim Levitsky
27408650dffeSMaxim Levitsky // pause count (20) below filter count (21) - no PAUSE vmexit expected
27418650dffeSMaxim Levitsky pause_filter_run_test(20, 21, 0, 0);
27428650dffeSMaxim Levitsky
27438650dffeSMaxim Levitsky
27448650dffeSMaxim Levitsky if (pause_threshold_supported()) {
27458650dffeSMaxim Levitsky // each pause burst stays below the filter count and the long gap between
27468650dffeSMaxim Levitsky // the bursts (relative to the threshold) resets the filter counter - no VMexit
27478650dffeSMaxim Levitsky pause_filter_run_test(20, 21, 1000, 10);
27488650dffeSMaxim Levitsky
27498650dffeSMaxim Levitsky // each pause burst stays below the filter count, but the short gap (below
27508650dffeSMaxim Levitsky // the threshold) lets the counter accumulate across bursts - expect a PAUSE vmexit
27518650dffeSMaxim Levitsky pause_filter_run_test(20, 21, 10, 1000);
27528650dffeSMaxim Levitsky } else {
27538650dffeSMaxim Levitsky report_skip("PAUSE threshold not supported in the guest");
27548650dffeSMaxim Levitsky return;
27558650dffeSMaxim Levitsky }
27568650dffeSMaxim Levitsky }
27578650dffeSMaxim Levitsky
27588650dffeSMaxim Levitsky
2759af13008dSManali Shukla static int of_test_counter;
2760af13008dSManali Shukla
2761af13008dSManali Shukla static void guest_test_of_handler(struct ex_regs *r)
2762af13008dSManali Shukla {
2763af13008dSManali Shukla of_test_counter++;
2764af13008dSManali Shukla }
2765af13008dSManali Shukla
2766af13008dSManali Shukla static void svm_of_test_guest(struct 
svm_test *test) 2767af13008dSManali Shukla { 2768af13008dSManali Shukla struct far_pointer32 fp = { 2769af13008dSManali Shukla .offset = (uintptr_t)&&into, 2770af13008dSManali Shukla .selector = KERNEL_CS32, 2771af13008dSManali Shukla }; 2772af13008dSManali Shukla uintptr_t rsp; 2773af13008dSManali Shukla 2774af13008dSManali Shukla asm volatile ("mov %%rsp, %0" : "=r"(rsp)); 2775af13008dSManali Shukla 2776af13008dSManali Shukla if (fp.offset != (uintptr_t)&&into) { 2777af13008dSManali Shukla printf("Codee address too high.\n"); 2778af13008dSManali Shukla return; 2779af13008dSManali Shukla } 2780af13008dSManali Shukla 2781af13008dSManali Shukla if ((u32)rsp != rsp) { 2782af13008dSManali Shukla printf("Stack address too high.\n"); 2783af13008dSManali Shukla } 2784af13008dSManali Shukla 2785af13008dSManali Shukla asm goto("lcall *%0" : : "m" (fp) : "rax" : into); 2786af13008dSManali Shukla return; 2787af13008dSManali Shukla into: 2788af13008dSManali Shukla 2789af13008dSManali Shukla asm volatile (".code32;" 2790af13008dSManali Shukla "movl $0x7fffffff, %eax;" 2791af13008dSManali Shukla "addl %eax, %eax;" 2792af13008dSManali Shukla "into;" 2793af13008dSManali Shukla "lret;" 2794af13008dSManali Shukla ".code64"); 2795af13008dSManali Shukla __builtin_unreachable(); 2796af13008dSManali Shukla } 2797af13008dSManali Shukla 2798af13008dSManali Shukla static void svm_into_test(void) 2799af13008dSManali Shukla { 2800af13008dSManali Shukla handle_exception(OF_VECTOR, guest_test_of_handler); 2801af13008dSManali Shukla test_set_guest(svm_of_test_guest); 2802af13008dSManali Shukla report(svm_vmrun() == SVM_EXIT_VMMCALL && of_test_counter == 1, 28033f27d772SManali Shukla "#OF is generated in L2 exception handler"); 2804af13008dSManali Shukla } 2805af13008dSManali Shukla 2806c8e16d20SManali Shukla static int bp_test_counter; 2807c8e16d20SManali Shukla 2808c8e16d20SManali Shukla static void guest_test_bp_handler(struct ex_regs *r) 2809c8e16d20SManali Shukla { 2810c8e16d20SManali Shukla bp_test_counter++; 2811c8e16d20SManali Shukla } 2812c8e16d20SManali Shukla 2813c8e16d20SManali Shukla static void svm_bp_test_guest(struct svm_test *test) 2814c8e16d20SManali Shukla { 2815c8e16d20SManali Shukla asm volatile("int3"); 2816c8e16d20SManali Shukla } 2817c8e16d20SManali Shukla 2818c8e16d20SManali Shukla static void svm_int3_test(void) 2819c8e16d20SManali Shukla { 2820c8e16d20SManali Shukla handle_exception(BP_VECTOR, guest_test_bp_handler); 2821c8e16d20SManali Shukla test_set_guest(svm_bp_test_guest); 2822c8e16d20SManali Shukla report(svm_vmrun() == SVM_EXIT_VMMCALL && bp_test_counter == 1, 2823c8e16d20SManali Shukla "#BP is handled in L2 exception handler"); 2824c8e16d20SManali Shukla } 2825c8e16d20SManali Shukla 28265c92f156SManali Shukla static int nm_test_counter; 28275c92f156SManali Shukla 28285c92f156SManali Shukla static void guest_test_nm_handler(struct ex_regs *r) 28295c92f156SManali Shukla { 28305c92f156SManali Shukla nm_test_counter++; 28315c92f156SManali Shukla write_cr0(read_cr0() & ~X86_CR0_TS); 28325c92f156SManali Shukla write_cr0(read_cr0() & ~X86_CR0_EM); 28335c92f156SManali Shukla } 28345c92f156SManali Shukla 28355c92f156SManali Shukla static void svm_nm_test_guest(struct svm_test *test) 28365c92f156SManali Shukla { 28375c92f156SManali Shukla asm volatile("fnop"); 28385c92f156SManali Shukla } 28395c92f156SManali Shukla 28405c92f156SManali Shukla /* This test checks that: 28415c92f156SManali Shukla * 28425c92f156SManali Shukla * (a) If CR0.TS is set in L2, #NM is handled by L2 when 
28435c92f156SManali Shukla * just an L2 handler is registered.
28445c92f156SManali Shukla *
28455c92f156SManali Shukla * (b) If CR0.TS is cleared and CR0.EM is set, #NM is handled
28465c92f156SManali Shukla * by L2 when just an L2 handler is registered.
28475c92f156SManali Shukla *
28485c92f156SManali Shukla * (c) If CR0.TS and CR0.EM are cleared in L2, no exception
28495c92f156SManali Shukla * is generated.
28505c92f156SManali Shukla */
28515c92f156SManali Shukla
28525c92f156SManali Shukla static void svm_nm_test(void)
28535c92f156SManali Shukla {
28545c92f156SManali Shukla handle_exception(NM_VECTOR, guest_test_nm_handler);
28555c92f156SManali Shukla write_cr0(read_cr0() & ~X86_CR0_TS);
28565c92f156SManali Shukla test_set_guest(svm_nm_test_guest);
28575c92f156SManali Shukla
28585c92f156SManali Shukla vmcb->save.cr0 = vmcb->save.cr0 | X86_CR0_TS;
28595c92f156SManali Shukla report(svm_vmrun() == SVM_EXIT_VMMCALL && nm_test_counter == 1,
28605c92f156SManali Shukla "fnop with CR0.TS set in L2, #NM is triggered");
28615c92f156SManali Shukla
28625c92f156SManali Shukla vmcb->save.cr0 = (vmcb->save.cr0 & ~X86_CR0_TS) | X86_CR0_EM;
28635c92f156SManali Shukla report(svm_vmrun() == SVM_EXIT_VMMCALL && nm_test_counter == 2,
28645c92f156SManali Shukla "fnop with CR0.EM set in L2, #NM is triggered");
28655c92f156SManali Shukla
28665c92f156SManali Shukla vmcb->save.cr0 = vmcb->save.cr0 & ~(X86_CR0_TS | X86_CR0_EM);
28675c92f156SManali Shukla report(svm_vmrun() == SVM_EXIT_VMMCALL && nm_test_counter == 2,
28683f27d772SManali Shukla "fnop with CR0.TS and CR0.EM unset, no #NM exception");
28695c92f156SManali Shukla }
2870f6972bd6SLara Lazier
2871537d39dfSMaxim Levitsky static bool check_lbr(u64 *from_expected, u64 *to_expected)
2872537d39dfSMaxim Levitsky {
2873537d39dfSMaxim Levitsky u64 from = rdmsr(MSR_IA32_LASTBRANCHFROMIP);
2874537d39dfSMaxim Levitsky u64 to = rdmsr(MSR_IA32_LASTBRANCHTOIP);
2875537d39dfSMaxim Levitsky
2876537d39dfSMaxim Levitsky if ((u64)from_expected != from) {
2877537d39dfSMaxim Levitsky report(false, "MSR_IA32_LASTBRANCHFROMIP, expected=0x%lx, actual=0x%lx",
2878537d39dfSMaxim Levitsky (u64)from_expected, from);
2879537d39dfSMaxim Levitsky return false;
2880537d39dfSMaxim Levitsky }
2881537d39dfSMaxim Levitsky
2882537d39dfSMaxim Levitsky if ((u64)to_expected != to) {
2883537d39dfSMaxim Levitsky report(false, "MSR_IA32_LASTBRANCHTOIP, expected=0x%lx, actual=0x%lx",
2884537d39dfSMaxim Levitsky (u64)to_expected, to);
2885537d39dfSMaxim Levitsky return false;
2886537d39dfSMaxim Levitsky }
2887537d39dfSMaxim Levitsky
2888537d39dfSMaxim Levitsky return true;
2889537d39dfSMaxim Levitsky }
2890537d39dfSMaxim Levitsky
2891537d39dfSMaxim Levitsky static bool check_dbgctl(u64 dbgctl, u64 dbgctl_expected)
2892537d39dfSMaxim Levitsky {
2893537d39dfSMaxim Levitsky if (dbgctl != dbgctl_expected) {
2894537d39dfSMaxim Levitsky report(false, "Unexpected MSR_IA32_DEBUGCTLMSR value 0x%lx", dbgctl);
2895537d39dfSMaxim Levitsky return false;
2896537d39dfSMaxim Levitsky }
2897537d39dfSMaxim Levitsky return true;
2898537d39dfSMaxim Levitsky }
2899537d39dfSMaxim Levitsky
2900537d39dfSMaxim Levitsky
2901537d39dfSMaxim Levitsky #define DO_BRANCH(branch_name) \
2902537d39dfSMaxim Levitsky asm volatile ( \
2903537d39dfSMaxim Levitsky # branch_name "_from:" \
2904537d39dfSMaxim Levitsky "jmp " # branch_name "_to\n" \
2905537d39dfSMaxim Levitsky "nop\n" \
2906537d39dfSMaxim Levitsky "nop\n" \
2907537d39dfSMaxim Levitsky # branch_name "_to:" \
2908537d39dfSMaxim Levitsky "nop\n" \
2909537d39dfSMaxim 
Levitsky ) 2910537d39dfSMaxim Levitsky 2911537d39dfSMaxim Levitsky 2912537d39dfSMaxim Levitsky extern u64 guest_branch0_from, guest_branch0_to; 2913537d39dfSMaxim Levitsky extern u64 guest_branch2_from, guest_branch2_to; 2914537d39dfSMaxim Levitsky 2915537d39dfSMaxim Levitsky extern u64 host_branch0_from, host_branch0_to; 2916537d39dfSMaxim Levitsky extern u64 host_branch2_from, host_branch2_to; 2917537d39dfSMaxim Levitsky extern u64 host_branch3_from, host_branch3_to; 2918537d39dfSMaxim Levitsky extern u64 host_branch4_from, host_branch4_to; 2919537d39dfSMaxim Levitsky 2920537d39dfSMaxim Levitsky u64 dbgctl; 2921537d39dfSMaxim Levitsky 2922537d39dfSMaxim Levitsky static void svm_lbrv_test_guest1(void) 2923537d39dfSMaxim Levitsky { 2924537d39dfSMaxim Levitsky /* 2925537d39dfSMaxim Levitsky * This guest expects the LBR to be already enabled when it starts, 2926537d39dfSMaxim Levitsky * it does a branch, and then disables the LBR and then checks. 2927537d39dfSMaxim Levitsky */ 2928537d39dfSMaxim Levitsky 2929537d39dfSMaxim Levitsky DO_BRANCH(guest_branch0); 2930537d39dfSMaxim Levitsky 2931537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2932537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2933537d39dfSMaxim Levitsky 2934537d39dfSMaxim Levitsky if (dbgctl != DEBUGCTLMSR_LBR) 2935537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2936537d39dfSMaxim Levitsky if (rdmsr(MSR_IA32_DEBUGCTLMSR) != 0) 2937537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2938537d39dfSMaxim Levitsky if (rdmsr(MSR_IA32_LASTBRANCHFROMIP) != (u64)&guest_branch0_from) 2939537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2940537d39dfSMaxim Levitsky if (rdmsr(MSR_IA32_LASTBRANCHTOIP) != (u64)&guest_branch0_to) 2941537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2942537d39dfSMaxim Levitsky 2943537d39dfSMaxim Levitsky asm volatile ("vmmcall\n"); 2944537d39dfSMaxim Levitsky } 2945537d39dfSMaxim Levitsky 2946537d39dfSMaxim Levitsky static void svm_lbrv_test_guest2(void) 2947537d39dfSMaxim Levitsky { 2948537d39dfSMaxim Levitsky /* 2949537d39dfSMaxim Levitsky * This guest expects the LBR to be disabled when it starts, 2950537d39dfSMaxim Levitsky * enables it, does a branch, disables it and then checks. 
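 * Before enabling the LBR it also verifies that the LBR MSRs still hold the
 * host_branch2 from/to values that the host side of the test recorded (or, in
 * the nested LBRV case, loaded via the VMCB), i.e. the state it is expected
 * to inherit on entry.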
2951537d39dfSMaxim Levitsky */ 2952537d39dfSMaxim Levitsky 2953537d39dfSMaxim Levitsky DO_BRANCH(guest_branch1); 2954537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2955537d39dfSMaxim Levitsky 2956537d39dfSMaxim Levitsky if (dbgctl != 0) 2957537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2958537d39dfSMaxim Levitsky 2959537d39dfSMaxim Levitsky if (rdmsr(MSR_IA32_LASTBRANCHFROMIP) != (u64)&host_branch2_from) 2960537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2961537d39dfSMaxim Levitsky if (rdmsr(MSR_IA32_LASTBRANCHTOIP) != (u64)&host_branch2_to) 2962537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2963537d39dfSMaxim Levitsky 2964537d39dfSMaxim Levitsky 2965537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 2966537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2967537d39dfSMaxim Levitsky DO_BRANCH(guest_branch2); 2968537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2969537d39dfSMaxim Levitsky 2970537d39dfSMaxim Levitsky if (dbgctl != DEBUGCTLMSR_LBR) 2971537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2972537d39dfSMaxim Levitsky if (rdmsr(MSR_IA32_LASTBRANCHFROMIP) != (u64)&guest_branch2_from) 2973537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2974537d39dfSMaxim Levitsky if (rdmsr(MSR_IA32_LASTBRANCHTOIP) != (u64)&guest_branch2_to) 2975537d39dfSMaxim Levitsky asm volatile("ud2\n"); 2976537d39dfSMaxim Levitsky 2977537d39dfSMaxim Levitsky asm volatile ("vmmcall\n"); 2978537d39dfSMaxim Levitsky } 2979537d39dfSMaxim Levitsky 2980537d39dfSMaxim Levitsky static void svm_lbrv_test0(void) 2981537d39dfSMaxim Levitsky { 2982537d39dfSMaxim Levitsky report(true, "Basic LBR test"); 2983537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 2984537d39dfSMaxim Levitsky DO_BRANCH(host_branch0); 2985537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2986537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2987537d39dfSMaxim Levitsky 2988537d39dfSMaxim Levitsky check_dbgctl(dbgctl, DEBUGCTLMSR_LBR); 2989537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2990537d39dfSMaxim Levitsky check_dbgctl(dbgctl, 0); 2991537d39dfSMaxim Levitsky 2992537d39dfSMaxim Levitsky check_lbr(&host_branch0_from, &host_branch0_to); 2993537d39dfSMaxim Levitsky } 2994537d39dfSMaxim Levitsky 2995537d39dfSMaxim Levitsky static void svm_lbrv_test1(void) 2996537d39dfSMaxim Levitsky { 2997537d39dfSMaxim Levitsky report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host(1)"); 2998537d39dfSMaxim Levitsky 2999537d39dfSMaxim Levitsky vmcb->save.rip = (ulong)svm_lbrv_test_guest1; 3000537d39dfSMaxim Levitsky vmcb->control.virt_ext = 0; 3001537d39dfSMaxim Levitsky 3002537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3003537d39dfSMaxim Levitsky DO_BRANCH(host_branch1); 3004537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 3005537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3006537d39dfSMaxim Levitsky 3007537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 3008537d39dfSMaxim Levitsky report(false, "VMEXIT not due to vmmcall. 
Exit reason 0x%x", 3009537d39dfSMaxim Levitsky vmcb->control.exit_code); 3010537d39dfSMaxim Levitsky return; 3011537d39dfSMaxim Levitsky } 3012537d39dfSMaxim Levitsky 3013537d39dfSMaxim Levitsky check_dbgctl(dbgctl, 0); 3014537d39dfSMaxim Levitsky check_lbr(&guest_branch0_from, &guest_branch0_to); 3015537d39dfSMaxim Levitsky } 3016537d39dfSMaxim Levitsky 3017537d39dfSMaxim Levitsky static void svm_lbrv_test2(void) 3018537d39dfSMaxim Levitsky { 3019537d39dfSMaxim Levitsky report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host(2)"); 3020537d39dfSMaxim Levitsky 3021537d39dfSMaxim Levitsky vmcb->save.rip = (ulong)svm_lbrv_test_guest2; 3022537d39dfSMaxim Levitsky vmcb->control.virt_ext = 0; 3023537d39dfSMaxim Levitsky 3024537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3025537d39dfSMaxim Levitsky DO_BRANCH(host_branch2); 3026537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3027537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 3028537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3029537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3030537d39dfSMaxim Levitsky 3031537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 3032537d39dfSMaxim Levitsky report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 3033537d39dfSMaxim Levitsky vmcb->control.exit_code); 3034537d39dfSMaxim Levitsky return; 3035537d39dfSMaxim Levitsky } 3036537d39dfSMaxim Levitsky 3037537d39dfSMaxim Levitsky check_dbgctl(dbgctl, 0); 3038537d39dfSMaxim Levitsky check_lbr(&guest_branch2_from, &guest_branch2_to); 3039537d39dfSMaxim Levitsky } 3040537d39dfSMaxim Levitsky 3041537d39dfSMaxim Levitsky static void svm_lbrv_nested_test1(void) 3042537d39dfSMaxim Levitsky { 3043537d39dfSMaxim Levitsky if (!lbrv_supported()) { 3044537d39dfSMaxim Levitsky report_skip("LBRV not supported in the guest"); 3045537d39dfSMaxim Levitsky return; 3046537d39dfSMaxim Levitsky } 3047537d39dfSMaxim Levitsky 3048537d39dfSMaxim Levitsky report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (1)"); 3049537d39dfSMaxim Levitsky vmcb->save.rip = (ulong)svm_lbrv_test_guest1; 3050537d39dfSMaxim Levitsky vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK; 3051537d39dfSMaxim Levitsky vmcb->save.dbgctl = DEBUGCTLMSR_LBR; 3052537d39dfSMaxim Levitsky 3053537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3054537d39dfSMaxim Levitsky DO_BRANCH(host_branch3); 3055537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 3056537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3057537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3058537d39dfSMaxim Levitsky 3059537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 3060537d39dfSMaxim Levitsky report(false, "VMEXIT not due to vmmcall. 
Exit reason 0x%x", 3061537d39dfSMaxim Levitsky vmcb->control.exit_code); 3062537d39dfSMaxim Levitsky return; 3063537d39dfSMaxim Levitsky } 3064537d39dfSMaxim Levitsky 3065537d39dfSMaxim Levitsky if (vmcb->save.dbgctl != 0) { 3066537d39dfSMaxim Levitsky report(false, "unexpected virtual guest MSR_IA32_DEBUGCTLMSR value 0x%lx", vmcb->save.dbgctl); 3067537d39dfSMaxim Levitsky return; 3068537d39dfSMaxim Levitsky } 3069537d39dfSMaxim Levitsky 3070537d39dfSMaxim Levitsky check_dbgctl(dbgctl, DEBUGCTLMSR_LBR); 3071537d39dfSMaxim Levitsky check_lbr(&host_branch3_from, &host_branch3_to); 3072537d39dfSMaxim Levitsky } 30733f27d772SManali Shukla 3074537d39dfSMaxim Levitsky static void svm_lbrv_nested_test2(void) 3075537d39dfSMaxim Levitsky { 3076537d39dfSMaxim Levitsky if (!lbrv_supported()) { 3077537d39dfSMaxim Levitsky report_skip("LBRV not supported in the guest"); 3078537d39dfSMaxim Levitsky return; 3079537d39dfSMaxim Levitsky } 3080537d39dfSMaxim Levitsky 3081537d39dfSMaxim Levitsky report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (2)"); 3082537d39dfSMaxim Levitsky vmcb->save.rip = (ulong)svm_lbrv_test_guest2; 3083537d39dfSMaxim Levitsky vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK; 3084537d39dfSMaxim Levitsky 3085537d39dfSMaxim Levitsky vmcb->save.dbgctl = 0; 3086537d39dfSMaxim Levitsky vmcb->save.br_from = (u64)&host_branch2_from; 3087537d39dfSMaxim Levitsky vmcb->save.br_to = (u64)&host_branch2_to; 3088537d39dfSMaxim Levitsky 3089537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3090537d39dfSMaxim Levitsky DO_BRANCH(host_branch4); 3091537d39dfSMaxim Levitsky SVM_BARE_VMRUN; 3092537d39dfSMaxim Levitsky dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3093537d39dfSMaxim Levitsky wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3094537d39dfSMaxim Levitsky 3095537d39dfSMaxim Levitsky if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 3096537d39dfSMaxim Levitsky report(false, "VMEXIT not due to vmmcall. 
Exit reason 0x%x", 3097537d39dfSMaxim Levitsky vmcb->control.exit_code); 3098537d39dfSMaxim Levitsky return; 3099537d39dfSMaxim Levitsky } 3100537d39dfSMaxim Levitsky 3101537d39dfSMaxim Levitsky check_dbgctl(dbgctl, DEBUGCTLMSR_LBR); 3102537d39dfSMaxim Levitsky check_lbr(&host_branch4_from, &host_branch4_to); 3103537d39dfSMaxim Levitsky } 3104537d39dfSMaxim Levitsky 3105c45bccfcSMaxim Levitsky 3106c45bccfcSMaxim Levitsky // test that a nested guest which does enable INTR interception 3107c45bccfcSMaxim Levitsky // but doesn't enable virtual interrupt masking works 3108c45bccfcSMaxim Levitsky 3109c45bccfcSMaxim Levitsky static volatile int dummy_isr_recevied; 3110c45bccfcSMaxim Levitsky static void dummy_isr(isr_regs_t *regs) 3111c45bccfcSMaxim Levitsky { 3112c45bccfcSMaxim Levitsky dummy_isr_recevied++; 3113c45bccfcSMaxim Levitsky eoi(); 3114c45bccfcSMaxim Levitsky } 3115c45bccfcSMaxim Levitsky 3116c45bccfcSMaxim Levitsky 3117c45bccfcSMaxim Levitsky static volatile int nmi_recevied; 3118c45bccfcSMaxim Levitsky static void dummy_nmi_handler(struct ex_regs *regs) 3119c45bccfcSMaxim Levitsky { 3120c45bccfcSMaxim Levitsky nmi_recevied++; 3121c45bccfcSMaxim Levitsky } 3122c45bccfcSMaxim Levitsky 3123c45bccfcSMaxim Levitsky 3124c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_run_guest(volatile int *counter, int expected_vmexit) 3125c45bccfcSMaxim Levitsky { 3126c45bccfcSMaxim Levitsky if (counter) 3127c45bccfcSMaxim Levitsky *counter = 0; 3128c45bccfcSMaxim Levitsky 3129c45bccfcSMaxim Levitsky sti(); // host IF value should not matter 3130c45bccfcSMaxim Levitsky clgi(); // vmrun will set back GI to 1 3131c45bccfcSMaxim Levitsky 3132c45bccfcSMaxim Levitsky svm_vmrun(); 3133c45bccfcSMaxim Levitsky 3134c45bccfcSMaxim Levitsky if (counter) 3135c45bccfcSMaxim Levitsky report(!*counter, "No interrupt expected"); 3136c45bccfcSMaxim Levitsky 3137c45bccfcSMaxim Levitsky stgi(); 3138c45bccfcSMaxim Levitsky 3139c45bccfcSMaxim Levitsky if (counter) 3140c45bccfcSMaxim Levitsky report(*counter == 1, "Interrupt is expected"); 3141c45bccfcSMaxim Levitsky 3142c45bccfcSMaxim Levitsky report (vmcb->control.exit_code == expected_vmexit, "Test expected VM exit"); 3143c45bccfcSMaxim Levitsky report(vmcb->save.rflags & X86_EFLAGS_IF, "Guest should have EFLAGS.IF set now"); 3144c45bccfcSMaxim Levitsky cli(); 3145c45bccfcSMaxim Levitsky } 3146c45bccfcSMaxim Levitsky 3147c45bccfcSMaxim Levitsky 3148c45bccfcSMaxim Levitsky // subtest: test that enabling EFLAGS.IF is enought to trigger an interrupt 3149c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_if_guest(struct svm_test *test) 3150c45bccfcSMaxim Levitsky { 3151c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3152c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3153c45bccfcSMaxim Levitsky sti(); 3154c45bccfcSMaxim Levitsky asm volatile("nop"); 3155c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3156c45bccfcSMaxim Levitsky } 3157c45bccfcSMaxim Levitsky 3158c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_if(void) 3159c45bccfcSMaxim Levitsky { 3160c45bccfcSMaxim Levitsky // make a physical interrupt to be pending 3161c45bccfcSMaxim Levitsky handle_irq(0x55, dummy_isr); 3162c45bccfcSMaxim Levitsky 3163c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3164c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3165c45bccfcSMaxim Levitsky vmcb->save.rflags &= ~X86_EFLAGS_IF; 3166c45bccfcSMaxim Levitsky 3167c45bccfcSMaxim Levitsky 
test_set_guest(svm_intr_intercept_mix_if_guest); 3168c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3169c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3170c45bccfcSMaxim Levitsky } 3171c45bccfcSMaxim Levitsky 3172c45bccfcSMaxim Levitsky 3173c45bccfcSMaxim Levitsky // subtest: test that a clever guest can trigger an interrupt by setting GIF 3174c45bccfcSMaxim Levitsky // if GIF is not intercepted 3175c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif_guest(struct svm_test *test) 3176c45bccfcSMaxim Levitsky { 3177c45bccfcSMaxim Levitsky 3178c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3179c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3180c45bccfcSMaxim Levitsky 3181c45bccfcSMaxim Levitsky // clear GIF and enable IF 3182c45bccfcSMaxim Levitsky // that should still not cause VM exit 3183c45bccfcSMaxim Levitsky clgi(); 3184c45bccfcSMaxim Levitsky sti(); 3185c45bccfcSMaxim Levitsky asm volatile("nop"); 3186c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3187c45bccfcSMaxim Levitsky 3188c45bccfcSMaxim Levitsky stgi(); 3189c45bccfcSMaxim Levitsky asm volatile("nop"); 3190c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3191c45bccfcSMaxim Levitsky } 3192c45bccfcSMaxim Levitsky 3193c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif(void) 3194c45bccfcSMaxim Levitsky { 3195c45bccfcSMaxim Levitsky handle_irq(0x55, dummy_isr); 3196c45bccfcSMaxim Levitsky 3197c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3198c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3199c45bccfcSMaxim Levitsky vmcb->save.rflags &= ~X86_EFLAGS_IF; 3200c45bccfcSMaxim Levitsky 3201c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_gif_guest); 3202c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3203c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3204c45bccfcSMaxim Levitsky } 3205c45bccfcSMaxim Levitsky 3206c45bccfcSMaxim Levitsky // subtest: test that a clever guest can trigger an interrupt by setting GIF 3207c45bccfcSMaxim Levitsky // if GIF is not intercepted and interrupt comes after guest 3208c45bccfcSMaxim Levitsky // started running 3209c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif_guest2(struct svm_test *test) 3210c45bccfcSMaxim Levitsky { 3211c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3212c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3213c45bccfcSMaxim Levitsky 3214c45bccfcSMaxim Levitsky clgi(); 3215c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3216c45bccfcSMaxim Levitsky report(!dummy_isr_recevied, "No interrupt expected"); 3217c45bccfcSMaxim Levitsky 3218c45bccfcSMaxim Levitsky stgi(); 3219c45bccfcSMaxim Levitsky asm volatile("nop"); 3220c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3221c45bccfcSMaxim Levitsky } 3222c45bccfcSMaxim Levitsky 3223c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_gif2(void) 3224c45bccfcSMaxim Levitsky { 3225c45bccfcSMaxim Levitsky handle_irq(0x55, dummy_isr); 3226c45bccfcSMaxim Levitsky 3227c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3228c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3229c45bccfcSMaxim Levitsky vmcb->save.rflags |= 
X86_EFLAGS_IF; 3230c45bccfcSMaxim Levitsky 3231c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_gif_guest2); 3232c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3233c45bccfcSMaxim Levitsky } 3234c45bccfcSMaxim Levitsky 3235c45bccfcSMaxim Levitsky 3236c45bccfcSMaxim Levitsky // subtest: test that pending NMI will be handled when guest enables GIF 3237c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_nmi_guest(struct svm_test *test) 3238c45bccfcSMaxim Levitsky { 3239c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3240c45bccfcSMaxim Levitsky report(!nmi_recevied, "No NMI expected"); 3241c45bccfcSMaxim Levitsky cli(); // should have no effect 3242c45bccfcSMaxim Levitsky 3243c45bccfcSMaxim Levitsky clgi(); 3244c45bccfcSMaxim Levitsky asm volatile("nop"); 3245c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI, 0); 3246c45bccfcSMaxim Levitsky sti(); // should have no effect 3247c45bccfcSMaxim Levitsky asm volatile("nop"); 3248c45bccfcSMaxim Levitsky report(!nmi_recevied, "No NMI expected"); 3249c45bccfcSMaxim Levitsky 3250c45bccfcSMaxim Levitsky stgi(); 3251c45bccfcSMaxim Levitsky asm volatile("nop"); 3252c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3253c45bccfcSMaxim Levitsky } 3254c45bccfcSMaxim Levitsky 3255c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_nmi(void) 3256c45bccfcSMaxim Levitsky { 3257c45bccfcSMaxim Levitsky handle_exception(2, dummy_nmi_handler); 3258c45bccfcSMaxim Levitsky 3259c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_NMI); 3260c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3261c45bccfcSMaxim Levitsky vmcb->save.rflags |= X86_EFLAGS_IF; 3262c45bccfcSMaxim Levitsky 3263c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_nmi_guest); 3264c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(&nmi_recevied, SVM_EXIT_NMI); 3265c45bccfcSMaxim Levitsky } 3266c45bccfcSMaxim Levitsky 3267c45bccfcSMaxim Levitsky // test that pending SMI will be handled when guest enables GIF 3268c45bccfcSMaxim Levitsky // TODO: can't really count #SMIs so just test that guest doesn't hang 3269c45bccfcSMaxim Levitsky // and VMexits on SMI 3270c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_smi_guest(struct svm_test *test) 3271c45bccfcSMaxim Levitsky { 3272c45bccfcSMaxim Levitsky asm volatile("nop;nop;nop;nop"); 3273c45bccfcSMaxim Levitsky 3274c45bccfcSMaxim Levitsky clgi(); 3275c45bccfcSMaxim Levitsky asm volatile("nop"); 3276c45bccfcSMaxim Levitsky apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_SMI, 0); 3277c45bccfcSMaxim Levitsky sti(); // should have no effect 3278c45bccfcSMaxim Levitsky asm volatile("nop"); 3279c45bccfcSMaxim Levitsky stgi(); 3280c45bccfcSMaxim Levitsky asm volatile("nop"); 3281c45bccfcSMaxim Levitsky report(0, "must not reach here"); 3282c45bccfcSMaxim Levitsky } 3283c45bccfcSMaxim Levitsky 3284c45bccfcSMaxim Levitsky static void svm_intr_intercept_mix_smi(void) 3285c45bccfcSMaxim Levitsky { 3286c45bccfcSMaxim Levitsky vmcb->control.intercept |= (1 << INTERCEPT_SMI); 3287c45bccfcSMaxim Levitsky vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3288c45bccfcSMaxim Levitsky test_set_guest(svm_intr_intercept_mix_smi_guest); 3289c45bccfcSMaxim Levitsky svm_intr_intercept_mix_run_guest(NULL, SVM_EXIT_SMI); 3290c45bccfcSMaxim Levitsky } 3291c45bccfcSMaxim Levitsky 32923f27d772SManali Shukla struct svm_test svm_tests[] = { 3293ad879127SKrish Sadhukhan { "null", 
default_supported, default_prepare, 3294ad879127SKrish Sadhukhan default_prepare_gif_clear, null_test, 3295ad879127SKrish Sadhukhan default_finished, null_check }, 3296ad879127SKrish Sadhukhan { "vmrun", default_supported, default_prepare, 3297ad879127SKrish Sadhukhan default_prepare_gif_clear, test_vmrun, 3298ad879127SKrish Sadhukhan default_finished, check_vmrun }, 3299ad879127SKrish Sadhukhan { "ioio", default_supported, prepare_ioio, 3300ad879127SKrish Sadhukhan default_prepare_gif_clear, test_ioio, 3301ad879127SKrish Sadhukhan ioio_finished, check_ioio }, 3302ad879127SKrish Sadhukhan { "vmrun intercept check", default_supported, prepare_no_vmrun_int, 3303ad879127SKrish Sadhukhan default_prepare_gif_clear, null_test, default_finished, 3304ad879127SKrish Sadhukhan check_no_vmrun_int }, 3305401299a5SPaolo Bonzini { "rsm", default_supported, 3306401299a5SPaolo Bonzini prepare_rsm_intercept, default_prepare_gif_clear, 3307401299a5SPaolo Bonzini test_rsm_intercept, finished_rsm_intercept, check_rsm_intercept }, 3308ad879127SKrish Sadhukhan { "cr3 read intercept", default_supported, 3309ad879127SKrish Sadhukhan prepare_cr3_intercept, default_prepare_gif_clear, 3310ad879127SKrish Sadhukhan test_cr3_intercept, default_finished, check_cr3_intercept }, 3311ad879127SKrish Sadhukhan { "cr3 read nointercept", default_supported, default_prepare, 3312ad879127SKrish Sadhukhan default_prepare_gif_clear, test_cr3_intercept, default_finished, 3313ad879127SKrish Sadhukhan check_cr3_nointercept }, 3314ad879127SKrish Sadhukhan { "cr3 read intercept emulate", smp_supported, 3315ad879127SKrish Sadhukhan prepare_cr3_intercept_bypass, default_prepare_gif_clear, 3316ad879127SKrish Sadhukhan test_cr3_intercept_bypass, default_finished, check_cr3_intercept }, 3317ad879127SKrish Sadhukhan { "dr intercept check", default_supported, prepare_dr_intercept, 3318ad879127SKrish Sadhukhan default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished, 3319ad879127SKrish Sadhukhan check_dr_intercept }, 3320ad879127SKrish Sadhukhan { "next_rip", next_rip_supported, prepare_next_rip, 3321ad879127SKrish Sadhukhan default_prepare_gif_clear, test_next_rip, 3322ad879127SKrish Sadhukhan default_finished, check_next_rip }, 3323ad879127SKrish Sadhukhan { "msr intercept check", default_supported, prepare_msr_intercept, 3324ad879127SKrish Sadhukhan default_prepare_gif_clear, test_msr_intercept, 3325ad879127SKrish Sadhukhan msr_intercept_finished, check_msr_intercept }, 3326ad879127SKrish Sadhukhan { "mode_switch", default_supported, prepare_mode_switch, 3327ad879127SKrish Sadhukhan default_prepare_gif_clear, test_mode_switch, 3328ad879127SKrish Sadhukhan mode_switch_finished, check_mode_switch }, 3329ad879127SKrish Sadhukhan { "asid_zero", default_supported, prepare_asid_zero, 3330ad879127SKrish Sadhukhan default_prepare_gif_clear, test_asid_zero, 3331ad879127SKrish Sadhukhan default_finished, check_asid_zero }, 3332ad879127SKrish Sadhukhan { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, 3333ad879127SKrish Sadhukhan default_prepare_gif_clear, sel_cr0_bug_test, 3334ad879127SKrish Sadhukhan sel_cr0_bug_finished, sel_cr0_bug_check }, 333510a65fc4SNadav Amit { "tsc_adjust", tsc_adjust_supported, tsc_adjust_prepare, 3336ad879127SKrish Sadhukhan default_prepare_gif_clear, tsc_adjust_test, 3337ad879127SKrish Sadhukhan default_finished, tsc_adjust_check }, 3338ad879127SKrish Sadhukhan { "latency_run_exit", default_supported, latency_prepare, 3339ad879127SKrish Sadhukhan default_prepare_gif_clear, latency_test, 3340ad879127SKrish 
Sadhukhan latency_finished, latency_check }, 3341f7fa53dcSPaolo Bonzini { "latency_run_exit_clean", default_supported, latency_prepare, 3342f7fa53dcSPaolo Bonzini default_prepare_gif_clear, latency_test, 3343f7fa53dcSPaolo Bonzini latency_finished_clean, latency_check }, 3344ad879127SKrish Sadhukhan { "latency_svm_insn", default_supported, lat_svm_insn_prepare, 3345ad879127SKrish Sadhukhan default_prepare_gif_clear, null_test, 3346ad879127SKrish Sadhukhan lat_svm_insn_finished, lat_svm_insn_check }, 33474b4fb247SPaolo Bonzini { "exc_inject", default_supported, exc_inject_prepare, 33484b4fb247SPaolo Bonzini default_prepare_gif_clear, exc_inject_test, 33494b4fb247SPaolo Bonzini exc_inject_finished, exc_inject_check }, 3350ad879127SKrish Sadhukhan { "pending_event", default_supported, pending_event_prepare, 3351ad879127SKrish Sadhukhan default_prepare_gif_clear, 3352ad879127SKrish Sadhukhan pending_event_test, pending_event_finished, pending_event_check }, 335385dc2aceSPaolo Bonzini { "pending_event_cli", default_supported, pending_event_cli_prepare, 335485dc2aceSPaolo Bonzini pending_event_cli_prepare_gif_clear, 335585dc2aceSPaolo Bonzini pending_event_cli_test, pending_event_cli_finished, 335685dc2aceSPaolo Bonzini pending_event_cli_check }, 335785dc2aceSPaolo Bonzini { "interrupt", default_supported, interrupt_prepare, 335885dc2aceSPaolo Bonzini default_prepare_gif_clear, interrupt_test, 335985dc2aceSPaolo Bonzini interrupt_finished, interrupt_check }, 3360d4db486bSCathy Avery { "nmi", default_supported, nmi_prepare, 3361d4db486bSCathy Avery default_prepare_gif_clear, nmi_test, 3362d4db486bSCathy Avery nmi_finished, nmi_check }, 33639da1f4d8SCathy Avery { "nmi_hlt", smp_supported, nmi_prepare, 33649da1f4d8SCathy Avery default_prepare_gif_clear, nmi_hlt_test, 33659da1f4d8SCathy Avery nmi_hlt_finished, nmi_hlt_check }, 33669c838954SCathy Avery { "virq_inject", default_supported, virq_inject_prepare, 33679c838954SCathy Avery default_prepare_gif_clear, virq_inject_test, 33689c838954SCathy Avery virq_inject_finished, virq_inject_check }, 3369da338a31SMaxim Levitsky { "reg_corruption", default_supported, reg_corruption_prepare, 3370da338a31SMaxim Levitsky default_prepare_gif_clear, reg_corruption_test, 3371da338a31SMaxim Levitsky reg_corruption_finished, reg_corruption_check }, 33724770e9c8SCathy Avery { "svm_init_startup_test", smp_supported, init_startup_prepare, 33734770e9c8SCathy Avery default_prepare_gif_clear, null_test, 33744770e9c8SCathy Avery init_startup_finished, init_startup_check }, 3375d5da6dfeSCathy Avery { "svm_init_intercept_test", smp_supported, init_intercept_prepare, 3376d5da6dfeSCathy Avery default_prepare_gif_clear, init_intercept_test, 3377d5da6dfeSCathy Avery init_intercept_finished, init_intercept_check, .on_vcpu = 2 }, 33787839b0ecSKrish Sadhukhan { "host_rflags", default_supported, host_rflags_prepare, 33797839b0ecSKrish Sadhukhan host_rflags_prepare_gif_clear, host_rflags_test, 33807839b0ecSKrish Sadhukhan host_rflags_finished, host_rflags_check }, 3381f6972bd6SLara Lazier { "vgif", vgif_supported, prepare_vgif_enabled, 3382f6972bd6SLara Lazier default_prepare_gif_clear, test_vgif, vgif_finished, 3383f6972bd6SLara Lazier vgif_check }, 3384f32183f5SJim Mattson TEST(svm_cr4_osxsave_test), 3385ba29942cSKrish Sadhukhan TEST(svm_guest_state_test), 33867a57ef5dSMaxim Levitsky TEST(svm_vmrun_errata_test), 33870b6f6cedSKrish Sadhukhan TEST(svm_vmload_vmsave), 3388665f5677SKrish Sadhukhan TEST(svm_test_singlestep), 33895c92f156SManali Shukla TEST(svm_nm_test), 
3390c8e16d20SManali Shukla TEST(svm_int3_test), 3391af13008dSManali Shukla TEST(svm_into_test), 3392537d39dfSMaxim Levitsky TEST(svm_lbrv_test0), 3393537d39dfSMaxim Levitsky TEST(svm_lbrv_test1), 3394537d39dfSMaxim Levitsky TEST(svm_lbrv_test2), 3395537d39dfSMaxim Levitsky TEST(svm_lbrv_nested_test1), 3396537d39dfSMaxim Levitsky TEST(svm_lbrv_nested_test2), 3397c45bccfcSMaxim Levitsky TEST(svm_intr_intercept_mix_if), 3398c45bccfcSMaxim Levitsky TEST(svm_intr_intercept_mix_gif), 3399c45bccfcSMaxim Levitsky TEST(svm_intr_intercept_mix_gif2), 3400c45bccfcSMaxim Levitsky TEST(svm_intr_intercept_mix_nmi), 3401c45bccfcSMaxim Levitsky TEST(svm_intr_intercept_mix_smi), 3402a8503d50SMaxim Levitsky TEST(svm_tsc_scale_test), 34038650dffeSMaxim Levitsky TEST(pause_filter_test), 3404ad879127SKrish Sadhukhan { NULL, NULL, NULL, NULL, NULL, NULL, NULL } 3405ad879127SKrish Sadhukhan }; 3406712840d5SManali Shukla 3407712840d5SManali Shukla int main(int ac, char **av) 3408712840d5SManali Shukla { 3409ade7601dSSean Christopherson setup_vm(); 3410712840d5SManali Shukla return run_svm_tests(ac, av, svm_tests); 3411712840d5SManali Shukla } 3412
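/*
 * Typical invocation, assuming a standard kvm-unit-tests build (binary name,
 * runner path and QEMU options may differ on a given setup):
 *
 *   ./configure && make
 *   ./x86/run x86/svm.flat -smp 4 -cpu max,+svm
 *
 * run_svm_tests() also accepts individual test names (e.g. "vmrun" or
 * "pause_filter_test") from the kernel command line, which QEMU passes via
 * -append, so a single test can be run in isolation.
 */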