Lines Matching +full:0 +full:x80
53 __asm__ __volatile__("bsf %1, %%eax; cmovnz %%eax, %0" in ffs()
93 "mov %%rsp, %0\n\t" in vmenter_main()
99 : "g"(0xABCD)); in vmenter_main()
100 report((rax == 0xFFFF) && (rsp == resume_rsp), "test vmresume"); in vmenter_main()
109 if (regs.rax != 0xABCD) { in vmenter_exit_handler()
113 regs.rax = 0xFFFF; in vmenter_exit_handler()
137 preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F; in preemption_timer_init()
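Note: the 0x1F mask above extracts IA32_VMX_MISC[4:0], the rate of the VMX-preemption timer relative to the TSC; the timer decrements once every 2^scale TSC cycles. A minimal sketch of the conversion this implies (the helper name is hypothetical, not from the file):

#include <stdint.h>

/*
 * Convert a budget in TSC cycles into a VMX-preemption-timer value,
 * where scale = IA32_VMX_MISC[4:0] and the timer decrements once per
 * 2^scale TSC cycles.
 */
static uint32_t tsc_cycles_to_preempt_timer(uint64_t tsc_cycles, uint8_t scale)
{
	return (uint32_t)(tsc_cycles >> scale);
}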
149 vmx_set_test_stage(0); in preemption_timer_main()
203 "preemption timer with 0 value"); in preemption_timer_exit_handler()
214 case 0: in preemption_timer_exit_handler()
239 vmcs_write(PREEMPT_TIMER_VALUE, 0); in preemption_timer_exit_handler()
243 report_fail("preemption timer with 0 value (vmcall stage 5)"); in preemption_timer_exit_handler()
254 report_fail("Unknown exit reason, 0x%x", exit_reason.full); in preemption_timer_exit_handler()
281 memset(msr_bitmap, 0xff, PAGE_SIZE); in get_msr_bitmap()
295 msr < (APIC_BASE_MSR+0xff); in disable_intercept_for_x2apic_msrs()
299 msr_bitmap[word] = 0; in disable_intercept_for_x2apic_msrs()
300 msr_bitmap[word + (0x800 / sizeof(long))] = 0; in disable_intercept_for_x2apic_msrs()
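Note: the two stores above clear matching words in both halves of the VMX MSR bitmap. Per the SDM, the 4-KiB page is split into four 1-KiB regions: the read bitmap for MSRs 0x0-0x1fff at offset 0x000, the read bitmap for MSRs 0xc0000000-0xc0001fff at 0x400, and the corresponding write bitmaps at 0x800 and 0xc00, which is why the second store adds 0x800 bytes. A standalone sketch of that indexing for a low-range MSR (the helper is hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Set or clear both the read and the write intercept for a low MSR. */
static void set_msr_intercept_low(uint8_t *bitmap, uint32_t msr, bool intercept)
{
	size_t byte = msr / 8;            /* MSRs 0x0-0x1fff: one bit each */
	uint8_t bit = 1u << (msr % 8);

	if (intercept) {
		bitmap[byte] |= bit;          /* read bitmap at 0x000 */
		bitmap[0x800 + byte] |= bit;  /* write bitmap at 0x800 */
	} else {
		bitmap[byte] &= ~bit;
		bitmap[0x800 + byte] &= ~bit;
	}
}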
324 vmcs_write(GUEST_PAT, 0x0); in test_ctrl_pat_init()
337 if (guest_ia32_pat != 0) { in test_ctrl_pat_main()
342 wrmsr(MSR_IA32_CR_PAT, 0x6); in test_ctrl_pat_main()
360 vmcs_write(GUEST_PAT, 0x6); in test_ctrl_pat_exit_handler()
362 report(guest_pat == 0x6, "Exit save PAT"); in test_ctrl_pat_exit_handler()
373 printf("ERROR : Unknown exit reason, 0x%x.\n", exit_reason.full); in test_ctrl_pat_exit_handler()
441 printf("ERROR : Unknown exit reason, 0x%x.\n", exit_reason.full); in test_ctrl_efer_exit_handler()
454 vmx_set_test_stage(0); in cr_shadowing_main()
507 asm volatile("mov %0, %%rsi\n\t" in cr_shadowing_main()
515 asm volatile("mov %0, %%rsi\n\t" in cr_shadowing_main()
523 asm volatile("mov %0, %%rsi\n\t" in cr_shadowing_main()
531 asm volatile("mov %0, %%rsi\n\t" in cr_shadowing_main()
551 case 0: in cr_shadowing_exit_handler()
612 // 0x600 encodes "mov %esi, %cr0" in cr_shadowing_exit_handler()
613 if (exit_qual == 0x600) in cr_shadowing_exit_handler()
618 // 0x604 encodes "mov %esi, %cr4" in cr_shadowing_exit_handler()
619 if (exit_qual == 0x604) in cr_shadowing_exit_handler()
632 report_fail("Unknown exit reason, 0x%x", exit_reason.full); in cr_shadowing_exit_handler()
655 // stage 0, test IO pass in iobmp_main()
656 vmx_set_test_stage(0); in iobmp_main()
657 inb(0x5000); in iobmp_main()
658 outb(0x0, 0x5000); in iobmp_main()
659 report(vmx_get_test_stage() == 0, "I/O bitmap - I/O pass"); in iobmp_main()
661 ((u8 *)io_bitmap_a)[0] = 0xFF; in iobmp_main()
663 inb(0x0); in iobmp_main()
666 outw(0x0, 0x0); in iobmp_main()
669 inl(0x0); in iobmp_main()
673 ((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8)); in iobmp_main()
674 inb(0x5000); in iobmp_main()
677 ((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8)); in iobmp_main()
678 inb(0x9000); in iobmp_main()
682 inl(0x4FFF); in iobmp_main()
686 memset(io_bitmap_a, 0x0, PAGE_SIZE); in iobmp_main()
687 memset(io_bitmap_b, 0x0, PAGE_SIZE); in iobmp_main()
688 inl(0xFFFF); in iobmp_main()
692 outb(0x0, 0x0); in iobmp_main()
697 outb(0x0, 0x0); in iobmp_main()
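Note: the iobmp_main() hits above all exercise one mapping: I/O bitmap A covers ports 0x0000-0x7fff, bitmap B covers 0x8000-0xffff, and bit (port % 8) of byte (port / 8) selects the port within its bitmap, which is exactly what the 0x5000 / 8 and 0x1000 / 8 expressions compute. A sketch of the lookup (the function name is hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* Would a guest access to 'port' be intercepted by the I/O bitmaps? */
static bool io_port_intercepted(const uint8_t *bitmap_a,
				const uint8_t *bitmap_b, uint16_t port)
{
	const uint8_t *bitmap = (port < 0x8000) ? bitmap_a : bitmap_b;
	uint16_t offset = port & 0x7fff;  /* position within the bitmap */

	return bitmap[offset / 8] & (1u << (offset % 8));
}

A multi-byte access exits if any byte it touches is marked, which is why the inl(0x4FFF) hit above still triggers when only port 0x5000's bit is set.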
714 case 0: in iobmp_exit_handler()
738 if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000) in iobmp_exit_handler()
742 if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000) in iobmp_exit_handler()
746 if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF) in iobmp_exit_handler()
750 if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF) in iobmp_exit_handler()
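Note: the four comparisons above extract the port number from the I/O-instruction exit qualification; per the SDM the port occupies bits 31:16, so VMX_IO_PORT_SHIFT is presumably 16. A sketch under that assumption:

#include <stdint.h>

/* Port number from an I/O-instruction exit qualification (bits 31:16). */
static uint16_t exit_qual_to_port(uint64_t exit_qual)
{
	return (uint16_t)((exit_qual >> 16) & 0xffff);
}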
791 printf("\tERROR : Unknown exit reason, 0x%x\n", exit_reason.full); in iobmp_exit_handler()
797 #define INSN_CPU0 0
801 #define FIELD_EXIT_QUAL (1 << 0)
806 "insn_invlpg: invlpg 0x12345678;ret\n\t"
884 * tested in exit handler. If set to 0, only "reason" is checked.
888 {"HLT", CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
890 0x12345678, 0, FIELD_EXIT_QUAL},
891 {"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0, this_cpu_has_mwait},
892 {"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0, this_cpu_has_pmu},
893 {"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
894 {"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
896 {"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
898 {"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
900 {"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
902 {"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0, this_cpu_has_mwait},
903 {"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
905 {"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
906 {"DESC_TABLE (SGDT)", CPU_DESC_TABLE, insn_sgdt, INSN_CPU1, 46, 0, 0, 0},
907 {"DESC_TABLE (LGDT)", CPU_DESC_TABLE, insn_lgdt, INSN_CPU1, 46, 0, 0, 0},
908 {"DESC_TABLE (SIDT)", CPU_DESC_TABLE, insn_sidt, INSN_CPU1, 46, 0, 0, 0},
909 {"DESC_TABLE (LIDT)", CPU_DESC_TABLE, insn_lidt, INSN_CPU1, 46, 0, 0, 0},
910 {"DESC_TABLE (SLDT)", CPU_DESC_TABLE, insn_sldt, INSN_CPU1, 47, 0, 0, 0},
911 {"DESC_TABLE (LLDT)", CPU_DESC_TABLE, insn_lldt, INSN_CPU1, 47, 0, 0, 0},
912 {"DESC_TABLE (STR)", CPU_DESC_TABLE, insn_str, INSN_CPU1, 47, 0, 0, 0},
914 {"RDRAND", CPU_RDRAND, insn_rdrand, INSN_CPU1, VMX_RDRAND, 0, 0, 0},
915 {"RDSEED", CPU_RDSEED, insn_rdseed, INSN_CPU1, VMX_RDSEED, 0, 0, 0},
917 {"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
918 {"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
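Note: the initializers above suggest the shape of the intercept-table entries; a plausible reconstruction follows, with field names guessed from the values shown (the actual struct in the file may differ):

#include <stdbool.h>
#include <stdint.h>

struct insn_table_entry {
	const char *name;        /* e.g. "HLT", "CR3 load" */
	uint32_t flag;           /* CPU_* execution-control bit to set */
	void (*insn_func)(void); /* executes the instruction in the guest */
	uint32_t type;           /* INSN_CPU0 / INSN_CPU1 / INSN_ALWAYS_TRAP */
	uint32_t reason;         /* expected exit reason, e.g. 12 for HLT */
	uint64_t exit_qual;      /* expected exit qualification */
	uint32_t insn_info;      /* expected VM-exit instruction info */
	uint32_t test_field;     /* FIELD_* mask; 0 = check reason only */
	bool (*supported)(void); /* optional feature probe, may be NULL */
};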
927 ctrl_cpu = ctrl_cpu_rev[0].set | CPU_SECONDARY; in insn_intercept_init()
928 ctrl_cpu &= ctrl_cpu_rev[0].clr; in insn_intercept_init()
933 for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) { in insn_intercept_init()
943 for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) { in insn_intercept_main()
946 !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag)) || in insn_intercept_main()
962 !(ctrl_cpu_rev[0].set & insn_table[cur_insn].flag)) || in insn_intercept_main()
1002 u32 val = 0; in insn_intercept_exit_handler()
1015 vmcs_write(CPU_EXEC_CTRL0, val | ctrl_cpu_rev[0].set); in insn_intercept_exit_handler()
1039 * Returns 0 on success, 1 on failure.
1042 * expected to fail, e.g. setup_dummy_ept() arbitrarily passes '0' to satisfy
1043 * the various EPTP consistency checks, but doesn't ensure backing for HPA '0'.
1047 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) || in __setup_ept()
1074 return 0; in __setup_ept()
1081 * Returns 0 on success, 1 on failure.
1101 setup_ept_range(pml4, 0, end_of_memory, 0, in setup_ept()
1104 return 0; in setup_ept()
1117 if (__setup_ept(0, false)) in setup_dummy_ept()
1123 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) || in enable_unrestricted_guest()
1136 return 0; in enable_unrestricted_guest()
1202 vmx_set_test_stage(0); in ept_common()
1258 report(*((u32 *)0xfee00030UL) == apic_version, "EPT - MMIO access"); in ept_main()
1294 case 0: in pml_exit_handler()
1322 report_fail("Unknown exit reason, 0x%x", exit_reason.full); in pml_exit_handler()
1345 case 0: in ept_exit_handler_common()
1348 have_ad ? EPT_ACCESS_FLAG : 0, in ept_exit_handler_common()
1349 have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0); in ept_exit_handler_common()
1352 have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0, in ept_exit_handler_common()
1353 have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0); in ept_exit_handler_common()
1398 (unsigned long)pci_physaddr, 0); in ept_exit_handler_common()
1402 if (!invept_test(0, eptp)) in ept_exit_handler_common()
1442 check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0, in ept_exit_handler_common()
1443 have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0); in ept_exit_handler_common()
1453 check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0, in ept_exit_handler_common()
1454 have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0); in ept_exit_handler_common()
1457 (have_ad ? EPT_VLT_WR : 0) | in ept_exit_handler_common()
1489 report_fail("Unknown exit reason, 0x%x", exit_reason.full); in ept_exit_handler_common()
1523 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) || in pml_init()
1541 int count = 0; in pml_main()
1543 vmx_set_test_stage(0); in pml_main()
1544 *((u32 *)data_page2) = 0x1; in pml_main()
1550 *((u32 *)data_page2) = 0x1; in pml_main()
1574 apic_write(APIC_EOI, 0); in timer_isr()
1589 vmx_set_test_stage(0); in interrupt_main()
1595 for (loops = 0; loops < 10000000 && !timer_fired; loops++) in interrupt_main()
1599 apic_write(APIC_TMICT, 0); in interrupt_main()
1604 for (loops = 0; loops < 10000000 && !timer_fired; loops++) in interrupt_main()
1609 apic_write(APIC_TMICT, 0); in interrupt_main()
1621 apic_write(APIC_TMICT, 0); in interrupt_main()
1633 apic_write(APIC_TMICT, 0); in interrupt_main()
1646 apic_write(APIC_TMICT, 0); in interrupt_main()
1659 apic_write(APIC_TMICT, 0); in interrupt_main()
1665 for (loops = 0; loops < 10000000 && !timer_fired; loops++) in interrupt_main()
1670 apic_write(APIC_TMICT, 0); in interrupt_main()
1685 case 0: in interrupt_exit_handler()
1719 int vector = vmcs_read(EXI_INTR_INFO) & 0xff; in interrupt_exit_handler()
1728 report_fail("Unknown exit reason, 0x%x", exit_reason.full); in interrupt_exit_handler()
1762 apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); in nmi_message_thread()
1768 apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); in nmi_message_thread()
1781 vmx_set_test_stage(0); in nmi_hlt_main()
1815 report_fail("VMEXIT not due to vmcall. Exit reason 0x%x", in nmi_hlt_exit_handler()
1830 report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x", in nmi_hlt_exit_handler()
1855 u64 dr7 = 0x402; in dbgctls_init()
1856 u64 zero = 0; in dbgctls_init()
1860 "mov %0,%%dr0\n\t" in dbgctls_init()
1861 "mov %0,%%dr1\n\t" in dbgctls_init()
1862 "mov %0,%%dr2\n\t" in dbgctls_init()
1865 wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1); in dbgctls_init()
1866 vmcs_write(GUEST_DR7, 0x404); in dbgctls_init()
1867 vmcs_write(GUEST_DEBUGCTL, 0x2); in dbgctls_init()
1879 asm volatile("mov %%dr7,%0" : "=r" (dr7)); in dbgctls_main()
1883 report(dr7 == 0x404, "Load debug controls" /* && debugctl == 0x2 */); in dbgctls_main()
1885 dr7 = 0x408; in dbgctls_main()
1886 asm volatile("mov %0,%%dr7" : : "r" (dr7)); in dbgctls_main()
1887 wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3); in dbgctls_main()
1889 vmx_set_test_stage(0); in dbgctls_main()
1901 asm volatile("mov %%dr7,%0" : "=r" (dr7)); in dbgctls_main()
1905 report(dr7 == 0x402, in dbgctls_main()
1906 "Guest=host debug controls" /* && debugctl == 0x1 */); in dbgctls_main()
1908 dr7 = 0x408; in dbgctls_main()
1909 asm volatile("mov %0,%%dr7" : : "r" (dr7)); in dbgctls_main()
1910 wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3); in dbgctls_main()
1923 asm volatile("mov %%dr7,%0" : "=r" (dr7)); in dbgctls_exit_handler()
1929 case 0: in dbgctls_exit_handler()
1930 if (dr7 == 0x400 && debugctl == 0 && in dbgctls_exit_handler()
1931 vmcs_read(GUEST_DR7) == 0x408 /* && in dbgctls_exit_handler()
1933 vmcs_read(GUEST_DEBUGCTL) == 0x3 */) in dbgctls_exit_handler()
1937 dr7 = 0x402; in dbgctls_exit_handler()
1938 asm volatile("mov %0,%%dr7" : : "r" (dr7)); in dbgctls_exit_handler()
1939 wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1); in dbgctls_exit_handler()
1940 vmcs_write(GUEST_DR7, 0x404); in dbgctls_exit_handler()
1941 vmcs_write(GUEST_DEBUGCTL, 0x2); in dbgctls_exit_handler()
1949 if (dr7 == 0x400 && debugctl == 0 && in dbgctls_exit_handler()
1950 vmcs_read(GUEST_DR7) == 0x404 /* && in dbgctls_exit_handler()
1952 vmcs_read(GUEST_DEBUGCTL) == 0x2 */) in dbgctls_exit_handler()
1971 #define MSR_MAGIC 0x31415926
1980 entry_msr_load[0].index = MSR_KERNEL_GS_BASE; in msr_switch_init()
1981 entry_msr_load[0].value = MSR_MAGIC; in msr_switch_init()
2000 exit_msr_store[0].index = MSR_KERNEL_GS_BASE; in msr_switch_main()
2001 exit_msr_load[0].index = MSR_KERNEL_GS_BASE; in msr_switch_main()
2002 exit_msr_load[0].value = MSR_MAGIC + 2; in msr_switch_main()
2010 report(exit_msr_store[0].value == MSR_MAGIC + 1, in msr_switch_exit_handler()
2015 entry_msr_load[0].index = MSR_FS_BASE; in msr_switch_exit_handler()
2018 printf("ERROR %s: unexpected stage=%u or reason=0x%x\n", in msr_switch_exit_handler()
2051 "mov $0xABCD, %%rax\n\t" in vmmcall_main()
2066 report((vmcs_read(EXI_INTR_INFO) & 0xff) == UD_VECTOR, in vmmcall_exit_handler()
2070 report_fail("Unknown exit reason, 0x%x", exit_reason.full); in vmmcall_exit_handler()
2081 if (ctrl_cpu_rev[0].clr & CPU_SECONDARY) { in disable_rdtscp_init()
2093 case 0: in disable_rdtscp_ud_handler()
2113 vmx_set_test_stage(0); in disable_rdtscp_main()
2116 asm volatile(".byte 0xf3, 0x0f, 0xc7, 0xf8" : : : "eax"); in disable_rdtscp_main()
2118 handle_exception(UD_VECTOR, 0); in disable_rdtscp_main()
2127 case 0: in disable_rdtscp_exit_handler()
2142 report_fail("Unknown exit reason, 0x%x", exit_reason.full); in disable_rdtscp_exit_handler()
2150 printf("Calling exit(0) from l2...\n"); in exit_monitor_from_l2_main()
2151 exit(0); in exit_monitor_from_l2_main()
2288 } while (0) in diagnose_ept_violation_qual()
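Note: hits like the one above close the standard statement-macro idiom: wrapping a multi-statement macro body in do { ... } while (0) makes the expansion a single statement that requires a trailing semicolon, so it composes safely with if/else. A minimal illustration (the macro is hypothetical):

#include <stdio.h>

/* Without do/while(0), the second printf would escape an if body. */
#define REPORT_TWICE(msg) do {		\
	printf("%s\n", (msg));		\
	printf("%s\n", (msg));		\
} while (0)

Used as "if (failed) REPORT_TWICE("oops"); else ...", the expansion behaves like one statement.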
2369 #if 0 in do_ept_violation()
2487 TEST_ASSERT_EQ(gpa & ~PAGE_MASK, 0); in ept_access_paddr()
2494 orig_epte = ept_twiddle(gpa, /*mkhuge=*/0, /*level=*/1, in ept_access_paddr()
2505 for (i = EPT_PAGE_LEVEL; i > 0; i--) { in ept_access_paddr()
2511 TEST_ASSERT_EQ(epte & EPT_DIRTY_FLAG, 0); in ept_access_paddr()
2580 ept_allowed(0, 1ul << bit, OP_READ); in ept_ignored_bit()
2581 ept_allowed(0, 1ul << bit, OP_WRITE); in ept_ignored_bit()
2582 ept_allowed(0, 1ul << bit, OP_EXEC); in ept_ignored_bit()
2585 ept_allowed(1ul << bit, 0, OP_READ); in ept_ignored_bit()
2586 ept_allowed(1ul << bit, 0, OP_WRITE); in ept_ignored_bit()
2587 ept_allowed(1ul << bit, 0, OP_EXEC); in ept_ignored_bit()
2610 #if 0 in ept_misconfig_at_level_mkhuge_op()
2612 TEST_EXPECT_EQ_MSG(vmcs_read(EXI_QUALIFICATION), 0); in ept_misconfig_at_level_mkhuge_op()
2614 #if 0 in ept_misconfig_at_level_mkhuge_op()
2667 ept_misconfig_at_level_mkhuge(false, level, 0, 1ul << bit); in ept_reserved_bit_at_level_nohuge()
2677 ept_misconfig_at_level_mkhuge(true, level, 0, 1ul << bit); in ept_reserved_bit_at_level_huge()
2687 ept_misconfig_at_level(level, 0, 1ul << bit); in ept_reserved_bit_at_level()
2782 TEST_ASSERT(get_ept_pte(pml4, data->gpa, 4, &pte) && pte == 0); in ept_access_test_setup()
2783 TEST_ASSERT(get_ept_pte(pml4, data->gpa + size - 1, 4, &pte) && pte == 0); in ept_access_test_setup()
2786 data->hva[0] = MAGIC_VAL_1; in ept_access_test_setup()
2794 ept_access_violation(0, OP_READ, EPT_VLT_RD); in ept_access_test_not_present()
2795 ept_access_violation(0, OP_WRITE, EPT_VLT_WR); in ept_access_test_not_present()
2796 ept_access_violation(0, OP_EXEC, EPT_VLT_FETCH); in ept_access_test_not_present()
2918 * control is 0. in ept_access_test_ignored_bits()
2943 ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, EPT_VLT_RD); in ept_access_test_paddr_not_present_ad_disabled()
2944 ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, EPT_VLT_RD); in ept_access_test_paddr_not_present_ad_disabled()
2945 ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, EPT_VLT_RD); in ept_access_test_paddr_not_present_ad_disabled()
2955 ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, qual); in ept_access_test_paddr_not_present_ad_enabled()
2956 ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, qual); in ept_access_test_paddr_not_present_ad_enabled()
2957 ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, qual); in ept_access_test_paddr_not_present_ad_enabled()
2974 ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual); in ept_access_test_paddr_read_only_ad_disabled()
2975 ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual); in ept_access_test_paddr_read_only_ad_disabled()
2976 ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual); in ept_access_test_paddr_read_only_ad_disabled()
2999 ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual); in ept_access_test_paddr_read_only_ad_enabled()
3000 ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual); in ept_access_test_paddr_read_only_ad_enabled()
3001 ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual); in ept_access_test_paddr_read_only_ad_enabled()
3014 ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_READ); in ept_access_test_paddr_read_write()
3015 ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_WRITE); in ept_access_test_paddr_read_write()
3016 ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_EXEC); in ept_access_test_paddr_read_write()
3023 ept_access_allowed_paddr(EPT_PRESENT, 0, OP_READ); in ept_access_test_paddr_read_write_execute()
3024 ept_access_allowed_paddr(EPT_PRESENT, 0, OP_WRITE); in ept_access_test_paddr_read_write_execute()
3025 ept_access_allowed_paddr(EPT_PRESENT, 0, OP_EXEC); in ept_access_test_paddr_read_write_execute()
3042 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual); in ept_access_test_paddr_read_execute_ad_disabled()
3043 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual); in ept_access_test_paddr_read_execute_ad_disabled()
3044 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual); in ept_access_test_paddr_read_execute_ad_disabled()
3067 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual); in ept_access_test_paddr_read_execute_ad_enabled()
3068 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual); in ept_access_test_paddr_read_execute_ad_enabled()
3069 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual); in ept_access_test_paddr_read_execute_ad_enabled()
3094 ept_allowed_at_level_mkhuge(true, 2, 0, 0, OP_READ); in ept_access_test_force_2m_page()
3128 vmcs_read(~0); in try_invvpid()
3149 asm volatile("invvpid %0, %1" in ds_invvpid()
3162 asm volatile("sub %%rsp,%0; invvpid (%%rsp,%0,1), %1" in ss_invvpid()
3202 asm volatile ("mov %%rsp, %0" : "=r"(rsp)); in try_compat_invvpid()
3208 asm goto ("lcall *%0" : : "m" (fp) : "rax" : invvpid); in try_compat_invvpid()
3260 operand->vpid = 0xffff; in invvpid_test_lam()
3271 try_invvpid(INVVPID_ADDR, 0xffff, (u64)operand); in invvpid_test_lam()
3278 * or CPL > 0.
3283 unsigned types = 0; in invvpid_test()
3305 try_invvpid(i, 0xffff, 0); in invvpid_test()
3310 for (i = 0; i < 64; i++) in invvpid_test()
3311 for (type = 0; type < 4; type++) in invvpid_test()
3313 try_invvpid(type, 1ul << i, 0); in invvpid_test()
3318 for (type = 0; type < 4; type++) in invvpid_test()
3320 try_invvpid(type, 0, 0); in invvpid_test()
3326 try_invvpid(INVVPID_ADDR, 0xffff, NONCANONICAL); in invvpid_test()
3378 vmcs_write(~0u, 0); in vmlaunch()
3381 __asm__ __volatile__ goto ("vmwrite %%rsp, %0; vmlaunch" in vmlaunch()
3435 test_vmx_vmlaunch(0); in test_vmx_valid_controls()
3487 test_rsvd_ctl_bit_value(name, msr, encoding, bit, 0); in test_rsvd_ctl_bit()
3503 for (bit = 0; bit < 32; bit++) in test_pin_based_ctls()
3519 "MSR_IA32_VMX_PROCBASED_CTLS", ctrl_cpu_rev[0].val); in test_primary_processor_based_ctls()
3520 for (bit = 0; bit < 32; bit++) in test_primary_processor_based_ctls()
3522 ctrl_cpu_rev[0], CPU_EXEC_CTRL0, bit); in test_primary_processor_based_ctls()
3532 * VM-execution control is 0 (or if the processor does not support the
3543 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)) in test_secondary_processor_based_ctls()
3551 for (bit = 0; bit < 32; bit++) in test_secondary_processor_based_ctls()
3560 vmcs_write(CPU_EXEC_CTRL1, ~0); in test_secondary_processor_based_ctls()
3569 report_prefix_pushf("CR3 target count 0x%x", i); in try_cr3_target_count()
3587 unsigned supported_targets = (rdmsr(MSR_IA32_VMX_MISC) >> 16) & 0x1ff; in test_cr3_targets()
3595 try_cr3_target_count(0x80000000, supported_targets); in test_cr3_targets()
3596 try_cr3_target_count(0x7fffffff, supported_targets); in test_cr3_targets()
3597 for (i = 0; i <= supported_targets + 1; i++) in test_cr3_targets()
3603 TEST_ASSERT(vmcs_write(CR3_TARGET_0 + i*2, 0)); in test_cr3_targets()
3675 if (!(ctrl_cpu_rev[0].clr & control_bit)) in test_vmcs_addr_reference()
3693 skip_beyond_mapped_ram, 0, 63); in test_vmcs_addr_reference()
3704 test_vmcs_addr_values(field_name, field, align, true, false, 0, 63); in test_vmcs_addr_reference()
3713 * If the "use I/O bitmaps" VM-execution control is 1, bits 11:0 of
3714 * each I/O-bitmap address must be 0. Neither address should set any
3729 * If the "use MSR bitmaps" VM-execution control is 1, bits 11:0 of
3730 * the MSR-bitmap address must be 0. The address should not set any
3744 * - Bits 11:0 of the address must be 0.
3767 * - Bits 11:0 of the address must be 0.
3794 for (i = 0; i < ARRAY_SIZE(test_bits); i++) { in set_bit_pattern()
3806 * If the "use TPR shadow" VM-execution control is 0, the following
3807 * VM-execution controls must also be 0:
3814 * "virtualize APIC accesses" VM-execution control must be 0.
3825 u8 i = 0, j; in test_apic_virtual_ctls()
3830 if (!((ctrl_cpu_rev[0].clr & (CPU_SECONDARY | CPU_TPR_SHADOW)) == in test_apic_virtual_ctls()
3953 …report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-descriptor-address 0x%lx"… in test_pi_desc_addr()
3968 * - range 0 - 255 (bits 15:8 are all 0).
3969 * - Bits 5:0 of the posted-interrupt descriptor address are all 0.
4038 for (i = 0; i < 8; i++) { in test_posted_intr()
4053 vec &= ~(0xff << 8); in test_posted_intr()
4062 for (i = 0; i < 6; i++) { in test_posted_intr()
4066 test_pi_desc_addr(0xf0, false); in test_posted_intr()
4067 test_pi_desc_addr(0xff, false); in test_posted_intr()
4068 test_pi_desc_addr(0x0f, false); in test_posted_intr()
4069 test_pi_desc_addr(0x8000, true); in test_posted_intr()
4070 test_pi_desc_addr(0x00, true); in test_posted_intr()
4071 test_pi_desc_addr(0xc000, true); in test_posted_intr()
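Note: the six test_pi_desc_addr() calls above encode the rule quoted at line 3969: bits 5:0 of the posted-interrupt descriptor address must all be 0, i.e. the descriptor is 64-byte aligned. That matches the calls above if the second argument flags the expected-success cases: 0xf0, 0xff, and 0x0f are misaligned, while 0x8000, 0x00, and 0xc000 are aligned. As a one-line check (hypothetical helper):

#include <stdbool.h>
#include <stdint.h>

/* Valid iff the descriptor is 64-byte aligned (address bits 5:0 clear). */
static bool pi_desc_addr_valid(uint64_t addr)
{
	return (addr & 0x3f) == 0;
}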
4075 false, false, 0, 63); in test_posted_intr()
4100 u16 vpid = 0x0000; in test_vpid()
4120 for (i = 0; i < 16; i++) { in test_vpid()
4146 valid = (threshold & 0xf) <= ((vtpr >> 4) & 0xf); in try_tpr_threshold_and_vtpr()
4149 report_prefix_pushf("TPR threshold 0x%x, VTPR.class 0x%x", in try_tpr_threshold_and_vtpr()
4150 threshold, (vtpr >> 4) & 0xf); in try_tpr_threshold_and_vtpr()
4172 vmcs_write(ENT_INTR_ERROR, 0x00000000); in test_invalid_event_injection()
4173 vmcs_write(ENT_INST_LEN, 0x00000001); in test_invalid_event_injection()
4177 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4186 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4193 /* If the interruption type is other event, the vector is 0. */ in test_invalid_event_injection()
4195 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4196 "(OTHER EVENT && vector != 0) invalid [-]", in test_invalid_event_injection()
4204 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4212 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4222 ent_intr_info = ent_intr_info_base | INTR_TYPE_HARD_EXCEPTION | 0x20; in test_invalid_event_injection()
4223 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4232 * (a) the "unrestricted guest" VM-execution control is 0 in test_invalid_event_injection()
4237 assert(!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) || in test_invalid_event_injection()
4242 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4255 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4268 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4278 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4297 for (cnt = 0; cnt < 8; cnt++) { in test_invalid_event_injection()
4301 INTR_INFO_DELIVER_CODE_MASK : 0; in test_invalid_event_injection()
4305 report_prefix_pushf("VM-entry intr info=0x%x [-]", in test_invalid_event_injection()
4322 for (cnt = 0; cnt < 32; cnt++) { in test_invalid_event_injection()
4342 0 : in test_invalid_event_injection()
4346 report_prefix_pushf("VM-entry intr info=0x%x [-]", in test_invalid_event_injection()
4358 0; in test_invalid_event_injection()
4361 report_prefix_pushf("VM-entry intr info=0x%x [+]", in test_invalid_event_injection()
4369 /* Reserved bits in the field (30:12) are 0. */ in test_invalid_event_injection()
4376 report_prefix_pushf("VM-entry intr info=0x%x [-]", in test_invalid_event_injection()
4386 * bits 31:16 of the VM-entry exception error-code field are 0. in test_invalid_event_injection()
4390 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4396 report_prefix_pushf("VM-entry intr error=0x%x [-]", in test_invalid_event_injection()
4402 vmcs_write(ENT_INTR_ERROR, 0x00000000); in test_invalid_event_injection()
4408 * field is in the range 0 - 15. in test_invalid_event_injection()
4411 for (cnt = 0; cnt < 3; cnt++) { in test_invalid_event_injection()
4413 case 0: in test_invalid_event_injection()
4426 report_prefix_pushf("%s, VM-entry intr info=0x%x", in test_invalid_event_injection()
4431 /* Instruction length set to -1 (0xFFFFFFFF) should fail */ in test_invalid_event_injection()
4433 report_prefix_pushf("VM-entry intr length = 0x%x [-]", in test_invalid_event_injection()
4440 ent_intr_len = 0x00000010; in test_invalid_event_injection()
4441 report_prefix_pushf("VM-entry intr length = 0x%x [-]", in test_invalid_event_injection()
4443 vmcs_write(ENT_INST_LEN, 0x00000010); in test_invalid_event_injection()
4483 report_prefix_pushf("TPR threshold 0x%x, VTPR.class 0xf", threshold); in try_tpr_threshold()
4501 for (i = 0; i < 0x10; i++) in test_tpr_threshold_values()
4506 try_tpr_threshold(0x7fffffff); in test_tpr_threshold_values()
4513 * "virtual-interrupt delivery" VM-execution control is 0, bits
4515 be 0.
4519 * "virtual-interrupt delivery" VM-execution control is 0
4521 * is 0, the value of bits 3:0 of the TPR threshold VM-execution
4533 if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) in test_tpr_threshold()
4537 memset(virtual_apic_page, 0xff, PAGE_SIZE); in test_tpr_threshold()
4549 if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) && in test_tpr_threshold()
4610 * If the "NMI exiting" VM-execution control is 0, "Virtual NMIs"
4611 * VM-execution control must be 0.
4614 * If the "virtual NMIs" VM-execution control is 0, the "NMI-window
4615 * exiting" VM-execution control must be 0.
4655 if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) { in test_nmi_ctrls()
4694 (eptp & EPTP_AD_FLAG) ? "1": "0"); in test_eptp_ad_bit()
4707 * - The EPT memory type (bits 2:0) must be a value supported by the
4712 * 0 if bit 21 of the IA32_VMX_EPT_VPID_CAP MSR is read as 0,
4716 * physical-address width) must all be 0.
4729 u64 j, resv_bits_mask = 0; in test_ept_eptp()
4731 if (__setup_ept(0xfed40000, false)) { in test_ept_eptp()
4743 for (i = 0; i < 8; i++) { in test_ept_eptp()
4761 for (i = 0; i < 8; i++) { in test_ept_eptp()
4802 for (i = 0; i < 32; i++) { in test_ept_eptp()
4810 if (i == 0) in test_ept_eptp()
4820 for (i = 0; i < (63 - maxphysaddr + 1); i++) { in test_ept_eptp()
4826 (j < maxphysaddr ? 0 : 1ul << j); in test_ept_eptp()
4875 * * Bits 11:0 of the address must be 0.
4888 if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) && in test_pml()
4928 * If the "activate VMX-preemption timer" VM-execution control is 0, the
4929 * the "save VMX-preemption timer value" VM-exit control must also be 0.
4984 "out %al, $0x80;\n\t" in test_mtf_guest()
4987 "out %al, $0x80;\n\t" in test_mtf_guest()
4998 "mov $0xaaaaaaaaaaaaaaaa, %rax;\n\t" in test_mtf_guest()
5008 ".byte 0xf1;\n\t" in test_mtf_guest()
5011 "mov $0, %eax;\n\t"); in test_mtf_guest()
5056 report(rip == exp_rip, "MTF VM-exit after %s. RIP: 0x%lx (expected 0x%lx)", in report_mtf()
5065 if (!(ctrl_cpu_rev[0].clr & CPU_MTF)) { in vmx_mtf_test()
5095 "'pending debug exceptions' field after MTF VM-exit: 0x%lx (expected 0x%lx)", in vmx_mtf_test()
5100 vmcs_write(GUEST_PENDING_DEBUG, 0); in vmx_mtf_test()
5146 "vmcall\n\t" /* on return from this CR0.PG=0 */
5168 if (!(ctrl_cpu_rev[0].clr & CPU_MTF)) { in vmx_mtf_pdpte_test()
5178 vmcs_write(EXC_BITMAP, ~0); in vmx_mtf_pdpte_test()
5206 * 3:0 B execute/read/accessed in vmx_mtf_pdpte_test()
5208 * 6:5 0 descriptor privilege level in vmx_mtf_pdpte_test()
5210 * 11:8 0 reserved in vmx_mtf_pdpte_test()
5211 * 12 0 available for use by system software in vmx_mtf_pdpte_test()
5212 * 13 0 64 bit mode not active in vmx_mtf_pdpte_test()
5215 * 16 0 segment usable in vmx_mtf_pdpte_test()
5216 * 31:17 0 reserved in vmx_mtf_pdpte_test()
5219 vmcs_write(GUEST_AR_CS, 0xc09b); in vmx_mtf_pdpte_test()
5230 * Turn the 4-level page table into a PAE page table by following the 0th in vmx_mtf_pdpte_test()
5237 * Bits 11:0 page offset in vmx_mtf_pdpte_test()
5249 * Get a pointer to PTE for GVA=0 in the page directory pointer table in vmx_mtf_pdpte_test()
5252 (pgd_t *)phys_to_virt(guest_cr3 & ~X86_CR3_PCID_MASK), 0, in vmx_mtf_pdpte_test()
5268 for (i = 0; i < 4; i++) { in vmx_mtf_pdpte_test()
5269 TEST_ASSERT_EQ_MSG(0, (pte[i] & PDPTE64_RSVD_MASK), in vmx_mtf_pdpte_test()
5271 TEST_ASSERT_EQ_MSG(0, (pte[i] & PDPTE64_PAGE_SIZE_MASK), in vmx_mtf_pdpte_test()
5286 for (i = 0; i < 4; i++) { in vmx_mtf_pdpte_test()
5289 report(pdpte == pdpt[i], "PDPTE%d is 0x%lx (expected 0x%lx)", in vmx_mtf_pdpte_test()
5337 * - The lower 4 bits of the VM-entry MSR-load address must be 0.
5361 for (i = 0; i < 4; i++) { in test_entry_msr_load()
5364 report_prefix_pushf("VM-entry MSR-load addr [4:0] %lx", in test_entry_msr_load()
5365 tmp & 0xf); in test_entry_msr_load()
5380 entry_msr_load = (struct vmx_msr_entry *)((u64)entry_msr_load & ~0xf); in test_entry_msr_load()
5418 "Guest state is 0x%lx (expected 0x%lx)", in guest_state_test_main()
5463 * - The lower 4 bits of the VM-exit MSR-store address must be 0.
5489 for (i = 0; i < 4; i++) { in test_exit_msr_store()
5492 report_prefix_pushf("VM-exit MSR-store addr [4:0] %lx", in test_exit_msr_store()
5493 tmp & 0xf); in test_exit_msr_store()
5508 exit_msr_store = (struct vmx_msr_entry *)((u64)exit_msr_store & ~0xf); in test_exit_msr_store()
5547 vmcs_write(GUEST_RFLAGS, 0); in vmx_controls_test()
5587 return val & 0xf0; in apic_virt_nibble1()
5592 return val & (0xff << 24); in apic_virt_byte3()
5660 case APIC_ISR ... APIC_ISR + 0x70: in apic_reg_virt_exit_expectation()
5661 case APIC_TMR ... APIC_TMR + 0x70: in apic_reg_virt_exit_expectation()
5662 case APIC_IRR ... APIC_IRR + 0x70: in apic_reg_virt_exit_expectation()
5896 "read 0x%x, expected 0x%x.", got, want); in apic_reg_virt_guest()
5919 report_prefix_pushf("xapic - reading 0x%03x", reg); in test_xapic_rd()
5932 virtual_apic_page[apic_reg_index(reg)] = 0; in test_xapic_rd()
5934 apic_access_address[apic_reg_index(reg)] = 0; in test_xapic_rd()
5946 u32 apic_page_offset = vmcs_read(EXI_QUALIFICATION) & 0xfff; in test_xapic_rd()
5950 "got APIC access exit @ page offset 0x%03x, want 0x%03x", in test_xapic_rd()
5977 report_prefix_pushf("xapic - writing 0x%x to 0x%03x", val, reg); in test_xapic_wr()
5987 apic_access_address[apic_reg_index(reg)] = 0; in test_xapic_wr()
5988 virtual_apic_page[apic_reg_index(reg)] = 0; in test_xapic_wr()
5999 u32 apic_page_offset = vmcs_read(EXI_QUALIFICATION) & 0xfff; in test_xapic_wr()
6003 "got APIC access exit @ page offset 0x%03x, want 0x%03x", in test_xapic_wr()
6012 "got APIC write exit @ page offset 0x%03x; val is 0x%x, want 0x%x", in test_xapic_wr()
6029 report(got == want, "exitless write; val is 0x%x, want 0x%x", in test_xapic_wr()
6035 "non-virtualized write; val is 0x%x, want 0x%x", got, in test_xapic_wr()
6071 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)) { in configure_apic_reg_virt_test()
6091 if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) { in configure_apic_reg_virt_test()
6170 for (i = 0; i < ARRAY_SIZE(apic_reg_tests); i++) { in apic_reg_virt_test()
6190 for (reg = 0; reg < PAGE_SIZE / sizeof(u32); reg += 0x10) { in apic_reg_virt_test()
6266 return val & 0xf0; in virt_x2apic_mode_nibble1()
6310 *val &= 0xff; in get_x2apic_wr_val()
6316 * EOI, ESR: WRMSR of a non-zero value causes #GP(0). in get_x2apic_wr_val()
6317 * TMICT: A write of 0 to the initial-count register effectively in get_x2apic_wr_val()
6321 *val = 0; in get_x2apic_wr_val()
6344 *val = 0x40000 | 0xf1; in get_x2apic_wr_val()
6354 *val = 0x0; in get_x2apic_wr_val()
6405 * Thus, set the write value to 0, which seems in virt_x2apic_mode_wr_expectation()
6408 expectation->wr_val = 0; in virt_x2apic_mode_wr_expectation()
6664 handle_x2apic_gp_insn_len = 0; in teardown_x2apic_gp_handler()
6697 "APIC read; got 0x%lx, want 0x%lx.", in virt_x2apic_mode_guest()
6727 report_prefix_pushf("x2apic - reading 0x%03x", reg); in test_x2apic_rd()
6765 int ipi_vector = 0xf1; in test_x2apic_wr()
6766 u32 restore_val = 0; in test_x2apic_wr()
6768 report_prefix_pushf("x2apic - writing 0x%lx to 0x%03x", val, reg); in test_x2apic_wr()
6778 virtual_apic_page[apic_reg_index(reg)] = 0; in test_x2apic_wr()
6805 "got APIC write exit @ page offset 0x%03x; val is 0x%x, want 0x%lx", in test_x2apic_wr()
6820 report(got == want, "x2APIC write; got 0x%x, want 0x%lx", got, in test_x2apic_wr()
6830 * WRMSR(0x808, 0x78), then, L1 might read 0x70. in test_x2apic_wr()
6833 * 1. L2 executes WRMSR(0x808, 0x78). in test_x2apic_wr()
6851 "non-virtualized write; val is 0x%x, want 0x%lx", in test_x2apic_wr()
6893 for (msr = 0x800; msr <= 0x8ff; msr++) { in configure_virt_x2apic_mode_test()
6895 clear_bit(msr, msr_bitmap_page + 0x000); in configure_virt_x2apic_mode_test()
6896 clear_bit(msr, msr_bitmap_page + 0x800); in configure_virt_x2apic_mode_test()
6898 set_bit(msr, msr_bitmap_page + 0x000); in configure_virt_x2apic_mode_test()
6899 set_bit(msr, msr_bitmap_page + 0x800); in configure_virt_x2apic_mode_test()
6941 if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) { in virt_x2apic_mode_test()
6944 } else if (!(ctrl_cpu_rev[0].clr & CPU_MSR_BITMAP)) { in virt_x2apic_mode_test()
6955 memset(msr_bitmap_page, 0xff, PAGE_SIZE); in virt_x2apic_mode_test()
6958 for (i = 0; i < ARRAY_SIZE(virt_x2apic_mode_tests); i++) { in virt_x2apic_mode_test()
6978 for (reg = 0; reg < PAGE_SIZE / sizeof(u32); reg += 0x10) { in virt_x2apic_mode_test()
7011 test_vmx_vmlaunch(0); in test_ctl_reg()
7016 for (i = 0; i < 64; i++) { in test_ctl_reg()
7018 /* Set a bit when the corresponding bit in fixed1 is 0 */ in test_ctl_reg()
7019 if ((fixed1 & (1ull << i)) == 0) { in test_ctl_reg()
7053 * processor's physical-address width must be 0.
7081 test_vmx_vmlaunch(0); in test_host_ctl_regs()
7094 test_vmx_vmlaunch(0); in test_efer_vmlaunch()
7151 for (i = 0; i < 4; i++) { in test_efer_bit()
7193 for (i = 0; i < 64; i++) { in test_efer()
7204 for (i = 0; i < 64; i++) { in test_efer()
7242 * IA32_EFER MSR must be 0 in the field for that register. In addition,
7257 * IA32_EFER MSR must be 0 in the field for that register. In addition,
7293 for (i = 0; i < 256; i = (i < PAT_VAL_LIMIT) ? i + 1 : i * 2) { in test_pat()
7295 for (j = 0; j < (i ? 8 : 1); j++) { in test_pat()
7300 test_vmx_vmlaunch(0); in test_pat()
7311 for (i = 0; i < 256; i = (i < PAT_VAL_LIMIT) ? i + 1 : i * 2) { in test_pat()
7313 for (j = 0; j < (i ? 8 : 1); j++) { in test_pat()
7319 if (i == 0x2 || i == 0x3 || i >= 0x8) in test_pat()
7323 error = 0; in test_pat()
7329 "Expected PAT = 0x%lx, got 0x%lx", in test_pat()
7336 error = (i == 0x2 || i == 0x3 || i >= 0x8); in test_pat()
7354 * without fault at CPL 0. Specifically, each of the 8 bytes in the
7355 * field must have one of the values 0 (UC), 1 (WC), 4 (WT), 5 (WP),
7407 id = cpuid(0xA); in valid_pgc()
7412 /* FxCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i); */ in valid_pgc()
7437 "Host state is 0x%lx (expected 0x%lx)", in test_pgc_vmlaunch()
7471 report_prefix_pushf("\"load IA32_PERF_GLOBAL_CTRL\"=0 on %s", in test_perf_global_ctrl()
7474 for (i = 0; i < 64; i++) { in test_perf_global_ctrl()
7477 report_prefix_pushf("%s = 0x%lx", name, val); in test_perf_global_ctrl()
7478 test_pgc_vmlaunch(0, VMX_VMCALL, false, host); in test_perf_global_ctrl()
7487 for (i = 0; i < 64; i++) { in test_perf_global_ctrl()
7491 report_prefix_pushf("%s = 0x%lx", name, val); in test_perf_global_ctrl()
7493 test_pgc_vmlaunch(0, VMX_VMCALL, false, host); in test_perf_global_ctrl()
7498 0, in test_perf_global_ctrl()
7503 0, in test_perf_global_ctrl()
7568 u64 mask = ~0ull; in test_vmcs_field()
7577 test_vmx_vmlaunch(0); in test_vmcs_field()
7583 	bit_on = ((1ull << i) & (val << bit_start)) ? 0 : 1; in test_vmcs_field()
7593 test_vmx_vmlaunch(0); in test_vmcs_field()
7612 test_vmx_vmlaunch(0); in test_canonical()
7635 test_vmcs_field(reg, name, 0, 2, 0x0, true, \
7639 test_vmcs_field(reg, name, 3, 15, 0x0000, false, \
7644 * RPL (bits 1:0) and the TI flag (bit 2) must be 0.
7647 * size" VM-exit control is 0.
7667 * Test that CS and TR fields can not be 0x0000 in test_host_segment_regs()
7673 * SS field can not be 0x0000 if "host address-space size" VM-exit in test_host_segment_regs()
7674 * control is 0 in test_host_segment_regs()
7677 vmcs_write(HOST_SEL_SS, 0); in test_host_segment_regs()
7678 report_prefix_pushf("HOST_SEL_SS 0"); in test_host_segment_regs()
7680 test_vmx_vmlaunch(0); in test_host_segment_regs()
7707 * If the "host address-space size" VM-exit control is 0, the following must
7709 * - The "IA-32e mode guest" VM-entry control is 0.
7710 * - Bit 17 of the CR4 field (corresponding to CR4.PCIDE) is 0.
7711 * - Bits 63:32 in the RIP field are 0.
7730 test_vmx_vmlaunch(0); in test_host_addr_size()
7736 test_vmx_vmlaunch(0); in test_host_addr_size()
7767 test_vmx_vmlaunch(0); in test_host_addr_size()
7782 vmcs_write(GUEST_RFLAGS, 0); in vmx_host_state_area_test()
7799 * the DR7 field must be 0.
7812 for (i = 0; i < 64; i++) { in test_guest_dr7()
7821 for (i = 0; i < 64; i++) { in test_guest_dr7()
7835 * without fault at CPL 0. Specifically, each of the 8 bytes in the
7836 * field must have one of the values 0 (UC), 1 (WC), 4 (WT), 5 (WP),
7854 #define MSR_IA32_BNDCFGS_RSVD_MASK 0x00000ffc
7860 * - Bits reserved in the IA32_BNDCFGS MSR must be 0.
7906 } while (0)
7917 * - TR. The TI flag (bit 2) must be 0.
7918 * - LDTR. If LDTR is usable, the TI flag (bit 2) must be 0.
7920 * guest" VM-execution control is 0, the RPL (bits 1:0) must equal
7966 cs_rpl_bits = vmcs_read(GUEST_SEL_CS) & 0x3; in test_guest_segment_sel_fields()
7968 TEST_INVALID_SEG_SEL(GUEST_SEL_SS, ((sel_saved & ~0x3) | (~cs_rpl_bits & 0x3))); in test_guest_segment_sel_fields()
7969 TEST_VALID_SEG_SEL(GUEST_SEL_SS, ((sel_saved & ~0x3) | (cs_rpl_bits & 0x3))); in test_guest_segment_sel_fields()
7975 TEST_INVALID_SEG_SEL(GUEST_SEL_SS, ((sel_saved & ~0x3) | (~cs_rpl_bits & 0x3))); in test_guest_segment_sel_fields()
7976 TEST_VALID_SEG_SEL(GUEST_SEL_SS, ((sel_saved & ~0x3) | (cs_rpl_bits & 0x3))); in test_guest_segment_sel_fields()
7982 TEST_VALID_SEG_SEL(GUEST_SEL_SS, ((sel_saved & ~0x3) | (~cs_rpl_bits & 0x3))); in test_guest_segment_sel_fields()
7983 TEST_VALID_SEG_SEL(GUEST_SEL_SS, ((sel_saved & ~0x3) | (cs_rpl_bits & 0x3))); in test_guest_segment_sel_fields()
7990 TEST_VALID_SEG_SEL(GUEST_SEL_SS, ((sel_saved & ~0x3) | (~cs_rpl_bits & 0x3))); in test_guest_segment_sel_fields()
7991 TEST_VALID_SEG_SEL(GUEST_SEL_SS, ((sel_saved & ~0x3) | (cs_rpl_bits & 0x3))); in test_guest_segment_sel_fields()
8006 test_guest_state("seg.BASE[63:32] != 0, usable = " xstr(xfail), \
8010 } while (0)
8019 } while (0)
8120 test_guest_state("GDT.limit > 0xffff", true, tmp, "GUEST_LIMIT_GDTR"); in vmx_guest_state_area_test()
8128 test_guest_state("IDT.limit > 0xffff", true, tmp, "GUEST_LIMIT_IDTR"); in vmx_guest_state_area_test()
8212 vmcs_write(~0u, 0); in try_vmentry_in_movss_shadow()
8216 "mov 0f, %%rax;" in try_vmentry_in_movss_shadow()
8225 "0: lahf;" in try_vmentry_in_movss_shadow()
8235 report(flags == expected_flags, "RFLAGS[8:0] is %x (actual %x)", in try_vmentry_in_movss_shadow()
8260 vmcs_write(GUEST_RFLAGS, 0); in vmentry_movss_shadow_test()
8292 * Ensure that the L1 LDTR is set to 0 on VM-exit.
8296 const u8 ldt_ar = 0x82; /* Present LDT */ in vmx_ldtr_test()
8300 set_gdt_entry(sel, 0, 0, ldt_ar, 0); in vmx_ldtr_test()
8318 report(!sel, "Expected 0 for L1 LDTR selector (got %x)", sel); in vmx_ldtr_test()
8329 u32 ctrls[2] = {0}; in vmx_cr_load_test()
8348 cr3 = orig_cr3 | 0x1; in vmx_cr_load_test()
8416 if (ctrls[0]) { in vmx_cr_load_test()
8417 vmcs_write(CPU_EXEC_CTRL0, ctrls[0]); in vmx_cr_load_test()
8420 ctrls[0] = vmcs_read(CPU_EXEC_CTRL0); in vmx_cr_load_test()
8492 int ipi_vector = 0xf1; in vmx_pending_event_test_core()
8511 0); in vmx_pending_event_test_core()
8579 if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) { in vmx_nmi_window_test()
8584 vmx_window_test_db_count = 0; in vmx_nmi_window_test()
8710 if (!(ctrl_cpu_rev[0].clr & CPU_INTR_WINDOW)) { in vmx_intr_window_test()
8800 report_prefix_push("active, RFLAGS.IF = 0"); in vmx_intr_window_test()
8862 if (!(ctrl_cpu_rev[0].clr & CPU_USE_TSC_OFFSET)) { in vmx_store_tsc_test()
8900 vmcs_write(PREEMPT_TIMER_VALUE, 0); in vmx_preemption_timer_zero_activate_preemption_timer()
8915 vmcs_write(EXC_BITMAP, intercept_db ? 1 << DB_VECTOR : 0); in vmx_preemption_timer_zero_inject_db()
8933 "Exit reason is 0x%x (expected 0x%x) and guest RIP is %lx (0x%lx expected).", in vmx_preemption_timer_zero_expect_preempt_at_rip()
8965 vmx_set_test_stage(0); in vmx_preemption_timer_zero_test()
8966 vmx_preemption_timer_zero_inject_db(0); in vmx_preemption_timer_zero_test()
8985 vmx_preemption_timer_zero_set_pending_dbg(0); in vmx_preemption_timer_zero_test()
8997 report(reason == VMX_EXC_NMI, "Exit reason is 0x%x (expected 0x%x)", in vmx_preemption_timer_zero_test()
9046 "0: wbinvd\n\t" in vmx_preemption_timer_tf_test_guest()
9047 "1: jmp 0b\n\t" in vmx_preemption_timer_tf_test_guest()
9090 for (i = 0; i < 10000; i++) { in vmx_preemption_timer_tf_test()
9119 while (vmx_get_test_stage() == 0) in vmx_preemption_timer_expiry_test_guest()
9130 * IA32_VMX_MISC[4:0]). If the timer counts down to zero in any state
9171 vmx_set_test_stage(0); in vmx_preemption_timer_expiry_test()
9232 vmcs_write(GUEST_PENDING_DEBUG, 0); in dismiss_db()
9258 report_xfail(xfail_pdbg, 0 == guest_pending_dbg, in check_db_exit()
9259 "Expected pending debug exceptions 0 (actual %lx)", in check_db_exit()
9329 single_step_guest("Hardware delivered single-step", starting_dr6, 0); in vmx_db_test()
9350 single_step_guest("Software synthesized single-step", starting_dr6, 0); in vmx_db_test()
9384 "transactional region", starting_dr6, 0); in vmx_db_test()
9407 vmcs_write(EOI_EXIT_BITMAP0, 0x0); in enable_vid()
9408 vmcs_write(EOI_EXIT_BITMAP1, 0x0); in enable_vid()
9409 vmcs_write(EOI_EXIT_BITMAP2, 0x0); in enable_vid()
9410 vmcs_write(EOI_EXIT_BITMAP3, 0x0); in enable_vid()
9435 ioapic_set_redir(0xf, 0x79, TRIGGER_LEVEL); in trigger_ioapic_scan_thread()
9455 handle_irq(0x79, irq_79_handler_guest); in vmx_eoi_bitmap_ioapic_scan_test_guest()
9471 asm volatile ("int $0x79"); in vmx_eoi_bitmap_ioapic_scan_test_guest()
9500 #define HLT_WITH_RVI_VECTOR (0xf1)
9547 /* Set irq-line 0xf to raise vector 0x78 for vCPU 0 */ in set_irq_line_thread()
9548 ioapic_set_redir(0xf, 0x78, TRIGGER_LEVEL); in set_irq_line_thread()
9555 set_irq_line(0xf, 0); in irq_78_handler_guest()
9564 handle_irq(0x78, irq_78_handler_guest); in vmx_apic_passthrough_guest()
9574 set_irq_line(0xf, 1); in vmx_apic_passthrough_guest()
9590 u64 cpu_ctrl_1 = 0; in vmx_apic_passthrough()
9604 ioapic_set_redir(0xf, 0x78, TRIGGER_LEVEL); in vmx_apic_passthrough()
9613 TEST_ASSERT_EQ_MSG(1, (int)ioapic_read_redir(0xf).remote_irr, in vmx_apic_passthrough()
9620 TEST_ASSERT_EQ_MSG(0, (int)ioapic_read_redir(0xf).remote_irr, in vmx_apic_passthrough()
9621 "IOAPIC pass-through: remote_irr=0 after EOI"); in vmx_apic_passthrough()
9641 apic_set_tpr(0); in vmx_apic_passthrough_tpr_threshold_guest()
9653 int ipi_vector = 0xe1; in vmx_apic_passthrough_tpr_threshold_test()
9663 0); in vmx_apic_passthrough_tpr_threshold_test()
9668 report(apic_get_tpr() == 0, "TPR was zero by guest"); in vmx_apic_passthrough_tpr_threshold_test()
9774 vmx_set_test_stage(0); in vmx_init_signal_test()
9865 if (apic_id() == 0) { in vmx_sipi_test_guest()
9909 u64 cpu_ctrl_1 = 0; in sipi_test_ap_thread()
9975 u64 cpu_ctrl_1 = 0; in vmx_sipi_signal_test()
9991 vmx_set_test_stage(0); in vmx_sipi_signal_test()
10017 asm volatile ("vmread %2, %1; pushf; pop %0" in vmread_flags()
10026 asm volatile ("vmwrite %1, %2; pushf; pop %0" in vmwrite_flags()
10046 c->flags = vmwrite_flags(c->field, 0); in vmx_vmcs_shadow_test_guest()
10087 vmcs_write(VMX_INST_ERROR, 0); in vmcs_shadow_test_access()
10089 c->reason = vmcs_read(EXI_REASON) & 0xffff; in vmcs_shadow_test_access()
10181 report(value == 0, in vmcs_shadow_test_field()
10183 0ul); in vmcs_shadow_test_field()
10209 report(value == 0, in vmcs_shadow_test_field()
10211 0ul); in vmcs_shadow_test_field()
10225 report(c->value == 0, in vmcs_shadow_test_field()
10227 c->value, 0ul); in vmcs_shadow_test_field()
10246 for (base = 0; in vmx_vmcs_shadow_test_body()
10249 for (index = 0; index <= highest_index; index++) in vmx_vmcs_shadow_test_body()
10265 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)) { in vmx_vmcs_shadow_test()
10325 * with an effective TSC value of 0.
10337 for (i = 0; i < RDTSC_DIFF_ITERS; i++) in rdtsc_vmexit_diff_test_guest()
10349 TEST_ASSERT(!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) || in host_time_to_guest_time()
10365 host_to_guest_tsc = host_time_to_guest_time(exit_msr_store[0].value); in rdtsc_vmexit_diff_test_iteration()
10373 int fail = 0; in rdtsc_vmexit_diff_test()
10376 if (!(ctrl_cpu_rev[0].clr & CPU_USE_TSC_OFFSET)) in rdtsc_vmexit_diff_test()
10391 exit_msr_store[0].index = MSR_IA32_TSC; in rdtsc_vmexit_diff_test()
10395 for (i = 0; i < RDTSC_DIFF_ITERS && fail < RDTSC_DIFF_FAILS; i++) { in rdtsc_vmexit_diff_test()
10417 preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F; in invalid_msr_init()
10423 vmcs_write(ENTER_MSR_LD_ADDR, (u64)0x13370000); in invalid_msr_init()
10469 for (i = 0; i < count; i++) { in populate_msr_list()
10471 msr_list[i].reserved = 0; in populate_msr_list()
10472 msr_list[i].value = 0x1234567890abcdef; in populate_msr_list()
10475 memset(msr_list + count, 0xff, in populate_msr_list()
10548 vmcs_write(ENT_MSR_LD_CNT, 0); in atomic_switch_msrs_test()
10549 vmcs_write(EXI_MSR_LD_CNT, 0); in atomic_switch_msrs_test()
10550 vmcs_write(EXI_MSR_ST_CNT, 0); in atomic_switch_msrs_test()
10551 for (i = 0; i < cleanup_count; i++) { in atomic_switch_msrs_test()
10609 regs.rax = efer & 0xffffffff; in __vmx_pf_exception_test()
10613 efer = regs.rdx << 32 | (regs.rax & 0xffffffff); in __vmx_pf_exception_test()
10617 cpuid = (struct cpuid) {0, 0, 0, 0}; in __vmx_pf_exception_test()
10629 "Unexpected exit to L1, exit_reason: %s (0x%lx)", in __vmx_pf_exception_test()
10679 if (*vpid == 0) { in invalidate_tlb_new_vpid()
10681 invvpid(INVVPID_ALL, 0, 0); in invalidate_tlb_new_vpid()
10706 __vmx_pf_vpid_test(invalidate_tlb_invvpid_addr, 0xaaaa); in vmx_pf_invvpid_test()
10725 run_in_user(generate_usermode_ac, AC_VECTOR, 0, 0, 0, 0, &hit_ac); in vmx_l2_ac_test()
10800 for (i = 0; i < ARRAY_SIZE(vmx_exception_tests); i++) { in vmx_exception_test()
10821 #define TEST_DIRECT_VALUE 0xff45454545000000
10822 #define TEST_VMCS_VALUE 0xff55555555000000
10860 assert(0); in get_host_value()
10863 return 0; in get_host_value()
10880 return 0; in set_host_value()
10892 set_gdt_entry(FIRST_SPARE_SEL, value, 0x200, 0x89, 0); in set_host_value()
10895 assert(0); in set_host_value()
10901 u64 value = 0; in test_host_value_direct()
10910 report_fail("Exception %d when setting %s to 0x%lx via direct write", in test_host_value_direct()
10920 "%s: HOST value set to 0x%lx (wanted 0x%lx) via direct write", in test_host_value_direct()
10926 u64 value = 0; in test_host_value_vmcs()
10940 "%s: HOST value set to 0x%lx (wanted 0x%lx) via VMLAUNCH/VMRESUME", in test_host_value_vmcs()
11011 u32 page_offset = (0x200 | ((nr & 0xE0) >> 1)) / sizeof(u32); in set_virr_bit()
11012 u32 mask = 1 << (nr & 0x1f); in set_virr_bit()
11019 u32 page_offset = (0x200 | ((nr & 0xE0) >> 1)) / sizeof(u32); in clear_virr_bit()
11020 u32 mask = 1 << (nr & 0x1f); in clear_virr_bit()
11027 u32 page_offset = (0x200 | ((nr & 0xE0) >> 1)) / sizeof(u32); in get_virr_bit()
11028 u32 mask = 1 << (nr & 0x1f); in get_virr_bit()
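Note: the three helpers above index the virtual-APIC IRR: (nr & 0xE0) >> 1 equals (nr / 32) * 0x10, the stride between the eight 32-bit IRR registers that start at page offset 0x200, and nr & 0x1f picks the bit within the register. An equivalent, more explicit computation (names are hypothetical; this returns a byte offset where the originals index a u32 array):

#include <stdint.h>

/* Locate vector 'nr' in the virtual-APIC IRR (offsets 0x200-0x270). */
static void virr_locate(uint8_t nr, uint32_t *byte_offset, uint32_t *mask)
{
	*byte_offset = 0x200 + (nr / 32) * 0x10; /* one register per 32 vectors */
	*mask = 1u << (nr % 32);                 /* bit within that register */
}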
11105 for (nr = 0x21; nr < 0x100; nr++) { in set_isrs_for_vmx_basic_vid_test()
11106 vmcs_write(GUEST_INT_STATUS, 0); in set_isrs_for_vmx_basic_vid_test()
11110 args->isr_exec_cnt = 0; in set_isrs_for_vmx_basic_vid_test()
11144 u16 rvi_want = isr_exec_cnt_want ? 0 : nr; in test_basic_vid()
11149 * IF "interrupt-window exiting" is 0 AND in test_basic_vid()
11158 * delivery, sets VPPR to VTPR, when SVI is 0. in test_basic_vid()
11160 args->isr_exec_cnt = 0; in test_basic_vid()
11166 vmcs_write(GUEST_INT_STATUS, 0); in test_basic_vid()
11168 set_vtpr(0); in test_basic_vid()
11173 set_vtpr(0xff); in test_basic_vid()
11178 vmcs_write(GUEST_INT_STATUS, 0); in test_basic_vid()
11232 for (nr_sub_class = 0; nr_sub_class < 16; nr_sub_class++) { in vmx_basic_vid_test()
11241 if (nr == 0x20) in vmx_basic_vid_test()
11244 test_basic_vid(nr, /*tpr=*/0, VID_OP_SELF_IPI, in vmx_basic_vid_test()
11247 for (tpr = 0; tpr < 256; tpr++) { in vmx_basic_vid_test()
11250 task_priority_class(tpr) ? 1 : 0; in vmx_basic_vid_test()
11259 report(true, "TPR 0-255 for vector 0x%x.", nr); in vmx_basic_vid_test()
11274 test_basic_vid(nr, /*tpr=*/0, VID_OP_NOP, /*isr_exec_cnt_want=*/2, in test_eoi_virt()
11297 for (nr = 0x22; nr < 0x100; nr++) { in vmx_eoi_virt_test()
11298 for (lo_pri_nr = 0x21; lo_pri_nr < nr; lo_pri_nr++) in vmx_eoi_virt_test()
11302 report(true, "Low priority nrs 0x21-0x%x for nr 0x%x.", in vmx_eoi_virt_test()
11307 vmcs_write(EOI_EXIT_BITMAP0, GENMASK_ULL(63, 0)); in vmx_eoi_virt_test()
11308 vmcs_write(EOI_EXIT_BITMAP1, GENMASK_ULL(63, 0)); in vmx_eoi_virt_test()
11309 vmcs_write(EOI_EXIT_BITMAP2, GENMASK_ULL(63, 0)); in vmx_eoi_virt_test()
11310 vmcs_write(EOI_EXIT_BITMAP3, GENMASK_ULL(63, 0)); in vmx_eoi_virt_test()
11311 for (nr = 0x22; nr < 0x100; nr++) { in vmx_eoi_virt_test()
11312 for (lo_pri_nr = 0x21; lo_pri_nr < nr; lo_pri_nr++) in vmx_eoi_virt_test()
11317 "Low priority nrs 0x21-0x%x for nr 0x%x, with induced EOI exits.", in vmx_eoi_virt_test()
11352 for (class = 0; class < 16; class++) { in vmx_posted_interrupts_test()
11364 VID_OP_SPIN_IRR, 0, false); in vmx_posted_interrupts_test()
11372 	report(true, "Posted vectors 33-255 cross TPR classes 0-0xf, running and sometimes halted\n"); in vmx_posted_interrupts_test()
11383 { "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
11384 { "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
11386 preemption_timer_exit_handler, NULL, {0} },
11388 test_ctrl_pat_exit_handler, NULL, {0} },
11390 test_ctrl_efer_exit_handler, NULL, {0} },
11392 cr_shadowing_exit_handler, NULL, {0} },
11394 NULL, {0} },
11396 insn_intercept_exit_handler, NULL, {0} },
11397 { "EPT A/D disabled", ept_init, ept_main, ept_exit_handler, NULL, {0} },
11398 { "EPT A/D enabled", eptad_init, eptad_main, eptad_exit_handler, NULL, {0} },
11399 { "PML", pml_init, pml_main, pml_exit_handler, NULL, {0} },
11401 interrupt_exit_handler, NULL, {0} },
11403 nmi_hlt_exit_handler, NULL, {0} },
11405 NULL, {0} },
11407 msr_switch_exit_handler, NULL, {0}, msr_switch_entry_failure },
11408 { "vmmcall", vmmcall_init, vmmcall_main, vmmcall_exit_handler, NULL, {0} },
11410 disable_rdtscp_exit_handler, NULL, {0} },
11412 exit_monitor_from_l2_handler, NULL, {0} },
11414 invalid_msr_exit_handler, NULL, {0}, invalid_msr_entry_failure},
11492 { NULL, NULL, NULL, NULL, NULL, {0} },