Lines matching +full:0 +full:x8ff: full-text hits for "0x8ff" (the indexer tokenizes the literal as "0" and "x8ff") in arch/x86/kvm/vmx/nested.c. Each hit shows its source line number, the matched line, and the enclosing function.

24 static bool __read_mostly nested_early_check = 0;
73 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); in init_vmcs_shadow_fields()
74 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); in init_vmcs_shadow_fields()
76 for (i = j = 0; i < max_shadow_read_only_fields; i++) { in init_vmcs_shadow_fields()
97 for (i = j = 0; i < max_shadow_read_write_fields; i++) { in init_vmcs_shadow_fields()
242 hv_vcpu->nested.vm_id = 0; in nested_release_evmcs()
243 hv_vcpu->nested.vp_id = 0; in nested_release_evmcs()
316 vcpu->arch.regs_dirty = 0; in vmx_switch_vmcs()
390 unsigned long roots = 0; in nested_ept_invalidate_addr()
396 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { in nested_ept_invalidate_addr()
436 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification); in nested_ept_inject_page_fault()
475 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; in nested_vmx_is_page_fault_vmexit()
504 return 0; in nested_vmx_check_io_bitmap_controls()
510 return 0; in nested_vmx_check_io_bitmap_controls()
517 return 0; in nested_vmx_check_msr_bitmap_controls()
522 return 0; in nested_vmx_check_msr_bitmap_controls()
529 return 0; in nested_vmx_check_tpr_shadow_controls()
534 return 0; in nested_vmx_check_tpr_shadow_controls()
557 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { in enable_x2apic_msr_intercepts()
560 msr_bitmap[word] = ~0; in enable_x2apic_msr_intercepts()
561 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; in enable_x2apic_msr_intercepts()
642 * L0 need not intercept reads for MSRs between 0x800 in nested_vmx_prepare_msr_bitmap()
643 * and 0x8ff, it just lets the processor take the value in nested_vmx_prepare_msr_bitmap()
647 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { in nested_vmx_prepare_msr_bitmap()
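Note: the two loops above (557-561 and 642-647) walk the x2APIC MSR range 0x800-0x8ff one bitmap word at a time. A minimal standalone sketch of the same indexing, assuming the usual VMX layout in which the write-intercept half of the 4 KiB MSR bitmap starts 0x800 bytes after the read half (the function name is illustrative, mirroring the kernel's enable_x2apic_msr_intercepts):

    #define BITS_PER_LONG (8 * sizeof(long))

    /* Force read and write intercepts for MSRs 0x800..0x8ff. */
    static void intercept_x2apic_msrs(unsigned long *msr_bitmap)
    {
        unsigned long msr;

        for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
            unsigned long word = msr / BITS_PER_LONG;

            msr_bitmap[word] = ~0UL;                          /* reads  */
            msr_bitmap[word + (0x800 / sizeof(long))] = ~0UL; /* writes */
        }
    }
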
756 return 0; in nested_vmx_check_apic_access_controls()
766 return 0; in nested_vmx_check_apicv_controls()
788 * bits 5:0 of posted_intr_desc_addr should be zero. in nested_vmx_check_apicv_controls()
793 CC((vmcs12->posted_intr_nv & 0xff00)) || in nested_vmx_check_apicv_controls()
801 return 0; in nested_vmx_check_apicv_controls()
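Note: the checks at 788-801 validate the posted-interrupt fields L1 supplied: the descriptor must be 64-byte aligned ("bits 5:0 ... should be zero") and the notification vector must fit in one byte. A sketch of the two predicates with the kernel's CC() tracing macro dropped (the helper name is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    /* Descriptor must be 64-byte aligned; vector must be 8 bits. */
    static bool posted_intr_fields_valid(uint64_t desc_addr, uint16_t nv)
    {
        if (desc_addr & 0x3f)   /* bits 5:0 must be zero */
            return false;
        if (nv & 0xff00)        /* bits 15:8 must be zero */
            return false;
        return true;
    }
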
807 if (count == 0) in nested_vmx_check_msr_switch()
808 return 0; in nested_vmx_check_msr_switch()
814 return 0; in nested_vmx_check_msr_switch()
828 return 0; in nested_vmx_check_exit_msr_switch_controls()
839 return 0; in nested_vmx_check_entry_msr_switch_controls()
846 return 0; in nested_vmx_check_pml_controls()
852 return 0; in nested_vmx_check_pml_controls()
861 return 0; in nested_vmx_check_unrestricted_guest_controls()
870 return 0; in nested_vmx_check_mode_based_ept_exec_controls()
877 return 0; in nested_vmx_check_shadow_vmcs_controls()
883 return 0; in nested_vmx_check_shadow_vmcs_controls()
890 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)) in nested_vmx_msr_check_common()
895 if (CC(e->reserved != 0)) in nested_vmx_msr_check_common()
897 return 0; in nested_vmx_msr_check_common()
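Note: line 890 rejects MSR-list entries that target the x2APIC window when x2APIC is enabled; "e->index >> 8 == 0x8" is just a compact range test, as this small illustrative helper shows:

    #include <stdbool.h>
    #include <stdint.h>

    /* True exactly for MSRs 0x800..0x8ff, the x2APIC register window. */
    static bool is_x2apic_msr(uint32_t index)
    {
        return (index >> 8) == 0x8;
    }
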
908 return 0; in nested_vmx_load_msr_check()
917 return 0; in nested_vmx_store_msr_check()
931 * return 0 for success, entry index for failure.
944 for (i = 0; i < count; i++) { in nested_vmx_load_msr()
951 "%s cannot read MSR entry (%u, 0x%08llx)\n", in nested_vmx_load_msr()
957 "%s check failed (%u, 0x%x, 0x%x)\n", in nested_vmx_load_msr()
963 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", in nested_vmx_load_msr()
968 return 0; in nested_vmx_load_msr()
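Note: per the comment at 931, nested_vmx_load_msr() returns 0 on success and the 1-based index of the offending list entry on failure, which the caller can report in the VM-entry failure exit qualification. A simplified sketch of that convention (the struct and callback are assumptions, not the kernel's types):

    #include <stdint.h>

    struct msr_entry { uint32_t index; uint64_t value; };

    /* 0 on success, 1-based index of the first rejected entry on failure. */
    static uint32_t load_msr_list(const struct msr_entry *e, uint32_t count,
                                  int (*do_wrmsr)(uint32_t idx, uint64_t val))
    {
        uint32_t i;

        for (i = 0; i < count; i++) {
            if (do_wrmsr(e[i].index, e[i].value))
                return i + 1;
        }
        return 0;
    }
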
989 if (i >= 0) { in nested_vmx_get_vmexit_msr_value()
998 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__, in nested_vmx_get_vmexit_msr_value()
1012 "%s cannot read MSR entry (%u, 0x%08llx)\n", in read_and_check_msr_entry()
1018 "%s check failed (%u, 0x%x, 0x%x)\n", in read_and_check_msr_entry()
1032 for (i = 0; i < count; i++) { in nested_vmx_store_msr()
1047 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", in nested_vmx_store_msr()
1052 return 0; in nested_vmx_store_msr()
1063 for (i = 0; i < count; i++) { in nested_msr_store_list_has_msr()
1084 in_autostore_list = msr_autostore_slot >= 0; in prepare_vmx_msr_autostore_list()
1143 return 0; in nested_vmx_load_cr3()
1256 return 0; in vmx_restore_vmx_basic()
1299 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) in vmx_restore_control_msr()
1302 /* Check must-be-0 bits are still 0. */ in vmx_restore_control_msr()
1309 return 0; in vmx_restore_control_msr()
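Note: the pair of checks at 1299-1302 validates a userspace-restored VMX control MSR: the low 32 bits advertise "allowed-0" (must-be-1) settings, the high 32 bits "allowed-1" settings, and the new value may not promise more than hardware supports in either half. A standalone reading of that logic; the kernel's is_bitwise_subset() is spelled differently but carries the same meaning here:

    #include <stdbool.h>
    #include <stdint.h>

    #define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    /* True iff every bit of @subset inside @mask is also set in @superset. */
    static bool is_bitwise_subset(uint64_t superset, uint64_t subset,
                                  uint64_t mask)
    {
        return !(subset & mask & ~superset);
    }

    static bool control_msr_valid(uint64_t supported, uint64_t data)
    {
        /* Check must-be-1 bits are still 1. */
        if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
            return false;
        /* Check must-be-0 bits are still 0. */
        if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
            return false;
        return true;
    }
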
1344 return 0; in vmx_restore_vmx_misc()
1358 return 0; in vmx_restore_vmx_ept_vpid_cap()
1385 return 0; in vmx_restore_fixed0_msr()
1391 * Returns 0 on success, non-0 otherwise.
1415 * If userspace wants to emulate VMX_BASIC[55]=0, userspace in vmx_set_vmx_msr()
1443 return 0; in vmx_set_vmx_msr()
1448 return 0; in vmx_set_vmx_msr()
1457 /* Returns 0 on success, non-0 otherwise. */
1532 return 0; in vmx_get_vmx_msr()
1558 for (i = 0; i < max_shadow_read_write_fields; i++) { in copy_shadow_to_vmcs12()
1591 for (q = 0; q < ARRAY_SIZE(fields); q++) { in copy_vmcs12_to_shadow()
1592 for (i = 0; i < max_fields[q]; i++) { in copy_vmcs12_to_shadow()
2069 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is in nested_vmx_handle_enlightened_vmptrld()
2078 * CPUID.0x4000000A.EAX[0:15]. in nested_vmx_handle_enlightened_vmptrld()
2101 memset(vmcs12, 0, sizeof(*vmcs12)); in nested_vmx_handle_enlightened_vmptrld()
2173 if (preemption_timeout == 0) { in vmx_start_preemption_timer()
2178 if (vcpu->arch.virtual_tsc_khz == 0) in vmx_start_preemption_timer()
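Note: lines 2173-2178 handle the degenerate cases (a zero timeout fires immediately; no virtual TSC frequency means nothing to arm) before converting preemption-timer ticks to an hrtimer deadline. The timer counts TSC cycles divided by 2^rate, where KVM advertises rate = 5 in IA32_VMX_MISC; a sketch of the conversion, matching the "<< rate, * 1000000, / tsc_khz" arithmetic the kernel uses:

    #include <stdint.h>

    /* Convert preemption-timer ticks to nanoseconds.  One tick is
     * 2^rate TSC cycles; tsc_khz is the virtual TSC frequency. */
    static uint64_t preemption_timeout_ns(uint64_t timeout, unsigned int rate,
                                          uint32_t tsc_khz)
    {
        if (timeout == 0 || tsc_khz == 0)
            return 0;   /* fire now / nothing meaningful to compute */
        return (timeout << rate) * 1000000ull / tsc_khz;
    }
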
2221 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); in prepare_vmcs02_constant_state()
2225 vmcs_write64(VM_FUNCTION_CONTROL, 0); in prepare_vmcs02_constant_state()
2239 vmcs_write64(PML_ADDRESS, 0); in prepare_vmcs02_constant_state()
2437 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); in prepare_vmcs02_early()
2484 vmx->segment_cache.bitmask = 0; in prepare_vmcs02_rare()
2521 * setting MASK=MATCH=0 and (see below) EB.PF=1. in prepare_vmcs02_rare()
2524 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when in prepare_vmcs02_rare()
2532 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); in prepare_vmcs02_rare()
2533 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); in prepare_vmcs02_rare()
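Note: the writes at 2532-2533 implement the trick described in the comment above them: with PAGE_FAULT_ERROR_CODE_MASK = MATCH = 0, the hardware test "(PFEC & MASK) == MATCH" is always true, so exception-bitmap bit 14 alone decides whether a #PF exits. A rough model of the SDM's decision rule (a #PF exits iff EB[14] equals the mask/match result):

    #include <stdbool.h>
    #include <stdint.h>

    #define PF_VECTOR 14

    /* With mask = match = 0 this reduces to "exit iff EB[14] is set". */
    static bool pf_causes_vmexit(uint32_t exception_bitmap, uint32_t pfec_mask,
                                 uint32_t pfec_match, uint32_t error_code)
    {
        bool eb_pf = exception_bitmap & (1u << PF_VECTOR);
        bool match = (error_code & pfec_mask) == pfec_match;

        return eb_pf == match;
    }
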
2567 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2707 return 0; in prepare_vmcs02()
2720 return 0; in nested_vmx_check_nmi_controls()
2756 if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) in nested_vmx_check_eptp()
2824 return 0; in nested_check_vm_execution_controls()
2841 return 0; in nested_check_vm_exit_controls()
2882 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) in nested_check_vm_entry_controls()
2907 CC(vmcs12->vm_entry_instruction_len == 0 && in nested_check_vm_entry_controls()
2916 return 0; in nested_check_vm_entry_controls()
2932 return 0; in nested_vmx_check_controls()
2943 return 0; in nested_vmx_check_address_space_size()
2986 CC(vmcs12->host_cs_selector == 0) || in nested_vmx_check_host_state()
2987 CC(vmcs12->host_tr_selector == 0) || in nested_vmx_check_host_state()
2988 CC(vmcs12->host_ss_selector == 0 && !ia32e)) in nested_vmx_check_host_state()
3001 * IA32_EFER MSR must be 0 in the field for that register. In addition, in nested_vmx_check_host_state()
3012 return 0; in nested_vmx_check_host_state()
3023 return 0; in nested_vmx_check_vmcs_link_ptr()
3042 return 0; in nested_vmx_check_vmcs_link_ptr()
3055 return 0; in nested_check_guest_non_reg_state()
3098 * - Bits reserved in the IA32_EFER MSR must be 0. in nested_vmx_check_guest_state()
3121 return 0; in nested_vmx_check_guest_state()
3131 return 0; in nested_vmx_check_vmentry_hw()
3134 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); in nested_vmx_check_vmentry_hw()
3136 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); in nested_vmx_check_vmentry_hw()
3148 vmcs_writel(GUEST_RFLAGS, 0); in nested_vmx_check_vmentry_hw()
3199 return 0; in nested_vmx_check_vmentry_hw()
3261 vcpu->run->internal.ndata = 0; in nested_get_vmcs12_pages()
3335 vcpu->run->internal.ndata = 0; in vmx_get_nested_state_pages()
3354 return 0; in nested_vmx_write_pml_buffer()
3365 return 0; in nested_vmx_write_pml_buffer()
3372 gpa &= ~0xFFFull; in nested_vmx_write_pml_buffer()
3377 return 0; in nested_vmx_write_pml_buffer()
3381 return 0; in nested_vmx_write_pml_buffer()
3395 return 0; in nested_vmx_check_permission()
3399 kvm_inject_gp(vcpu, 0); in nested_vmx_check_permission()
3400 return 0; in nested_vmx_check_permission()
3411 return ((rvi & 0xf0) > (vppr & 0xf0)); in vmx_has_apicv_interrupt()
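Note: line 3411 is the whole of vmx_has_apicv_interrupt()'s decision: APIC priority lives in the upper nibble of a vector, and a pending interrupt (RVI) is deliverable only if its priority class strictly exceeds the processor priority (VPPR). Restated as a standalone helper:

    #include <stdbool.h>
    #include <stdint.h>

    /* Priority class is bits 7:4; equal classes are not deliverable. */
    static bool apicv_interrupt_deliverable(uint8_t rvi, uint8_t vppr)
    {
        return (rvi & 0xf0) > (vppr & 0xf0);
    }
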
3710 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3715 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3725 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3727 return 0; in nested_vmx_run()
3798 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3833 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3867 return 0; in vmx_complete_nested_posted_interrupt()
3875 return 0; in vmx_complete_nested_posted_interrupt()
3886 if ((u8)max_irr > ((u8)status & 0xff)) { in vmx_complete_nested_posted_interrupt()
3887 status &= ~0xff; in vmx_complete_nested_posted_interrupt()
3894 return 0; in vmx_complete_nested_posted_interrupt()
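Note: the hits at 3886-3887 update the low byte of GUEST_INTR_STATUS, which holds RVI (the requesting virtual interrupt), and only when the newly found vector outranks the recorded one. A sketch of that read-modify-write (the helper name is illustrative):

    #include <stdint.h>

    /* Raise RVI (low byte of the 16-bit guest interrupt status) if
     * max_irr is a higher vector than the one currently recorded. */
    static uint16_t update_rvi(uint16_t status, uint8_t max_irr)
    {
        if (max_irr > (uint8_t)(status & 0xff)) {
            status &= ~0xffu;
            status |= max_irr;
        }
        return status;
    }
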
3917 exit_qual = 0; in nested_vmx_inject_exception_vmexit()
3970 return 0; in vmx_get_pending_dbg_trap()
4124 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); in vmx_check_nested_events()
4128 return 0; in vmx_check_nested_events()
4138 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0, in vmx_check_nested_events()
4139 apic->sipi_vector & 0xFFUL); in vmx_check_nested_events()
4140 return 0; in vmx_check_nested_events()
4161 return 0; in vmx_check_nested_events()
4175 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); in vmx_check_nested_events()
4176 return 0; in vmx_check_nested_events()
4184 return 0; in vmx_check_nested_events()
4196 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); in vmx_check_nested_events()
4197 return 0; in vmx_check_nested_events()
4214 INTR_INFO_VALID_MASK, 0); in vmx_check_nested_events()
4219 vcpu->arch.nmi_pending = 0; in vmx_check_nested_events()
4221 return 0; in vmx_check_nested_events()
4229 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); in vmx_check_nested_events()
4230 return 0; in vmx_check_nested_events()
4243 if (ktime_to_ns(remaining) <= 0) in vmx_get_preemption_timer_value()
4244 return 0; in vmx_get_preemption_timer_value()
4526 vmx_set_interrupt_shadow(vcpu, 0); in load_vmcs12_host_state()
4558 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); in load_vmcs12_host_state()
4559 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); in load_vmcs12_host_state()
4563 vmcs_write64(GUEST_BNDCFGS, 0); in load_vmcs12_host_state()
4577 .base = 0, in load_vmcs12_host_state()
4578 .limit = 0xFFFFFFFF, in load_vmcs12_host_state()
4591 .base = 0, in load_vmcs12_host_state()
4592 .limit = 0xFFFFFFFF, in load_vmcs12_host_state()
4613 .limit = 0x67, in load_vmcs12_host_state()
4620 memset(&seg, 0, sizeof(seg)); in load_vmcs12_host_state()
4624 kvm_set_dr(vcpu, 7, 0x400); in load_vmcs12_host_state()
4625 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); in load_vmcs12_host_state()
4645 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { in nested_vmx_get_vmcs01_guest_efer()
4718 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { in nested_vmx_restore_host_state()
4722 "%s read MSR index failed (%u, 0x%08llx)\n", in nested_vmx_restore_host_state()
4727 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { in nested_vmx_restore_host_state()
4731 "%s read MSR failed (%u, 0x%08llx)\n", in nested_vmx_restore_host_state()
4742 "%s check failed (%u, 0x%x, 0x%x)\n", in nested_vmx_restore_host_state()
4749 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", in nested_vmx_restore_host_state()
4913 WARN_ON(irq < 0); in nested_vmx_vmexit()
4948 vmx->fail = 0; in nested_vmx_vmexit()
4954 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); in nested_vmx_triple_fault()
4960 * On success, returns 0. When the operand is invalid, returns 1 and throws
4982 int index_reg = (vmx_instruction_info >> 18) & 0xf; in get_vmx_mem_address()
4984 int base_reg = (vmx_instruction_info >> 23) & 0xf; in get_vmx_mem_address()
4997 else if (addr_size == 0) in get_vmx_mem_address()
5012 off &= 0xffffffff; in get_vmx_mem_address()
5013 else if (addr_size == 0) /* 16 bit */ in get_vmx_mem_address()
5014 off &= 0xffff; in get_vmx_mem_address()
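Note: lines 5012-5014 truncate the computed offset to the instruction's address size, which the VM-exit instruction-information field encodes as 0 = 16-bit, 1 = 32-bit, 2 = 64-bit. As a standalone helper:

    #include <stdint.h>

    /* addr_size encoding from VMX instruction info: 0=16, 1=32, 2=64 bit. */
    static uint64_t truncate_to_addr_size(uint64_t off, unsigned int addr_size)
    {
        if (addr_size == 1)         /* 32 bit */
            off &= 0xffffffff;
        else if (addr_size == 0)    /* 16 bit */
            off &= 0xffff;
        return off;                 /* 64 bit: unchanged */
    }
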
5029 *ret = vmx_get_untagged_addr(vcpu, *ret, 0); in get_vmx_mem_address()
5030 /* Long mode: #GP(0)/#SS(0) if the memory address is in a in get_vmx_mem_address()
5041 *ret = (s.base + off) & 0xffffffff; in get_vmx_mem_address()
5045 * - segment type check (#GP(0) may be thrown) in get_vmx_mem_address()
5046 * - usability check (#GP(0)/#SS(0)) in get_vmx_mem_address()
5047 * - limit check (#GP(0)/#SS(0)) in get_vmx_mem_address()
5050 /* #GP(0) if the destination operand is located in a in get_vmx_mem_address()
5053 exn = ((s.type & 0xa) == 0 || (s.type & 8)); in get_vmx_mem_address()
5055 /* #GP(0) if the source operand is located in an in get_vmx_mem_address()
5058 exn = ((s.type & 0xa) == 8); in get_vmx_mem_address()
5060 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in get_vmx_mem_address()
5063 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. in get_vmx_mem_address()
5065 exn = (s.unusable != 0); in get_vmx_mem_address()
5068 * Protected mode: #GP(0)/#SS(0) if the memory operand is in get_vmx_mem_address()
5070 * limit checks for flat segments, i.e. segments with base==0, in get_vmx_mem_address()
5071 * limit==0xffffffff and of type expand-up data or code. in get_vmx_mem_address()
5073 if (!(s.base == 0 && s.limit == 0xffffffff && in get_vmx_mem_address()
5081 0); in get_vmx_mem_address()
5085 return 0; in get_vmx_mem_address()
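Note: the two expressions at 5053 and 5058 decode the segment AR type bits: bit 3 marks a code segment, and bit 1 means writable for data segments or readable for code segments, so 0xa selects exactly those two bits. Restated as illustrative predicates:

    #include <stdbool.h>
    #include <stdint.h>

    /* A write destination must be a writable data segment. */
    static bool segment_faults_on_write(uint8_t type)
    {
        return (type & 0xa) == 0 || (type & 8);
    }

    /* Only an execute-only code segment (code, not readable) faults. */
    static bool segment_faults_on_read(uint8_t type)
    {
        return (type & 0xa) == 8;
    }
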
5108 return 0; in nested_vmx_get_vmptr()
5145 if (r < 0) in enter_vmx_operation()
5170 vmx->pt_desc.guest.ctl = 0; in enter_vmx_operation()
5174 return 0; in enter_vmx_operation()
5219 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, in handle_vmxon()
5226 * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's in handle_vmxon()
5231 kvm_inject_gp(vcpu, 0); in handle_vmxon()
5245 kvm_inject_gp(vcpu, 0); in handle_vmxon()
5251 kvm_inject_gp(vcpu, 0); in handle_vmxon()
5301 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
5326 u32 zero = 0; in handle_vmclear()
5387 gva_t gva = 0; in handle_vmread()
5395 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); in handle_vmread()
5408 if (offset < 0) in handle_vmread()
5431 if (offset < 0) in handle_vmread()
5444 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value); in handle_vmread()
5450 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ in handle_vmread()
5503 u64 value = 0; in handle_vmwrite()
5518 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf)); in handle_vmwrite()
5529 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); in handle_vmwrite()
5532 if (offset < 0) in handle_vmwrite()
5559 value &= 0x1f0ff; in handle_vmwrite()
5692 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ in handle_vmptrst()
5756 roots_to_free = 0; in handle_invept()
5761 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { in handle_invept()
5905 return 0; in nested_vmx_eptp_switching()
5938 case 0: in handle_vmfunc()
5973 while (size > 0) { in nested_vmx_check_io_bitmaps()
5974 if (port < 0x8000) in nested_vmx_check_io_bitmaps()
5976 else if (port < 0x10000) in nested_vmx_check_io_bitmaps()
5980 bitmap += (port & 0x7fff) / 8; in nested_vmx_check_io_bitmaps()
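Note: lines 5973-5980 index the two 4 KiB VMX I/O bitmaps: bitmap A covers ports 0x0000-0x7fff and bitmap B covers 0x8000-0xffff, one bit per port, which is why the offset is computed from "port & 0x7fff". A sketch of a single-port lookup (the kernel's loop additionally handles accesses spanning multiple ports):

    #include <stdbool.h>
    #include <stdint.h>

    static bool io_port_intercepted(const uint8_t *bitmap_a,
                                    const uint8_t *bitmap_b, uint16_t port)
    {
        const uint8_t *bitmap = (port < 0x8000) ? bitmap_a : bitmap_b;
        uint32_t idx = port & 0x7fff;

        return bitmap[idx / 8] & (1u << (idx % 8));
    }
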
6038 if (msr_index >= 0xc0000000) { in nested_vmx_exit_handled_msr()
6039 msr_index -= 0xc0000000; in nested_vmx_exit_handled_msr()
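Note: the subtraction at 6038-6039 rebases a "high" MSR into its bitmap quarter: the 4 KiB MSR bitmap holds read intercepts for MSRs 0x00000000-0x00001fff, then 0xc0000000-0xc0001fff, then the two write halves in the same order. A sketch of a full lookup under that layout (MSRs outside both windows are always intercepted, as the kernel's handler also assumes):

    #include <stdbool.h>
    #include <stdint.h>

    /* Byte offsets: 0x000 read-low, 0x400 read-high,
     *               0x800 write-low, 0xc00 write-high. */
    static bool msr_intercepted(const uint8_t *bitmap, uint32_t msr, bool write)
    {
        uint32_t base = write ? 0x800 : 0x000;

        if (msr >= 0xc0000000) {    /* "high" MSR window */
            msr -= 0xc0000000;
            base += 0x400;
        }
        if (msr >= 0x2000)          /* outside both windows */
            return true;
        return bitmap[base + msr / 8] & (1u << (msr % 8));
    }
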
6067 case 0: /* mov to cr */ in nested_vmx_exit_handled_cr()
6071 case 0: in nested_vmx_exit_handled_cr()
6112 * lmsw can change bits 1..3 of cr0, and only set bit 0 of in nested_vmx_exit_handled_cr()
6115 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; in nested_vmx_exit_handled_cr()
6116 if (vmcs12->cr0_guest_host_mask & 0xe & in nested_vmx_exit_handled_cr()
6119 if ((vmcs12->cr0_guest_host_mask & 0x1) && in nested_vmx_exit_handled_cr()
6120 !(vmcs12->cr0_read_shadow & 0x1) && in nested_vmx_exit_handled_cr()
6121 (val & 0x1)) in nested_vmx_exit_handled_cr()
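Note: the comment and checks at 6112-6121 cover LMSW's quirk: it writes only CR0 bits 3:0 and can set but never clear CR0.PE. L1 sees the exit when the new value differs from its CR0 read shadow in any LMSW-writable bit it owns. As a standalone predicate:

    #include <stdbool.h>
    #include <stdint.h>

    static bool lmsw_exits_to_l1(uint64_t guest_host_mask,
                                 uint64_t read_shadow, uint16_t lmsw_src)
    {
        uint64_t val = lmsw_src & 0x0f;  /* LMSW writes CR0 bits 3:0 only */

        /* MP/EM/TS (bits 3:1) changed in a bit L1 owns? */
        if (guest_host_mask & 0xe & (val ^ read_shadow))
            return true;
        /* Attempt to set PE while L1 owns it and its shadow is 0? */
        if ((guest_host_mask & 0x1) && !(read_shadow & 0x1) && (val & 0x1))
            return true;
        return false;
    }
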
6155 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); in nested_vmx_exit_handled_vmcs_access()
6176 * interruption-type to 7 (other event) and the vector field to 0. Such in nested_vmx_exit_handled_mtf()
6410 exit_intr_info = 0; in nested_vmx_reflect_vmexit()
6411 exit_qual = 0; in nested_vmx_reflect_vmexit()
6452 .flags = 0, in vmx_get_nested_state()
6455 .hdr.vmx.flags = 0, in vmx_get_nested_state()
6458 .hdr.vmx.preemption_timer_deadline = 0, in vmx_get_nested_state()
6461 &user_kvm_nested_state->data.vmx[0]; in vmx_get_nested_state()
6542 copy_enlightened_to_vmcs12(vmx, 0); in vmx_get_nested_state()
6571 to_vmx(vcpu)->nested.nested_run_pending = 0; in vmx_leave_nested()
6572 nested_vmx_vmexit(vcpu, -1, 0, 0); in vmx_leave_nested()
6585 &user_kvm_nested_state->data.vmx[0]; in vmx_set_nested_state()
6651 return 0; in vmx_set_nested_state()
6666 return 0; in vmx_set_nested_state()
6706 return 0; in vmx_set_nested_state()
6757 return 0; in vmx_set_nested_state()
6760 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()
6792 max_idx = 0; in nested_vmx_calc_vmcs_enum_msr()
6793 for (i = 0; i < nr_vmcs12_fields; i++) { in nested_vmx_calc_vmcs_enum_msr()
6817 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); in nested_vmx_setup_pinbased_ctls()
6906 msrs->secondary_ctls_low = 0; in nested_vmx_setup_secondary_ctls()
6994 msrs->misc_high = 0; in nested_vmx_setup_misc_data()
7056 * be set to 0, meaning that L1 may turn off any of these bits. The in nested_vmx_setup_ctls_msrs()
7087 for (i = 0; i < VMX_BITMAP_NR; i++) in nested_vmx_hardware_unsetup()
7097 enable_shadow_vmcs = 0; in nested_vmx_hardware_setup()
7099 for (i = 0; i < VMX_BITMAP_NR; i++) { in nested_vmx_hardware_setup()
7128 return 0; in nested_vmx_hardware_setup()