/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>
#include <asm/perf_event.h>
#include <asm/posted_intr.h>

#include "capabilities.h"
#include "../kvm_cache_regs.h"
#include "pmu_intel.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"
#include "../mmu.h"
#include "common.h"

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE	4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 num_address_ranges;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct kvm_host_map apic_access_page_map;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/*
	 * Used to snapshot MSRs that are conditionally loaded on VM-Enter in
	 * order to propagate the guest's pre-VM-Enter value into vmcs02. For
	 * emulation of VMLAUNCH/VMRESUME, the snapshot will be of L1's value.
	 * For KVM_SET_NESTED_STATE, the snapshot is of L2's value, _if_
	 * userspace restores MSRs before nested state. If userspace restores
	 * MSRs after nested state, the snapshot holds garbage, but KVM can't
	 * detect that, and the garbage value in vmcs02 will be overwritten by
	 * MSR restoration in any case.
	 */
	u64 pre_vmenter_debugctl;
	u64 pre_vmenter_bndcfgs;
	u64 pre_vmenter_s_cet;
	u64 pre_vmenter_ssp;
	u64 pre_vmenter_ssp_tbl;

	u16 vpid02;
	u16 last_vpid;

	int tsc_autostore_slot;
	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

#ifdef CONFIG_KVM_HYPERV
	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
#endif
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	struct vcpu_vt vt;
	u8 fail;
	u8 x2apic_msr_bitmap_mode;

	u32 idt_vectoring_info;
	ulong rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=0, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware unless the guest is in 64-bit mode with
	 * EFER.SCE=1.
	 */
	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct vmx_msrs msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	/* Support for PML */
#define PML_LOG_NR_ENTRIES	512
	/* PML is written backwards: this is the first entry written by the CPU */
#define PML_HEAD_INDEX		(PML_LOG_NR_ENTRIES-1)

	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];
	u64 msr_ia32_mcu_opt_ctrl;
	bool disable_fb_clear;

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* ve_info must be page aligned. */
	struct vmx_ve_information *ve_info;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
	/* Posted Interrupt Descriptor (PID) table for IPI virtualization */
	u64 *pid_table;
};

static __always_inline struct vcpu_vt *to_vt(struct kvm_vcpu *vcpu)
{
	return &(container_of(vcpu, struct vcpu_vmx, vcpu)->vt);
}

static __always_inline struct kvm_vcpu *vt_to_vcpu(struct vcpu_vt *vt)
{
	return &(container_of(vt, struct vcpu_vmx, vt)->vcpu);
}

static __always_inline union vmx_exit_reason vmx_get_exit_reason(struct kvm_vcpu *vcpu)
{
	return to_vt(vcpu)->exit_reason;
}

static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vt *vt = to_vt(vcpu);

	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1) &&
	    !WARN_ON_ONCE(is_td_vcpu(vcpu)))
		vt->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	return vt->exit_qualification;
}

static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vt *vt = to_vt(vcpu);

	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2) &&
	    !WARN_ON_ONCE(is_td_vcpu(vcpu)))
		vt->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	return vt->exit_intr_info;
}
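
/*
 * Illustrative usage: an exit handler might pull the exit information
 * through the accessors above so that the underlying VMCS fields are read
 * (and cached) at most once per VM-Exit; later calls during the same exit
 * return the cached values:
 *
 *	unsigned long exit_qual = vmx_get_exit_qual(vcpu);
 *	u32 intr_info = vmx_get_intr_info(vcpu);
 */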

void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
int vmx_get_cpl_no_cache(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
		    unsigned int flags);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);

static inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
						 u32 msr, int type)
{
	vmx_set_intercept_for_msr(vcpu, msr, type, false);
}

static inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
						u32 msr, int type)
{
	vmx_set_intercept_for_msr(vcpu, msr, type, true);
}
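
/*
 * Illustrative usage: passing an MSR's reads through to the guest while
 * keeping writes intercepted might look like the below (the MSR choice is
 * arbitrary):
 *
 *	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_R);
 *	vmx_enable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_W);
 */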

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated);
bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated);

#define VMX_HOST_OWNED_DEBUGCTL_BITS	(DEBUGCTLMSR_FREEZE_IN_SMM)

static inline void vmx_guest_debugctl_write(struct kvm_vcpu *vcpu, u64 val)
{
	WARN_ON_ONCE(val & VMX_HOST_OWNED_DEBUGCTL_BITS);

	val |= vcpu->arch.host_debugctl & VMX_HOST_OWNED_DEBUGCTL_BITS;
	vmcs_write64(GUEST_IA32_DEBUGCTL, val);
}

static inline u64 vmx_guest_debugctl_read(void)
{
	return vmcs_read64(GUEST_IA32_DEBUGCTL) & ~VMX_HOST_OWNED_DEBUGCTL_BITS;
}

static inline void vmx_reload_guest_debugctl(struct kvm_vcpu *vcpu)
{
	u64 val = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (!((val ^ vcpu->arch.host_debugctl) & VMX_HOST_OWNED_DEBUGCTL_BITS))
		return;

	vmx_guest_debugctl_write(vcpu, val & ~VMX_HOST_OWNED_DEBUGCTL_BITS);
}
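
/*
 * Illustrative sketch (assuming @data has already been validated): a guest
 * write to IA32_DEBUGCTL is expected to reach the VMCS via
 * vmx_guest_debugctl_write() with the host-owned bits cleared; the helper
 * then folds the host's FREEZE_IN_SMM setting back in before the write:
 *
 *	vmx_guest_debugctl_write(vcpu, data & ~VMX_HOST_OWNED_DEBUGCTL_BITS);
 */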

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff. The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes. The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes. MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
						       u32 msr)		       \
{									       \
	int f = sizeof(unsigned long);					       \
									       \
	if (msr <= 0x1fff)						       \
		return bitop##_bit(msr, bitmap + base / f);		       \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))		       \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true;						       \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)		       \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)    \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
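
/*
 * The macros above generate helpers named vmx_test_msr_bitmap_read(),
 * vmx_clear_msr_bitmap_write(), vmx_set_msr_bitmap_read(), etc.  An
 * illustrative sketch of opening up read access to an MSR in a bitmap page
 * (the MSR choice is arbitrary):
 *
 *	if (vmx_test_msr_bitmap_read(msr_bitmap, MSR_IA32_SPEC_CTRL))
 *		vmx_clear_msr_bitmap_read(msr_bitmap, MSR_IA32_SPEC_CTRL);
 */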

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS				\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS)
#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS			\
		(__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS |			\
		 VM_ENTRY_IA32E_MODE)
#else
	#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS			\
		__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS				\
	(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_ENTRY_LOAD_IA32_PAT |					\
	 VM_ENTRY_LOAD_IA32_EFER |					\
	 VM_ENTRY_LOAD_BNDCFGS |					\
	 VM_ENTRY_PT_CONCEAL_PIP |					\
	 VM_ENTRY_LOAD_IA32_RTIT_CTL |					\
	 VM_ENTRY_LOAD_CET_STATE)

#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS				\
	(VM_EXIT_SAVE_DEBUG_CONTROLS |					\
	 VM_EXIT_ACK_INTR_ON_EXIT)
#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS			\
		(__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS |			\
		 VM_EXIT_HOST_ADDR_SPACE_SIZE)
#else
	#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS			\
		__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS				\
	(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_EXIT_SAVE_IA32_PAT |					\
	 VM_EXIT_LOAD_IA32_PAT |					\
	 VM_EXIT_SAVE_IA32_EFER |					\
	 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER |				\
	 VM_EXIT_LOAD_IA32_EFER |					\
	 VM_EXIT_CLEAR_BNDCFGS |					\
	 VM_EXIT_PT_CONCEAL_PIP |					\
	 VM_EXIT_CLEAR_IA32_RTIT_CTL |					\
	 VM_EXIT_LOAD_CET_STATE |					\
	 VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL)

#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL			\
	(PIN_BASED_EXT_INTR_MASK |					\
	 PIN_BASED_NMI_EXITING)
#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL			\
	(PIN_BASED_VIRTUAL_NMIS |					\
	 PIN_BASED_POSTED_INTR |					\
	 PIN_BASED_VMX_PREEMPTION_TIMER)

#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL			\
	(CPU_BASED_HLT_EXITING |					\
	 CPU_BASED_CR3_LOAD_EXITING |					\
	 CPU_BASED_CR3_STORE_EXITING |					\
	 CPU_BASED_UNCOND_IO_EXITING |					\
	 CPU_BASED_MOV_DR_EXITING |					\
	 CPU_BASED_USE_TSC_OFFSETTING |					\
	 CPU_BASED_MWAIT_EXITING |					\
	 CPU_BASED_MONITOR_EXITING |					\
	 CPU_BASED_INVLPG_EXITING |					\
	 CPU_BASED_RDPMC_EXITING |					\
	 CPU_BASED_INTR_WINDOW_EXITING)

#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL		\
		(__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL |		\
		 CPU_BASED_CR8_LOAD_EXITING |				\
		 CPU_BASED_CR8_STORE_EXITING)
#else
	#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL		\
		__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
#endif

#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL			\
	(CPU_BASED_RDTSC_EXITING |					\
	 CPU_BASED_TPR_SHADOW |						\
	 CPU_BASED_USE_IO_BITMAPS |					\
	 CPU_BASED_MONITOR_TRAP_FLAG |					\
	 CPU_BASED_USE_MSR_BITMAPS |					\
	 CPU_BASED_NMI_WINDOW_EXITING |					\
	 CPU_BASED_PAUSE_EXITING |					\
	 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS |			\
	 CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)

#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL			\
	(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |			\
	 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |			\
	 SECONDARY_EXEC_WBINVD_EXITING |				\
	 SECONDARY_EXEC_ENABLE_VPID |					\
	 SECONDARY_EXEC_ENABLE_EPT |					\
	 SECONDARY_EXEC_UNRESTRICTED_GUEST |				\
	 SECONDARY_EXEC_PAUSE_LOOP_EXITING |				\
	 SECONDARY_EXEC_DESC |						\
	 SECONDARY_EXEC_ENABLE_RDTSCP |					\
	 SECONDARY_EXEC_ENABLE_INVPCID |				\
	 SECONDARY_EXEC_APIC_REGISTER_VIRT |				\
	 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |				\
	 SECONDARY_EXEC_SHADOW_VMCS |					\
	 SECONDARY_EXEC_ENABLE_XSAVES |					\
	 SECONDARY_EXEC_RDSEED_EXITING |				\
	 SECONDARY_EXEC_RDRAND_EXITING |				\
	 SECONDARY_EXEC_ENABLE_PML |					\
	 SECONDARY_EXEC_TSC_SCALING |					\
	 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |				\
	 SECONDARY_EXEC_PT_USE_GPA |					\
	 SECONDARY_EXEC_PT_CONCEAL_VMX |				\
	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
	 SECONDARY_EXEC_BUS_LOCK_DETECTION |				\
	 SECONDARY_EXEC_NOTIFY_VM_EXITING |				\
	 SECONDARY_EXEC_ENCLS_EXITING |					\
	 SECONDARY_EXEC_EPT_VIOLATION_VE)

#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL			\
	(TERTIARY_EXEC_IPI_VIRT)

#define BUILD_CONTROLS_SHADOW(lname, uname, bits)			\
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val)	\
{									\
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		\
		vmcs_write##bits(uname, val);				\
		vmx->loaded_vmcs->controls_shadow.lname = val;		\
	}								\
}									\
static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs)	\
{									\
	return vmcs->controls_shadow.lname;				\
}									\
static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx)	\
{									\
	return __##lname##_controls_get(vmx->loaded_vmcs);		\
}									\
static __always_inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val)	\
{									\
	BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname)));	\
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	\
}									\
static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val)	\
{									\
	BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname)));	\
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	\
}									\
static __always_inline void lname##_controls_changebit(struct vcpu_vmx *vmx, u##bits val, \
						       bool set)	\
{									\
	if (set)							\
		lname##_controls_setbit(vmx, val);			\
	else								\
		lname##_controls_clearbit(vmx, val);			\
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
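
/*
 * BUILD_CONTROLS_SHADOW() generates accessors such as exec_controls_get(),
 * exec_controls_setbit() and exec_controls_clearbit().  An illustrative
 * sketch of toggling a CPU-based control through the shadow (the VMCS is
 * only written when the cached value actually changes):
 *
 *	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
 *	...
 *	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
 */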

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand. Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |		\
				(1 << VCPU_REGS_RSP) |		\
				(1 << VCPU_EXREG_RFLAGS) |	\
				(1 << VCPU_EXREG_PDPTR) |	\
				(1 << VCPU_EXREG_SEGMENTS) |	\
				(1 << VCPU_EXREG_CR0) |		\
				(1 << VCPU_EXREG_CR3) |		\
				(1 << VCPU_EXREG_CR4) |		\
				(1 << VCPU_EXREG_EXIT_INFO_1) |	\
				(1 << VCPU_EXREG_EXIT_INFO_2))

static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
{
	unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;

	/*
	 * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
	 * in order to construct shadow PTEs with the correct protections.
	 * Note! CR0.WP technically can be passed through to the guest if
	 * paging is disabled, but checking CR0.PG would generate a cyclical
	 * dependency of sorts due to forcing the caller to ensure CR0 holds
	 * the correct value prior to determining which CR0 bits can be owned
	 * by L1. Keep it simple and limit the optimization to EPT.
	 */
	if (!enable_ept)
		bits &= ~X86_CR0_WP;
	return bits;
}

static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr &&
	       cpuid_maxphyaddr(vcpu) < kvm_host.maxphyaddr;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 3) & 0xf;
}

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}

static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
{
	return lapic_in_kernel(vcpu) && enable_ipiv;
}

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

int vmx_init(void);
void vmx_exit(void);

#endif /* __KVM_X86_VMX_H */