/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "hyperv.h"
#include "vmcs.h"
#include "../x86.h"

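/*
 * Out-of-line error handlers, one per VMX instruction, reached from the
 * error paths of the inline asm wrappers below when the instruction fails.
 */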
void vmread_error(unsigned long field, bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * The VMREAD error trampoline _always_ uses the stack to pass parameters, even
 * for 64-bit targets.  Preserving all registers allows the VMREAD inline asm
 * blob to avoid clobbering GPRs, which in turn allows the compiler to better
 * optimize sequences of VMREADs.
 *
 * Declare the trampoline as an opaque label as it's not safe to call from C
 * code; there is no way to tell the compiler to pass params on the stack for
 * 64-bit targets.
 *
 * void vmread_error_trampoline(unsigned long field, bool fault);
 */
extern unsigned long vmread_error_trampoline;
#endif

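/*
 * VMCS field encodings carry the field width in bits 14:13 (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and the access type in bit 0
 * (1 = high 32 bits of a 64-bit field).  The checks below mask with
 * 0x6000/0x6001 to reject, at compile time, accessors whose size doesn't
 * match a constant field's encoding.
 */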
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "32-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "32-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

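/*
 * Raw VMREAD.  With asm goto output support, VM-Fail and faults are handled
 * with jump labels directly from C; otherwise the failure paths bounce
 * through vmread_error_trampoline() so that no GPRs are clobbered.  In both
 * cases a VMREAD that fails or faults returns 0.
 */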
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

	asm_volatile_goto("1: vmread %[field], %[output]\n\t"
			  "jna %l[do_fail]\n\t"

			  _ASM_EXTABLE(1b, %l[do_exception])

			  : [output] "=r" (value)
			  : [field] "r" (field)
			  : "cc"
			  : do_fail, do_exception);

	return value;

do_fail:
	WARN_ONCE(1, "kvm: vmread failed: field=%lx\n", field);
	pr_warn_ratelimited("kvm: vmread failed: field=%lx\n", field);
	return 0;

do_exception:
	kvm_spurious_fault();
	return 0;

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed.  Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "xorl %k1, %k1\n\t"
		     "2:\n\t"
		     "push %1\n\t"
		     "push %2\n\t"
		     "call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted.  As above, except push '1' for @fault. */
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)

		     : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
	return value;

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
}

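/*
 * Sized read accessors.  Each verifies at compile time that the accessor
 * width matches the field encoding, reads through the enlightened VMCS when
 * the eVMCS static key is enabled, and otherwise does a real VMREAD.
 * Illustrative usage (field names from asm/vmx.h):
 *
 *	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
 *	unsigned long cr3 = vmcs_readl(GUEST_CR3);
 */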
static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
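	/*
	 * On 32-bit kernels VMREAD can only return 32 bits at a time: read
	 * the low half via the base encoding and the high half via the "+1"
	 * (access type = high) encoding, then combine them.
	 */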
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

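/*
 * vmx_asm[12]() wrap a VMX instruction with one or two operands in asm goto.
 * "jna" branches to the error label when the instruction signals VM-Fail
 * (CF or ZF set), where the instruction-specific *_error() handler runs
 * inside an instrumentation_begin()/end() section; a faulting instruction
 * is routed via the exception table to the fault label and treated as a
 * spurious fault.
 */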
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"	\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1, op2 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

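/*
 * Read-modify-write helpers.  The mask is only 32 bits and the eVMCS path
 * uses the 32-bit accessors, hence 64-bit fields are rejected at compile
 * time.
 */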
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}

static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

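/*
 * INVVPID and INVEPT take a 128-bit descriptor in memory; the on-stack
 * structs below provide that descriptor and are passed via an "m"
 * constraint.
 */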
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

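/*
 * Flush a single VPID's TLB entries if single-context INVVPID is supported,
 * else fall back to a global flush.  VPID 0 (VPID not in use) is skipped
 * entirely.
 */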
static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */