/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "vmx_onhyperv.h"
#include "vmcs.h"
#include "../x86.h"

void vmread_error(unsigned long field);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp);

#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * The VMREAD error trampoline _always_ uses the stack to pass parameters, even
 * for 64-bit targets.  Preserving all registers allows the VMREAD inline asm
 * blob to avoid clobbering GPRs, which in turn allows the compiler to better
 * optimize sequences of VMREADs.
 *
 * Declare the trampoline as an opaque label as it's not safe to call from C
 * code; there is no way to tell the compiler to pass params on the stack for
 * 64-bit targets.
 *
 * void vmread_error_trampoline(unsigned long field, bool fault);
 */
extern unsigned long vmread_error_trampoline;

/*
 * The second VMREAD error trampoline, called from the assembly trampoline,
 * exists primarily to enable instrumentation for the VM-Fail path.
 */
void vmread_error_trampoline2(unsigned long field, bool fault);

#endif

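/*
 * VMCS field encodings place the field width in bits 14:13 (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and the access type in bit 0
 * (1 = high 32 bits of a 64-bit field).  The vmcs_check*() helpers below
 * test exactly those bits to reject, at compile time, an accessor whose
 * width doesn't match a constant @field.
 */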
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "32-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "32-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

	asm_goto_output("1: vmread %[field], %[output]\n\t"
			"jna %l[do_fail]\n\t"

			_ASM_EXTABLE(1b, %l[do_exception])

			: [output] "=r" (value)
			: [field] "r" (field)
			: "cc"
			: do_fail, do_exception);

	return value;

do_fail:
	instrumentation_begin();
	vmread_error(field);
	instrumentation_end();
	return 0;

do_exception:
	kvm_spurious_fault();
	return 0;

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

	asm volatile("1: vmread %[field], %[output]\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed.  Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "xorl %k[output], %k[output]\n\t"
		     "2:\n\t"
		     "push %[output]\n\t"
		     "push %[field]\n\t"
		     "call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %[field]\n\t"
		     "pop %[output]\n\t"
		     "3:\n\t"

		     /* VMREAD faulted.  As above, except push '1' for @fault. */
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[output])

		     : ASM_CALL_CONSTRAINT, [output] "=&r" (value)
		     : [field] "r" (field)
		     : "cc");
	return value;

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (kvm_is_using_evmcs())
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (kvm_is_using_evmcs())
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (kvm_is_using_evmcs())
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
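	/*
	 * On 32-bit kernels VMREAD transfers only 32 bits, so a 64-bit field
	 * is read as two halves; field + 1 sets the access-type bit to
	 * select the high 32 bits.
	 */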
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (kvm_is_using_evmcs())
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

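/*
 * Example usage (illustrative, not from the original header): the accessor
 * width must match the field's encoding, using field names from asm/vmx.h:
 *
 *	u16 vpid = vmcs_read16(VIRTUAL_PROCESSOR_ID);
 *	u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 *	unsigned long rip = vmcs_readl(GUEST_RIP);
 *
 * A mismatch such as vmcs_read16(GUEST_RIP) trips the BUILD_BUG_ON_MSG()
 * checks above when the field is a compile-time constant.
 */

/*
 * vmx_asm1()/vmx_asm2() emit a single VMX instruction with one or two
 * operands.  VM-Fail (CF or ZF set, hence "jna") branches to the error
 * label and calls <insn>_error() with @error_args; a fault (e.g. #UD or
 * #GP) is routed through the exception table to kvm_spurious_fault().
 */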
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm goto("1: " __stringify(insn) " %0\n\t"			\
		 ".byte 0x2e\n\t" /* branch not taken hint */		\
		 "jna %l[error]\n\t"					\
		 _ASM_EXTABLE(1b, %l[fault])				\
		 : : op1 : "cc" : error, fault);			\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm goto("1: " __stringify(insn) " %1, %0\n\t"			\
		 ".byte 0x2e\n\t" /* branch not taken hint */		\
		 "jna %l[error]\n\t"					\
		 _ASM_EXTABLE(1b, %l[fault])				\
		 : : op1, op2 : "cc" : error, fault);			\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (kvm_is_using_evmcs())
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (kvm_is_using_evmcs())
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (kvm_is_using_evmcs())
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (kvm_is_using_evmcs())
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (kvm_is_using_evmcs())
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (kvm_is_using_evmcs())
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}
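
/*
 * Example usage (illustrative, not from the original header):
 *
 *	vmcs_write32(EXCEPTION_BITMAP, 0);
 *	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
 *	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
 *
 * vmcs_{set,clear}_bits() do a read-modify-write of the field; 64-bit
 * fields are rejected at build time as @mask is only a u32.
 */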

static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (kvm_is_using_evmcs())
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

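/*
 * INVVPID and INVEPT take the invalidation type in a register and a 128-bit
 * descriptor in memory; the on-stack structs below build those descriptors.
 */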
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

static inline void __invept(unsigned long ext, u64 eptp)
{
	struct {
		u64 eptp;
		u64 reserved_0;
	} operand = { eptp, 0 };
	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp);
}

static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

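/*
 * Sync the TLB for @vpid, preferring a single-context INVVPID and falling
 * back to a global flush when single-context isn't supported.  The fallback
 * is skipped for vpid == 0, i.e. when no VPID is in use for the vCPU.
 */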
static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0);
}

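/*
 * Prefer a single-context INVEPT for @eptp; fall back to a global
 * invalidation if single-context INVEPT isn't supported.
 */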
static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */