/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32 max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8 tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64 max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64 default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;
};

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)	\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})
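
/*
 * Illustrative use (sketch, not from this header): nested code wraps each
 * boolean consistency check so that a failure traces the check's source
 * text before the result is acted on, e.g.
 *
 *	if (KVM_NESTED_VMENTER_CONSISTENCY_CHECK(!page_address_valid(vcpu, gpa)))
 *		return -EINVAL;
 *
 * where page_address_valid() stands in for any boolean predicate.
 */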

/*
 * The first...last VMX feature MSRs that are emulated by KVM. This may or may
 * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an
 * associated feature that KVM supports for nested virtualization.
 */
#define KVM_FIRST_EMULATED_VMX_MSR	MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR	MSR_IA32_VMX_VMFUNC

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
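
/*
 * Worked example (illustrative): with the VMX defaults above, a PLE window
 * of 4096 grows to min(4096 * 2, UINT_MAX) = 8192, as the grow modifier (2)
 * is less than the base (4096) and therefore multiplies rather than adds.
 * The default shrink modifier of 0 takes the modifier < 1 path, i.e. a
 * shrink resets the window to its base value.
 */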

#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu != -1;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
	       vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return !!(vcpu->arch.efer & EFER_LMA);
#else
	return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible.  The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}
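
/*
 * E.g. #PF (vector 14) and #GP (vector 13) push an error code, while #DE
 * (vector 0) and #UD (vector 6) do not, per the SDM's exception definitions.
 */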

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}
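
/*
 * Illustrative: with 48 virtual address bits, an address is canonical iff
 * bits 63:47 are a sign extension of bit 47, so 0x00007fffffffffff and
 * 0xffff800000000000 are canonical while 0x0000800000000000 is not.
 */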

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 host_xss;
extern u64 host_arch_capabilities;

extern struct kvm_caps kvm_caps;

extern bool enable_pmu;

/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2; it does NOT need to be
 * used to check/restrict guest behavior, as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
	u64 permitted_xcr0 = kvm_caps.supported_xcr0;

	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
		permitted_xcr0 &= xstate_get_guest_group_perm();

		/*
		 * Treat XTILE_CFG as unsupported if the current process isn't
		 * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
		 * XCR0 without setting XTILE_DATA is architecturally illegal.
		 */
		if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
			permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
	}
	return permitted_xcr0;
}

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)				\
	({						\
	    u32 __quot, __rem;				\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)	\
		: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;					\
	    __rem;					\
	 })
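
/*
 * Illustrative use (hypothetical values): for n = 1 and base = 2, the macro
 * divides (1ull << 32) by 2, leaving n = 0x80000000 (0.5 as a 0.32 fixed-point
 * fraction) and evaluating to the remainder, 0.  Note that the 32-bit quotient
 * requires base to be larger than the original n, else divl faults.
 */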

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

enum kvm_intr_type {
	/* Values are arbitrary, but must be non-zero. */
	KVM_HANDLING_IRQ = 1,
	KVM_HANDLING_NMI,
};

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values. */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
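
/*
 * How the check above works (illustrative): each PAT entry is 3 bits wide,
 * so the first test rejects any set bit outside the low 3 bits of each byte.
 * The invalid encodings 2 and 3 are exactly those with bit 1 set and bit 2
 * clear; ORing bit 1 shifted into bit 2 changes the value only for such
 * entries, so the equality holds iff no entry is 2 or 3.  E.g. the default
 * PAT 0x0007040600070406 passes, while any 0x02 entry fails.
 */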

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace handles it.
 */
#define KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)		\
({							\
	u64 __reserved_bits = CR4_RESERVED_BITS;	\
							\
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
		__reserved_bits |= X86_CR4_OSXSAVE;	\
	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
		__reserved_bits |= X86_CR4_SMEP;	\
	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
		__reserved_bits |= X86_CR4_SMAP;	\
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
		__reserved_bits |= X86_CR4_FSGSBASE;	\
	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
		__reserved_bits |= X86_CR4_PKE;		\
	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
		__reserved_bits |= X86_CR4_LA57;	\
	if (!__cpu_has(__c, X86_FEATURE_UMIP))		\
		__reserved_bits |= X86_CR4_UMIP;	\
	if (!__cpu_has(__c, X86_FEATURE_VMX))		\
		__reserved_bits |= X86_CR4_VMXE;	\
	if (!__cpu_has(__c, X86_FEATURE_PCID))		\
		__reserved_bits |= X86_CR4_PCIDE;	\
	if (!__cpu_has(__c, X86_FEATURE_LAM))		\
		__reserved_bits |= X86_CR4_LAM_SUP;	\
	__reserved_bits;				\
})
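
/*
 * Illustrative use (sketch, predicate names assumed): callers instantiate the
 * macro with a "does this CPU/guest have feature X" predicate, e.g.
 *
 *	u64 reserved = __cr4_reserved_bits(guest_cpuid_has, vcpu);
 *
 * so any CR4 bit whose enabling feature is absent is treated as reserved,
 * and a #GP-worthy value can be detected with (cr4 & reserved).
 */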

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

#endif