// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu);
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
			    struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue);

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
	/*
	 * The local APIC is being enabled. If the per-vCPU upcall vector is
	 * set and the vCPU's evtchn_upcall_pending flag is set, inject the
	 * interrupt.
	 */
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
		kvm_xen_inject_vcpu_vector(vcpu);
}

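/*
 * Leaf (base + 3) of the Xen CPUID space is the time/TSC information
 * leaf, which KVM keeps in sync with the vCPU's TSC frequency.
 */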
static inline bool kvm_xen_is_tsc_leaf(struct kvm_vcpu *vcpu, u32 function)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
	       vcpu->arch.xen.cpuid.base &&
	       function <= vcpu->arch.xen.cpuid.limit &&
	       function == (vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3));
}

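/* True if userspace has configured an MSR index for the hypercall page. */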
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		kvm->arch.xen.hvm_config.msr;
}

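/*
 * An hvm_config.msr of zero means "unconfigured"; the msr check below
 * stops a guest write to MSR 0 from matching in that case.
 */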
static inline bool kvm_xen_is_hypercall_page_msr(struct kvm *kvm, u32 msr)
{
	if (!static_branch_unlikely(&kvm_xen_enabled.key))
		return false;

	return msr && msr == kvm->arch.xen.hvm_config.msr;
}

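/* True if Xen hypercalls are intercepted and forwarded to userspace. */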
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		(kvm->arch.xen.hvm_config.flags &
		 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}

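/*
 * Fast checks first; the slow path in __kvm_xen_has_interrupt() is taken
 * only when the VM-wide upcall vector is in use.
 */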
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->kvm->arch.xen.upcall_vector)
		return __kvm_xen_has_interrupt(vcpu);

	return 0;
}

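/*
 * True if event channel bits still need to be copied into the guest's
 * vcpu_info by kvm_xen_inject_pending_events().
 */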
static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		vcpu->arch.xen.evtchn_pending_sel;
}

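/* True if the guest has bound a virtual IRQ to the Xen per-vCPU timer. */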
static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return !!vcpu->arch.xen.timer_virq;
}

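/*
 * The timer is armed via hypercalls (VCPUOP_set_singleshot_timer and
 * friends), so it is only relevant when hypercalls are intercepted.
 */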
static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_hypercall_enabled(vcpu->kvm) && kvm_xen_timer_enabled(vcpu))
		return atomic_read(&vcpu->arch.xen.timer_pending);

	return 0;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
#else
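/* CONFIG_KVM_XEN=n: no-op stubs so that callers need no #ifdefs. */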
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_is_hypercall_page_msr(struct kvm *kvm, u32 msr)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline bool kvm_xen_is_tsc_leaf(struct kvm_vcpu *vcpu, u32 function)
{
	return false;
}
#endif

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate(struct kvm_vcpu *vcpu, int state);

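/* The vCPU is about to run in the guest again; account RUNSTATE_running. */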
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (WARN_ON_ONCE(!vcpu->preempted))
		return;

	kvm_xen_update_runstate(vcpu, RUNSTATE_runnable);
}

/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};

struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};

struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};

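/* 32 words of 32 bits each: 1024 event channels in the 2-level compat ABI. */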
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 *				\
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
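
/*
 * The packed attribute matches the 32-bit guest layout: state_entry_time
 * sits at offset 4, where 64-bit natural alignment would pad it to 8.
 */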
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));

struct compat_sched_poll {
	/* This is actually a guest virtual address which points to ports. */
	uint32_t ports;
	unsigned int nr_ports;
	uint64_t timeout;
};

#endif /* __ARCH_X86_KVM_XEN_H__ */