// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>
#include <hyp/fault.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/traps.h>

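/*
 * Hyp counterpart of the kernel's extable: each entry records a potentially
 * faulting instruction and its fixup as 32-bit self-relative offsets, which
 * are resolved back into absolute addresses in
 * __kvm_unexpected_el2_exception() below.
 */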
struct kvm_exception_table_entry {
	int insn, fixup;
};

extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd())
		write_sysreg(1 << 30, fpexc32_el2);
}

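/*
 * nVHE: CPTR_EL2 uses positive trap polarity, i.e. a set bit traps the
 * corresponding feature class (TFP: FP/SIMD, TZ: SVE, TSM: SME, TTA: trace,
 * TAM: AMU), and the RES1 bits must be preserved.
 */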
static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;

	/*
	 * Always trap SME since it's not supported in KVM.
	 * TSM is RES1 if SME isn't implemented.
	 */
	val |= CPTR_EL2_TSM;

	if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
		val |= CPTR_EL2_TZ;

	if (!guest_owns_fp_regs())
		val |= CPTR_EL2_TFP;

	write_sysreg(val, cptr_el2);
}

static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
	/*
	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
	 * except for some missing controls, such as TAM.
	 * In this case, CPTR_EL2.TAM has the same position with or without
	 * VHE (HCR.E2H == 1), which allows us to use the CPTR_EL2.TAM shift
	 * value here for trapping AMU accesses.
	 */
	u64 val = CPTR_EL2_TAM | CPACR_EL1_TTA;
	u64 cptr;

	if (guest_owns_fp_regs()) {
		val |= CPACR_EL1_FPEN;
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN;
	}

	if (!vcpu_has_nv(vcpu))
		goto write;

	/*
	 * The architecture is a bit crap (what a surprise): an EL2 guest
	 * writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
	 * as they are RES0 in the guest's view. To work around it, trap the
	 * sucker using the very same bit it can't set...
	 */
	if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
		val |= CPTR_EL2_TCPAC;

	/*
	 * Layer the guest hypervisor's trap configuration on top of our own if
	 * we're in a nested context.
	 */
	if (is_hyp_ctxt(vcpu))
		goto write;

	cptr = vcpu_sanitised_cptr_el2(vcpu);

	/*
	 * Pay attention, there's some interesting detail here.
	 *
	 * The CPTR_EL2.xEN fields are 2 bits wide, although there are only two
	 * meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest):
	 *
	 *  - CPTR_EL2.xEN = x0, traps are enabled
	 *  - CPTR_EL2.xEN = x1, traps are disabled
	 *
	 * In other words, bit[0] determines if guest accesses trap or not. In
	 * the interest of simplicity, clear the entire field if the guest
	 * hypervisor has traps enabled to dispel any illusion of something more
	 * complicated taking place.
	 */
	if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
		val &= ~CPACR_EL1_FPEN;
	if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
		val &= ~CPACR_EL1_ZEN;

	if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
		val |= cptr & CPACR_EL1_E0POE;

	val |= cptr & CPTR_EL2_TCPAC;

write:
	write_sysreg(val, cpacr_el1);
}

static inline void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
	if (!guest_owns_fp_regs())
		__activate_traps_fpsimd32(vcpu);

	if (has_vhe() || has_hvhe())
		__activate_cptr_traps_vhe(vcpu);
	else
		__activate_cptr_traps_nvhe(vcpu);
}

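/*
 * Deactivation restores a permissive configuration for the host: FP/SIMD is
 * left untrapped, as are SVE/SME where the CPU implements them; the controls
 * for unimplemented features remain set (they are RES1 in that case anyway).
 */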
static inline void __deactivate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_NVHE_EL2_RES1;

	if (!cpus_have_final_cap(ARM64_SVE))
		val |= CPTR_EL2_TZ;
	if (!cpus_have_final_cap(ARM64_SME))
		val |= CPTR_EL2_TSM;

	write_sysreg(val, cptr_el2);
}

static inline void __deactivate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val = CPACR_EL1_FPEN;

	if (cpus_have_final_cap(ARM64_SVE))
		val |= CPACR_EL1_ZEN;
	if (cpus_have_final_cap(ARM64_SME))
		val |= CPACR_EL1_SMEN;

	write_sysreg(val, cpacr_el1);
}

static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
	if (has_vhe() || has_hvhe())
		__deactivate_cptr_traps_vhe(vcpu);
	else
		__deactivate_cptr_traps_nvhe(vcpu);
}

static inline bool cpu_has_amu(void)
{
	u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_unsigned_field(pfr0,
			ID_AA64PFR0_EL1_AMU_SHIFT);
}

#define __activate_fgt(hctxt, vcpu, reg)				\
	do {								\
		ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);	\
		write_sysreg_s(*vcpu_fgt(vcpu, reg), SYS_ ## reg);	\
	} while (0)

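/*
 * The macro saves the host's current fine-grained-trap configuration into
 * the host context and installs the guest's; e.g. for HFGRTR_EL2 the body
 * expands to:
 *
 *	ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2);
 *	write_sysreg_s(*vcpu_fgt(vcpu, HFGRTR_EL2), SYS_HFGRTR_EL2);
 */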
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	__activate_fgt(hctxt, vcpu, HFGRTR_EL2);
	__activate_fgt(hctxt, vcpu, HFGWTR_EL2);
	__activate_fgt(hctxt, vcpu, HFGITR_EL2);
	__activate_fgt(hctxt, vcpu, HDFGRTR_EL2);
	__activate_fgt(hctxt, vcpu, HDFGWTR_EL2);

	if (cpu_has_amu())
		__activate_fgt(hctxt, vcpu, HAFGRTR_EL2);

	if (!cpus_have_final_cap(ARM64_HAS_FGT2))
		return;

	__activate_fgt(hctxt, vcpu, HFGRTR2_EL2);
	__activate_fgt(hctxt, vcpu, HFGWTR2_EL2);
	__activate_fgt(hctxt, vcpu, HFGITR2_EL2);
	__activate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
	__activate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}

#define __deactivate_fgt(hctxt, vcpu, reg)				\
	do {								\
		write_sysreg_s(ctxt_sys_reg(hctxt, reg),		\
			       SYS_ ## reg);				\
	} while (0)

static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	__deactivate_fgt(hctxt, vcpu, HFGRTR_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGWTR_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGITR_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGRTR_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGWTR_EL2);

	if (cpu_has_amu())
		__deactivate_fgt(hctxt, vcpu, HAFGRTR_EL2);

	if (!cpus_have_final_cap(ARM64_HAS_FGT2))
		return;

	__deactivate_fgt(hctxt, vcpu, HFGRTR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGWTR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGITR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}

static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu)
{
	u64 clr = MPAM2_EL2_EnMPAMSM;
	u64 set = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1;

	if (!system_supports_mpam())
		return;

	/* trap guest access to MPAMIDR_EL1 */
	if (system_supports_mpam_hcr()) {
		write_sysreg_s(MPAMHCR_EL2_TRAP_MPAMIDR_EL1, SYS_MPAMHCR_EL2);
	} else {
		/* From v1.1 TIDR can trap MPAMIDR, set it unconditionally */
		set |= MPAM2_EL2_TIDR;
	}

	sysreg_clear_set_s(SYS_MPAM2_EL2, clr, set);
}

static inline void __deactivate_traps_mpam(void)
{
	u64 clr = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1 | MPAM2_EL2_TIDR;
	u64 set = MPAM2_EL2_EnMPAMSM;

	if (!system_supports_mpam())
		return;

	sysreg_clear_set_s(SYS_MPAM2_EL2, clr, set);

	if (system_supports_mpam_hcr())
		write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2);
}

static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (system_supports_pmuv3()) {
		write_sysreg(0, pmselr_el0);

		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
	}

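	/*
	 * FEAT_HCX: layer the L1 hypervisor's HCRX_EL2 on top of KVM's own
	 * setting: bits within __HCRX_EL2_MASK that the guest hypervisor set
	 * are ORed in, and bits within __HCRX_EL2_nMASK that it cleared are
	 * cleared, before the result is loaded into the real HCRX_EL2.
	 */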
	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
		u64 hcrx = vcpu->arch.hcrx_el2;
		if (is_nested_ctxt(vcpu)) {
			u64 val = __vcpu_sys_reg(vcpu, HCRX_EL2);
			hcrx |= val & __HCRX_EL2_MASK;
			hcrx &= ~(~val & __HCRX_EL2_nMASK);
		}

		ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
		write_sysreg_s(hcrx, SYS_HCRX_EL2);
	}

	__activate_traps_hfgxtr(vcpu);
	__activate_traps_mpam(vcpu);
}

static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	write_sysreg(0, hstr_el2);
	if (system_supports_pmuv3()) {
		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
	}

	if (cpus_have_final_cap(ARM64_HAS_HCX))
		write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);

	__deactivate_traps_hfgxtr(vcpu);
	__deactivate_traps_mpam();
}

static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg_hcr(hcr);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) {
		u64 vsesr;

		/*
		 * When HCR_EL2.AMO is set, physical SErrors are taken to EL2
		 * and vSError injection is enabled for EL1. Conveniently, for
		 * NV this means that it is never the case where a 'physical'
		 * SError (injected by KVM or userspace) and vSError are
		 * deliverable to the same context.
		 *
		 * As such, we can trivially select between the host or guest's
		 * VSESR_EL2. Except for the case that FEAT_RAS hasn't been
		 * exposed to the guest, where ESR propagation in hardware
		 * occurs unconditionally.
		 *
		 * Paper over the architectural wart and use an IMPLEMENTATION
		 * DEFINED ESR value in case FEAT_RAS is hidden from the guest.
		 */
		if (!vserror_state_is_nested(vcpu))
			vsesr = vcpu->arch.vsesr_el2;
		else if (kvm_has_ras(kern_hyp_va(vcpu->kvm)))
			vsesr = __vcpu_sys_reg(vcpu, VSESR_EL2);
		else
			vsesr = ESR_ELx_ISV;

		write_sysreg_s(vsesr, SYS_VSESR_EL2);
	}
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	u64 *hcr;

	if (vserror_state_is_nested(vcpu))
		hcr = __ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2);
	else
		hcr = &vcpu->arch.hcr_el2;

	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 *
	 * Additionally, when in a nested context we need to propagate the
	 * updated state to the guest hypervisor's HCR_EL2.
	 */
	if (*hcr & HCR_VSE) {
		*hcr &= ~HCR_VSE;
		*hcr |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}

static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}

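/*
 * FEAT_MOPS: on a memory copy/set exception, reset the in-progress CPYx/SETx
 * register state so that the guest restarts the sequence from the prologue
 * instruction, and retire any pending single-step first.
 */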
static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);

	/*
	 * Finish potential single step before executing the prologue
	 * instruction.
	 */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	return true;
}

static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	/*
	 * The vCPU's saved SVE state layout always matches the max VL of the
	 * vCPU. Start off with the max VL so we can load the SVE state.
	 */
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr,
			    true);

	/*
	 * The effective VL for a VM could differ from the max VL when running a
	 * nested guest, as the guest hypervisor could select a smaller VL. Slap
	 * that into hardware before wrapping up.
	 */
	if (is_nested_ctxt(vcpu))
		sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);

	write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
}

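/*
 * Save the host's SVE state at the host's maximum VL (used on the protected
 * KVM path, see kvm_hyp_save_fpsimd_host()): ZCR_EL2 caps the effective
 * vector length, so raise it first or the upper bits of the Z registers
 * would be truncated in the dump.
 */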
static inline void __hyp_sve_save_host(void)
{
	struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);

	sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
	write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
	__sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
			 &sve_state->fpsr,
			 true);
}

static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
{
	u64 zcr_el1, zcr_el2;

	if (!guest_owns_fp_regs())
		return;

	if (vcpu_has_sve(vcpu)) {
		/* A guest hypervisor may restrict the effective max VL. */
		if (is_nested_ctxt(vcpu))
			zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
		else
			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;

		write_sysreg_el2(zcr_el2, SYS_ZCR);

		zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
		write_sysreg_el1(zcr_el1, SYS_ZCR);
	}
}

static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
{
	u64 zcr_el1, zcr_el2;

	if (!guest_owns_fp_regs())
		return;

	/*
	 * When the guest owns the FP regs, we know that guest+hyp traps for
	 * any FPSIMD/SVE/SME features exposed to the guest have been disabled
	 * by either __activate_cptr_traps() or kvm_hyp_handle_fpsimd()
	 * prior to __guest_entry(). As __guest_entry() guarantees a context
	 * synchronization event, we don't need an ISB here to avoid taking
	 * traps for anything that was exposed to the guest.
	 */
	if (vcpu_has_sve(vcpu)) {
		zcr_el1 = read_sysreg_el1(SYS_ZCR);
		__vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);

		/*
		 * The guest's state is always saved using the guest's max VL.
		 * Ensure that the host has the guest's max VL active such that
		 * the host can save the guest's state lazily, but don't
		 * artificially restrict the host to the guest's max VL.
		 */
		if (has_vhe()) {
			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
			write_sysreg_el2(zcr_el2, SYS_ZCR);
		} else {
			zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
			write_sysreg_el2(zcr_el2, SYS_ZCR);

			zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
			write_sysreg_el1(zcr_el1, SYS_ZCR);
		}
	}
}

static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
	/*
	 * Non-protected kvm relies on the host restoring its sve state.
	 * Protected kvm restores the host's sve state so as not to reveal
	 * that fpsimd was used by a guest, nor to leak the upper sve bits.
	 */
	if (system_supports_sve()) {
		__hyp_sve_save_host();
	} else {
		__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
	}

	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
		*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
}


/*
 * We trap the first access to the FP/SIMD to save the host context and
 * restore the guest context lazily.
 * If FP/SIMD is not implemented, handle the trap and inject an undefined
 * instruction exception to the guest. Similarly for trapped SVE accesses.
 */
static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool sve_guest;
	u8 esr_ec;

	if (!system_supports_fpsimd())
		return false;

	sve_guest = vcpu_has_sve(vcpu);
	esr_ec = kvm_vcpu_trap_get_class(vcpu);

	/* Only handle traps the vCPU can support here: */
	switch (esr_ec) {
	case ESR_ELx_EC_FP_ASIMD:
		/* Forward traps to the guest hypervisor as required */
		if (guest_hyp_fpsimd_traps_enabled(vcpu))
			return false;
		break;
	case ESR_ELx_EC_SYS64:
		if (WARN_ON_ONCE(!is_hyp_ctxt(vcpu)))
			return false;
		fallthrough;
	case ESR_ELx_EC_SVE:
		if (!sve_guest)
			return false;
		if (guest_hyp_sve_traps_enabled(vcpu))
			return false;
		break;
	default:
		return false;
	}

	/* Valid trap.  Switch the context: */

	/* First disable enough traps to allow us to update the registers */
	__deactivate_cptr_traps(vcpu);
	isb();

	/* Write out the host state if it's in the registers */
	if (is_protected_kvm_enabled() && host_owns_fp_regs())
		kvm_hyp_save_fpsimd_host(vcpu);

	/* Restore the guest state */
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
		write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;

	/*
	 * Re-enable traps necessary for the current state of the guest, e.g.
	 * those enabled by a guest hypervisor. The ERET to the guest will
	 * provide the necessary context synchronization.
	 */
	__activate_cptr_traps(vcpu);

	return true;
}

static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

/* Open-coded version of timer_get_offset() to allow for kern_hyp_va() */
static inline u64 hyp_timer_get_offset(struct arch_timer_context *ctxt)
{
	u64 offset = 0;

	if (ctxt->offset.vm_offset)
		offset += *kern_hyp_va(ctxt->offset.vm_offset);
	if (ctxt->offset.vcpu_offset)
		offset += *kern_hyp_va(ctxt->offset.vcpu_offset);

	return offset;
}

static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
{
	return arch_timer_read_cntpct_el0() - hyp_timer_get_offset(ctxt);
}

static bool kvm_handle_cntxct(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctxt;
	u32 sysreg;
	u64 val;

	/*
	 * We only get here for 64bit guests; 32bit guests will hit
	 * the long and winding road all the way to the standard
	 * handling. Yes, it sucks to be irrelevant.
	 *
	 * Also, we only deal with non-hypervisor context here (either
	 * an EL1 guest, or a non-HYP context of an EL2 guest).
	 */
	if (is_hyp_ctxt(vcpu))
		return false;

	sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));

	switch (sysreg) {
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
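			/*
			 * Without E2H, EL1PCTEN sits at bit 0 of CNTHCTL_EL2;
			 * shift it into the E2H layout (bit 10) so the same
			 * test works for both register formats.
			 */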
			if (!vcpu_el2_e2h_is_set(vcpu))
				val = (val & CNTHCTL_EL1PCTEN) << 10;

			if (!(val & (CNTHCTL_EL1PCTEN << 10)))
				return false;
		}

		ctxt = vcpu_ptimer(vcpu);
		break;
	case SYS_CNTVCT_EL0:
	case SYS_CNTVCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);

			if (val & CNTHCTL_EL1TVCT)
				return false;
		}

		ctxt = vcpu_vtimer(vcpu);
		break;
	default:
		return false;
	}

	val = compute_counter_value(ctxt);

	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
	__kvm_skip_instr(vcpu);
	return true;
}

static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	if (sysreg != SYS_TCR_EL1)
		return false;

	/*
	 * Affected parts do not advertise support for hardware Access Flag /
	 * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying
	 * control bits are still functional. The architecture requires these be
	 * RES0 on systems that do not implement FEAT_HAFDBS.
	 *
	 * Uphold the requirements of the architecture by masking guest writes
	 * to TCR_EL1.{HA,HD} here.
	 */
	val &= ~(TCR_HD | TCR_HA);
	write_sysreg_el1(val, SYS_TCR);
	__kvm_skip_instr(vcpu);
	return true;
}

static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    handle_tx2_tvm(vcpu))
		return true;

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
	    handle_ampere1_tcr(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	if (kvm_handle_cntxct(vcpu))
		return true;

	return false;
}

static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	return false;
}

static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
					       u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}
#define kvm_hyp_handle_iabt_low		kvm_hyp_handle_memory_fault
#define kvm_hyp_handle_watchpt_low	kvm_hyp_handle_memory_fault

static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
		}
	}

	return false;
}

typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

/*
 * Allow the hypervisor to handle the exit with an exit handler if it has one.
 *
 * Returns true if the hypervisor handled the exit, and control should go back
 * to the guest, or false if it hasn't.
 */
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
				       const exit_handler_fn *handlers)
{
	exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	if (fn)
		return fn(vcpu, exit_code);

	return false;
}

static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu)
{
	/*
	 * Check for the conditions of Cortex-A510's #2077057. When these occur,
	 * SPSR_EL2 can't be trusted, but isn't needed either as it is
	 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
	 * Are we single-stepping the guest, and took a PAC exception from the
	 * active-not-pending state?
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057)		&&
	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP			&&
	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS				&&
	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
				      const exit_handler_fn *handlers)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code) &&
	    ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC already has an adjusted PC, which we need to
		 * correct in order to return to after having injected
		 * the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/* Check if there's an exit handler and allow it to handle the exit. */
	if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
		goto guest;
exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_restore_elr_and_panic[];
	unsigned long addr, fixup;
	struct kvm_exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	this_cpu_ptr(&kvm_hyp_ctxt)->sys_regs[ELR_EL2] = elr_el2;
	write_sysreg(__guest_exit_restore_elr_and_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */