xref: /linux/arch/arm64/kvm/hyp/vhe/sysreg-sr.c (revision c43267e6794a36013fd495a4d81bf7f748fe4615)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012-2015 - ARM Ltd
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6 
7 #include <hyp/sysreg-sr.h>
8 
9 #include <linux/compiler.h>
10 #include <linux/kvm_host.h>
11 
12 #include <asm/kprobes.h>
13 #include <asm/kvm_asm.h>
14 #include <asm/kvm_emulate.h>
15 #include <asm/kvm_hyp.h>
16 #include <asm/kvm_nested.h>
17 
/*
 * Save the guest hypervisor's (virtual EL2) system register state.
 *
 * Only called when the vcpu is in hyp context (see __vcpu_put_switch_sysregs):
 * the guest's EL2 state is currently held in the CPU's EL1-redirected
 * registers, so read it back into the _EL2 slots of sys_regs[].
 */
static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
{
	/* These registers are common with EL1 */
	__vcpu_assign_sys_reg(vcpu, PAR_EL1,	 read_sysreg(par_el1));
	__vcpu_assign_sys_reg(vcpu, TPIDR_EL1,	 read_sysreg(tpidr_el1));

	/* Fault/context registers, accessed via the EL1 accessor under VHE */
	__vcpu_assign_sys_reg(vcpu, ESR_EL2,	 read_sysreg_el1(SYS_ESR));
	__vcpu_assign_sys_reg(vcpu, AFSR0_EL2,	 read_sysreg_el1(SYS_AFSR0));
	__vcpu_assign_sys_reg(vcpu, AFSR1_EL2,	 read_sysreg_el1(SYS_AFSR1));
	__vcpu_assign_sys_reg(vcpu, FAR_EL2,	 read_sysreg_el1(SYS_FAR));
	__vcpu_assign_sys_reg(vcpu, MAIR_EL2,	 read_sysreg_el1(SYS_MAIR));
	__vcpu_assign_sys_reg(vcpu, VBAR_EL2,	 read_sysreg_el1(SYS_VBAR));
	__vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
	__vcpu_assign_sys_reg(vcpu, AMAIR_EL2,	 read_sysreg_el1(SYS_AMAIR));

	/*
	 * In VHE mode those registers are compatible between EL1 and EL2,
	 * and the guest uses the _EL1 versions on the CPU naturally.
	 * So we save them into their _EL2 versions here.
	 * For nVHE mode we trap accesses to those registers, so our
	 * _EL2 copy in sys_regs[] is always up-to-date and we don't need
	 * to save anything here.
	 */
	if (vcpu_el2_e2h_is_set(vcpu)) {
		u64 val;

		/*
		 * We don't save CPTR_EL2, as accesses to CPACR_EL1
		 * are always trapped, ensuring that the in-memory
		 * copy is always up-to-date. A small blessing...
		 */
		__vcpu_assign_sys_reg(vcpu, SCTLR_EL2,	 read_sysreg_el1(SYS_SCTLR));
		__vcpu_assign_sys_reg(vcpu, TTBR0_EL2,	 read_sysreg_el1(SYS_TTBR0));
		__vcpu_assign_sys_reg(vcpu, TTBR1_EL2,	 read_sysreg_el1(SYS_TTBR1));
		__vcpu_assign_sys_reg(vcpu, TCR_EL2,	 read_sysreg_el1(SYS_TCR));

		/* Extended translation controls, only if the context has them */
		if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
			__vcpu_assign_sys_reg(vcpu, TCR2_EL2, read_sysreg_el1(SYS_TCR2));

			if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
				__vcpu_assign_sys_reg(vcpu, PIRE0_EL2, read_sysreg_el1(SYS_PIRE0));
				__vcpu_assign_sys_reg(vcpu, PIR_EL2, read_sysreg_el1(SYS_PIR));
			}

			if (ctxt_has_s1poe(&vcpu->arch.ctxt))
				__vcpu_assign_sys_reg(vcpu, POR_EL2, read_sysreg_el1(SYS_POR));
		}

		/*
		 * The EL1 view of CNTKCTL_EL1 has a bunch of RES0 bits where
		 * the interesting CNTHCTL_EL2 bits live. So preserve these
		 * bits when reading back the guest-visible value: only the
		 * CNTKCTL_VALID_BITS portion of the stored CNTHCTL_EL2 is
		 * replaced, the rest is left untouched.
		 */
		val = read_sysreg_el1(SYS_CNTKCTL);
		val &= CNTKCTL_VALID_BITS;
		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS);
		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val);
	}

	/* vEL2 stack pointer and exception return state live in the EL1 regs */
	__vcpu_assign_sys_reg(vcpu, SP_EL2,	 read_sysreg(sp_el1));
	__vcpu_assign_sys_reg(vcpu, ELR_EL2,	 read_sysreg_el1(SYS_ELR));
	__vcpu_assign_sys_reg(vcpu, SPSR_EL2,	 read_sysreg_el1(SYS_SPSR));

	if (ctxt_has_sctlr2(&vcpu->arch.ctxt))
		__vcpu_assign_sys_reg(vcpu, SCTLR2_EL2, read_sysreg_el1(SYS_SCTLR2));
}
84 
/*
 * Restore the guest hypervisor's (virtual EL2) system register state
 * onto the CPU, the counterpart of __sysreg_save_vel2_state().
 *
 * When the guest's HCR_EL2.E2H is clear, the _EL2 register values use
 * the non-VHE layout and must be translated to their EL1 equivalents
 * before being written to the (EL1-redirected) hardware registers.
 */
static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	/* These registers are common with EL1 */
	write_sysreg(__vcpu_sys_reg(vcpu, PAR_EL1),	par_el1);
	write_sysreg(__vcpu_sys_reg(vcpu, TPIDR_EL1),	tpidr_el1);

	/* Expose the vcpu's idregs as its own VPIDR/VMPIDR while at vEL2 */
	write_sysreg(ctxt_midr_el1(&vcpu->arch.ctxt),			vpidr_el2);
	write_sysreg(__vcpu_sys_reg(vcpu, MPIDR_EL1),			vmpidr_el2);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, MAIR_EL2),		SYS_MAIR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, VBAR_EL2),		SYS_VBAR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2),		SYS_CONTEXTIDR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, AMAIR_EL2),		SYS_AMAIR);

	if (vcpu_el2_e2h_is_set(vcpu)) {
		/*
		 * In VHE mode those registers are compatible between
		 * EL1 and EL2, so they can be written back verbatim.
		 */
		write_sysreg_el1(__vcpu_sys_reg(vcpu, SCTLR_EL2),   SYS_SCTLR);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, CPTR_EL2),    SYS_CPACR);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TTBR0_EL2),   SYS_TTBR0);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TTBR1_EL2),   SYS_TTBR1);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TCR_EL2),	    SYS_TCR);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, CNTHCTL_EL2), SYS_CNTKCTL);
	} else {
		/*
		 * CNTHCTL_EL2 only affects EL1 when running nVHE, so
		 * no need to restore it. The remaining registers need
		 * their nVHE-format values translated to the EL1 layout.
		 */
		val = translate_sctlr_el2_to_sctlr_el1(__vcpu_sys_reg(vcpu, SCTLR_EL2));
		write_sysreg_el1(val, SYS_SCTLR);
		val = translate_cptr_el2_to_cpacr_el1(__vcpu_sys_reg(vcpu, CPTR_EL2));
		write_sysreg_el1(val, SYS_CPACR);
		val = translate_ttbr0_el2_to_ttbr0_el1(__vcpu_sys_reg(vcpu, TTBR0_EL2));
		write_sysreg_el1(val, SYS_TTBR0);
		val = translate_tcr_el2_to_tcr_el1(__vcpu_sys_reg(vcpu, TCR_EL2));
		write_sysreg_el1(val, SYS_TCR);
	}

	/* Extended translation controls, only if the context has them */
	if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TCR2_EL2), SYS_TCR2);

		if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
			write_sysreg_el1(__vcpu_sys_reg(vcpu, PIR_EL2), SYS_PIR);
			write_sysreg_el1(__vcpu_sys_reg(vcpu, PIRE0_EL2), SYS_PIRE0);
		}

		if (ctxt_has_s1poe(&vcpu->arch.ctxt))
			write_sysreg_el1(__vcpu_sys_reg(vcpu, POR_EL2), SYS_POR);
	}

	/* Fault state, stack pointer and exception return state */
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ESR_EL2),		SYS_ESR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, AFSR0_EL2),	SYS_AFSR0);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, AFSR1_EL2),	SYS_AFSR1);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, FAR_EL2),		SYS_FAR);
	write_sysreg(__vcpu_sys_reg(vcpu, SP_EL2),		sp_el1);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ELR_EL2),		SYS_ELR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, SPSR_EL2),	SYS_SPSR);

	if (ctxt_has_sctlr2(&vcpu->arch.ctxt))
		write_sysreg_el1(__vcpu_sys_reg(vcpu, SCTLR2_EL2), SYS_SCTLR2);
}
149 
150 /*
151  * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
152  * pstate, which are handled as part of the el2 return state) on every
153  * switch (sp_el0 is being dealt with in the assembly code).
154  * tpidr_el0 and tpidrro_el0 only need to be switched when going
155  * to host userspace or a different VCPU.  EL1 registers only need to be
156  * switched when potentially going to run a different VCPU.  The latter two
157  * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
158  */
159 
/* Save the host's per-switch ("common") register state, see comment above */
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
165 
/*
 * Save the guest's per-switch register state plus the EL2 return state
 * (PC and pstate, per the comment above).
 */
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
172 
/* Restore the host's per-switch ("common") register state, see comment above */
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
178 
/*
 * Restore the guest's per-switch register state plus the EL2 return state
 * (PC and pstate, per the comment above).
 */
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
185 
186 /*
187  * The _EL0 value was written by the host's context switch and belongs to the
188  * VMM. Copy this into the guest's _EL1 register.
189  */
__mpam_guest_load(void)190 static inline void __mpam_guest_load(void)
191 {
192 	u64 mask = MPAM0_EL1_PARTID_D | MPAM0_EL1_PARTID_I | MPAM0_EL1_PMG_D | MPAM0_EL1_PMG_I;
193 
194 	if (system_supports_mpam()) {
195 		u64 val = (read_sysreg_s(SYS_MPAM0_EL1) & mask) | MPAM1_EL1_MPAMEN;
196 
197 		write_sysreg_el1(val, SYS_MPAM1);
198 	}
199 }
200 
201 /**
202  * __vcpu_load_switch_sysregs - Load guest system registers to the physical CPU
203  *
204  * @vcpu: The VCPU pointer
205  *
206  * Load system registers that do not affect the host's execution, for
207  * example EL1 system registers on a VHE system where the host kernel
208  * runs at EL2.  This function is called from KVM's vcpu_load() function
209  * and loading system register state early avoids having to load them on
210  * every entry to the VM.
211  */
void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
	struct kvm_cpu_context *host_ctxt;
	u64 midr, mpidr;

	/* Stash the host user state before loading the guest's below */
	host_ctxt = host_data_ptr(host_ctxt);
	__sysreg_save_user_state(host_ctxt);

	/*
	 * When running a normal EL1 guest, we only load a new vcpu
	 * after a context switch, which involves a DSB, so all
	 * speculative EL1&0 walks will have already completed.
	 * If running NV, the vcpu may transition between vEL1 and
	 * vEL2 without a context switch, so make sure we complete
	 * those walks before loading a new context.
	 */
	if (vcpu_has_nv(vcpu))
		dsb(nsh);

	/*
	 * Load guest EL1 and user state
	 *
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_user_state(guest_ctxt);
	__mpam_guest_load();

	if (unlikely(is_hyp_ctxt(vcpu))) {
		/* vEL2 guest: its EL2 state goes into the EL1 registers */
		__sysreg_restore_vel2_state(vcpu);
	} else {
		if (vcpu_has_nv(vcpu)) {
			/*
			 * As we're restoring a nested guest, set the value
			 * provided by the guest hypervisor.
			 */
			midr = ctxt_sys_reg(guest_ctxt, VPIDR_EL2);
			mpidr = ctxt_sys_reg(guest_ctxt, VMPIDR_EL2);
		} else {
			midr = ctxt_midr_el1(guest_ctxt);
			mpidr = ctxt_sys_reg(guest_ctxt, MPIDR_EL1);
		}

		__sysreg_restore_el1_state(guest_ctxt, midr, mpidr);
	}

	/* From here on, the vcpu state lives on the CPU, not in memory */
	vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
}
262 
263 /**
264  * __vcpu_put_switch_sysregs - Restore host system registers to the physical CPU
265  *
266  * @vcpu: The VCPU pointer
267  *
268  * Save guest system registers that do not affect the host's execution, for
269  * example EL1 system registers on a VHE system where the host kernel
270  * runs at EL2.  This function is called from KVM's vcpu_put() function
271  * and deferring saving system register state until we're no longer running the
272  * VCPU avoids having to save them on every exit from the VM.
273  */
void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = host_data_ptr(host_ctxt);

	/* Mirror of __vcpu_load_switch_sysregs(): vEL2 state vs plain EL1 */
	if (unlikely(is_hyp_ctxt(vcpu)))
		__sysreg_save_vel2_state(vcpu);
	else
		__sysreg_save_el1_state(guest_ctxt);

	__sysreg_save_user_state(guest_ctxt);
	__sysreg32_save_state(vcpu);

	/* Restore host user state */
	__sysreg_restore_user_state(host_ctxt);

	/* The vcpu state now only exists in memory */
	vcpu_clear_flag(vcpu, SYSREGS_ON_CPU);
}
294