// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SYSREG_SR_H__
#define __ARM64_KVM_HYP_SYSREG_SR_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

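/*
 * "Common" state, saved/restored for both the host and the guest
 * contexts: currently only MDSCR_EL1, the debug monitor control register.
 */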
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, MDSCR_EL1)	= read_sysreg(mdscr_el1);
}

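/* The EL0 thread pointer registers, usable by the guest's userspace. */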
static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, TPIDR_EL0)	= read_sysreg(tpidr_el0);
	ctxt_sys_reg(ctxt, TPIDRRO_EL0)	= read_sysreg(tpidrro_el0);
}

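/*
 * The bulk of the guest's EL1 context: translation controls, fault
 * reporting, memory attributes, vectors and EL1 exception return state.
 */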
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, CSSELR_EL1)	= read_sysreg(csselr_el1);
	ctxt_sys_reg(ctxt, SCTLR_EL1)	= read_sysreg_el1(SYS_SCTLR);
	ctxt_sys_reg(ctxt, CPACR_EL1)	= read_sysreg_el1(SYS_CPACR);
	ctxt_sys_reg(ctxt, TTBR0_EL1)	= read_sysreg_el1(SYS_TTBR0);
	ctxt_sys_reg(ctxt, TTBR1_EL1)	= read_sysreg_el1(SYS_TTBR1);
	ctxt_sys_reg(ctxt, TCR_EL1)	= read_sysreg_el1(SYS_TCR);
	ctxt_sys_reg(ctxt, ESR_EL1)	= read_sysreg_el1(SYS_ESR);
	ctxt_sys_reg(ctxt, AFSR0_EL1)	= read_sysreg_el1(SYS_AFSR0);
	ctxt_sys_reg(ctxt, AFSR1_EL1)	= read_sysreg_el1(SYS_AFSR1);
	ctxt_sys_reg(ctxt, FAR_EL1)	= read_sysreg_el1(SYS_FAR);
	ctxt_sys_reg(ctxt, MAIR_EL1)	= read_sysreg_el1(SYS_MAIR);
	ctxt_sys_reg(ctxt, VBAR_EL1)	= read_sysreg_el1(SYS_VBAR);
	ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
	ctxt_sys_reg(ctxt, AMAIR_EL1)	= read_sysreg_el1(SYS_AMAIR);
	ctxt_sys_reg(ctxt, CNTKCTL_EL1)	= read_sysreg_el1(SYS_CNTKCTL);
	ctxt_sys_reg(ctxt, PAR_EL1)	= read_sysreg_par();
	ctxt_sys_reg(ctxt, TPIDR_EL1)	= read_sysreg(tpidr_el1);

	ctxt_sys_reg(ctxt, SP_EL1)	= read_sysreg(sp_el1);
	ctxt_sys_reg(ctxt, ELR_EL1)	= read_sysreg_el1(SYS_ELR);
	ctxt_sys_reg(ctxt, SPSR_EL1)	= read_sysreg_el1(SYS_SPSR);
}

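/*
 * While the guest runs, its PC and PSTATE are held in ELR_EL2/SPSR_EL2,
 * so that is where they are captured on exit. With the RAS extension,
 * VDISR_EL2 holds what the guest observes as DISR_EL1.
 */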
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
	ctxt->regs.pc			= read_sysreg_el2(SYS_ELR);
	ctxt->regs.pstate		= read_sysreg_el2(SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
}

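/* Restore counterpart of __sysreg_save_common_state(). */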
static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1),  mdscr_el1);
}

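/* Restore counterpart of __sysreg_save_user_state(). */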
static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0),	tpidr_el0);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0),	tpidrro_el0);
}

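/*
 * Restore the guest's EL1 context. On nVHE systems affected by the
 * speculative AT erratum, SCTLR_EL1 and TCR_EL1 need careful sequencing;
 * see the comments below and the pairing with nVHE's __activate_traps()
 * and __deactivate_traps().
 */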
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1),	vmpidr_el2);
	write_sysreg(ctxt_sys_reg(ctxt, CSSELR_EL1),	csselr_el1);

	if (has_vhe() ||
	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
	} else if (!ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for guest registers, hence the context
		 * test. We're coming from the host, so SCTLR.M is already
		 * set. Pairs with nVHE's __activate_traps().
		 */
		write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) |
				  TCR_EPD1_MASK | TCR_EPD0_MASK),
				 SYS_TCR);
		isb();
	}

	write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1),	SYS_CPACR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR0_EL1),	SYS_TTBR0);
	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR1_EL1),	SYS_TTBR1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1),	SYS_ESR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1),	SYS_AFSR0);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR1_EL1),	SYS_AFSR1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, FAR_EL1),	SYS_FAR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, MAIR_EL1),	SYS_MAIR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, VBAR_EL1),	SYS_VBAR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1),	SYS_AMAIR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
	write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1),	par_el1);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1),	tpidr_el1);

	if (!has_vhe() &&
	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
	    ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for host registers, hence the context
		 * test. Pairs with nVHE's __deactivate_traps().
		 */
		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * deconfigured and disabled. We can now restore the host's
		 * S1 configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
	}

	write_sysreg(ctxt_sys_reg(ctxt, SP_EL1),	sp_el1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1),	SYS_ELR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1),	SYS_SPSR);
}

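/*
 * Program the EL2 exception return state for the ERET into the guest.
 * A sanity check prevents returning to a mode that is as, or more,
 * privileged than EL2.
 */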
static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
	u64 pstate = ctxt->regs.pstate;
	u64 mode = pstate & PSR_AA32_MODE_MASK;

	/*
	 * Safety check to ensure we're setting the CPU up to enter the guest
	 * in a less privileged mode.
	 *
	 * If we are attempting a return to EL2 or higher in AArch64 state,
	 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
	 * we'll take an illegal exception state exception immediately after
	 * the ERET to the guest.  Attempts to return to AArch32 Hyp will
	 * result in an illegal exception return because EL2's execution state
	 * is determined by SCR_EL3.RW.
	 */
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

	write_sysreg_el2(ctxt->regs.pc,			SYS_ELR);
	write_sysreg_el2(pstate,			SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
}

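/* AArch32 banked state, only present when EL1 runs in 32bit mode. */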
static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.spsr_abt = read_sysreg(spsr_abt);
	vcpu->arch.ctxt.spsr_und = read_sysreg(spsr_und);
	vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
	vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);

	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);

	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
}

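/* Restore counterpart of __sysreg32_save_state(). */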
static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	write_sysreg(vcpu->arch.ctxt.spsr_abt, spsr_abt);
	write_sysreg(vcpu->arch.ctxt.spsr_und, spsr_und);
	write_sysreg(vcpu->arch.ctxt.spsr_irq, spsr_irq);
	write_sysreg(vcpu->arch.ctxt.spsr_fiq, spsr_fiq);

	write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
	write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);

	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
		write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
}

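/*
 * Callers are expected to compose the helpers above. A minimal sketch,
 * modelled on the nVHE world switch (function names illustrative, not
 * part of this header):
 *
 *	static void __sysreg_save_state(struct kvm_cpu_context *ctxt)
 *	{
 *		__sysreg_save_el1_state(ctxt);
 *		__sysreg_save_common_state(ctxt);
 *		__sysreg_save_user_state(ctxt);
 *		__sysreg_save_el2_return_state(ctxt);
 *	}
 *
 *	static void __sysreg_restore_state(struct kvm_cpu_context *ctxt)
 *	{
 *		__sysreg_restore_el1_state(ctxt);
 *		__sysreg_restore_common_state(ctxt);
 *		__sysreg_restore_user_state(ctxt);
 *		__sysreg_restore_el2_return_state(ctxt);
 *	}
 */
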
#endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */