/*
 * QEMU Hypervisor.framework support for Apple Silicon
 *
 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
 * Copyright 2020 Google LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/log.h"

#include "system/runstate.h"
#include "system/hvf.h"
#include "system/hvf_int.h"
#include "system/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"
#include "cpu-sysregs.h"

#include <mach/mach_time.h>

#include "system/address-spaces.h"
#include "system/memory.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "system/cpus.h"
#include "arm-powerctl.h"
#include "target/arm/cpu.h"
#include "target/arm/internals.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
#include "trace.h"
#include "migration/vmstate.h"

#include "gdbstub/enums.h"

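/*
 * Bit positions of the software step (SS) and monitor debug events (MDE)
 * enable bits in MDSCR_EL1, used by the guest debug support below.
 */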
#define MDSCR_EL1_SS_SHIFT  0
#define MDSCR_EL1_MDE_SHIFT 15

static const uint16_t dbgbcr_regs[] = {
    HV_SYS_REG_DBGBCR0_EL1,
    HV_SYS_REG_DBGBCR1_EL1,
    HV_SYS_REG_DBGBCR2_EL1,
    HV_SYS_REG_DBGBCR3_EL1,
    HV_SYS_REG_DBGBCR4_EL1,
    HV_SYS_REG_DBGBCR5_EL1,
    HV_SYS_REG_DBGBCR6_EL1,
    HV_SYS_REG_DBGBCR7_EL1,
    HV_SYS_REG_DBGBCR8_EL1,
    HV_SYS_REG_DBGBCR9_EL1,
    HV_SYS_REG_DBGBCR10_EL1,
    HV_SYS_REG_DBGBCR11_EL1,
    HV_SYS_REG_DBGBCR12_EL1,
    HV_SYS_REG_DBGBCR13_EL1,
    HV_SYS_REG_DBGBCR14_EL1,
    HV_SYS_REG_DBGBCR15_EL1,
};

static const uint16_t dbgbvr_regs[] = {
    HV_SYS_REG_DBGBVR0_EL1,
    HV_SYS_REG_DBGBVR1_EL1,
    HV_SYS_REG_DBGBVR2_EL1,
    HV_SYS_REG_DBGBVR3_EL1,
    HV_SYS_REG_DBGBVR4_EL1,
    HV_SYS_REG_DBGBVR5_EL1,
    HV_SYS_REG_DBGBVR6_EL1,
    HV_SYS_REG_DBGBVR7_EL1,
    HV_SYS_REG_DBGBVR8_EL1,
    HV_SYS_REG_DBGBVR9_EL1,
    HV_SYS_REG_DBGBVR10_EL1,
    HV_SYS_REG_DBGBVR11_EL1,
    HV_SYS_REG_DBGBVR12_EL1,
    HV_SYS_REG_DBGBVR13_EL1,
    HV_SYS_REG_DBGBVR14_EL1,
    HV_SYS_REG_DBGBVR15_EL1,
};

static const uint16_t dbgwcr_regs[] = {
    HV_SYS_REG_DBGWCR0_EL1,
    HV_SYS_REG_DBGWCR1_EL1,
    HV_SYS_REG_DBGWCR2_EL1,
    HV_SYS_REG_DBGWCR3_EL1,
    HV_SYS_REG_DBGWCR4_EL1,
    HV_SYS_REG_DBGWCR5_EL1,
    HV_SYS_REG_DBGWCR6_EL1,
    HV_SYS_REG_DBGWCR7_EL1,
    HV_SYS_REG_DBGWCR8_EL1,
    HV_SYS_REG_DBGWCR9_EL1,
    HV_SYS_REG_DBGWCR10_EL1,
    HV_SYS_REG_DBGWCR11_EL1,
    HV_SYS_REG_DBGWCR12_EL1,
    HV_SYS_REG_DBGWCR13_EL1,
    HV_SYS_REG_DBGWCR14_EL1,
    HV_SYS_REG_DBGWCR15_EL1,
};

static const uint16_t dbgwvr_regs[] = {
    HV_SYS_REG_DBGWVR0_EL1,
    HV_SYS_REG_DBGWVR1_EL1,
    HV_SYS_REG_DBGWVR2_EL1,
    HV_SYS_REG_DBGWVR3_EL1,
    HV_SYS_REG_DBGWVR4_EL1,
    HV_SYS_REG_DBGWVR5_EL1,
    HV_SYS_REG_DBGWVR6_EL1,
    HV_SYS_REG_DBGWVR7_EL1,
    HV_SYS_REG_DBGWVR8_EL1,
    HV_SYS_REG_DBGWVR9_EL1,
    HV_SYS_REG_DBGWVR10_EL1,
    HV_SYS_REG_DBGWVR11_EL1,
    HV_SYS_REG_DBGWVR12_EL1,
    HV_SYS_REG_DBGWVR13_EL1,
    HV_SYS_REG_DBGWVR14_EL1,
    HV_SYS_REG_DBGWVR15_EL1,
};

static inline int hvf_arm_num_brps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, BRPS) + 1;
}

static inline int hvf_arm_num_wrps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, WRPS) + 1;
}

void hvf_arm_init_debug(void)
{
    hv_vcpu_config_t config;
    config = hv_vcpu_config_create();

    max_hw_bps = hvf_arm_num_brps(config);
    hw_breakpoints =
        g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps);

    max_hw_wps = hvf_arm_num_wrps(config);
    hw_watchpoints =
        g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps);
}

#define HVF_SYSREG(crn, crm, op0, op1, op2) \
        ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

#define SYSREG_OP0_SHIFT      20
#define SYSREG_OP0_MASK       0x3
#define SYSREG_OP0(sysreg)    ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK)
#define SYSREG_OP1_SHIFT      14
#define SYSREG_OP1_MASK       0x7
#define SYSREG_OP1(sysreg)    ((sysreg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK)
#define SYSREG_CRN_SHIFT      10
#define SYSREG_CRN_MASK       0xf
#define SYSREG_CRN(sysreg)    ((sysreg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK)
#define SYSREG_CRM_SHIFT      1
#define SYSREG_CRM_MASK       0xf
#define SYSREG_CRM(sysreg)    ((sysreg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK)
#define SYSREG_OP2_SHIFT      17
#define SYSREG_OP2_MASK       0x7
#define SYSREG_OP2(sysreg)    ((sysreg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK)

#define SYSREG(op0, op1, crn, crm, op2) \
    ((op0 << SYSREG_OP0_SHIFT) | \
     (op1 << SYSREG_OP1_SHIFT) | \
     (crn << SYSREG_CRN_SHIFT) | \
     (crm << SYSREG_CRM_SHIFT) | \
     (op2 << SYSREG_OP2_SHIFT))
#define SYSREG_MASK \
    SYSREG(SYSREG_OP0_MASK, \
           SYSREG_OP1_MASK, \
           SYSREG_CRN_MASK, \
           SYSREG_CRM_MASK, \
           SYSREG_OP2_MASK)
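/*
 * These field positions mirror the ISS encoding of a trapped MSR/MRS
 * access in ESR_EL2 (Op0 at [21:20], Op2 at [19:17], Op1 at [16:14],
 * CRn at [13:10], CRm at [4:1]), so a trapped system register access
 * can be matched directly against the syndrome value. For example,
 * SYSREG_OSLAR_EL1 below is SYSREG(2, 0, 1, 0, 4): op0=2, op1=0,
 * CRn=1, CRm=0, op2=4, the architectural encoding of OSLAR_EL1.
 */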
#define SYSREG_OSLAR_EL1      SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
#define SYSREG_CNTP_CTL_EL0   SYSREG(3, 3, 14, 2, 1)
#define SYSREG_PMCR_EL0       SYSREG(3, 3, 9, 12, 0)
#define SYSREG_PMUSERENR_EL0  SYSREG(3, 3, 9, 14, 0)
#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
#define SYSREG_PMOVSCLR_EL0   SYSREG(3, 3, 9, 12, 3)
#define SYSREG_PMSWINC_EL0    SYSREG(3, 3, 9, 12, 4)
#define SYSREG_PMSELR_EL0     SYSREG(3, 3, 9, 12, 5)
#define SYSREG_PMCEID0_EL0    SYSREG(3, 3, 9, 12, 6)
#define SYSREG_PMCEID1_EL0    SYSREG(3, 3, 9, 12, 7)
#define SYSREG_PMCCNTR_EL0    SYSREG(3, 3, 9, 13, 0)
#define SYSREG_PMCCFILTR_EL0  SYSREG(3, 3, 14, 15, 7)

#define SYSREG_ICC_AP0R0_EL1     SYSREG(3, 0, 12, 8, 4)
#define SYSREG_ICC_AP0R1_EL1     SYSREG(3, 0, 12, 8, 5)
#define SYSREG_ICC_AP0R2_EL1     SYSREG(3, 0, 12, 8, 6)
#define SYSREG_ICC_AP0R3_EL1     SYSREG(3, 0, 12, 8, 7)
#define SYSREG_ICC_AP1R0_EL1     SYSREG(3, 0, 12, 9, 0)
#define SYSREG_ICC_AP1R1_EL1     SYSREG(3, 0, 12, 9, 1)
#define SYSREG_ICC_AP1R2_EL1     SYSREG(3, 0, 12, 9, 2)
#define SYSREG_ICC_AP1R3_EL1     SYSREG(3, 0, 12, 9, 3)
#define SYSREG_ICC_ASGI1R_EL1    SYSREG(3, 0, 12, 11, 6)
#define SYSREG_ICC_BPR0_EL1      SYSREG(3, 0, 12, 8, 3)
#define SYSREG_ICC_BPR1_EL1      SYSREG(3, 0, 12, 12, 3)
#define SYSREG_ICC_CTLR_EL1      SYSREG(3, 0, 12, 12, 4)
#define SYSREG_ICC_DIR_EL1       SYSREG(3, 0, 12, 11, 1)
#define SYSREG_ICC_EOIR0_EL1     SYSREG(3, 0, 12, 8, 1)
#define SYSREG_ICC_EOIR1_EL1     SYSREG(3, 0, 12, 12, 1)
#define SYSREG_ICC_HPPIR0_EL1    SYSREG(3, 0, 12, 8, 2)
#define SYSREG_ICC_HPPIR1_EL1    SYSREG(3, 0, 12, 12, 2)
#define SYSREG_ICC_IAR0_EL1      SYSREG(3, 0, 12, 8, 0)
#define SYSREG_ICC_IAR1_EL1      SYSREG(3, 0, 12, 12, 0)
#define SYSREG_ICC_IGRPEN0_EL1   SYSREG(3, 0, 12, 12, 6)
#define SYSREG_ICC_IGRPEN1_EL1   SYSREG(3, 0, 12, 12, 7)
#define SYSREG_ICC_PMR_EL1       SYSREG(3, 0, 4, 6, 0)
#define SYSREG_ICC_RPR_EL1       SYSREG(3, 0, 12, 11, 3)
#define SYSREG_ICC_SGI0R_EL1     SYSREG(3, 0, 12, 11, 7)
#define SYSREG_ICC_SGI1R_EL1     SYSREG(3, 0, 12, 11, 5)
#define SYSREG_ICC_SRE_EL1       SYSREG(3, 0, 12, 12, 5)

#define SYSREG_MDSCR_EL1      SYSREG(2, 0, 0, 2, 2)
#define SYSREG_DBGBVR0_EL1    SYSREG(2, 0, 0, 0, 4)
#define SYSREG_DBGBCR0_EL1    SYSREG(2, 0, 0, 0, 5)
#define SYSREG_DBGWVR0_EL1    SYSREG(2, 0, 0, 0, 6)
#define SYSREG_DBGWCR0_EL1    SYSREG(2, 0, 0, 0, 7)
#define SYSREG_DBGBVR1_EL1    SYSREG(2, 0, 0, 1, 4)
#define SYSREG_DBGBCR1_EL1    SYSREG(2, 0, 0, 1, 5)
#define SYSREG_DBGWVR1_EL1    SYSREG(2, 0, 0, 1, 6)
#define SYSREG_DBGWCR1_EL1    SYSREG(2, 0, 0, 1, 7)
#define SYSREG_DBGBVR2_EL1    SYSREG(2, 0, 0, 2, 4)
#define SYSREG_DBGBCR2_EL1    SYSREG(2, 0, 0, 2, 5)
#define SYSREG_DBGWVR2_EL1    SYSREG(2, 0, 0, 2, 6)
#define SYSREG_DBGWCR2_EL1    SYSREG(2, 0, 0, 2, 7)
#define SYSREG_DBGBVR3_EL1    SYSREG(2, 0, 0, 3, 4)
#define SYSREG_DBGBCR3_EL1    SYSREG(2, 0, 0, 3, 5)
#define SYSREG_DBGWVR3_EL1    SYSREG(2, 0, 0, 3, 6)
#define SYSREG_DBGWCR3_EL1    SYSREG(2, 0, 0, 3, 7)
#define SYSREG_DBGBVR4_EL1    SYSREG(2, 0, 0, 4, 4)
#define SYSREG_DBGBCR4_EL1    SYSREG(2, 0, 0, 4, 5)
#define SYSREG_DBGWVR4_EL1    SYSREG(2, 0, 0, 4, 6)
#define SYSREG_DBGWCR4_EL1    SYSREG(2, 0, 0, 4, 7)
#define SYSREG_DBGBVR5_EL1    SYSREG(2, 0, 0, 5, 4)
#define SYSREG_DBGBCR5_EL1    SYSREG(2, 0, 0, 5, 5)
#define SYSREG_DBGWVR5_EL1    SYSREG(2, 0, 0, 5, 6)
#define SYSREG_DBGWCR5_EL1    SYSREG(2, 0, 0, 5, 7)
#define SYSREG_DBGBVR6_EL1    SYSREG(2, 0, 0, 6, 4)
#define SYSREG_DBGBCR6_EL1    SYSREG(2, 0, 0, 6, 5)
#define SYSREG_DBGWVR6_EL1    SYSREG(2, 0, 0, 6, 6)
#define SYSREG_DBGWCR6_EL1    SYSREG(2, 0, 0, 6, 7)
#define SYSREG_DBGBVR7_EL1    SYSREG(2, 0, 0, 7, 4)
#define SYSREG_DBGBCR7_EL1    SYSREG(2, 0, 0, 7, 5)
#define SYSREG_DBGWVR7_EL1    SYSREG(2, 0, 0, 7, 6)
#define SYSREG_DBGWCR7_EL1    SYSREG(2, 0, 0, 7, 7)
#define SYSREG_DBGBVR8_EL1    SYSREG(2, 0, 0, 8, 4)
#define SYSREG_DBGBCR8_EL1    SYSREG(2, 0, 0, 8, 5)
#define SYSREG_DBGWVR8_EL1    SYSREG(2, 0, 0, 8, 6)
#define SYSREG_DBGWCR8_EL1    SYSREG(2, 0, 0, 8, 7)
#define SYSREG_DBGBVR9_EL1    SYSREG(2, 0, 0, 9, 4)
#define SYSREG_DBGBCR9_EL1    SYSREG(2, 0, 0, 9, 5)
#define SYSREG_DBGWVR9_EL1    SYSREG(2, 0, 0, 9, 6)
#define SYSREG_DBGWCR9_EL1    SYSREG(2, 0, 0, 9, 7)
#define SYSREG_DBGBVR10_EL1   SYSREG(2, 0, 0, 10, 4)
#define SYSREG_DBGBCR10_EL1   SYSREG(2, 0, 0, 10, 5)
#define SYSREG_DBGWVR10_EL1   SYSREG(2, 0, 0, 10, 6)
#define SYSREG_DBGWCR10_EL1   SYSREG(2, 0, 0, 10, 7)
#define SYSREG_DBGBVR11_EL1   SYSREG(2, 0, 0, 11, 4)
#define SYSREG_DBGBCR11_EL1   SYSREG(2, 0, 0, 11, 5)
#define SYSREG_DBGWVR11_EL1   SYSREG(2, 0, 0, 11, 6)
#define SYSREG_DBGWCR11_EL1   SYSREG(2, 0, 0, 11, 7)
#define SYSREG_DBGBVR12_EL1   SYSREG(2, 0, 0, 12, 4)
#define SYSREG_DBGBCR12_EL1   SYSREG(2, 0, 0, 12, 5)
#define SYSREG_DBGWVR12_EL1   SYSREG(2, 0, 0, 12, 6)
#define SYSREG_DBGWCR12_EL1   SYSREG(2, 0, 0, 12, 7)
#define SYSREG_DBGBVR13_EL1   SYSREG(2, 0, 0, 13, 4)
#define SYSREG_DBGBCR13_EL1   SYSREG(2, 0, 0, 13, 5)
#define SYSREG_DBGWVR13_EL1   SYSREG(2, 0, 0, 13, 6)
#define SYSREG_DBGWCR13_EL1   SYSREG(2, 0, 0, 13, 7)
#define SYSREG_DBGBVR14_EL1   SYSREG(2, 0, 0, 14, 4)
#define SYSREG_DBGBCR14_EL1   SYSREG(2, 0, 0, 14, 5)
#define SYSREG_DBGWVR14_EL1   SYSREG(2, 0, 0, 14, 6)
#define SYSREG_DBGWCR14_EL1   SYSREG(2, 0, 0, 14, 7)
#define SYSREG_DBGBVR15_EL1   SYSREG(2, 0, 0, 15, 4)
#define SYSREG_DBGBCR15_EL1   SYSREG(2, 0, 0, 15, 5)
#define SYSREG_DBGWVR15_EL1   SYSREG(2, 0, 0, 15, 6)
#define SYSREG_DBGWCR15_EL1   SYSREG(2, 0, 0, 15, 7)

#define WFX_IS_WFE (1 << 0)

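/*
 * ENABLE, IMASK and ISTATUS bits of the generic timer CNT*_CTL
 * control registers.
 */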
#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)

static void hvf_wfi(CPUState *cpu);

static uint32_t chosen_ipa_bit_size;

typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;

typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint64_t midr;
    uint32_t reset_sctlr;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;

struct hvf_reg_match {
    int reg;
    uint64_t offset;
};

static const struct hvf_reg_match hvf_reg_match[] = {
    { HV_REG_X0,   offsetof(CPUARMState, xregs[0]) },
    { HV_REG_X1,   offsetof(CPUARMState, xregs[1]) },
    { HV_REG_X2,   offsetof(CPUARMState, xregs[2]) },
    { HV_REG_X3,   offsetof(CPUARMState, xregs[3]) },
    { HV_REG_X4,   offsetof(CPUARMState, xregs[4]) },
    { HV_REG_X5,   offsetof(CPUARMState, xregs[5]) },
    { HV_REG_X6,   offsetof(CPUARMState, xregs[6]) },
    { HV_REG_X7,   offsetof(CPUARMState, xregs[7]) },
    { HV_REG_X8,   offsetof(CPUARMState, xregs[8]) },
    { HV_REG_X9,   offsetof(CPUARMState, xregs[9]) },
    { HV_REG_X10,  offsetof(CPUARMState, xregs[10]) },
    { HV_REG_X11,  offsetof(CPUARMState, xregs[11]) },
    { HV_REG_X12,  offsetof(CPUARMState, xregs[12]) },
    { HV_REG_X13,  offsetof(CPUARMState, xregs[13]) },
    { HV_REG_X14,  offsetof(CPUARMState, xregs[14]) },
    { HV_REG_X15,  offsetof(CPUARMState, xregs[15]) },
    { HV_REG_X16,  offsetof(CPUARMState, xregs[16]) },
    { HV_REG_X17,  offsetof(CPUARMState, xregs[17]) },
    { HV_REG_X18,  offsetof(CPUARMState, xregs[18]) },
    { HV_REG_X19,  offsetof(CPUARMState, xregs[19]) },
    { HV_REG_X20,  offsetof(CPUARMState, xregs[20]) },
    { HV_REG_X21,  offsetof(CPUARMState, xregs[21]) },
    { HV_REG_X22,  offsetof(CPUARMState, xregs[22]) },
    { HV_REG_X23,  offsetof(CPUARMState, xregs[23]) },
    { HV_REG_X24,  offsetof(CPUARMState, xregs[24]) },
    { HV_REG_X25,  offsetof(CPUARMState, xregs[25]) },
    { HV_REG_X26,  offsetof(CPUARMState, xregs[26]) },
    { HV_REG_X27,  offsetof(CPUARMState, xregs[27]) },
    { HV_REG_X28,  offsetof(CPUARMState, xregs[28]) },
    { HV_REG_X29,  offsetof(CPUARMState, xregs[29]) },
    { HV_REG_X30,  offsetof(CPUARMState, xregs[30]) },
    { HV_REG_PC,   offsetof(CPUARMState, pc) },
};

static const struct hvf_reg_match hvf_fpreg_match[] = {
    { HV_SIMD_FP_REG_Q0,  offsetof(CPUARMState, vfp.zregs[0]) },
    { HV_SIMD_FP_REG_Q1,  offsetof(CPUARMState, vfp.zregs[1]) },
    { HV_SIMD_FP_REG_Q2,  offsetof(CPUARMState, vfp.zregs[2]) },
    { HV_SIMD_FP_REG_Q3,  offsetof(CPUARMState, vfp.zregs[3]) },
    { HV_SIMD_FP_REG_Q4,  offsetof(CPUARMState, vfp.zregs[4]) },
    { HV_SIMD_FP_REG_Q5,  offsetof(CPUARMState, vfp.zregs[5]) },
    { HV_SIMD_FP_REG_Q6,  offsetof(CPUARMState, vfp.zregs[6]) },
    { HV_SIMD_FP_REG_Q7,  offsetof(CPUARMState, vfp.zregs[7]) },
    { HV_SIMD_FP_REG_Q8,  offsetof(CPUARMState, vfp.zregs[8]) },
    { HV_SIMD_FP_REG_Q9,  offsetof(CPUARMState, vfp.zregs[9]) },
    { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
    { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
    { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
    { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
    { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
    { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
    { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
    { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
    { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
    { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
    { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
    { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
    { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
    { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
    { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
    { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
    { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
    { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
    { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
    { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
    { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
    { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
};

struct hvf_sreg_match {
    int reg;
    uint32_t key;
    uint32_t cp_idx;
};

static struct hvf_sreg_match hvf_sreg_match[] = {
    { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 7) },

#ifdef SYNC_NO_RAW_REGS
    /*
     * The registers below are manually synced on init because they are
     * marked as NO_RAW. We still list them to make number space sync easier.
     */
    { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
    { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
    { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
    { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
#ifdef SYNC_NO_MMFR0
    /* We keep the hardware MMFR0 around. HW limits are there anyway */
    { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
    /* Add ID_AA64MMFR3_EL1 here when HVF supports it */

    { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
    { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
    { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
    { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
    { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
    { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },

    { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
    { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
    { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
    { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
    { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
    { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
    { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
    { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
    { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
    { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },

    { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
    { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
    { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
    { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
    { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
    { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
    { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
    { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
    { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
    { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
    { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
    { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
    { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
    { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
    { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
    { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
    { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
    { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
};
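
/*
 * hvf_sreg_match maps each HVF system register ID to the corresponding
 * QEMU cpreg key. cp_idx is filled in by hvf_arch_init_vcpu(): it is the
 * index of the register in the cpreg_values sync list, or -1 if QEMU has
 * no matching cpreg and the register is skipped during synchronization.
 */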

int hvf_get_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
        *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      &fpval);
        memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
        assert_hvf_ok(ret);
    }

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpcr(env, val);

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpsr(env, val);

    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val);
    assert_hvf_ok(ret);
    pstate_write(env, val);

    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1: {
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are holding the gdbstub's view of the registers (set in
                 * hvf_arch_update_guest_debug()).
                 * Since the environment is used to store only the guest's view
                 * of the registers, don't update it with the values from the
                 * vCPU but simply keep the values from the previous
                 * environment.
                 */
                const ARMCPRegInfo *ri;
                ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key);
                val = read_raw_cp_reg(env, ri);

                arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
                continue;
            }
            }
        }

        ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val);
        assert_hvf_ok(ret);

        arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
    }
    assert(write_list_to_cpustate(arm_cpu));

    aarch64_restore_sp(env, arm_current_el(env));

    return 0;
}

int hvf_put_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
        ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
        ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      fpval);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env));
    assert_hvf_ok(ret);

    aarch64_save_sp(env, arm_current_el(env));

    assert(write_cpustate_to_list(arm_cpu, false));
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1:
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are already holding the gdbstub's view of the registers (set
                 * in hvf_arch_update_guest_debug()).
                 */
                continue;
            }
        }

        val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
        ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
    assert_hvf_ok(ret);

    return 0;
}

static void flush_cpu_state(CPUState *cpu)
{
    if (cpu->vcpu_dirty) {
        hvf_put_registers(cpu);
        cpu->vcpu_dirty = false;
    }
}

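/*
 * In the ISS of a trapped data abort or sysreg access, Rt == 31 names
 * XZR/WZR rather than SP, so reads return 0 and writes are dropped here.
 */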
static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val);
        assert_hvf_ok(r);
    }
}

static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
    uint64_t val = 0;
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val);
        assert_hvf_ok(r);
    }

    return val;
}

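/*
 * For example, with a 40-bit IPA size the PARange field is clamped to
 * 0b0010 (40 bits of physical address); an IPA size that falls between
 * two architected PARange values rounds down to the smaller one.
 */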
static void clamp_id_aa64mmfr0_parange_to_ipa_size(ARMISARegisters *isar)
{
    uint32_t ipa_size = chosen_ipa_bit_size ?
            chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();
    uint64_t id_aa64mmfr0;

    /* Clamp down the PARange to the IPA size the kernel supports. */
    uint8_t index = round_down_to_parange_index(ipa_size);
    id_aa64mmfr0 = GET_IDREG(isar, ID_AA64MMFR0);
    id_aa64mmfr0 = (id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
    SET_IDREG(isar, ID_AA64MMFR0, id_aa64mmfr0);
}

static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    ARMISARegisters host_isar = {};
    const struct isar_regs {
        int reg;
        uint64_t *val;
    } regs[] = {
        { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.idregs[ID_AA64PFR0_EL1_IDX] },
        { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.idregs[ID_AA64PFR1_EL1_IDX] },
        { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.idregs[ID_AA64DFR0_EL1_IDX] },
        { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.idregs[ID_AA64DFR1_EL1_IDX] },
        { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.idregs[ID_AA64ISAR0_EL1_IDX] },
        { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.idregs[ID_AA64ISAR1_EL1_IDX] },
        /* Add ID_AA64ISAR2_EL1 here when HVF supports it */
        { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.idregs[ID_AA64MMFR0_EL1_IDX] },
        { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.idregs[ID_AA64MMFR1_EL1_IDX] },
        { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.idregs[ID_AA64MMFR2_EL1_IDX] },
        /* Add ID_AA64MMFR3_EL1 here when HVF supports it */
    };
    hv_vcpu_t fd;
    hv_return_t r = HV_SUCCESS;
    hv_vcpu_exit_t *exit;
    int i;

    ahcf->dtb_compatible = "arm,armv8";
    ahcf->features = (1ULL << ARM_FEATURE_V8) |
                     (1ULL << ARM_FEATURE_NEON) |
                     (1ULL << ARM_FEATURE_AARCH64) |
                     (1ULL << ARM_FEATURE_PMU) |
                     (1ULL << ARM_FEATURE_GENERIC_TIMER);

    /* We set up a small vcpu to extract host registers */

    if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
        return false;
    }

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }
    r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
    r |= hv_vcpu_destroy(fd);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar);

    /*
     * Disable SME, which is not properly handled by QEMU hvf yet.
     * To allow this through we would need to:
     * - make sure that the SME state is correctly handled in the
     *   get_registers/put_registers functions
     * - get the SME-specific CPU properties to work with accelerators
     *   other than TCG
     * - fix any assumptions we made that SME implies SVE (since
     *   on the M4 there is SME but not SVE)
     */
    SET_IDREG(&host_isar, ID_AA64PFR1,
              GET_IDREG(&host_isar, ID_AA64PFR1) & ~R_ID_AA64PFR1_SME_MASK);

    ahcf->isar = host_isar;

    /*
     * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
     * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
     */
    ahcf->reset_sctlr = 0x30100180;
    /*
     * SPAN is disabled by default when SCTLR.SPAN=1. To improve compatibility,
     * let's disable it on boot and then allow guest software to turn it on by
     * setting it to 0.
     */
    ahcf->reset_sctlr |= 0x00800000;

    /* Make sure we don't advertise AArch32 support for EL0/EL1 */
    if ((GET_IDREG(&host_isar, ID_AA64PFR0) & 0xff) != 0x11) {
        return false;
    }

    return r == HV_SUCCESS;
}

uint32_t hvf_arm_get_default_ipa_bit_size(void)
{
    uint32_t default_ipa_size;
    hv_return_t ret = hv_vm_config_get_default_ipa_size(&default_ipa_size);
    assert_hvf_ok(ret);

    return default_ipa_size;
}

uint32_t hvf_arm_get_max_ipa_bit_size(void)
{
    uint32_t max_ipa_size;
    hv_return_t ret = hv_vm_config_get_max_ipa_size(&max_ipa_size);
    assert_hvf_ok(ret);

    /*
     * We clamp any IPA size we want to back the VM with to a valid PARange
     * value so the guest doesn't try and map memory outside of the valid range.
     * This logic just clamps the passed in IPA bit size to the first valid
     * PARange value <= to it.
     */
    return round_down_to_parange_bit_size(max_ipa_size);
}

void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    if (!arm_host_cpu_features.dtb_compatible) {
        if (!hvf_enabled() ||
            !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /*
             * We can't report this error yet, so flag that we need to
             * in arm_cpu_realizefn().
             */
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    cpu->env.features = arm_host_cpu_features.features;
    cpu->midr = arm_host_cpu_features.midr;
    cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}

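/*
 * The chosen IPA bit size is recorded here so that
 * clamp_id_aa64mmfr0_parange_to_ipa_size() can later clamp the guest's
 * visible PARange to what the VM configuration actually supports.
 */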
hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
{
    hv_return_t ret;
    hv_vm_config_t config = hv_vm_config_create();

    ret = hv_vm_config_set_ipa_size(config, pa_range);
    if (ret != HV_SUCCESS) {
        goto cleanup;
    }
    chosen_ipa_bit_size = pa_range;

    ret = hv_vm_create(config);

cleanup:
    os_release(config);

    return ret;
}

int hvf_arch_init_vcpu(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
    uint32_t sregs_cnt = 0;
    uint64_t pfr;
    hv_return_t ret;
    int i;

    env->aarch64 = true;
    asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));

    /* Allocate enough space for our sysreg sync */
    arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
                                     sregs_match_len);
    arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
                                    sregs_match_len);
    arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
                                             arm_cpu->cpreg_vmstate_indexes,
                                             sregs_match_len);
    arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
                                            arm_cpu->cpreg_vmstate_values,
                                            sregs_match_len);

    memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));

    /* Populate cp list for all known sysregs */
    for (i = 0; i < sregs_match_len; i++) {
        const ARMCPRegInfo *ri;
        uint32_t key = hvf_sreg_match[i].key;

        ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
        if (ri) {
            assert(!(ri->type & ARM_CP_NO_RAW));
            hvf_sreg_match[i].cp_idx = sregs_cnt;
            arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
        } else {
            hvf_sreg_match[i].cp_idx = -1;
        }
    }
    arm_cpu->cpreg_array_len = sregs_cnt;
    arm_cpu->cpreg_vmstate_array_len = sregs_cnt;

    assert(write_cpustate_to_list(arm_cpu, false));

    /* Set CP_NO_RAW system registers on init */
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1,
                              arm_cpu->midr);
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1,
                              arm_cpu->mp_affinity);
    assert_hvf_ok(ret);

    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
    assert_hvf_ok(ret);
    pfr |= env->gicv3state ? (1 << 24) : 0;
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
    assert_hvf_ok(ret);

    /* We're limited to underlying hardware caps, override internal versions */
    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              &arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
    assert_hvf_ok(ret);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar);
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
    assert_hvf_ok(ret);

    return 0;
}

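/*
 * hv_vcpus_exit() forces the vCPU to return from hv_vcpu_run() on its
 * owning thread, so the kick is observed even while the guest is running.
 */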
void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpus_exit(&cpu->accel->fd, 1);
}

static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
                                uint32_t syndrome, int target_el)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    cpu->exception_index = excp;
    env->exception.target_el = target_el;
    env->exception.syndrome = syndrome;

    arm_cpu_do_interrupt(cpu);
}

static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
{
    int32_t ret = arm_set_cpu_off(arm_cpu_mp_affinity(arm_cpu));
    assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}

/*
 * Handle a PSCI call.
 *
 * Returns true on success,
 *         false when the PSCI call is unknown.
 */
static bool hvf_handle_psci_call(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t param[4] = {
        env->xregs[0],
        env->xregs[1],
        env->xregs[2],
        env->xregs[3]
    };
    uint64_t context_id, mpidr;
    bool target_aarch64 = true;
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    target_ulong entry;
    int target_el = 1;
    int32_t ret = 0;

    trace_hvf_psci_call(param[0], param[1], param[2], param[3],
                        arm_cpu_mp_affinity(arm_cpu));

    switch (param[0]) {
    case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        ret = QEMU_PSCI_VERSION_1_1;
        break;
    case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
        break;
    case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
    case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        mpidr = param[1];

        switch (param[2]) {
        case 0:
            target_cpu_state = arm_get_cpu_by_id(mpidr);
            if (!target_cpu_state) {
                ret = QEMU_PSCI_RET_INVALID_PARAMS;
                break;
            }
            target_cpu = ARM_CPU(target_cpu_state);

            ret = target_cpu->power_state;
            break;
        default:
            /* Everything above affinity level 0 is always on. */
            ret = 0;
        }
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        /*
         * QEMU reset and shutdown are async requests, but PSCI
         * mandates that we never return from the reset/shutdown
         * call, so power the CPU off now so it doesn't execute
         * anything further.
         */
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN64_CPU_ON:
        mpidr = param[1];
        entry = param[2];
        context_id = param[3];
        ret = arm_set_cpu_on(mpidr, entry, context_id,
                             target_el, target_aarch64);
        break;
    case QEMU_PSCI_0_1_FN_CPU_OFF:
    case QEMU_PSCI_0_2_FN_CPU_OFF:
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        /* Affinity levels are not supported in QEMU */
        if (param[1] & 0xfffe0000) {
            ret = QEMU_PSCI_RET_INVALID_PARAMS;
            break;
        }
        /* Powerdown is not supported, we always go into WFI */
        env->xregs[0] = 0;
        hvf_wfi(cpu);
        break;
    case QEMU_PSCI_0_1_FN_MIGRATE:
    case QEMU_PSCI_0_2_FN_MIGRATE:
        ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        break;
    case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
        switch (param[1]) {
        case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        case QEMU_PSCI_0_1_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN64_CPU_ON:
        case QEMU_PSCI_0_1_FN_CPU_OFF:
        case QEMU_PSCI_0_2_FN_CPU_OFF:
        case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
            ret = 0;
            break;
        case QEMU_PSCI_0_1_FN_MIGRATE:
        case QEMU_PSCI_0_2_FN_MIGRATE:
        default:
            ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        }
        break;
    default:
        return false;
    }

    env->xregs[0] = ret;
    return true;
}

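/*
 * The architected ID register space is op0=3, op1=0, CRn=0, CRm=1..7;
 * unimplemented registers in that block are treated as RES0 by
 * hvf_sysreg_read() below rather than raising an UNDEFINED exception.
 */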
is_id_sysreg(uint32_t reg)1239 static bool is_id_sysreg(uint32_t reg)
1240 {
1241     return SYSREG_OP0(reg) == 3 &&
1242            SYSREG_OP1(reg) == 0 &&
1243            SYSREG_CRN(reg) == 0 &&
1244            SYSREG_CRM(reg) >= 1 &&
1245            SYSREG_CRM(reg) < 8;
1246 }
1247 
hvf_reg2cp_reg(uint32_t reg)1248 static uint32_t hvf_reg2cp_reg(uint32_t reg)
1249 {
1250     return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
1251                               (reg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK,
1252                               (reg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK,
1253                               (reg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK,
1254                               (reg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK,
1255                               (reg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK);
1256 }
1257 
hvf_sysreg_read_cp(CPUState * cpu,uint32_t reg,uint64_t * val)1258 static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
1259 {
1260     ARMCPU *arm_cpu = ARM_CPU(cpu);
1261     CPUARMState *env = &arm_cpu->env;
1262     const ARMCPRegInfo *ri;
1263 
1264     ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
1265     if (ri) {
1266         if (ri->accessfn) {
1267             if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) {
1268                 return false;
1269             }
1270         }
1271         if (ri->type & ARM_CP_CONST) {
1272             *val = ri->resetvalue;
1273         } else if (ri->readfn) {
1274             *val = ri->readfn(env, ri);
1275         } else {
1276             *val = CPREG_FIELD64(env, ri);
1277         }
1278         trace_hvf_vgic_read(ri->name, *val);
1279         return true;
1280     }
1281 
1282     return false;
1283 }
1284 
hvf_sysreg_read(CPUState * cpu,uint32_t reg,uint64_t * val)1285 static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
1286 {
1287     ARMCPU *arm_cpu = ARM_CPU(cpu);
1288     CPUARMState *env = &arm_cpu->env;
1289 
1290     if (arm_feature(env, ARM_FEATURE_PMU)) {
1291         switch (reg) {
1292         case SYSREG_PMCR_EL0:
1293             *val = env->cp15.c9_pmcr;
1294             return 0;
1295         case SYSREG_PMCCNTR_EL0:
1296             pmu_op_start(env);
1297             *val = env->cp15.c15_ccnt;
1298             pmu_op_finish(env);
1299             return 0;
1300         case SYSREG_PMCNTENCLR_EL0:
1301             *val = env->cp15.c9_pmcnten;
1302             return 0;
1303         case SYSREG_PMOVSCLR_EL0:
1304             *val = env->cp15.c9_pmovsr;
1305             return 0;
1306         case SYSREG_PMSELR_EL0:
1307             *val = env->cp15.c9_pmselr;
1308             return 0;
1309         case SYSREG_PMINTENCLR_EL1:
1310             *val = env->cp15.c9_pminten;
1311             return 0;
1312         case SYSREG_PMCCFILTR_EL0:
1313             *val = env->cp15.pmccfiltr_el0;
1314             return 0;
1315         case SYSREG_PMCNTENSET_EL0:
1316             *val = env->cp15.c9_pmcnten;
1317             return 0;
1318         case SYSREG_PMUSERENR_EL0:
1319             *val = env->cp15.c9_pmuserenr;
1320             return 0;
1321         case SYSREG_PMCEID0_EL0:
1322         case SYSREG_PMCEID1_EL0:
1323             /* We can't really count anything yet, declare all events invalid */
1324             *val = 0;
1325             return 0;
1326         }
1327     }
1328 
1329     switch (reg) {
    case SYSREG_CNTPCT_EL0:
        *val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
               gt_cntfrq_period_ns(arm_cpu);
        return 0;
    case SYSREG_OSLSR_EL1:
        *val = env->cp15.oslsr_el1;
        return 0;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        return 0;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
    case SYSREG_ICC_CTLR_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (hvf_sysreg_read_cp(cpu, reg, val)) {
            return 0;
        }
        break;
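    /*
     * For the DBGB/DBGW registers the index n of DBGxyRn_EL1 is encoded
     * in CRm, so SYSREG_CRM(reg) recovers the array index directly.
     */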
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
        *val = env->cp15.dbgbvr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        *val = env->cp15.dbgbcr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        *val = env->cp15.dbgwvr[SYSREG_CRM(reg)];
        return 0;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        *val = env->cp15.dbgwcr[SYSREG_CRM(reg)];
        return 0;
    default:
        if (is_id_sysreg(reg)) {
            /* ID system registers read as RES0 */
            *val = 0;
            return 0;
        }
    }

    cpu_synchronize_state(cpu);
    trace_hvf_unhandled_sysreg_read(env->pc, reg,
                                    SYSREG_OP0(reg),
                                    SYSREG_OP1(reg),
                                    SYSREG_CRN(reg),
                                    SYSREG_CRM(reg),
                                    SYSREG_OP2(reg));
    hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
    return 1;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

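/*
 * No PMU events are implemented for hvf yet, so this always fails; as a
 * consequence pmu_counter_enabled() can only succeed for the cycle
 * counter (counter 31).
 */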
static bool pmu_event_supported(uint16_t number)
{
    return false;
}

/*
 * Return true if the counter (pass 31 for PMCCNTR) should count events at
 * the current EL, given the security state and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool enabled, filtered = true;
    int el = arm_current_el(env);

    enabled = (env->cp15.c9_pmcr & PMCRE) &&
              (env->cp15.c9_pmcnten & (1 << counter));

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

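    /*
     * PMXEVTYPER.P (privileged) and .U (user) are filter bits: when set,
     * the counter does not count at EL1 or EL0 respectively.
     */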
    if (el == 0) {
        filtered = filter & PMXEVTYPER_U;
    } else if (el == 1) {
        filtered = filter & PMXEVTYPER_P;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is set up to an
         * event we support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!pmu_event_supported(event)) {
            return false;
        }
    }

    return enabled && !filtered;
}

static void pmswinc_write(CPUARMState *env, uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

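            /*
             * old & ~new & INT32_MIN is only true when bit 31 falls from
             * 1 to 0, i.e. when the 32-bit counter wraps around.
             */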
            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;
        }
    }
}

static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    const ARMCPRegInfo *ri;

    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));

    if (ri) {
        if (ri->accessfn) {
            if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) {
                return false;
            }
        }
        if (ri->writefn) {
            ri->writefn(env, ri, val);
        } else {
            CPREG_FIELD64(env, ri) = val;
        }

        trace_hvf_vgic_write(ri->name, val);
        return true;
    }

    return false;
}

static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    trace_hvf_sysreg_write(reg,
                           SYSREG_OP0(reg),
                           SYSREG_OP1(reg),
                           SYSREG_CRN(reg),
                           SYSREG_CRM(reg),
                           SYSREG_OP2(reg),
                           val);

    if (arm_feature(env, ARM_FEATURE_PMU)) {
        switch (reg) {
        case SYSREG_PMCCNTR_EL0:
            pmu_op_start(env);
            env->cp15.c15_ccnt = val;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMCR_EL0:
            pmu_op_start(env);

            if (val & PMCRC) {
                /* The counter has been reset */
                env->cp15.c15_ccnt = 0;
            }

            if (val & PMCRP) {
                unsigned int i;
                for (i = 0; i < pmu_num_counters(env); i++) {
                    env->cp15.c14_pmevcntr[i] = 0;
                }
            }

            env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
            env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);

            pmu_op_finish(env);
            return 0;
        case SYSREG_PMUSERENR_EL0:
            env->cp15.c9_pmuserenr = val & 0xf;
            return 0;
        case SYSREG_PMCNTENSET_EL0:
            env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
            return 0;
        case SYSREG_PMCNTENCLR_EL0:
            env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
            return 0;
        case SYSREG_PMINTENCLR_EL1:
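            /* Writing 1 to a bit disables that counter's overflow interrupt */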
            pmu_op_start(env);
            env->cp15.c9_pminten &= ~val;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMOVSCLR_EL0:
            pmu_op_start(env);
            env->cp15.c9_pmovsr &= ~val;
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMSWINC_EL0:
            pmu_op_start(env);
            pmswinc_write(env, val);
            pmu_op_finish(env);
            return 0;
        case SYSREG_PMSELR_EL0:
            env->cp15.c9_pmselr = val & 0x1f;
            return 0;
        case SYSREG_PMCCFILTR_EL0:
            pmu_op_start(env);
            env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
            pmu_op_finish(env);
            return 0;
        }
    }

    switch (reg) {
    case SYSREG_OSLAR_EL1:
        env->cp15.oslsr_el1 = val & 1;
        return 0;
    case SYSREG_CNTP_CTL_EL0:
        /*
         * Guests should not rely on the physical counter, but macOS still
         * emits disable writes to it. Accept the write, but ignore it.
         */
        qemu_log_mask(LOG_UNIMP, "Unsupported write to CNTP_CTL_EL0\n");
        return 0;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        return 0;
    case SYSREG_ICC_AP0R0_EL1:
    case SYSREG_ICC_AP0R1_EL1:
    case SYSREG_ICC_AP0R2_EL1:
    case SYSREG_ICC_AP0R3_EL1:
    case SYSREG_ICC_AP1R0_EL1:
    case SYSREG_ICC_AP1R1_EL1:
    case SYSREG_ICC_AP1R2_EL1:
    case SYSREG_ICC_AP1R3_EL1:
    case SYSREG_ICC_ASGI1R_EL1:
    case SYSREG_ICC_BPR0_EL1:
    case SYSREG_ICC_BPR1_EL1:
    case SYSREG_ICC_CTLR_EL1:
    case SYSREG_ICC_DIR_EL1:
    case SYSREG_ICC_EOIR0_EL1:
    case SYSREG_ICC_EOIR1_EL1:
    case SYSREG_ICC_HPPIR0_EL1:
    case SYSREG_ICC_HPPIR1_EL1:
    case SYSREG_ICC_IAR0_EL1:
    case SYSREG_ICC_IAR1_EL1:
    case SYSREG_ICC_IGRPEN0_EL1:
    case SYSREG_ICC_IGRPEN1_EL1:
    case SYSREG_ICC_PMR_EL1:
    case SYSREG_ICC_SGI0R_EL1:
    case SYSREG_ICC_SGI1R_EL1:
    case SYSREG_ICC_SRE_EL1:
        /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
        if (hvf_sysreg_write_cp(cpu, reg, val)) {
            return 0;
        }
        break;
    case SYSREG_MDSCR_EL1:
        env->cp15.mdscr_el1 = val;
        return 0;
    case SYSREG_DBGBVR0_EL1:
    case SYSREG_DBGBVR1_EL1:
    case SYSREG_DBGBVR2_EL1:
    case SYSREG_DBGBVR3_EL1:
    case SYSREG_DBGBVR4_EL1:
    case SYSREG_DBGBVR5_EL1:
    case SYSREG_DBGBVR6_EL1:
    case SYSREG_DBGBVR7_EL1:
    case SYSREG_DBGBVR8_EL1:
    case SYSREG_DBGBVR9_EL1:
    case SYSREG_DBGBVR10_EL1:
    case SYSREG_DBGBVR11_EL1:
    case SYSREG_DBGBVR12_EL1:
    case SYSREG_DBGBVR13_EL1:
    case SYSREG_DBGBVR14_EL1:
    case SYSREG_DBGBVR15_EL1:
        env->cp15.dbgbvr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGBCR0_EL1:
    case SYSREG_DBGBCR1_EL1:
    case SYSREG_DBGBCR2_EL1:
    case SYSREG_DBGBCR3_EL1:
    case SYSREG_DBGBCR4_EL1:
    case SYSREG_DBGBCR5_EL1:
    case SYSREG_DBGBCR6_EL1:
    case SYSREG_DBGBCR7_EL1:
    case SYSREG_DBGBCR8_EL1:
    case SYSREG_DBGBCR9_EL1:
    case SYSREG_DBGBCR10_EL1:
    case SYSREG_DBGBCR11_EL1:
    case SYSREG_DBGBCR12_EL1:
    case SYSREG_DBGBCR13_EL1:
    case SYSREG_DBGBCR14_EL1:
    case SYSREG_DBGBCR15_EL1:
        env->cp15.dbgbcr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGWVR0_EL1:
    case SYSREG_DBGWVR1_EL1:
    case SYSREG_DBGWVR2_EL1:
    case SYSREG_DBGWVR3_EL1:
    case SYSREG_DBGWVR4_EL1:
    case SYSREG_DBGWVR5_EL1:
    case SYSREG_DBGWVR6_EL1:
    case SYSREG_DBGWVR7_EL1:
    case SYSREG_DBGWVR8_EL1:
    case SYSREG_DBGWVR9_EL1:
    case SYSREG_DBGWVR10_EL1:
    case SYSREG_DBGWVR11_EL1:
    case SYSREG_DBGWVR12_EL1:
    case SYSREG_DBGWVR13_EL1:
    case SYSREG_DBGWVR14_EL1:
    case SYSREG_DBGWVR15_EL1:
        env->cp15.dbgwvr[SYSREG_CRM(reg)] = val;
        return 0;
    case SYSREG_DBGWCR0_EL1:
    case SYSREG_DBGWCR1_EL1:
    case SYSREG_DBGWCR2_EL1:
    case SYSREG_DBGWCR3_EL1:
    case SYSREG_DBGWCR4_EL1:
    case SYSREG_DBGWCR5_EL1:
    case SYSREG_DBGWCR6_EL1:
    case SYSREG_DBGWCR7_EL1:
    case SYSREG_DBGWCR8_EL1:
    case SYSREG_DBGWCR9_EL1:
    case SYSREG_DBGWCR10_EL1:
    case SYSREG_DBGWCR11_EL1:
    case SYSREG_DBGWCR12_EL1:
    case SYSREG_DBGWCR13_EL1:
    case SYSREG_DBGWCR14_EL1:
    case SYSREG_DBGWCR15_EL1:
        env->cp15.dbgwcr[SYSREG_CRM(reg)] = val;
        return 0;
    }

    cpu_synchronize_state(cpu);
    trace_hvf_unhandled_sysreg_write(env->pc, reg,
                                     SYSREG_OP0(reg),
                                     SYSREG_OP1(reg),
                                     SYSREG_CRN(reg),
                                     SYSREG_CRM(reg),
                                     SYSREG_OP2(reg));
    hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
    return 1;
}

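/*
 * Translate QEMU's interrupt lines into hvf pending-interrupt state:
 * CPU_INTERRUPT_HARD drives the IRQ line and CPU_INTERRUPT_FIQ the FIQ
 * line of the vCPU about to run.
 */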
static int hvf_inject_interrupts(CPUState *cpu)
{
    if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
        trace_hvf_inject_fiq();
        hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
                                      true);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        trace_hvf_inject_irq();
        hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
                                      true);
    }

    return 0;
}

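/*
 * The code below relies on mach_absolute_time() ticking in the same units
 * as the guest's virtual timer, which holds on Apple Silicon where both
 * derive from the system counter.
 */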
static uint64_t hvf_vtimer_val_raw(void)
{
    /*
     * mach_absolute_time() returns the raw counter value without the
     * per-VM offset that we define; subtract our offset to get the
     * guest's view of the vtimer.
     */
    return mach_absolute_time() - hvf_state->vtimer_offset;
}

static uint64_t hvf_vtimer_val(void)
{
    if (!runstate_is_running()) {
        /* VM is paused, the vtimer value is in vtimer.vtimer_val */
        return vtimer.vtimer_val;
    }

    return hvf_vtimer_val_raw();
}

static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
{
    /*
     * Use pselect to sleep so that other threads can IPI us while we're
     * sleeping. pselect atomically installs unblock_ipi_mask for the
     * duration of the call, so the IPI signal can only interrupt us
     * while we are actually waiting.
     */
    qatomic_set_mb(&cpu->thread_kicked, false);
    bql_unlock();
    pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
    bql_lock();
}

static void hvf_wfi(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    struct timespec ts;
    hv_return_t r;
    uint64_t ctl;
    uint64_t cval;
    int64_t ticks_to_sleep;
    uint64_t seconds;
    uint64_t nanos;
    uint32_t cntfrq;

    if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
        /* Interrupt pending, no need to wait */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

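    /* CNTV_CTL_EL0: bit 0 is ENABLE, bit 1 is IMASK */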
    if (!(ctl & 1) || (ctl & 2)) {
        /* Timer disabled or masked, just wait for an IPI. */
        hvf_wait_for_ipi(cpu, NULL);
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
    assert_hvf_ok(r);

    ticks_to_sleep = cval - hvf_vtimer_val();
    if (ticks_to_sleep < 0) {
        return;
    }

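    /*
     * gt_cntfrq_period_ns() yields the counter period in nanoseconds.
     * Split the sleep into whole seconds plus a remainder so that the
     * final ticks-to-nanoseconds multiplication cannot overflow 64 bits.
     */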
    cntfrq = gt_cntfrq_period_ns(arm_cpu);
    seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
    ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
    nanos = ticks_to_sleep * cntfrq;

    /*
     * Don't sleep for less than the time a context switch would take,
     * so that we can satisfy fast timer requests on the same CPU.
     * Measurements on M1 show the sweet spot to be ~2ms.
     */
    if (!seconds && nanos < (2 * SCALE_MS)) {
        return;
    }

    ts = (struct timespec) { seconds, nanos };
    hvf_wait_for_ipi(cpu, &ts);
}

static void hvf_sync_vtimer(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    hv_return_t r;
    uint64_t ctl;
    bool irq_state;

    if (!cpu->accel->vtimer_masked) {
        /* We will get notified on vtimer changes by hvf, nothing to do */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

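    /*
     * The timer output is asserted iff the timer is enabled, unmasked
     * and has fired (ISTATUS set).
     */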
    irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
                (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
    qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);

    if (!irq_state) {
        /* Timer no longer asserting, we can unmask it */
        hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
        cpu->accel->vtimer_masked = false;
    }
}

int hvf_vcpu_exec(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    int ret;
    hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
    hv_return_t r;
    bool advance_pc = false;

    if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) &&
        hvf_inject_interrupts(cpu)) {
        return EXCP_INTERRUPT;
    }

    if (cpu->halted) {
        return EXCP_HLT;
    }

    flush_cpu_state(cpu);

    bql_unlock();
    r = hv_vcpu_run(cpu->accel->fd);
    bql_lock();
    switch (r) {
    case HV_SUCCESS:
        break;
    case HV_ILLEGAL_GUEST_STATE:
        trace_hvf_illegal_guest_state();
        /* fall through */
    default:
        g_assert_not_reached();
    }

    /* handle VMEXIT */
    uint64_t exit_reason = hvf_exit->reason;
    uint64_t syndrome = hvf_exit->exception.syndrome;
    uint32_t ec = syn_get_ec(syndrome);

    ret = 0;
    switch (exit_reason) {
    case HV_EXIT_REASON_EXCEPTION:
        /* This is the main one, handle below. */
        break;
    case HV_EXIT_REASON_VTIMER_ACTIVATED:
        qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
        cpu->accel->vtimer_masked = true;
        return 0;
    case HV_EXIT_REASON_CANCELED:
        /* we got kicked, no exit to process */
        return 0;
    default:
        g_assert_not_reached();
    }

    hvf_sync_vtimer(cpu);

    switch (ec) {
    case EC_SOFTWARESTEP: {
        ret = EXCP_DEBUG;

        if (!cpu->singlestep_enabled) {
            error_report("EC_SOFTWARESTEP but single-stepping not enabled");
        }
        break;
    }
    case EC_AA64_BKPT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
            /* Re-inject into the guest */
            ret = 0;
            hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0), 1);
        }
        break;
    }
    case EC_BREAKPOINT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        if (!find_hw_breakpoint(cpu, env->pc)) {
            error_report("EC_BREAKPOINT but unknown hw breakpoint");
        }
        break;
    }
    case EC_WATCHPOINT: {
        ret = EXCP_DEBUG;

        cpu_synchronize_state(cpu);

        CPUWatchpoint *wp =
            find_hw_watchpoint(cpu, hvf_exit->exception.virtual_address);
        if (!wp) {
            error_report("EC_WATCHPOINT but unknown hw watchpoint");
        }
        cpu->watchpoint_hit = wp;
        break;
    }
    case EC_DATAABORT: {
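        /*
         * Decode the ESR_ELx ISS for a data abort: ISV (bit 24) says the
         * fields below are valid, SAS (23:22) is the access size as a
         * power of two, SSE (21) requests sign extension, SRT (20:16) is
         * the target register, CM (8) marks cache maintenance, S1PTW (7)
         * flags stage-1 page table walks and WnR (6) distinguishes
         * writes from reads.
         */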
        bool isv = syndrome & ARM_EL_ISV;
        bool iswrite = (syndrome >> 6) & 1;
        bool s1ptw = (syndrome >> 7) & 1;
        bool sse = (syndrome >> 21) & 1;
        uint32_t sas = (syndrome >> 22) & 3;
        uint32_t len = 1 << sas;
        uint32_t srt = (syndrome >> 16) & 0x1f;
        uint32_t cm = (syndrome >> 8) & 0x1;
        uint64_t val = 0;

        trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
                             hvf_exit->exception.physical_address, isv,
                             iswrite, s1ptw, len, srt);

        if (cm) {
            /* We don't cache MMIO regions */
            advance_pc = true;
            break;
        }

        assert(isv);

        if (iswrite) {
            val = hvf_get_reg(cpu, srt);
            address_space_write(&address_space_memory,
                                hvf_exit->exception.physical_address,
                                MEMTXATTRS_UNSPECIFIED, &val, len);
        } else {
            address_space_read(&address_space_memory,
                               hvf_exit->exception.physical_address,
                               MEMTXATTRS_UNSPECIFIED, &val, len);
            if (sse) {
                val = sextract64(val, 0, len * 8);
            }
            hvf_set_reg(cpu, srt, val);
        }

        advance_pc = true;
        break;
    }
    case EC_SYSTEMREGISTERTRAP: {
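        /*
         * ISS for an MSR/MRS trap: bit 0 is the direction (1 = read/MRS),
         * Rt lives in bits 9:5, and the op0/op2/op1/CRn/CRm fields
         * identify the system register itself.
         */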
        bool isread = (syndrome >> 0) & 1;
        uint32_t rt = (syndrome >> 5) & 0x1f;
        uint32_t reg = syndrome & SYSREG_MASK;
        uint64_t val;
        int sysreg_ret = 0;

        if (isread) {
            sysreg_ret = hvf_sysreg_read(cpu, reg, &val);
            if (!sysreg_ret) {
                trace_hvf_sysreg_read(reg,
                                      SYSREG_OP0(reg),
                                      SYSREG_OP1(reg),
                                      SYSREG_CRN(reg),
                                      SYSREG_CRM(reg),
                                      SYSREG_OP2(reg),
                                      val);
                hvf_set_reg(cpu, rt, val);
            }
        } else {
            val = hvf_get_reg(cpu, rt);
            sysreg_ret = hvf_sysreg_write(cpu, reg, val);
        }

        advance_pc = !sysreg_ret;
        break;
    }
    case EC_WFX_TRAP:
        advance_pc = true;
        if (!(syndrome & WFX_IS_WFE)) {
            hvf_wfi(cpu);
        }
        break;
    case EC_AA64_HVC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
                /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
        }
        break;
    case EC_AA64_SMC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
            advance_pc = true;

            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_smc(env->xregs[0]);
                /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_smc(env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
        }
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_exit(syndrome, ec, env->pc);
        error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
    }

    if (advance_pc) {
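        /*
         * Every instruction that traps here is a fixed-size 4-byte
         * AArch64 instruction, so advancing PC by 4 always steps over it.
         */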
        uint64_t pc;

        flush_cpu_state(cpu);

        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc);
        assert_hvf_ok(r);
        pc += 4;
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc);
        assert_hvf_ok(r);

        /* Handle single-stepping over instructions which trigger a VM exit */
        if (cpu->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
    }

    return ret;
}

static const VMStateDescription vmstate_hvf_vtimer = {
    .name = "hvf-vtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(vtimer_val, HVFVTimer),
        VMSTATE_END_OF_LIST()
    },
};

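/*
 * Freeze the guest vtimer across VM stops: remember the raw value when
 * the VM pauses and rebuild vtimer_offset from it on resume, so the
 * guest does not observe the wall-clock time spent paused.
 */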
static void hvf_vm_state_change(void *opaque, bool running, RunState state)
{
    HVFVTimer *s = opaque;

    if (running) {
        /* Update vtimer offset on all CPUs */
        hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
        cpu_synchronize_all_states();
    } else {
        /* Remember vtimer value on every pause */
        s->vtimer_val = hvf_vtimer_val_raw();
    }
}

int hvf_arch_init(void)
{
    hvf_state->vtimer_offset = mach_absolute_time();
    vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
    qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);

    hvf_arm_init_debug();

    return 0;
}

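/* 0xd4200000 is the AArch64 encoding of BRK #0 */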
static const uint32_t brk_insn = 0xd4200000;

int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
        cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

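/*
 * Only restore the saved instruction if the BRK we planted is still
 * there; if the guest rewrote the site, leave its code alone.
 */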
int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk, 4, 0) ||
        brk != brk_insn ||
        cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void hvf_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

/*
 * Update the vCPU with the gdbstub's view of debug registers. This view
 * consists of all hardware breakpoints and watchpoints inserted so far while
 * debugging the guest.
 */
static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
{
    hv_return_t r = HV_SUCCESS;
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], bp->bcr);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], bp->bvr);
        assert_hvf_ok(r);
    }
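    /* Zero the remaining slots so stale guest values cannot fire */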
    for (i = cur_hw_bps; i < max_hw_bps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], 0);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], 0);
        assert_hvf_ok(r);
    }

    for (i = 0; i < cur_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], wp->wcr);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], wp->wvr);
        assert_hvf_ok(r);
    }
    for (i = cur_hw_wps; i < max_hw_wps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], 0);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], 0);
        assert_hvf_ok(r);
    }
}

/*
 * Update the vCPU with the guest's view of debug registers. This view is kept
 * in the environment at all times.
 */
static void hvf_put_guest_debug_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t r = HV_SUCCESS;
    int i;

    for (i = 0; i < max_hw_bps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i],
                                env->cp15.dbgbcr[i]);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i],
                                env->cp15.dbgbvr[i]);
        assert_hvf_ok(r);
    }

    for (i = 0; i < max_hw_wps; i++) {
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i],
                                env->cp15.dbgwcr[i]);
        assert_hvf_ok(r);
        r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i],
                                env->cp15.dbgwvr[i]);
        assert_hvf_ok(r);
    }
}

static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static void hvf_arch_set_traps(CPUState *cpu)
{
    bool should_enable_traps = false;
    hv_return_t r = HV_SUCCESS;

    /* Enable exits for this vCPU whenever its guest debugging is active */
    should_enable_traps |= cpu->accel->guest_debug_enabled;
    /* Set whether debug exceptions exit the guest */
    r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
                                          should_enable_traps);
    assert_hvf_ok(r);

    /* Set whether accesses to debug registers exit the guest */
    r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
                                            should_enable_traps);
    assert_hvf_ok(r);
}

void hvf_arch_update_guest_debug(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    /* Check whether guest debugging is enabled */
    cpu->accel->guest_debug_enabled = cpu->singlestep_enabled ||
                                      hvf_sw_breakpoints_active(cpu) ||
                                      hvf_arm_hw_debug_active(cpu);

    /* Update debug registers */
    if (cpu->accel->guest_debug_enabled) {
        hvf_put_gdbstub_debug_registers(cpu);
    } else {
        hvf_put_guest_debug_registers(cpu);
    }

    cpu_synchronize_state(cpu);

    /* Enable/disable single-stepping */
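    /*
     * Software step needs both MDSCR_EL1.SS and PSTATE.SS set; hardware
     * clears PSTATE.SS after one instruction retires and then takes the
     * step exception.
     */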
    if (cpu->singlestep_enabled) {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 1);
        pstate_write(env, pstate_read(env) | PSTATE_SS);
    } else {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 0);
    }

    /* Enable/disable Breakpoint exceptions */
    if (hvf_arm_hw_debug_active(cpu)) {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 1);
    } else {
        env->cp15.mdscr_el1 =
            deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
    }

    hvf_arch_set_traps(cpu);
}

bool hvf_arch_supports_guest_debug(void)
{
    return true;
}