/*
 * QEMU Hypervisor.framework support for Apple Silicon
 *
 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
 * Copyright 2020 Google LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/log.h"

#include "system/runstate.h"
#include "system/hvf.h"
#include "system/hvf_int.h"
#include "system/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"

#include <mach/mach_time.h>

#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "system/cpus.h"
#include "arm-powerctl.h"
#include "target/arm/cpu.h"
#include "target/arm/internals.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
#include "trace.h"
#include "migration/vmstate.h"

#include "gdbstub/enums.h"

#define MDSCR_EL1_SS_SHIFT  0
#define MDSCR_EL1_MDE_SHIFT 15

static const uint16_t dbgbcr_regs[] = {
    HV_SYS_REG_DBGBCR0_EL1,
    HV_SYS_REG_DBGBCR1_EL1,
    HV_SYS_REG_DBGBCR2_EL1,
    HV_SYS_REG_DBGBCR3_EL1,
    HV_SYS_REG_DBGBCR4_EL1,
    HV_SYS_REG_DBGBCR5_EL1,
    HV_SYS_REG_DBGBCR6_EL1,
    HV_SYS_REG_DBGBCR7_EL1,
    HV_SYS_REG_DBGBCR8_EL1,
    HV_SYS_REG_DBGBCR9_EL1,
    HV_SYS_REG_DBGBCR10_EL1,
    HV_SYS_REG_DBGBCR11_EL1,
    HV_SYS_REG_DBGBCR12_EL1,
    HV_SYS_REG_DBGBCR13_EL1,
    HV_SYS_REG_DBGBCR14_EL1,
    HV_SYS_REG_DBGBCR15_EL1,
};

static const uint16_t dbgbvr_regs[] = {
    HV_SYS_REG_DBGBVR0_EL1,
    HV_SYS_REG_DBGBVR1_EL1,
    HV_SYS_REG_DBGBVR2_EL1,
    HV_SYS_REG_DBGBVR3_EL1,
    HV_SYS_REG_DBGBVR4_EL1,
    HV_SYS_REG_DBGBVR5_EL1,
    HV_SYS_REG_DBGBVR6_EL1,
    HV_SYS_REG_DBGBVR7_EL1,
    HV_SYS_REG_DBGBVR8_EL1,
    HV_SYS_REG_DBGBVR9_EL1,
    HV_SYS_REG_DBGBVR10_EL1,
    HV_SYS_REG_DBGBVR11_EL1,
    HV_SYS_REG_DBGBVR12_EL1,
    HV_SYS_REG_DBGBVR13_EL1,
    HV_SYS_REG_DBGBVR14_EL1,
    HV_SYS_REG_DBGBVR15_EL1,
};

static const uint16_t dbgwcr_regs[] = {
    HV_SYS_REG_DBGWCR0_EL1,
    HV_SYS_REG_DBGWCR1_EL1,
    HV_SYS_REG_DBGWCR2_EL1,
    HV_SYS_REG_DBGWCR3_EL1,
    HV_SYS_REG_DBGWCR4_EL1,
    HV_SYS_REG_DBGWCR5_EL1,
    HV_SYS_REG_DBGWCR6_EL1,
    HV_SYS_REG_DBGWCR7_EL1,
    HV_SYS_REG_DBGWCR8_EL1,
    HV_SYS_REG_DBGWCR9_EL1,
    HV_SYS_REG_DBGWCR10_EL1,
    HV_SYS_REG_DBGWCR11_EL1,
    HV_SYS_REG_DBGWCR12_EL1,
    HV_SYS_REG_DBGWCR13_EL1,
    HV_SYS_REG_DBGWCR14_EL1,
    HV_SYS_REG_DBGWCR15_EL1,
};

static const uint16_t dbgwvr_regs[] = {
    HV_SYS_REG_DBGWVR0_EL1,
    HV_SYS_REG_DBGWVR1_EL1,
    HV_SYS_REG_DBGWVR2_EL1,
    HV_SYS_REG_DBGWVR3_EL1,
    HV_SYS_REG_DBGWVR4_EL1,
    HV_SYS_REG_DBGWVR5_EL1,
    HV_SYS_REG_DBGWVR6_EL1,
    HV_SYS_REG_DBGWVR7_EL1,
    HV_SYS_REG_DBGWVR8_EL1,
    HV_SYS_REG_DBGWVR9_EL1,
    HV_SYS_REG_DBGWVR10_EL1,
    HV_SYS_REG_DBGWVR11_EL1,
    HV_SYS_REG_DBGWVR12_EL1,
    HV_SYS_REG_DBGWVR13_EL1,
    HV_SYS_REG_DBGWVR14_EL1,
    HV_SYS_REG_DBGWVR15_EL1,
};

static inline int hvf_arm_num_brps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, BRPS) + 1;
}

static inline int hvf_arm_num_wrps(hv_vcpu_config_t config)
{
    uint64_t val;
    hv_return_t ret;
    ret = hv_vcpu_config_get_feature_reg(config, HV_FEATURE_REG_ID_AA64DFR0_EL1,
                                         &val);
    assert_hvf_ok(ret);
    return FIELD_EX64(val, ID_AA64DFR0, WRPS) + 1;
}

void hvf_arm_init_debug(void)
{
    hv_vcpu_config_t config;
    config = hv_vcpu_config_create();

    max_hw_bps = hvf_arm_num_brps(config);
    hw_breakpoints =
        g_array_sized_new(true, true, sizeof(HWBreakpoint), max_hw_bps);

    max_hw_wps = hvf_arm_num_wrps(config);
    hw_watchpoints =
        g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps);
}

#define HVF_SYSREG(crn, crm, op0, op1, op2) \
        ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

#define SYSREG_OP0_SHIFT      20
#define SYSREG_OP0_MASK       0x3
#define SYSREG_OP0(sysreg)    ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK)
#define SYSREG_OP1_SHIFT      14
#define SYSREG_OP1_MASK       0x7
#define SYSREG_OP1(sysreg)    ((sysreg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK)
#define SYSREG_CRN_SHIFT      10
#define SYSREG_CRN_MASK       0xf
#define SYSREG_CRN(sysreg)    ((sysreg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK)
#define SYSREG_CRM_SHIFT      1
#define SYSREG_CRM_MASK       0xf
#define SYSREG_CRM(sysreg)    ((sysreg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK)
#define SYSREG_OP2_SHIFT      17
#define SYSREG_OP2_MASK       0x7
#define SYSREG_OP2(sysreg)    ((sysreg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK)

#define SYSREG(op0, op1, crn, crm, op2) \
    ((op0 << SYSREG_OP0_SHIFT) | \
     (op1 << SYSREG_OP1_SHIFT) | \
     (crn << SYSREG_CRN_SHIFT) | \
     (crm << SYSREG_CRM_SHIFT) | \
     (op2 << SYSREG_OP2_SHIFT))
#define SYSREG_MASK \
    SYSREG(SYSREG_OP0_MASK, \
           SYSREG_OP1_MASK, \
           SYSREG_CRN_MASK, \
           SYSREG_CRM_MASK, \
           SYSREG_OP2_MASK)
#define SYSREG_OSLAR_EL1      SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
#define SYSREG_CNTP_CTL_EL0   SYSREG(3, 3, 14, 2, 1)
#define SYSREG_PMCR_EL0       SYSREG(3, 3, 9, 12, 0)
#define SYSREG_PMUSERENR_EL0  SYSREG(3, 3, 9, 14, 0)
#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
#define SYSREG_PMOVSCLR_EL0   SYSREG(3, 3, 9, 12, 3)
#define SYSREG_PMSWINC_EL0    SYSREG(3, 3, 9, 12, 4)
#define SYSREG_PMSELR_EL0     SYSREG(3, 3, 9, 12, 5)
#define SYSREG_PMCEID0_EL0    SYSREG(3, 3, 9, 12, 6)
#define SYSREG_PMCEID1_EL0    SYSREG(3, 3, 9, 12, 7)
#define SYSREG_PMCCNTR_EL0    SYSREG(3, 3, 9, 13, 0)
#define SYSREG_PMCCFILTR_EL0  SYSREG(3, 3, 14, 15, 7)

#define SYSREG_ICC_AP0R0_EL1     SYSREG(3, 0, 12, 8, 4)
#define SYSREG_ICC_AP0R1_EL1     SYSREG(3, 0, 12, 8, 5)
#define SYSREG_ICC_AP0R2_EL1     SYSREG(3, 0, 12, 8, 6)
#define SYSREG_ICC_AP0R3_EL1     SYSREG(3, 0, 12, 8, 7)
#define SYSREG_ICC_AP1R0_EL1     SYSREG(3, 0, 12, 9, 0)
#define SYSREG_ICC_AP1R1_EL1     SYSREG(3, 0, 12, 9, 1)
#define SYSREG_ICC_AP1R2_EL1     SYSREG(3, 0, 12, 9, 2)
#define SYSREG_ICC_AP1R3_EL1     SYSREG(3, 0, 12, 9, 3)
#define SYSREG_ICC_ASGI1R_EL1    SYSREG(3, 0, 12, 11, 6)
#define SYSREG_ICC_BPR0_EL1      SYSREG(3, 0, 12, 8, 3)
#define SYSREG_ICC_BPR1_EL1      SYSREG(3, 0, 12, 12, 3)
#define SYSREG_ICC_CTLR_EL1      SYSREG(3, 0, 12, 12, 4)
#define SYSREG_ICC_DIR_EL1       SYSREG(3, 0, 12, 11, 1)
#define SYSREG_ICC_EOIR0_EL1     SYSREG(3, 0, 12, 8, 1)
#define SYSREG_ICC_EOIR1_EL1     SYSREG(3, 0, 12, 12, 1)
#define SYSREG_ICC_HPPIR0_EL1    SYSREG(3, 0, 12, 8, 2)
#define SYSREG_ICC_HPPIR1_EL1    SYSREG(3, 0, 12, 12, 2)
#define SYSREG_ICC_IAR0_EL1      SYSREG(3, 0, 12, 8, 0)
#define SYSREG_ICC_IAR1_EL1      SYSREG(3, 0, 12, 12, 0)
#define SYSREG_ICC_IGRPEN0_EL1   SYSREG(3, 0, 12, 12, 6)
#define SYSREG_ICC_IGRPEN1_EL1   SYSREG(3, 0, 12, 12, 7)
#define SYSREG_ICC_PMR_EL1       SYSREG(3, 0, 4, 6, 0)
#define SYSREG_ICC_RPR_EL1       SYSREG(3, 0, 12, 11, 3)
#define SYSREG_ICC_SGI0R_EL1     SYSREG(3, 0, 12, 11, 7)
#define SYSREG_ICC_SGI1R_EL1     SYSREG(3, 0, 12, 11, 5)
#define SYSREG_ICC_SRE_EL1       SYSREG(3, 0, 12, 12, 5)

#define SYSREG_MDSCR_EL1      SYSREG(2, 0, 0, 2, 2)
#define SYSREG_DBGBVR0_EL1    SYSREG(2, 0, 0, 0, 4)
#define SYSREG_DBGBCR0_EL1    SYSREG(2, 0, 0, 0, 5)
#define SYSREG_DBGWVR0_EL1    SYSREG(2, 0, 0, 0, 6)
#define SYSREG_DBGWCR0_EL1    SYSREG(2, 0, 0, 0, 7)
#define SYSREG_DBGBVR1_EL1    SYSREG(2, 0, 0, 1, 4)
#define SYSREG_DBGBCR1_EL1    SYSREG(2, 0, 0, 1, 5)
#define SYSREG_DBGWVR1_EL1    SYSREG(2, 0, 0, 1, 6)
#define SYSREG_DBGWCR1_EL1    SYSREG(2, 0, 0, 1, 7)
#define SYSREG_DBGBVR2_EL1    SYSREG(2, 0, 0, 2, 4)
#define SYSREG_DBGBCR2_EL1    SYSREG(2, 0, 0, 2, 5)
#define SYSREG_DBGWVR2_EL1    SYSREG(2, 0, 0, 2, 6)
#define SYSREG_DBGWCR2_EL1    SYSREG(2, 0, 0, 2, 7)
#define SYSREG_DBGBVR3_EL1    SYSREG(2, 0, 0, 3, 4)
#define SYSREG_DBGBCR3_EL1    SYSREG(2, 0, 0, 3, 5)
#define SYSREG_DBGWVR3_EL1    SYSREG(2, 0, 0, 3, 6)
#define SYSREG_DBGWCR3_EL1    SYSREG(2, 0, 0, 3, 7)
#define SYSREG_DBGBVR4_EL1    SYSREG(2, 0, 0, 4, 4)
#define SYSREG_DBGBCR4_EL1    SYSREG(2, 0, 0, 4, 5)
#define SYSREG_DBGWVR4_EL1    SYSREG(2, 0, 0, 4, 6)
#define SYSREG_DBGWCR4_EL1    SYSREG(2, 0, 0, 4, 7)
#define SYSREG_DBGBVR5_EL1    SYSREG(2, 0, 0, 5, 4)
#define SYSREG_DBGBCR5_EL1    SYSREG(2, 0, 0, 5, 5)
#define SYSREG_DBGWVR5_EL1    SYSREG(2, 0, 0, 5, 6)
#define SYSREG_DBGWCR5_EL1    SYSREG(2, 0, 0, 5, 7)
#define SYSREG_DBGBVR6_EL1    SYSREG(2, 0, 0, 6, 4)
#define SYSREG_DBGBCR6_EL1    SYSREG(2, 0, 0, 6, 5)
#define SYSREG_DBGWVR6_EL1    SYSREG(2, 0, 0, 6, 6)
#define SYSREG_DBGWCR6_EL1    SYSREG(2, 0, 0, 6, 7)
#define SYSREG_DBGBVR7_EL1    SYSREG(2, 0, 0, 7, 4)
#define SYSREG_DBGBCR7_EL1    SYSREG(2, 0, 0, 7, 5)
#define SYSREG_DBGWVR7_EL1    SYSREG(2, 0, 0, 7, 6)
#define SYSREG_DBGWCR7_EL1    SYSREG(2, 0, 0, 7, 7)
#define SYSREG_DBGBVR8_EL1    SYSREG(2, 0, 0, 8, 4)
#define SYSREG_DBGBCR8_EL1    SYSREG(2, 0, 0, 8, 5)
#define SYSREG_DBGWVR8_EL1    SYSREG(2, 0, 0, 8, 6)
#define SYSREG_DBGWCR8_EL1    SYSREG(2, 0, 0, 8, 7)
#define SYSREG_DBGBVR9_EL1    SYSREG(2, 0, 0, 9, 4)
#define SYSREG_DBGBCR9_EL1    SYSREG(2, 0, 0, 9, 5)
#define SYSREG_DBGWVR9_EL1    SYSREG(2, 0, 0, 9, 6)
#define SYSREG_DBGWCR9_EL1    SYSREG(2, 0, 0, 9, 7)
#define SYSREG_DBGBVR10_EL1   SYSREG(2, 0, 0, 10, 4)
#define SYSREG_DBGBCR10_EL1   SYSREG(2, 0, 0, 10, 5)
#define SYSREG_DBGWVR10_EL1   SYSREG(2, 0, 0, 10, 6)
#define SYSREG_DBGWCR10_EL1   SYSREG(2, 0, 0, 10, 7)
#define SYSREG_DBGBVR11_EL1   SYSREG(2, 0, 0, 11, 4)
#define SYSREG_DBGBCR11_EL1   SYSREG(2, 0, 0, 11, 5)
#define SYSREG_DBGWVR11_EL1   SYSREG(2, 0, 0, 11, 6)
#define SYSREG_DBGWCR11_EL1   SYSREG(2, 0, 0, 11, 7)
#define SYSREG_DBGBVR12_EL1   SYSREG(2, 0, 0, 12, 4)
#define SYSREG_DBGBCR12_EL1   SYSREG(2, 0, 0, 12, 5)
#define SYSREG_DBGWVR12_EL1   SYSREG(2, 0, 0, 12, 6)
#define SYSREG_DBGWCR12_EL1   SYSREG(2, 0, 0, 12, 7)
#define SYSREG_DBGBVR13_EL1   SYSREG(2, 0, 0, 13, 4)
#define SYSREG_DBGBCR13_EL1   SYSREG(2, 0, 0, 13, 5)
#define SYSREG_DBGWVR13_EL1   SYSREG(2, 0, 0, 13, 6)
#define SYSREG_DBGWCR13_EL1   SYSREG(2, 0, 0, 13, 7)
#define SYSREG_DBGBVR14_EL1   SYSREG(2, 0, 0, 14, 4)
#define SYSREG_DBGBCR14_EL1   SYSREG(2, 0, 0, 14, 5)
#define SYSREG_DBGWVR14_EL1   SYSREG(2, 0, 0, 14, 6)
#define SYSREG_DBGWCR14_EL1   SYSREG(2, 0, 0, 14, 7)
#define SYSREG_DBGBVR15_EL1   SYSREG(2, 0, 0, 15, 4)
#define SYSREG_DBGBCR15_EL1   SYSREG(2, 0, 0, 15, 5)
#define SYSREG_DBGWVR15_EL1   SYSREG(2, 0, 0, 15, 6)
#define SYSREG_DBGWCR15_EL1   SYSREG(2, 0, 0, 15, 7)

#define WFX_IS_WFE (1 << 0)

#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)

static void hvf_wfi(CPUState *cpu);

static uint32_t chosen_ipa_bit_size;

typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;

typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint64_t midr;
    uint32_t reset_sctlr;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;

struct hvf_reg_match {
    int reg;
    uint64_t offset;
};

static const struct hvf_reg_match hvf_reg_match[] = {
    { HV_REG_X0,   offsetof(CPUARMState, xregs[0]) },
    { HV_REG_X1,   offsetof(CPUARMState, xregs[1]) },
    { HV_REG_X2,   offsetof(CPUARMState, xregs[2]) },
    { HV_REG_X3,   offsetof(CPUARMState, xregs[3]) },
    { HV_REG_X4,   offsetof(CPUARMState, xregs[4]) },
    { HV_REG_X5,   offsetof(CPUARMState, xregs[5]) },
    { HV_REG_X6,   offsetof(CPUARMState, xregs[6]) },
    { HV_REG_X7,   offsetof(CPUARMState, xregs[7]) },
    { HV_REG_X8,   offsetof(CPUARMState, xregs[8]) },
    { HV_REG_X9,   offsetof(CPUARMState, xregs[9]) },
    { HV_REG_X10,  offsetof(CPUARMState, xregs[10]) },
    { HV_REG_X11,  offsetof(CPUARMState, xregs[11]) },
    { HV_REG_X12,  offsetof(CPUARMState, xregs[12]) },
    { HV_REG_X13,  offsetof(CPUARMState, xregs[13]) },
    { HV_REG_X14,  offsetof(CPUARMState, xregs[14]) },
    { HV_REG_X15,  offsetof(CPUARMState, xregs[15]) },
    { HV_REG_X16,  offsetof(CPUARMState, xregs[16]) },
    { HV_REG_X17,  offsetof(CPUARMState, xregs[17]) },
    { HV_REG_X18,  offsetof(CPUARMState, xregs[18]) },
    { HV_REG_X19,  offsetof(CPUARMState, xregs[19]) },
    { HV_REG_X20,  offsetof(CPUARMState, xregs[20]) },
    { HV_REG_X21,  offsetof(CPUARMState, xregs[21]) },
    { HV_REG_X22,  offsetof(CPUARMState, xregs[22]) },
    { HV_REG_X23,  offsetof(CPUARMState, xregs[23]) },
    { HV_REG_X24,  offsetof(CPUARMState, xregs[24]) },
    { HV_REG_X25,  offsetof(CPUARMState, xregs[25]) },
    { HV_REG_X26,  offsetof(CPUARMState, xregs[26]) },
    { HV_REG_X27,  offsetof(CPUARMState, xregs[27]) },
    { HV_REG_X28,  offsetof(CPUARMState, xregs[28]) },
    { HV_REG_X29,  offsetof(CPUARMState, xregs[29]) },
    { HV_REG_X30,  offsetof(CPUARMState, xregs[30]) },
    { HV_REG_PC,   offsetof(CPUARMState, pc) },
};

static const struct hvf_reg_match hvf_fpreg_match[] = {
    { HV_SIMD_FP_REG_Q0,  offsetof(CPUARMState, vfp.zregs[0]) },
    { HV_SIMD_FP_REG_Q1,  offsetof(CPUARMState, vfp.zregs[1]) },
    { HV_SIMD_FP_REG_Q2,  offsetof(CPUARMState, vfp.zregs[2]) },
    { HV_SIMD_FP_REG_Q3,  offsetof(CPUARMState, vfp.zregs[3]) },
    { HV_SIMD_FP_REG_Q4,  offsetof(CPUARMState, vfp.zregs[4]) },
    { HV_SIMD_FP_REG_Q5,  offsetof(CPUARMState, vfp.zregs[5]) },
    { HV_SIMD_FP_REG_Q6,  offsetof(CPUARMState, vfp.zregs[6]) },
    { HV_SIMD_FP_REG_Q7,  offsetof(CPUARMState, vfp.zregs[7]) },
    { HV_SIMD_FP_REG_Q8,  offsetof(CPUARMState, vfp.zregs[8]) },
    { HV_SIMD_FP_REG_Q9,  offsetof(CPUARMState, vfp.zregs[9]) },
    { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
    { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
    { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
    { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
    { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
    { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
    { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
    { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
    { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
    { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
    { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
    { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
    { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
    { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
    { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
    { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
    { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
    { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
    { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
    { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
    { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
    { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
};

struct hvf_sreg_match {
    int reg;
    uint32_t key;
    uint32_t cp_idx;
};

static struct hvf_sreg_match hvf_sreg_match[] = {
    { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 7) },

#ifdef SYNC_NO_RAW_REGS
    /*
     * The registers below are manually synced on init because they are
     * marked as NO_RAW. We still list them to make number space sync easier.
     */
    { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
    { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
    { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
    { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
#ifdef SYNC_NO_MMFR0
    /* We keep the hardware MMFR0 around. HW limits are there anyway */
    { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
    /* Add ID_AA64MMFR3_EL1 here when HVF supports it */

    { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
    { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
    { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
    { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
    { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
    { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },

    { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
    { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
    { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
    { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
    { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
    { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
    { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
    { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
    { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
    { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },

    { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
    { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
    { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
    { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
    { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
    { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
    { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
    { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
    { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
    { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
    { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
    { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
    { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
    { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
    { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
    { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
    { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
    { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
};

int hvf_get_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
        *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      &fpval);
        memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
        assert_hvf_ok(ret);
    }

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpcr(env, val);

    val = 0;
    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpsr(env, val);

    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val);
    assert_hvf_ok(ret);
    pstate_write(env, val);

    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1: {
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are holding the gdbstub's view of the registers (set in
                 * hvf_arch_update_guest_debug()).
                 * Since the environment is used to store only the guest's view
                 * of the registers, don't update it with the values from the
                 * vCPU but simply keep the values from the previous
                 * environment.
                 */
                const ARMCPRegInfo *ri;
                ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key);
                val = read_raw_cp_reg(env, ri);

                arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
                continue;
            }
            }
        }

        ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val);
        assert_hvf_ok(ret);

        arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
    }
    assert(write_list_to_cpustate(arm_cpu));

    aarch64_restore_sp(env, arm_current_el(env));

    return 0;
}

int hvf_put_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
        ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
        ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                      fpval);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env));
    assert_hvf_ok(ret);

    aarch64_save_sp(env, arm_current_el(env));

    assert(write_cpustate_to_list(arm_cpu, false));
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        if (cpu->accel->guest_debug_enabled) {
            /* Handle debug registers */
            switch (hvf_sreg_match[i].reg) {
            case HV_SYS_REG_DBGBVR0_EL1:
            case HV_SYS_REG_DBGBCR0_EL1:
            case HV_SYS_REG_DBGWVR0_EL1:
            case HV_SYS_REG_DBGWCR0_EL1:
            case HV_SYS_REG_DBGBVR1_EL1:
            case HV_SYS_REG_DBGBCR1_EL1:
            case HV_SYS_REG_DBGWVR1_EL1:
            case HV_SYS_REG_DBGWCR1_EL1:
            case HV_SYS_REG_DBGBVR2_EL1:
            case HV_SYS_REG_DBGBCR2_EL1:
            case HV_SYS_REG_DBGWVR2_EL1:
            case HV_SYS_REG_DBGWCR2_EL1:
            case HV_SYS_REG_DBGBVR3_EL1:
            case HV_SYS_REG_DBGBCR3_EL1:
            case HV_SYS_REG_DBGWVR3_EL1:
            case HV_SYS_REG_DBGWCR3_EL1:
            case HV_SYS_REG_DBGBVR4_EL1:
            case HV_SYS_REG_DBGBCR4_EL1:
            case HV_SYS_REG_DBGWVR4_EL1:
            case HV_SYS_REG_DBGWCR4_EL1:
            case HV_SYS_REG_DBGBVR5_EL1:
            case HV_SYS_REG_DBGBCR5_EL1:
            case HV_SYS_REG_DBGWVR5_EL1:
            case HV_SYS_REG_DBGWCR5_EL1:
            case HV_SYS_REG_DBGBVR6_EL1:
            case HV_SYS_REG_DBGBCR6_EL1:
            case HV_SYS_REG_DBGWVR6_EL1:
            case HV_SYS_REG_DBGWCR6_EL1:
            case HV_SYS_REG_DBGBVR7_EL1:
            case HV_SYS_REG_DBGBCR7_EL1:
            case HV_SYS_REG_DBGWVR7_EL1:
            case HV_SYS_REG_DBGWCR7_EL1:
            case HV_SYS_REG_DBGBVR8_EL1:
            case HV_SYS_REG_DBGBCR8_EL1:
            case HV_SYS_REG_DBGWVR8_EL1:
            case HV_SYS_REG_DBGWCR8_EL1:
            case HV_SYS_REG_DBGBVR9_EL1:
            case HV_SYS_REG_DBGBCR9_EL1:
            case HV_SYS_REG_DBGWVR9_EL1:
            case HV_SYS_REG_DBGWCR9_EL1:
            case HV_SYS_REG_DBGBVR10_EL1:
            case HV_SYS_REG_DBGBCR10_EL1:
            case HV_SYS_REG_DBGWVR10_EL1:
            case HV_SYS_REG_DBGWCR10_EL1:
            case HV_SYS_REG_DBGBVR11_EL1:
            case HV_SYS_REG_DBGBCR11_EL1:
            case HV_SYS_REG_DBGWVR11_EL1:
            case HV_SYS_REG_DBGWCR11_EL1:
            case HV_SYS_REG_DBGBVR12_EL1:
            case HV_SYS_REG_DBGBCR12_EL1:
            case HV_SYS_REG_DBGWVR12_EL1:
            case HV_SYS_REG_DBGWCR12_EL1:
            case HV_SYS_REG_DBGBVR13_EL1:
            case HV_SYS_REG_DBGBCR13_EL1:
            case HV_SYS_REG_DBGWVR13_EL1:
            case HV_SYS_REG_DBGWCR13_EL1:
            case HV_SYS_REG_DBGBVR14_EL1:
            case HV_SYS_REG_DBGBCR14_EL1:
            case HV_SYS_REG_DBGWVR14_EL1:
            case HV_SYS_REG_DBGWCR14_EL1:
            case HV_SYS_REG_DBGBVR15_EL1:
            case HV_SYS_REG_DBGBCR15_EL1:
            case HV_SYS_REG_DBGWVR15_EL1:
            case HV_SYS_REG_DBGWCR15_EL1:
                /*
                 * If the guest is being debugged, the vCPU's debug registers
                 * are already holding the gdbstub's view of the registers (set
                 * in hvf_arch_update_guest_debug()).
                 */
                continue;
            }
        }

        val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
        ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
    assert_hvf_ok(ret);

    return 0;
}

static void flush_cpu_state(CPUState *cpu)
{
    if (cpu->accel->dirty) {
        hvf_put_registers(cpu);
        cpu->accel->dirty = false;
    }
}

static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val);
        assert_hvf_ok(r);
    }
}

static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
    uint64_t val = 0;
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val);
        assert_hvf_ok(r);
    }

    return val;
}

static void clamp_id_aa64mmfr0_parange_to_ipa_size(uint64_t *id_aa64mmfr0)
{
    uint32_t ipa_size = chosen_ipa_bit_size ?
            chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();

    /* Clamp down the PARange to the IPA size the kernel supports. */
    uint8_t index = round_down_to_parange_index(ipa_size);
    *id_aa64mmfr0 = (*id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
}

static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    ARMISARegisters host_isar = {};
    const struct isar_regs {
        int reg;
        uint64_t *val;
    } regs[] = {
        { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
        { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
        { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
        { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
        { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
        { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
        /* Add ID_AA64ISAR2_EL1 here when HVF supports it */
        { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
        { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
        { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
        /* Add ID_AA64MMFR3_EL1 here when HVF supports it */
    };
    hv_vcpu_t fd;
    hv_return_t r = HV_SUCCESS;
    hv_vcpu_exit_t *exit;
    int i;

    ahcf->dtb_compatible = "arm,arm-v8";
    ahcf->features = (1ULL << ARM_FEATURE_V8) |
                     (1ULL << ARM_FEATURE_NEON) |
                     (1ULL << ARM_FEATURE_AARCH64) |
                     (1ULL << ARM_FEATURE_PMU) |
                     (1ULL << ARM_FEATURE_GENERIC_TIMER);

    /* We set up a small vcpu to extract host registers */

    if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
        return false;
    }

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }
    r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
    r |= hv_vcpu_destroy(fd);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar.id_aa64mmfr0);

    /*
     * Disable SME, which is not properly handled by QEMU hvf yet.
     * To allow this through we would need to:
     * - make sure that the SME state is correctly handled in the
     *   get_registers/put_registers functions
     * - get the SME-specific CPU properties to work with accelerators
     *   other than TCG
     * - fix any assumptions we made that SME implies SVE (since
     *   on the M4 there is SME but not SVE)
     */
    host_isar.id_aa64pfr1 &= ~R_ID_AA64PFR1_SME_MASK;

    ahcf->isar = host_isar;

    /*
     * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
     * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
     */
    ahcf->reset_sctlr = 0x30100180;
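/*
 * MDSCR_EL1.SS (bit 0) enables software step; MDSCR_EL1.MDE (bit 15)
 * enables breakpoint and watchpoint debug events.
 */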
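/*
 * ID_AA64DFR0_EL1.BRPs/WRPs hold the number of breakpoint/watchpoint
 * register pairs minus one, hence the "+ 1" below.
 */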
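/*
 * These field offsets match the ISS layout that ESR_EL2 uses for trapped
 * MSR/MRS accesses: op0 at [21:20], op2 at [19:17], op1 at [16:14],
 * CRn at [13:10] and CRm at [4:1]. Bits [9:5] (Rt) and bit [0] (the
 * read/write direction) are excluded from SYSREG_MASK, so a trap
 * syndrome can be compared against the SYSREG_* constants directly.
 */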
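/* ISS bit 0 of a trapped WFx instruction: 0 means WFI, 1 means WFE. */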
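/* CNTP_CTL_EL0/CNTV_CTL_EL0 timer control bits */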
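/* IPA size (in bits) picked at VM creation; 0 until hvf_arch_vm_create() runs */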
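/*
 * In the syndrome's Rt field, 31 encodes XZR/WZR: writes are discarded
 * and reads return zero, so only rt < 31 maps to a real GPR.
 */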
    /*
     * SCTLR_EL1.SPAN (bit 23) = 1 leaves PSTATE.PAN unchanged on exception
     * entry, i.e. the Set-PAN behaviour is disabled. Start with the bit set
     * for compatibility; guest software can enable Set-PAN by clearing it.
     */
    ahcf->reset_sctlr |= 0x00800000;

    /* Make sure we don't advertise AArch32 support for EL0/EL1 */
    if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
        return false;
    }

    return r == HV_SUCCESS;
}

uint32_t hvf_arm_get_default_ipa_bit_size(void)
{
    uint32_t default_ipa_size;
    hv_return_t ret = hv_vm_config_get_default_ipa_size(&default_ipa_size);
    assert_hvf_ok(ret);

    return default_ipa_size;
}

uint32_t hvf_arm_get_max_ipa_bit_size(void)
{
    uint32_t max_ipa_size;
    hv_return_t ret = hv_vm_config_get_max_ipa_size(&max_ipa_size);
    assert_hvf_ok(ret);

    /*
     * We clamp any IPA size we want to back the VM with to a valid PARange
     * value so the guest doesn't try and map memory outside of the valid range.
     * This logic just clamps the passed in IPA bit size to the first valid
     * PARange value <= to it.
     */
    return round_down_to_parange_bit_size(max_ipa_size);
}

void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    if (!arm_host_cpu_features.dtb_compatible) {
        if (!hvf_enabled() ||
            !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /*
             * We can't report this error yet, so flag that we need to
             * in arm_cpu_realizefn().
             */
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    cpu->env.features = arm_host_cpu_features.features;
    cpu->midr = arm_host_cpu_features.midr;
    cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}

hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
{
    hv_return_t ret;
    hv_vm_config_t config = hv_vm_config_create();

    ret = hv_vm_config_set_ipa_size(config, pa_range);
    if (ret != HV_SUCCESS) {
        goto cleanup;
    }
    chosen_ipa_bit_size = pa_range;

    ret = hv_vm_create(config);

cleanup:
    os_release(config);

    return ret;
}

int hvf_arch_init_vcpu(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
    uint32_t sregs_cnt = 0;
    uint64_t pfr;
    hv_return_t ret;
    int i;

    env->aarch64 = true;
    asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));

    /* Allocate enough space for our sysreg sync */
    arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
                                     sregs_match_len);
    arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
                                    sregs_match_len);
    arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
                                             arm_cpu->cpreg_vmstate_indexes,
                                             sregs_match_len);
    arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
                                            arm_cpu->cpreg_vmstate_values,
                                            sregs_match_len);

    memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));

    /* Populate cp list for all known sysregs */
    for (i = 0; i < sregs_match_len; i++) {
        const ARMCPRegInfo *ri;
        uint32_t key = hvf_sreg_match[i].key;

        ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
        if (ri) {
            assert(!(ri->type & ARM_CP_NO_RAW));
            hvf_sreg_match[i].cp_idx = sregs_cnt;
            arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
        } else {
            hvf_sreg_match[i].cp_idx = -1;
        }
    }
    arm_cpu->cpreg_array_len = sregs_cnt;
    arm_cpu->cpreg_vmstate_array_len = sregs_cnt;

    assert(write_cpustate_to_list(arm_cpu, false));

    /* Set CP_NO_RAW system registers on init */
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1,
                              arm_cpu->midr);
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1,
                              arm_cpu->mp_affinity);
    assert_hvf_ok(ret);

    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
    assert_hvf_ok(ret);
    pfr |= env->gicv3state ? (1 << 24) : 0;
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
    assert_hvf_ok(ret);

    /* We're limited to underlying hardware caps, override internal versions */
    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              &arm_cpu->isar.id_aa64mmfr0);
    assert_hvf_ok(ret);

    clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar.id_aa64mmfr0);
    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              arm_cpu->isar.id_aa64mmfr0);
    assert_hvf_ok(ret);

    return 0;
}

void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpus_exit(&cpu->accel->fd, 1);
}

static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
                                uint32_t syndrome)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    cpu->exception_index = excp;
    env->exception.target_el = 1;
    env->exception.syndrome = syndrome;

    arm_cpu_do_interrupt(cpu);
}

static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
{
    int32_t ret = arm_set_cpu_off(arm_cpu_mp_affinity(arm_cpu));
    assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}

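    /*
     * Mirror the host counter frequency; the guest's generic timer is
     * expected to tick at the same rate as the host under hvf.
     */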
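    /* ID_AA64PFR0_EL1.GIC (bits [27:24]) = 1: sysreg GICv3 CPU interface */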
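/* Inject a synchronous exception with the given syndrome into the guest's EL1 */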
/*
 * Handle a PSCI call.
 *
 * Returns true on success,
 *         false when the PSCI call is unknown.
 */
1109 static bool hvf_handle_psci_call(CPUState *cpu)
1110 {
1111     ARMCPU *arm_cpu = ARM_CPU(cpu);
1112     CPUARMState *env = &arm_cpu->env;
1113     uint64_t param[4] = {
1114         env->xregs[0],
1115         env->xregs[1],
1116         env->xregs[2],
1117         env->xregs[3]
1118     };
1119     uint64_t context_id, mpidr;
1120     bool target_aarch64 = true;
1121     CPUState *target_cpu_state;
1122     ARMCPU *target_cpu;
1123     target_ulong entry;
1124     int target_el = 1;
1125     int32_t ret = 0;
1126 
1127     trace_hvf_psci_call(param[0], param[1], param[2], param[3],
1128                         arm_cpu_mp_affinity(arm_cpu));
1129 
1130     switch (param[0]) {
1131     case QEMU_PSCI_0_2_FN_PSCI_VERSION:
1132         ret = QEMU_PSCI_VERSION_1_1;
1133         break;
1134     case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
1135         ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
1136         break;
1137     case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
1138     case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
1139         mpidr = param[1];
1140 
1141         switch (param[2]) {
1142         case 0:
1143             target_cpu_state = arm_get_cpu_by_id(mpidr);
1144             if (!target_cpu_state) {
1145                 ret = QEMU_PSCI_RET_INVALID_PARAMS;
1146                 break;
1147             }
1148             target_cpu = ARM_CPU(target_cpu_state);
1149 
1150             ret = target_cpu->power_state;
1151             break;
1152         default:
1153             /* Everything above affinity level 0 is always on. */
1154             ret = 0;
1155         }
1156         break;
1157     case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
1158         qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
1159         /*
1160          * QEMU reset and shutdown are async requests, but PSCI
1161          * mandates that we never return from the reset/shutdown
1162          * call, so power the CPU off now so it doesn't execute
1163          * anything further.
1164          */
1165         hvf_psci_cpu_off(arm_cpu);
1166         break;
1167     case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
1168         qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
1169         hvf_psci_cpu_off(arm_cpu);
1170         break;
1171     case QEMU_PSCI_0_1_FN_CPU_ON:
1172     case QEMU_PSCI_0_2_FN_CPU_ON:
1173     case QEMU_PSCI_0_2_FN64_CPU_ON:
1174         mpidr = param[1];
1175         entry = param[2];
1176         context_id = param[3];
1177         ret = arm_set_cpu_on(mpidr, entry, context_id,
1178                              target_el, target_aarch64);
1179         break;
1180     case QEMU_PSCI_0_1_FN_CPU_OFF:
1181     case QEMU_PSCI_0_2_FN_CPU_OFF:
1182         hvf_psci_cpu_off(arm_cpu);
1183         break;
1184     case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
1185     case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
1186     case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
1187         /* Affinity levels are not supported in QEMU */
1188         if (param[1] & 0xfffe0000) {
1189             ret = QEMU_PSCI_RET_INVALID_PARAMS;
1190             break;
1191         }
1192         /* Powerdown is not supported, we always go into WFI */
1193         env->xregs[0] = 0;
1194         hvf_wfi(cpu);
1195         break;
1196     case QEMU_PSCI_0_1_FN_MIGRATE:
1197     case QEMU_PSCI_0_2_FN_MIGRATE:
1198         ret = QEMU_PSCI_RET_NOT_SUPPORTED;
1199         break;
1200     case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
1201         switch (param[1]) {
1202         case QEMU_PSCI_0_2_FN_PSCI_VERSION:
1203         case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
1204         case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
1205         case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
1206         case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
1207         case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
1208         case QEMU_PSCI_0_1_FN_CPU_ON:
1209         case QEMU_PSCI_0_2_FN_CPU_ON:
1210         case QEMU_PSCI_0_2_FN64_CPU_ON:
1211         case QEMU_PSCI_0_1_FN_CPU_OFF:
1212         case QEMU_PSCI_0_2_FN_CPU_OFF:
1213         case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
1214         case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
1215         case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
1216         case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
1217             ret = 0;
1218             break;
1219         case QEMU_PSCI_0_1_FN_MIGRATE:
1220         case QEMU_PSCI_0_2_FN_MIGRATE:
1221         default:
1222             ret = QEMU_PSCI_RET_NOT_SUPPORTED;
1223         }
1224         break;
1225     default:
1226         return false;
1227     }
1228 
1229     env->xregs[0] = ret;
1230     return true;
1231 }
1232 
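/*
 * op0 == 3, op1 == 0, CRn == 0, CRm == 1..7 is the ID register space;
 * unallocated registers in this range are architecturally read-as-zero.
 */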
1233 static bool is_id_sysreg(uint32_t reg)
1234 {
1235     return SYSREG_OP0(reg) == 3 &&
1236            SYSREG_OP1(reg) == 0 &&
1237            SYSREG_CRN(reg) == 0 &&
1238            SYSREG_CRM(reg) >= 1 &&
1239            SYSREG_CRM(reg) < 8;
1240 }
1241 
1242 static uint32_t hvf_reg2cp_reg(uint32_t reg)
1243 {
1244     return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
1245                               (reg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK,
1246                               (reg >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK,
1247                               (reg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK,
1248                               (reg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK,
1249                               (reg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK);
1250 }
1251 
1252 static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
1253 {
1254     ARMCPU *arm_cpu = ARM_CPU(cpu);
1255     CPUARMState *env = &arm_cpu->env;
1256     const ARMCPRegInfo *ri;
1257 
1258     ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
1259     if (ri) {
1260         if (ri->accessfn) {
1261             if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) {
1262                 return false;
1263             }
1264         }
1265         if (ri->type & ARM_CP_CONST) {
1266             *val = ri->resetvalue;
1267         } else if (ri->readfn) {
1268             *val = ri->readfn(env, ri);
1269         } else {
1270             *val = CPREG_FIELD64(env, ri);
1271         }
1272         trace_hvf_vgic_read(ri->name, *val);
1273         return true;
1274     }
1275 
1276     return false;
1277 }
1278 
1279 static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
1280 {
1281     ARMCPU *arm_cpu = ARM_CPU(cpu);
1282     CPUARMState *env = &arm_cpu->env;
1283 
1284     if (arm_feature(env, ARM_FEATURE_PMU)) {
1285         switch (reg) {
1286         case SYSREG_PMCR_EL0:
1287             *val = env->cp15.c9_pmcr;
1288             return 0;
1289         case SYSREG_PMCCNTR_EL0:
1290             pmu_op_start(env);
1291             *val = env->cp15.c15_ccnt;
1292             pmu_op_finish(env);
1293             return 0;
1294         case SYSREG_PMCNTENCLR_EL0:
1295             *val = env->cp15.c9_pmcnten;
1296             return 0;
1297         case SYSREG_PMOVSCLR_EL0:
1298             *val = env->cp15.c9_pmovsr;
1299             return 0;
1300         case SYSREG_PMSELR_EL0:
1301             *val = env->cp15.c9_pmselr;
1302             return 0;
1303         case SYSREG_PMINTENCLR_EL1:
1304             *val = env->cp15.c9_pminten;
1305             return 0;
1306         case SYSREG_PMCCFILTR_EL0:
1307             *val = env->cp15.pmccfiltr_el0;
1308             return 0;
1309         case SYSREG_PMCNTENSET_EL0:
1310             *val = env->cp15.c9_pmcnten;
1311             return 0;
1312         case SYSREG_PMUSERENR_EL0:
1313             *val = env->cp15.c9_pmuserenr;
1314             return 0;
1315         case SYSREG_PMCEID0_EL0:
1316         case SYSREG_PMCEID1_EL0:
1317             /* We can't really count anything yet, declare all events invalid */
1318             *val = 0;
1319             return 0;
1320         }
1321     }
1322 
1323     switch (reg) {
1324     case SYSREG_CNTPCT_EL0:
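        /* Derive the counter value from the virtual clock: ticks = ns / period */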
1325         *val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
1326               gt_cntfrq_period_ns(arm_cpu);
1327         return 0;
1328     case SYSREG_OSLSR_EL1:
1329         *val = env->cp15.oslsr_el1;
1330         return 0;
1331     case SYSREG_OSDLR_EL1:
1332         /* Dummy register */
1333         return 0;
1334     case SYSREG_ICC_AP0R0_EL1:
1335     case SYSREG_ICC_AP0R1_EL1:
1336     case SYSREG_ICC_AP0R2_EL1:
1337     case SYSREG_ICC_AP0R3_EL1:
1338     case SYSREG_ICC_AP1R0_EL1:
1339     case SYSREG_ICC_AP1R1_EL1:
1340     case SYSREG_ICC_AP1R2_EL1:
1341     case SYSREG_ICC_AP1R3_EL1:
1342     case SYSREG_ICC_ASGI1R_EL1:
1343     case SYSREG_ICC_BPR0_EL1:
1344     case SYSREG_ICC_BPR1_EL1:
1345     case SYSREG_ICC_DIR_EL1:
1346     case SYSREG_ICC_EOIR0_EL1:
1347     case SYSREG_ICC_EOIR1_EL1:
1348     case SYSREG_ICC_HPPIR0_EL1:
1349     case SYSREG_ICC_HPPIR1_EL1:
1350     case SYSREG_ICC_IAR0_EL1:
1351     case SYSREG_ICC_IAR1_EL1:
1352     case SYSREG_ICC_IGRPEN0_EL1:
1353     case SYSREG_ICC_IGRPEN1_EL1:
1354     case SYSREG_ICC_PMR_EL1:
1355     case SYSREG_ICC_SGI0R_EL1:
1356     case SYSREG_ICC_SGI1R_EL1:
1357     case SYSREG_ICC_SRE_EL1:
1358     case SYSREG_ICC_CTLR_EL1:
1359         /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
1360         if (hvf_sysreg_read_cp(cpu, reg, val)) {
1361             return 0;
1362         }
1363         break;
1364     case SYSREG_DBGBVR0_EL1:
1365     case SYSREG_DBGBVR1_EL1:
1366     case SYSREG_DBGBVR2_EL1:
1367     case SYSREG_DBGBVR3_EL1:
1368     case SYSREG_DBGBVR4_EL1:
1369     case SYSREG_DBGBVR5_EL1:
1370     case SYSREG_DBGBVR6_EL1:
1371     case SYSREG_DBGBVR7_EL1:
1372     case SYSREG_DBGBVR8_EL1:
1373     case SYSREG_DBGBVR9_EL1:
1374     case SYSREG_DBGBVR10_EL1:
1375     case SYSREG_DBGBVR11_EL1:
1376     case SYSREG_DBGBVR12_EL1:
1377     case SYSREG_DBGBVR13_EL1:
1378     case SYSREG_DBGBVR14_EL1:
1379     case SYSREG_DBGBVR15_EL1:
1380         *val = env->cp15.dbgbvr[SYSREG_CRM(reg)];
1381         return 0;
1382     case SYSREG_DBGBCR0_EL1:
1383     case SYSREG_DBGBCR1_EL1:
1384     case SYSREG_DBGBCR2_EL1:
1385     case SYSREG_DBGBCR3_EL1:
1386     case SYSREG_DBGBCR4_EL1:
1387     case SYSREG_DBGBCR5_EL1:
1388     case SYSREG_DBGBCR6_EL1:
1389     case SYSREG_DBGBCR7_EL1:
1390     case SYSREG_DBGBCR8_EL1:
1391     case SYSREG_DBGBCR9_EL1:
1392     case SYSREG_DBGBCR10_EL1:
1393     case SYSREG_DBGBCR11_EL1:
1394     case SYSREG_DBGBCR12_EL1:
1395     case SYSREG_DBGBCR13_EL1:
1396     case SYSREG_DBGBCR14_EL1:
1397     case SYSREG_DBGBCR15_EL1:
1398         *val = env->cp15.dbgbcr[SYSREG_CRM(reg)];
1399         return 0;
1400     case SYSREG_DBGWVR0_EL1:
1401     case SYSREG_DBGWVR1_EL1:
1402     case SYSREG_DBGWVR2_EL1:
1403     case SYSREG_DBGWVR3_EL1:
1404     case SYSREG_DBGWVR4_EL1:
1405     case SYSREG_DBGWVR5_EL1:
1406     case SYSREG_DBGWVR6_EL1:
1407     case SYSREG_DBGWVR7_EL1:
1408     case SYSREG_DBGWVR8_EL1:
1409     case SYSREG_DBGWVR9_EL1:
1410     case SYSREG_DBGWVR10_EL1:
1411     case SYSREG_DBGWVR11_EL1:
1412     case SYSREG_DBGWVR12_EL1:
1413     case SYSREG_DBGWVR13_EL1:
1414     case SYSREG_DBGWVR14_EL1:
1415     case SYSREG_DBGWVR15_EL1:
1416         *val = env->cp15.dbgwvr[SYSREG_CRM(reg)];
1417         return 0;
1418     case SYSREG_DBGWCR0_EL1:
1419     case SYSREG_DBGWCR1_EL1:
1420     case SYSREG_DBGWCR2_EL1:
1421     case SYSREG_DBGWCR3_EL1:
1422     case SYSREG_DBGWCR4_EL1:
1423     case SYSREG_DBGWCR5_EL1:
1424     case SYSREG_DBGWCR6_EL1:
1425     case SYSREG_DBGWCR7_EL1:
1426     case SYSREG_DBGWCR8_EL1:
1427     case SYSREG_DBGWCR9_EL1:
1428     case SYSREG_DBGWCR10_EL1:
1429     case SYSREG_DBGWCR11_EL1:
1430     case SYSREG_DBGWCR12_EL1:
1431     case SYSREG_DBGWCR13_EL1:
1432     case SYSREG_DBGWCR14_EL1:
1433     case SYSREG_DBGWCR15_EL1:
1434         *val = env->cp15.dbgwcr[SYSREG_CRM(reg)];
1435         return 0;
1436     default:
1437         if (is_id_sysreg(reg)) {
1438             /* ID system registers read as RES0 */
1439             *val = 0;
1440             return 0;
1441         }
1442     }
1443 
1444     cpu_synchronize_state(cpu);
1445     trace_hvf_unhandled_sysreg_read(env->pc, reg,
1446                                     SYSREG_OP0(reg),
1447                                     SYSREG_OP1(reg),
1448                                     SYSREG_CRN(reg),
1449                                     SYSREG_CRM(reg),
1450                                     SYSREG_OP2(reg));
1451     hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1452     return 1;
1453 }
1454 
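/*
 * Drive the PMU interrupt line: it is asserted only while the PMU is
 * globally enabled (PMCR_EL0.E) and at least one overflow flag is set
 * whose corresponding PMINTEN bit is enabled.
 */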
1455 static void pmu_update_irq(CPUARMState *env)
1456 {
1457     ARMCPU *cpu = env_archcpu(env);
1458     qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1459             (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1460 }
1461 
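/*
 * hvf does not implement any countable PMU events yet, so every event
 * number is reported as unsupported.
 */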
1462 static bool pmu_event_supported(uint16_t number)
1463 {
1464     return false;
1465 }
1466 
/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events
 * using the current EL, security state, and register configuration.
 */
1470 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1471 {
1472     uint64_t filter;
1473     bool enabled, filtered = true;
1474     int el = arm_current_el(env);
1475 
1476     enabled = (env->cp15.c9_pmcr & PMCRE) &&
1477               (env->cp15.c9_pmcnten & (1 << counter));
1478 
1479     if (counter == 31) {
1480         filter = env->cp15.pmccfiltr_el0;
1481     } else {
1482         filter = env->cp15.c14_pmevtyper[counter];
1483     }
1484 
1485     if (el == 0) {
1486         filtered = filter & PMXEVTYPER_U;
1487     } else if (el == 1) {
1488         filtered = filter & PMXEVTYPER_P;
1489     }
1490 
1491     if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is set up for an
         * event we support.
         */
1496         uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1497         if (!pmu_event_supported(event)) {
1498             return false;
1499         }
1500     }
1501 
1502     return enabled && !filtered;
1503 }
1504 
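/*
 * PMSWINC_EL0 (software increment): for every counter whose bit is set in
 * the written value, bump it iff it is enabled, unfiltered and configured
 * for the SW_INCR event (event number 0).
 */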
1505 static void pmswinc_write(CPUARMState *env, uint64_t value)
1506 {
1507     unsigned int i;
1508     for (i = 0; i < pmu_num_counters(env); i++) {
1509         /* Increment a counter's count iff: */
1510         if ((value & (1 << i)) && /* counter's bit is set */
1511                 /* counter is enabled and not filtered */
1512                 pmu_counter_enabled(env, i) &&
1513                 /* counter is SW_INCR */
1514                 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1515             /*
1516              * Detect if this write causes an overflow since we can't predict
1517              * PMSWINC overflows like we can for other events
1518              */
1519             uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1520 
1521             if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1522                 env->cp15.c9_pmovsr |= (1 << i);
1523                 pmu_update_irq(env);
1524             }
1525 
1526             env->cp15.c14_pmevcntr[i] = new_pmswinc;
1527         }
1528     }
1529 }
1530 
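/*
 * Mirror of hvf_sysreg_read_cp() for writes: look the encoding up in the
 * TCG cpreg table and perform the write there, returning true only when a
 * matching reginfo exists and its access check passes.
 */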
1531 static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
1532 {
1533     ARMCPU *arm_cpu = ARM_CPU(cpu);
1534     CPUARMState *env = &arm_cpu->env;
1535     const ARMCPRegInfo *ri;
1536 
1537     ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
1538 
1539     if (ri) {
1540         if (ri->accessfn) {
1541             if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) {
1542                 return false;
1543             }
1544         }
1545         if (ri->writefn) {
1546             ri->writefn(env, ri, val);
1547         } else {
1548             CPREG_FIELD64(env, ri) = val;
1549         }
1550 
1551         trace_hvf_vgic_write(ri->name, val);
1552         return true;
1553     }
1554 
1555     return false;
1556 }
1557 
1558 static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
1559 {
1560     ARMCPU *arm_cpu = ARM_CPU(cpu);
1561     CPUARMState *env = &arm_cpu->env;
1562 
1563     trace_hvf_sysreg_write(reg,
1564                            SYSREG_OP0(reg),
1565                            SYSREG_OP1(reg),
1566                            SYSREG_CRN(reg),
1567                            SYSREG_CRM(reg),
1568                            SYSREG_OP2(reg),
1569                            val);
1570 
1571     if (arm_feature(env, ARM_FEATURE_PMU)) {
1572         switch (reg) {
1573         case SYSREG_PMCCNTR_EL0:
1574             pmu_op_start(env);
1575             env->cp15.c15_ccnt = val;
1576             pmu_op_finish(env);
1577             return 0;
1578         case SYSREG_PMCR_EL0:
1579             pmu_op_start(env);
1580 
1581             if (val & PMCRC) {
1582                 /* The counter has been reset */
1583                 env->cp15.c15_ccnt = 0;
1584             }
1585 
1586             if (val & PMCRP) {
1587                 unsigned int i;
1588                 for (i = 0; i < pmu_num_counters(env); i++) {
1589                     env->cp15.c14_pmevcntr[i] = 0;
1590                 }
1591             }
1592 
1593             env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
1594             env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);
1595 
1596             pmu_op_finish(env);
1597             return 0;
1598         case SYSREG_PMUSERENR_EL0:
1599             env->cp15.c9_pmuserenr = val & 0xf;
1600             return 0;
1601         case SYSREG_PMCNTENSET_EL0:
1602             env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
1603             return 0;
1604         case SYSREG_PMCNTENCLR_EL0:
1605             env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
1606             return 0;
        case SYSREG_PMINTENCLR_EL1:
            pmu_op_start(env);
            /* Write-one-to-clear: drop the enable bits that are set in val */
            env->cp15.c9_pminten &= ~val;
            pmu_op_finish(env);
            return 0;
1612         case SYSREG_PMOVSCLR_EL0:
1613             pmu_op_start(env);
1614             env->cp15.c9_pmovsr &= ~val;
1615             pmu_op_finish(env);
1616             return 0;
1617         case SYSREG_PMSWINC_EL0:
1618             pmu_op_start(env);
1619             pmswinc_write(env, val);
1620             pmu_op_finish(env);
1621             return 0;
1622         case SYSREG_PMSELR_EL0:
1623             env->cp15.c9_pmselr = val & 0x1f;
1624             return 0;
1625         case SYSREG_PMCCFILTR_EL0:
1626             pmu_op_start(env);
1627             env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
1628             pmu_op_finish(env);
1629             return 0;
1630         }
1631     }
1632 
1633     switch (reg) {
1634     case SYSREG_OSLAR_EL1:
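        /*
         * Simplified OS Lock model: latch the written lock bit straight
         * into our OSLSR_EL1 value rather than modelling the full
         * OSLAR/OSLSR pair.
         */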
1635         env->cp15.oslsr_el1 = val & 1;
1636         return 0;
1637     case SYSREG_CNTP_CTL_EL0:
1638         /*
1639          * Guests should not rely on the physical counter, but macOS emits
1640          * disable writes to it. Let it do so, but ignore the requests.
1641          */
1642         qemu_log_mask(LOG_UNIMP, "Unsupported write to CNTP_CTL_EL0\n");
1643         return 0;
1644     case SYSREG_OSDLR_EL1:
1645         /* Dummy register */
1646         return 0;
1647     case SYSREG_ICC_AP0R0_EL1:
1648     case SYSREG_ICC_AP0R1_EL1:
1649     case SYSREG_ICC_AP0R2_EL1:
1650     case SYSREG_ICC_AP0R3_EL1:
1651     case SYSREG_ICC_AP1R0_EL1:
1652     case SYSREG_ICC_AP1R1_EL1:
1653     case SYSREG_ICC_AP1R2_EL1:
1654     case SYSREG_ICC_AP1R3_EL1:
1655     case SYSREG_ICC_ASGI1R_EL1:
1656     case SYSREG_ICC_BPR0_EL1:
1657     case SYSREG_ICC_BPR1_EL1:
1658     case SYSREG_ICC_CTLR_EL1:
1659     case SYSREG_ICC_DIR_EL1:
1660     case SYSREG_ICC_EOIR0_EL1:
1661     case SYSREG_ICC_EOIR1_EL1:
1662     case SYSREG_ICC_HPPIR0_EL1:
1663     case SYSREG_ICC_HPPIR1_EL1:
1664     case SYSREG_ICC_IAR0_EL1:
1665     case SYSREG_ICC_IAR1_EL1:
1666     case SYSREG_ICC_IGRPEN0_EL1:
1667     case SYSREG_ICC_IGRPEN1_EL1:
1668     case SYSREG_ICC_PMR_EL1:
1669     case SYSREG_ICC_SGI0R_EL1:
1670     case SYSREG_ICC_SGI1R_EL1:
1671     case SYSREG_ICC_SRE_EL1:
1672         /* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
1673         if (hvf_sysreg_write_cp(cpu, reg, val)) {
1674             return 0;
1675         }
1676         break;
1677     case SYSREG_MDSCR_EL1:
1678         env->cp15.mdscr_el1 = val;
1679         return 0;
1680     case SYSREG_DBGBVR0_EL1:
1681     case SYSREG_DBGBVR1_EL1:
1682     case SYSREG_DBGBVR2_EL1:
1683     case SYSREG_DBGBVR3_EL1:
1684     case SYSREG_DBGBVR4_EL1:
1685     case SYSREG_DBGBVR5_EL1:
1686     case SYSREG_DBGBVR6_EL1:
1687     case SYSREG_DBGBVR7_EL1:
1688     case SYSREG_DBGBVR8_EL1:
1689     case SYSREG_DBGBVR9_EL1:
1690     case SYSREG_DBGBVR10_EL1:
1691     case SYSREG_DBGBVR11_EL1:
1692     case SYSREG_DBGBVR12_EL1:
1693     case SYSREG_DBGBVR13_EL1:
1694     case SYSREG_DBGBVR14_EL1:
1695     case SYSREG_DBGBVR15_EL1:
1696         env->cp15.dbgbvr[SYSREG_CRM(reg)] = val;
1697         return 0;
1698     case SYSREG_DBGBCR0_EL1:
1699     case SYSREG_DBGBCR1_EL1:
1700     case SYSREG_DBGBCR2_EL1:
1701     case SYSREG_DBGBCR3_EL1:
1702     case SYSREG_DBGBCR4_EL1:
1703     case SYSREG_DBGBCR5_EL1:
1704     case SYSREG_DBGBCR6_EL1:
1705     case SYSREG_DBGBCR7_EL1:
1706     case SYSREG_DBGBCR8_EL1:
1707     case SYSREG_DBGBCR9_EL1:
1708     case SYSREG_DBGBCR10_EL1:
1709     case SYSREG_DBGBCR11_EL1:
1710     case SYSREG_DBGBCR12_EL1:
1711     case SYSREG_DBGBCR13_EL1:
1712     case SYSREG_DBGBCR14_EL1:
1713     case SYSREG_DBGBCR15_EL1:
1714         env->cp15.dbgbcr[SYSREG_CRM(reg)] = val;
1715         return 0;
1716     case SYSREG_DBGWVR0_EL1:
1717     case SYSREG_DBGWVR1_EL1:
1718     case SYSREG_DBGWVR2_EL1:
1719     case SYSREG_DBGWVR3_EL1:
1720     case SYSREG_DBGWVR4_EL1:
1721     case SYSREG_DBGWVR5_EL1:
1722     case SYSREG_DBGWVR6_EL1:
1723     case SYSREG_DBGWVR7_EL1:
1724     case SYSREG_DBGWVR8_EL1:
1725     case SYSREG_DBGWVR9_EL1:
1726     case SYSREG_DBGWVR10_EL1:
1727     case SYSREG_DBGWVR11_EL1:
1728     case SYSREG_DBGWVR12_EL1:
1729     case SYSREG_DBGWVR13_EL1:
1730     case SYSREG_DBGWVR14_EL1:
1731     case SYSREG_DBGWVR15_EL1:
1732         env->cp15.dbgwvr[SYSREG_CRM(reg)] = val;
1733         return 0;
1734     case SYSREG_DBGWCR0_EL1:
1735     case SYSREG_DBGWCR1_EL1:
1736     case SYSREG_DBGWCR2_EL1:
1737     case SYSREG_DBGWCR3_EL1:
1738     case SYSREG_DBGWCR4_EL1:
1739     case SYSREG_DBGWCR5_EL1:
1740     case SYSREG_DBGWCR6_EL1:
1741     case SYSREG_DBGWCR7_EL1:
1742     case SYSREG_DBGWCR8_EL1:
1743     case SYSREG_DBGWCR9_EL1:
1744     case SYSREG_DBGWCR10_EL1:
1745     case SYSREG_DBGWCR11_EL1:
1746     case SYSREG_DBGWCR12_EL1:
1747     case SYSREG_DBGWCR13_EL1:
1748     case SYSREG_DBGWCR14_EL1:
1749     case SYSREG_DBGWCR15_EL1:
1750         env->cp15.dbgwcr[SYSREG_CRM(reg)] = val;
1751         return 0;
1752     }
1753 
1754     cpu_synchronize_state(cpu);
1755     trace_hvf_unhandled_sysreg_write(env->pc, reg,
1756                                      SYSREG_OP0(reg),
1757                                      SYSREG_OP1(reg),
1758                                      SYSREG_CRN(reg),
1759                                      SYSREG_CRM(reg),
1760                                      SYSREG_OP2(reg));
1761     hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
1762     return 1;
1763 }
1764 
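/*
 * Propagate QEMU's pending IRQ/FIQ lines into hvf so they are delivered
 * to the guest on the next vCPU entry.
 */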
1765 static int hvf_inject_interrupts(CPUState *cpu)
1766 {
1767     if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
1768         trace_hvf_inject_fiq();
1769         hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
1770                                       true);
1771     }
1772 
1773     if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
1774         trace_hvf_inject_irq();
1775         hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
1776                                       true);
1777     }
1778 
1779     return 0;
1780 }
1781 
1782 static uint64_t hvf_vtimer_val_raw(void)
1783 {
    /*
     * mach_absolute_time() returns the raw host counter, i.e. the vtimer
     * value without the VM offset that we define; subtract that offset to
     * get the guest's view.
     */
1788     return mach_absolute_time() - hvf_state->vtimer_offset;
1789 }
1790 
1791 static uint64_t hvf_vtimer_val(void)
1792 {
1793     if (!runstate_is_running()) {
1794         /* VM is paused, the vtimer value is in vtimer.vtimer_val */
1795         return vtimer.vtimer_val;
1796     }
1797 
1798     return hvf_vtimer_val_raw();
1799 }
1800 
1801 static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
1802 {
1803     /*
1804      * Use pselect to sleep so that other threads can IPI us while we're
1805      * sleeping.
1806      */
1807     qatomic_set_mb(&cpu->thread_kicked, false);
1808     bql_unlock();
1809     pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
1810     bql_lock();
1811 }
1812 
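/*
 * Emulate WFI: if the virtual timer is armed, sleep until either its
 * deadline or an IPI wakes us; very short waits simply return to the
 * guest instead of sleeping.
 */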
1813 static void hvf_wfi(CPUState *cpu)
1814 {
1815     ARMCPU *arm_cpu = ARM_CPU(cpu);
1816     struct timespec ts;
1817     hv_return_t r;
1818     uint64_t ctl;
1819     uint64_t cval;
1820     int64_t ticks_to_sleep;
1821     uint64_t seconds;
1822     uint64_t nanos;
1823     uint32_t cntfrq;
1824 
1825     if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
1826         /* Interrupt pending, no need to wait */
1827         return;
1828     }
1829 
1830     r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
1831     assert_hvf_ok(r);
1832 
    if (!(ctl & TMR_CTL_ENABLE) || (ctl & TMR_CTL_IMASK)) {
        /* Timer disabled or masked, just wait for an IPI. */
        hvf_wait_for_ipi(cpu, NULL);
        return;
    }
1838 
1839     r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
1840     assert_hvf_ok(r);
1841 
1842     ticks_to_sleep = cval - hvf_vtimer_val();
1843     if (ticks_to_sleep < 0) {
1844         return;
1845     }
1846 
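    /*
     * gt_cntfrq_period_ns() returns the counter's tick period in
     * nanoseconds (despite the cntfrq name), so ticks * period below
     * yields nanoseconds.
     */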
1847     cntfrq = gt_cntfrq_period_ns(arm_cpu);
1848     seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
1849     ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
1850     nanos = ticks_to_sleep * cntfrq;
1851 
1852     /*
1853      * Don't sleep for less than the time a context switch would take,
1854      * so that we can satisfy fast timer requests on the same CPU.
1855      * Measurements on M1 show the sweet spot to be ~2ms.
1856      */
1857     if (!seconds && nanos < (2 * SCALE_MS)) {
1858         return;
1859     }
1860 
1861     ts = (struct timespec) { seconds, nanos };
1862     hvf_wait_for_ipi(cpu, &ts);
1863 }
1864 
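/*
 * While the vtimer is masked (after a VTIMER_ACTIVATED exit), mirror its
 * state onto QEMU's GTIMER_VIRT line and unmask it once it stops
 * asserting.
 */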
1865 static void hvf_sync_vtimer(CPUState *cpu)
1866 {
1867     ARMCPU *arm_cpu = ARM_CPU(cpu);
1868     hv_return_t r;
1869     uint64_t ctl;
1870     bool irq_state;
1871 
1872     if (!cpu->accel->vtimer_masked) {
1873         /* We will get notified on vtimer changes by hvf, nothing to do */
1874         return;
1875     }
1876 
1877     r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
1878     assert_hvf_ok(r);
1879 
1880     irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
1881                 (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
1882     qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);
1883 
1884     if (!irq_state) {
1885         /* Timer no longer asserting, we can unmask it */
1886         hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
1887         cpu->accel->vtimer_masked = false;
1888     }
1889 }
1890 
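/*
 * Run the vCPU until it exits to us, then handle the exit: vtimer
 * activation, debug events, MMIO data aborts, sysreg traps, WFx and
 * HVC/SMC calls.
 */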
1891 int hvf_vcpu_exec(CPUState *cpu)
1892 {
1893     ARMCPU *arm_cpu = ARM_CPU(cpu);
1894     CPUARMState *env = &arm_cpu->env;
1895     int ret;
1896     hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
1897     hv_return_t r;
1898     bool advance_pc = false;
1899 
1900     if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) &&
1901         hvf_inject_interrupts(cpu)) {
1902         return EXCP_INTERRUPT;
1903     }
1904 
1905     if (cpu->halted) {
1906         return EXCP_HLT;
1907     }
1908 
1909     flush_cpu_state(cpu);
1910 
1911     bql_unlock();
1912     assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
1913 
1914     /* handle VMEXIT */
1915     uint64_t exit_reason = hvf_exit->reason;
1916     uint64_t syndrome = hvf_exit->exception.syndrome;
1917     uint32_t ec = syn_get_ec(syndrome);
1918 
1919     ret = 0;
1920     bql_lock();
1921     switch (exit_reason) {
1922     case HV_EXIT_REASON_EXCEPTION:
1923         /* This is the main one, handle below. */
1924         break;
1925     case HV_EXIT_REASON_VTIMER_ACTIVATED:
1926         qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
1927         cpu->accel->vtimer_masked = true;
1928         return 0;
1929     case HV_EXIT_REASON_CANCELED:
1930         /* we got kicked, no exit to process */
1931         return 0;
1932     default:
1933         g_assert_not_reached();
1934     }
1935 
1936     hvf_sync_vtimer(cpu);
1937 
1938     switch (ec) {
1939     case EC_SOFTWARESTEP: {
1940         ret = EXCP_DEBUG;
1941 
1942         if (!cpu->singlestep_enabled) {
1943             error_report("EC_SOFTWARESTEP but single-stepping not enabled");
1944         }
1945         break;
1946     }
1947     case EC_AA64_BKPT: {
1948         ret = EXCP_DEBUG;
1949 
1950         cpu_synchronize_state(cpu);
1951 
1952         if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
1953             /* Re-inject into the guest */
1954             ret = 0;
1955             hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0));
1956         }
1957         break;
1958     }
1959     case EC_BREAKPOINT: {
1960         ret = EXCP_DEBUG;
1961 
1962         cpu_synchronize_state(cpu);
1963 
1964         if (!find_hw_breakpoint(cpu, env->pc)) {
1965             error_report("EC_BREAKPOINT but unknown hw breakpoint");
1966         }
1967         break;
1968     }
1969     case EC_WATCHPOINT: {
1970         ret = EXCP_DEBUG;
1971 
1972         cpu_synchronize_state(cpu);
1973 
1974         CPUWatchpoint *wp =
1975             find_hw_watchpoint(cpu, hvf_exit->exception.virtual_address);
1976         if (!wp) {
            error_report("EC_WATCHPOINT but unknown hw watchpoint");
1978         }
1979         cpu->watchpoint_hit = wp;
1980         break;
1981     }
1982     case EC_DATAABORT: {
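        /*
         * Decode the data abort's ISS fields: syndrome validity (ISV),
         * write/read direction (WnR), stage-1 page table walk (S1PTW),
         * sign extension (SSE), access size (SAS), target register (SRT)
         * and cache maintenance (CM).
         */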
1983         bool isv = syndrome & ARM_EL_ISV;
1984         bool iswrite = (syndrome >> 6) & 1;
1985         bool s1ptw = (syndrome >> 7) & 1;
1986         bool sse = (syndrome >> 21) & 1;
1987         uint32_t sas = (syndrome >> 22) & 3;
1988         uint32_t len = 1 << sas;
1989         uint32_t srt = (syndrome >> 16) & 0x1f;
1990         uint32_t cm = (syndrome >> 8) & 0x1;
1991         uint64_t val = 0;
1992 
1993         trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
1994                              hvf_exit->exception.physical_address, isv,
1995                              iswrite, s1ptw, len, srt);
1996 
1997         if (cm) {
1998             /* We don't cache MMIO regions */
1999             advance_pc = true;
2000             break;
2001         }
2002 
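        /* Only aborts with valid syndrome info (ISV set) can be emulated. */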
2003         assert(isv);
2004 
2005         if (iswrite) {
2006             val = hvf_get_reg(cpu, srt);
2007             address_space_write(&address_space_memory,
2008                                 hvf_exit->exception.physical_address,
2009                                 MEMTXATTRS_UNSPECIFIED, &val, len);
2010         } else {
2011             address_space_read(&address_space_memory,
2012                                hvf_exit->exception.physical_address,
2013                                MEMTXATTRS_UNSPECIFIED, &val, len);
2014             if (sse) {
2015                 val = sextract64(val, 0, len * 8);
2016             }
2017             hvf_set_reg(cpu, srt, val);
2018         }
2019 
2020         advance_pc = true;
2021         break;
2022     }
2023     case EC_SYSTEMREGISTERTRAP: {
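        /*
         * ISS layout for MSR/MRS traps: bit 0 gives the access direction
         * (1 = read), bits [9:5] the Rt register; the remaining fields
         * encode the system register (Op0/Op1/CRn/CRm/Op2).
         */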
2024         bool isread = (syndrome >> 0) & 1;
2025         uint32_t rt = (syndrome >> 5) & 0x1f;
2026         uint32_t reg = syndrome & SYSREG_MASK;
2027         uint64_t val;
2028         int sysreg_ret = 0;
2029 
2030         if (isread) {
2031             sysreg_ret = hvf_sysreg_read(cpu, reg, &val);
2032             if (!sysreg_ret) {
2033                 trace_hvf_sysreg_read(reg,
2034                                       SYSREG_OP0(reg),
2035                                       SYSREG_OP1(reg),
2036                                       SYSREG_CRN(reg),
2037                                       SYSREG_CRM(reg),
2038                                       SYSREG_OP2(reg),
2039                                       val);
2040                 hvf_set_reg(cpu, rt, val);
2041             }
2042         } else {
2043             val = hvf_get_reg(cpu, rt);
2044             sysreg_ret = hvf_sysreg_write(cpu, reg, val);
2045         }
2046 
2047         advance_pc = !sysreg_ret;
2048         break;
2049     }
2050     case EC_WFX_TRAP:
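        /* WFE is just a hint we can ignore; only WFI actually idles. */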
2051         advance_pc = true;
2052         if (!(syndrome & WFX_IS_WFE)) {
2053             hvf_wfi(cpu);
2054         }
2055         break;
2056     case EC_AA64_HVC:
2057         cpu_synchronize_state(cpu);
2058         if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
2059             if (!hvf_handle_psci_call(cpu)) {
2060                 trace_hvf_unknown_hvc(env->xregs[0]);
2061                 /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
2062                 env->xregs[0] = -1;
2063             }
2064         } else {
2065             trace_hvf_unknown_hvc(env->xregs[0]);
2066             hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
2067         }
2068         break;
2069     case EC_AA64_SMC:
2070         cpu_synchronize_state(cpu);
2071         if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
2072             advance_pc = true;
2073 
2074             if (!hvf_handle_psci_call(cpu)) {
2075                 trace_hvf_unknown_smc(env->xregs[0]);
2076                 /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
2077                 env->xregs[0] = -1;
2078             }
2079         } else {
2080             trace_hvf_unknown_smc(env->xregs[0]);
2081             hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
2082         }
2083         break;
2084     default:
2085         cpu_synchronize_state(cpu);
2086         trace_hvf_exit(syndrome, ec, env->pc);
2087         error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
2088     }
2089 
2090     if (advance_pc) {
2091         uint64_t pc;
2092 
2093         flush_cpu_state(cpu);
2094 
2095         r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc);
2096         assert_hvf_ok(r);
2097         pc += 4;
2098         r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc);
2099         assert_hvf_ok(r);
2100 
2101         /* Handle single-stepping over instructions which trigger a VM exit */
2102         if (cpu->singlestep_enabled) {
2103             ret = EXCP_DEBUG;
2104         }
2105     }
2106 
2107     return ret;
2108 }
2109 
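/* Migrate the paused vtimer value so the guest clock survives save/restore. */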
2110 static const VMStateDescription vmstate_hvf_vtimer = {
2111     .name = "hvf-vtimer",
2112     .version_id = 1,
2113     .minimum_version_id = 1,
2114     .fields = (const VMStateField[]) {
2115         VMSTATE_UINT64(vtimer_val, HVFVTimer),
2116         VMSTATE_END_OF_LIST()
2117     },
2118 };
2119 
2120 static void hvf_vm_state_change(void *opaque, bool running, RunState state)
2121 {
2122     HVFVTimer *s = opaque;
2123 
2124     if (running) {
2125         /* Update vtimer offset on all CPUs */
2126         hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
2127         cpu_synchronize_all_states();
2128     } else {
2129         /* Remember vtimer value on every pause */
2130         s->vtimer_val = hvf_vtimer_val_raw();
2131     }
2132 }
2133 
2134 int hvf_arch_init(void)
2135 {
2136     hvf_state->vtimer_offset = mach_absolute_time();
2137     vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
2138     qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);
2139 
2140     hvf_arm_init_debug();
2141 
2142     return 0;
2143 }
2144 
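/* AArch64 "BRK #0" instruction, used as the software breakpoint opcode. */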
2145 static const uint32_t brk_insn = 0xd4200000;
2146 
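/*
 * Software breakpoints: save the original instruction and patch in BRK;
 * removal restores it after checking the BRK is still in place.
 */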
2147 int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
2148 {
2149     if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
2150         cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
2151         return -EINVAL;
2152     }
2153     return 0;
2154 }
2155 
2156 int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
2157 {
2158     static uint32_t brk;
2159 
2160     if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&brk, 4, 0) ||
2161         brk != brk_insn ||
2162         cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
2163         return -EINVAL;
2164     }
2165     return 0;
2166 }
2167 
2168 int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
2169 {
2170     switch (type) {
2171     case GDB_BREAKPOINT_HW:
2172         return insert_hw_breakpoint(addr);
2173     case GDB_WATCHPOINT_READ:
2174     case GDB_WATCHPOINT_WRITE:
2175     case GDB_WATCHPOINT_ACCESS:
2176         return insert_hw_watchpoint(addr, len, type);
2177     default:
2178         return -ENOSYS;
2179     }
2180 }
2181 
2182 int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
2183 {
2184     switch (type) {
2185     case GDB_BREAKPOINT_HW:
2186         return delete_hw_breakpoint(addr);
2187     case GDB_WATCHPOINT_READ:
2188     case GDB_WATCHPOINT_WRITE:
2189     case GDB_WATCHPOINT_ACCESS:
2190         return delete_hw_watchpoint(addr, len, type);
2191     default:
2192         return -ENOSYS;
2193     }
2194 }
2195 
2196 void hvf_arch_remove_all_hw_breakpoints(void)
2197 {
2198     if (cur_hw_wps > 0) {
2199         g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
2200     }
2201     if (cur_hw_bps > 0) {
2202         g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
2203     }
2204 }
2205 
2206 /*
2207  * Update the vCPU with the gdbstub's view of debug registers. This view
2208  * consists of all hardware breakpoints and watchpoints inserted so far while
2209  * debugging the guest.
2210  */
2211 static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
2212 {
2213     hv_return_t r = HV_SUCCESS;
2214     int i;
2215 
2216     for (i = 0; i < cur_hw_bps; i++) {
2217         HWBreakpoint *bp = get_hw_bp(i);
2218         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], bp->bcr);
2219         assert_hvf_ok(r);
2220         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], bp->bvr);
2221         assert_hvf_ok(r);
2222     }
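    /* Disable the remaining breakpoint slots so stale values cannot fire. */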
2223     for (i = cur_hw_bps; i < max_hw_bps; i++) {
2224         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], 0);
2225         assert_hvf_ok(r);
2226         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], 0);
2227         assert_hvf_ok(r);
2228     }
2229 
2230     for (i = 0; i < cur_hw_wps; i++) {
2231         HWWatchpoint *wp = get_hw_wp(i);
2232         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], wp->wcr);
2233         assert_hvf_ok(r);
2234         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], wp->wvr);
2235         assert_hvf_ok(r);
2236     }
2237     for (i = cur_hw_wps; i < max_hw_wps; i++) {
2238         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], 0);
2239         assert_hvf_ok(r);
2240         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], 0);
2241         assert_hvf_ok(r);
2242     }
2243 }
2244 
2245 /*
2246  * Update the vCPU with the guest's view of debug registers. This view is kept
2247  * in the environment at all times.
2248  */
2249 static void hvf_put_guest_debug_registers(CPUState *cpu)
2250 {
2251     ARMCPU *arm_cpu = ARM_CPU(cpu);
2252     CPUARMState *env = &arm_cpu->env;
2253     hv_return_t r = HV_SUCCESS;
2254     int i;
2255 
2256     for (i = 0; i < max_hw_bps; i++) {
2257         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i],
2258                                 env->cp15.dbgbcr[i]);
2259         assert_hvf_ok(r);
2260         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i],
2261                                 env->cp15.dbgbvr[i]);
2262         assert_hvf_ok(r);
2263     }
2264 
2265     for (i = 0; i < max_hw_wps; i++) {
2266         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i],
2267                                 env->cp15.dbgwcr[i]);
2268         assert_hvf_ok(r);
2269         r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i],
2270                                 env->cp15.dbgwvr[i]);
2271         assert_hvf_ok(r);
2272     }
2273 }
2274 
2275 static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
2276 {
2277     return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
2278 }
2279 
2280 static void hvf_arch_set_traps(void)
2281 {
2282     CPUState *cpu;
2283     bool should_enable_traps = false;
2284     hv_return_t r = HV_SUCCESS;
2285 
    /*
     * Check whether guest debugging is enabled for at least one vCPU; if it
     * is, enable exiting the guest on all vCPUs.
     */
2288     CPU_FOREACH(cpu) {
2289         should_enable_traps |= cpu->accel->guest_debug_enabled;
2290     }
2291     CPU_FOREACH(cpu) {
2292         /* Set whether debug exceptions exit the guest */
2293         r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
2294                                               should_enable_traps);
2295         assert_hvf_ok(r);
2296 
2297         /* Set whether accesses to debug registers exit the guest */
2298         r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
2299                                                 should_enable_traps);
2300         assert_hvf_ok(r);
2301     }
2302 }
2303 
2304 void hvf_arch_update_guest_debug(CPUState *cpu)
2305 {
2306     ARMCPU *arm_cpu = ARM_CPU(cpu);
2307     CPUARMState *env = &arm_cpu->env;
2308 
2309     /* Check whether guest debugging is enabled */
2310     cpu->accel->guest_debug_enabled = cpu->singlestep_enabled ||
2311                                     hvf_sw_breakpoints_active(cpu) ||
2312                                     hvf_arm_hw_debug_active(cpu);
2313 
2314     /* Update debug registers */
2315     if (cpu->accel->guest_debug_enabled) {
2316         hvf_put_gdbstub_debug_registers(cpu);
2317     } else {
2318         hvf_put_guest_debug_registers(cpu);
2319     }
2320 
2321     cpu_synchronize_state(cpu);
2322 
2323     /* Enable/disable single-stepping */
2324     if (cpu->singlestep_enabled) {
2325         env->cp15.mdscr_el1 =
2326             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 1);
2327         pstate_write(env, pstate_read(env) | PSTATE_SS);
2328     } else {
2329         env->cp15.mdscr_el1 =
2330             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_SS_SHIFT, 1, 0);
2331     }
2332 
2333     /* Enable/disable Breakpoint exceptions */
2334     if (hvf_arm_hw_debug_active(cpu)) {
2335         env->cp15.mdscr_el1 =
2336             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 1);
2337     } else {
2338         env->cp15.mdscr_el1 =
2339             deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
2340     }
2341 
2342     hvf_arch_set_traps();
2343 }
2344 
2345 bool hvf_arch_supports_guest_debug(void)
2346 {
2347     return true;
2348 }
2349