xref: /qemu/target/arm/internals.h (revision cc1f4b34d011e908dcaf24721f1d5808e02ab0bd)
/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/hwaddr.h"
#include "exec/vaddr.h"
#include "exec/breakpoint.h"
#include "accel/tcg/tb-cpu-state.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "system/memory.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000
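
/*
 * Illustrative sketch (not part of this header's API): a machine model
 * wanting the pre-9.0 behaviour could override the default through the
 * "cntfrq" CPU property mentioned above, e.g.:
 *
 *     qdev_prop_set_uint64(DEVICE(cpu), "cntfrq", GTIMER_BACKCOMPAT_HZ);
 */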

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function or exception return
 * when using the v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
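
/*
 * Illustrative check (sketch only): because FNC_RETURN_MIN_MAGIC is
 * below EXC_RETURN_MIN_MAGIC, a single comparison classifies a v8M pc
 * value as "some kind of magic return" before distinguishing the two:
 *
 *     if (pc >= FNC_RETURN_MIN_MAGIC) {
 *         bool exc_return = pc >= EXC_RETURN_MIN_MAGIC;
 *         ...
 *     }
 */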

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)
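
/*
 * Fields declared with FIELD() are read and written via the
 * registerfields.h accessors; for example (sketch), extracting the
 * byte-address-select bits of watchpoint n:
 *
 *     int bas = FIELD_EX64(env->cp15.dbgwcr[n], DBGWCR, BAS);
 */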

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
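
/*
 * Illustrative sketch of the indexing convention described above, as
 * used when saving AArch32 banked state for a given mode:
 *
 *     env->banked_r13[bank_number(mode)] = env->regs[13];
 *     env->banked_r14[r14_bank_number(mode)] = env->regs[14];
 */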

void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
void arm_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs);
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
int arm_cpu_mmu_index(CPUState *cs, bool ifetch);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}
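
/*
 * Typical use (sketch): convert an architectural rounding mode to the
 * softfloat enum before applying it to a float_status, e.g.:
 *
 *     set_float_rounding_mode(arm_rmode_to_sf(FPROUNDING_ZERO), fpst);
 */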

/* Return the effective value of SCR_EL3.RW */
static inline bool arm_scr_rw_eff(CPUARMState *env)
{
    /*
     * SCR_EL3.RW has an effective value of 1 if:
     *  - we are NS and EL2 is implemented but doesn't support AArch32
     *  - we are S and EL2 is enabled (in which case it must be AArch64)
     */
    ARMCPU *cpu = env_archcpu(env);

    if (env->cp15.scr_el3 & SCR_RW) {
        return true;
    }
    if (env->cp15.scr_el3 & SCR_NS) {
        return arm_feature(env, ARM_FEATURE_EL2) &&
            !cpu_isar_feature(aa64_aa32_el2, cpu);
    } else {
        return env->cp15.scr_el3 & SCR_EEL2;
    }
}

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /*
     * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /*
     * The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && arm_scr_rw_eff(env);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_is_el2_enabled(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/*
 * Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[env->v7m.secure] & 1);
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in EL3 */
            return 3;
        }

        return 1;
    }
}

static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                  bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /*
     * In system mode, BE32 is modelled in line with the
     * architecture (as word-invariant big-endianness), where loads
     * and stores are done little endian but from addresses which
     * are adjusted by XORing with the appropriate constant. So the
     * endianness to use for the raw data access is not affected by
     * SCTLR.B.
     * In user mode, however, we model BE32 as byte-invariant
     * big-endianness (because user-only code cannot tell the
     * difference), and so we need to use a data access endianness
     * that depends on SCTLR.B.
     */
    if (sctlr_b) {
        return true;
    }
#endif
    /* In 32-bit mode, endianness is determined by the CPSR E bit */
    return env->uncached_cpsr & CPSR_E;
}

static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
{
    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    if (!is_a64(env)) {
        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
    } else {
        int cur_el = arm_current_el(env);
        uint64_t sctlr = arm_sctlr(env, cur_el);
        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
    }
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
}
#endif

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/*
 * round_down_to_parange_index
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range and returns the index for this. The index is intended to
 * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
 */
uint8_t round_down_to_parange_index(uint8_t bit_size);

/*
 * round_down_to_parange_bit_size
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range bit size and returns this.
 */
uint8_t round_down_to_parange_bit_size(uint8_t bit_size);
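
/*
 * For example (sketch), given the ARM PARange sizes (32, 36, 40, 42,
 * 44, 48 and 52 bits), a requested bit size of 49 rounds down to 48:
 *
 *     round_down_to_parange_bit_size(49);  -> 48
 *     round_down_to_parange_index(49);     -> 5, the encoding for 48 bits
 */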

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    hwaddr s2addr;
    hwaddr paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
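
/*
 * Worked example (sketch): a level 1 translation fault in domain 5
 * encodes as FSC 0x5 with the domain in bits [7:4]:
 *
 *     ARMMMUFaultInfo fi = {
 *         .type = ARMFault_Translation, .level = 1, .domain = 5,
 *     };
 *     arm_fi_to_sfsc(&fi);  -> 0x55
 */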

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
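
/*
 * Worked example (sketch): a level 2 translation fault encodes as
 * FSC 0b000110, with the LPAE bit also set in the returned value:
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 2 };
 *     arm_fi_to_lfsc(&fi);  -> 0x206, i.e. 0b000110 | (1 << 9)
 */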

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/*
 * Return true if this address translation regime has two ranges.
 * Note that this will not return the correct answer for AArch32
 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
 * never called from a context where EL3 can be AArch32. (The
 * correct return value for ARMMMUIdx_E3 would be different for
 * that case, so we can't just make the function return the
 * correct value anyway; we would need an extra "bool e3_is_aarch32"
 * argument which all the current callsites would pass as 'false'.)
 */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E30_3_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI
 * bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}
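
/*
 * Callers typically use this mask to strip CPSR bits which do not exist
 * on the current CPU before writing them back; for example (sketch):
 *
 *     val &= aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
 */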

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
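
/*
 * The granule (page) size in bytes is then 1 << arm_granule_bits(gran);
 * for example (sketch):
 *
 *     uint64_t page_size = 1ULL << arm_granule_bits(Gran16K);  i.e. 16KB
 */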

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, the fields
 * of @result may not be filled in, and the populated @fi value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
1585  */
1586 bool get_phys_addr(CPUARMState *env, vaddr address,
1587                    MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
1588                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1589     __attribute__((nonnull));
1590 
1591 /**
1592  * get_phys_addr_with_space_nogpc: get the physical address for a virtual
1593  *                                 address
1594  * @env: CPUARMState
1595  * @address: virtual address to get physical address for
1596  * @access_type: 0 for read, 1 for write, 2 for execute
1597  * @memop: memory operation feeding this access, or 0 for none
1598  * @mmu_idx: MMU index indicating required translation regime
1599  * @space: security space for the access
1600  * @result: set on translation success.
1601  * @fi: set to fault info if the translation fails
1602  *
1603  * Similar to get_phys_addr, but use the given security space and don't perform
1604  * a Granule Protection Check on the resulting address.
1605  */
1606 bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
1607                                     MMUAccessType access_type, MemOp memop,
1608                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
1609                                     GetPhysAddrResult *result,
1610                                     ARMMMUFaultInfo *fi)
1611     __attribute__((nonnull));
1612 
1613 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1614                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
1615                        bool is_secure, GetPhysAddrResult *result,
1616                        ARMMMUFaultInfo *fi, uint32_t *mregion);
1617 
1618 void arm_log_exception(CPUState *cs);
1619 
1620 #endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)
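
/*
 * Hedged example (make_preddesc_example is invented for illustration):
 * packing a predicate descriptor for a 256-bit vector. The predicate
 * is 1/8 the vector size, i.e. 4 bytes, and element size 2 (MO_32)
 * describes 32-bit elements.
 */
static inline uint32_t make_preddesc_example(void)
{
    uint32_t desc = 0;
    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, 4); /* predicate bytes */
    desc = FIELD_DP32(desc, PREDDESC, ESZ, 2);   /* MO_32 elements */
    return desc;
}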

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */
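
/*
 * Hedged example (make_mtedesc_example is invented for illustration):
 * a descriptor for a tag-checked 16-byte write through MMU index 1.
 * The values are illustrative, not taken from real CPU state.
 */
static inline uint32_t make_mtedesc_example(void)
{
    uint32_t desc = 0;
    desc = FIELD_DP32(desc, MTEDESC, MIDX, 1);
    desc = FIELD_DP32(desc, MTEDESC, WRITE, 1);
    desc = FIELD_DP32(desc, MTEDESC, SIZEM1, 16 - 1);
    return desc;
}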

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);
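
/*
 * Hedged usage sketch (mops_safe_prefix_example is invented for
 * illustration): a forwards FEAT_MOPS step might clamp its chunk to
 * the prefix that is safe against tag-check failures.
 */
static inline uint64_t mops_safe_prefix_example(CPUARMState *env,
                                                uint64_t ptr, uint64_t size,
                                                uint32_t desc)
{
    /* A zero descriptor means no MTE checks are required at all */
    return desc ? mte_mops_probe(env, ptr, size, desc) : size;
}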

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may not return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
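
/*
 * Hedged sanity-check sketch (tag_roundtrip_example is invented for
 * illustration): the allocation tag lives in bits [59:56], so
 * inserting a tag and reading it back round-trips.
 */
static inline bool tag_roundtrip_example(uint64_t ptr, int tag)
{
    uint64_t tagged = address_with_allocation_tag(ptr, tag);
    return allocation_tag_from_addr(tagged) == (tag & 0xf);
}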

/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
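     * E.g. bit55 == 1 requires tag 0b1111: (15 + 1) & 0xf == 0.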
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
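    /*
     * sextract64 sign-extends from bit 55: for bit55 == 0 the mask has
     * a zero top byte (clearing the tag), for bit55 == 1 it is all-ones
     * (leaving the pointer unchanged).
     */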
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
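
/*
 * Hedged sketch (pmcr_write_example is invented for illustration): a
 * PMCR write only changes the writable bits, so PMCRN and other RO
 * fields in the stored value are preserved.
 */
static inline uint64_t pmcr_write_example(uint64_t old_pmcr, uint64_t value)
{
    return (old_pmcr & ~(uint64_t)PMCR_WRITABLE_MASK)
         | (value & PMCR_WRITABLE_MASK);
}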

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
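
/*
 * Hedged usage sketch (pmcnten_set_example is invented for
 * illustration): bit 31 is the cycle counter and bits [N-1:0] are the
 * event counters, so a PMCNTENSET-style write is masked like this.
 */
static inline uint64_t pmcnten_set_example(CPUARMState *env,
                                           uint64_t cnten, uint64_t value)
{
    return cnten | (value & pmu_counter_mask(env));
}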

GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);

/* Return true if the gdbstub is presenting an AArch64 CPU */
static inline bool arm_gdbstub_is_aarch64(ARMCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU);
}

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (e.g. by changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
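
/*
 * Hedged worked example: with param.tbi == 1 and param.tsz == 25 the
 * PAC occupies bits [55:39], i.e. MAKE_64BIT_MASK(39, 17).
 */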

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
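
/*
 * Hedged expansion for reference: the map sets bits 0, 1, 3, 7 and 15
 * (for VQ 1, 2, 4, 8 and 16), i.e. the value 0x808b.
 */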

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen in the case where EL0 is AArch32.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol, which
 * drives all updates. Read access (i.e. when the values are copied to
 * the vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable: when debugging a kernel you rarely know
 * which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))
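
/*
 * Hedged usage sketch (any_bp_matches_example is invented for
 * illustration): walking the global breakpoint pool with the accessor
 * macros above. Matching on the raw BVR value is a simplification.
 */
static inline bool any_bp_matches_example(uint64_t addr)
{
    for (unsigned i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == addr) {
            return true;
        }
    }
    return false;
}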

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, vaddr addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr);
int insert_hw_watchpoint(vaddr addr, vaddr len, int type);
int delete_hw_watchpoint(vaddr addr, vaddr len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and the counter for the specified timer, as used for direct register
 * accesses.
 */
uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);

/*
 * Return mask of ARMMMUIdxBit values corresponding to an "invalidate
 * all EL1" scope; this covers stage 1 and stage 2.
 */
int alle1_tlbmask(CPUARMState *env);

/* Set the float_status behaviour to match the Arm defaults */
void arm_set_default_fp_behaviours(float_status *s);
/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
void arm_set_ah_fp_behaviours(float_status *s);
/* Read the float_status info and return the appropriate FPSR value */
uint32_t vfp_get_fpsr_from_host(CPUARMState *env);
/* Clear the exception status flags from all float_status fields */
void vfp_clear_float_status_exc_flags(CPUARMState *env);
/*
 * Update float_status fields to handle the bits of the FPCR
 * specified by mask changing to the values in val.
 */
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);

#endif