/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/hwaddr.h"
#include "exec/vaddr.h"
#include "exec/breakpoint.h"
#include "accel/tcg/tb-cpu-state.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "system/memory.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /*
     * Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

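/*
 * These FIELD() definitions are consumed via the hw/registerfields.h
 * accessors; an illustrative sketch of reading and writing SPSEL:
 *
 *     bool spsel = FIELD_EX32(env->v7m.control[env->v7m.secure],
 *                             V7M_CONTROL, SPSEL);
 *     control = FIELD_DP32(control, V7M_CONTROL, SPSEL, 1);
 */
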
/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/*
 * Minimum value which is a magic number for function or exception return
 * when using the v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
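/*
 * Illustrative sketch of how these are consulted: on v8M, a branch to
 * an address at or above EXC_RETURN_MIN_MAGIC in Handler mode is an
 * exception return, and the FNC_RETURN range marks a secure function
 * return:
 *
 *     if (dest >= EXC_RETURN_MIN_MAGIC) {
 *         ... unstack the exception frame ...
 *     } else if (dest >= FNC_RETURN_MIN_MAGIC) {
 *         ... v8M secure function return ...
 *     }
 */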

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)
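/*
 * Sketch of the E2H disambiguation in practice: a consumer of EL1PCTEN
 * must pick the field definition matching the effective HCR_EL2.E2H
 * value, e.g.
 *
 *     if (arm_hcr_el2_eff(env) & HCR_E2H) {
 *         trap = !FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H1);
 *     } else {
 *         trap = !FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H0);
 *     }
 */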

/*
 * We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);
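/*
 * Typical use from a TCG helper (illustrative sketch; the syndrome
 * constructors live in syndrome.h):
 *
 *     raise_exception(env, EXCP_UDEF, syn_uncategorized(), 1);
 *
 * or, when the helper can be reached mid-TB and the CPU state must be
 * unwound from the host return address first:
 *
 *     raise_exception_ra(env, EXCP_UDEF, syn_uncategorized(), 1, GETPC());
 */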

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

void arm_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
void arm_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs);
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
int arm_cpu_mmu_index(CPUState *cs, bool ifetch);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

/* Return the effective value of SCR_EL3.RW */
static inline bool arm_scr_rw_eff(CPUARMState *env)
{
    /*
     * SCR_EL3.RW has an effective value of 1 if:
     *  - we are NS and EL2 is implemented but doesn't support AArch32
     *  - we are S and EL2 is enabled (in which case it must be AArch64)
     */
    ARMCPU *cpu = env_archcpu(env);

    if (env->cp15.scr_el3 & SCR_RW) {
        return true;
    }
    if (env->cp15.scr_el3 & SCR_NS) {
        return arm_feature(env, ARM_FEATURE_EL2) &&
            !cpu_isar_feature(aa64_aa32_el2, cpu);
    } else {
        return env->cp15.scr_el3 & SCR_EEL2;
    }
}

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /*
     * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /*
     * The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && arm_scr_rw_eff(env);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_is_el2_enabled(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/*
 * Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[env->v7m.secure] & 1);
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in EL3 */
            return 3;
        }

        return 1;
    }
}

static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                  bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /*
     * In system mode, BE32 is modelled in line with the
     * architecture (as word-invariant big-endianness), where loads
     * and stores are done little endian but from addresses which
     * are adjusted by XORing with the appropriate constant. So the
     * endianness to use for the raw data access is not affected by
     * SCTLR.B.
     * In user mode, however, we model BE32 as byte-invariant
     * big-endianness (because user-only code cannot tell the
     * difference), and so we need to use a data access endianness
     * that depends on SCTLR.B.
     */
    if (sctlr_b) {
        return true;
    }
#endif
    /* In 32bit endianness is determined by looking at CPSR's E bit */
    return env->uncached_cpsr & CPSR_E;
}
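/*
 * Concretely, the system-mode BE32 address adjustment XORs the low
 * address bits with (4 - access_size): a byte access to address A is
 * performed at A ^ 3, a halfword access at A ^ 2, and a word access is
 * unchanged. arm_adjust_watchpoint_address() below applies the same
 * transformation to watchpoint addresses.
 */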

static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
{
    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    if (!is_a64(env)) {
        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
    } else {
        int cur_el = arm_current_el(env);
        uint64_t sctlr = arm_sctlr(env, cur_el);
        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
    }
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
}
#endif

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /*
     * Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /*
     * We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/*
 * round_down_to_parange_index
 * @bit_size: uint8_t
 *
 * Rounds down the supplied bit_size to the nearest supported ARM physical
 * address range size and returns the index for it. The index is intended
 * to be used to set ID_AA64MMFR0_EL1's PARANGE bits.
 */
uint8_t round_down_to_parange_index(uint8_t bit_size);

/*
 * round_down_to_parange_bit_size
 * @bit_size: uint8_t
 *
 * Rounds down the supplied bit_size to the nearest supported ARM physical
 * address range bit size and returns it.
 */
uint8_t round_down_to_parange_bit_size(uint8_t bit_size);
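/*
 * For example (sketch, using the architectural PARANGE encodings in
 * which index 5 is 48 bits and index 6 is 52 bits):
 *
 *     round_down_to_parange_bit_size(50)  -> 48
 *     round_down_to_parange_index(50)     -> 5
 */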

/*
 * Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/*
 * Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/*
 * Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/*
 * Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/*
 * Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/*
 * Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    hwaddr s2addr;
    hwaddr paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};
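/*
 * For instance, a stage-2 translation fault taken while walking the
 * stage-1 tables might be reported as (sketch; descaddr stands for the
 * faulting stage-1 descriptor address):
 *
 *     ARMMMUFaultInfo fi = {
 *         .type = ARMFault_Translation,
 *         .level = 2,
 *         .stage2 = true,
 *         .s1ptw = true,
 *         .s2addr = descaddr,
 *     };
 */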

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /*
         * Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
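/*
 * Worked example: a level-2 translation fault in domain 5 encodes as
 * 0x7 | (5 << 4) == 0x57.
 */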

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /*
         * Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
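/*
 * Worked example: a level-3 permission fault encodes as
 * (0b001100 | 3) | (1 << 9) == 0x20f, bit 9 being the DFSR LPAE bit
 * noted above.
 */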

static inline bool arm_extabort_type(MemTxResult result)
{
    /*
     * The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
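/*
 * Because ARM_MMU_IDX_A and ARM_MMU_IDX_M lie outside
 * ARM_MMU_IDX_COREIDX_MASK, the conversion round-trips (sketch):
 *
 *     assert(arm_to_core_mmu_idx(core_to_arm_mmu_idx(env, idx)) == idx);
 */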

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/*
 * Return true if this address translation regime has two ranges.
 * Note that this will not return the correct answer for AArch32
 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
 * never called from a context where EL3 can be AArch32. (The
 * correct return value for ARMMMUIdx_E3 would be different for
 * that case, so we can't just make the function return the
 * correct value anyway; we would need an extra "bool e3_is_aarch32"
 * argument which all the current callsites would pass as 'false'.)
 */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E30_3_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /*
     * Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI
 * bit. Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
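/*
 * The granule's page size follows directly, e.g. (sketch):
 *
 *     uint64_t page_size = 1ULL << arm_granule_bits(Gran16K);  // 16KB
 */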
1472 
1473 /*
1474  * Parameters of a given virtual address, as extracted from the
1475  * translation control register (TCR) for a given regime.
1476  */
1477 typedef struct ARMVAParameters {
1478     unsigned tsz    : 8;
1479     unsigned ps     : 3;
1480     unsigned sh     : 2;
1481     unsigned select : 1;
1482     bool tbi        : 1;
1483     bool epd        : 1;
1484     bool hpd        : 1;
1485     bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
1486     bool ds         : 1;
1487     bool ha         : 1;
1488     bool hd         : 1;
1489     ARMGranuleSize gran : 2;
1490 } ARMVAParameters;
1491 
1492 /**
1493  * aa64_va_parameters: Return parameters for an AArch64 virtual address
1494  * @env: CPU
1495  * @va: virtual address to look up
1496  * @mmu_idx: determines translation regime to use
1497  * @data: true if this is a data access
1498  * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
1499  *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
1500  */
1501 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
1502                                    ARMMMUIdx mmu_idx, bool data,
1503                                    bool el1_is_aa32);
1504 
1505 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
1506 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
1507 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
1508 
1509 /* Determine if allocation tags are available.  */
allocation_tag_access_enabled(CPUARMState * env,int el,uint64_t sctlr)1510 static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
1511                                                  uint64_t sctlr)
1512 {
1513     if (el < 3
1514         && arm_feature(env, ARM_FEATURE_EL3)
1515         && !(env->cp15.scr_el3 & SCR_ATA)) {
1516         return false;
1517     }
1518     if (el < 2 && arm_is_el2_enabled(env)) {
1519         uint64_t hcr = arm_hcr_el2_eff(env);
1520         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
1521             return false;
1522         }
1523     }
1524     sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
1525     return sctlr != 0;
1526 }
1527 
1528 #ifndef CONFIG_USER_ONLY
1529 
1530 /* Security attributes for an address, as returned by v8m_security_lookup. */
1531 typedef struct V8M_SAttributes {
1532     bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
1533     bool ns;
1534     bool nsc;
1535     uint8_t sregion;
1536     bool srvalid;
1537     uint8_t iregion;
1538     bool irvalid;
1539 } V8M_SAttributes;
1540 
1541 void v8m_security_lookup(CPUARMState *env, uint32_t address,
1542                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
1543                          bool secure, V8M_SAttributes *sattrs);
1544 
1545 /* Cacheability and shareability attributes for a memory access */
1546 typedef struct ARMCacheAttrs {
1547     /*
1548      * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
1549      * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
1550      */
1551     unsigned int attrs:8;
1552     unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
1553     bool is_s2_format:1;
1554 } ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, @result may
 * not be filled in, and @fi provides information on why the translation
 * aborted, in the format of a DFSR/IFSR fault register, with the
 * following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));
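
/*
 * Usage sketch (illustrative): a hypothetical caller translating a
 * guest virtual address for a data load. Note the inverted return
 * sense: get_phys_addr() returns true when the translation faults.
 */
static inline bool example_translate_load(CPUARMState *env, vaddr va,
                                          ARMMMUIdx mmu_idx, hwaddr *pa)
{
    GetPhysAddrResult res = { };
    ARMMMUFaultInfo fi = { };

    if (get_phys_addr(env, va, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
        return false; /* faulted; details are in fi */
    }
    *pa = res.f.phys_addr;
    return true;
}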

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
                                    MMUAccessType access_type, MemOp memop,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)
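
/*
 * Example (illustrative): packing a predicate descriptor with the
 * FIELD_DP32 macro from hw/registerfields.h. The helper name and its
 * parameters are hypothetical; @pred_bytes is the predicate size in
 * bytes and @esz is log2 of the element size.
 */
static inline uint32_t example_make_preddesc(unsigned pred_bytes,
                                             unsigned esz)
{
    uint32_t desc = 0;
    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_bytes);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
    return desc;
}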

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */
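
/*
 * Example (illustrative): packing an MTEDESC word for a checked
 * 16-byte write, with TBI enabled only for the bit55 == 0 half of
 * the address space. The helper name is hypothetical.
 */
static inline uint32_t example_make_mtedesc(int mmu_idx)
{
    uint32_t desc = 0;
    desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
    desc = FIELD_DP32(desc, MTEDESC, TBI, 1);        /* TBI0 only */
    desc = FIELD_DP32(desc, MTEDESC, WRITE, 1);
    desc = FIELD_DP32(desc, MTEDESC, SIZEM1, 16 - 1);
    return desc;
}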

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
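
/*
 * Usage sketch (illustrative): combining the two tests above to decide
 * whether a pointer actually needs an MTE tag check. The helper name
 * is hypothetical.
 */
static inline bool example_mte_check_needed(uint32_t desc, uint64_t ptr)
{
    int bit55 = extract64(ptr, 55, 1);
    int ptr_tag = allocation_tag_from_addr(ptr);

    return tbi_check(desc, bit55) && !tcma_check(desc, bit55, ptr_tag);
}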

/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    /* After sign-extension from bit 55, clean_ptr < 0 iff bit 55 was set. */
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
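
/*
 * Usage sketch (illustrative): folding a guest write into a stored
 * PMCR value, preserving read-only bits such as the N field and
 * ignoring the write-only C and P bits. The helper is hypothetical.
 */
static inline uint64_t example_pmcr_write(uint64_t old_pmcr, uint64_t val)
{
    return (old_pmcr & ~PMCR_WRITABLE_MASK) | (val & PMCR_WRITABLE_MASK);
}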

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
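
/*
 * Worked example (illustrative): a CPU with PMCR.N == 4 has event
 * counters 0..3 plus the cycle counter, so pmu_counter_mask() yields
 * (1ULL << 31) | 0xf == 0x8000000f.
 */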

GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);

/* Return true if the gdbstub is presenting an AArch64 CPU */
static inline bool arm_gdbstub_is_aarch64(ARMCPU *cpu)
{
    return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
}

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
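
/*
 * Worked example (illustrative): with a 48-bit VA space (tsz == 16)
 * and TBI enabled (tbi == 1), the PAC occupies bits [55:48]:
 * bot_pac_bit == 48, top_pac_bit == 56, so the mask is
 * 0x00ff000000000000.
 */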

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
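
/*
 * Worked example (illustrative): the map above sets the bits for
 * VQ values 1, 2, 4, 8 and 16, i.e. bits 0, 1, 3, 7 and 15, giving
 * the constant 0x808b.
 */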

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen in the case where EL0 is AArch32.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol, which is
 * the only thing that updates them. Read access (i.e. when the values
 * are copied to the vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable: most of the time when debugging a kernel
 * you never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))
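
/*
 * Usage sketch (illustrative): scanning the global breakpoint pool
 * for an address match with the accessor macros above. Real code
 * would also check the BCR enable and match controls; the helper
 * name is hypothetical.
 */
static inline bool example_bp_address_match(vaddr pc)
{
    for (unsigned i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}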

bool find_hw_breakpoint(CPUState *cpu, vaddr pc);
int insert_hw_breakpoint(vaddr pc);
int delete_hw_breakpoint(vaddr pc);

bool check_watchpoint_in_range(int i, vaddr addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr);
int insert_hw_watchpoint(vaddr addr, vaddr len, int type);
int delete_hw_watchpoint(vaddr addr, vaddr len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and the counter for the specified timer, as used for direct register
 * accesses.
 */
uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);

/*
 * Return mask of ARMMMUIdxBit values corresponding to an "invalidate
 * all EL1" scope; this covers stage 1 and stage 2.
 */
int alle1_tlbmask(CPUARMState *env);

/* Set the float_status behaviour to match the Arm defaults */
void arm_set_default_fp_behaviours(float_status *s);
/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
void arm_set_ah_fp_behaviours(float_status *s);
/* Read the float_status info and return the appropriate FPSR value */
uint32_t vfp_get_fpsr_from_host(CPUARMState *env);
/* Clear the exception status flags from all float_status fields */
void vfp_clear_float_status_exc_flags(CPUARMState *env);
/*
 * Update float_status fields to handle the bits of the FPCR
 * specified by mask changing to the values in val.
 */
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);

#endif