/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000
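
/*
 * Illustrative arithmetic only: at GTIMER_DEFAULT_HZ (1GHz) one tick is
 * 1s / 1e9 = 1ns, while at GTIMER_BACKCOMPAT_HZ (62.5MHz) one tick is
 * 1s / 62.5e6 = 16ns, matching the tick period described above.
 */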

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1) /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1) /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1) /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)   /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1) /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)     /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)   /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)   /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)   /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)  /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)  /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)   /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME (1U << 28)
#define MDCR_TDCC  (1U << 27)
#define MDCR_HLP   (1U << 26) /* MDCR_EL2 */
#define MDCR_SCCD  (1U << 23) /* MDCR_EL3 */
#define MDCR_HCCD  (1U << 23) /* MDCR_EL2 */
#define MDCR_EPMAD (1U << 21)
#define MDCR_EDAD  (1U << 20)
#define MDCR_TTRF  (1U << 19)
#define MDCR_STE   (1U << 18) /* MDCR_EL3 */
#define MDCR_SPME  (1U << 17) /* MDCR_EL3 */
#define MDCR_HPMD  (1U << 17) /* MDCR_EL2 */
#define MDCR_SDD   (1U << 16)
#define MDCR_SPD   (3U << 14)
#define MDCR_TDRA  (1U << 11)
#define MDCR_TDOSA (1U << 10)
#define MDCR_TDA   (1U << 9)
#define MDCR_TDE   (1U << 8)
#define MDCR_HPME  (1U << 7)
#define MDCR_TPM   (1U << 6)
#define MDCR_TPMCR (1U << 5)
#define MDCR_HPMN  (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK  (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                          MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                          MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N     (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ  (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0   (1U << 4)
#define TTBCR_PD1   (1U << 5)
#define TTBCR_EPD0  (1U << 7)
#define TTBCR_IRGN0 (3U << 8)
#define TTBCR_ORGN0 (3U << 10)
#define TTBCR_SH0   (3U << 12)
#define TTBCR_T1SZ  (3U << 16)
#define TTBCR_A1    (1U << 22)
#define TTBCR_EPD1  (1U << 23)
#define TTBCR_IRGN1 (3U << 24)
#define TTBCR_ORGN1 (3U << 26)
#define TTBCR_SH1   (1U << 28)
#define TTBCR_EAE   (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0   (1ULL << 0)
#define HCRX_ENALS   (1ULL << 1)
#define HCRX_ENASR   (1ULL << 2)
#define HCRX_FNXS    (1ULL << 3)
#define HCRX_FGTNXS  (1ULL << 4)
#define HCRX_SMPME   (1ULL << 5)
#define HCRX_TALLINT (1ULL << 6)
#define HCRX_VINMI   (1ULL << 7)
#define HCRX_VFNMI   (1ULL << 8)
#define HCRX_CMOW    (1ULL << 9)
#define HCRX_MCE2    (1ULL << 10)
#define HCRX_MSCEN   (1ULL << 11)

#define HPFAR_NS     (1ULL << 63)

#define HSTR_TTEE  (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
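
/*
 * Illustrative sketch only (not called from anywhere; 'mode' is a
 * hypothetical AArch32 mode value): when saving banked state, R13 and
 * SPSR use bank_number() while R14 uses r14_bank_number():
 *
 *     env->banked_r13[bank_number(mode)] = env->regs[13];
 *     env->banked_r14[r14_bank_number(mode)] = env->regs[14];
 *     env->banked_spsr[bank_number(mode)] = env->spsr;
 *
 * so that Hyp mode's LR lands in the USR/SYS slot, as described above.
 */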

void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
void arm_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/*
 * round_down_to_parange_index
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range and returns the index for this. The index is intended to
 * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
 */
uint8_t round_down_to_parange_index(uint8_t bit_size);

/*
 * round_down_to_parange_bit_size
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range bit size and returns this.
 */
uint8_t round_down_to_parange_bit_size(uint8_t bit_size);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault,  /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
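
/*
 * Worked example for the two encoders above (illustrative only): a level-2
 * Translation fault with fi->domain == 1 and fi->ea == 0 encodes as
 *   arm_fi_to_sfsc(): 0x7 | (1 << 4)              -> 0x17  (short format)
 *   arm_fi_to_lfsc(): (0b000100 | 2) | (1 << 9)   -> 0x206 (long format)
 * where bit 9 is the LPAE bit that arm_fi_to_lfsc() always sets.
 */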

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/*
 * Return true if this address translation regime has two ranges.
 * Note that this will not return the correct answer for AArch32
 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
 * never called from a context where EL3 can be AArch32. (The
 * correct return value for ARMMMUIdx_E3 would be different for
 * that case, so we can't just make the function return the
 * correct value anyway; we would need an extra "bool e3_is_aarch32"
 * argument which all the current callsites would pass as 'false'.)
 */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E30_3_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the
 * HCRX_EL2.VINMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
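
/*
 * For illustration: the page size of a granule is 1 << arm_granule_bits(),
 * so Gran4K -> 1 << 12 = 4KiB, Gran16K -> 1 << 14 = 16KiB and
 * Gran64K -> 1 << 16 = 64KiB.
 */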

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the format
 * of a DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));
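
/*
 * Hedged usage sketch (hypothetical caller, not part of this header; the
 * 'addr' and 'mmu_idx' variables are assumptions). The general shape of a
 * lookup is:
 *
 *     GetPhysAddrResult res = {};
 *     ARMMMUFaultInfo fi = {};
 *
 *     if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
 *         // translation failed: report it using fi,
 *         // e.g. via arm_fi_to_lfsc() or arm_fi_to_sfsc()
 *     } else {
 *         // translation succeeded: consume res.f (e.g. res.f.phys_addr)
 *         // and res.cacheattrs
 *     }
 */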

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
                                    MMUAccessType access_type, MemOp memop,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
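
/*
 * Sketch of how a translator might pack the MTEDESC fields above using the
 * FIELD_DP32() macro from hw/registerfields.h (illustrative only; the local
 * variables here are hypothetical):
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_index);
 *     desc = FIELD_DP32(desc, MTEDESC, TBI, tbi_bits);
 *     desc = FIELD_DP32(desc, MTEDESC, TCMA, tcma_bits);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, access_size - 1);
 *
 * The resulting value is then passed as @desc to mte_check() or mte_probe().
 */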

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
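
/*
 * Illustrative example of the tag helpers above, assuming a hypothetical
 * tagged pointer value: for ptr = 0x0a00ffff12340000ULL,
 * allocation_tag_from_addr(ptr) extracts bits [59:56] and returns 0xa, and
 * address_with_allocation_tag(ptr, 0x3) returns 0x0300ffff12340000ULL.
 * tbi_check(desc, bit55) selects the TBI0 or TBI1 bit of the MTEDESC.TBI
 * field according to bit 55 of the pointer.
 */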

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P        0x80000000
#define PMXEVTYPER_U        0x40000000
#define PMXEVTYPER_NSK      0x20000000
#define PMXEVTYPER_NSU      0x10000000
#define PMXEVTYPER_NSH      0x08000000
#define PMXEVTYPER_M        0x04000000
#define PMXEVTYPER_MT       0x02000000
#define PMXEVTYPER_EVTCOUNT 0x0000ffff
#define PMXEVTYPER_MASK     (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                             PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                             PMXEVTYPER_M | PMXEVTYPER_MT | \
                             PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR     0xf8000000
#define PMCCFILTR_M   PMXEVTYPER_M
#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}

#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen in the case where EL0 is AArch32.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps   (hw_watchpoints->len)
#define cur_hw_bps   (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
 */
uint64_t gt_virt_cnt_offset(CPUARMState *env);

/*
 * Return mask of ARMMMUIdxBit values corresponding to an "invalidate
 * all EL1" scope; this covers stage 1 and stage 2.
 */
int alle1_tlbmask(CPUARMState *env);

/* Set the float_status behaviour to match the Arm defaults */
void arm_set_default_fp_behaviours(float_status *s);
/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
void arm_set_ah_fp_behaviours(float_status *s);

#endif