/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "system/memory.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000

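/*
 * A quick sanity check of the numbers above (illustrative only): the
 * tick period is NANOSECONDS_PER_SECOND / cntfrq, so
 *   62500000 Hz   -> 1e9 / 62.5e6 = 16 ns/tick (the back-compat value)
 *   1000000000 Hz -> 1e9 / 1e9   =  1 ns/tick (the ARMv8.6+ value)
 */
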
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

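/*
 * The FIELD() definitions above expand into *_SHIFT/_LENGTH/_MASK
 * constants usable with the accessors from hw/registerfields.h.
 * A minimal sketch of decoding a watchpoint control value (illustrative
 * only; hw_watchpoint_update(), declared below, is the real consumer):
 *
 *   uint64_t wcr = env->cp15.dbgwcr[n];
 *   if (FIELD_EX64(wcr, DBGWCR, E)) {
 *       int bas = FIELD_EX64(wcr, DBGWCR, BAS);   // byte address select
 *       int lsc = FIELD_EX64(wcr, DBGWCR, LSC);   // load/store control
 *       ...
 *   }
 */
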
#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)

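/*
 * Illustrative sketch (not a helper defined here): because the layout of
 * CNTHCTL_EL2 depends on HCR_EL2.E2H, code reading e.g. EL1PCTEN must
 * pick the right field definition at runtime:
 *
 *   bool e2h = arm_hcr_el2_eff(env) & HCR_E2H;
 *   bool el1pcten = e2h
 *       ? FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H1)   // bit 10
 *       : FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H0);  // bit 0
 */
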
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

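/*
 * A minimal sketch of how these mappings are used on an AArch32 mode
 * switch (compare switch_mode() in helper.c; illustrative only):
 *
 *   int old = bank_number(old_mode);
 *   int new = bank_number(new_mode);
 *   env->banked_r13[old] = env->regs[13];
 *   env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
 *   env->regs[13] = env->banked_r13[new];
 *   env->regs[14] = env->banked_r14[r14_bank_number(new_mode)];
 */
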
void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
void arm_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
int arm_cpu_mmu_index(CPUState *cs, bool ifetch);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

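/*
 * Illustrative use of arm_rmode_to_sf() (a sketch, not a helper defined
 * here; fpst stands for some float_status in use): temporarily imposing
 * an explicit rounding mode on a softfloat status word, as conversion
 * helpers do:
 *
 *   FloatRoundMode old = get_float_rounding_mode(fpst);
 *   set_float_rounding_mode(arm_rmode_to_sf(FPROUNDING_TIEAWAY), fpst);
 *   ... perform the operation ...
 *   set_float_rounding_mode(old, fpst);
 */
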
/* Return the effective value of SCR_EL3.RW */
static inline bool arm_scr_rw_eff(CPUARMState *env)
{
    /*
     * SCR_EL3.RW has an effective value of 1 if:
     *  - we are NS and EL2 is implemented but doesn't support AArch32
     *  - we are S and EL2 is enabled (in which case it must be AArch64)
     */
    ARMCPU *cpu = env_archcpu(env);

    if (env->cp15.scr_el3 & SCR_RW) {
        return true;
    }
    if (env->cp15.scr_el3 & SCR_NS) {
        return arm_feature(env, ARM_FEATURE_EL2) &&
            !cpu_isar_feature(aa64_aa32_el2, cpu);
    } else {
        return env->cp15.scr_el3 & SCR_EEL2;
    }
}

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /*
     * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /*
     * The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && arm_scr_rw_eff(env);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_is_el2_enabled(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/*
 * Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[env->v7m.secure] & 1);
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in EL3 */
            return 3;
        }

        return 1;
    }
}

static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                  bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /*
     * In system mode, BE32 is modelled in line with the
     * architecture (as word-invariant big-endianness), where loads
     * and stores are done little endian but from addresses which
     * are adjusted by XORing with the appropriate constant. So the
     * endianness to use for the raw data access is not affected by
     * SCTLR.B.
     * In user mode, however, we model BE32 as byte-invariant
     * big-endianness (because user-only code cannot tell the
     * difference), and so we need to use a data access endianness
     * that depends on SCTLR.B.
     */
    if (sctlr_b) {
        return true;
    }
#endif
    /* In AArch32, the data endianness is determined by the CPSR's E bit. */
    return env->uncached_cpsr & CPSR_E;
}

static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
{
    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    if (!is_a64(env)) {
        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
    } else {
        int cur_el = arm_current_el(env);
        uint64_t sctlr = arm_sctlr(env, cur_el);
        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
    }
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
}
#endif

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPSel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

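/*
 * The same save/restore pairing is what keeps xregs[31] coherent across
 * an exception-level change. A minimal sketch (illustrative only; the
 * real sequence lives in the exception entry/return code):
 *
 *   aarch64_save_sp(env, old_el);      // stash the outgoing working SP
 *   env->pstate = new_pstate;          // may change PSTATE.SP and the EL
 *   aarch64_restore_sp(env, new_el);   // load the SP for the new context
 */
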
/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/*
 * round_down_to_parange_index
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range and returns the index for this. The index is intended to
 * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
 */
uint8_t round_down_to_parange_index(uint8_t bit_size);

/*
 * round_down_to_parange_bit_size
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range bit size and returns this.
 */
uint8_t round_down_to_parange_bit_size(uint8_t bit_size);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

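/*
 * A minimal sketch of how fault-reporting code consumes this struct
 * (illustrative only): populate the fields that describe the fault,
 * then encode them in whichever FSC format the target regime uses,
 * via the two converters that follow:
 *
 *   ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 2 };
 *   uint32_t fsc = regime_using_lpae_format(env, mmu_idx)
 *       ? arm_fi_to_lfsc(&fi)     // long-descriptor format
 *       : arm_fi_to_sfsc(&fi);    // short-descriptor format
 */
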
/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

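/*
 * Hooks are added to these lists via the registration helpers declared
 * in cpu.h. A minimal sketch (illustrative only):
 *
 *   static void my_el_change_cb(ARMCPU *cpu, void *opaque) { ... }
 *   ...
 *   arm_register_el_change_hook(cpu, my_el_change_cb, NULL);
 *
 * The GICv3 CPU interface, for example, registers a hook so it can
 * recompute its state when the exception level changes.
 */
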
/*
 * Return true if this address translation regime has two ranges.
 * Note that this will not return the correct answer for AArch32
 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
 * never called from a context where EL3 can be AArch32. (The
 * correct return value for ARMMMUIdx_E3 would be different for
 * that case, so we can't just make the function return the
 * correct value anyway; we would need an extra "bool e3_is_aarch32"
 * argument which all the current callsites would pass as 'false'.)
 */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E30_3_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

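/*
 * A minimal sketch of the v8M stack-limit check this helper supports
 * (illustrative only; the real checks live in the M-profile helpers
 * and generated code):
 *
 *   uint32_t newsp = ...;
 *   if (newsp < v7m_sp_limit(env)) {
 *       ... raise a STKOF UsageFault ...
 *   }
 */
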
/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

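/*
 * Typical (illustrative) use in exception logging:
 *
 *   qemu_log_mask(CPU_LOG_INT, "...taken to %s mode\n",
 *                 aarch32_mode_name(env->uncached_cpsr));
 */
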
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

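/*
 * These masks are used, for instance, to validate an SPSR on exception
 * return. A minimal sketch (illustrative only):
 *
 *   uint32_t spsr = ...;
 *   if (spsr & ~aarch64_pstate_valid_mask(&env_archcpu(env)->isar)) {
 *       ... treat as an illegal exception return ...
 *   }
 */
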
/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

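/*
 * A minimal sketch of a get_phys_addr() call (illustrative only; see
 * the documentation comment below for the exact contract):
 *
 *   GetPhysAddrResult res = {};
 *   ARMMMUFaultInfo fi = {};
 *   if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
 *       ... translation failed: report the fault described by fi ...
 *   } else {
 *       hwaddr pa = res.f.phys_addr;  // plus attrs, prot, lg_page_size
 *   }
 */
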
/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, @result may
 * not be filled in, and @fi provides information on why the translation
 * aborted, in the format of a DFSR/IFSR fault register, with the following
 * caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
                                    MMUAccessType access_type, MemOp memop,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */

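/*
 * A minimal sketch of packing an MTE descriptor at translate time
 * (illustrative only; the field values named here stand in for whatever
 * the caller has computed from the TCR and access):
 *
 *   uint32_t desc = 0;
 *   desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *   desc = FIELD_DP32(desc, MTEDESC, TBI, tbi);        // from TCR_ELx
 *   desc = FIELD_DP32(desc, MTEDESC, TCMA, tcma);      // from TCR_ELx
 *   desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 *   desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
 */
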
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

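/*
 * To see why the fold in tcma_check() works (a worked example, not new
 * behaviour): the two "all-zeroes / all-ones" cases are
 *   bit55 == 0, ptr_tag == 0x0:  (0x0 + 0) & 0xf == 0  -> match
 *   bit55 == 1, ptr_tag == 0xf:  (0xf + 1) & 0xf == 0  -> match
 * while any other tag value leaves a non-zero low nibble.
 */
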
/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}

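/*
 * Worked example: with pmu_num_counters() == 4 the mask is
 *   (1 << 31) | ((1 << 4) - 1) == 0x8000000f
 * i.e. bit 31 for the cycle counter plus one bit per event counter.
 */
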

#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
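
/*
 * Worked example (illustrative only): with param.tsz == 25 and
 * param.tbi == 1, bot_pac_bit == 39 and top_pac_bit == 56, so the PAC
 * occupies bits [55:39] and the mask is 0x00ffff8000000000. With
 * tbi == 0 the PAC extends up to bit 63 and the mask becomes
 * 0xffffff8000000000.
 */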

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen when EL0 is AArch32.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol, which is
 * what performs the updates. Read access (i.e. when the values are
 * copied to the vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable, as most of the time when debugging kernels
 * you never know which core will eventually execute your code.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and the counter for the specified timer, as used for direct register
 * accesses.
 */
uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);

/*
 * Return a mask of ARMMMUIdxBit values corresponding to an "invalidate
 * all EL1" scope; this covers stage 1 and stage 2.
 */
int alle1_tlbmask(CPUARMState *env);

/* Set the float_status behaviour to match the Arm defaults */
void arm_set_default_fp_behaviours(float_status *s);
/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
void arm_set_ah_fp_behaviours(float_status *s);
/* Read the float_status info and return the appropriate FPSR value */
uint32_t vfp_get_fpsr_from_host(CPUARMState *env);
/* Clear the exception status flags from all float_status fields */
void vfp_clear_float_status_exc_flags(CPUARMState *env);
/*
 * Update float_status fields to handle the bits of the FPCR
 * specified by mask changing to the values in val.
 */
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);

#endif /* TARGET_ARM_INTERNALS_H */