/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/hwaddr.h"
#include "exec/vaddr.h"
#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "system/memory.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)
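/*
 * Illustrative sketch (not code used elsewhere in this header): each
 * FIELD() above expands, via hw/registerfields.h, into
 * R_V7M_CONTROL_<field>_SHIFT/_LENGTH/_MASK constants, which the generic
 * FIELD_EX32()/FIELD_DP32() helpers consume, so callers can write e.g.
 * (here 'secure' is a hypothetical bool index):
 *
 *     bool spsel = FIELD_EX32(env->v7m.control[secure], V7M_CONTROL, SPSEL);
 *     control = FIELD_DP32(control, V7M_CONTROL, SPSEL, 1);
 */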

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
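/*
 * Hypothetical sketch of how these magic values are used (the real
 * checks live in the M-profile exception handling and translation
 * code): when a branch-like operation loads a value of this form into
 * the PC, it is treated as a function or exception return rather than
 * as an ordinary branch, along the lines of:
 *
 *     if (dest >= EXC_RETURN_MIN_MAGIC) {
 *         ... unstack and return from the exception ...
 *     }
 */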

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
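/*
 * Illustrative sketch of the intended indexing pattern (hypothetical
 * snippet, not code from this file): when saving banked state on a
 * mode switch, R13 and the SPSR use bank_number() but R14 uses
 * r14_bank_number():
 *
 *     env->banked_r13[bank_number(mode)] = env->regs[13];
 *     env->banked_spsr[bank_number(mode)] = env->spsr;
 *     env->banked_r14[r14_bank_number(mode)] = env->regs[14];
 */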

void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
void arm_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
int arm_cpu_mmu_index(CPUState *cs, bool ifetch);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}
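/*
 * The map itself lives in vfp_helper.c; as a sketch of what it contains
 * (check the definition before relying on this), it pairs each ARM
 * rounding mode with its softfloat equivalent, e.g. FPROUNDING_TIEEVEN
 * with float_round_nearest_even and FPROUNDING_ODD with
 * float_round_to_odd:
 *
 *     FloatRoundMode fr = arm_rmode_to_sf(FPROUNDING_ZERO);
 *     // fr == float_round_to_zero
 */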

/* Return the effective value of SCR_EL3.RW */
static inline bool arm_scr_rw_eff(CPUARMState *env)
{
    /*
     * SCR_EL3.RW has an effective value of 1 if:
     *  - we are NS and EL2 is implemented but doesn't support AArch32
     *  - we are S and EL2 is enabled (in which case it must be AArch64)
     */
    ARMCPU *cpu = env_archcpu(env);

    if (env->cp15.scr_el3 & SCR_RW) {
        return true;
    }
    if (env->cp15.scr_el3 & SCR_NS) {
        return arm_feature(env, ARM_FEATURE_EL2) &&
               !cpu_isar_feature(aa64_aa32_el2, cpu);
    } else {
        return env->cp15.scr_el3 & SCR_EEL2;
    }
}

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /*
     * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /*
     * The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && arm_scr_rw_eff(env);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_is_el2_enabled(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/*
 * Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[env->v7m.secure] & 1);
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in EL3 */
            return 3;
        }

        return 1;
    }
}

static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                  bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /*
     * In system mode, BE32 is modelled in line with the
     * architecture (as word-invariant big-endianness), where loads
     * and stores are done little endian but from addresses which
     * are adjusted by XORing with the appropriate constant. So the
     * endianness to use for the raw data access is not affected by
     * SCTLR.B.
     * In user mode, however, we model BE32 as byte-invariant
     * big-endianness (because user-only code cannot tell the
     * difference), and so we need to use a data access endianness
     * that depends on SCTLR.B.
     */
    if (sctlr_b) {
        return true;
    }
#endif
    /* In 32-bit mode, endianness is determined by looking at CPSR's E bit */
    return env->uncached_cpsr & CPSR_E;
}
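/*
 * Worked example for the BE32 comment above (illustrative only): in
 * system mode with SCTLR.B set, a one-byte access at address A is done
 * at address A ^ 3 and a two-byte access at A ^ 2, which together give
 * the word-invariant big-endian view of memory; see
 * arm_adjust_watchpoint_address() below for the same adjustment applied
 * to watchpoint addresses.
 */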

static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
{
    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    if (!is_a64(env)) {
        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
    } else {
        int cur_el = arm_current_el(env);
        uint64_t sctlr = arm_sctlr(env, cur_el);
        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
    }
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
}
#endif

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/*
 * round_down_to_parange_index
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range and returns the index for this. The index is intended to
 * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
 */
uint8_t round_down_to_parange_index(uint8_t bit_size);

/*
 * round_down_to_parange_bit_size
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range bit size and returns this.
 */
uint8_t round_down_to_parange_bit_size(uint8_t bit_size);
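/*
 * For example (assuming the usual PARange ladder of 32, 36, 40, 42, 44,
 * 48 and 52 bits): round_down_to_parange_bit_size(43) would return 42,
 * and round_down_to_parange_index(43) the PARANGE encoding for 42 bits.
 */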

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    hwaddr s2addr;
    hwaddr paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
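/*
 * Worked example (follows directly from the code above): a level-1
 * translation fault in domain 3 yields FSC 0x5, with the domain folded
 * into bits [7:4]:
 *
 *     ARMMMUFaultInfo fi = {
 *         .type = ARMFault_Translation, .level = 1, .domain = 3,
 *     };
 *     arm_fi_to_sfsc(&fi);    // == 0x35
 */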

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
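/*
 * Worked example (follows directly from the code above): a level-2
 * permission fault encodes as 0b001100 | 2 == 0xe, and the LPAE bit 9
 * is then ORed in, so arm_fi_to_lfsc() returns 0x20e.
 */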

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
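/*
 * Sketch of the relationship between the two index spaces (assuming the
 * usual A-profile case): the core index is what the generic TCG TLB
 * code sees, and for A-profile indexes the two conversions are inverses
 * of each other:
 *
 *     int core = arm_to_core_mmu_idx(ARMMMUIdx_E10_1);
 *     ARMMMUIdx idx = core_to_arm_mmu_idx(env, core);  // == ARMMMUIdx_E10_1
 */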

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/*
 * Return true if this address translation regime has two ranges.
 * Note that this will not return the correct answer for AArch32
 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
 * never called from a context where EL3 can be AArch32. (The
 * correct return value for ARMMMUIdx_E3 would be different for
 * that case, so we can't just make the function return the
 * correct value anyway; we would need an extra "bool e3_is_aarch32"
 * argument which all the current callsites would pass as 'false'.)
 */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E30_3_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
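/*
 * For example, a CPSR of 0x600001d3 (SVC mode, IRQ and FIQ masked) has
 * low bits 0x13, so aarch32_mode_name() returns "svc".
 */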

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
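/*
 * For example, arm_granule_bits(Gran16K) is 14, i.e. a 16KiB page
 * (2^14 bytes); the other granules work out to 4KiB and 64KiB.
 */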

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;
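/*
 * As a concrete reading of these fields (illustrative): tsz is the
 * T0SZ/T1SZ-style "64 minus region size in bits" value, so a regime
 * with TCR.T0SZ == 16 describes a 48-bit VA range and yields tsz == 16,
 * and select records whether the low (TTBR0) or high (TTBR1) range was
 * chosen for the address.
 */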

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the
 * format of a DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
                                    MMUAccessType access_type, MemOp memop,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12) /* size - 1 */
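/*
 * Sketch of how a descriptor is assembled (cf. the real users in the
 * translator; the variables mmu_idx, is_write and size here are
 * illustrative):
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 */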

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
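/*
 * Worked example of the fold above: with bit55 == 0 the matching tag is
 * 0b0000, and (0x0 + 0) & 0xf == 0 only for that tag; with bit55 == 1
 * the matching tag is 0b1111, and (0xf + 1) & 0xf == 0 only for that
 * tag. So a single addition covers both halves of the address space.
 */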

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
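/*
 * For example, on a CPU with 4 event counters this evaluates to
 * 0x8000000f: bit 31 for the cycle counter plus one bit per counter.
 */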
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);

/* Return true if the gdbstub is presenting an AArch64 CPU */
static inline bool arm_gdbstub_is_aarch64(ARMCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU);
}

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (e.g. changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
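
/*
 * Worked example: for a 48-bit VA range (param.tsz == 16) with
 * top-byte-ignore enabled (param.tbi == 1), bot_pac_bit is 48 and
 * top_pac_bit is 56, so pauth_ptr_mask() returns
 * MAKE_64BIT_MASK(48, 8) == 0x00ff000000000000, i.e. the PAC
 * occupies bits [55:48].
 */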
/* Add the cpreg definitions for debug-related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the
     * test returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen for the "EL0 is AArch32" case.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) ||
         (env->cp15.scr_el3 & SCR_FGTEN));
}

/*
 * Although the ARM implementation of hardware-assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as, most of the time when debugging kernels,
 * you never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint, so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current breakpoint/watchpoint counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, vaddr addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr);
int insert_hw_watchpoint(vaddr addr, vaddr len, int type);
int delete_hw_watchpoint(vaddr addr, vaddr len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and the counter for the specified timer, as used for direct register
 * accesses.
 */
uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);

/*
 * Return a mask of ARMMMUIdxBit values corresponding to an "invalidate
 * all EL1" scope; this covers stage 1 and stage 2.
 */
int alle1_tlbmask(CPUARMState *env);

/* Set the float_status behaviour to match the Arm defaults */
void arm_set_default_fp_behaviours(float_status *s);
/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
void arm_set_ah_fp_behaviours(float_status *s);
/* Read the float_status info and return the appropriate FPSR value */
uint32_t vfp_get_fpsr_from_host(CPUARMState *env);
/* Clear the exception status flags from all float_status fields */
void vfp_clear_float_status_exc_flags(CPUARMState *env);
/*
 * Update float_status fields to handle the bits of the FPCR
 * specified by mask changing to the values in val.
 */
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);

#endif