1 /* 2 * QEMU ARM CPU -- internal functions and types 3 * 4 * Copyright (c) 2014 Linaro Ltd 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 2 9 * of the License, or (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see 18 * <http://www.gnu.org/licenses/gpl-2.0.html> 19 * 20 * This header defines functions, types, etc which need to be shared 21 * between different source files within target/arm/ but which are 22 * private to it and not required by the rest of QEMU. 23 */ 24 25 #ifndef TARGET_ARM_INTERNALS_H 26 #define TARGET_ARM_INTERNALS_H 27 28 #include "exec/breakpoint.h" 29 #include "hw/registerfields.h" 30 #include "tcg/tcg-gvec-desc.h" 31 #include "syndrome.h" 32 #include "cpu-features.h" 33 34 /* register banks for CPU modes */ 35 #define BANK_USRSYS 0 36 #define BANK_SVC 1 37 #define BANK_ABT 2 38 #define BANK_UND 3 39 #define BANK_IRQ 4 40 #define BANK_FIQ 5 41 #define BANK_HYP 6 42 #define BANK_MON 7 43 44 static inline int arm_env_mmu_index(CPUARMState *env) 45 { 46 return EX_TBFLAG_ANY(env->hflags, MMUIDX); 47 } 48 49 static inline bool excp_is_internal(int excp) 50 { 51 /* Return true if this exception number represents a QEMU-internal 52 * exception that will not be passed to the guest. 53 */ 54 return excp == EXCP_INTERRUPT 55 || excp == EXCP_HLT 56 || excp == EXCP_DEBUG 57 || excp == EXCP_HALTED 58 || excp == EXCP_EXCEPTION_EXIT 59 || excp == EXCP_KERNEL_TRAP 60 || excp == EXCP_SEMIHOST; 61 } 62 63 /* 64 * Default frequency for the generic timer, in Hz. 65 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before 66 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz, 67 * which gives a 16ns tick period. 68 * 69 * We will use the back-compat value: 70 * - for QEMU CPU types added before we standardized on 1GHz 71 * - for versioned machine types with a version of 9.0 or earlier 72 * In any case, the machine model may override via the cntfrq property. 
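 *
 * As a worked example, GTIMER_DEFAULT_HZ (1GHz) gives a 1ns tick and
 * GTIMER_BACKCOMPAT_HZ (62.5MHz) gives 1e9 / 62500000 = 16ns, as noted
 * above. A board model that needs a different value can set the
 * property before the CPU is realized; an illustrative (not
 * prescriptive) call, where 'cpu' is the ARMCPU being configured and
 * 25MHz is an arbitrary example frequency, would be:
 *
 *   object_property_set_uint(OBJECT(cpu), "cntfrq", 25 * 1000 * 1000,
 *                            &error_abort);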
73 */ 74 #define GTIMER_DEFAULT_HZ 1000000000 75 #define GTIMER_BACKCOMPAT_HZ 62500000 76 77 /* Bit definitions for the v7M CONTROL register */ 78 FIELD(V7M_CONTROL, NPRIV, 0, 1) 79 FIELD(V7M_CONTROL, SPSEL, 1, 1) 80 FIELD(V7M_CONTROL, FPCA, 2, 1) 81 FIELD(V7M_CONTROL, SFPA, 3, 1) 82 83 /* Bit definitions for v7M exception return payload */ 84 FIELD(V7M_EXCRET, ES, 0, 1) 85 FIELD(V7M_EXCRET, RES0, 1, 1) 86 FIELD(V7M_EXCRET, SPSEL, 2, 1) 87 FIELD(V7M_EXCRET, MODE, 3, 1) 88 FIELD(V7M_EXCRET, FTYPE, 4, 1) 89 FIELD(V7M_EXCRET, DCRS, 5, 1) 90 FIELD(V7M_EXCRET, S, 6, 1) 91 FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */ 92 93 /* Minimum value which is a magic number for exception return */ 94 #define EXC_RETURN_MIN_MAGIC 0xff000000 95 /* Minimum number which is a magic number for function or exception return 96 * when using v8M security extension 97 */ 98 #define FNC_RETURN_MIN_MAGIC 0xfefffffe 99 100 /* Bit definitions for DBGWCRn and DBGWCRn_EL1 */ 101 FIELD(DBGWCR, E, 0, 1) 102 FIELD(DBGWCR, PAC, 1, 2) 103 FIELD(DBGWCR, LSC, 3, 2) 104 FIELD(DBGWCR, BAS, 5, 8) 105 FIELD(DBGWCR, HMC, 13, 1) 106 FIELD(DBGWCR, SSC, 14, 2) 107 FIELD(DBGWCR, LBN, 16, 4) 108 FIELD(DBGWCR, WT, 20, 1) 109 FIELD(DBGWCR, MASK, 24, 5) 110 FIELD(DBGWCR, SSCE, 29, 1) 111 112 #define VTCR_NSW (1u << 29) 113 #define VTCR_NSA (1u << 30) 114 #define VSTCR_SW VTCR_NSW 115 #define VSTCR_SA VTCR_NSA 116 117 /* Bit definitions for CPACR (AArch32 only) */ 118 FIELD(CPACR, CP10, 20, 2) 119 FIELD(CPACR, CP11, 22, 2) 120 FIELD(CPACR, TRCDIS, 28, 1) /* matches CPACR_EL1.TTA */ 121 FIELD(CPACR, D32DIS, 30, 1) /* up to v7; RAZ in v8 */ 122 FIELD(CPACR, ASEDIS, 31, 1) 123 124 /* Bit definitions for CPACR_EL1 (AArch64 only) */ 125 FIELD(CPACR_EL1, ZEN, 16, 2) 126 FIELD(CPACR_EL1, FPEN, 20, 2) 127 FIELD(CPACR_EL1, SMEN, 24, 2) 128 FIELD(CPACR_EL1, TTA, 28, 1) /* matches CPACR.TRCDIS */ 129 130 /* Bit definitions for HCPTR (AArch32 only) */ 131 FIELD(HCPTR, TCP10, 10, 1) 132 FIELD(HCPTR, TCP11, 11, 1) 133 FIELD(HCPTR, TASE, 15, 1) 134 FIELD(HCPTR, TTA, 20, 1) 135 FIELD(HCPTR, TAM, 30, 1) /* matches CPTR_EL2.TAM */ 136 FIELD(HCPTR, TCPAC, 31, 1) /* matches CPTR_EL2.TCPAC */ 137 138 /* Bit definitions for CPTR_EL2 (AArch64 only) */ 139 FIELD(CPTR_EL2, TZ, 8, 1) /* !E2H */ 140 FIELD(CPTR_EL2, TFP, 10, 1) /* !E2H, matches HCPTR.TCP10 */ 141 FIELD(CPTR_EL2, TSM, 12, 1) /* !E2H */ 142 FIELD(CPTR_EL2, ZEN, 16, 2) /* E2H */ 143 FIELD(CPTR_EL2, FPEN, 20, 2) /* E2H */ 144 FIELD(CPTR_EL2, SMEN, 24, 2) /* E2H */ 145 FIELD(CPTR_EL2, TTA, 28, 1) 146 FIELD(CPTR_EL2, TAM, 30, 1) /* matches HCPTR.TAM */ 147 FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */ 148 149 /* Bit definitions for CPTR_EL3 (AArch64 only) */ 150 FIELD(CPTR_EL3, EZ, 8, 1) 151 FIELD(CPTR_EL3, TFP, 10, 1) 152 FIELD(CPTR_EL3, ESM, 12, 1) 153 FIELD(CPTR_EL3, TTA, 20, 1) 154 FIELD(CPTR_EL3, TAM, 30, 1) 155 FIELD(CPTR_EL3, TCPAC, 31, 1) 156 157 #define MDCR_MTPME (1U << 28) 158 #define MDCR_TDCC (1U << 27) 159 #define MDCR_HLP (1U << 26) /* MDCR_EL2 */ 160 #define MDCR_SCCD (1U << 23) /* MDCR_EL3 */ 161 #define MDCR_HCCD (1U << 23) /* MDCR_EL2 */ 162 #define MDCR_EPMAD (1U << 21) 163 #define MDCR_EDAD (1U << 20) 164 #define MDCR_TTRF (1U << 19) 165 #define MDCR_STE (1U << 18) /* MDCR_EL3 */ 166 #define MDCR_SPME (1U << 17) /* MDCR_EL3 */ 167 #define MDCR_HPMD (1U << 17) /* MDCR_EL2 */ 168 #define MDCR_SDD (1U << 16) 169 #define MDCR_SPD (3U << 14) 170 #define MDCR_TDRA (1U << 11) 171 #define MDCR_TDOSA (1U << 10) 172 #define MDCR_TDA (1U << 9) 173 #define MDCR_TDE (1U 
<< 8) 174 #define MDCR_HPME (1U << 7) 175 #define MDCR_TPM (1U << 6) 176 #define MDCR_TPMCR (1U << 5) 177 #define MDCR_HPMN (0x1fU) 178 179 /* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */ 180 #define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \ 181 MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \ 182 MDCR_STE | MDCR_SPME | MDCR_SPD) 183 184 #define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */ 185 #define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */ 186 #define TTBCR_PD0 (1U << 4) 187 #define TTBCR_PD1 (1U << 5) 188 #define TTBCR_EPD0 (1U << 7) 189 #define TTBCR_IRGN0 (3U << 8) 190 #define TTBCR_ORGN0 (3U << 10) 191 #define TTBCR_SH0 (3U << 12) 192 #define TTBCR_T1SZ (3U << 16) 193 #define TTBCR_A1 (1U << 22) 194 #define TTBCR_EPD1 (1U << 23) 195 #define TTBCR_IRGN1 (3U << 24) 196 #define TTBCR_ORGN1 (3U << 26) 197 #define TTBCR_SH1 (1U << 28) 198 #define TTBCR_EAE (1U << 31) 199 200 FIELD(VTCR, T0SZ, 0, 6) 201 FIELD(VTCR, SL0, 6, 2) 202 FIELD(VTCR, IRGN0, 8, 2) 203 FIELD(VTCR, ORGN0, 10, 2) 204 FIELD(VTCR, SH0, 12, 2) 205 FIELD(VTCR, TG0, 14, 2) 206 FIELD(VTCR, PS, 16, 3) 207 FIELD(VTCR, VS, 19, 1) 208 FIELD(VTCR, HA, 21, 1) 209 FIELD(VTCR, HD, 22, 1) 210 FIELD(VTCR, HWU59, 25, 1) 211 FIELD(VTCR, HWU60, 26, 1) 212 FIELD(VTCR, HWU61, 27, 1) 213 FIELD(VTCR, HWU62, 28, 1) 214 FIELD(VTCR, NSW, 29, 1) 215 FIELD(VTCR, NSA, 30, 1) 216 FIELD(VTCR, DS, 32, 1) 217 FIELD(VTCR, SL2, 33, 1) 218 219 #define HCRX_ENAS0 (1ULL << 0) 220 #define HCRX_ENALS (1ULL << 1) 221 #define HCRX_ENASR (1ULL << 2) 222 #define HCRX_FNXS (1ULL << 3) 223 #define HCRX_FGTNXS (1ULL << 4) 224 #define HCRX_SMPME (1ULL << 5) 225 #define HCRX_TALLINT (1ULL << 6) 226 #define HCRX_VINMI (1ULL << 7) 227 #define HCRX_VFNMI (1ULL << 8) 228 #define HCRX_CMOW (1ULL << 9) 229 #define HCRX_MCE2 (1ULL << 10) 230 #define HCRX_MSCEN (1ULL << 11) 231 232 #define HPFAR_NS (1ULL << 63) 233 234 #define HSTR_TTEE (1 << 16) 235 #define HSTR_TJDBX (1 << 17) 236 237 /* 238 * Depending on the value of HCR_EL2.E2H, bits 0 and 1 239 * have different bit definitions, and EL1PCTEN might be 240 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to 241 * disambiguate if necessary. 242 */ 243 FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1) 244 FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1) 245 FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1) 246 FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1) 247 FIELD(CNTHCTL, EVNTEN, 2, 1) 248 FIELD(CNTHCTL, EVNTDIR, 3, 1) 249 FIELD(CNTHCTL, EVNTI, 4, 4) 250 FIELD(CNTHCTL, EL0VTEN, 8, 1) 251 FIELD(CNTHCTL, EL0PTEN, 9, 1) 252 FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1) 253 FIELD(CNTHCTL, EL1PTEN, 11, 1) 254 FIELD(CNTHCTL, ECV, 12, 1) 255 FIELD(CNTHCTL, EL1TVT, 13, 1) 256 FIELD(CNTHCTL, EL1TVCT, 14, 1) 257 FIELD(CNTHCTL, EL1NVPCT, 15, 1) 258 FIELD(CNTHCTL, EL1NVVCT, 16, 1) 259 FIELD(CNTHCTL, EVNTIS, 17, 1) 260 FIELD(CNTHCTL, CNTVMASK, 18, 1) 261 FIELD(CNTHCTL, CNTPMASK, 19, 1) 262 263 /* We use a few fake FSR values for internal purposes in M profile. 264 * M profile cores don't have A/R format FSRs, but currently our 265 * get_phys_addr() code assumes A/R profile and reports failures via 266 * an A/R format FSR value. We then translate that into the proper 267 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt(). 268 * Mostly the FSR values we use for this are those defined for v7PMSA, 269 * since we share some of that codepath. A few kinds of fault are 270 * only for M profile and have no A/R equivalent, though, so we have 271 * to pick a value from the reserved range (which we never otherwise 272 * generate) to use for these. 
273 * These values will never be visible to the guest. 274 */ 275 #define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */ 276 #define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */ 277 278 /** 279 * raise_exception: Raise the specified exception. 280 * Raise a guest exception with the specified value, syndrome register 281 * and target exception level. This should be called from helper functions, 282 * and never returns because we will longjump back up to the CPU main loop. 283 */ 284 G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp, 285 uint32_t syndrome, uint32_t target_el); 286 287 /* 288 * Similarly, but also use unwinding to restore cpu state. 289 */ 290 G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp, 291 uint32_t syndrome, uint32_t target_el, 292 uintptr_t ra); 293 294 /* 295 * For AArch64, map a given EL to an index in the banked_spsr array. 296 * Note that this mapping and the AArch32 mapping defined in bank_number() 297 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally 298 * mandated mapping between each other. 299 */ 300 static inline unsigned int aarch64_banked_spsr_index(unsigned int el) 301 { 302 static const unsigned int map[4] = { 303 [1] = BANK_SVC, /* EL1. */ 304 [2] = BANK_HYP, /* EL2. */ 305 [3] = BANK_MON, /* EL3. */ 306 }; 307 assert(el >= 1 && el <= 3); 308 return map[el]; 309 } 310 311 /* Map CPU modes onto saved register banks. */ 312 static inline int bank_number(int mode) 313 { 314 switch (mode) { 315 case ARM_CPU_MODE_USR: 316 case ARM_CPU_MODE_SYS: 317 return BANK_USRSYS; 318 case ARM_CPU_MODE_SVC: 319 return BANK_SVC; 320 case ARM_CPU_MODE_ABT: 321 return BANK_ABT; 322 case ARM_CPU_MODE_UND: 323 return BANK_UND; 324 case ARM_CPU_MODE_IRQ: 325 return BANK_IRQ; 326 case ARM_CPU_MODE_FIQ: 327 return BANK_FIQ; 328 case ARM_CPU_MODE_HYP: 329 return BANK_HYP; 330 case ARM_CPU_MODE_MON: 331 return BANK_MON; 332 } 333 g_assert_not_reached(); 334 } 335 336 /** 337 * r14_bank_number: Map CPU mode onto register bank for r14 338 * 339 * Given an AArch32 CPU mode, return the index into the saved register 340 * banks to use for the R14 (LR) in that mode. This is the same as 341 * bank_number(), except for the special case of Hyp mode, where 342 * R14 is shared with USR and SYS, unlike its R13 and SPSR. 343 * This should be used as the index into env->banked_r14[], and 344 * bank_number() used for the index into env->banked_r13[] and 345 * env->banked_spsr[]. 346 */ 347 static inline int r14_bank_number(int mode) 348 { 349 return (mode == ARM_CPU_MODE_HYP) ? 
BANK_USRSYS : bank_number(mode); 350 } 351 352 void arm_cpu_register(const ARMCPUInfo *info); 353 void aarch64_cpu_register(const ARMCPUInfo *info); 354 355 void register_cp_regs_for_features(ARMCPU *cpu); 356 void init_cpreg_list(ARMCPU *cpu); 357 358 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); 359 void arm_translate_init(void); 360 void arm_translate_code(CPUState *cs, TranslationBlock *tb, 361 int *max_insns, vaddr pc, void *host_pc); 362 363 void arm_cpu_register_gdb_commands(ARMCPU *cpu); 364 void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *, 365 GPtrArray *, GPtrArray *); 366 367 void arm_restore_state_to_opc(CPUState *cs, 368 const TranslationBlock *tb, 369 const uint64_t *data); 370 371 #ifdef CONFIG_TCG 372 void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); 373 374 /* Our implementation of TCGCPUOps::cpu_exec_halt */ 375 bool arm_cpu_exec_halt(CPUState *cs); 376 #endif /* CONFIG_TCG */ 377 378 typedef enum ARMFPRounding { 379 FPROUNDING_TIEEVEN, 380 FPROUNDING_POSINF, 381 FPROUNDING_NEGINF, 382 FPROUNDING_ZERO, 383 FPROUNDING_TIEAWAY, 384 FPROUNDING_ODD 385 } ARMFPRounding; 386 387 extern const FloatRoundMode arm_rmode_to_sf_map[6]; 388 389 static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode) 390 { 391 assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map)); 392 return arm_rmode_to_sf_map[rmode]; 393 } 394 395 /* Return the effective value of SCR_EL3.RW */ 396 static inline bool arm_scr_rw_eff(CPUARMState *env) 397 { 398 /* 399 * SCR_EL3.RW has an effective value of 1 if: 400 * - we are NS and EL2 is implemented but doesn't support AArch32 401 * - we are S and EL2 is enabled (in which case it must be AArch64) 402 */ 403 ARMCPU *cpu = env_archcpu(env); 404 405 if (env->cp15.scr_el3 & SCR_RW) { 406 return true; 407 } 408 if (env->cp15.scr_el3 & SCR_NS) { 409 return arm_feature(env, ARM_FEATURE_EL2) && 410 !cpu_isar_feature(aa64_aa32_el2, cpu); 411 } else { 412 return env->cp15.scr_el3 & SCR_EEL2; 413 } 414 } 415 416 /* Return true if the specified exception level is running in AArch64 state. */ 417 static inline bool arm_el_is_aa64(CPUARMState *env, int el) 418 { 419 /* 420 * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want, 421 * and if we're not in EL0 then the state of EL0 isn't well defined.) 422 */ 423 assert(el >= 1 && el <= 3); 424 bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64); 425 426 /* 427 * The highest exception level is always at the maximum supported 428 * register width, and then lower levels have a register width controlled 429 * by bits in the SCR or HCR registers. 430 */ 431 if (el == 3) { 432 return aa64; 433 } 434 435 if (arm_feature(env, ARM_FEATURE_EL3)) { 436 aa64 = aa64 && arm_scr_rw_eff(env); 437 } 438 439 if (el == 2) { 440 return aa64; 441 } 442 443 if (arm_is_el2_enabled(env)) { 444 aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW); 445 } 446 447 return aa64; 448 } 449 450 /* 451 * Return the current Exception Level (as per ARMv8; note that this differs 452 * from the ARMv7 Privilege Level). 
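 *
 * For example, AArch32 User mode maps to EL0, Hyp mode to EL2 and
 * Monitor mode to EL3; the remaining privileged modes map to EL1,
 * except that when EL3 is AArch32 every secure privileged mode runs
 * at EL3, as the switch statement below reflects.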
453 */ 454 static inline int arm_current_el(CPUARMState *env) 455 { 456 if (arm_feature(env, ARM_FEATURE_M)) { 457 return arm_v7m_is_handler_mode(env) || 458 !(env->v7m.control[env->v7m.secure] & 1); 459 } 460 461 if (is_a64(env)) { 462 return extract32(env->pstate, 2, 2); 463 } 464 465 switch (env->uncached_cpsr & 0x1f) { 466 case ARM_CPU_MODE_USR: 467 return 0; 468 case ARM_CPU_MODE_HYP: 469 return 2; 470 case ARM_CPU_MODE_MON: 471 return 3; 472 default: 473 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 474 /* If EL3 is 32-bit then all secure privileged modes run in EL3 */ 475 return 3; 476 } 477 478 return 1; 479 } 480 } 481 482 static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env, 483 bool sctlr_b) 484 { 485 #ifdef CONFIG_USER_ONLY 486 /* 487 * In system mode, BE32 is modelled in line with the 488 * architecture (as word-invariant big-endianness), where loads 489 * and stores are done little endian but from addresses which 490 * are adjusted by XORing with the appropriate constant. So the 491 * endianness to use for the raw data access is not affected by 492 * SCTLR.B. 493 * In user mode, however, we model BE32 as byte-invariant 494 * big-endianness (because user-only code cannot tell the 495 * difference), and so we need to use a data access endianness 496 * that depends on SCTLR.B. 497 */ 498 if (sctlr_b) { 499 return true; 500 } 501 #endif 502 /* In 32bit endianness is determined by looking at CPSR's E bit */ 503 return env->uncached_cpsr & CPSR_E; 504 } 505 506 static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr) 507 { 508 return sctlr & (el ? SCTLR_EE : SCTLR_E0E); 509 } 510 511 /* Return true if the processor is in big-endian mode. */ 512 static inline bool arm_cpu_data_is_big_endian(CPUARMState *env) 513 { 514 if (!is_a64(env)) { 515 return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env)); 516 } else { 517 int cur_el = arm_current_el(env); 518 uint64_t sctlr = arm_sctlr(env, cur_el); 519 return arm_cpu_data_is_big_endian_a64(cur_el, sctlr); 520 } 521 } 522 523 #ifdef CONFIG_USER_ONLY 524 static inline bool arm_cpu_bswap_data(CPUARMState *env) 525 { 526 return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env); 527 } 528 #endif 529 530 static inline void aarch64_save_sp(CPUARMState *env, int el) 531 { 532 if (env->pstate & PSTATE_SP) { 533 env->sp_el[el] = env->xregs[31]; 534 } else { 535 env->sp_el[0] = env->xregs[31]; 536 } 537 } 538 539 static inline void aarch64_restore_sp(CPUARMState *env, int el) 540 { 541 if (env->pstate & PSTATE_SP) { 542 env->xregs[31] = env->sp_el[el]; 543 } else { 544 env->xregs[31] = env->sp_el[0]; 545 } 546 } 547 548 static inline void update_spsel(CPUARMState *env, uint32_t imm) 549 { 550 unsigned int cur_el = arm_current_el(env); 551 /* Update PSTATE SPSel bit; this requires us to update the 552 * working stack pointer in xregs[31]. 553 */ 554 if (!((imm ^ env->pstate) & PSTATE_SP)) { 555 return; 556 } 557 aarch64_save_sp(env, cur_el); 558 env->pstate = deposit32(env->pstate, 0, 1, imm); 559 560 /* We rely on illegal updates to SPsel from EL0 to get trapped 561 * at translation time. 562 */ 563 assert(cur_el >= 1 && cur_el <= 3); 564 aarch64_restore_sp(env, cur_el); 565 } 566 567 /* 568 * arm_pamax 569 * @cpu: ARMCPU 570 * 571 * Returns the implementation defined bit-width of physical addresses. 572 * The ARMv8 reference manuals refer to this as PAMax(). 
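 *
 * For example, a 64-bit CPU whose ID_AA64MMFR0_EL1.PARANGE field is
 * 0b0101 implements 48-bit physical addresses, so this returns 48.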
573 */ 574 unsigned int arm_pamax(ARMCPU *cpu); 575 576 /* 577 * round_down_to_parange_index 578 * @bit_size: uint8_t 579 * 580 * Rounds down the bit_size supplied to the first supported ARM physical 581 * address range and returns the index for this. The index is intended to 582 * be used to set ID_AA64MMFR0_EL1's PARANGE bits. 583 */ 584 uint8_t round_down_to_parange_index(uint8_t bit_size); 585 586 /* 587 * round_down_to_parange_bit_size 588 * @bit_size: uint8_t 589 * 590 * Rounds down the bit_size supplied to the first supported ARM physical 591 * address range bit size and returns this. 592 */ 593 uint8_t round_down_to_parange_bit_size(uint8_t bit_size); 594 595 /* Return true if extended addresses are enabled. 596 * This is always the case if our translation regime is 64 bit, 597 * but depends on TTBCR.EAE for 32 bit. 598 */ 599 static inline bool extended_addresses_enabled(CPUARMState *env) 600 { 601 uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1]; 602 if (arm_feature(env, ARM_FEATURE_PMSA) && 603 arm_feature(env, ARM_FEATURE_V8)) { 604 return true; 605 } 606 return arm_el_is_aa64(env, 1) || 607 (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE)); 608 } 609 610 /* Update a QEMU watchpoint based on the information the guest has set in the 611 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers. 612 */ 613 void hw_watchpoint_update(ARMCPU *cpu, int n); 614 /* Update the QEMU watchpoints for every guest watchpoint. This does a 615 * complete delete-and-reinstate of the QEMU watchpoint list and so is 616 * suitable for use after migration or on reset. 617 */ 618 void hw_watchpoint_update_all(ARMCPU *cpu); 619 /* Update a QEMU breakpoint based on the information the guest has set in the 620 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers. 621 */ 622 void hw_breakpoint_update(ARMCPU *cpu, int n); 623 /* Update the QEMU breakpoints for every guest breakpoint. This does a 624 * complete delete-and-reinstate of the QEMU breakpoint list and so is 625 * suitable for use after migration or on reset. 626 */ 627 void hw_breakpoint_update_all(ARMCPU *cpu); 628 629 /* Callback function for checking if a breakpoint should trigger. */ 630 bool arm_debug_check_breakpoint(CPUState *cs); 631 632 /* Callback function for checking if a watchpoint should trigger. */ 633 bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp); 634 635 /* Adjust addresses (in BE32 mode) before testing against watchpoint 636 * addresses. 637 */ 638 vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len); 639 640 /* Callback function for when a watchpoint or breakpoint triggers. */ 641 void arm_debug_excp_handler(CPUState *cs); 642 643 #if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG) 644 static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type) 645 { 646 return false; 647 } 648 static inline void arm_handle_psci_call(ARMCPU *cpu) 649 { 650 g_assert_not_reached(); 651 } 652 #else 653 /* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */ 654 bool arm_is_psci_call(ARMCPU *cpu, int excp_type); 655 /* Actually handle a PSCI call */ 656 void arm_handle_psci_call(ARMCPU *cpu); 657 #endif 658 659 /** 660 * arm_clear_exclusive: clear the exclusive monitor 661 * @env: CPU env 662 * Clear the CPU's exclusive monitor, like the guest CLREX instruction. 
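 *
 * QEMU tracks the monitor with env->exclusive_addr; storing -1, as the
 * function below does, marks it as holding no reservation, since a
 * valid (aligned) exclusive address can never be all-ones.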
663 */ 664 static inline void arm_clear_exclusive(CPUARMState *env) 665 { 666 env->exclusive_addr = -1; 667 } 668 669 /** 670 * ARMFaultType: type of an ARM MMU fault 671 * This corresponds to the v8A pseudocode's Fault enumeration, 672 * with extensions for QEMU internal conditions. 673 */ 674 typedef enum ARMFaultType { 675 ARMFault_None, 676 ARMFault_AccessFlag, 677 ARMFault_Alignment, 678 ARMFault_Background, 679 ARMFault_Domain, 680 ARMFault_Permission, 681 ARMFault_Translation, 682 ARMFault_AddressSize, 683 ARMFault_SyncExternal, 684 ARMFault_SyncExternalOnWalk, 685 ARMFault_SyncParity, 686 ARMFault_SyncParityOnWalk, 687 ARMFault_AsyncParity, 688 ARMFault_AsyncExternal, 689 ARMFault_Debug, 690 ARMFault_TLBConflict, 691 ARMFault_UnsuppAtomicUpdate, 692 ARMFault_Lockdown, 693 ARMFault_Exclusive, 694 ARMFault_ICacheMaint, 695 ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */ 696 ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */ 697 ARMFault_GPCFOnWalk, 698 ARMFault_GPCFOnOutput, 699 } ARMFaultType; 700 701 typedef enum ARMGPCF { 702 GPCF_None, 703 GPCF_AddressSize, 704 GPCF_Walk, 705 GPCF_EABT, 706 GPCF_Fail, 707 } ARMGPCF; 708 709 /** 710 * ARMMMUFaultInfo: Information describing an ARM MMU Fault 711 * @type: Type of fault 712 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}. 713 * @level: Table walk level (for translation, access flag and permission faults) 714 * @domain: Domain of the fault address (for non-LPAE CPUs only) 715 * @s2addr: Address that caused a fault at stage 2 716 * @paddr: physical address that caused a fault for gpc 717 * @paddr_space: physical address space that caused a fault for gpc 718 * @stage2: True if we faulted at stage 2 719 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk 720 * @s1ns: True if we faulted on a non-secure IPA while in secure state 721 * @ea: True if we should set the EA (external abort type) bit in syndrome 722 */ 723 typedef struct ARMMMUFaultInfo ARMMMUFaultInfo; 724 struct ARMMMUFaultInfo { 725 ARMFaultType type; 726 ARMGPCF gpcf; 727 target_ulong s2addr; 728 target_ulong paddr; 729 ARMSecuritySpace paddr_space; 730 int level; 731 int domain; 732 bool stage2; 733 bool s1ptw; 734 bool s1ns; 735 bool ea; 736 }; 737 738 /** 739 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC 740 * Compare pseudocode EncodeSDFSC(), though unlike that function 741 * we set up a whole FSR-format code including domain field and 742 * putting the high bit of the FSC into bit 10. 743 */ 744 static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi) 745 { 746 uint32_t fsc; 747 748 switch (fi->type) { 749 case ARMFault_None: 750 return 0; 751 case ARMFault_AccessFlag: 752 fsc = fi->level == 1 ? 0x3 : 0x6; 753 break; 754 case ARMFault_Alignment: 755 fsc = 0x1; 756 break; 757 case ARMFault_Permission: 758 fsc = fi->level == 1 ? 0xd : 0xf; 759 break; 760 case ARMFault_Domain: 761 fsc = fi->level == 1 ? 0x9 : 0xb; 762 break; 763 case ARMFault_Translation: 764 fsc = fi->level == 1 ? 0x5 : 0x7; 765 break; 766 case ARMFault_SyncExternal: 767 fsc = 0x8 | (fi->ea << 12); 768 break; 769 case ARMFault_SyncExternalOnWalk: 770 fsc = fi->level == 1 ? 0xc : 0xe; 771 fsc |= (fi->ea << 12); 772 break; 773 case ARMFault_SyncParity: 774 fsc = 0x409; 775 break; 776 case ARMFault_SyncParityOnWalk: 777 fsc = fi->level == 1 ? 
0x40c : 0x40e; 778 break; 779 case ARMFault_AsyncParity: 780 fsc = 0x408; 781 break; 782 case ARMFault_AsyncExternal: 783 fsc = 0x406 | (fi->ea << 12); 784 break; 785 case ARMFault_Debug: 786 fsc = 0x2; 787 break; 788 case ARMFault_TLBConflict: 789 fsc = 0x400; 790 break; 791 case ARMFault_Lockdown: 792 fsc = 0x404; 793 break; 794 case ARMFault_Exclusive: 795 fsc = 0x405; 796 break; 797 case ARMFault_ICacheMaint: 798 fsc = 0x4; 799 break; 800 case ARMFault_Background: 801 fsc = 0x0; 802 break; 803 case ARMFault_QEMU_NSCExec: 804 fsc = M_FAKE_FSR_NSC_EXEC; 805 break; 806 case ARMFault_QEMU_SFault: 807 fsc = M_FAKE_FSR_SFAULT; 808 break; 809 default: 810 /* Other faults can't occur in a context that requires a 811 * short-format status code. 812 */ 813 g_assert_not_reached(); 814 } 815 816 fsc |= (fi->domain << 4); 817 return fsc; 818 } 819 820 /** 821 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC 822 * Compare pseudocode EncodeLDFSC(), though unlike that function 823 * we fill in also the LPAE bit 9 of a DFSR format. 824 */ 825 static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi) 826 { 827 uint32_t fsc; 828 829 switch (fi->type) { 830 case ARMFault_None: 831 return 0; 832 case ARMFault_AddressSize: 833 assert(fi->level >= -1 && fi->level <= 3); 834 if (fi->level < 0) { 835 fsc = 0b101001; 836 } else { 837 fsc = fi->level; 838 } 839 break; 840 case ARMFault_AccessFlag: 841 assert(fi->level >= 0 && fi->level <= 3); 842 fsc = 0b001000 | fi->level; 843 break; 844 case ARMFault_Permission: 845 assert(fi->level >= 0 && fi->level <= 3); 846 fsc = 0b001100 | fi->level; 847 break; 848 case ARMFault_Translation: 849 assert(fi->level >= -1 && fi->level <= 3); 850 if (fi->level < 0) { 851 fsc = 0b101011; 852 } else { 853 fsc = 0b000100 | fi->level; 854 } 855 break; 856 case ARMFault_SyncExternal: 857 fsc = 0x10 | (fi->ea << 12); 858 break; 859 case ARMFault_SyncExternalOnWalk: 860 assert(fi->level >= -1 && fi->level <= 3); 861 if (fi->level < 0) { 862 fsc = 0b010011; 863 } else { 864 fsc = 0b010100 | fi->level; 865 } 866 fsc |= fi->ea << 12; 867 break; 868 case ARMFault_SyncParity: 869 fsc = 0x18; 870 break; 871 case ARMFault_SyncParityOnWalk: 872 assert(fi->level >= -1 && fi->level <= 3); 873 if (fi->level < 0) { 874 fsc = 0b011011; 875 } else { 876 fsc = 0b011100 | fi->level; 877 } 878 break; 879 case ARMFault_AsyncParity: 880 fsc = 0x19; 881 break; 882 case ARMFault_AsyncExternal: 883 fsc = 0x11 | (fi->ea << 12); 884 break; 885 case ARMFault_Alignment: 886 fsc = 0x21; 887 break; 888 case ARMFault_Debug: 889 fsc = 0x22; 890 break; 891 case ARMFault_TLBConflict: 892 fsc = 0x30; 893 break; 894 case ARMFault_UnsuppAtomicUpdate: 895 fsc = 0x31; 896 break; 897 case ARMFault_Lockdown: 898 fsc = 0x34; 899 break; 900 case ARMFault_Exclusive: 901 fsc = 0x35; 902 break; 903 case ARMFault_GPCFOnWalk: 904 assert(fi->level >= -1 && fi->level <= 3); 905 if (fi->level < 0) { 906 fsc = 0b100011; 907 } else { 908 fsc = 0b100100 | fi->level; 909 } 910 break; 911 case ARMFault_GPCFOnOutput: 912 fsc = 0b101000; 913 break; 914 default: 915 /* Other faults can't occur in a context that requires a 916 * long-format status code. 917 */ 918 g_assert_not_reached(); 919 } 920 921 fsc |= 1 << 9; 922 return fsc; 923 } 924 925 static inline bool arm_extabort_type(MemTxResult result) 926 { 927 /* The EA bit in syndromes and fault status registers is an 928 * IMPDEF classification of external aborts. 
ARM implementations 929 * usually use this to indicate AXI bus Decode error (0) or 930 * Slave error (1); in QEMU we follow that. 931 */ 932 return result != MEMTX_DECODE_ERROR; 933 } 934 935 #ifdef CONFIG_USER_ONLY 936 void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr, 937 MMUAccessType access_type, 938 bool maperr, uintptr_t ra); 939 void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr, 940 MMUAccessType access_type, uintptr_t ra); 941 #else 942 bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr, 943 MMUAccessType access_type, int mmu_idx, 944 MemOp memop, int size, bool probe, uintptr_t ra); 945 #endif 946 947 static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx) 948 { 949 return mmu_idx & ARM_MMU_IDX_COREIDX_MASK; 950 } 951 952 static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx) 953 { 954 if (arm_feature(env, ARM_FEATURE_M)) { 955 return mmu_idx | ARM_MMU_IDX_M; 956 } else { 957 return mmu_idx | ARM_MMU_IDX_A; 958 } 959 } 960 961 static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx) 962 { 963 /* AArch64 is always a-profile. */ 964 return mmu_idx | ARM_MMU_IDX_A; 965 } 966 967 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx); 968 969 /* Return the MMU index for a v7M CPU in the specified security state */ 970 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate); 971 972 /* 973 * Return true if the stage 1 translation regime is using LPAE 974 * format page tables 975 */ 976 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx); 977 978 /* Raise a data fault alignment exception for the specified virtual address */ 979 G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, 980 MMUAccessType access_type, 981 int mmu_idx, uintptr_t retaddr); 982 983 #ifndef CONFIG_USER_ONLY 984 /* arm_cpu_do_transaction_failed: handle a memory system error response 985 * (eg "no device/memory present at address") by raising an external abort 986 * exception 987 */ 988 void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, 989 vaddr addr, unsigned size, 990 MMUAccessType access_type, 991 int mmu_idx, MemTxAttrs attrs, 992 MemTxResult response, uintptr_t retaddr); 993 #endif 994 995 /* Call any registered EL change hooks */ 996 static inline void arm_call_pre_el_change_hook(ARMCPU *cpu) 997 { 998 ARMELChangeHook *hook, *next; 999 QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) { 1000 hook->hook(cpu, hook->opaque); 1001 } 1002 } 1003 static inline void arm_call_el_change_hook(ARMCPU *cpu) 1004 { 1005 ARMELChangeHook *hook, *next; 1006 QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) { 1007 hook->hook(cpu, hook->opaque); 1008 } 1009 } 1010 1011 /* 1012 * Return true if this address translation regime has two ranges. 1013 * Note that this will not return the correct answer for AArch32 1014 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is 1015 * never called from a context where EL3 can be AArch32. (The 1016 * correct return value for ARMMMUIdx_E3 would be different for 1017 * that case, so we can't just make the function return the 1018 * correct value anyway; we would need an extra "bool e3_is_aarch32" 1019 * argument which all the current callsites would pass as 'false'.) 
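 *
 * For example, the EL1&0 regime (the ARMMMUIdx_E10_* and
 * ARMMMUIdx_Stage1_E* indexes) has both a low (TTBR0) and a high
 * (TTBR1) VA range and so returns true below, whereas the EL2 regime
 * without E2H (ARMMMUIdx_E2) has only the TTBR0_EL2 range and
 * returns false.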
1020 */ 1021 static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx) 1022 { 1023 switch (mmu_idx) { 1024 case ARMMMUIdx_Stage1_E0: 1025 case ARMMMUIdx_Stage1_E1: 1026 case ARMMMUIdx_Stage1_E1_PAN: 1027 case ARMMMUIdx_E10_0: 1028 case ARMMMUIdx_E10_1: 1029 case ARMMMUIdx_E10_1_PAN: 1030 case ARMMMUIdx_E20_0: 1031 case ARMMMUIdx_E20_2: 1032 case ARMMMUIdx_E20_2_PAN: 1033 return true; 1034 default: 1035 return false; 1036 } 1037 } 1038 1039 static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx) 1040 { 1041 switch (mmu_idx) { 1042 case ARMMMUIdx_Stage1_E1_PAN: 1043 case ARMMMUIdx_E10_1_PAN: 1044 case ARMMMUIdx_E20_2_PAN: 1045 case ARMMMUIdx_E30_3_PAN: 1046 return true; 1047 default: 1048 return false; 1049 } 1050 } 1051 1052 static inline bool regime_is_stage2(ARMMMUIdx mmu_idx) 1053 { 1054 return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S; 1055 } 1056 1057 /* Return the exception level which controls this address translation regime */ 1058 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 1059 { 1060 switch (mmu_idx) { 1061 case ARMMMUIdx_E20_0: 1062 case ARMMMUIdx_E20_2: 1063 case ARMMMUIdx_E20_2_PAN: 1064 case ARMMMUIdx_Stage2: 1065 case ARMMMUIdx_Stage2_S: 1066 case ARMMMUIdx_E2: 1067 return 2; 1068 case ARMMMUIdx_E3: 1069 case ARMMMUIdx_E30_0: 1070 case ARMMMUIdx_E30_3_PAN: 1071 return 3; 1072 case ARMMMUIdx_E10_0: 1073 case ARMMMUIdx_Stage1_E0: 1074 case ARMMMUIdx_Stage1_E1: 1075 case ARMMMUIdx_Stage1_E1_PAN: 1076 case ARMMMUIdx_E10_1: 1077 case ARMMMUIdx_E10_1_PAN: 1078 case ARMMMUIdx_MPrivNegPri: 1079 case ARMMMUIdx_MUserNegPri: 1080 case ARMMMUIdx_MPriv: 1081 case ARMMMUIdx_MUser: 1082 case ARMMMUIdx_MSPrivNegPri: 1083 case ARMMMUIdx_MSUserNegPri: 1084 case ARMMMUIdx_MSPriv: 1085 case ARMMMUIdx_MSUser: 1086 return 1; 1087 default: 1088 g_assert_not_reached(); 1089 } 1090 } 1091 1092 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 1093 { 1094 switch (mmu_idx) { 1095 case ARMMMUIdx_E10_0: 1096 case ARMMMUIdx_E20_0: 1097 case ARMMMUIdx_E30_0: 1098 case ARMMMUIdx_Stage1_E0: 1099 case ARMMMUIdx_MUser: 1100 case ARMMMUIdx_MSUser: 1101 case ARMMMUIdx_MUserNegPri: 1102 case ARMMMUIdx_MSUserNegPri: 1103 return true; 1104 default: 1105 return false; 1106 } 1107 } 1108 1109 /* Return the SCTLR value which controls this address translation regime */ 1110 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 1111 { 1112 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 1113 } 1114 1115 /* 1116 * These are the fields in VTCR_EL2 which affect both the Secure stage 2 1117 * and the Non-Secure stage 2 translation regimes (and hence which are 1118 * not present in VSTCR_EL2). 1119 */ 1120 #define VTCR_SHARED_FIELD_MASK \ 1121 (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \ 1122 R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \ 1123 R_VTCR_DS_MASK) 1124 1125 /* Return the value of the TCR controlling this translation regime */ 1126 static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 1127 { 1128 if (mmu_idx == ARMMMUIdx_Stage2) { 1129 return env->cp15.vtcr_el2; 1130 } 1131 if (mmu_idx == ARMMMUIdx_Stage2_S) { 1132 /* 1133 * Secure stage 2 shares fields from VTCR_EL2. We merge those 1134 * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format 1135 * value so the callers don't need to special case this. 
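 *
 * For example, the merged value takes T0SZ, SL0, TG0, SW and SA from
 * VSTCR_EL2, but IRGN0/ORGN0/SH0, PS, VS, HA, HD and DS (the
 * VTCR_SHARED_FIELD_MASK bits defined above) from VTCR_EL2.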
1136 * 1137 * If a future architecture change defines bits in VSTCR_EL2 that 1138 * overlap with these VTCR_EL2 fields we may need to revisit this. 1139 */ 1140 uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK; 1141 v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK; 1142 return v; 1143 } 1144 return env->cp15.tcr_el[regime_el(env, mmu_idx)]; 1145 } 1146 1147 /* Return true if the translation regime is using LPAE format page tables */ 1148 static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 1149 { 1150 int el = regime_el(env, mmu_idx); 1151 if (el == 2 || arm_el_is_aa64(env, el)) { 1152 return true; 1153 } 1154 if (arm_feature(env, ARM_FEATURE_PMSA) && 1155 arm_feature(env, ARM_FEATURE_V8)) { 1156 return true; 1157 } 1158 if (arm_feature(env, ARM_FEATURE_LPAE) 1159 && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) { 1160 return true; 1161 } 1162 return false; 1163 } 1164 1165 /** 1166 * arm_num_brps: Return number of implemented breakpoints. 1167 * Note that the ID register BRPS field is "number of bps - 1", 1168 * and we return the actual number of breakpoints. 1169 */ 1170 static inline int arm_num_brps(ARMCPU *cpu) 1171 { 1172 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 1173 return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1; 1174 } else { 1175 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1; 1176 } 1177 } 1178 1179 /** 1180 * arm_num_wrps: Return number of implemented watchpoints. 1181 * Note that the ID register WRPS field is "number of wps - 1", 1182 * and we return the actual number of watchpoints. 1183 */ 1184 static inline int arm_num_wrps(ARMCPU *cpu) 1185 { 1186 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 1187 return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1; 1188 } else { 1189 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1; 1190 } 1191 } 1192 1193 /** 1194 * arm_num_ctx_cmps: Return number of implemented context comparators. 1195 * Note that the ID register CTX_CMPS field is "number of cmps - 1", 1196 * and we return the actual number of comparators. 1197 */ 1198 static inline int arm_num_ctx_cmps(ARMCPU *cpu) 1199 { 1200 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 1201 return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1; 1202 } else { 1203 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1; 1204 } 1205 } 1206 1207 /** 1208 * v7m_using_psp: Return true if using process stack pointer 1209 * Return true if the CPU is currently using the process stack 1210 * pointer, or false if it is using the main stack pointer. 1211 */ 1212 static inline bool v7m_using_psp(CPUARMState *env) 1213 { 1214 /* Handler mode always uses the main stack; for thread mode 1215 * the CONTROL.SPSEL bit determines the answer. 1216 * Note that in v7M it is not possible to be in Handler mode with 1217 * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both. 1218 */ 1219 return !arm_v7m_is_handler_mode(env) && 1220 env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; 1221 } 1222 1223 /** 1224 * v7m_sp_limit: Return SP limit for current CPU state 1225 * Return the SP limit value for the current CPU security state 1226 * and stack pointer. 
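 *
 * For example, a Secure handler always runs on the main stack and so
 * is limited by MSPLIM_S (env->v7m.msplim[M_REG_S]), while a
 * Non-secure thread with CONTROL.SPSEL set uses the process stack and
 * is limited by PSPLIM_NS.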
1227 */ 1228 static inline uint32_t v7m_sp_limit(CPUARMState *env) 1229 { 1230 if (v7m_using_psp(env)) { 1231 return env->v7m.psplim[env->v7m.secure]; 1232 } else { 1233 return env->v7m.msplim[env->v7m.secure]; 1234 } 1235 } 1236 1237 /** 1238 * v7m_cpacr_pass: 1239 * Return true if the v7M CPACR permits access to the FPU for the specified 1240 * security state and privilege level. 1241 */ 1242 static inline bool v7m_cpacr_pass(CPUARMState *env, 1243 bool is_secure, bool is_priv) 1244 { 1245 switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) { 1246 case 0: 1247 case 2: /* UNPREDICTABLE: we treat like 0 */ 1248 return false; 1249 case 1: 1250 return is_priv; 1251 case 3: 1252 return true; 1253 default: 1254 g_assert_not_reached(); 1255 } 1256 } 1257 1258 /** 1259 * aarch32_mode_name(): Return name of the AArch32 CPU mode 1260 * @psr: Program Status Register indicating CPU mode 1261 * 1262 * Returns, for debug logging purposes, a printable representation 1263 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by 1264 * the low bits of the specified PSR. 1265 */ 1266 static inline const char *aarch32_mode_name(uint32_t psr) 1267 { 1268 static const char cpu_mode_names[16][4] = { 1269 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt", 1270 "???", "???", "hyp", "und", "???", "???", "???", "sys" 1271 }; 1272 1273 return cpu_mode_names[psr & 0xf]; 1274 } 1275 1276 /** 1277 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request 1278 * 1279 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following 1280 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit. 1281 * Must be called with the BQL held. 1282 */ 1283 void arm_cpu_update_virq(ARMCPU *cpu); 1284 1285 /** 1286 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request 1287 * 1288 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following 1289 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit. 1290 * Must be called with the BQL held. 1291 */ 1292 void arm_cpu_update_vfiq(ARMCPU *cpu); 1293 1294 /** 1295 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request 1296 * 1297 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following 1298 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI. 1299 * Must be called with the BQL held. 1300 */ 1301 void arm_cpu_update_vinmi(ARMCPU *cpu); 1302 1303 /** 1304 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request 1305 * 1306 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following 1307 * a change to the HCRX_EL2.VFNMI. 1308 * Must be called with the BQL held. 1309 */ 1310 void arm_cpu_update_vfnmi(ARMCPU *cpu); 1311 1312 /** 1313 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit 1314 * 1315 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request, 1316 * following a change to the HCR_EL2.VSE bit. 1317 */ 1318 void arm_cpu_update_vserr(ARMCPU *cpu); 1319 1320 /** 1321 * arm_mmu_idx_el: 1322 * @env: The cpu environment 1323 * @el: The EL to use. 1324 * 1325 * Return the full ARMMMUIdx for the translation regime for EL. 1326 */ 1327 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el); 1328 1329 /** 1330 * arm_mmu_idx: 1331 * @env: The cpu environment 1332 * 1333 * Return the full ARMMMUIdx for the current translation regime. 
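 *
 * For instance, code executing at EL1 with PSTATE.PAN clear uses
 * ARMMMUIdx_E10_1, while EL0 code running under a host OS with
 * HCR_EL2.{E2H,TGE} == {1,1} uses ARMMMUIdx_E20_0.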
1334 */ 1335 ARMMMUIdx arm_mmu_idx(CPUARMState *env); 1336 1337 /** 1338 * arm_stage1_mmu_idx: 1339 * @env: The cpu environment 1340 * 1341 * Return the ARMMMUIdx for the stage1 traversal for the current regime. 1342 */ 1343 #ifdef CONFIG_USER_ONLY 1344 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 1345 { 1346 return ARMMMUIdx_Stage1_E0; 1347 } 1348 static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) 1349 { 1350 return ARMMMUIdx_Stage1_E0; 1351 } 1352 #else 1353 ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx); 1354 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env); 1355 #endif 1356 1357 /** 1358 * arm_mmu_idx_is_stage1_of_2: 1359 * @mmu_idx: The ARMMMUIdx to test 1360 * 1361 * Return true if @mmu_idx is a NOTLB mmu_idx that is the 1362 * first stage of a two stage regime. 1363 */ 1364 static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx) 1365 { 1366 switch (mmu_idx) { 1367 case ARMMMUIdx_Stage1_E0: 1368 case ARMMMUIdx_Stage1_E1: 1369 case ARMMMUIdx_Stage1_E1_PAN: 1370 return true; 1371 default: 1372 return false; 1373 } 1374 } 1375 1376 static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features, 1377 const ARMISARegisters *id) 1378 { 1379 uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV; 1380 1381 if ((features >> ARM_FEATURE_V4T) & 1) { 1382 valid |= CPSR_T; 1383 } 1384 if ((features >> ARM_FEATURE_V5) & 1) { 1385 valid |= CPSR_Q; /* V5TE in reality*/ 1386 } 1387 if ((features >> ARM_FEATURE_V6) & 1) { 1388 valid |= CPSR_E | CPSR_GE; 1389 } 1390 if ((features >> ARM_FEATURE_THUMB2) & 1) { 1391 valid |= CPSR_IT; 1392 } 1393 if (isar_feature_aa32_jazelle(id)) { 1394 valid |= CPSR_J; 1395 } 1396 if (isar_feature_aa32_pan(id)) { 1397 valid |= CPSR_PAN; 1398 } 1399 if (isar_feature_aa32_dit(id)) { 1400 valid |= CPSR_DIT; 1401 } 1402 if (isar_feature_aa32_ssbs(id)) { 1403 valid |= CPSR_SSBS; 1404 } 1405 1406 return valid; 1407 } 1408 1409 static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id) 1410 { 1411 uint32_t valid; 1412 1413 valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV; 1414 if (isar_feature_aa64_bti(id)) { 1415 valid |= PSTATE_BTYPE; 1416 } 1417 if (isar_feature_aa64_pan(id)) { 1418 valid |= PSTATE_PAN; 1419 } 1420 if (isar_feature_aa64_uao(id)) { 1421 valid |= PSTATE_UAO; 1422 } 1423 if (isar_feature_aa64_dit(id)) { 1424 valid |= PSTATE_DIT; 1425 } 1426 if (isar_feature_aa64_ssbs(id)) { 1427 valid |= PSTATE_SSBS; 1428 } 1429 if (isar_feature_aa64_mte(id)) { 1430 valid |= PSTATE_TCO; 1431 } 1432 if (isar_feature_aa64_nmi(id)) { 1433 valid |= PSTATE_ALLINT; 1434 } 1435 1436 return valid; 1437 } 1438 1439 /* Granule size (i.e. page size) */ 1440 typedef enum ARMGranuleSize { 1441 /* Same order as TG0 encoding */ 1442 Gran4K, 1443 Gran64K, 1444 Gran16K, 1445 GranInvalid, 1446 } ARMGranuleSize; 1447 1448 /** 1449 * arm_granule_bits: Return address size of the granule in bits 1450 * 1451 * Return the address size of the granule in bits. This corresponds 1452 * to the pseudocode TGxGranuleBits(). 1453 */ 1454 static inline int arm_granule_bits(ARMGranuleSize gran) 1455 { 1456 switch (gran) { 1457 case Gran64K: 1458 return 16; 1459 case Gran16K: 1460 return 14; 1461 case Gran4K: 1462 return 12; 1463 default: 1464 g_assert_not_reached(); 1465 } 1466 } 1467 1468 /* 1469 * Parameters of a given virtual address, as extracted from the 1470 * translation control register (TCR) for a given regime. 
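 *
 * For example, for a regime configured with 48-bit VA spaces,
 * TCR_ELx.T0SZ is 64 - 48 = 16, so a lookup of a TTBR0-range address
 * (bit 55 clear) yields tsz == 16 and select == 0, while an address
 * with bit 55 set selects the TTBR1 range (select == 1).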
1471 */ 1472 typedef struct ARMVAParameters { 1473 unsigned tsz : 8; 1474 unsigned ps : 3; 1475 unsigned sh : 2; 1476 unsigned select : 1; 1477 bool tbi : 1; 1478 bool epd : 1; 1479 bool hpd : 1; 1480 bool tsz_oob : 1; /* tsz has been clamped to legal range */ 1481 bool ds : 1; 1482 bool ha : 1; 1483 bool hd : 1; 1484 ARMGranuleSize gran : 2; 1485 } ARMVAParameters; 1486 1487 /** 1488 * aa64_va_parameters: Return parameters for an AArch64 virtual address 1489 * @env: CPU 1490 * @va: virtual address to look up 1491 * @mmu_idx: determines translation regime to use 1492 * @data: true if this is a data access 1493 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32 1494 * (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob) 1495 */ 1496 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, 1497 ARMMMUIdx mmu_idx, bool data, 1498 bool el1_is_aa32); 1499 1500 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx); 1501 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx); 1502 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx); 1503 1504 /* Determine if allocation tags are available. */ 1505 static inline bool allocation_tag_access_enabled(CPUARMState *env, int el, 1506 uint64_t sctlr) 1507 { 1508 if (el < 3 1509 && arm_feature(env, ARM_FEATURE_EL3) 1510 && !(env->cp15.scr_el3 & SCR_ATA)) { 1511 return false; 1512 } 1513 if (el < 2 && arm_is_el2_enabled(env)) { 1514 uint64_t hcr = arm_hcr_el2_eff(env); 1515 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { 1516 return false; 1517 } 1518 } 1519 sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA); 1520 return sctlr != 0; 1521 } 1522 1523 #ifndef CONFIG_USER_ONLY 1524 1525 /* Security attributes for an address, as returned by v8m_security_lookup. */ 1526 typedef struct V8M_SAttributes { 1527 bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */ 1528 bool ns; 1529 bool nsc; 1530 uint8_t sregion; 1531 bool srvalid; 1532 uint8_t iregion; 1533 bool irvalid; 1534 } V8M_SAttributes; 1535 1536 void v8m_security_lookup(CPUARMState *env, uint32_t address, 1537 MMUAccessType access_type, ARMMMUIdx mmu_idx, 1538 bool secure, V8M_SAttributes *sattrs); 1539 1540 /* Cacheability and shareability attributes for a memory access */ 1541 typedef struct ARMCacheAttrs { 1542 /* 1543 * If is_s2_format is true, attrs is the S2 descriptor bits [5:2] 1544 * Otherwise, attrs is the same as the MAIR_EL1 8-bit format 1545 */ 1546 unsigned int attrs:8; 1547 unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */ 1548 bool is_s2_format:1; 1549 } ARMCacheAttrs; 1550 1551 /* Fields that are valid upon success. */ 1552 typedef struct GetPhysAddrResult { 1553 CPUTLBEntryFull f; 1554 ARMCacheAttrs cacheattrs; 1555 } GetPhysAddrResult; 1556 1557 /** 1558 * get_phys_addr: get the physical address for a virtual address 1559 * @env: CPUARMState 1560 * @address: virtual address to get physical address for 1561 * @access_type: 0 for read, 1 for write, 2 for execute 1562 * @memop: memory operation feeding this access, or 0 for none 1563 * @mmu_idx: MMU index indicating required translation regime 1564 * @result: set on translation success. 1565 * @fi: set to fault info if the translation fails 1566 * 1567 * Find the physical address corresponding to the given virtual address, 1568 * by doing a translation table walk on MMU based systems or using the 1569 * MPU state on MPU based systems. 1570 * 1571 * Returns false if the translation was successful. 
 * Otherwise, the fields in @result may not be filled in, and @fi is
 * populated with information on why the translation aborted, from which a
 * DFSR/IFSR-format fault status value can be derived, with the following
 * caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 * address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
                                    MMUAccessType access_type, MemOp memop,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
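 *
 * As an illustration (assuming @ptr is 16-byte aligned and carries
 * allocation tag 0x3): if the tags of the next three granules are
 * 0x3, 0x3 and 0x5, this returns 32, i.e. two granules can be copied
 * before a tag check failure would occur.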
1654 */ 1655 uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size, 1656 uint32_t desc); 1657 1658 /** 1659 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS 1660 * operation going in the reverse direction 1661 * @env: CPU env 1662 * @ptr: *end* address of memory region (dirty pointer) 1663 * @size: length of region (guaranteed not to cross a page boundary) 1664 * @desc: MTEDESC descriptor word (0 means no MTE checks) 1665 * Returns: the size of the region that can be copied without hitting 1666 * an MTE tag failure 1667 * 1668 * Note that we assume that the caller has already checked the TBI 1669 * and TCMA bits with mte_checks_needed() and an MTE check is definitely 1670 * required. 1671 */ 1672 uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size, 1673 uint32_t desc); 1674 1675 /** 1676 * mte_check_fail: Record an MTE tag check failure 1677 * @env: CPU env 1678 * @desc: MTEDESC descriptor word 1679 * @dirty_ptr: Failing dirty address 1680 * @ra: TCG retaddr 1681 * 1682 * This may never return (if the MTE tag checks are configured to fault). 1683 */ 1684 void mte_check_fail(CPUARMState *env, uint32_t desc, 1685 uint64_t dirty_ptr, uintptr_t ra); 1686 1687 /** 1688 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation 1689 * @env: CPU env 1690 * @dirty_ptr: Start address of memory region (dirty pointer) 1691 * @size: length of region (guaranteed not to cross page boundary) 1692 * @desc: MTEDESC descriptor word 1693 */ 1694 void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size, 1695 uint32_t desc); 1696 1697 static inline int allocation_tag_from_addr(uint64_t ptr) 1698 { 1699 return extract64(ptr, 56, 4); 1700 } 1701 1702 static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag) 1703 { 1704 return deposit64(ptr, 56, 4, rtag); 1705 } 1706 1707 /* Return true if tbi bits mean that the access is checked. */ 1708 static inline bool tbi_check(uint32_t desc, int bit55) 1709 { 1710 return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1; 1711 } 1712 1713 /* Return true if tcma bits mean that the access is unchecked. */ 1714 static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag) 1715 { 1716 /* 1717 * We had extracted bit55 and ptr_tag for other reasons, so fold 1718 * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test. 1719 */ 1720 bool match = ((ptr_tag + bit55) & 0xf) == 0; 1721 bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1; 1722 return tcma && match; 1723 } 1724 1725 /* 1726 * For TBI, ideally, we would do nothing. Proper behaviour on fault is 1727 * for the tag to be present in the FAR_ELx register. But for user-only 1728 * mode, we do not have a TLB with which to implement this, so we must 1729 * remove the top byte. 1730 */ 1731 static inline uint64_t useronly_clean_ptr(uint64_t ptr) 1732 { 1733 #ifdef CONFIG_USER_ONLY 1734 /* TBI0 is known to be enabled, while TBI1 is disabled. 
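 *
 * For example, a tagged user pointer 0x5600000040001234 (bit 55 clear)
 * is cleaned to 0x0000000040001234 by the AND below, while a pointer
 * with bit 55 set keeps its top byte, matching TBI1 being disabled.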
*/ 1735 ptr &= sextract64(ptr, 0, 56); 1736 #endif 1737 return ptr; 1738 } 1739 1740 static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr) 1741 { 1742 #ifdef CONFIG_USER_ONLY 1743 int64_t clean_ptr = sextract64(ptr, 0, 56); 1744 if (tbi_check(desc, clean_ptr < 0)) { 1745 ptr = clean_ptr; 1746 } 1747 #endif 1748 return ptr; 1749 } 1750 1751 /* Values for M-profile PSR.ECI for MVE insns */ 1752 enum MVEECIState { 1753 ECI_NONE = 0, /* No completed beats */ 1754 ECI_A0 = 1, /* Completed: A0 */ 1755 ECI_A0A1 = 2, /* Completed: A0, A1 */ 1756 /* 3 is reserved */ 1757 ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */ 1758 ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */ 1759 /* All other values reserved */ 1760 }; 1761 1762 /* Definitions for the PMU registers */ 1763 #define PMCRN_MASK 0xf800 1764 #define PMCRN_SHIFT 11 1765 #define PMCRLP 0x80 1766 #define PMCRLC 0x40 1767 #define PMCRDP 0x20 1768 #define PMCRX 0x10 1769 #define PMCRD 0x8 1770 #define PMCRC 0x4 1771 #define PMCRP 0x2 1772 #define PMCRE 0x1 1773 /* 1774 * Mask of PMCR bits writable by guest (not including WO bits like C, P, 1775 * which can be written as 1 to trigger behaviour but which stay RAZ). 1776 */ 1777 #define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE) 1778 1779 #define PMXEVTYPER_P 0x80000000 1780 #define PMXEVTYPER_U 0x40000000 1781 #define PMXEVTYPER_NSK 0x20000000 1782 #define PMXEVTYPER_NSU 0x10000000 1783 #define PMXEVTYPER_NSH 0x08000000 1784 #define PMXEVTYPER_M 0x04000000 1785 #define PMXEVTYPER_MT 0x02000000 1786 #define PMXEVTYPER_EVTCOUNT 0x0000ffff 1787 #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \ 1788 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \ 1789 PMXEVTYPER_M | PMXEVTYPER_MT | \ 1790 PMXEVTYPER_EVTCOUNT) 1791 1792 #define PMCCFILTR 0xf8000000 1793 #define PMCCFILTR_M PMXEVTYPER_M 1794 #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M) 1795 1796 static inline uint32_t pmu_num_counters(CPUARMState *env) 1797 { 1798 ARMCPU *cpu = env_archcpu(env); 1799 1800 return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT; 1801 } 1802 1803 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ 1804 static inline uint64_t pmu_counter_mask(CPUARMState *env) 1805 { 1806 return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1); 1807 } 1808 1809 #ifdef TARGET_AARCH64 1810 GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg); 1811 int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg); 1812 int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg); 1813 int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg); 1814 int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg); 1815 int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg); 1816 int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg); 1817 int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg); 1818 int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg); 1819 void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp); 1820 void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp); 1821 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp); 1822 void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp); 1823 void aarch64_max_tcg_initfn(Object *obj); 1824 void aarch64_add_pauth_properties(Object *obj); 1825 void aarch64_add_sve_properties(Object *obj); 1826 void aarch64_add_sme_properties(Object *obj); 1827 #endif 1828 1829 /* Read the CONTROL register as the MRS instruction would. 
#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (e.g. by changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
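
/*
 * Illustrative note (not part of the original header): for a 48-bit
 * virtual address space (param.tsz == 16), bot_pac_bit is 48, so the
 * returned mask covers bits [55:48] when param.tbi is set (top byte
 * ignored) and bits [63:48] when it is clear.
 */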
/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen in the case where EL0 is AArch32.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable, as most of the time when debugging kernels
 * you never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint, so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and the counter for the specified timer, as used for direct register
 * accesses.
 */
uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);

/*
 * Return mask of ARMMMUIdxBit values corresponding to an "invalidate
 * all EL1" scope; this covers stage 1 and stage 2.
 */
int alle1_tlbmask(CPUARMState *env);

/* Set the float_status behaviour to match the Arm defaults */
void arm_set_default_fp_behaviours(float_status *s);
/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
void arm_set_ah_fp_behaviours(float_status *s);
/* Read the float_status info and return the appropriate FPSR value */
uint32_t vfp_get_fpsr_from_host(CPUARMState *env);
/* Clear the exception status flags from all float_status fields */
void vfp_clear_float_status_exc_flags(CPUARMState *env);
/*
 * Update float_status fields to handle the bits of the FPCR
 * specified by mask changing to the values in val.
 */
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);

#endif