1 /* 2 * ARM generic helpers. 3 * 4 * This code is licensed under the GNU GPL v2 or later. 5 * 6 * SPDX-License-Identifier: GPL-2.0-or-later 7 */ 8 9 #include "qemu/osdep.h" 10 #include "qemu/log.h" 11 #include "trace.h" 12 #include "cpu.h" 13 #include "internals.h" 14 #include "cpu-features.h" 15 #include "exec/helper-proto.h" 16 #include "exec/page-protection.h" 17 #include "qemu/main-loop.h" 18 #include "qemu/timer.h" 19 #include "qemu/bitops.h" 20 #include "qemu/qemu-print.h" 21 #include "exec/cputlb.h" 22 #include "exec/exec-all.h" 23 #include "exec/translation-block.h" 24 #include "hw/irq.h" 25 #include "system/cpu-timers.h" 26 #include "system/kvm.h" 27 #include "system/tcg.h" 28 #include "qapi/error.h" 29 #include "qemu/guest-random.h" 30 #ifdef CONFIG_TCG 31 #include "semihosting/common-semi.h" 32 #endif 33 #include "cpregs.h" 34 #include "target/arm/gtimer.h" 35 36 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ 37 38 static void switch_mode(CPUARMState *env, int mode); 39 40 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) 41 { 42 assert(ri->fieldoffset); 43 if (cpreg_field_is_64bit(ri)) { 44 return CPREG_FIELD64(env, ri); 45 } else { 46 return CPREG_FIELD32(env, ri); 47 } 48 } 49 50 void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 51 { 52 assert(ri->fieldoffset); 53 if (cpreg_field_is_64bit(ri)) { 54 CPREG_FIELD64(env, ri) = value; 55 } else { 56 CPREG_FIELD32(env, ri) = value; 57 } 58 } 59 60 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) 61 { 62 return (char *)env + ri->fieldoffset; 63 } 64 65 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) 66 { 67 /* Raw read of a coprocessor register (as needed for migration, etc). */ 68 if (ri->type & ARM_CP_CONST) { 69 return ri->resetvalue; 70 } else if (ri->raw_readfn) { 71 return ri->raw_readfn(env, ri); 72 } else if (ri->readfn) { 73 return ri->readfn(env, ri); 74 } else { 75 return raw_read(env, ri); 76 } 77 } 78 79 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, 80 uint64_t v) 81 { 82 /* 83 * Raw write of a coprocessor register (as needed for migration, etc). 84 * Note that constant registers are treated as write-ignored; the 85 * caller should check for success by whether a readback gives the 86 * value written. 87 */ 88 if (ri->type & ARM_CP_CONST) { 89 return; 90 } else if (ri->raw_writefn) { 91 ri->raw_writefn(env, ri, v); 92 } else if (ri->writefn) { 93 ri->writefn(env, ri, v); 94 } else { 95 raw_write(env, ri, v); 96 } 97 } 98 99 static bool raw_accessors_invalid(const ARMCPRegInfo *ri) 100 { 101 /* 102 * Return true if the regdef would cause an assertion if you called 103 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a 104 * program bug for it not to have the NO_RAW flag). 105 * NB that returning false here doesn't necessarily mean that calling 106 * read/write_raw_cp_reg() is safe, because we can't distinguish "has 107 * read/write access functions which are safe for raw use" from "has 108 * read/write access functions which have side effects but has forgotten 109 * to provide raw access functions". 110 * The tests here line up with the conditions in read/write_raw_cp_reg() 111 * and assertions in raw_read()/raw_write(). 
112 */ 113 if ((ri->type & ARM_CP_CONST) || 114 ri->fieldoffset || 115 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) { 116 return false; 117 } 118 return true; 119 } 120 121 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync) 122 { 123 /* Write the coprocessor state from cpu->env to the (index,value) list. */ 124 int i; 125 bool ok = true; 126 127 for (i = 0; i < cpu->cpreg_array_len; i++) { 128 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 129 const ARMCPRegInfo *ri; 130 uint64_t newval; 131 132 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 133 if (!ri) { 134 ok = false; 135 continue; 136 } 137 if (ri->type & ARM_CP_NO_RAW) { 138 continue; 139 } 140 141 newval = read_raw_cp_reg(&cpu->env, ri); 142 if (kvm_sync) { 143 /* 144 * Only sync if the previous list->cpustate sync succeeded. 145 * Rather than tracking the success/failure state for every 146 * item in the list, we just recheck "does the raw write we must 147 * have made in write_list_to_cpustate() read back OK" here. 148 */ 149 uint64_t oldval = cpu->cpreg_values[i]; 150 151 if (oldval == newval) { 152 continue; 153 } 154 155 write_raw_cp_reg(&cpu->env, ri, oldval); 156 if (read_raw_cp_reg(&cpu->env, ri) != oldval) { 157 continue; 158 } 159 160 write_raw_cp_reg(&cpu->env, ri, newval); 161 } 162 cpu->cpreg_values[i] = newval; 163 } 164 return ok; 165 } 166 167 bool write_list_to_cpustate(ARMCPU *cpu) 168 { 169 int i; 170 bool ok = true; 171 172 for (i = 0; i < cpu->cpreg_array_len; i++) { 173 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 174 uint64_t v = cpu->cpreg_values[i]; 175 const ARMCPRegInfo *ri; 176 177 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 178 if (!ri) { 179 ok = false; 180 continue; 181 } 182 if (ri->type & ARM_CP_NO_RAW) { 183 continue; 184 } 185 /* 186 * Write value and confirm it reads back as written 187 * (to catch read-only registers and partially read-only 188 * registers where the incoming migration value doesn't match) 189 */ 190 write_raw_cp_reg(&cpu->env, ri, v); 191 if (read_raw_cp_reg(&cpu->env, ri) != v) { 192 ok = false; 193 } 194 } 195 return ok; 196 } 197 198 static void add_cpreg_to_list(gpointer key, gpointer opaque) 199 { 200 ARMCPU *cpu = opaque; 201 uint32_t regidx = (uintptr_t)key; 202 const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 203 204 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { 205 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); 206 /* The value array need not be initialized at this point */ 207 cpu->cpreg_array_len++; 208 } 209 } 210 211 static void count_cpreg(gpointer key, gpointer opaque) 212 { 213 ARMCPU *cpu = opaque; 214 const ARMCPRegInfo *ri; 215 216 ri = g_hash_table_lookup(cpu->cp_regs, key); 217 218 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { 219 cpu->cpreg_array_len++; 220 } 221 } 222 223 static gint cpreg_key_compare(gconstpointer a, gconstpointer b) 224 { 225 uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a); 226 uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b); 227 228 if (aidx > bidx) { 229 return 1; 230 } 231 if (aidx < bidx) { 232 return -1; 233 } 234 return 0; 235 } 236 237 void init_cpreg_list(ARMCPU *cpu) 238 { 239 /* 240 * Initialise the cpreg_tuples[] array based on the cp_regs hash. 241 * Note that we require cpreg_tuples[] to be sorted by key ID. 
242 */ 243 GList *keys; 244 int arraylen; 245 246 keys = g_hash_table_get_keys(cpu->cp_regs); 247 keys = g_list_sort(keys, cpreg_key_compare); 248 249 cpu->cpreg_array_len = 0; 250 251 g_list_foreach(keys, count_cpreg, cpu); 252 253 arraylen = cpu->cpreg_array_len; 254 cpu->cpreg_indexes = g_new(uint64_t, arraylen); 255 cpu->cpreg_values = g_new(uint64_t, arraylen); 256 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); 257 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); 258 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; 259 cpu->cpreg_array_len = 0; 260 261 g_list_foreach(keys, add_cpreg_to_list, cpu); 262 263 assert(cpu->cpreg_array_len == arraylen); 264 265 g_list_free(keys); 266 } 267 268 static bool arm_pan_enabled(CPUARMState *env) 269 { 270 if (is_a64(env)) { 271 if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) { 272 return false; 273 } 274 return env->pstate & PSTATE_PAN; 275 } else { 276 return env->uncached_cpsr & CPSR_PAN; 277 } 278 } 279 280 /* 281 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0. 282 */ 283 static CPAccessResult access_el3_aa32ns(CPUARMState *env, 284 const ARMCPRegInfo *ri, 285 bool isread) 286 { 287 if (!is_a64(env) && arm_current_el(env) == 3 && 288 arm_is_secure_below_el3(env)) { 289 return CP_ACCESS_UNDEFINED; 290 } 291 return CP_ACCESS_OK; 292 } 293 294 /* 295 * Some secure-only AArch32 registers trap to EL3 if used from 296 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). 297 * Note that an access from Secure EL1 can only happen if EL3 is AArch64. 298 * We assume that the .access field is set to PL1_RW. 299 */ 300 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, 301 const ARMCPRegInfo *ri, 302 bool isread) 303 { 304 if (arm_current_el(env) == 3) { 305 return CP_ACCESS_OK; 306 } 307 if (arm_is_secure_below_el3(env)) { 308 if (env->cp15.scr_el3 & SCR_EEL2) { 309 return CP_ACCESS_TRAP_EL2; 310 } 311 return CP_ACCESS_TRAP_EL3; 312 } 313 /* This will be EL1 NS and EL2 NS, which just UNDEF */ 314 return CP_ACCESS_UNDEFINED; 315 } 316 317 /* 318 * Check for traps to performance monitor registers, which are controlled 319 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. 320 */ 321 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, 322 bool isread) 323 { 324 int el = arm_current_el(env); 325 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 326 327 if (el < 2 && (mdcr_el2 & MDCR_TPM)) { 328 return CP_ACCESS_TRAP_EL2; 329 } 330 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 331 return CP_ACCESS_TRAP_EL3; 332 } 333 return CP_ACCESS_OK; 334 } 335 336 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */ 337 CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri, 338 bool isread) 339 { 340 if (arm_current_el(env) == 1) { 341 uint64_t trap = isread ? HCR_TRVM : HCR_TVM; 342 if (arm_hcr_el2_eff(env) & trap) { 343 return CP_ACCESS_TRAP_EL2; 344 } 345 } 346 return CP_ACCESS_OK; 347 } 348 349 /* Check for traps from EL1 due to HCR_EL2.TSW. */ 350 static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri, 351 bool isread) 352 { 353 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) { 354 return CP_ACCESS_TRAP_EL2; 355 } 356 return CP_ACCESS_OK; 357 } 358 359 /* Check for traps from EL1 due to HCR_EL2.TACR. 
*/ 360 static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri, 361 bool isread) 362 { 363 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) { 364 return CP_ACCESS_TRAP_EL2; 365 } 366 return CP_ACCESS_OK; 367 } 368 369 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 370 { 371 ARMCPU *cpu = env_archcpu(env); 372 373 raw_write(env, ri, value); 374 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ 375 } 376 377 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 378 { 379 ARMCPU *cpu = env_archcpu(env); 380 381 if (raw_read(env, ri) != value) { 382 /* 383 * Unlike real hardware the qemu TLB uses virtual addresses, 384 * not modified virtual addresses, so this causes a TLB flush. 385 */ 386 tlb_flush(CPU(cpu)); 387 raw_write(env, ri, value); 388 } 389 } 390 391 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, 392 uint64_t value) 393 { 394 ARMCPU *cpu = env_archcpu(env); 395 396 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) 397 && !extended_addresses_enabled(env)) { 398 /* 399 * For VMSA (when not using the LPAE long descriptor page table 400 * format) this register includes the ASID, so do a TLB flush. 401 * For PMSA it is purely a process ID and no action is needed. 402 */ 403 tlb_flush(CPU(cpu)); 404 } 405 raw_write(env, ri, value); 406 } 407 408 int alle1_tlbmask(CPUARMState *env) 409 { 410 /* 411 * Note that the 'ALL' scope must invalidate both stage 1 and 412 * stage 2 translations, whereas most other scopes only invalidate 413 * stage 1 translations. 414 * 415 * For AArch32 this is only used for TLBIALLNSNH and VTTBR 416 * writes, so only needs to apply to NS PL1&0, not S PL1&0. 417 */ 418 return (ARMMMUIdxBit_E10_1 | 419 ARMMMUIdxBit_E10_1_PAN | 420 ARMMMUIdxBit_E10_0 | 421 ARMMMUIdxBit_Stage2 | 422 ARMMMUIdxBit_Stage2_S); 423 } 424 425 static const ARMCPRegInfo cp_reginfo[] = { 426 /* 427 * Define the secure and non-secure FCSE identifier CP registers 428 * separately because there is no secure bank in V8 (no _EL3). This allows 429 * the secure register to be properly reset and migrated. There is also no 430 * v8 EL1 version of the register so the non-secure instance stands alone. 431 */ 432 { .name = "FCSEIDR", 433 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 434 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 435 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns), 436 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 437 { .name = "FCSEIDR_S", 438 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 439 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 440 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), 441 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 442 /* 443 * Define the secure and non-secure context identifier CP registers 444 * separately because there is no secure bank in V8 (no _EL3). This allows 445 * the secure register to be properly reset and migrated. In the 446 * non-secure case, the 32-bit register will have reset and migration 447 * disabled during registration as it is handled by the 64-bit instance. 
448 */ 449 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH, 450 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 451 .access = PL1_RW, .accessfn = access_tvm_trvm, 452 .fgt = FGT_CONTEXTIDR_EL1, 453 .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1, 454 .secure = ARM_CP_SECSTATE_NS, 455 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), 456 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 457 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32, 458 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 459 .access = PL1_RW, .accessfn = access_tvm_trvm, 460 .secure = ARM_CP_SECSTATE_S, 461 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), 462 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 463 }; 464 465 static const ARMCPRegInfo not_v8_cp_reginfo[] = { 466 /* 467 * NB: Some of these registers exist in v8 but with more precise 468 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). 469 */ 470 /* MMU Domain access control / MPU write buffer control */ 471 { .name = "DACR", 472 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY, 473 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 474 .writefn = dacr_write, .raw_writefn = raw_write, 475 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 476 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 477 /* 478 * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. 479 * For v6 and v5, these mappings are overly broad. 480 */ 481 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, 482 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 483 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, 484 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 485 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, 486 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 487 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, 488 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 489 /* Cache maintenance ops; some of this space may be overridden later. */ 490 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 491 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 492 .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, 493 }; 494 495 static const ARMCPRegInfo not_v6_cp_reginfo[] = { 496 /* 497 * Not all pre-v6 cores implemented this WFI, so this is slightly 498 * over-broad. 499 */ 500 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, 501 .access = PL1_W, .type = ARM_CP_WFI }, 502 }; 503 504 static const ARMCPRegInfo not_v7_cp_reginfo[] = { 505 /* 506 * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which 507 * is UNPREDICTABLE; we choose to NOP as most implementations do). 508 */ 509 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 510 .access = PL1_W, .type = ARM_CP_WFI }, 511 /* 512 * L1 cache lockdown. Not architectural in v6 and earlier but in practice 513 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and 514 * OMAPCP will override this space. 
515 */ 516 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, 517 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), 518 .resetvalue = 0 }, 519 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, 520 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), 521 .resetvalue = 0 }, 522 /* v6 doesn't have the cache ID registers but Linux reads them anyway */ 523 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, 524 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 525 .resetvalue = 0 }, 526 /* 527 * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; 528 * implementing it as RAZ means the "debug architecture version" bits 529 * will read as a reserved value, which should cause Linux to not try 530 * to use the debug hardware. 531 */ 532 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 533 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 534 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, 535 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, 536 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, 537 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, 538 }; 539 540 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, 541 uint64_t value) 542 { 543 uint32_t mask = 0; 544 545 /* In ARMv8 most bits of CPACR_EL1 are RES0. */ 546 if (!arm_feature(env, ARM_FEATURE_V8)) { 547 /* 548 * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. 549 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. 550 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. 551 */ 552 if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { 553 /* VFP coprocessor: cp10 & cp11 [23:20] */ 554 mask |= R_CPACR_ASEDIS_MASK | 555 R_CPACR_D32DIS_MASK | 556 R_CPACR_CP11_MASK | 557 R_CPACR_CP10_MASK; 558 559 if (!arm_feature(env, ARM_FEATURE_NEON)) { 560 /* ASEDIS [31] bit is RAO/WI */ 561 value |= R_CPACR_ASEDIS_MASK; 562 } 563 564 /* 565 * VFPv3 and upwards with NEON implement 32 double precision 566 * registers (D0-D31). 567 */ 568 if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) { 569 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ 570 value |= R_CPACR_D32DIS_MASK; 571 } 572 } 573 value &= mask; 574 } 575 576 /* 577 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 578 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 579 */ 580 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 581 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 582 mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK; 583 value = (value & ~mask) | (env->cp15.cpacr_el1 & mask); 584 } 585 586 env->cp15.cpacr_el1 = value; 587 } 588 589 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) 590 { 591 /* 592 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 593 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 594 */ 595 uint64_t value = env->cp15.cpacr_el1; 596 597 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 598 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 599 value = ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK); 600 } 601 return value; 602 } 603 604 605 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 606 { 607 /* 608 * Call cpacr_write() so that we reset with the correct RAO bits set 609 * for our CPU features. 
610 */ 611 cpacr_write(env, ri, 0); 612 } 613 614 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 615 bool isread) 616 { 617 if (arm_feature(env, ARM_FEATURE_V8)) { 618 /* Check if CPACR accesses are to be trapped to EL2 */ 619 if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) && 620 FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) { 621 return CP_ACCESS_TRAP_EL2; 622 /* Check if CPACR accesses are to be trapped to EL3 */ 623 } else if (arm_current_el(env) < 3 && 624 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { 625 return CP_ACCESS_TRAP_EL3; 626 } 627 } 628 629 return CP_ACCESS_OK; 630 } 631 632 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, 633 bool isread) 634 { 635 /* Check if CPTR accesses are set to trap to EL3 */ 636 if (arm_current_el(env) == 2 && 637 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { 638 return CP_ACCESS_TRAP_EL3; 639 } 640 641 return CP_ACCESS_OK; 642 } 643 644 static const ARMCPRegInfo v6_cp_reginfo[] = { 645 /* prefetch by MVA in v6, NOP in v7 */ 646 { .name = "MVA_prefetch", 647 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, 648 .access = PL1_W, .type = ARM_CP_NOP }, 649 /* 650 * We need to break the TB after ISB to execute self-modifying code 651 * correctly and also to take any pending interrupts immediately. 652 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. 653 */ 654 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, 655 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, 656 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, 657 .access = PL0_W, .type = ARM_CP_NOP }, 658 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, 659 .access = PL0_W, .type = ARM_CP_NOP }, 660 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, 661 .access = PL1_RW, .accessfn = access_tvm_trvm, 662 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), 663 offsetof(CPUARMState, cp15.ifar_ns) }, 664 .resetvalue = 0, }, 665 /* 666 * Watchpoint Fault Address Register : should actually only be present 667 * for 1136, 1176, 11MPCore. 668 */ 669 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, 670 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, 671 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, 672 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, 673 .fgt = FGT_CPACR_EL1, 674 .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1, 675 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), 676 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, 677 }; 678 679 typedef struct pm_event { 680 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ 681 /* If the event is supported on this CPU (used to generate PMCEID[01]) */ 682 bool (*supported)(CPUARMState *); 683 /* 684 * Retrieve the current count of the underlying event. The programmed 685 * counters hold a difference from the return value from this function 686 */ 687 uint64_t (*get_count)(CPUARMState *); 688 /* 689 * Return how many nanoseconds it will take (at a minimum) for count events 690 * to occur. A negative value indicates the counter will never overflow, or 691 * that the counter has otherwise arranged for the overflow bit to be set 692 * and the PMU interrupt to be raised on overflow. 
693 */ 694 int64_t (*ns_per_count)(uint64_t); 695 } pm_event; 696 697 static bool event_always_supported(CPUARMState *env) 698 { 699 return true; 700 } 701 702 static uint64_t swinc_get_count(CPUARMState *env) 703 { 704 /* 705 * SW_INCR events are written directly to the pmevcntr's by writes to 706 * PMSWINC, so there is no underlying count maintained by the PMU itself 707 */ 708 return 0; 709 } 710 711 static int64_t swinc_ns_per(uint64_t ignored) 712 { 713 return -1; 714 } 715 716 /* 717 * Return the underlying cycle count for the PMU cycle counters. If we're in 718 * usermode, simply return 0. 719 */ 720 static uint64_t cycles_get_count(CPUARMState *env) 721 { 722 #ifndef CONFIG_USER_ONLY 723 return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 724 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); 725 #else 726 return cpu_get_host_ticks(); 727 #endif 728 } 729 730 #ifndef CONFIG_USER_ONLY 731 static int64_t cycles_ns_per(uint64_t cycles) 732 { 733 return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles; 734 } 735 736 static bool instructions_supported(CPUARMState *env) 737 { 738 /* Precise instruction counting */ 739 return icount_enabled() == ICOUNT_PRECISE; 740 } 741 742 static uint64_t instructions_get_count(CPUARMState *env) 743 { 744 assert(icount_enabled() == ICOUNT_PRECISE); 745 return (uint64_t)icount_get_raw(); 746 } 747 748 static int64_t instructions_ns_per(uint64_t icount) 749 { 750 assert(icount_enabled() == ICOUNT_PRECISE); 751 return icount_to_ns((int64_t)icount); 752 } 753 #endif 754 755 static bool pmuv3p1_events_supported(CPUARMState *env) 756 { 757 /* For events which are supported in any v8.1 PMU */ 758 return cpu_isar_feature(any_pmuv3p1, env_archcpu(env)); 759 } 760 761 static bool pmuv3p4_events_supported(CPUARMState *env) 762 { 763 /* For events which are supported in any v8.1 PMU */ 764 return cpu_isar_feature(any_pmuv3p4, env_archcpu(env)); 765 } 766 767 static uint64_t zero_event_get_count(CPUARMState *env) 768 { 769 /* For events which on QEMU never fire, so their count is always zero */ 770 return 0; 771 } 772 773 static int64_t zero_event_ns_per(uint64_t cycles) 774 { 775 /* An event which never fires can never overflow */ 776 return -1; 777 } 778 779 static const pm_event pm_events[] = { 780 { .number = 0x000, /* SW_INCR */ 781 .supported = event_always_supported, 782 .get_count = swinc_get_count, 783 .ns_per_count = swinc_ns_per, 784 }, 785 #ifndef CONFIG_USER_ONLY 786 { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */ 787 .supported = instructions_supported, 788 .get_count = instructions_get_count, 789 .ns_per_count = instructions_ns_per, 790 }, 791 { .number = 0x011, /* CPU_CYCLES, Cycle */ 792 .supported = event_always_supported, 793 .get_count = cycles_get_count, 794 .ns_per_count = cycles_ns_per, 795 }, 796 #endif 797 { .number = 0x023, /* STALL_FRONTEND */ 798 .supported = pmuv3p1_events_supported, 799 .get_count = zero_event_get_count, 800 .ns_per_count = zero_event_ns_per, 801 }, 802 { .number = 0x024, /* STALL_BACKEND */ 803 .supported = pmuv3p1_events_supported, 804 .get_count = zero_event_get_count, 805 .ns_per_count = zero_event_ns_per, 806 }, 807 { .number = 0x03c, /* STALL */ 808 .supported = pmuv3p4_events_supported, 809 .get_count = zero_event_get_count, 810 .ns_per_count = zero_event_ns_per, 811 }, 812 }; 813 814 /* 815 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of 816 * events (i.e. 
the statistical profiling extension), this implementation 817 * should first be updated to something sparse instead of the current 818 * supported_event_map[] array. 819 */ 820 #define MAX_EVENT_ID 0x3c 821 #define UNSUPPORTED_EVENT UINT16_MAX 822 static uint16_t supported_event_map[MAX_EVENT_ID + 1]; 823 824 /* 825 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map 826 * of ARM event numbers to indices in our pm_events array. 827 * 828 * Note: Events in the 0x40XX range are not currently supported. 829 */ 830 void pmu_init(ARMCPU *cpu) 831 { 832 unsigned int i; 833 834 /* 835 * Empty supported_event_map and cpu->pmceid[01] before adding supported 836 * events to them 837 */ 838 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) { 839 supported_event_map[i] = UNSUPPORTED_EVENT; 840 } 841 cpu->pmceid0 = 0; 842 cpu->pmceid1 = 0; 843 844 for (i = 0; i < ARRAY_SIZE(pm_events); i++) { 845 const pm_event *cnt = &pm_events[i]; 846 assert(cnt->number <= MAX_EVENT_ID); 847 /* We do not currently support events in the 0x40xx range */ 848 assert(cnt->number <= 0x3f); 849 850 if (cnt->supported(&cpu->env)) { 851 supported_event_map[cnt->number] = i; 852 uint64_t event_mask = 1ULL << (cnt->number & 0x1f); 853 if (cnt->number & 0x20) { 854 cpu->pmceid1 |= event_mask; 855 } else { 856 cpu->pmceid0 |= event_mask; 857 } 858 } 859 } 860 } 861 862 /* 863 * Check at runtime whether a PMU event is supported for the current machine 864 */ 865 static bool event_supported(uint16_t number) 866 { 867 if (number > MAX_EVENT_ID) { 868 return false; 869 } 870 return supported_event_map[number] != UNSUPPORTED_EVENT; 871 } 872 873 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, 874 bool isread) 875 { 876 /* 877 * Performance monitor registers user accessibility is controlled 878 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable 879 * trapping to EL2 or EL3 for other accesses. 
880 */ 881 int el = arm_current_el(env); 882 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 883 884 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { 885 return CP_ACCESS_TRAP_EL1; 886 } 887 if (el < 2 && (mdcr_el2 & MDCR_TPM)) { 888 return CP_ACCESS_TRAP_EL2; 889 } 890 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 891 return CP_ACCESS_TRAP_EL3; 892 } 893 894 return CP_ACCESS_OK; 895 } 896 897 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, 898 const ARMCPRegInfo *ri, 899 bool isread) 900 { 901 /* ER: event counter read trap control */ 902 if (arm_feature(env, ARM_FEATURE_V8) 903 && arm_current_el(env) == 0 904 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 905 && isread) { 906 return CP_ACCESS_OK; 907 } 908 909 return pmreg_access(env, ri, isread); 910 } 911 912 static CPAccessResult pmreg_access_swinc(CPUARMState *env, 913 const ARMCPRegInfo *ri, 914 bool isread) 915 { 916 /* SW: software increment write trap control */ 917 if (arm_feature(env, ARM_FEATURE_V8) 918 && arm_current_el(env) == 0 919 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 920 && !isread) { 921 return CP_ACCESS_OK; 922 } 923 924 return pmreg_access(env, ri, isread); 925 } 926 927 static CPAccessResult pmreg_access_selr(CPUARMState *env, 928 const ARMCPRegInfo *ri, 929 bool isread) 930 { 931 /* ER: event counter read trap control */ 932 if (arm_feature(env, ARM_FEATURE_V8) 933 && arm_current_el(env) == 0 934 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { 935 return CP_ACCESS_OK; 936 } 937 938 return pmreg_access(env, ri, isread); 939 } 940 941 static CPAccessResult pmreg_access_ccntr(CPUARMState *env, 942 const ARMCPRegInfo *ri, 943 bool isread) 944 { 945 /* CR: cycle counter read trap control */ 946 if (arm_feature(env, ARM_FEATURE_V8) 947 && arm_current_el(env) == 0 948 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 949 && isread) { 950 return CP_ACCESS_OK; 951 } 952 953 return pmreg_access(env, ri, isread); 954 } 955 956 /* 957 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at. 958 * We use these to decide whether we need to wrap a write to MDCR_EL2 959 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls. 960 */ 961 #define MDCR_EL2_PMU_ENABLE_BITS \ 962 (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP) 963 #define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD) 964 965 /* 966 * Returns true if the counter (pass 31 for PMCCNTR) should count events using 967 * the current EL, security state, and register configuration. 968 */ 969 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) 970 { 971 uint64_t filter; 972 bool e, p, u, nsk, nsu, nsh, m; 973 bool enabled, prohibited = false, filtered; 974 bool secure = arm_is_secure(env); 975 int el = arm_current_el(env); 976 uint64_t mdcr_el2; 977 uint8_t hpmn; 978 979 /* 980 * We might be called for M-profile cores where MDCR_EL2 doesn't 981 * exist and arm_mdcr_el2_eff() will assert, so this early-exit check 982 * must be before we read that value. 983 */ 984 if (!arm_feature(env, ARM_FEATURE_PMU)) { 985 return false; 986 } 987 988 mdcr_el2 = arm_mdcr_el2_eff(env); 989 hpmn = mdcr_el2 & MDCR_HPMN; 990 991 if (!arm_feature(env, ARM_FEATURE_EL2) || 992 (counter < hpmn || counter == 31)) { 993 e = env->cp15.c9_pmcr & PMCRE; 994 } else { 995 e = mdcr_el2 & MDCR_HPME; 996 } 997 enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); 998 999 /* Is event counting prohibited? 
*/ 1000 if (el == 2 && (counter < hpmn || counter == 31)) { 1001 prohibited = mdcr_el2 & MDCR_HPMD; 1002 } 1003 if (secure) { 1004 prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME); 1005 } 1006 1007 if (counter == 31) { 1008 /* 1009 * The cycle counter defaults to running. PMCR.DP says "disable 1010 * the cycle counter when event counting is prohibited". 1011 * Some MDCR bits disable the cycle counter specifically. 1012 */ 1013 prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP; 1014 if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { 1015 if (secure) { 1016 prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD); 1017 } 1018 if (el == 2) { 1019 prohibited = prohibited || (mdcr_el2 & MDCR_HCCD); 1020 } 1021 } 1022 } 1023 1024 if (counter == 31) { 1025 filter = env->cp15.pmccfiltr_el0; 1026 } else { 1027 filter = env->cp15.c14_pmevtyper[counter]; 1028 } 1029 1030 p = filter & PMXEVTYPER_P; 1031 u = filter & PMXEVTYPER_U; 1032 nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK); 1033 nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU); 1034 nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH); 1035 m = arm_el_is_aa64(env, 1) && 1036 arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M); 1037 1038 if (el == 0) { 1039 filtered = secure ? u : u != nsu; 1040 } else if (el == 1) { 1041 filtered = secure ? p : p != nsk; 1042 } else if (el == 2) { 1043 filtered = !nsh; 1044 } else { /* EL3 */ 1045 filtered = m != p; 1046 } 1047 1048 if (counter != 31) { 1049 /* 1050 * If not checking PMCCNTR, ensure the counter is setup to an event we 1051 * support 1052 */ 1053 uint16_t event = filter & PMXEVTYPER_EVTCOUNT; 1054 if (!event_supported(event)) { 1055 return false; 1056 } 1057 } 1058 1059 return enabled && !prohibited && !filtered; 1060 } 1061 1062 static void pmu_update_irq(CPUARMState *env) 1063 { 1064 ARMCPU *cpu = env_archcpu(env); 1065 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && 1066 (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); 1067 } 1068 1069 static bool pmccntr_clockdiv_enabled(CPUARMState *env) 1070 { 1071 /* 1072 * Return true if the clock divider is enabled and the cycle counter 1073 * is supposed to tick only once every 64 clock cycles. This is 1074 * controlled by PMCR.D, but if PMCR.LC is set to enable the long 1075 * (64-bit) cycle counter PMCR.D has no effect. 1076 */ 1077 return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD; 1078 } 1079 1080 static bool pmevcntr_is_64_bit(CPUARMState *env, int counter) 1081 { 1082 /* Return true if the specified event counter is configured to be 64 bit */ 1083 1084 /* This isn't intended to be used with the cycle counter */ 1085 assert(counter < 31); 1086 1087 if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { 1088 return false; 1089 } 1090 1091 if (arm_feature(env, ARM_FEATURE_EL2)) { 1092 /* 1093 * MDCR_EL2.HLP still applies even when EL2 is disabled in the 1094 * current security state, so we don't use arm_mdcr_el2_eff() here. 1095 */ 1096 bool hlp = env->cp15.mdcr_el2 & MDCR_HLP; 1097 int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; 1098 1099 if (counter >= hpmn) { 1100 return hlp; 1101 } 1102 } 1103 return env->cp15.c9_pmcr & PMCRLP; 1104 } 1105 1106 /* 1107 * Ensure c15_ccnt is the guest-visible count so that operations such as 1108 * enabling/disabling the counter or filtering, modifying the count itself, 1109 * etc. can be done logically. This is essentially a no-op if the counter is 1110 * not enabled at the time of the call. 
1111 */ 1112 static void pmccntr_op_start(CPUARMState *env) 1113 { 1114 uint64_t cycles = cycles_get_count(env); 1115 1116 if (pmu_counter_enabled(env, 31)) { 1117 uint64_t eff_cycles = cycles; 1118 if (pmccntr_clockdiv_enabled(env)) { 1119 eff_cycles /= 64; 1120 } 1121 1122 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta; 1123 1124 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \ 1125 1ull << 63 : 1ull << 31; 1126 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { 1127 env->cp15.c9_pmovsr |= (1ULL << 31); 1128 pmu_update_irq(env); 1129 } 1130 1131 env->cp15.c15_ccnt = new_pmccntr; 1132 } 1133 env->cp15.c15_ccnt_delta = cycles; 1134 } 1135 1136 /* 1137 * If PMCCNTR is enabled, recalculate the delta between the clock and the 1138 * guest-visible count. A call to pmccntr_op_finish should follow every call to 1139 * pmccntr_op_start. 1140 */ 1141 static void pmccntr_op_finish(CPUARMState *env) 1142 { 1143 if (pmu_counter_enabled(env, 31)) { 1144 #ifndef CONFIG_USER_ONLY 1145 /* Calculate when the counter will next overflow */ 1146 uint64_t remaining_cycles = -env->cp15.c15_ccnt; 1147 if (!(env->cp15.c9_pmcr & PMCRLC)) { 1148 remaining_cycles = (uint32_t)remaining_cycles; 1149 } 1150 int64_t overflow_in = cycles_ns_per(remaining_cycles); 1151 1152 if (overflow_in > 0) { 1153 int64_t overflow_at; 1154 1155 if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1156 overflow_in, &overflow_at)) { 1157 ARMCPU *cpu = env_archcpu(env); 1158 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1159 } 1160 } 1161 #endif 1162 1163 uint64_t prev_cycles = env->cp15.c15_ccnt_delta; 1164 if (pmccntr_clockdiv_enabled(env)) { 1165 prev_cycles /= 64; 1166 } 1167 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; 1168 } 1169 } 1170 1171 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) 1172 { 1173 1174 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; 1175 uint64_t count = 0; 1176 if (event_supported(event)) { 1177 uint16_t event_idx = supported_event_map[event]; 1178 count = pm_events[event_idx].get_count(env); 1179 } 1180 1181 if (pmu_counter_enabled(env, counter)) { 1182 uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; 1183 uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ? 
1184 1ULL << 63 : 1ULL << 31; 1185 1186 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) { 1187 env->cp15.c9_pmovsr |= (1 << counter); 1188 pmu_update_irq(env); 1189 } 1190 env->cp15.c14_pmevcntr[counter] = new_pmevcntr; 1191 } 1192 env->cp15.c14_pmevcntr_delta[counter] = count; 1193 } 1194 1195 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) 1196 { 1197 if (pmu_counter_enabled(env, counter)) { 1198 #ifndef CONFIG_USER_ONLY 1199 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; 1200 uint16_t event_idx = supported_event_map[event]; 1201 uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1); 1202 int64_t overflow_in; 1203 1204 if (!pmevcntr_is_64_bit(env, counter)) { 1205 delta = (uint32_t)delta; 1206 } 1207 overflow_in = pm_events[event_idx].ns_per_count(delta); 1208 1209 if (overflow_in > 0) { 1210 int64_t overflow_at; 1211 1212 if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1213 overflow_in, &overflow_at)) { 1214 ARMCPU *cpu = env_archcpu(env); 1215 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1216 } 1217 } 1218 #endif 1219 1220 env->cp15.c14_pmevcntr_delta[counter] -= 1221 env->cp15.c14_pmevcntr[counter]; 1222 } 1223 } 1224 1225 void pmu_op_start(CPUARMState *env) 1226 { 1227 unsigned int i; 1228 pmccntr_op_start(env); 1229 for (i = 0; i < pmu_num_counters(env); i++) { 1230 pmevcntr_op_start(env, i); 1231 } 1232 } 1233 1234 void pmu_op_finish(CPUARMState *env) 1235 { 1236 unsigned int i; 1237 pmccntr_op_finish(env); 1238 for (i = 0; i < pmu_num_counters(env); i++) { 1239 pmevcntr_op_finish(env, i); 1240 } 1241 } 1242 1243 void pmu_pre_el_change(ARMCPU *cpu, void *ignored) 1244 { 1245 pmu_op_start(&cpu->env); 1246 } 1247 1248 void pmu_post_el_change(ARMCPU *cpu, void *ignored) 1249 { 1250 pmu_op_finish(&cpu->env); 1251 } 1252 1253 void arm_pmu_timer_cb(void *opaque) 1254 { 1255 ARMCPU *cpu = opaque; 1256 1257 /* 1258 * Update all the counter values based on the current underlying counts, 1259 * triggering interrupts to be raised, if necessary. pmu_op_finish() also 1260 * has the effect of setting the cpu->pmu_timer to the next earliest time a 1261 * counter may expire. 1262 */ 1263 pmu_op_start(&cpu->env); 1264 pmu_op_finish(&cpu->env); 1265 } 1266 1267 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1268 uint64_t value) 1269 { 1270 pmu_op_start(env); 1271 1272 if (value & PMCRC) { 1273 /* The counter has been reset */ 1274 env->cp15.c15_ccnt = 0; 1275 } 1276 1277 if (value & PMCRP) { 1278 unsigned int i; 1279 for (i = 0; i < pmu_num_counters(env); i++) { 1280 env->cp15.c14_pmevcntr[i] = 0; 1281 } 1282 } 1283 1284 env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK; 1285 env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK); 1286 1287 pmu_op_finish(env); 1288 } 1289 1290 static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1291 { 1292 uint64_t pmcr = env->cp15.c9_pmcr; 1293 1294 /* 1295 * If EL2 is implemented and enabled for the current security state, reads 1296 * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN. 
1297 */ 1298 if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) { 1299 pmcr &= ~PMCRN_MASK; 1300 pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT; 1301 } 1302 1303 return pmcr; 1304 } 1305 1306 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, 1307 uint64_t value) 1308 { 1309 unsigned int i; 1310 uint64_t overflow_mask, new_pmswinc; 1311 1312 for (i = 0; i < pmu_num_counters(env); i++) { 1313 /* Increment a counter's count iff: */ 1314 if ((value & (1 << i)) && /* counter's bit is set */ 1315 /* counter is enabled and not filtered */ 1316 pmu_counter_enabled(env, i) && 1317 /* counter is SW_INCR */ 1318 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { 1319 pmevcntr_op_start(env, i); 1320 1321 /* 1322 * Detect if this write causes an overflow since we can't predict 1323 * PMSWINC overflows like we can for other events 1324 */ 1325 new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; 1326 1327 overflow_mask = pmevcntr_is_64_bit(env, i) ? 1328 1ULL << 63 : 1ULL << 31; 1329 1330 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) { 1331 env->cp15.c9_pmovsr |= (1 << i); 1332 pmu_update_irq(env); 1333 } 1334 1335 env->cp15.c14_pmevcntr[i] = new_pmswinc; 1336 1337 pmevcntr_op_finish(env, i); 1338 } 1339 } 1340 } 1341 1342 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1343 { 1344 uint64_t ret; 1345 pmccntr_op_start(env); 1346 ret = env->cp15.c15_ccnt; 1347 pmccntr_op_finish(env); 1348 return ret; 1349 } 1350 1351 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1352 uint64_t value) 1353 { 1354 /* 1355 * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and 1356 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the 1357 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are 1358 * accessed. 
1359 */ 1360 env->cp15.c9_pmselr = value & 0x1f; 1361 } 1362 1363 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1364 uint64_t value) 1365 { 1366 pmccntr_op_start(env); 1367 env->cp15.c15_ccnt = value; 1368 pmccntr_op_finish(env); 1369 } 1370 1371 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, 1372 uint64_t value) 1373 { 1374 uint64_t cur_val = pmccntr_read(env, NULL); 1375 1376 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); 1377 } 1378 1379 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1380 uint64_t value) 1381 { 1382 pmccntr_op_start(env); 1383 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; 1384 pmccntr_op_finish(env); 1385 } 1386 1387 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri, 1388 uint64_t value) 1389 { 1390 pmccntr_op_start(env); 1391 /* M is not accessible from AArch32 */ 1392 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | 1393 (value & PMCCFILTR); 1394 pmccntr_op_finish(env); 1395 } 1396 1397 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) 1398 { 1399 /* M is not visible in AArch32 */ 1400 return env->cp15.pmccfiltr_el0 & PMCCFILTR; 1401 } 1402 1403 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1404 uint64_t value) 1405 { 1406 pmu_op_start(env); 1407 value &= pmu_counter_mask(env); 1408 env->cp15.c9_pmcnten |= value; 1409 pmu_op_finish(env); 1410 } 1411 1412 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1413 uint64_t value) 1414 { 1415 pmu_op_start(env); 1416 value &= pmu_counter_mask(env); 1417 env->cp15.c9_pmcnten &= ~value; 1418 pmu_op_finish(env); 1419 } 1420 1421 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1422 uint64_t value) 1423 { 1424 value &= pmu_counter_mask(env); 1425 env->cp15.c9_pmovsr &= ~value; 1426 pmu_update_irq(env); 1427 } 1428 1429 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1430 uint64_t value) 1431 { 1432 value &= pmu_counter_mask(env); 1433 env->cp15.c9_pmovsr |= value; 1434 pmu_update_irq(env); 1435 } 1436 1437 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, 1438 uint64_t value, const uint8_t counter) 1439 { 1440 if (counter == 31) { 1441 pmccfiltr_write(env, ri, value); 1442 } else if (counter < pmu_num_counters(env)) { 1443 pmevcntr_op_start(env, counter); 1444 1445 /* 1446 * If this counter's event type is changing, store the current 1447 * underlying count for the new type in c14_pmevcntr_delta[counter] so 1448 * pmevcntr_op_finish has the correct baseline when it converts back to 1449 * a delta. 1450 */ 1451 uint16_t old_event = env->cp15.c14_pmevtyper[counter] & 1452 PMXEVTYPER_EVTCOUNT; 1453 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT; 1454 if (old_event != new_event) { 1455 uint64_t count = 0; 1456 if (event_supported(new_event)) { 1457 uint16_t event_idx = supported_event_map[new_event]; 1458 count = pm_events[event_idx].get_count(env); 1459 } 1460 env->cp15.c14_pmevcntr_delta[counter] = count; 1461 } 1462 1463 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; 1464 pmevcntr_op_finish(env, counter); 1465 } 1466 /* 1467 * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when 1468 * PMSELR value is equal to or greater than the number of implemented 1469 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. 
1470 */ 1471 } 1472 1473 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri, 1474 const uint8_t counter) 1475 { 1476 if (counter == 31) { 1477 return env->cp15.pmccfiltr_el0; 1478 } else if (counter < pmu_num_counters(env)) { 1479 return env->cp15.c14_pmevtyper[counter]; 1480 } else { 1481 /* 1482 * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER 1483 * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write(). 1484 */ 1485 return 0; 1486 } 1487 } 1488 1489 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri, 1490 uint64_t value) 1491 { 1492 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1493 pmevtyper_write(env, ri, value, counter); 1494 } 1495 1496 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, 1497 uint64_t value) 1498 { 1499 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1500 env->cp15.c14_pmevtyper[counter] = value; 1501 1502 /* 1503 * pmevtyper_rawwrite is called between a pair of pmu_op_start and 1504 * pmu_op_finish calls when loading saved state for a migration. Because 1505 * we're potentially updating the type of event here, the value written to 1506 * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a 1507 * different counter type. Therefore, we need to set this value to the 1508 * current count for the counter type we're writing so that pmu_op_finish 1509 * has the correct count for its calculation. 1510 */ 1511 uint16_t event = value & PMXEVTYPER_EVTCOUNT; 1512 if (event_supported(event)) { 1513 uint16_t event_idx = supported_event_map[event]; 1514 env->cp15.c14_pmevcntr_delta[counter] = 1515 pm_events[event_idx].get_count(env); 1516 } 1517 } 1518 1519 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 1520 { 1521 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1522 return pmevtyper_read(env, ri, counter); 1523 } 1524 1525 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, 1526 uint64_t value) 1527 { 1528 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31); 1529 } 1530 1531 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) 1532 { 1533 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31); 1534 } 1535 1536 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1537 uint64_t value, uint8_t counter) 1538 { 1539 if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { 1540 /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ 1541 value &= MAKE_64BIT_MASK(0, 32); 1542 } 1543 if (counter < pmu_num_counters(env)) { 1544 pmevcntr_op_start(env, counter); 1545 env->cp15.c14_pmevcntr[counter] = value; 1546 pmevcntr_op_finish(env, counter); 1547 } 1548 /* 1549 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR 1550 * are CONSTRAINED UNPREDICTABLE. 1551 */ 1552 } 1553 1554 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri, 1555 uint8_t counter) 1556 { 1557 if (counter < pmu_num_counters(env)) { 1558 uint64_t ret; 1559 pmevcntr_op_start(env, counter); 1560 ret = env->cp15.c14_pmevcntr[counter]; 1561 pmevcntr_op_finish(env, counter); 1562 if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { 1563 /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ 1564 ret &= MAKE_64BIT_MASK(0, 32); 1565 } 1566 return ret; 1567 } else { 1568 /* 1569 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR 1570 * are CONSTRAINED UNPREDICTABLE. 
1571 */ 1572 return 0; 1573 } 1574 } 1575 1576 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri, 1577 uint64_t value) 1578 { 1579 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1580 pmevcntr_write(env, ri, value, counter); 1581 } 1582 1583 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 1584 { 1585 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1586 return pmevcntr_read(env, ri, counter); 1587 } 1588 1589 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, 1590 uint64_t value) 1591 { 1592 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1593 assert(counter < pmu_num_counters(env)); 1594 env->cp15.c14_pmevcntr[counter] = value; 1595 pmevcntr_write(env, ri, value, counter); 1596 } 1597 1598 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri) 1599 { 1600 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1601 assert(counter < pmu_num_counters(env)); 1602 return env->cp15.c14_pmevcntr[counter]; 1603 } 1604 1605 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1606 uint64_t value) 1607 { 1608 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); 1609 } 1610 1611 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1612 { 1613 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); 1614 } 1615 1616 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1617 uint64_t value) 1618 { 1619 if (arm_feature(env, ARM_FEATURE_V8)) { 1620 env->cp15.c9_pmuserenr = value & 0xf; 1621 } else { 1622 env->cp15.c9_pmuserenr = value & 1; 1623 } 1624 } 1625 1626 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1627 uint64_t value) 1628 { 1629 /* We have no event counters so only the C bit can be changed */ 1630 value &= pmu_counter_mask(env); 1631 env->cp15.c9_pminten |= value; 1632 pmu_update_irq(env); 1633 } 1634 1635 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1636 uint64_t value) 1637 { 1638 value &= pmu_counter_mask(env); 1639 env->cp15.c9_pminten &= ~value; 1640 pmu_update_irq(env); 1641 } 1642 1643 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, 1644 uint64_t value) 1645 { 1646 /* 1647 * Note that even though the AArch64 view of this register has bits 1648 * [10:0] all RES0 we can only mask the bottom 5, to comply with the 1649 * architectural requirements for bits which are RES0 only in some 1650 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 1651 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) 1652 */ 1653 raw_write(env, ri, value & ~0x1FULL); 1654 } 1655 1656 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 1657 { 1658 /* Begin with base v8.0 state. */ 1659 uint64_t valid_mask = 0x3fff; 1660 ARMCPU *cpu = env_archcpu(env); 1661 uint64_t changed; 1662 1663 /* 1664 * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always 1665 * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64. 1666 * Instead, choose the format based on the mode of EL3. 
1667 */ 1668 if (arm_el_is_aa64(env, 3)) { 1669 value |= SCR_FW | SCR_AW; /* RES1 */ 1670 valid_mask &= ~SCR_NET; /* RES0 */ 1671 1672 if (!cpu_isar_feature(aa64_aa32_el1, cpu) && 1673 !cpu_isar_feature(aa64_aa32_el2, cpu)) { 1674 value |= SCR_RW; /* RAO/WI */ 1675 } 1676 if (cpu_isar_feature(aa64_ras, cpu)) { 1677 valid_mask |= SCR_TERR; 1678 } 1679 if (cpu_isar_feature(aa64_lor, cpu)) { 1680 valid_mask |= SCR_TLOR; 1681 } 1682 if (cpu_isar_feature(aa64_pauth, cpu)) { 1683 valid_mask |= SCR_API | SCR_APK; 1684 } 1685 if (cpu_isar_feature(aa64_sel2, cpu)) { 1686 valid_mask |= SCR_EEL2; 1687 } else if (cpu_isar_feature(aa64_rme, cpu)) { 1688 /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */ 1689 value |= SCR_NS; 1690 } 1691 if (cpu_isar_feature(aa64_mte, cpu)) { 1692 valid_mask |= SCR_ATA; 1693 } 1694 if (cpu_isar_feature(aa64_scxtnum, cpu)) { 1695 valid_mask |= SCR_ENSCXT; 1696 } 1697 if (cpu_isar_feature(aa64_doublefault, cpu)) { 1698 valid_mask |= SCR_EASE | SCR_NMEA; 1699 } 1700 if (cpu_isar_feature(aa64_sme, cpu)) { 1701 valid_mask |= SCR_ENTP2; 1702 } 1703 if (cpu_isar_feature(aa64_hcx, cpu)) { 1704 valid_mask |= SCR_HXEN; 1705 } 1706 if (cpu_isar_feature(aa64_fgt, cpu)) { 1707 valid_mask |= SCR_FGTEN; 1708 } 1709 if (cpu_isar_feature(aa64_rme, cpu)) { 1710 valid_mask |= SCR_NSE | SCR_GPF; 1711 } 1712 if (cpu_isar_feature(aa64_ecv, cpu)) { 1713 valid_mask |= SCR_ECVEN; 1714 } 1715 } else { 1716 valid_mask &= ~(SCR_RW | SCR_ST); 1717 if (cpu_isar_feature(aa32_ras, cpu)) { 1718 valid_mask |= SCR_TERR; 1719 } 1720 } 1721 1722 if (!arm_feature(env, ARM_FEATURE_EL2)) { 1723 valid_mask &= ~SCR_HCE; 1724 1725 /* 1726 * On ARMv7, SMD (or SCD as it is called in v7) is only 1727 * supported if EL2 exists. The bit is UNK/SBZP when 1728 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero 1729 * when EL2 is unavailable. 1730 * On ARMv8, this bit is always available. 1731 */ 1732 if (arm_feature(env, ARM_FEATURE_V7) && 1733 !arm_feature(env, ARM_FEATURE_V8)) { 1734 valid_mask &= ~SCR_SMD; 1735 } 1736 } 1737 1738 /* Clear all-context RES0 bits. */ 1739 value &= valid_mask; 1740 changed = env->cp15.scr_el3 ^ value; 1741 env->cp15.scr_el3 = value; 1742 1743 /* 1744 * If SCR_EL3.{NS,NSE} changes, i.e. change of security state, 1745 * we must invalidate all TLBs below EL3. 1746 */ 1747 if (changed & (SCR_NS | SCR_NSE)) { 1748 tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 | 1749 ARMMMUIdxBit_E20_0 | 1750 ARMMMUIdxBit_E10_1 | 1751 ARMMMUIdxBit_E20_2 | 1752 ARMMMUIdxBit_E10_1_PAN | 1753 ARMMMUIdxBit_E20_2_PAN | 1754 ARMMMUIdxBit_E2)); 1755 } 1756 } 1757 1758 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1759 { 1760 /* 1761 * scr_write will set the RES1 bits on an AArch64-only CPU. 1762 * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise. 
1763 */ 1764 scr_write(env, ri, 0); 1765 } 1766 1767 static CPAccessResult access_tid4(CPUARMState *env, 1768 const ARMCPRegInfo *ri, 1769 bool isread) 1770 { 1771 if (arm_current_el(env) == 1 && 1772 (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) { 1773 return CP_ACCESS_TRAP_EL2; 1774 } 1775 1776 return CP_ACCESS_OK; 1777 } 1778 1779 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1780 { 1781 ARMCPU *cpu = env_archcpu(env); 1782 1783 /* 1784 * Acquire the CSSELR index from the bank corresponding to the CCSIDR 1785 * bank 1786 */ 1787 uint32_t index = A32_BANKED_REG_GET(env, csselr, 1788 ri->secure & ARM_CP_SECSTATE_S); 1789 1790 return cpu->ccsidr[index]; 1791 } 1792 1793 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1794 uint64_t value) 1795 { 1796 raw_write(env, ri, value & 0xf); 1797 } 1798 1799 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1800 { 1801 CPUState *cs = env_cpu(env); 1802 bool el1 = arm_current_el(env) == 1; 1803 uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0; 1804 uint64_t ret = 0; 1805 1806 if (hcr_el2 & HCR_IMO) { 1807 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { 1808 ret |= CPSR_I; 1809 } 1810 if (cs->interrupt_request & CPU_INTERRUPT_VINMI) { 1811 ret |= ISR_IS; 1812 ret |= CPSR_I; 1813 } 1814 } else { 1815 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { 1816 ret |= CPSR_I; 1817 } 1818 1819 if (cs->interrupt_request & CPU_INTERRUPT_NMI) { 1820 ret |= ISR_IS; 1821 ret |= CPSR_I; 1822 } 1823 } 1824 1825 if (hcr_el2 & HCR_FMO) { 1826 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { 1827 ret |= CPSR_F; 1828 } 1829 if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) { 1830 ret |= ISR_FS; 1831 ret |= CPSR_F; 1832 } 1833 } else { 1834 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { 1835 ret |= CPSR_F; 1836 } 1837 } 1838 1839 if (hcr_el2 & HCR_AMO) { 1840 if (cs->interrupt_request & CPU_INTERRUPT_VSERR) { 1841 ret |= CPSR_A; 1842 } 1843 } 1844 1845 return ret; 1846 } 1847 1848 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 1849 bool isread) 1850 { 1851 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { 1852 return CP_ACCESS_TRAP_EL2; 1853 } 1854 1855 return CP_ACCESS_OK; 1856 } 1857 1858 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 1859 bool isread) 1860 { 1861 if (arm_feature(env, ARM_FEATURE_V8)) { 1862 return access_aa64_tid1(env, ri, isread); 1863 } 1864 1865 return CP_ACCESS_OK; 1866 } 1867 1868 static const ARMCPRegInfo v7_cp_reginfo[] = { 1869 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ 1870 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 1871 .access = PL1_W, .type = ARM_CP_NOP }, 1872 /* 1873 * Performance monitors are implementation defined in v7, 1874 * but with an ARM recommended set of registers, which we 1875 * follow. 1876 * 1877 * Performance registers fall into three categories: 1878 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) 1879 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) 1880 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) 1881 * For the cases controlled by PMUSERENR we must set .access to PL0_RW 1882 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
1883 */ 1884 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 1885 .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO, 1886 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1887 .writefn = pmcntenset_write, 1888 .accessfn = pmreg_access, 1889 .fgt = FGT_PMCNTEN, 1890 .raw_writefn = raw_write }, 1891 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO, 1892 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 1893 .access = PL0_RW, .accessfn = pmreg_access, 1894 .fgt = FGT_PMCNTEN, 1895 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 1896 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 1897 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 1898 .access = PL0_RW, 1899 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1900 .accessfn = pmreg_access, 1901 .fgt = FGT_PMCNTEN, 1902 .writefn = pmcntenclr_write, 1903 .type = ARM_CP_ALIAS | ARM_CP_IO }, 1904 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 1905 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 1906 .access = PL0_RW, .accessfn = pmreg_access, 1907 .fgt = FGT_PMCNTEN, 1908 .type = ARM_CP_ALIAS | ARM_CP_IO, 1909 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 1910 .writefn = pmcntenclr_write }, 1911 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 1912 .access = PL0_RW, .type = ARM_CP_IO, 1913 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 1914 .accessfn = pmreg_access, 1915 .fgt = FGT_PMOVS, 1916 .writefn = pmovsr_write, 1917 .raw_writefn = raw_write }, 1918 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 1919 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 1920 .access = PL0_RW, .accessfn = pmreg_access, 1921 .fgt = FGT_PMOVS, 1922 .type = ARM_CP_ALIAS | ARM_CP_IO, 1923 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 1924 .writefn = pmovsr_write, 1925 .raw_writefn = raw_write }, 1926 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 1927 .access = PL0_W, .accessfn = pmreg_access_swinc, 1928 .fgt = FGT_PMSWINC_EL0, 1929 .type = ARM_CP_NO_RAW | ARM_CP_IO, 1930 .writefn = pmswinc_write }, 1931 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, 1932 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, 1933 .access = PL0_W, .accessfn = pmreg_access_swinc, 1934 .fgt = FGT_PMSWINC_EL0, 1935 .type = ARM_CP_NO_RAW | ARM_CP_IO, 1936 .writefn = pmswinc_write }, 1937 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 1938 .access = PL0_RW, .type = ARM_CP_ALIAS, 1939 .fgt = FGT_PMSELR_EL0, 1940 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 1941 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 1942 .raw_writefn = raw_write}, 1943 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 1944 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 1945 .access = PL0_RW, .accessfn = pmreg_access_selr, 1946 .fgt = FGT_PMSELR_EL0, 1947 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 1948 .writefn = pmselr_write, .raw_writefn = raw_write, }, 1949 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 1950 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, 1951 .fgt = FGT_PMCCNTR_EL0, 1952 .readfn = pmccntr_read, .writefn = pmccntr_write32, 1953 .accessfn = pmreg_access_ccntr }, 1954 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 1955 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 1956 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 
1957 .fgt = FGT_PMCCNTR_EL0, 1958 .type = ARM_CP_IO, 1959 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), 1960 .readfn = pmccntr_read, .writefn = pmccntr_write, 1961 .raw_readfn = raw_read, .raw_writefn = raw_write, }, 1962 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, 1963 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, 1964 .access = PL0_RW, .accessfn = pmreg_access, 1965 .fgt = FGT_PMCCFILTR_EL0, 1966 .type = ARM_CP_ALIAS | ARM_CP_IO, 1967 .resetvalue = 0, }, 1968 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 1969 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 1970 .writefn = pmccfiltr_write, .raw_writefn = raw_write, 1971 .access = PL0_RW, .accessfn = pmreg_access, 1972 .fgt = FGT_PMCCFILTR_EL0, 1973 .type = ARM_CP_IO, 1974 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 1975 .resetvalue = 0, }, 1976 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 1977 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 1978 .accessfn = pmreg_access, 1979 .fgt = FGT_PMEVTYPERN_EL0, 1980 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 1981 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 1982 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 1983 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 1984 .accessfn = pmreg_access, 1985 .fgt = FGT_PMEVTYPERN_EL0, 1986 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 1987 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 1988 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 1989 .accessfn = pmreg_access_xevcntr, 1990 .fgt = FGT_PMEVCNTRN_EL0, 1991 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 1992 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, 1993 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, 1994 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 1995 .accessfn = pmreg_access_xevcntr, 1996 .fgt = FGT_PMEVCNTRN_EL0, 1997 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 1998 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 1999 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 2000 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), 2001 .resetvalue = 0, 2002 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2003 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 2004 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 2005 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 2006 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 2007 .resetvalue = 0, 2008 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2009 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 2010 .access = PL1_RW, .accessfn = access_tpm, 2011 .fgt = FGT_PMINTEN, 2012 .type = ARM_CP_ALIAS | ARM_CP_IO, 2013 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 2014 .resetvalue = 0, 2015 .writefn = pmintenset_write, .raw_writefn = raw_write }, 2016 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 2017 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 2018 .access = PL1_RW, .accessfn = access_tpm, 2019 .fgt = FGT_PMINTEN, 2020 .type = ARM_CP_IO, 2021 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2022 .writefn = pmintenset_write, .raw_writefn = raw_write, 2023 .resetvalue = 0x0 }, 2024 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 2025 .access = PL1_RW, .accessfn = access_tpm, 2026 .fgt = FGT_PMINTEN, 2027 .type = ARM_CP_ALIAS | 
ARM_CP_IO | ARM_CP_NO_RAW, 2028 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2029 .writefn = pmintenclr_write, }, 2030 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 2031 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 2032 .access = PL1_RW, .accessfn = access_tpm, 2033 .fgt = FGT_PMINTEN, 2034 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, 2035 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2036 .writefn = pmintenclr_write }, 2037 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 2038 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 2039 .access = PL1_R, 2040 .accessfn = access_tid4, 2041 .fgt = FGT_CCSIDR_EL1, 2042 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, 2043 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH, 2044 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, 2045 .access = PL1_RW, 2046 .accessfn = access_tid4, 2047 .fgt = FGT_CSSELR_EL1, 2048 .writefn = csselr_write, .resetvalue = 0, 2049 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), 2050 offsetof(CPUARMState, cp15.csselr_ns) } }, 2051 /* 2052 * Auxiliary ID register: this actually has an IMPDEF value but for now 2053 * just RAZ for all cores: 2054 */ 2055 { .name = "AIDR", .state = ARM_CP_STATE_BOTH, 2056 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, 2057 .access = PL1_R, .type = ARM_CP_CONST, 2058 .accessfn = access_aa64_tid1, 2059 .fgt = FGT_AIDR_EL1, 2060 .resetvalue = 0 }, 2061 /* 2062 * Auxiliary fault status registers: these also are IMPDEF, and we 2063 * choose to RAZ/WI for all cores. 2064 */ 2065 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, 2066 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, 2067 .access = PL1_RW, .accessfn = access_tvm_trvm, 2068 .fgt = FGT_AFSR0_EL1, 2069 .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1, 2070 .type = ARM_CP_CONST, .resetvalue = 0 }, 2071 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, 2072 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, 2073 .access = PL1_RW, .accessfn = access_tvm_trvm, 2074 .fgt = FGT_AFSR1_EL1, 2075 .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1, 2076 .type = ARM_CP_CONST, .resetvalue = 0 }, 2077 /* 2078 * MAIR can just read-as-written because we don't implement caches 2079 * and so don't need to care about memory attributes. 2080 */ 2081 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, 2082 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2083 .access = PL1_RW, .accessfn = access_tvm_trvm, 2084 .fgt = FGT_MAIR_EL1, 2085 .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1, 2086 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), 2087 .resetvalue = 0 }, 2088 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, 2089 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, 2090 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), 2091 .resetvalue = 0 }, 2092 /* 2093 * For non-long-descriptor page tables these are PRRR and NMRR; 2094 * regardless they still act as reads-as-written for QEMU. 2095 */ 2096 /* 2097 * MAIR0/1 are defined separately from their 64-bit counterpart which 2098 * allows them to assign the correct fieldoffset based on the endianness 2099 * handled in the field definitions. 
2100 */ 2101 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 2102 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2103 .access = PL1_RW, .accessfn = access_tvm_trvm, 2104 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 2105 offsetof(CPUARMState, cp15.mair0_ns) }, 2106 .resetfn = arm_cp_reset_ignore }, 2107 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 2108 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, 2109 .access = PL1_RW, .accessfn = access_tvm_trvm, 2110 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 2111 offsetof(CPUARMState, cp15.mair1_ns) }, 2112 .resetfn = arm_cp_reset_ignore }, 2113 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 2114 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 2115 .fgt = FGT_ISR_EL1, 2116 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 2117 }; 2118 2119 static const ARMCPRegInfo pmovsset_cp_reginfo[] = { 2120 /* PMOVSSET is not implemented in v7 before v7ve */ 2121 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, 2122 .access = PL0_RW, .accessfn = pmreg_access, 2123 .fgt = FGT_PMOVS, 2124 .type = ARM_CP_ALIAS | ARM_CP_IO, 2125 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2126 .writefn = pmovsset_write, 2127 .raw_writefn = raw_write }, 2128 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, 2129 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, 2130 .access = PL0_RW, .accessfn = pmreg_access, 2131 .fgt = FGT_PMOVS, 2132 .type = ARM_CP_ALIAS | ARM_CP_IO, 2133 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2134 .writefn = pmovsset_write, 2135 .raw_writefn = raw_write }, 2136 }; 2137 2138 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2139 uint64_t value) 2140 { 2141 value &= 1; 2142 env->teecr = value; 2143 } 2144 2145 static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2146 bool isread) 2147 { 2148 /* 2149 * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE 2150 * at all, so we don't need to check whether we're v8A. 
2151 */ 2152 if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && 2153 (env->cp15.hstr_el2 & HSTR_TTEE)) { 2154 return CP_ACCESS_TRAP_EL2; 2155 } 2156 return CP_ACCESS_OK; 2157 } 2158 2159 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2160 bool isread) 2161 { 2162 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 2163 return CP_ACCESS_TRAP_EL1; 2164 } 2165 return teecr_access(env, ri, isread); 2166 } 2167 2168 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 2169 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 2170 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 2171 .resetvalue = 0, 2172 .writefn = teecr_write, .accessfn = teecr_access }, 2173 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 2174 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 2175 .accessfn = teehbr_access, .resetvalue = 0 }, 2176 }; 2177 2178 static const ARMCPRegInfo v6k_cp_reginfo[] = { 2179 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 2180 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 2181 .access = PL0_RW, 2182 .fgt = FGT_TPIDR_EL0, 2183 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 2184 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 2185 .access = PL0_RW, 2186 .fgt = FGT_TPIDR_EL0, 2187 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 2188 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 2189 .resetfn = arm_cp_reset_ignore }, 2190 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 2191 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 2192 .access = PL0_R | PL1_W, 2193 .fgt = FGT_TPIDRRO_EL0, 2194 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 2195 .resetvalue = 0}, 2196 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 2197 .access = PL0_R | PL1_W, 2198 .fgt = FGT_TPIDRRO_EL0, 2199 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 2200 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 2201 .resetfn = arm_cp_reset_ignore }, 2202 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 2203 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 2204 .access = PL1_RW, 2205 .fgt = FGT_TPIDR_EL1, 2206 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 2207 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, 2208 .access = PL1_RW, 2209 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), 2210 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 2211 .resetvalue = 0 }, 2212 }; 2213 2214 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) 2215 { 2216 ARMCPU *cpu = env_archcpu(env); 2217 2218 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; 2219 } 2220 2221 #ifndef CONFIG_USER_ONLY 2222 2223 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 2224 bool isread) 2225 { 2226 /* 2227 * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 2228 * Writable only at the highest implemented exception level. 
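     * (PL0PCTEN and PL0VCTEN are bits [1:0] of CNTKCTL, or of CNTHCTL_EL2 when
     * HCR_EL2.{E2H,TGE} == {1,1}, hence the extract32(cntkctl, 0, 2) check below.)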
2229 */ 2230 int el = arm_current_el(env); 2231 uint64_t hcr; 2232 uint32_t cntkctl; 2233 2234 switch (el) { 2235 case 0: 2236 hcr = arm_hcr_el2_eff(env); 2237 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2238 cntkctl = env->cp15.cnthctl_el2; 2239 } else { 2240 cntkctl = env->cp15.c14_cntkctl; 2241 } 2242 if (!extract32(cntkctl, 0, 2)) { 2243 return CP_ACCESS_TRAP_EL1; 2244 } 2245 break; 2246 case 1: 2247 if (!isread && ri->state == ARM_CP_STATE_AA32 && 2248 arm_is_secure_below_el3(env)) { 2249 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ 2250 return CP_ACCESS_UNDEFINED; 2251 } 2252 break; 2253 case 2: 2254 case 3: 2255 break; 2256 } 2257 2258 if (!isread && el < arm_highest_el(env)) { 2259 return CP_ACCESS_UNDEFINED; 2260 } 2261 2262 return CP_ACCESS_OK; 2263 } 2264 2265 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 2266 bool isread) 2267 { 2268 unsigned int cur_el = arm_current_el(env); 2269 bool has_el2 = arm_is_el2_enabled(env); 2270 uint64_t hcr = arm_hcr_el2_eff(env); 2271 2272 switch (cur_el) { 2273 case 0: 2274 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */ 2275 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2276 return (extract32(env->cp15.cnthctl_el2, timeridx, 1) 2277 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2278 } 2279 2280 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ 2281 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 2282 return CP_ACCESS_TRAP_EL1; 2283 } 2284 /* fall through */ 2285 case 1: 2286 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */ 2287 if (has_el2 && timeridx == GTIMER_PHYS && 2288 (hcr & HCR_E2H 2289 ? !extract32(env->cp15.cnthctl_el2, 10, 1) 2290 : !extract32(env->cp15.cnthctl_el2, 0, 1))) { 2291 return CP_ACCESS_TRAP_EL2; 2292 } 2293 if (has_el2 && timeridx == GTIMER_VIRT) { 2294 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) { 2295 return CP_ACCESS_TRAP_EL2; 2296 } 2297 } 2298 break; 2299 } 2300 return CP_ACCESS_OK; 2301 } 2302 2303 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 2304 bool isread) 2305 { 2306 unsigned int cur_el = arm_current_el(env); 2307 bool has_el2 = arm_is_el2_enabled(env); 2308 uint64_t hcr = arm_hcr_el2_eff(env); 2309 2310 switch (cur_el) { 2311 case 0: 2312 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2313 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ 2314 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) 2315 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2316 } 2317 2318 /* 2319 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from 2320 * EL0 if EL0[PV]TEN is zero. 2321 */ 2322 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 2323 return CP_ACCESS_TRAP_EL1; 2324 } 2325 /* fall through */ 2326 2327 case 1: 2328 if (has_el2 && timeridx == GTIMER_PHYS) { 2329 if (hcr & HCR_E2H) { 2330 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */ 2331 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { 2332 return CP_ACCESS_TRAP_EL2; 2333 } 2334 } else { 2335 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. 
*/ 2336 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { 2337 return CP_ACCESS_TRAP_EL2; 2338 } 2339 } 2340 } 2341 if (has_el2 && timeridx == GTIMER_VIRT) { 2342 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) { 2343 return CP_ACCESS_TRAP_EL2; 2344 } 2345 } 2346 break; 2347 } 2348 return CP_ACCESS_OK; 2349 } 2350 2351 static CPAccessResult gt_pct_access(CPUARMState *env, 2352 const ARMCPRegInfo *ri, 2353 bool isread) 2354 { 2355 return gt_counter_access(env, GTIMER_PHYS, isread); 2356 } 2357 2358 static CPAccessResult gt_vct_access(CPUARMState *env, 2359 const ARMCPRegInfo *ri, 2360 bool isread) 2361 { 2362 return gt_counter_access(env, GTIMER_VIRT, isread); 2363 } 2364 2365 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2366 bool isread) 2367 { 2368 return gt_timer_access(env, GTIMER_PHYS, isread); 2369 } 2370 2371 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2372 bool isread) 2373 { 2374 return gt_timer_access(env, GTIMER_VIRT, isread); 2375 } 2376 2377 static CPAccessResult gt_stimer_access(CPUARMState *env, 2378 const ARMCPRegInfo *ri, 2379 bool isread) 2380 { 2381 /* 2382 * The AArch64 register view of the secure physical timer is 2383 * always accessible from EL3, and configurably accessible from 2384 * Secure EL1. 2385 */ 2386 switch (arm_current_el(env)) { 2387 case 1: 2388 if (!arm_is_secure(env)) { 2389 return CP_ACCESS_UNDEFINED; 2390 } 2391 if (!(env->cp15.scr_el3 & SCR_ST)) { 2392 return CP_ACCESS_TRAP_EL3; 2393 } 2394 return CP_ACCESS_OK; 2395 case 0: 2396 case 2: 2397 return CP_ACCESS_UNDEFINED; 2398 case 3: 2399 return CP_ACCESS_OK; 2400 default: 2401 g_assert_not_reached(); 2402 } 2403 } 2404 2405 uint64_t gt_get_countervalue(CPUARMState *env) 2406 { 2407 ARMCPU *cpu = env_archcpu(env); 2408 2409 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); 2410 } 2411 2412 static void gt_update_irq(ARMCPU *cpu, int timeridx) 2413 { 2414 CPUARMState *env = &cpu->env; 2415 uint64_t cnthctl = env->cp15.cnthctl_el2; 2416 ARMSecuritySpace ss = arm_security_space(env); 2417 /* ISTATUS && !IMASK */ 2418 int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4; 2419 2420 /* 2421 * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK. 2422 * It is RES0 in Secure and NonSecure state. 2423 */ 2424 if ((ss == ARMSS_Root || ss == ARMSS_Realm) && 2425 ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) || 2426 (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) { 2427 irqstate = 0; 2428 } 2429 2430 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2431 trace_arm_gt_update_irq(timeridx, irqstate); 2432 } 2433 2434 void gt_rme_post_el_change(ARMCPU *cpu, void *ignored) 2435 { 2436 /* 2437 * Changing security state between Root and Secure/NonSecure, which may 2438 * happen when switching EL, can change the effective value of CNTHCTL_EL2 2439 * mask bits. Update the IRQ state accordingly. 
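     * (gt_update_irq() only honours CNTVMASK/CNTPMASK in Root and Realm state,
     * so entering or leaving Root can change the timer output level even
     * though no timer register was written.)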
2440 */ 2441 gt_update_irq(cpu, GTIMER_VIRT); 2442 gt_update_irq(cpu, GTIMER_PHYS); 2443 } 2444 2445 static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env) 2446 { 2447 if ((env->cp15.scr_el3 & SCR_ECVEN) && 2448 FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) && 2449 arm_is_el2_enabled(env) && 2450 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 2451 return env->cp15.cntpoff_el2; 2452 } 2453 return 0; 2454 } 2455 2456 static uint64_t gt_phys_cnt_offset(CPUARMState *env) 2457 { 2458 if (arm_current_el(env) >= 2) { 2459 return 0; 2460 } 2461 return gt_phys_raw_cnt_offset(env); 2462 } 2463 2464 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 2465 { 2466 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 2467 2468 if (gt->ctl & 1) { 2469 /* 2470 * Timer enabled: calculate and set current ISTATUS, irq, and 2471 * reset timer to when ISTATUS next has to change 2472 */ 2473 uint64_t offset = timeridx == GTIMER_VIRT ? 2474 cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env); 2475 uint64_t count = gt_get_countervalue(&cpu->env); 2476 /* Note that this must be unsigned 64 bit arithmetic: */ 2477 int istatus = count - offset >= gt->cval; 2478 uint64_t nexttick; 2479 2480 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 2481 2482 if (istatus) { 2483 /* 2484 * Next transition is when (count - offset) rolls back over to 0. 2485 * If offset > count then this is when count == offset; 2486 * if offset <= count then this is when count == offset + 2^64 2487 * For the latter case we set nexttick to an "as far in future 2488 * as possible" value and let the code below handle it. 2489 */ 2490 if (offset > count) { 2491 nexttick = offset; 2492 } else { 2493 nexttick = UINT64_MAX; 2494 } 2495 } else { 2496 /* 2497 * Next transition is when (count - offset) == cval, i.e. 2498 * when count == (cval + offset). 2499 * If that would overflow, then again we set up the next interrupt 2500 * for "as far in the future as possible" for the code below. 2501 */ 2502 if (uadd64_overflow(gt->cval, offset, &nexttick)) { 2503 nexttick = UINT64_MAX; 2504 } 2505 } 2506 /* 2507 * Note that the desired next expiry time might be beyond the 2508 * signed-64-bit range of a QEMUTimer -- in this case we just 2509 * set the timer for as far in the future as possible. When the 2510 * timer expires we will reset the timer for any remaining period. 
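     * (For instance, with a 1ns tick period any deadline beyond INT64_MAX
     * nanoseconds, roughly 292 years, cannot be programmed directly, hence
     * the clamp to INT64_MAX below.)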
2511 */ 2512 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { 2513 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); 2514 } else { 2515 timer_mod(cpu->gt_timer[timeridx], nexttick); 2516 } 2517 trace_arm_gt_recalc(timeridx, nexttick); 2518 } else { 2519 /* Timer disabled: ISTATUS and timer output always clear */ 2520 gt->ctl &= ~4; 2521 timer_del(cpu->gt_timer[timeridx]); 2522 trace_arm_gt_recalc_disabled(timeridx); 2523 } 2524 gt_update_irq(cpu, timeridx); 2525 } 2526 2527 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2528 int timeridx) 2529 { 2530 ARMCPU *cpu = env_archcpu(env); 2531 2532 timer_del(cpu->gt_timer[timeridx]); 2533 } 2534 2535 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2536 { 2537 return gt_get_countervalue(env) - gt_phys_cnt_offset(env); 2538 } 2539 2540 uint64_t gt_virt_cnt_offset(CPUARMState *env) 2541 { 2542 uint64_t hcr; 2543 2544 switch (arm_current_el(env)) { 2545 case 2: 2546 hcr = arm_hcr_el2_eff(env); 2547 if (hcr & HCR_E2H) { 2548 return 0; 2549 } 2550 break; 2551 case 0: 2552 hcr = arm_hcr_el2_eff(env); 2553 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2554 return 0; 2555 } 2556 break; 2557 } 2558 2559 return env->cp15.cntvoff_el2; 2560 } 2561 2562 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2563 { 2564 return gt_get_countervalue(env) - gt_virt_cnt_offset(env); 2565 } 2566 2567 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2568 int timeridx, 2569 uint64_t value) 2570 { 2571 trace_arm_gt_cval_write(timeridx, value); 2572 env->cp15.c14_timer[timeridx].cval = value; 2573 gt_recalc_timer(env_archcpu(env), timeridx); 2574 } 2575 2576 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 2577 int timeridx) 2578 { 2579 uint64_t offset = 0; 2580 2581 switch (timeridx) { 2582 case GTIMER_VIRT: 2583 case GTIMER_HYPVIRT: 2584 offset = gt_virt_cnt_offset(env); 2585 break; 2586 case GTIMER_PHYS: 2587 offset = gt_phys_cnt_offset(env); 2588 break; 2589 } 2590 2591 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 2592 (gt_get_countervalue(env) - offset)); 2593 } 2594 2595 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2596 int timeridx, 2597 uint64_t value) 2598 { 2599 uint64_t offset = 0; 2600 2601 switch (timeridx) { 2602 case GTIMER_VIRT: 2603 case GTIMER_HYPVIRT: 2604 offset = gt_virt_cnt_offset(env); 2605 break; 2606 case GTIMER_PHYS: 2607 offset = gt_phys_cnt_offset(env); 2608 break; 2609 } 2610 2611 trace_arm_gt_tval_write(timeridx, value); 2612 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2613 sextract64(value, 0, 32); 2614 gt_recalc_timer(env_archcpu(env), timeridx); 2615 } 2616 2617 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2618 int timeridx, 2619 uint64_t value) 2620 { 2621 ARMCPU *cpu = env_archcpu(env); 2622 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2623 2624 trace_arm_gt_ctl_write(timeridx, value); 2625 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 2626 if ((oldval ^ value) & 1) { 2627 /* Enable toggled */ 2628 gt_recalc_timer(cpu, timeridx); 2629 } else if ((oldval ^ value) & 2) { 2630 /* 2631 * IMASK toggled: don't need to recalculate, 2632 * just set the interrupt line based on ISTATUS 2633 */ 2634 trace_arm_gt_imask_toggle(timeridx); 2635 gt_update_irq(cpu, timeridx); 2636 } 2637 } 2638 2639 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2640 { 2641 gt_timer_reset(env, ri, GTIMER_PHYS); 
2642 } 2643 2644 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2645 uint64_t value) 2646 { 2647 gt_cval_write(env, ri, GTIMER_PHYS, value); 2648 } 2649 2650 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2651 { 2652 return gt_tval_read(env, ri, GTIMER_PHYS); 2653 } 2654 2655 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2656 uint64_t value) 2657 { 2658 gt_tval_write(env, ri, GTIMER_PHYS, value); 2659 } 2660 2661 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2662 uint64_t value) 2663 { 2664 gt_ctl_write(env, ri, GTIMER_PHYS, value); 2665 } 2666 2667 static int gt_phys_redir_timeridx(CPUARMState *env) 2668 { 2669 switch (arm_mmu_idx(env)) { 2670 case ARMMMUIdx_E20_0: 2671 case ARMMMUIdx_E20_2: 2672 case ARMMMUIdx_E20_2_PAN: 2673 return GTIMER_HYP; 2674 default: 2675 return GTIMER_PHYS; 2676 } 2677 } 2678 2679 static int gt_virt_redir_timeridx(CPUARMState *env) 2680 { 2681 switch (arm_mmu_idx(env)) { 2682 case ARMMMUIdx_E20_0: 2683 case ARMMMUIdx_E20_2: 2684 case ARMMMUIdx_E20_2_PAN: 2685 return GTIMER_HYPVIRT; 2686 default: 2687 return GTIMER_VIRT; 2688 } 2689 } 2690 2691 static uint64_t gt_phys_redir_cval_read(CPUARMState *env, 2692 const ARMCPRegInfo *ri) 2693 { 2694 int timeridx = gt_phys_redir_timeridx(env); 2695 return env->cp15.c14_timer[timeridx].cval; 2696 } 2697 2698 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2699 uint64_t value) 2700 { 2701 int timeridx = gt_phys_redir_timeridx(env); 2702 gt_cval_write(env, ri, timeridx, value); 2703 } 2704 2705 static uint64_t gt_phys_redir_tval_read(CPUARMState *env, 2706 const ARMCPRegInfo *ri) 2707 { 2708 int timeridx = gt_phys_redir_timeridx(env); 2709 return gt_tval_read(env, ri, timeridx); 2710 } 2711 2712 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2713 uint64_t value) 2714 { 2715 int timeridx = gt_phys_redir_timeridx(env); 2716 gt_tval_write(env, ri, timeridx, value); 2717 } 2718 2719 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, 2720 const ARMCPRegInfo *ri) 2721 { 2722 int timeridx = gt_phys_redir_timeridx(env); 2723 return env->cp15.c14_timer[timeridx].ctl; 2724 } 2725 2726 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2727 uint64_t value) 2728 { 2729 int timeridx = gt_phys_redir_timeridx(env); 2730 gt_ctl_write(env, ri, timeridx, value); 2731 } 2732 2733 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2734 { 2735 gt_timer_reset(env, ri, GTIMER_VIRT); 2736 } 2737 2738 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2739 uint64_t value) 2740 { 2741 gt_cval_write(env, ri, GTIMER_VIRT, value); 2742 } 2743 2744 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2745 { 2746 return gt_tval_read(env, ri, GTIMER_VIRT); 2747 } 2748 2749 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2750 uint64_t value) 2751 { 2752 gt_tval_write(env, ri, GTIMER_VIRT, value); 2753 } 2754 2755 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2756 uint64_t value) 2757 { 2758 gt_ctl_write(env, ri, GTIMER_VIRT, value); 2759 } 2760 2761 static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2762 uint64_t value) 2763 { 2764 ARMCPU *cpu = env_archcpu(env); 2765 uint32_t oldval = env->cp15.cnthctl_el2; 2766 uint32_t valid_mask = 2767 R_CNTHCTL_EL0PCTEN_E2H1_MASK | 2768 R_CNTHCTL_EL0VCTEN_E2H1_MASK | 2769 
R_CNTHCTL_EVNTEN_MASK | 2770 R_CNTHCTL_EVNTDIR_MASK | 2771 R_CNTHCTL_EVNTI_MASK | 2772 R_CNTHCTL_EL0VTEN_MASK | 2773 R_CNTHCTL_EL0PTEN_MASK | 2774 R_CNTHCTL_EL1PCTEN_E2H1_MASK | 2775 R_CNTHCTL_EL1PTEN_MASK; 2776 2777 if (cpu_isar_feature(aa64_rme, cpu)) { 2778 valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK; 2779 } 2780 if (cpu_isar_feature(aa64_ecv_traps, cpu)) { 2781 valid_mask |= 2782 R_CNTHCTL_EL1TVT_MASK | 2783 R_CNTHCTL_EL1TVCT_MASK | 2784 R_CNTHCTL_EL1NVPCT_MASK | 2785 R_CNTHCTL_EL1NVVCT_MASK | 2786 R_CNTHCTL_EVNTIS_MASK; 2787 } 2788 if (cpu_isar_feature(aa64_ecv, cpu)) { 2789 valid_mask |= R_CNTHCTL_ECV_MASK; 2790 } 2791 2792 /* Clear RES0 bits */ 2793 value &= valid_mask; 2794 2795 raw_write(env, ri, value); 2796 2797 if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) { 2798 gt_update_irq(cpu, GTIMER_VIRT); 2799 } else if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) { 2800 gt_update_irq(cpu, GTIMER_PHYS); 2801 } 2802 } 2803 2804 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2805 uint64_t value) 2806 { 2807 ARMCPU *cpu = env_archcpu(env); 2808 2809 trace_arm_gt_cntvoff_write(value); 2810 raw_write(env, ri, value); 2811 gt_recalc_timer(cpu, GTIMER_VIRT); 2812 } 2813 2814 static uint64_t gt_virt_redir_cval_read(CPUARMState *env, 2815 const ARMCPRegInfo *ri) 2816 { 2817 int timeridx = gt_virt_redir_timeridx(env); 2818 return env->cp15.c14_timer[timeridx].cval; 2819 } 2820 2821 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2822 uint64_t value) 2823 { 2824 int timeridx = gt_virt_redir_timeridx(env); 2825 gt_cval_write(env, ri, timeridx, value); 2826 } 2827 2828 static uint64_t gt_virt_redir_tval_read(CPUARMState *env, 2829 const ARMCPRegInfo *ri) 2830 { 2831 int timeridx = gt_virt_redir_timeridx(env); 2832 return gt_tval_read(env, ri, timeridx); 2833 } 2834 2835 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2836 uint64_t value) 2837 { 2838 int timeridx = gt_virt_redir_timeridx(env); 2839 gt_tval_write(env, ri, timeridx, value); 2840 } 2841 2842 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, 2843 const ARMCPRegInfo *ri) 2844 { 2845 int timeridx = gt_virt_redir_timeridx(env); 2846 return env->cp15.c14_timer[timeridx].ctl; 2847 } 2848 2849 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2850 uint64_t value) 2851 { 2852 int timeridx = gt_virt_redir_timeridx(env); 2853 gt_ctl_write(env, ri, timeridx, value); 2854 } 2855 2856 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2857 { 2858 gt_timer_reset(env, ri, GTIMER_HYP); 2859 } 2860 2861 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2862 uint64_t value) 2863 { 2864 gt_cval_write(env, ri, GTIMER_HYP, value); 2865 } 2866 2867 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2868 { 2869 return gt_tval_read(env, ri, GTIMER_HYP); 2870 } 2871 2872 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2873 uint64_t value) 2874 { 2875 gt_tval_write(env, ri, GTIMER_HYP, value); 2876 } 2877 2878 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2879 uint64_t value) 2880 { 2881 gt_ctl_write(env, ri, GTIMER_HYP, value); 2882 } 2883 2884 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2885 { 2886 gt_timer_reset(env, ri, GTIMER_SEC); 2887 } 2888 2889 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2890 uint64_t value) 2891 { 2892 gt_cval_write(env, ri, 
GTIMER_SEC, value); 2893 } 2894 2895 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2896 { 2897 return gt_tval_read(env, ri, GTIMER_SEC); 2898 } 2899 2900 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2901 uint64_t value) 2902 { 2903 gt_tval_write(env, ri, GTIMER_SEC, value); 2904 } 2905 2906 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2907 uint64_t value) 2908 { 2909 gt_ctl_write(env, ri, GTIMER_SEC, value); 2910 } 2911 2912 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2913 { 2914 gt_timer_reset(env, ri, GTIMER_HYPVIRT); 2915 } 2916 2917 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2918 uint64_t value) 2919 { 2920 gt_cval_write(env, ri, GTIMER_HYPVIRT, value); 2921 } 2922 2923 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2924 { 2925 return gt_tval_read(env, ri, GTIMER_HYPVIRT); 2926 } 2927 2928 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2929 uint64_t value) 2930 { 2931 gt_tval_write(env, ri, GTIMER_HYPVIRT, value); 2932 } 2933 2934 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2935 uint64_t value) 2936 { 2937 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); 2938 } 2939 2940 void arm_gt_ptimer_cb(void *opaque) 2941 { 2942 ARMCPU *cpu = opaque; 2943 2944 gt_recalc_timer(cpu, GTIMER_PHYS); 2945 } 2946 2947 void arm_gt_vtimer_cb(void *opaque) 2948 { 2949 ARMCPU *cpu = opaque; 2950 2951 gt_recalc_timer(cpu, GTIMER_VIRT); 2952 } 2953 2954 void arm_gt_htimer_cb(void *opaque) 2955 { 2956 ARMCPU *cpu = opaque; 2957 2958 gt_recalc_timer(cpu, GTIMER_HYP); 2959 } 2960 2961 void arm_gt_stimer_cb(void *opaque) 2962 { 2963 ARMCPU *cpu = opaque; 2964 2965 gt_recalc_timer(cpu, GTIMER_SEC); 2966 } 2967 2968 void arm_gt_hvtimer_cb(void *opaque) 2969 { 2970 ARMCPU *cpu = opaque; 2971 2972 gt_recalc_timer(cpu, GTIMER_HYPVIRT); 2973 } 2974 2975 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2976 /* 2977 * Note that CNTFRQ is purely reads-as-written for the benefit 2978 * of software; writing it doesn't actually change the timer frequency. 2979 * Our reset value matches the fixed frequency we implement the timer at. 
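     * (arm_gt_cntfrq_reset() above fills the register in from cpu->gt_cntfrq_hz.)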
2980 */ 2981 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 2982 .type = ARM_CP_ALIAS, 2983 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2984 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 2985 }, 2986 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2987 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2988 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2989 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2990 .resetfn = arm_gt_cntfrq_reset, 2991 }, 2992 /* overall control: mostly access permissions */ 2993 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 2994 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 2995 .access = PL1_RW, 2996 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 2997 .resetvalue = 0, 2998 }, 2999 /* per-timer control */ 3000 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3001 .secure = ARM_CP_SECSTATE_NS, 3002 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3003 .accessfn = gt_ptimer_access, 3004 .fieldoffset = offsetoflow32(CPUARMState, 3005 cp15.c14_timer[GTIMER_PHYS].ctl), 3006 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3007 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3008 }, 3009 { .name = "CNTP_CTL_S", 3010 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3011 .secure = ARM_CP_SECSTATE_S, 3012 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3013 .accessfn = gt_ptimer_access, 3014 .fieldoffset = offsetoflow32(CPUARMState, 3015 cp15.c14_timer[GTIMER_SEC].ctl), 3016 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3017 }, 3018 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 3019 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 3020 .type = ARM_CP_IO, .access = PL0_RW, 3021 .accessfn = gt_ptimer_access, 3022 .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1, 3023 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 3024 .resetvalue = 0, 3025 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3026 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3027 }, 3028 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 3029 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3030 .accessfn = gt_vtimer_access, 3031 .fieldoffset = offsetoflow32(CPUARMState, 3032 cp15.c14_timer[GTIMER_VIRT].ctl), 3033 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3034 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3035 }, 3036 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 3037 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 3038 .type = ARM_CP_IO, .access = PL0_RW, 3039 .accessfn = gt_vtimer_access, 3040 .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1, 3041 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 3042 .resetvalue = 0, 3043 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3044 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3045 }, 3046 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 3047 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3048 .secure = ARM_CP_SECSTATE_NS, 3049 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3050 .accessfn = gt_ptimer_access, 3051 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3052 }, 3053 { .name = "CNTP_TVAL_S", 3054 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3055 .secure = ARM_CP_SECSTATE_S, 3056 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3057 
.accessfn = gt_ptimer_access, 3058 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 3059 }, 3060 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3061 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 3062 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3063 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 3064 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3065 }, 3066 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 3067 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3068 .accessfn = gt_vtimer_access, 3069 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3070 }, 3071 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3072 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 3073 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3074 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 3075 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3076 }, 3077 /* The counter itself */ 3078 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 3079 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3080 .accessfn = gt_pct_access, 3081 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 3082 }, 3083 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 3084 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 3085 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3086 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 3087 }, 3088 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 3089 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3090 .accessfn = gt_vct_access, 3091 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 3092 }, 3093 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3094 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3095 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3096 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 3097 }, 3098 /* Comparison value, indicating when the timer goes off */ 3099 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 3100 .secure = ARM_CP_SECSTATE_NS, 3101 .access = PL0_RW, 3102 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3103 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3104 .accessfn = gt_ptimer_access, 3105 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3106 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3107 }, 3108 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 3109 .secure = ARM_CP_SECSTATE_S, 3110 .access = PL0_RW, 3111 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3112 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3113 .accessfn = gt_ptimer_access, 3114 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3115 }, 3116 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3117 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 3118 .access = PL0_RW, 3119 .type = ARM_CP_IO, 3120 .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1, 3121 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3122 .resetvalue = 0, .accessfn = gt_ptimer_access, 3123 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3124 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3125 }, 3126 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 3127 .access = PL0_RW, 3128 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3129 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3130 .accessfn = 
gt_vtimer_access, 3131 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3132 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3133 }, 3134 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3135 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 3136 .access = PL0_RW, 3137 .type = ARM_CP_IO, 3138 .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1, 3139 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3140 .resetvalue = 0, .accessfn = gt_vtimer_access, 3141 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3142 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3143 }, 3144 /* 3145 * Secure timer -- this is actually restricted to only EL3 3146 * and configurably Secure-EL1 via the accessfn. 3147 */ 3148 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 3149 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 3150 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 3151 .accessfn = gt_stimer_access, 3152 .readfn = gt_sec_tval_read, 3153 .writefn = gt_sec_tval_write, 3154 .resetfn = gt_sec_timer_reset, 3155 }, 3156 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 3157 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 3158 .type = ARM_CP_IO, .access = PL1_RW, 3159 .accessfn = gt_stimer_access, 3160 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 3161 .resetvalue = 0, 3162 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3163 }, 3164 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 3165 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 3166 .type = ARM_CP_IO, .access = PL1_RW, 3167 .accessfn = gt_stimer_access, 3168 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3169 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3170 }, 3171 }; 3172 3173 /* 3174 * FEAT_ECV adds extra views of CNTVCT_EL0 and CNTPCT_EL0 which 3175 * are "self-synchronizing". For QEMU all sysregs are self-synchronizing, 3176 * so our implementations here are identical to the normal registers. 
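 * (They differ only in encoding: CNTPCTSS_EL0 and CNTVCTSS_EL0 use opc2 5 and 6,
 * where the normal CNTPCT_EL0 and CNTVCT_EL0 views use 1 and 2.)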
3177 */ 3178 static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = { 3179 { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9, 3180 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3181 .accessfn = gt_vct_access, 3182 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 3183 }, 3184 { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64, 3185 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6, 3186 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3187 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 3188 }, 3189 { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8, 3190 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3191 .accessfn = gt_pct_access, 3192 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 3193 }, 3194 { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64, 3195 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 5, 3196 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3197 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 3198 }, 3199 }; 3200 3201 static CPAccessResult gt_cntpoff_access(CPUARMState *env, 3202 const ARMCPRegInfo *ri, 3203 bool isread) 3204 { 3205 if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) && 3206 !(env->cp15.scr_el3 & SCR_ECVEN)) { 3207 return CP_ACCESS_TRAP_EL3; 3208 } 3209 return CP_ACCESS_OK; 3210 } 3211 3212 static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 3213 uint64_t value) 3214 { 3215 ARMCPU *cpu = env_archcpu(env); 3216 3217 trace_arm_gt_cntpoff_write(value); 3218 raw_write(env, ri, value); 3219 gt_recalc_timer(cpu, GTIMER_PHYS); 3220 } 3221 3222 static const ARMCPRegInfo gen_timer_cntpoff_reginfo = { 3223 .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64, 3224 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6, 3225 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 3226 .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write, 3227 .nv2_redirect_offset = 0x1a8, 3228 .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2), 3229 }; 3230 #else 3231 3232 /* 3233 * In user-mode most of the generic timer registers are inaccessible 3234 * however modern kernels (4.12+) allow access to cntvct_el0 3235 */ 3236 3237 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 3238 { 3239 ARMCPU *cpu = env_archcpu(env); 3240 3241 /* 3242 * Currently we have no support for QEMUTimer in linux-user so we 3243 * can't call gt_get_countervalue(env), instead we directly 3244 * call the lower level functions. 3245 */ 3246 return cpu_get_clock() / gt_cntfrq_period_ns(cpu); 3247 } 3248 3249 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3250 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3251 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3252 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 3253 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3254 .resetfn = arm_gt_cntfrq_reset, 3255 }, 3256 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3257 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3258 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3259 .readfn = gt_virt_cnt_read, 3260 }, 3261 }; 3262 3263 /* 3264 * CNTVCTSS_EL0 has the same trap conditions as CNTVCT_EL0, so it also 3265 * is exposed to userspace by Linux. 
3266 */ 3267 static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = { 3268 { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64, 3269 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6, 3270 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3271 .readfn = gt_virt_cnt_read, 3272 }, 3273 }; 3274 3275 #endif 3276 3277 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3278 { 3279 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3280 raw_write(env, ri, value); 3281 } else if (arm_feature(env, ARM_FEATURE_V7)) { 3282 raw_write(env, ri, value & 0xfffff6ff); 3283 } else { 3284 raw_write(env, ri, value & 0xfffff1ff); 3285 } 3286 } 3287 3288 #ifndef CONFIG_USER_ONLY 3289 /* get_phys_addr() isn't present for user-mode-only targets */ 3290 3291 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 3292 bool isread) 3293 { 3294 if (ri->opc2 & 4) { 3295 /* 3296 * The ATS12NSO* operations must trap to EL3 or EL2 if executed in 3297 * Secure EL1 (which can only happen if EL3 is AArch64). 3298 * They are simply UNDEF if executed from NS EL1. 3299 * They function normally from EL2 or EL3. 3300 */ 3301 if (arm_current_el(env) == 1) { 3302 if (arm_is_secure_below_el3(env)) { 3303 if (env->cp15.scr_el3 & SCR_EEL2) { 3304 return CP_ACCESS_TRAP_EL2; 3305 } 3306 return CP_ACCESS_TRAP_EL3; 3307 } 3308 return CP_ACCESS_UNDEFINED; 3309 } 3310 } 3311 return CP_ACCESS_OK; 3312 } 3313 3314 #ifdef CONFIG_TCG 3315 static int par_el1_shareability(GetPhysAddrResult *res) 3316 { 3317 /* 3318 * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC 3319 * memory -- see pseudocode PAREncodeShareability(). 3320 */ 3321 if (((res->cacheattrs.attrs & 0xf0) == 0) || 3322 res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) { 3323 return 2; 3324 } 3325 return res->cacheattrs.shareability; 3326 } 3327 3328 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 3329 MMUAccessType access_type, ARMMMUIdx mmu_idx, 3330 ARMSecuritySpace ss) 3331 { 3332 bool ret; 3333 uint64_t par64; 3334 bool format64 = false; 3335 ARMMMUFaultInfo fi = {}; 3336 GetPhysAddrResult res = {}; 3337 3338 /* 3339 * I_MXTJT: Granule protection checks are not performed on the final 3340 * address of a successful translation. This is a translation not a 3341 * memory reference, so "memop = none = 0". 3342 */ 3343 ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0, 3344 mmu_idx, ss, &res, &fi); 3345 3346 /* 3347 * ATS operations only do S1 or S1+S2 translations, so we never 3348 * have to deal with the ARMCacheAttrs format for S2 only. 3349 */ 3350 assert(!res.cacheattrs.is_s2_format); 3351 3352 if (ret) { 3353 /* 3354 * Some kinds of translation fault must cause exceptions rather 3355 * than being reported in the PAR. 3356 */ 3357 int current_el = arm_current_el(env); 3358 int target_el; 3359 uint32_t syn, fsr, fsc; 3360 bool take_exc = false; 3361 3362 if (fi.s1ptw && current_el == 1 3363 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 3364 /* 3365 * Synchronous stage 2 fault on an access made as part of the 3366 * translation table walk for AT S1E0* or AT S1E1* insn 3367 * executed from NS EL1. If this is a synchronous external abort 3368 * and SCR_EL3.EA == 1, then we take a synchronous external abort 3369 * to EL3. Otherwise the fault is taken as an exception to EL2, 3370 * and HPFAR_EL2 holds the faulting IPA. 
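             * (The code below stores the IPA, starting at bit 12 of the
             * faulting address, into HPFAR_EL2.FIPA, which starts at bit 4
             * of the register.)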
3371 */ 3372 if (fi.type == ARMFault_SyncExternalOnWalk && 3373 (env->cp15.scr_el3 & SCR_EA)) { 3374 target_el = 3; 3375 } else { 3376 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; 3377 if (arm_is_secure_below_el3(env) && fi.s1ns) { 3378 env->cp15.hpfar_el2 |= HPFAR_NS; 3379 } 3380 target_el = 2; 3381 } 3382 take_exc = true; 3383 } else if (fi.type == ARMFault_SyncExternalOnWalk) { 3384 /* 3385 * Synchronous external aborts during a translation table walk 3386 * are taken as Data Abort exceptions. 3387 */ 3388 if (fi.stage2) { 3389 if (current_el == 3) { 3390 target_el = 3; 3391 } else { 3392 target_el = 2; 3393 } 3394 } else { 3395 target_el = exception_target_el(env); 3396 } 3397 take_exc = true; 3398 } 3399 3400 if (take_exc) { 3401 /* Construct FSR and FSC using same logic as arm_deliver_fault() */ 3402 if (target_el == 2 || arm_el_is_aa64(env, target_el) || 3403 arm_s1_regime_using_lpae_format(env, mmu_idx)) { 3404 fsr = arm_fi_to_lfsc(&fi); 3405 fsc = extract32(fsr, 0, 6); 3406 } else { 3407 fsr = arm_fi_to_sfsc(&fi); 3408 fsc = 0x3f; 3409 } 3410 /* 3411 * Report exception with ESR indicating a fault due to a 3412 * translation table walk for a cache maintenance instruction. 3413 */ 3414 syn = syn_data_abort_no_iss(current_el == target_el, 0, 3415 fi.ea, 1, fi.s1ptw, 1, fsc); 3416 env->exception.vaddress = value; 3417 env->exception.fsr = fsr; 3418 raise_exception(env, EXCP_DATA_ABORT, syn, target_el); 3419 } 3420 } 3421 3422 if (is_a64(env)) { 3423 format64 = true; 3424 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 3425 /* 3426 * ATS1Cxx: 3427 * * TTBCR.EAE determines whether the result is returned using the 3428 * 32-bit or the 64-bit PAR format 3429 * * Instructions executed in Hyp mode always use the 64bit format 3430 * 3431 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 3432 * * The Non-secure TTBCR.EAE bit is set to 1 3433 * * The implementation includes EL2, and the value of HCR.VM is 1 3434 * 3435 * (Note that HCR.DC makes HCR.VM behave as if it is 1.) 3436 * 3437 * ATS1Hx always uses the 64bit format. 3438 */ 3439 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 3440 3441 if (arm_feature(env, ARM_FEATURE_EL2)) { 3442 if (mmu_idx == ARMMMUIdx_E10_0 || 3443 mmu_idx == ARMMMUIdx_E10_1 || 3444 mmu_idx == ARMMMUIdx_E10_1_PAN) { 3445 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); 3446 } else { 3447 format64 |= arm_current_el(env) == 2; 3448 } 3449 } 3450 } 3451 3452 if (format64) { 3453 /* Create a 64-bit PAR */ 3454 par64 = (1 << 11); /* LPAE bit always set */ 3455 if (!ret) { 3456 par64 |= res.f.phys_addr & ~0xfffULL; 3457 if (!res.f.attrs.secure) { 3458 par64 |= (1 << 9); /* NS */ 3459 } 3460 par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */ 3461 par64 |= par_el1_shareability(&res) << 7; /* SH */ 3462 } else { 3463 uint32_t fsr = arm_fi_to_lfsc(&fi); 3464 3465 par64 |= 1; /* F */ 3466 par64 |= (fsr & 0x3f) << 1; /* FS */ 3467 if (fi.stage2) { 3468 par64 |= (1 << 9); /* S */ 3469 } 3470 if (fi.s1ptw) { 3471 par64 |= (1 << 8); /* PTW */ 3472 } 3473 } 3474 } else { 3475 /* 3476 * fsr is a DFSR/IFSR value for the short descriptor 3477 * translation table format (with WnR always clear). 3478 * Convert it to a 32-bit PAR. 
3479 */ 3480 if (!ret) { 3481 /* We do not set any attribute bits in the PAR */ 3482 if (res.f.lg_page_size == 24 3483 && arm_feature(env, ARM_FEATURE_V7)) { 3484 par64 = (res.f.phys_addr & 0xff000000) | (1 << 1); 3485 } else { 3486 par64 = res.f.phys_addr & 0xfffff000; 3487 } 3488 if (!res.f.attrs.secure) { 3489 par64 |= (1 << 9); /* NS */ 3490 } 3491 } else { 3492 uint32_t fsr = arm_fi_to_sfsc(&fi); 3493 3494 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 3495 ((fsr & 0xf) << 1) | 1; 3496 } 3497 } 3498 return par64; 3499 } 3500 #endif /* CONFIG_TCG */ 3501 3502 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3503 { 3504 #ifdef CONFIG_TCG 3505 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3506 uint64_t par64; 3507 ARMMMUIdx mmu_idx; 3508 int el = arm_current_el(env); 3509 ARMSecuritySpace ss = arm_security_space(env); 3510 3511 switch (ri->opc2 & 6) { 3512 case 0: 3513 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ 3514 switch (el) { 3515 case 3: 3516 if (ri->crm == 9 && arm_pan_enabled(env)) { 3517 mmu_idx = ARMMMUIdx_E30_3_PAN; 3518 } else { 3519 mmu_idx = ARMMMUIdx_E3; 3520 } 3521 break; 3522 case 2: 3523 g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ 3524 /* fall through */ 3525 case 1: 3526 if (ri->crm == 9 && arm_pan_enabled(env)) { 3527 mmu_idx = ARMMMUIdx_Stage1_E1_PAN; 3528 } else { 3529 mmu_idx = ARMMMUIdx_Stage1_E1; 3530 } 3531 break; 3532 default: 3533 g_assert_not_reached(); 3534 } 3535 break; 3536 case 2: 3537 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 3538 switch (el) { 3539 case 3: 3540 mmu_idx = ARMMMUIdx_E30_0; 3541 break; 3542 case 2: 3543 g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ 3544 mmu_idx = ARMMMUIdx_Stage1_E0; 3545 break; 3546 case 1: 3547 mmu_idx = ARMMMUIdx_Stage1_E0; 3548 break; 3549 default: 3550 g_assert_not_reached(); 3551 } 3552 break; 3553 case 4: 3554 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 3555 mmu_idx = ARMMMUIdx_E10_1; 3556 ss = ARMSS_NonSecure; 3557 break; 3558 case 6: 3559 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 3560 mmu_idx = ARMMMUIdx_E10_0; 3561 ss = ARMSS_NonSecure; 3562 break; 3563 default: 3564 g_assert_not_reached(); 3565 } 3566 3567 par64 = do_ats_write(env, value, access_type, mmu_idx, ss); 3568 3569 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3570 #else 3571 /* Handled by hardware accelerator. */ 3572 g_assert_not_reached(); 3573 #endif /* CONFIG_TCG */ 3574 } 3575 3576 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 3577 uint64_t value) 3578 { 3579 #ifdef CONFIG_TCG 3580 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3581 uint64_t par64; 3582 3583 /* There is no SecureEL2 for AArch32. */ 3584 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, 3585 ARMSS_NonSecure); 3586 3587 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3588 #else 3589 /* Handled by hardware accelerator. */ 3590 g_assert_not_reached(); 3591 #endif /* CONFIG_TCG */ 3592 } 3593 3594 static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri, 3595 bool isread) 3596 { 3597 /* 3598 * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level 3599 * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can 3600 * only happen when executing at EL3 because that combination also causes an 3601 * illegal exception return. 
We don't need to check FEAT_RME either, because 3602 * scr_write() ensures that the NSE bit is not set otherwise. 3603 */ 3604 if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) { 3605 return CP_ACCESS_UNDEFINED; 3606 } 3607 return CP_ACCESS_OK; 3608 } 3609 3610 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 3611 bool isread) 3612 { 3613 if (arm_current_el(env) == 3 && 3614 !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) { 3615 return CP_ACCESS_UNDEFINED; 3616 } 3617 return at_e012_access(env, ri, isread); 3618 } 3619 3620 static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri, 3621 bool isread) 3622 { 3623 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) { 3624 return CP_ACCESS_TRAP_EL2; 3625 } 3626 return at_e012_access(env, ri, isread); 3627 } 3628 3629 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 3630 uint64_t value) 3631 { 3632 #ifdef CONFIG_TCG 3633 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3634 ARMMMUIdx mmu_idx; 3635 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 3636 bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE); 3637 bool for_el3 = false; 3638 ARMSecuritySpace ss; 3639 3640 switch (ri->opc2 & 6) { 3641 case 0: 3642 switch (ri->opc1) { 3643 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ 3644 if (ri->crm == 9 && arm_pan_enabled(env)) { 3645 mmu_idx = regime_e20 ? 3646 ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN; 3647 } else { 3648 mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1; 3649 } 3650 break; 3651 case 4: /* AT S1E2R, AT S1E2W */ 3652 mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2; 3653 break; 3654 case 6: /* AT S1E3R, AT S1E3W */ 3655 mmu_idx = ARMMMUIdx_E3; 3656 for_el3 = true; 3657 break; 3658 default: 3659 g_assert_not_reached(); 3660 } 3661 break; 3662 case 2: /* AT S1E0R, AT S1E0W */ 3663 mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0; 3664 break; 3665 case 4: /* AT S12E1R, AT S12E1W */ 3666 mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1; 3667 break; 3668 case 6: /* AT S12E0R, AT S12E0W */ 3669 mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0; 3670 break; 3671 default: 3672 g_assert_not_reached(); 3673 } 3674 3675 ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env); 3676 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss); 3677 #else 3678 /* Handled by hardware accelerator. */ 3679 g_assert_not_reached(); 3680 #endif /* CONFIG_TCG */ 3681 } 3682 #endif 3683 3684 /* Return basic MPU access permission bits. */ 3685 static uint32_t simple_mpu_ap_bits(uint32_t val) 3686 { 3687 uint32_t ret; 3688 uint32_t mask; 3689 int i; 3690 ret = 0; 3691 mask = 3; 3692 for (i = 0; i < 16; i += 2) { 3693 ret |= (val >> i) & mask; 3694 mask <<= 2; 3695 } 3696 return ret; 3697 } 3698 3699 /* Pad basic MPU access permission bits to extended format. 
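 * Each 2-bit access permission field moves from bit position 2n in the
 * packed value to bit position 4n; simple_mpu_ap_bits() above performs
 * the inverse packing.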
*/ 3700 static uint32_t extended_mpu_ap_bits(uint32_t val) 3701 { 3702 uint32_t ret; 3703 uint32_t mask; 3704 int i; 3705 ret = 0; 3706 mask = 3; 3707 for (i = 0; i < 16; i += 2) { 3708 ret |= (val & mask) << i; 3709 mask <<= 2; 3710 } 3711 return ret; 3712 } 3713 3714 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3715 uint64_t value) 3716 { 3717 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 3718 } 3719 3720 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3721 { 3722 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 3723 } 3724 3725 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3726 uint64_t value) 3727 { 3728 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 3729 } 3730 3731 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3732 { 3733 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 3734 } 3735 3736 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 3737 { 3738 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3739 3740 if (!u32p) { 3741 return 0; 3742 } 3743 3744 u32p += env->pmsav7.rnr[M_REG_NS]; 3745 return *u32p; 3746 } 3747 3748 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3749 uint64_t value) 3750 { 3751 ARMCPU *cpu = env_archcpu(env); 3752 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3753 3754 if (!u32p) { 3755 return; 3756 } 3757 3758 u32p += env->pmsav7.rnr[M_REG_NS]; 3759 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3760 *u32p = value; 3761 } 3762 3763 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3764 uint64_t value) 3765 { 3766 ARMCPU *cpu = env_archcpu(env); 3767 uint32_t nrgs = cpu->pmsav7_dregion; 3768 3769 if (value >= nrgs) { 3770 qemu_log_mask(LOG_GUEST_ERROR, 3771 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 3772 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 3773 return; 3774 } 3775 3776 raw_write(env, ri, value); 3777 } 3778 3779 static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri, 3780 uint64_t value) 3781 { 3782 ARMCPU *cpu = env_archcpu(env); 3783 3784 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3785 env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; 3786 } 3787 3788 static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri) 3789 { 3790 return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; 3791 } 3792 3793 static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri, 3794 uint64_t value) 3795 { 3796 ARMCPU *cpu = env_archcpu(env); 3797 3798 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3799 env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; 3800 } 3801 3802 static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri) 3803 { 3804 return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; 3805 } 3806 3807 static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3808 uint64_t value) 3809 { 3810 ARMCPU *cpu = env_archcpu(env); 3811 3812 /* 3813 * Ignore writes that would select not implemented region. 3814 * This is architecturally UNPREDICTABLE. 3815 */ 3816 if (value >= cpu->pmsav7_dregion) { 3817 return; 3818 } 3819 3820 env->pmsav7.rnr[M_REG_NS] = value; 3821 } 3822 3823 static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri, 3824 uint64_t value) 3825 { 3826 ARMCPU *cpu = env_archcpu(env); 3827 3828 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! 
*/ 3829 env->pmsav8.hprbar[env->pmsav8.hprselr] = value; 3830 } 3831 3832 static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri) 3833 { 3834 return env->pmsav8.hprbar[env->pmsav8.hprselr]; 3835 } 3836 3837 static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri, 3838 uint64_t value) 3839 { 3840 ARMCPU *cpu = env_archcpu(env); 3841 3842 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3843 env->pmsav8.hprlar[env->pmsav8.hprselr] = value; 3844 } 3845 3846 static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri) 3847 { 3848 return env->pmsav8.hprlar[env->pmsav8.hprselr]; 3849 } 3850 3851 static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3852 uint64_t value) 3853 { 3854 uint32_t n; 3855 uint32_t bit; 3856 ARMCPU *cpu = env_archcpu(env); 3857 3858 /* Ignore writes to unimplemented regions */ 3859 int rmax = MIN(cpu->pmsav8r_hdregion, 32); 3860 value &= MAKE_64BIT_MASK(0, rmax); 3861 3862 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3863 3864 /* Register alias is only valid for first 32 indexes */ 3865 for (n = 0; n < rmax; ++n) { 3866 bit = extract32(value, n, 1); 3867 env->pmsav8.hprlar[n] = deposit32( 3868 env->pmsav8.hprlar[n], 0, 1, bit); 3869 } 3870 } 3871 3872 static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3873 { 3874 uint32_t n; 3875 uint32_t result = 0x0; 3876 ARMCPU *cpu = env_archcpu(env); 3877 3878 /* Register alias is only valid for first 32 indexes */ 3879 for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) { 3880 if (env->pmsav8.hprlar[n] & 0x1) { 3881 result |= (0x1 << n); 3882 } 3883 } 3884 return result; 3885 } 3886 3887 static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3888 uint64_t value) 3889 { 3890 ARMCPU *cpu = env_archcpu(env); 3891 3892 /* 3893 * Ignore writes that would select not implemented region. 3894 * This is architecturally UNPREDICTABLE. 3895 */ 3896 if (value >= cpu->pmsav8r_hdregion) { 3897 return; 3898 } 3899 3900 env->pmsav8.hprselr = value; 3901 } 3902 3903 static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri, 3904 uint64_t value) 3905 { 3906 ARMCPU *cpu = env_archcpu(env); 3907 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | 3908 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); 3909 3910 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! 
*/ 3911 3912 if (ri->opc1 & 4) { 3913 if (index >= cpu->pmsav8r_hdregion) { 3914 return; 3915 } 3916 if (ri->opc2 & 0x1) { 3917 env->pmsav8.hprlar[index] = value; 3918 } else { 3919 env->pmsav8.hprbar[index] = value; 3920 } 3921 } else { 3922 if (index >= cpu->pmsav7_dregion) { 3923 return; 3924 } 3925 if (ri->opc2 & 0x1) { 3926 env->pmsav8.rlar[M_REG_NS][index] = value; 3927 } else { 3928 env->pmsav8.rbar[M_REG_NS][index] = value; 3929 } 3930 } 3931 } 3932 3933 static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri) 3934 { 3935 ARMCPU *cpu = env_archcpu(env); 3936 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | 3937 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); 3938 3939 if (ri->opc1 & 4) { 3940 if (index >= cpu->pmsav8r_hdregion) { 3941 return 0x0; 3942 } 3943 if (ri->opc2 & 0x1) { 3944 return env->pmsav8.hprlar[index]; 3945 } else { 3946 return env->pmsav8.hprbar[index]; 3947 } 3948 } else { 3949 if (index >= cpu->pmsav7_dregion) { 3950 return 0x0; 3951 } 3952 if (ri->opc2 & 0x1) { 3953 return env->pmsav8.rlar[M_REG_NS][index]; 3954 } else { 3955 return env->pmsav8.rbar[M_REG_NS][index]; 3956 } 3957 } 3958 } 3959 3960 static const ARMCPRegInfo pmsav8r_cp_reginfo[] = { 3961 { .name = "PRBAR", 3962 .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0, 3963 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3964 .accessfn = access_tvm_trvm, 3965 .readfn = prbar_read, .writefn = prbar_write }, 3966 { .name = "PRLAR", 3967 .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1, 3968 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3969 .accessfn = access_tvm_trvm, 3970 .readfn = prlar_read, .writefn = prlar_write }, 3971 { .name = "PRSELR", .resetvalue = 0, 3972 .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1, 3973 .access = PL1_RW, .accessfn = access_tvm_trvm, 3974 .writefn = prselr_write, 3975 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) }, 3976 { .name = "HPRBAR", .resetvalue = 0, 3977 .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0, 3978 .access = PL2_RW, .type = ARM_CP_NO_RAW, 3979 .readfn = hprbar_read, .writefn = hprbar_write }, 3980 { .name = "HPRLAR", 3981 .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1, 3982 .access = PL2_RW, .type = ARM_CP_NO_RAW, 3983 .readfn = hprlar_read, .writefn = hprlar_write }, 3984 { .name = "HPRSELR", .resetvalue = 0, 3985 .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1, 3986 .access = PL2_RW, 3987 .writefn = hprselr_write, 3988 .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) }, 3989 { .name = "HPRENR", 3990 .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1, 3991 .access = PL2_RW, .type = ARM_CP_NO_RAW, 3992 .readfn = hprenr_read, .writefn = hprenr_write }, 3993 }; 3994 3995 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 3996 /* 3997 * Reset for all these registers is handled in arm_cpu_reset(), 3998 * because the PMSAv7 is also used by M-profile CPUs, which do 3999 * not register cpregs but still need the state to be reset. 
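 * Note that DRBAR, DRSR and DRACR are backed by per-region arrays:
 * pmsav7_read() and pmsav7_write() index them by the current
 * RGNR (pmsav7.rnr) value.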
4000 */ 4001 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 4002 .access = PL1_RW, .type = ARM_CP_NO_RAW, 4003 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 4004 .readfn = pmsav7_read, .writefn = pmsav7_write, 4005 .resetfn = arm_cp_reset_ignore }, 4006 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 4007 .access = PL1_RW, .type = ARM_CP_NO_RAW, 4008 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 4009 .readfn = pmsav7_read, .writefn = pmsav7_write, 4010 .resetfn = arm_cp_reset_ignore }, 4011 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 4012 .access = PL1_RW, .type = ARM_CP_NO_RAW, 4013 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 4014 .readfn = pmsav7_read, .writefn = pmsav7_write, 4015 .resetfn = arm_cp_reset_ignore }, 4016 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 4017 .access = PL1_RW, 4018 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 4019 .writefn = pmsav7_rgnr_write, 4020 .resetfn = arm_cp_reset_ignore }, 4021 }; 4022 4023 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 4024 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 4025 .access = PL1_RW, .type = ARM_CP_ALIAS, 4026 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 4027 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 4028 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 4029 .access = PL1_RW, .type = ARM_CP_ALIAS, 4030 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 4031 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 4032 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 4033 .access = PL1_RW, 4034 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 4035 .resetvalue = 0, }, 4036 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 4037 .access = PL1_RW, 4038 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 4039 .resetvalue = 0, }, 4040 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 4041 .access = PL1_RW, 4042 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 4043 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 4044 .access = PL1_RW, 4045 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 4046 /* Protection region base and size registers */ 4047 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 4048 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 4049 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 4050 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 4051 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 4052 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 4053 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 4054 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 4055 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 4056 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 4057 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 4058 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 4059 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 4060 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 4061 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 4062 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 4063 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 4064 .fieldoffset = 
offsetof(CPUARMState, cp15.c6_region[5]) }, 4065 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 4066 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 4067 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 4068 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 4069 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 4070 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 4071 }; 4072 4073 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4074 uint64_t value) 4075 { 4076 ARMCPU *cpu = env_archcpu(env); 4077 4078 if (!arm_feature(env, ARM_FEATURE_V8)) { 4079 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 4080 /* 4081 * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 4082 * using Long-descriptor translation table format 4083 */ 4084 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 4085 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 4086 /* 4087 * In an implementation that includes the Security Extensions 4088 * TTBCR has additional fields PD0 [4] and PD1 [5] for 4089 * Short-descriptor translation table format. 4090 */ 4091 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 4092 } else { 4093 value &= TTBCR_N; 4094 } 4095 } 4096 4097 if (arm_feature(env, ARM_FEATURE_LPAE)) { 4098 /* 4099 * With LPAE the TTBCR could result in a change of ASID 4100 * via the TTBCR.A1 bit, so do a TLB flush. 4101 */ 4102 tlb_flush(CPU(cpu)); 4103 } 4104 raw_write(env, ri, value); 4105 } 4106 4107 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, 4108 uint64_t value) 4109 { 4110 ARMCPU *cpu = env_archcpu(env); 4111 4112 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 4113 tlb_flush(CPU(cpu)); 4114 raw_write(env, ri, value); 4115 } 4116 4117 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4118 uint64_t value) 4119 { 4120 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ 4121 if (cpreg_field_is_64bit(ri) && 4122 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 4123 ARMCPU *cpu = env_archcpu(env); 4124 tlb_flush(CPU(cpu)); 4125 } 4126 raw_write(env, ri, value); 4127 } 4128 4129 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4130 uint64_t value) 4131 { 4132 /* 4133 * If we are running with E2&0 regime, then an ASID is active. 4134 * Flush if that might be changing. Note we're not checking 4135 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that 4136 * holds the active ASID, only checking the field that might. 4137 */ 4138 if (extract64(raw_read(env, ri) ^ value, 48, 16) && 4139 (arm_hcr_el2_eff(env) & HCR_E2H)) { 4140 uint16_t mask = ARMMMUIdxBit_E20_2 | 4141 ARMMMUIdxBit_E20_2_PAN | 4142 ARMMMUIdxBit_E20_0; 4143 tlb_flush_by_mmuidx(env_cpu(env), mask); 4144 } 4145 raw_write(env, ri, value); 4146 } 4147 4148 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4149 uint64_t value) 4150 { 4151 ARMCPU *cpu = env_archcpu(env); 4152 CPUState *cs = CPU(cpu); 4153 4154 /* 4155 * A change in VMID to the stage2 page table (Stage2) invalidates 4156 * the stage2 and combined stage 1&2 tlbs (EL10_1 and EL10_0). 
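 * The VMID field occupies VTTBR_EL2[63:48] (only [55:48] without
 * FEAT_VMID16), which is why the check below compares bits [63:48]
 * of the old and new values.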
4157 */ 4158 if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 4159 tlb_flush_by_mmuidx(cs, alle1_tlbmask(env)); 4160 } 4161 raw_write(env, ri, value); 4162 } 4163 4164 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 4165 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 4166 .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, 4167 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 4168 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 4169 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 4170 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 4171 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 4172 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 4173 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 4174 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 4175 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 4176 offsetof(CPUARMState, cp15.dfar_ns) } }, 4177 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 4178 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 4179 .access = PL1_RW, .accessfn = access_tvm_trvm, 4180 .fgt = FGT_FAR_EL1, 4181 .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1, 4182 .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 4183 .resetvalue = 0, }, 4184 }; 4185 4186 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 4187 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 4188 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 4189 .access = PL1_RW, .accessfn = access_tvm_trvm, 4190 .fgt = FGT_ESR_EL1, 4191 .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1, 4192 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 4193 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 4194 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 4195 .access = PL1_RW, .accessfn = access_tvm_trvm, 4196 .fgt = FGT_TTBR0_EL1, 4197 .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1, 4198 .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, 4199 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 4200 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 4201 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 4202 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 4203 .access = PL1_RW, .accessfn = access_tvm_trvm, 4204 .fgt = FGT_TTBR1_EL1, 4205 .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1, 4206 .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, 4207 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 4208 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 4209 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 4210 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 4211 .access = PL1_RW, .accessfn = access_tvm_trvm, 4212 .fgt = FGT_TCR_EL1, 4213 .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1, 4214 .writefn = vmsa_tcr_el12_write, 4215 .raw_writefn = raw_write, 4216 .resetvalue = 0, 4217 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 4218 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 4219 .access = PL1_RW, .accessfn = access_tvm_trvm, 4220 .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 4221 .raw_writefn = raw_write, 4222 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 4223 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 4224 }; 4225 4226 /* 4227 * Note that unlike TTBCR, writing to TTBCR2 does not require flushing 4228 * qemu tlbs nor adjusting cached masks. 
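 * As the bank_fieldoffsets below show, TTBCR2 is simply an alias of
 * the high 32 bits of TCR_EL1 (or of TCR_EL3 for the Secure bank).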
4229 */ 4230 static const ARMCPRegInfo ttbcr2_reginfo = { 4231 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, 4232 .access = PL1_RW, .accessfn = access_tvm_trvm, 4233 .type = ARM_CP_ALIAS, 4234 .bank_fieldoffsets = { 4235 offsetofhigh32(CPUARMState, cp15.tcr_el[3]), 4236 offsetofhigh32(CPUARMState, cp15.tcr_el[1]), 4237 }, 4238 }; 4239 4240 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 4241 uint64_t value) 4242 { 4243 env->cp15.c15_ticonfig = value & 0xe7; 4244 /* The OS_TYPE bit in this register changes the reported CPUID! */ 4245 env->cp15.c0_cpuid = (value & (1 << 5)) ? 4246 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 4247 } 4248 4249 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 4250 uint64_t value) 4251 { 4252 env->cp15.c15_threadid = value & 0xffff; 4253 } 4254 4255 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 4256 uint64_t value) 4257 { 4258 /* Wait-for-interrupt (deprecated) */ 4259 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); 4260 } 4261 4262 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 4263 uint64_t value) 4264 { 4265 /* 4266 * On OMAP there are registers indicating the max/min index of dcache lines 4267 * containing a dirty line; cache flush operations have to reset these. 4268 */ 4269 env->cp15.c15_i_max = 0x000; 4270 env->cp15.c15_i_min = 0xff0; 4271 } 4272 4273 static const ARMCPRegInfo omap_cp_reginfo[] = { 4274 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 4275 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 4276 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 4277 .resetvalue = 0, }, 4278 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 4279 .access = PL1_RW, .type = ARM_CP_NOP }, 4280 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 4281 .access = PL1_RW, 4282 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 4283 .writefn = omap_ticonfig_write }, 4284 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 4285 .access = PL1_RW, 4286 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 4287 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 4288 .access = PL1_RW, .resetvalue = 0xff0, 4289 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 4290 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 4291 .access = PL1_RW, 4292 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 4293 .writefn = omap_threadid_write }, 4294 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 4295 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 4296 .type = ARM_CP_NO_RAW, 4297 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 4298 /* 4299 * TODO: Peripheral port remap register: 4300 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 4301 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 4302 * when MMU is off. 
4303 */ 4304 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 4305 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 4306 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 4307 .writefn = omap_cachemaint_write }, 4308 { .name = "C9", .cp = 15, .crn = 9, 4309 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 4310 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 4311 }; 4312 4313 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 4314 uint64_t value) 4315 { 4316 env->cp15.c15_cpar = value & 0x3fff; 4317 } 4318 4319 static const ARMCPRegInfo xscale_cp_reginfo[] = { 4320 { .name = "XSCALE_CPAR", 4321 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 4322 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 4323 .writefn = xscale_cpar_write, }, 4324 { .name = "XSCALE_AUXCR", 4325 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 4326 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 4327 .resetvalue = 0, }, 4328 /* 4329 * XScale specific cache-lockdown: since we have no cache we NOP these 4330 * and hope the guest does not really rely on cache behaviour. 4331 */ 4332 { .name = "XSCALE_LOCK_ICACHE_LINE", 4333 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 4334 .access = PL1_W, .type = ARM_CP_NOP }, 4335 { .name = "XSCALE_UNLOCK_ICACHE", 4336 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 4337 .access = PL1_W, .type = ARM_CP_NOP }, 4338 { .name = "XSCALE_DCACHE_LOCK", 4339 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 4340 .access = PL1_RW, .type = ARM_CP_NOP }, 4341 { .name = "XSCALE_UNLOCK_DCACHE", 4342 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 4343 .access = PL1_W, .type = ARM_CP_NOP }, 4344 }; 4345 4346 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 4347 /* 4348 * RAZ/WI the whole crn=15 space, when we don't have a more specific 4349 * implementation of this implementation-defined space. 4350 * Ideally this should eventually disappear in favour of actually 4351 * implementing the correct behaviour for all cores. 
4352 */ 4353 { .name = "C15_IMPDEF", .cp = 15, .crn = 15, 4354 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 4355 .access = PL1_RW, 4356 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, 4357 .resetvalue = 0 }, 4358 }; 4359 4360 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { 4361 /* Cache status: RAZ because we have no cache so it's always clean */ 4362 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, 4363 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4364 .resetvalue = 0 }, 4365 }; 4366 4367 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { 4368 /* We never have a block transfer operation in progress */ 4369 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, 4370 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4371 .resetvalue = 0 }, 4372 /* The cache ops themselves: these all NOP for QEMU */ 4373 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, 4374 .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, 4375 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, 4376 .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, 4377 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, 4378 .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, 4379 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, 4380 .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, 4381 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, 4382 .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, 4383 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, 4384 .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, 4385 }; 4386 4387 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { 4388 /* 4389 * The cache test-and-clean instructions always return (1 << 30) 4390 * to indicate that there are no dirty cache lines. 4391 */ 4392 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, 4393 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4394 .resetvalue = (1 << 30) }, 4395 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, 4396 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4397 .resetvalue = (1 << 30) }, 4398 }; 4399 4400 static const ARMCPRegInfo strongarm_cp_reginfo[] = { 4401 /* Ignore ReadBuffer accesses */ 4402 { .name = "C9_READBUFFER", .cp = 15, .crn = 9, 4403 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 4404 .access = PL1_RW, .resetvalue = 0, 4405 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, 4406 }; 4407 4408 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4409 { 4410 unsigned int cur_el = arm_current_el(env); 4411 4412 if (arm_is_el2_enabled(env) && cur_el == 1) { 4413 return env->cp15.vpidr_el2; 4414 } 4415 return raw_read(env, ri); 4416 } 4417 4418 static uint64_t mpidr_read_val(CPUARMState *env) 4419 { 4420 ARMCPU *cpu = env_archcpu(env); 4421 uint64_t mpidr = cpu->mp_affinity; 4422 4423 if (arm_feature(env, ARM_FEATURE_V7MP)) { 4424 mpidr |= (1U << 31); 4425 /* 4426 * Cores which are uniprocessor (non-coherent) 4427 * but still implement the MP extensions set 4428 * bit 30. (For instance, Cortex-R5). 
4429 */ 4430 if (cpu->mp_is_up) { 4431 mpidr |= (1u << 30); 4432 } 4433 } 4434 return mpidr; 4435 } 4436 4437 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4438 { 4439 unsigned int cur_el = arm_current_el(env); 4440 4441 if (arm_is_el2_enabled(env) && cur_el == 1) { 4442 return env->cp15.vmpidr_el2; 4443 } 4444 return mpidr_read_val(env); 4445 } 4446 4447 static const ARMCPRegInfo lpae_cp_reginfo[] = { 4448 /* NOP AMAIR0/1 */ 4449 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 4450 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 4451 .access = PL1_RW, .accessfn = access_tvm_trvm, 4452 .fgt = FGT_AMAIR_EL1, 4453 .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1, 4454 .type = ARM_CP_CONST, .resetvalue = 0 }, 4455 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 4456 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 4457 .access = PL1_RW, .accessfn = access_tvm_trvm, 4458 .type = ARM_CP_CONST, .resetvalue = 0 }, 4459 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 4460 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 4461 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 4462 offsetof(CPUARMState, cp15.par_ns)} }, 4463 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 4464 .access = PL1_RW, .accessfn = access_tvm_trvm, 4465 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4466 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 4467 offsetof(CPUARMState, cp15.ttbr0_ns) }, 4468 .writefn = vmsa_ttbr_write, .raw_writefn = raw_write }, 4469 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 4470 .access = PL1_RW, .accessfn = access_tvm_trvm, 4471 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4472 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 4473 offsetof(CPUARMState, cp15.ttbr1_ns) }, 4474 .writefn = vmsa_ttbr_write, .raw_writefn = raw_write }, 4475 }; 4476 4477 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4478 { 4479 return vfp_get_fpcr(env); 4480 } 4481 4482 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4483 uint64_t value) 4484 { 4485 vfp_set_fpcr(env, value); 4486 } 4487 4488 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4489 { 4490 return vfp_get_fpsr(env); 4491 } 4492 4493 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4494 uint64_t value) 4495 { 4496 vfp_set_fpsr(env, value); 4497 } 4498 4499 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 4500 bool isread) 4501 { 4502 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { 4503 return CP_ACCESS_TRAP_EL1; 4504 } 4505 return CP_ACCESS_OK; 4506 } 4507 4508 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 4509 uint64_t value) 4510 { 4511 env->daif = value & PSTATE_DAIF; 4512 } 4513 4514 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) 4515 { 4516 return env->pstate & PSTATE_PAN; 4517 } 4518 4519 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, 4520 uint64_t value) 4521 { 4522 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); 4523 } 4524 4525 static const ARMCPRegInfo pan_reginfo = { 4526 .name = "PAN", .state = ARM_CP_STATE_AA64, 4527 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, 4528 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4529 .readfn = aa64_pan_read, .writefn = aa64_pan_write 4530 }; 4531 4532 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) 4533 { 4534 return env->pstate & PSTATE_UAO; 4535 } 4536 4537 static void 
aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, 4538 uint64_t value) 4539 { 4540 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); 4541 } 4542 4543 static const ARMCPRegInfo uao_reginfo = { 4544 .name = "UAO", .state = ARM_CP_STATE_AA64, 4545 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, 4546 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4547 .readfn = aa64_uao_read, .writefn = aa64_uao_write 4548 }; 4549 4550 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri) 4551 { 4552 return env->pstate & PSTATE_DIT; 4553 } 4554 4555 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri, 4556 uint64_t value) 4557 { 4558 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT); 4559 } 4560 4561 static const ARMCPRegInfo dit_reginfo = { 4562 .name = "DIT", .state = ARM_CP_STATE_AA64, 4563 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5, 4564 .type = ARM_CP_NO_RAW, .access = PL0_RW, 4565 .readfn = aa64_dit_read, .writefn = aa64_dit_write 4566 }; 4567 4568 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri) 4569 { 4570 return env->pstate & PSTATE_SSBS; 4571 } 4572 4573 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri, 4574 uint64_t value) 4575 { 4576 env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS); 4577 } 4578 4579 static const ARMCPRegInfo ssbs_reginfo = { 4580 .name = "SSBS", .state = ARM_CP_STATE_AA64, 4581 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6, 4582 .type = ARM_CP_NO_RAW, .access = PL0_RW, 4583 .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write 4584 }; 4585 4586 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, 4587 const ARMCPRegInfo *ri, 4588 bool isread) 4589 { 4590 /* Cache invalidate/clean to Point of Coherency or Persistence... */ 4591 switch (arm_current_el(env)) { 4592 case 0: 4593 /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */ 4594 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { 4595 return CP_ACCESS_TRAP_EL1; 4596 } 4597 /* fall through */ 4598 case 1: 4599 /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */ 4600 if (arm_hcr_el2_eff(env) & HCR_TPCP) { 4601 return CP_ACCESS_TRAP_EL2; 4602 } 4603 break; 4604 } 4605 return CP_ACCESS_OK; 4606 } 4607 4608 static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags) 4609 { 4610 /* Cache invalidate/clean to Point of Unification... */ 4611 switch (arm_current_el(env)) { 4612 case 0: 4613 /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */ 4614 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { 4615 return CP_ACCESS_TRAP_EL1; 4616 } 4617 /* fall through */ 4618 case 1: 4619 /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set. 
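 * (access_ticab() passes HCR_TICAB | HCR_TPU and access_tocu()
 * passes HCR_TOCU | HCR_TPU as the relevant flags.)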
*/ 4620 if (arm_hcr_el2_eff(env) & hcrflags) { 4621 return CP_ACCESS_TRAP_EL2; 4622 } 4623 break; 4624 } 4625 return CP_ACCESS_OK; 4626 } 4627 4628 static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri, 4629 bool isread) 4630 { 4631 return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU); 4632 } 4633 4634 static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri, 4635 bool isread) 4636 { 4637 return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU); 4638 } 4639 4640 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 4641 bool isread) 4642 { 4643 int cur_el = arm_current_el(env); 4644 4645 if (cur_el < 2) { 4646 uint64_t hcr = arm_hcr_el2_eff(env); 4647 4648 if (cur_el == 0) { 4649 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4650 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { 4651 return CP_ACCESS_TRAP_EL2; 4652 } 4653 } else { 4654 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 4655 return CP_ACCESS_TRAP_EL1; 4656 } 4657 if (hcr & HCR_TDZ) { 4658 return CP_ACCESS_TRAP_EL2; 4659 } 4660 } 4661 } else if (hcr & HCR_TDZ) { 4662 return CP_ACCESS_TRAP_EL2; 4663 } 4664 } 4665 return CP_ACCESS_OK; 4666 } 4667 4668 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4669 { 4670 ARMCPU *cpu = env_archcpu(env); 4671 int dzp_bit = 1 << 4; 4672 4673 /* DZP indicates whether DC ZVA access is allowed */ 4674 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 4675 dzp_bit = 0; 4676 } 4677 return cpu->dcz_blocksize | dzp_bit; 4678 } 4679 4680 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 4681 bool isread) 4682 { 4683 if (!(env->pstate & PSTATE_SP)) { 4684 /* 4685 * Access to SP_EL0 is undefined if it's being used as 4686 * the stack pointer. 4687 */ 4688 return CP_ACCESS_UNDEFINED; 4689 } 4690 return CP_ACCESS_OK; 4691 } 4692 4693 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 4694 { 4695 return env->pstate & PSTATE_SP; 4696 } 4697 4698 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 4699 { 4700 update_spsel(env, val); 4701 } 4702 4703 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4704 uint64_t value) 4705 { 4706 ARMCPU *cpu = env_archcpu(env); 4707 4708 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 4709 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 4710 value &= ~SCTLR_M; 4711 } 4712 4713 /* ??? Lots of these bits are not implemented. */ 4714 4715 if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) { 4716 if (ri->opc1 == 6) { /* SCTLR_EL3 */ 4717 value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA); 4718 } else { 4719 value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF | 4720 SCTLR_ATA0 | SCTLR_ATA); 4721 } 4722 } 4723 4724 if (raw_read(env, ri) == value) { 4725 /* 4726 * Skip the TLB flush if nothing actually changed; Linux likes 4727 * to do a lot of pointless SCTLR writes. 4728 */ 4729 return; 4730 } 4731 4732 raw_write(env, ri, value); 4733 4734 /* This may enable/disable the MMU, so do a TLB flush. */ 4735 tlb_flush(CPU(cpu)); 4736 4737 if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) { 4738 /* 4739 * Normally we would always end the TB on an SCTLR write; see the 4740 * comment in ARMCPRegInfo sctlr initialization below for why Xscale 4741 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild 4742 * of hflags from the translator, so do it here. 
4743 */ 4744 arm_rebuild_hflags(env); 4745 } 4746 } 4747 4748 static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4749 uint64_t value) 4750 { 4751 /* 4752 * Some MDCR_EL3 bits affect whether PMU counters are running: 4753 * if we are trying to change any of those then we must 4754 * bracket this update with PMU start/finish calls. 4755 */ 4756 bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS; 4757 4758 if (pmu_op) { 4759 pmu_op_start(env); 4760 } 4761 env->cp15.mdcr_el3 = value; 4762 if (pmu_op) { 4763 pmu_op_finish(env); 4764 } 4765 } 4766 4767 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4768 uint64_t value) 4769 { 4770 /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */ 4771 mdcr_el3_write(env, ri, value & SDCR_VALID_MASK); 4772 } 4773 4774 static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4775 uint64_t value) 4776 { 4777 /* 4778 * Some MDCR_EL2 bits affect whether PMU counters are running: 4779 * if we are trying to change any of those then we must 4780 * bracket this update with PMU start/finish calls. 4781 */ 4782 bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS; 4783 4784 if (pmu_op) { 4785 pmu_op_start(env); 4786 } 4787 env->cp15.mdcr_el2 = value; 4788 if (pmu_op) { 4789 pmu_op_finish(env); 4790 } 4791 } 4792 4793 static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri, 4794 bool isread) 4795 { 4796 if (arm_current_el(env) == 1) { 4797 uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2); 4798 4799 if (hcr_nv == (HCR_NV | HCR_NV1)) { 4800 return CP_ACCESS_TRAP_EL2; 4801 } 4802 } 4803 return CP_ACCESS_OK; 4804 } 4805 4806 #ifdef CONFIG_USER_ONLY 4807 /* 4808 * `IC IVAU` is handled to improve compatibility with JITs that dual-map their 4809 * code to get around W^X restrictions, where one region is writable and the 4810 * other is executable. 4811 * 4812 * Since the executable region is never written to we cannot detect code 4813 * changes when running in user mode, and rely on the emulated JIT telling us 4814 * that the code has changed by executing this instruction. 4815 */ 4816 static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri, 4817 uint64_t value) 4818 { 4819 uint64_t icache_line_mask, start_address, end_address; 4820 const ARMCPU *cpu; 4821 4822 cpu = env_archcpu(env); 4823 4824 icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1; 4825 start_address = value & ~icache_line_mask; 4826 end_address = value | icache_line_mask; 4827 4828 mmap_lock(); 4829 4830 tb_invalidate_phys_range(start_address, end_address); 4831 4832 mmap_unlock(); 4833 } 4834 #endif 4835 4836 static const ARMCPRegInfo v8_cp_reginfo[] = { 4837 /* 4838 * Minimal set of EL0-visible registers. This will need to be expanded 4839 * significantly for system emulation of AArch64 CPUs. 
4840 */ 4841 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 4842 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 4843 .access = PL0_RW, .type = ARM_CP_NZCV }, 4844 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 4845 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 4846 .type = ARM_CP_NO_RAW, 4847 .access = PL0_RW, .accessfn = aa64_daif_access, 4848 .fieldoffset = offsetof(CPUARMState, daif), 4849 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 4850 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 4851 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 4852 .access = PL0_RW, .type = ARM_CP_FPU, 4853 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 4854 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 4855 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 4856 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4857 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 4858 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 4859 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 4860 .access = PL0_R, .type = ARM_CP_NO_RAW, 4861 .fgt = FGT_DCZID_EL0, 4862 .readfn = aa64_dczid_read }, 4863 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 4864 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 4865 .access = PL0_W, .type = ARM_CP_DC_ZVA, 4866 #ifndef CONFIG_USER_ONLY 4867 /* Avoid overhead of an access check that always passes in user-mode */ 4868 .accessfn = aa64_zva_access, 4869 .fgt = FGT_DCZVA, 4870 #endif 4871 }, 4872 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 4873 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 4874 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 4875 /* 4876 * Instruction cache ops. All of these except `IC IVAU` NOP because we 4877 * don't emulate caches. 4878 */ 4879 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 4880 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4881 .access = PL1_W, .type = ARM_CP_NOP, 4882 .fgt = FGT_ICIALLUIS, 4883 .accessfn = access_ticab }, 4884 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 4885 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4886 .access = PL1_W, .type = ARM_CP_NOP, 4887 .fgt = FGT_ICIALLU, 4888 .accessfn = access_tocu }, 4889 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 4890 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 4891 .access = PL0_W, 4892 .fgt = FGT_ICIVAU, 4893 .accessfn = access_tocu, 4894 #ifdef CONFIG_USER_ONLY 4895 .type = ARM_CP_NO_RAW, 4896 .writefn = ic_ivau_write 4897 #else 4898 .type = ARM_CP_NOP 4899 #endif 4900 }, 4901 /* Cache ops: all NOPs since we don't emulate caches */ 4902 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 4903 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4904 .access = PL1_W, .accessfn = aa64_cacheop_poc_access, 4905 .fgt = FGT_DCIVAC, 4906 .type = ARM_CP_NOP }, 4907 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 4908 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4909 .fgt = FGT_DCISW, 4910 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4911 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 4912 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 4913 .access = PL0_W, .type = ARM_CP_NOP, 4914 .fgt = FGT_DCCVAC, 4915 .accessfn = aa64_cacheop_poc_access }, 4916 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 4917 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4918 .fgt = FGT_DCCSW, 4919 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4920 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 4921 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 4922 
.access = PL0_W, .type = ARM_CP_NOP, 4923 .fgt = FGT_DCCVAU, 4924 .accessfn = access_tocu }, 4925 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 4926 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 4927 .access = PL0_W, .type = ARM_CP_NOP, 4928 .fgt = FGT_DCCIVAC, 4929 .accessfn = aa64_cacheop_poc_access }, 4930 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 4931 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4932 .fgt = FGT_DCCISW, 4933 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4934 #ifndef CONFIG_USER_ONLY 4935 /* 64 bit address translation operations */ 4936 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 4937 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 4938 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4939 .fgt = FGT_ATS1E1R, 4940 .accessfn = at_s1e01_access, .writefn = ats_write64 }, 4941 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 4942 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 4943 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4944 .fgt = FGT_ATS1E1W, 4945 .accessfn = at_s1e01_access, .writefn = ats_write64 }, 4946 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 4947 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 4948 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4949 .fgt = FGT_ATS1E0R, 4950 .accessfn = at_s1e01_access, .writefn = ats_write64 }, 4951 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 4952 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 4953 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4954 .fgt = FGT_ATS1E0W, 4955 .accessfn = at_s1e01_access, .writefn = ats_write64 }, 4956 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 4957 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 4958 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4959 .accessfn = at_e012_access, .writefn = ats_write64 }, 4960 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 4961 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 4962 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4963 .accessfn = at_e012_access, .writefn = ats_write64 }, 4964 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 4965 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 4966 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4967 .accessfn = at_e012_access, .writefn = ats_write64 }, 4968 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 4969 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 4970 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4971 .accessfn = at_e012_access, .writefn = ats_write64 }, 4972 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 4973 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 4974 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 4975 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4976 .writefn = ats_write64 }, 4977 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 4978 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 4979 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4980 .writefn = ats_write64 }, 4981 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 4982 .type = ARM_CP_ALIAS, 4983 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 4984 .access = PL1_RW, .resetvalue = 0, 4985 .fgt = FGT_PAR_EL1, 4986 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 4987 .writefn = par_write }, 4988 #endif 4989 /* 32 bit cache operations */ 4990 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4991 .type = ARM_CP_NOP, .access 
= PL1_W, .accessfn = access_ticab }, 4992 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 4993 .type = ARM_CP_NOP, .access = PL1_W }, 4994 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4995 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu }, 4996 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 4997 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu }, 4998 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 4999 .type = ARM_CP_NOP, .access = PL1_W }, 5000 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 5001 .type = ARM_CP_NOP, .access = PL1_W }, 5002 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 5003 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5004 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 5005 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5006 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 5007 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5008 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 5009 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5010 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 5011 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu }, 5012 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 5013 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5014 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 5015 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5016 /* MMU Domain access control / MPU write buffer control */ 5017 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 5018 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 5019 .writefn = dacr_write, .raw_writefn = raw_write, 5020 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 5021 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 5022 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 5023 .type = ARM_CP_ALIAS, 5024 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 5025 .access = PL1_RW, .accessfn = access_nv1, 5026 .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1, 5027 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 5028 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 5029 .type = ARM_CP_ALIAS, 5030 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 5031 .access = PL1_RW, .accessfn = access_nv1, 5032 .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1, 5033 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 5034 /* 5035 * We rely on the access checks not allowing the guest to write to the 5036 * state field when SPSel indicates that it's being used as the stack 5037 * pointer. 
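 * sp_el0_access() above returns CP_ACCESS_UNDEFINED when PSTATE.SP is 0,
 * i.e. while SP_EL0 is the stack pointer currently in use.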
5038 */ 5039 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 5040 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 5041 .access = PL1_RW, .accessfn = sp_el0_access, 5042 .type = ARM_CP_ALIAS, 5043 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 5044 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 5045 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 5046 .nv2_redirect_offset = 0x240, 5047 .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP, 5048 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 5049 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 5050 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 5051 .type = ARM_CP_NO_RAW, 5052 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 5053 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 5054 .type = ARM_CP_ALIAS, 5055 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 5056 .access = PL2_RW, 5057 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 5058 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 5059 .type = ARM_CP_ALIAS, 5060 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 5061 .access = PL2_RW, 5062 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 5063 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 5064 .type = ARM_CP_ALIAS, 5065 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 5066 .access = PL2_RW, 5067 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 5068 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 5069 .type = ARM_CP_ALIAS, 5070 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 5071 .access = PL2_RW, 5072 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 5073 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 5074 .type = ARM_CP_IO, 5075 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 5076 .resetvalue = 0, 5077 .access = PL3_RW, 5078 .writefn = mdcr_el3_write, 5079 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 5080 { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO, 5081 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 5082 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5083 .writefn = sdcr_write, 5084 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 5085 }; 5086 5087 /* These are present only when EL1 supports AArch32 */ 5088 static const ARMCPRegInfo v8_aa32_el1_reginfo[] = { 5089 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 5090 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 5091 .access = PL2_RW, 5092 .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP, 5093 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) }, 5094 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 5095 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 5096 .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, 5097 .writefn = dacr_write, .raw_writefn = raw_write, 5098 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 5099 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 5100 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 5101 .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, 5102 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 5103 }; 5104 5105 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) 5106 { 5107 ARMCPU *cpu = env_archcpu(env); 5108 5109 if (arm_feature(env, ARM_FEATURE_V8)) { 5110 valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */ 5111 } else { 5112 valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */ 5113 } 5114 5115 if (arm_feature(env, ARM_FEATURE_EL3)) { 5116 valid_mask &= 
~HCR_HCD; 5117 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 5118 /* 5119 * Architecturally HCR.TSC is RES0 if EL3 is not implemented. 5120 * However, if we're using the SMC PSCI conduit then QEMU is 5121 * effectively acting like EL3 firmware and so the guest at 5122 * EL2 should retain the ability to prevent EL1 from being 5123 * able to make SMC calls into the ersatz firmware, so in 5124 * that case HCR.TSC should be read/write. 5125 */ 5126 valid_mask &= ~HCR_TSC; 5127 } 5128 5129 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5130 if (cpu_isar_feature(aa64_vh, cpu)) { 5131 valid_mask |= HCR_E2H; 5132 } 5133 if (cpu_isar_feature(aa64_ras, cpu)) { 5134 valid_mask |= HCR_TERR | HCR_TEA; 5135 } 5136 if (cpu_isar_feature(aa64_lor, cpu)) { 5137 valid_mask |= HCR_TLOR; 5138 } 5139 if (cpu_isar_feature(aa64_pauth, cpu)) { 5140 valid_mask |= HCR_API | HCR_APK; 5141 } 5142 if (cpu_isar_feature(aa64_mte, cpu)) { 5143 valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5; 5144 } 5145 if (cpu_isar_feature(aa64_scxtnum, cpu)) { 5146 valid_mask |= HCR_ENSCXT; 5147 } 5148 if (cpu_isar_feature(aa64_fwb, cpu)) { 5149 valid_mask |= HCR_FWB; 5150 } 5151 if (cpu_isar_feature(aa64_rme, cpu)) { 5152 valid_mask |= HCR_GPF; 5153 } 5154 if (cpu_isar_feature(aa64_nv, cpu)) { 5155 valid_mask |= HCR_NV | HCR_NV1 | HCR_AT; 5156 } 5157 if (cpu_isar_feature(aa64_nv2, cpu)) { 5158 valid_mask |= HCR_NV2; 5159 } 5160 } 5161 5162 if (cpu_isar_feature(any_evt, cpu)) { 5163 valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4; 5164 } else if (cpu_isar_feature(any_half_evt, cpu)) { 5165 valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4; 5166 } 5167 5168 /* Clear RES0 bits. */ 5169 value &= valid_mask; 5170 5171 /* 5172 * These bits change the MMU setup: 5173 * HCR_VM enables stage 2 translation 5174 * HCR_PTW forbids certain page-table setups 5175 * HCR_DC disables stage1 and enables stage2 translation 5176 * HCR_DCT enables tagging on (disabled) stage1 translation 5177 * HCR_FWB changes the interpretation of stage2 descriptor bits 5178 * HCR_NV and HCR_NV1 affect interpretation of descriptor bits 5179 */ 5180 if ((env->cp15.hcr_el2 ^ value) & 5181 (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) { 5182 tlb_flush(CPU(cpu)); 5183 } 5184 env->cp15.hcr_el2 = value; 5185 5186 /* 5187 * Updates to VI and VF require us to update the status of 5188 * virtual interrupts, which are the logical OR of these bits 5189 * and the state of the input lines from the GIC. (This requires 5190 * that we have the BQL, which is done by marking the 5191 * reginfo structs as ARM_CP_IO.) 5192 * Note that if a write to HCR pends a VIRQ or VFIQ or VINMI or 5193 * VFNMI, it is never possible for it to be taken immediately 5194 * because VIRQ, VFIQ, VINMI and VFNMI are masked unless running 5195 * at EL0 or EL1, and HCR can only be written at EL2. 5196 */ 5197 g_assert(bql_locked()); 5198 arm_cpu_update_virq(cpu); 5199 arm_cpu_update_vfiq(cpu); 5200 arm_cpu_update_vserr(cpu); 5201 if (cpu_isar_feature(aa64_nmi, cpu)) { 5202 arm_cpu_update_vinmi(cpu); 5203 arm_cpu_update_vfnmi(cpu); 5204 } 5205 } 5206 5207 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 5208 { 5209 do_hcr_write(env, value, 0); 5210 } 5211 5212 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, 5213 uint64_t value) 5214 { 5215 /* Handle HCR2 write, i.e. 
write to high half of HCR_EL2 */ 5216 value = deposit64(env->cp15.hcr_el2, 32, 32, value); 5217 do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32)); 5218 } 5219 5220 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, 5221 uint64_t value) 5222 { 5223 /* Handle HCR write, i.e. write to low half of HCR_EL2 */ 5224 value = deposit64(env->cp15.hcr_el2, 0, 32, value); 5225 do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); 5226 } 5227 5228 /* 5229 * Return the effective value of HCR_EL2, at the given security state. 5230 * Bits that are not included here: 5231 * RW (read from SCR_EL3.RW as needed) 5232 */ 5233 uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space) 5234 { 5235 uint64_t ret = env->cp15.hcr_el2; 5236 5237 assert(space != ARMSS_Root); 5238 5239 if (!arm_is_el2_enabled_secstate(env, space)) { 5240 /* 5241 * "This register has no effect if EL2 is not enabled in the 5242 * current Security state". This is ARMv8.4-SecEL2 speak for 5243 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). 5244 * 5245 * Prior to that, the language was "In an implementation that 5246 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves 5247 * as if this field is 0 for all purposes other than a direct 5248 * read or write access of HCR_EL2". With lots of enumeration 5249 * on a per-field basis. In current QEMU, this condition 5250 * is arm_is_secure_below_el3. 5251 * 5252 * Since the v8.4 language applies to the entire register, and 5253 * appears to be backward compatible, use that. 5254 */ 5255 return 0; 5256 } 5257 5258 /* 5259 * For a cpu that supports both aarch64 and aarch32, we can set bits 5260 * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32. 5261 * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32. 5262 */ 5263 if (!arm_el_is_aa64(env, 2)) { 5264 uint64_t aa32_valid; 5265 5266 /* 5267 * These bits are up-to-date as of ARMv8.6. 5268 * For HCR, it's easiest to list just the 2 bits that are invalid. 5269 * For HCR2, list those that are valid. 5270 */ 5271 aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ); 5272 aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE | 5273 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS); 5274 ret &= aa32_valid; 5275 } 5276 5277 if (ret & HCR_TGE) { 5278 /* These bits are up-to-date as of ARMv8.6. */ 5279 if (ret & HCR_E2H) { 5280 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | 5281 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | 5282 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | 5283 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE | 5284 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT | 5285 HCR_TTLBIS | HCR_TTLBOS | HCR_TID5); 5286 } else { 5287 ret |= HCR_FMO | HCR_IMO | HCR_AMO; 5288 } 5289 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | 5290 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | 5291 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | 5292 HCR_TLOR); 5293 } 5294 5295 return ret; 5296 } 5297 5298 uint64_t arm_hcr_el2_eff(CPUARMState *env) 5299 { 5300 if (arm_feature(env, ARM_FEATURE_M)) { 5301 return 0; 5302 } 5303 return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env)); 5304 } 5305 5306 /* 5307 * Corresponds to ARM pseudocode function ELIsInHost(). 5308 */ 5309 bool el_is_in_host(CPUARMState *env, int el) 5310 { 5311 uint64_t mask; 5312 5313 /* 5314 * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff(). 5315 * Perform the simplest bit tests first, and validate EL2 afterward.
5316 */ 5317 if (el & 1) { 5318 return false; /* EL1 or EL3 */ 5319 } 5320 5321 /* 5322 * Note that hcr_write() checks isar_feature_aa64_vh(), 5323 * aka HaveVirtHostExt(), in allowing HCR_E2H to be set. 5324 */ 5325 mask = el ? HCR_E2H : HCR_E2H | HCR_TGE; 5326 if ((env->cp15.hcr_el2 & mask) != mask) { 5327 return false; 5328 } 5329 5330 /* TGE and/or E2H set: double check those bits are currently legal. */ 5331 return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2); 5332 } 5333 5334 static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, 5335 uint64_t value) 5336 { 5337 ARMCPU *cpu = env_archcpu(env); 5338 uint64_t valid_mask = 0; 5339 5340 /* FEAT_MOPS adds MSCEn and MCE2 */ 5341 if (cpu_isar_feature(aa64_mops, cpu)) { 5342 valid_mask |= HCRX_MSCEN | HCRX_MCE2; 5343 } 5344 5345 /* FEAT_NMI adds TALLINT, VINMI and VFNMI */ 5346 if (cpu_isar_feature(aa64_nmi, cpu)) { 5347 valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI; 5348 } 5349 /* FEAT_CMOW adds CMOW */ 5350 if (cpu_isar_feature(aa64_cmow, cpu)) { 5351 valid_mask |= HCRX_CMOW; 5352 } 5353 /* FEAT_XS adds FGTnXS, FnXS */ 5354 if (cpu_isar_feature(aa64_xs, cpu)) { 5355 valid_mask |= HCRX_FGTNXS | HCRX_FNXS; 5356 } 5357 5358 /* Clear RES0 bits. */ 5359 env->cp15.hcrx_el2 = value & valid_mask; 5360 5361 /* 5362 * Updates to VINMI and VFNMI require us to update the status of 5363 * virtual NMI, which are the logical OR of these bits 5364 * and the state of the input lines from the GIC. (This requires 5365 * that we have the BQL, which is done by marking the 5366 * reginfo structs as ARM_CP_IO.) 5367 * Note that if a write to HCRX pends a VINMI or VFNMI it is never 5368 * possible for it to be taken immediately, because VINMI and 5369 * VFNMI are masked unless running at EL0 or EL1, and HCRX 5370 * can only be written at EL2. 5371 */ 5372 if (cpu_isar_feature(aa64_nmi, cpu)) { 5373 g_assert(bql_locked()); 5374 arm_cpu_update_vinmi(cpu); 5375 arm_cpu_update_vfnmi(cpu); 5376 } 5377 } 5378 5379 static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri, 5380 bool isread) 5381 { 5382 if (arm_current_el(env) == 2 5383 && arm_feature(env, ARM_FEATURE_EL3) 5384 && !(env->cp15.scr_el3 & SCR_HXEN)) { 5385 return CP_ACCESS_TRAP_EL3; 5386 } 5387 return CP_ACCESS_OK; 5388 } 5389 5390 static const ARMCPRegInfo hcrx_el2_reginfo = { 5391 .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64, 5392 .type = ARM_CP_IO, 5393 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2, 5394 .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen, 5395 .nv2_redirect_offset = 0xa0, 5396 .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2), 5397 }; 5398 5399 /* Return the effective value of HCRX_EL2. */ 5400 uint64_t arm_hcrx_el2_eff(CPUARMState *env) 5401 { 5402 /* 5403 * The bits in this register behave as 0 for all purposes other than 5404 * direct reads of the register if SCR_EL3.HXEn is 0. 5405 * If EL2 is not enabled in the current security state, then the 5406 * bit may behave as if 0, or as if 1, depending on the bit. 5407 * For the moment, we treat the EL2-disabled case as taking 5408 * priority over the HXEn-disabled case. This is true for the only 5409 * bit for a feature which we implement where the answer is different 5410 * for the two cases (MSCEn for FEAT_MOPS). 5411 * This may need to be revisited for future bits. 
5412 */ 5413 if (!arm_is_el2_enabled(env)) { 5414 uint64_t hcrx = 0; 5415 if (cpu_isar_feature(aa64_mops, env_archcpu(env))) { 5416 /* MSCEn behaves as 1 if EL2 is not enabled */ 5417 hcrx |= HCRX_MSCEN; 5418 } 5419 return hcrx; 5420 } 5421 if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) { 5422 return 0; 5423 } 5424 return env->cp15.hcrx_el2; 5425 } 5426 5427 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 5428 uint64_t value) 5429 { 5430 /* 5431 * For A-profile AArch32 EL3, if NSACR.CP10 5432 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 5433 */ 5434 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5435 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5436 uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK; 5437 value = (value & ~mask) | (env->cp15.cptr_el[2] & mask); 5438 } 5439 env->cp15.cptr_el[2] = value; 5440 } 5441 5442 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) 5443 { 5444 /* 5445 * For A-profile AArch32 EL3, if NSACR.CP10 5446 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 5447 */ 5448 uint64_t value = env->cp15.cptr_el[2]; 5449 5450 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5451 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5452 value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK; 5453 } 5454 return value; 5455 } 5456 5457 static const ARMCPRegInfo el2_cp_reginfo[] = { 5458 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 5459 .type = ARM_CP_IO, 5460 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5461 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5462 .nv2_redirect_offset = 0x78, 5463 .writefn = hcr_write, .raw_writefn = raw_write }, 5464 { .name = "HCR", .state = ARM_CP_STATE_AA32, 5465 .type = ARM_CP_ALIAS | ARM_CP_IO, 5466 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5467 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5468 .writefn = hcr_writelow }, 5469 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 5470 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 5471 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5472 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 5473 .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT, 5474 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 5475 .access = PL2_RW, 5476 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 5477 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 5478 .type = ARM_CP_NV2_REDIRECT, 5479 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 5480 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 5481 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5482 .type = ARM_CP_NV2_REDIRECT, 5483 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5484 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 5485 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5486 .type = ARM_CP_ALIAS, 5487 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5488 .access = PL2_RW, 5489 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 5490 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 5491 .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT, 5492 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 5493 .access = PL2_RW, 5494 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 5495 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 5496 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 5497 .access = PL2_RW, .writefn = vbar_write, 5498 
.fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 5499 .resetvalue = 0 }, 5500 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, 5501 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 5502 .access = PL3_RW, .type = ARM_CP_ALIAS, 5503 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 5504 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 5505 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 5506 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 5507 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), 5508 .readfn = cptr_el2_read, .writefn = cptr_el2_write }, 5509 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 5510 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 5511 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 5512 .resetvalue = 0 }, 5513 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 5514 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 5515 .access = PL2_RW, .type = ARM_CP_ALIAS, 5516 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 5517 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 5518 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 5519 .access = PL2_RW, .type = ARM_CP_CONST, 5520 .resetvalue = 0 }, 5521 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 5522 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5523 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 5524 .access = PL2_RW, .type = ARM_CP_CONST, 5525 .resetvalue = 0 }, 5526 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5527 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5528 .access = PL2_RW, .type = ARM_CP_CONST, 5529 .resetvalue = 0 }, 5530 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5531 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5532 .access = PL2_RW, .type = ARM_CP_CONST, 5533 .resetvalue = 0 }, 5534 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5535 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5536 .access = PL2_RW, .writefn = vmsa_tcr_el12_write, 5537 .raw_writefn = raw_write, 5538 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 5539 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 5540 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5541 .type = ARM_CP_ALIAS, 5542 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5543 .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) }, 5544 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 5545 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5546 .access = PL2_RW, 5547 .nv2_redirect_offset = 0x40, 5548 /* no .writefn needed as this can't cause an ASID change */ 5549 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5550 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5551 .cp = 15, .opc1 = 6, .crm = 2, 5552 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5553 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5554 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 5555 .writefn = vttbr_write, .raw_writefn = raw_write }, 5556 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5557 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5558 .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write, 5559 .nv2_redirect_offset = 0x20, 5560 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 5561 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5562 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5563 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 5564 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 5565 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5566 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, 
.opc2 = 2, 5567 .access = PL2_RW, .resetvalue = 0, 5568 .nv2_redirect_offset = 0x90, 5569 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 5570 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5571 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5572 .access = PL2_RW, .resetvalue = 0, 5573 .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write, 5574 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5575 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5576 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5577 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5578 #ifndef CONFIG_USER_ONLY 5579 /* 5580 * Unlike the other EL2-related AT operations, these must 5581 * UNDEF from EL3 if EL2 is not implemented, which is why we 5582 * define them here rather than with the rest of the AT ops. 5583 */ 5584 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 5585 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5586 .access = PL2_W, .accessfn = at_s1e2_access, 5587 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, 5588 .writefn = ats_write64 }, 5589 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 5590 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5591 .access = PL2_W, .accessfn = at_s1e2_access, 5592 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, 5593 .writefn = ats_write64 }, 5594 /* 5595 * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 5596 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 5597 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 5598 * to behave as if SCR.NS was 1. 5599 */ 5600 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5601 .access = PL2_W, 5602 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5603 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5604 .access = PL2_W, 5605 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5606 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5607 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5608 /* 5609 * ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 5610 * reset values as IMPDEF. We choose to reset to 3 to comply with 5611 * both ARMv7 and ARMv8. 
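 * (In the E2H == 0 layout of CNTHCTL_EL2 those two bits are EL1PCTEN
 * and EL1PCEN, so the reset value of 3 means EL1/EL0 accesses to the
 * physical counter and timer are not trapped to EL2 out of reset.)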
5612 */ 5613 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3, 5614 .writefn = gt_cnthctl_write, .raw_writefn = raw_write, 5615 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 5616 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5617 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5618 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 5619 .writefn = gt_cntvoff_write, 5620 .nv2_redirect_offset = 0x60, 5621 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5622 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5623 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 5624 .writefn = gt_cntvoff_write, 5625 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5626 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5627 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5628 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5629 .type = ARM_CP_IO, .access = PL2_RW, 5630 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5631 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5632 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5633 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 5634 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5635 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5636 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5637 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 5638 .resetfn = gt_hyp_timer_reset, 5639 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 5640 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5641 .type = ARM_CP_IO, 5642 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5643 .access = PL2_RW, 5644 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 5645 .resetvalue = 0, 5646 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 5647 #endif 5648 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 5649 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5650 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5651 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5652 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 5653 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5654 .access = PL2_RW, 5655 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5656 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5657 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5658 .access = PL2_RW, 5659 .nv2_redirect_offset = 0x80, 5660 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 5661 }; 5662 5663 static const ARMCPRegInfo el2_v8_cp_reginfo[] = { 5664 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5665 .type = ARM_CP_ALIAS | ARM_CP_IO, 5666 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5667 .access = PL2_RW, 5668 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), 5669 .writefn = hcr_writehigh }, 5670 }; 5671 5672 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri, 5673 bool isread) 5674 { 5675 if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) { 5676 return CP_ACCESS_OK; 5677 } 5678 return CP_ACCESS_UNDEFINED; 5679 } 5680 5681 static const ARMCPRegInfo el2_sec_cp_reginfo[] = { 5682 { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64, 5683 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0, 5684 .access = PL2_RW, .accessfn = sel2_access, 5685 .nv2_redirect_offset = 0x30, 5686 .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) }, 5687 { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64, 5688 .opc0 = 3, .opc1 = 
4, .crn = 2, .crm = 6, .opc2 = 2, 5689 .access = PL2_RW, .accessfn = sel2_access, 5690 .nv2_redirect_offset = 0x48, 5691 .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) }, 5692 }; 5693 5694 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 5695 bool isread) 5696 { 5697 /* 5698 * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 5699 * At Secure EL1 it traps to EL3 or EL2. 5700 */ 5701 if (arm_current_el(env) == 3) { 5702 return CP_ACCESS_OK; 5703 } 5704 if (arm_is_secure_below_el3(env)) { 5705 if (env->cp15.scr_el3 & SCR_EEL2) { 5706 return CP_ACCESS_TRAP_EL2; 5707 } 5708 return CP_ACCESS_TRAP_EL3; 5709 } 5710 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */ 5711 if (isread) { 5712 return CP_ACCESS_OK; 5713 } 5714 return CP_ACCESS_UNDEFINED; 5715 } 5716 5717 static const ARMCPRegInfo el3_cp_reginfo[] = { 5718 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 5719 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 5720 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 5721 .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write }, 5722 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL, 5723 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 5724 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5725 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 5726 .writefn = scr_write, .raw_writefn = raw_write }, 5727 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 5728 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 5729 .access = PL3_RW, .resetvalue = 0, 5730 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 5731 { .name = "SDER", 5732 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 5733 .access = PL3_RW, .resetvalue = 0, 5734 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 5735 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5736 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5737 .writefn = vbar_write, .resetvalue = 0, 5738 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 5739 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 5740 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 5741 .access = PL3_RW, .resetvalue = 0, 5742 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 5743 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 5744 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 5745 .access = PL3_RW, 5746 /* no .writefn needed as this can't cause an ASID change */ 5747 .resetvalue = 0, 5748 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 5749 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 5750 .type = ARM_CP_ALIAS, 5751 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 5752 .access = PL3_RW, 5753 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 5754 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 5755 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 5756 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 5757 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 5758 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 5759 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 5760 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 5761 .type = ARM_CP_ALIAS, 5762 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 5763 .access = PL3_RW, 5764 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 5765 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 5766 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 5767 .access = PL3_RW, .writefn = vbar_write, 
5768 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 5769 .resetvalue = 0 }, 5770 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 5771 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 5772 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 5773 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 5774 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 5775 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 5776 .access = PL3_RW, .resetvalue = 0, 5777 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 5778 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 5779 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 5780 .access = PL3_RW, .type = ARM_CP_CONST, 5781 .resetvalue = 0 }, 5782 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 5783 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 5784 .access = PL3_RW, .type = ARM_CP_CONST, 5785 .resetvalue = 0 }, 5786 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 5787 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 5788 .access = PL3_RW, .type = ARM_CP_CONST, 5789 .resetvalue = 0 }, 5790 }; 5791 5792 #ifndef CONFIG_USER_ONLY 5793 5794 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, 5795 bool isread) 5796 { 5797 if (arm_current_el(env) == 1) { 5798 /* This must be a FEAT_NV access */ 5799 return CP_ACCESS_OK; 5800 } 5801 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { 5802 return CP_ACCESS_UNDEFINED; 5803 } 5804 return CP_ACCESS_OK; 5805 } 5806 5807 static CPAccessResult access_el1nvpct(CPUARMState *env, const ARMCPRegInfo *ri, 5808 bool isread) 5809 { 5810 if (arm_current_el(env) == 1) { 5811 /* This must be a FEAT_NV access with NVx == 101 */ 5812 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) { 5813 return CP_ACCESS_TRAP_EL2; 5814 } 5815 } 5816 return e2h_access(env, ri, isread); 5817 } 5818 5819 static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri, 5820 bool isread) 5821 { 5822 if (arm_current_el(env) == 1) { 5823 /* This must be a FEAT_NV access with NVx == 101 */ 5824 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) { 5825 return CP_ACCESS_TRAP_EL2; 5826 } 5827 } 5828 return e2h_access(env, ri, isread); 5829 } 5830 5831 /* Test if system register redirection is to occur in the current state. */ 5832 static bool redirect_for_e2h(CPUARMState *env) 5833 { 5834 return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H); 5835 } 5836 5837 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri) 5838 { 5839 CPReadFn *readfn; 5840 5841 if (redirect_for_e2h(env)) { 5842 /* Switch to the saved EL2 version of the register. */ 5843 ri = ri->opaque; 5844 readfn = ri->readfn; 5845 } else { 5846 readfn = ri->orig_readfn; 5847 } 5848 if (readfn == NULL) { 5849 readfn = raw_read; 5850 } 5851 return readfn(env, ri); 5852 } 5853 5854 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, 5855 uint64_t value) 5856 { 5857 CPWriteFn *writefn; 5858 5859 if (redirect_for_e2h(env)) { 5860 /* Switch to the saved EL2 version of the register. 
*/ 5861 ri = ri->opaque; 5862 writefn = ri->writefn; 5863 } else { 5864 writefn = ri->orig_writefn; 5865 } 5866 if (writefn == NULL) { 5867 writefn = raw_write; 5868 } 5869 writefn(env, ri, value); 5870 } 5871 5872 static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri) 5873 { 5874 /* Pass the EL1 register accessor its ri, not the EL12 alias ri */ 5875 return ri->orig_readfn(env, ri->opaque); 5876 } 5877 5878 static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri, 5879 uint64_t value) 5880 { 5881 /* Pass the EL1 register accessor its ri, not the EL12 alias ri */ 5882 return ri->orig_writefn(env, ri->opaque, value); 5883 } 5884 5885 static CPAccessResult el2_e2h_e12_access(CPUARMState *env, 5886 const ARMCPRegInfo *ri, 5887 bool isread) 5888 { 5889 if (arm_current_el(env) == 1) { 5890 /* 5891 * This must be a FEAT_NV access (will either trap or redirect 5892 * to memory). None of the registers with _EL12 aliases want to 5893 * apply their trap controls for this kind of access, so don't 5894 * call the orig_accessfn or do the "UNDEF when E2H is 0" check. 5895 */ 5896 return CP_ACCESS_OK; 5897 } 5898 /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */ 5899 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { 5900 return CP_ACCESS_UNDEFINED; 5901 } 5902 if (ri->orig_accessfn) { 5903 return ri->orig_accessfn(env, ri->opaque, isread); 5904 } 5905 return CP_ACCESS_OK; 5906 } 5907 5908 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) 5909 { 5910 struct E2HAlias { 5911 uint32_t src_key, dst_key, new_key; 5912 const char *src_name, *dst_name, *new_name; 5913 bool (*feature)(const ARMISARegisters *id); 5914 }; 5915 5916 #define K(op0, op1, crn, crm, op2) \ 5917 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) 5918 5919 static const struct E2HAlias aliases[] = { 5920 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), 5921 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, 5922 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), 5923 "CPACR", "CPTR_EL2", "CPACR_EL12" }, 5924 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), 5925 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, 5926 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), 5927 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, 5928 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), 5929 "TCR_EL1", "TCR_EL2", "TCR_EL12" }, 5930 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), 5931 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, 5932 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), 5933 "ELR_EL1", "ELR_EL2", "ELR_EL12" }, 5934 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), 5935 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, 5936 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), 5937 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, 5938 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), 5939 "ESR_EL1", "ESR_EL2", "ESR_EL12" }, 5940 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), 5941 "FAR_EL1", "FAR_EL2", "FAR_EL12" }, 5942 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), 5943 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, 5944 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), 5945 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, 5946 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0), 5947 "VBAR", "VBAR_EL2", "VBAR_EL12" }, 5948 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), 5949 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, 5950 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), 5951 "CNTKCTL", "CNTHCTL_EL2", 
"CNTKCTL_EL12" }, 5952 5953 /* 5954 * Note that redirection of ZCR is mentioned in the description 5955 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but 5956 * not in the summary table. 5957 */ 5958 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), 5959 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, 5960 { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6), 5961 "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme }, 5962 5963 { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0), 5964 "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte }, 5965 5966 { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7), 5967 "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12", 5968 isar_feature_aa64_scxtnum }, 5969 5970 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ 5971 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ 5972 }; 5973 #undef K 5974 5975 size_t i; 5976 5977 for (i = 0; i < ARRAY_SIZE(aliases); i++) { 5978 const struct E2HAlias *a = &aliases[i]; 5979 ARMCPRegInfo *src_reg, *dst_reg, *new_reg; 5980 bool ok; 5981 5982 if (a->feature && !a->feature(&cpu->isar)) { 5983 continue; 5984 } 5985 5986 src_reg = g_hash_table_lookup(cpu->cp_regs, 5987 (gpointer)(uintptr_t)a->src_key); 5988 dst_reg = g_hash_table_lookup(cpu->cp_regs, 5989 (gpointer)(uintptr_t)a->dst_key); 5990 g_assert(src_reg != NULL); 5991 g_assert(dst_reg != NULL); 5992 5993 /* Cross-compare names to detect typos in the keys. */ 5994 g_assert(strcmp(src_reg->name, a->src_name) == 0); 5995 g_assert(strcmp(dst_reg->name, a->dst_name) == 0); 5996 5997 /* None of the core system registers use opaque; we will. */ 5998 g_assert(src_reg->opaque == NULL); 5999 6000 /* Create alias before redirection so we dup the right data. */ 6001 new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); 6002 6003 new_reg->name = a->new_name; 6004 new_reg->type |= ARM_CP_ALIAS; 6005 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ 6006 new_reg->access &= PL2_RW | PL3_RW; 6007 /* The new_reg op fields are as per new_key, not the target reg */ 6008 new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK) 6009 >> CP_REG_ARM64_SYSREG_CRN_SHIFT; 6010 new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK) 6011 >> CP_REG_ARM64_SYSREG_CRM_SHIFT; 6012 new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK) 6013 >> CP_REG_ARM64_SYSREG_OP0_SHIFT; 6014 new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK) 6015 >> CP_REG_ARM64_SYSREG_OP1_SHIFT; 6016 new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK) 6017 >> CP_REG_ARM64_SYSREG_OP2_SHIFT; 6018 new_reg->opaque = src_reg; 6019 new_reg->orig_readfn = src_reg->readfn ?: raw_read; 6020 new_reg->orig_writefn = src_reg->writefn ?: raw_write; 6021 new_reg->orig_accessfn = src_reg->accessfn; 6022 if (!new_reg->raw_readfn) { 6023 new_reg->raw_readfn = raw_read; 6024 } 6025 if (!new_reg->raw_writefn) { 6026 new_reg->raw_writefn = raw_write; 6027 } 6028 new_reg->readfn = el2_e2h_e12_read; 6029 new_reg->writefn = el2_e2h_e12_write; 6030 new_reg->accessfn = el2_e2h_e12_access; 6031 6032 /* 6033 * If the _EL1 register is redirected to memory by FEAT_NV2, 6034 * then it shares the offset with the _EL12 register, 6035 * and which one is redirected depends on HCR_EL2.NV1. 
6036 */ 6037 if (new_reg->nv2_redirect_offset) { 6038 assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1); 6039 new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1; 6040 new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1; 6041 } 6042 6043 ok = g_hash_table_insert(cpu->cp_regs, 6044 (gpointer)(uintptr_t)a->new_key, new_reg); 6045 g_assert(ok); 6046 6047 src_reg->opaque = dst_reg; 6048 src_reg->orig_readfn = src_reg->readfn ?: raw_read; 6049 src_reg->orig_writefn = src_reg->writefn ?: raw_write; 6050 if (!src_reg->raw_readfn) { 6051 src_reg->raw_readfn = raw_read; 6052 } 6053 if (!src_reg->raw_writefn) { 6054 src_reg->raw_writefn = raw_write; 6055 } 6056 src_reg->readfn = el2_e2h_read; 6057 src_reg->writefn = el2_e2h_write; 6058 } 6059 } 6060 #endif 6061 6062 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 6063 bool isread) 6064 { 6065 int cur_el = arm_current_el(env); 6066 6067 if (cur_el < 2) { 6068 uint64_t hcr = arm_hcr_el2_eff(env); 6069 6070 if (cur_el == 0) { 6071 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 6072 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { 6073 return CP_ACCESS_TRAP_EL2; 6074 } 6075 } else { 6076 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 6077 return CP_ACCESS_TRAP_EL1; 6078 } 6079 if (hcr & HCR_TID2) { 6080 return CP_ACCESS_TRAP_EL2; 6081 } 6082 } 6083 } else if (hcr & HCR_TID2) { 6084 return CP_ACCESS_TRAP_EL2; 6085 } 6086 } 6087 6092 return CP_ACCESS_OK; 6093 } 6094 6095 /* 6096 * Check for traps to RAS registers, which are controlled 6097 * by HCR_EL2.TERR and SCR_EL3.TERR. 6098 */ 6099 static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri, 6100 bool isread) 6101 { 6102 int el = arm_current_el(env); 6103 6104 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) { 6105 return CP_ACCESS_TRAP_EL2; 6106 } 6107 if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) { 6108 return CP_ACCESS_TRAP_EL3; 6109 } 6110 return CP_ACCESS_OK; 6111 } 6112 6113 static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri) 6114 { 6115 int el = arm_current_el(env); 6116 6117 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) { 6118 return env->cp15.vdisr_el2; 6119 } 6120 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { 6121 return 0; /* RAZ/WI */ 6122 } 6123 return env->cp15.disr_el1; 6124 } 6125 6126 static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 6127 { 6128 int el = arm_current_el(env); 6129 6130 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) { 6131 env->cp15.vdisr_el2 = val; 6132 return; 6133 } 6134 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { 6135 return; /* RAZ/WI */ 6136 } 6137 env->cp15.disr_el1 = val; 6138 } 6139 6140 /* 6141 * Minimal RAS implementation with no Error Records. 6142 * Which means that all of the Error Record registers: 6143 * ERXADDR_EL1 6144 * ERXCTLR_EL1 6145 * ERXFR_EL1 6146 * ERXMISC0_EL1 6147 * ERXMISC1_EL1 6148 * ERXMISC2_EL1 6149 * ERXMISC3_EL1 6150 * ERXPFGCDN_EL1 (RASv1p1) 6151 * ERXPFGCTL_EL1 (RASv1p1) 6152 * ERXPFGF_EL1 (RASv1p1) 6153 * ERXSTATUS_EL1 6154 * and 6155 * ERRSELR_EL1 6156 * may generate UNDEFINED, which is the effect we get by not 6157 * listing them at all. 6158 * 6159 * These registers have fine-grained trap bits, but UNDEF-to-EL1 6160 * is higher priority than FGT-to-EL2 so we do not need to list them 6161 * in order to check for an FGT.
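 * (ERRIDR_EL1 below is constant zero, i.e. it advertises zero error
 * records, consistent with the choice to omit the ERX* registers.)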
6162 */ 6163 static const ARMCPRegInfo minimal_ras_reginfo[] = { 6164 { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH, 6165 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1, 6166 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1), 6167 .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write }, 6168 { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH, 6169 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0, 6170 .access = PL1_R, .accessfn = access_terr, 6171 .fgt = FGT_ERRIDR_EL1, 6172 .type = ARM_CP_CONST, .resetvalue = 0 }, 6173 { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH, 6174 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1, 6175 .nv2_redirect_offset = 0x500, 6176 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) }, 6177 { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH, 6178 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3, 6179 .nv2_redirect_offset = 0x508, 6180 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) }, 6181 }; 6182 6183 /* 6184 * Return the exception level to which exceptions should be taken 6185 * via SVEAccessTrap. This excludes the check for whether the exception 6186 * should be routed through AArch64.AdvSIMDFPAccessTrap. That can easily 6187 * be found by testing 0 < fp_exception_el < sve_exception_el. 6188 * 6189 * C.f. the ARM pseudocode function CheckSVEEnabled. Note that the 6190 * pseudocode does *not* separate out the FP trap checks, but has them 6191 * all in one function. 6192 */ 6193 int sve_exception_el(CPUARMState *env, int el) 6194 { 6195 #ifndef CONFIG_USER_ONLY 6196 if (el <= 1 && !el_is_in_host(env, el)) { 6197 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) { 6198 case 1: 6199 if (el != 0) { 6200 break; 6201 } 6202 /* fall through */ 6203 case 0: 6204 case 2: 6205 return 1; 6206 } 6207 } 6208 6209 if (el <= 2 && arm_is_el2_enabled(env)) { 6210 /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */ 6211 if (env->cp15.hcr_el2 & HCR_E2H) { 6212 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) { 6213 case 1: 6214 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { 6215 break; 6216 } 6217 /* fall through */ 6218 case 0: 6219 case 2: 6220 return 2; 6221 } 6222 } else { 6223 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) { 6224 return 2; 6225 } 6226 } 6227 } 6228 6229 /* CPTR_EL3. Since EZ is negative we must check for EL3. */ 6230 if (arm_feature(env, ARM_FEATURE_EL3) 6231 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) { 6232 return 3; 6233 } 6234 #endif 6235 return 0; 6236 } 6237 6238 /* 6239 * Return the exception level to which exceptions should be taken for SME. 6240 * C.f. the ARM pseudocode function CheckSMEAccess. 6241 */ 6242 int sme_exception_el(CPUARMState *env, int el) 6243 { 6244 #ifndef CONFIG_USER_ONLY 6245 if (el <= 1 && !el_is_in_host(env, el)) { 6246 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) { 6247 case 1: 6248 if (el != 0) { 6249 break; 6250 } 6251 /* fall through */ 6252 case 0: 6253 case 2: 6254 return 1; 6255 } 6256 } 6257 6258 if (el <= 2 && arm_is_el2_enabled(env)) { 6259 /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). 
*/ 6260 if (env->cp15.hcr_el2 & HCR_E2H) { 6261 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) { 6262 case 1: 6263 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { 6264 break; 6265 } 6266 /* fall through */ 6267 case 0: 6268 case 2: 6269 return 2; 6270 } 6271 } else { 6272 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) { 6273 return 2; 6274 } 6275 } 6276 } 6277 6278 /* CPTR_EL3. Since ESM is negative we must check for EL3. */ 6279 if (arm_feature(env, ARM_FEATURE_EL3) 6280 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { 6281 return 3; 6282 } 6283 #endif 6284 return 0; 6285 } 6286 6287 /* 6288 * Given that SVE is enabled, return the vector length for EL. 6289 */ 6290 uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm) 6291 { 6292 ARMCPU *cpu = env_archcpu(env); 6293 uint64_t *cr = env->vfp.zcr_el; 6294 uint32_t map = cpu->sve_vq.map; 6295 uint32_t len = ARM_MAX_VQ - 1; 6296 6297 if (sm) { 6298 cr = env->vfp.smcr_el; 6299 map = cpu->sme_vq.map; 6300 } 6301 6302 if (el <= 1 && !el_is_in_host(env, el)) { 6303 len = MIN(len, 0xf & (uint32_t)cr[1]); 6304 } 6305 if (el <= 2 && arm_is_el2_enabled(env)) { 6306 len = MIN(len, 0xf & (uint32_t)cr[2]); 6307 } 6308 if (arm_feature(env, ARM_FEATURE_EL3)) { 6309 len = MIN(len, 0xf & (uint32_t)cr[3]); 6310 } 6311 6312 map &= MAKE_64BIT_MASK(0, len + 1); 6313 if (map != 0) { 6314 return 31 - clz32(map); 6315 } 6316 6317 /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */ 6318 assert(sm); 6319 return ctz32(cpu->sme_vq.map); 6320 } 6321 6322 uint32_t sve_vqm1_for_el(CPUARMState *env, int el) 6323 { 6324 return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM)); 6325 } 6326 6327 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6328 uint64_t value) 6329 { 6330 int cur_el = arm_current_el(env); 6331 int old_len = sve_vqm1_for_el(env, cur_el); 6332 int new_len; 6333 6334 /* Bits other than [3:0] are RAZ/WI. */ 6335 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16); 6336 raw_write(env, ri, value & 0xf); 6337 6338 /* 6339 * Because we arrived here, we know both FP and SVE are enabled; 6340 * otherwise we would have trapped access to the ZCR_ELn register. 
6341 */ 6342 new_len = sve_vqm1_for_el(env, cur_el); 6343 if (new_len < old_len) { 6344 aarch64_sve_narrow_vq(env, new_len + 1); 6345 } 6346 } 6347 6348 static const ARMCPRegInfo zcr_reginfo[] = { 6349 { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 6350 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 6351 .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1, 6352 .access = PL1_RW, .type = ARM_CP_SVE, 6353 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 6354 .writefn = zcr_write, .raw_writefn = raw_write }, 6355 { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 6356 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 6357 .access = PL2_RW, .type = ARM_CP_SVE, 6358 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 6359 .writefn = zcr_write, .raw_writefn = raw_write }, 6360 { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 6361 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 6362 .access = PL3_RW, .type = ARM_CP_SVE, 6363 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 6364 .writefn = zcr_write, .raw_writefn = raw_write }, 6365 }; 6366 6367 #ifdef TARGET_AARCH64 6368 static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri, 6369 bool isread) 6370 { 6371 int el = arm_current_el(env); 6372 6373 if (el == 0) { 6374 uint64_t sctlr = arm_sctlr(env, el); 6375 if (!(sctlr & SCTLR_EnTP2)) { 6376 return CP_ACCESS_TRAP_EL1; 6377 } 6378 } 6379 /* TODO: FEAT_FGT */ 6380 if (el < 3 6381 && arm_feature(env, ARM_FEATURE_EL3) 6382 && !(env->cp15.scr_el3 & SCR_ENTP2)) { 6383 return CP_ACCESS_TRAP_EL3; 6384 } 6385 return CP_ACCESS_OK; 6386 } 6387 6388 static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri, 6389 bool isread) 6390 { 6391 /* If EL1 this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */ 6392 if (arm_current_el(env) == 2 6393 && arm_feature(env, ARM_FEATURE_EL3) 6394 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { 6395 return CP_ACCESS_TRAP_EL3; 6396 } 6397 return CP_ACCESS_OK; 6398 } 6399 6400 static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri, 6401 bool isread) 6402 { 6403 if (arm_current_el(env) < 3 6404 && arm_feature(env, ARM_FEATURE_EL3) 6405 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { 6406 return CP_ACCESS_TRAP_EL3; 6407 } 6408 return CP_ACCESS_OK; 6409 } 6410 6411 /* ResetSVEState */ 6412 static void arm_reset_sve_state(CPUARMState *env) 6413 { 6414 memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs)); 6415 /* Recall that FFR is stored as pregs[16]. */ 6416 memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs)); 6417 vfp_set_fpsr(env, 0x0800009f); 6418 } 6419 6420 void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask) 6421 { 6422 uint64_t change = (env->svcr ^ new) & mask; 6423 6424 if (change == 0) { 6425 return; 6426 } 6427 env->svcr ^= change; 6428 6429 if (change & R_SVCR_SM_MASK) { 6430 arm_reset_sve_state(env); 6431 } 6432 6433 /* 6434 * ResetSMEState. 6435 * 6436 * SetPSTATE_ZA zeros on enable and disable. We can zero this only 6437 * on enable: while disabled, the storage is inaccessible and the 6438 * value does not matter. We're not saving the storage in vmstate 6439 * when disabled either. 
6440 */ 6441 if (change & new & R_SVCR_ZA_MASK) { 6442 memset(env->zarray, 0, sizeof(env->zarray)); 6443 } 6444 6445 if (tcg_enabled()) { 6446 arm_rebuild_hflags(env); 6447 } 6448 } 6449 6450 static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6451 uint64_t value) 6452 { 6453 aarch64_set_svcr(env, value, -1); 6454 } 6455 6456 static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6457 uint64_t value) 6458 { 6459 int cur_el = arm_current_el(env); 6460 int old_len = sve_vqm1_for_el(env, cur_el); 6461 int new_len; 6462 6463 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1); 6464 value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK; 6465 raw_write(env, ri, value); 6466 6467 /* 6468 * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage 6469 * when SVL is widened (old values kept, or zeros). Choose to keep the 6470 * current values for simplicity. But for QEMU internals, we must still 6471 * apply the narrower SVL to the Zregs and Pregs -- see the comment 6472 * above aarch64_sve_narrow_vq. 6473 */ 6474 new_len = sve_vqm1_for_el(env, cur_el); 6475 if (new_len < old_len) { 6476 aarch64_sve_narrow_vq(env, new_len + 1); 6477 } 6478 } 6479 6480 static const ARMCPRegInfo sme_reginfo[] = { 6481 { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64, 6482 .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5, 6483 .access = PL0_RW, .accessfn = access_tpidr2, 6484 .fgt = FGT_NTPIDR2_EL0, 6485 .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) }, 6486 { .name = "SVCR", .state = ARM_CP_STATE_AA64, 6487 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2, 6488 .access = PL0_RW, .type = ARM_CP_SME, 6489 .fieldoffset = offsetof(CPUARMState, svcr), 6490 .writefn = svcr_write, .raw_writefn = raw_write }, 6491 { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64, 6492 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6, 6493 .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1, 6494 .access = PL1_RW, .type = ARM_CP_SME, 6495 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]), 6496 .writefn = smcr_write, .raw_writefn = raw_write }, 6497 { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64, 6498 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6, 6499 .access = PL2_RW, .type = ARM_CP_SME, 6500 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]), 6501 .writefn = smcr_write, .raw_writefn = raw_write }, 6502 { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64, 6503 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6, 6504 .access = PL3_RW, .type = ARM_CP_SME, 6505 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]), 6506 .writefn = smcr_write, .raw_writefn = raw_write }, 6507 { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64, 6508 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6, 6509 .access = PL1_R, .accessfn = access_aa64_tid1, 6510 /* 6511 * IMPLEMENTOR = 0 (software) 6512 * REVISION = 0 (implementation defined) 6513 * SMPS = 0 (no streaming execution priority in QEMU) 6514 * AFFINITY = 0 (streaming sve mode not shared with other PEs) 6515 */ 6516 .type = ARM_CP_CONST, .resetvalue = 0, }, 6517 /* 6518 * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES 0. 
6519 */ 6520 { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64, 6521 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4, 6522 .access = PL1_RW, .accessfn = access_smpri, 6523 .fgt = FGT_NSMPRI_EL1, 6524 .type = ARM_CP_CONST, .resetvalue = 0 }, 6525 { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64, 6526 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5, 6527 .nv2_redirect_offset = 0x1f8, 6528 .access = PL2_RW, .accessfn = access_smprimap, 6529 .type = ARM_CP_CONST, .resetvalue = 0 }, 6530 }; 6531 6532 static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6533 uint64_t value) 6534 { 6535 /* L0GPTSZ is RO; other bits not mentioned are RES0. */ 6536 uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK | 6537 R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK | 6538 R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK; 6539 6540 env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask); 6541 } 6542 6543 static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 6544 { 6545 env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ, 6546 env_archcpu(env)->reset_l0gptsz); 6547 } 6548 6549 static const ARMCPRegInfo rme_reginfo[] = { 6550 { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64, 6551 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6, 6552 .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset, 6553 .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) }, 6554 { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64, 6555 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4, 6556 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) }, 6557 { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64, 6558 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5, 6559 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) }, 6560 { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64, 6561 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1, 6562 .access = PL3_W, .type = ARM_CP_NOP }, 6563 }; 6564 6565 static const ARMCPRegInfo rme_mte_reginfo[] = { 6566 { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64, 6567 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5, 6568 .access = PL3_W, .type = ARM_CP_NOP }, 6569 }; 6570 6571 static void aa64_allint_write(CPUARMState *env, const ARMCPRegInfo *ri, 6572 uint64_t value) 6573 { 6574 env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT); 6575 } 6576 6577 static uint64_t aa64_allint_read(CPUARMState *env, const ARMCPRegInfo *ri) 6578 { 6579 return env->pstate & PSTATE_ALLINT; 6580 } 6581 6582 static CPAccessResult aa64_allint_access(CPUARMState *env, 6583 const ARMCPRegInfo *ri, bool isread) 6584 { 6585 if (!isread && arm_current_el(env) == 1 && 6586 (arm_hcrx_el2_eff(env) & HCRX_TALLINT)) { 6587 return CP_ACCESS_TRAP_EL2; 6588 } 6589 return CP_ACCESS_OK; 6590 } 6591 6592 static const ARMCPRegInfo nmi_reginfo[] = { 6593 { .name = "ALLINT", .state = ARM_CP_STATE_AA64, 6594 .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 4, .crm = 3, 6595 .type = ARM_CP_NO_RAW, 6596 .access = PL1_RW, .accessfn = aa64_allint_access, 6597 .fieldoffset = offsetof(CPUARMState, pstate), 6598 .writefn = aa64_allint_write, .readfn = aa64_allint_read, 6599 .resetfn = arm_cp_reset_ignore }, 6600 }; 6601 #endif /* TARGET_AARCH64 */ 6602 6603 static void define_pmu_regs(ARMCPU *cpu) 6604 { 6605 /* 6606 * v7 performance monitor control register: same implementor 6607 * field as main ID register, and we implement four counters in 6608 * addition to the cycle count register. 
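 * (The number of event counters actually exposed comes from
 * pmu_num_counters() just below, so it need not be exactly four.)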
6609 */ 6610 unsigned int i, pmcrn = pmu_num_counters(&cpu->env); 6611 ARMCPRegInfo pmcr = { 6612 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 6613 .access = PL0_RW, 6614 .fgt = FGT_PMCR_EL0, 6615 .type = ARM_CP_IO | ARM_CP_ALIAS, 6616 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 6617 .accessfn = pmreg_access, 6618 .readfn = pmcr_read, .raw_readfn = raw_read, 6619 .writefn = pmcr_write, .raw_writefn = raw_write, 6620 }; 6621 ARMCPRegInfo pmcr64 = { 6622 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 6623 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 6624 .access = PL0_RW, .accessfn = pmreg_access, 6625 .fgt = FGT_PMCR_EL0, 6626 .type = ARM_CP_IO, 6627 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 6628 .resetvalue = cpu->isar.reset_pmcr_el0, 6629 .readfn = pmcr_read, .raw_readfn = raw_read, 6630 .writefn = pmcr_write, .raw_writefn = raw_write, 6631 }; 6632 6633 define_one_arm_cp_reg(cpu, &pmcr); 6634 define_one_arm_cp_reg(cpu, &pmcr64); 6635 for (i = 0; i < pmcrn; i++) { 6636 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); 6637 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); 6638 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); 6639 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); 6640 ARMCPRegInfo pmev_regs[] = { 6641 { .name = pmevcntr_name, .cp = 15, .crn = 14, 6642 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6643 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6644 .fgt = FGT_PMEVCNTRN_EL0, 6645 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6646 .accessfn = pmreg_access_xevcntr }, 6647 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, 6648 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), 6649 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr, 6650 .type = ARM_CP_IO, 6651 .fgt = FGT_PMEVCNTRN_EL0, 6652 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6653 .raw_readfn = pmevcntr_rawread, 6654 .raw_writefn = pmevcntr_rawwrite }, 6655 { .name = pmevtyper_name, .cp = 15, .crn = 14, 6656 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6657 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6658 .fgt = FGT_PMEVTYPERN_EL0, 6659 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6660 .accessfn = pmreg_access }, 6661 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, 6662 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), 6663 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6664 .fgt = FGT_PMEVTYPERN_EL0, 6665 .type = ARM_CP_IO, 6666 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6667 .raw_writefn = pmevtyper_rawwrite }, 6668 }; 6669 define_arm_cp_regs(cpu, pmev_regs); 6670 g_free(pmevcntr_name); 6671 g_free(pmevcntr_el0_name); 6672 g_free(pmevtyper_name); 6673 g_free(pmevtyper_el0_name); 6674 } 6675 if (cpu_isar_feature(aa32_pmuv3p1, cpu)) { 6676 ARMCPRegInfo v81_pmu_regs[] = { 6677 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, 6678 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, 6679 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6680 .fgt = FGT_PMCEIDN_EL0, 6681 .resetvalue = extract64(cpu->pmceid0, 32, 32) }, 6682 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, 6683 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, 6684 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6685 .fgt = FGT_PMCEIDN_EL0, 6686 .resetvalue = extract64(cpu->pmceid1, 32, 32) }, 6687 }; 6688 define_arm_cp_regs(cpu, v81_pmu_regs); 6689 } 6690 if 
(cpu_isar_feature(any_pmuv3p4, cpu)) { 6691 static const ARMCPRegInfo v84_pmmir = { 6692 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, 6693 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, 6694 .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6695 .fgt = FGT_PMMIR_EL1, 6696 .resetvalue = 0 6697 }; 6698 define_one_arm_cp_reg(cpu, &v84_pmmir); 6699 } 6700 } 6701 6702 #ifndef CONFIG_USER_ONLY 6703 /* 6704 * We don't know until after realize whether there's a GICv3 6705 * attached, and that is what registers the gicv3 sysregs. 6706 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 6707 * at runtime. 6708 */ 6709 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 6710 { 6711 ARMCPU *cpu = env_archcpu(env); 6712 uint64_t pfr1 = cpu->isar.id_pfr1; 6713 6714 if (env->gicv3state) { 6715 pfr1 |= 1 << 28; 6716 } 6717 return pfr1; 6718 } 6719 6720 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 6721 { 6722 ARMCPU *cpu = env_archcpu(env); 6723 uint64_t pfr0 = cpu->isar.id_aa64pfr0; 6724 6725 if (env->gicv3state) { 6726 pfr0 |= 1 << 24; 6727 } 6728 return pfr0; 6729 } 6730 #endif 6731 6732 /* 6733 * Shared logic between LORID and the rest of the LOR* registers. 6734 * Secure state exclusion has already been dealt with. 6735 */ 6736 static CPAccessResult access_lor_ns(CPUARMState *env, 6737 const ARMCPRegInfo *ri, bool isread) 6738 { 6739 int el = arm_current_el(env); 6740 6741 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { 6742 return CP_ACCESS_TRAP_EL2; 6743 } 6744 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { 6745 return CP_ACCESS_TRAP_EL3; 6746 } 6747 return CP_ACCESS_OK; 6748 } 6749 6750 static CPAccessResult access_lor_other(CPUARMState *env, 6751 const ARMCPRegInfo *ri, bool isread) 6752 { 6753 if (arm_is_secure_below_el3(env)) { 6754 /* UNDEF if SCR_EL3.NS == 0 */ 6755 return CP_ACCESS_UNDEFINED; 6756 } 6757 return access_lor_ns(env, ri, isread); 6758 } 6759 6760 /* 6761 * A trivial implementation of ARMv8.1-LOR leaves all of these 6762 * registers fixed at 0, which indicates that there are zero 6763 * supported Limited Ordering regions. 
6764 */ 6765 static const ARMCPRegInfo lor_reginfo[] = { 6766 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, 6767 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, 6768 .access = PL1_RW, .accessfn = access_lor_other, 6769 .fgt = FGT_LORSA_EL1, 6770 .type = ARM_CP_CONST, .resetvalue = 0 }, 6771 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, 6772 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, 6773 .access = PL1_RW, .accessfn = access_lor_other, 6774 .fgt = FGT_LOREA_EL1, 6775 .type = ARM_CP_CONST, .resetvalue = 0 }, 6776 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, 6777 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, 6778 .access = PL1_RW, .accessfn = access_lor_other, 6779 .fgt = FGT_LORN_EL1, 6780 .type = ARM_CP_CONST, .resetvalue = 0 }, 6781 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, 6782 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, 6783 .access = PL1_RW, .accessfn = access_lor_other, 6784 .fgt = FGT_LORC_EL1, 6785 .type = ARM_CP_CONST, .resetvalue = 0 }, 6786 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, 6787 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, 6788 .access = PL1_R, .accessfn = access_lor_ns, 6789 .fgt = FGT_LORID_EL1, 6790 .type = ARM_CP_CONST, .resetvalue = 0 }, 6791 }; 6792 6793 #ifdef TARGET_AARCH64 6794 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, 6795 bool isread) 6796 { 6797 int el = arm_current_el(env); 6798 6799 if (el < 2 && 6800 arm_is_el2_enabled(env) && 6801 !(arm_hcr_el2_eff(env) & HCR_APK)) { 6802 return CP_ACCESS_TRAP_EL2; 6803 } 6804 if (el < 3 && 6805 arm_feature(env, ARM_FEATURE_EL3) && 6806 !(env->cp15.scr_el3 & SCR_APK)) { 6807 return CP_ACCESS_TRAP_EL3; 6808 } 6809 return CP_ACCESS_OK; 6810 } 6811 6812 static const ARMCPRegInfo pauth_reginfo[] = { 6813 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6814 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, 6815 .access = PL1_RW, .accessfn = access_pauth, 6816 .fgt = FGT_APDAKEY, 6817 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) }, 6818 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6819 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, 6820 .access = PL1_RW, .accessfn = access_pauth, 6821 .fgt = FGT_APDAKEY, 6822 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) }, 6823 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6824 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, 6825 .access = PL1_RW, .accessfn = access_pauth, 6826 .fgt = FGT_APDBKEY, 6827 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) }, 6828 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6829 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3, 6830 .access = PL1_RW, .accessfn = access_pauth, 6831 .fgt = FGT_APDBKEY, 6832 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) }, 6833 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6834 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, 6835 .access = PL1_RW, .accessfn = access_pauth, 6836 .fgt = FGT_APGAKEY, 6837 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) }, 6838 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6839 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, 6840 .access = PL1_RW, .accessfn = access_pauth, 6841 .fgt = FGT_APGAKEY, 6842 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) }, 6843 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6844 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, 6845 .access = PL1_RW, .accessfn = access_pauth, 6846 .fgt = FGT_APIAKEY, 6847 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) 
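/* Each pointer-authentication key is a single 128-bit value, held as a
   {lo, hi} pair in CPUARMState and exposed as the APxxKEYLO_EL1 and
   APxxKEYHI_EL1 sysreg pairs */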
}, 6848 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6849 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, 6850 .access = PL1_RW, .accessfn = access_pauth, 6851 .fgt = FGT_APIAKEY, 6852 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) }, 6853 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6854 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, 6855 .access = PL1_RW, .accessfn = access_pauth, 6856 .fgt = FGT_APIBKEY, 6857 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) }, 6858 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6859 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, 6860 .access = PL1_RW, .accessfn = access_pauth, 6861 .fgt = FGT_APIBKEY, 6862 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, 6863 }; 6864 6865 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 6866 { 6867 Error *err = NULL; 6868 uint64_t ret; 6869 6870 /* Success sets NZCV = 0000. */ 6871 env->NF = env->CF = env->VF = 0, env->ZF = 1; 6872 6873 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) { 6874 /* 6875 * ??? Failed, for unknown reasons in the crypto subsystem. 6876 * The best we can do is log the reason and return the 6877 * timed-out indication to the guest. There is no reason 6878 * we know to expect this failure to be transitory, so the 6879 * guest may well hang retrying the operation. 6880 */ 6881 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s", 6882 ri->name, error_get_pretty(err)); 6883 error_free(err); 6884 6885 env->ZF = 0; /* NZCV = 0100 */ 6886 return 0; 6887 } 6888 return ret; 6889 } 6890 6891 /* We do not support re-seeding, so the two registers operate the same. */ 6892 static const ARMCPRegInfo rndr_reginfo[] = { 6893 { .name = "RNDR", .state = ARM_CP_STATE_AA64, 6894 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 6895 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0, 6896 .access = PL0_R, .readfn = rndr_readfn }, 6897 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64, 6898 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 6899 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1, 6900 .access = PL0_R, .readfn = rndr_readfn }, 6901 }; 6902 6903 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, 6904 uint64_t value) 6905 { 6906 #ifdef CONFIG_TCG 6907 ARMCPU *cpu = env_archcpu(env); 6908 /* CTR_EL0 System register -> DminLine, bits [19:16] */ 6909 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); 6910 uint64_t vaddr_in = (uint64_t) value; 6911 uint64_t vaddr = vaddr_in & ~(dline_size - 1); 6912 void *haddr; 6913 int mem_idx = arm_env_mmu_index(env); 6914 6915 /* This won't be crossing page boundaries */ 6916 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); 6917 if (haddr) { 6918 #ifndef CONFIG_USER_ONLY 6919 6920 ram_addr_t offset; 6921 MemoryRegion *mr; 6922 6923 /* RCU lock is already being held */ 6924 mr = memory_region_from_host(haddr, &offset); 6925 6926 if (mr) { 6927 memory_region_writeback(mr, offset, dline_size); 6928 } 6929 #endif /*CONFIG_USER_ONLY*/ 6930 } 6931 #else 6932 /* Handled by hardware accelerator.
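* When a hardware accelerator such as KVM is in use the host performs
* the cache maintenance for DC CVAP/CVADP itself, so this TCG-only
* write function should never be reached; hence the assertion below.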
*/ 6933 g_assert_not_reached(); 6934 #endif /* CONFIG_TCG */ 6935 } 6936 6937 static const ARMCPRegInfo dcpop_reg[] = { 6938 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64, 6939 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1, 6940 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 6941 .fgt = FGT_DCCVAP, 6942 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, 6943 }; 6944 6945 static const ARMCPRegInfo dcpodp_reg[] = { 6946 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64, 6947 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1, 6948 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 6949 .fgt = FGT_DCCVADP, 6950 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, 6951 }; 6952 6953 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri, 6954 bool isread) 6955 { 6956 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) { 6957 return CP_ACCESS_TRAP_EL2; 6958 } 6959 6960 return CP_ACCESS_OK; 6961 } 6962 6963 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri, 6964 bool isread) 6965 { 6966 int el = arm_current_el(env); 6967 if (el < 2 && arm_is_el2_enabled(env)) { 6968 uint64_t hcr = arm_hcr_el2_eff(env); 6969 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { 6970 return CP_ACCESS_TRAP_EL2; 6971 } 6972 } 6973 if (el < 3 && 6974 arm_feature(env, ARM_FEATURE_EL3) && 6975 !(env->cp15.scr_el3 & SCR_ATA)) { 6976 return CP_ACCESS_TRAP_EL3; 6977 } 6978 return CP_ACCESS_OK; 6979 } 6980 6981 static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri, 6982 bool isread) 6983 { 6984 CPAccessResult nv1 = access_nv1(env, ri, isread); 6985 6986 if (nv1 != CP_ACCESS_OK) { 6987 return nv1; 6988 } 6989 return access_mte(env, ri, isread); 6990 } 6991 6992 static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri, 6993 bool isread) 6994 { 6995 /* 6996 * TFSR_EL2: similar to generic access_mte(), but we need to 6997 * account for FEAT_NV. At EL1 this must be a FEAT_NV access; 6998 * if NV2 is enabled then we will redirect this to TFSR_EL1 6999 * after doing the HCR and SCR ATA traps; otherwise this will 7000 * be a trap to EL2 and the HCR/SCR traps do not apply. 
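* The redirection itself is requested via the ARM_CP_NV2_REDIRECT flag
* on the TFSR_EL2 entry in mte_reginfo[] below.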
7001 */ 7002 int el = arm_current_el(env); 7003 7004 if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) { 7005 return CP_ACCESS_OK; 7006 } 7007 if (el < 2 && arm_is_el2_enabled(env)) { 7008 uint64_t hcr = arm_hcr_el2_eff(env); 7009 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { 7010 return CP_ACCESS_TRAP_EL2; 7011 } 7012 } 7013 if (el < 3 && 7014 arm_feature(env, ARM_FEATURE_EL3) && 7015 !(env->cp15.scr_el3 & SCR_ATA)) { 7016 return CP_ACCESS_TRAP_EL3; 7017 } 7018 return CP_ACCESS_OK; 7019 } 7020 7021 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri) 7022 { 7023 return env->pstate & PSTATE_TCO; 7024 } 7025 7026 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 7027 { 7028 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO); 7029 } 7030 7031 static const ARMCPRegInfo mte_reginfo[] = { 7032 { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64, 7033 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1, 7034 .access = PL1_RW, .accessfn = access_mte, 7035 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) }, 7036 { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64, 7037 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0, 7038 .access = PL1_RW, .accessfn = access_tfsr_el1, 7039 .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1, 7040 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) }, 7041 { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64, 7042 .type = ARM_CP_NV2_REDIRECT, 7043 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0, 7044 .access = PL2_RW, .accessfn = access_tfsr_el2, 7045 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) }, 7046 { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64, 7047 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0, 7048 .access = PL3_RW, 7049 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) }, 7050 { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64, 7051 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5, 7052 .access = PL1_RW, .accessfn = access_mte, 7053 .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) }, 7054 { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64, 7055 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6, 7056 .access = PL1_RW, .accessfn = access_mte, 7057 .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) }, 7058 { .name = "TCO", .state = ARM_CP_STATE_AA64, 7059 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7, 7060 .type = ARM_CP_NO_RAW, 7061 .access = PL0_RW, .readfn = tco_read, .writefn = tco_write }, 7062 { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64, 7063 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3, 7064 .type = ARM_CP_NOP, .access = PL1_W, 7065 .fgt = FGT_DCIVAC, 7066 .accessfn = aa64_cacheop_poc_access }, 7067 { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64, 7068 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4, 7069 .fgt = FGT_DCISW, 7070 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7071 { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64, 7072 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5, 7073 .type = ARM_CP_NOP, .access = PL1_W, 7074 .fgt = FGT_DCIVAC, 7075 .accessfn = aa64_cacheop_poc_access }, 7076 { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64, 7077 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6, 7078 .fgt = FGT_DCISW, 7079 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7080 { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64, 7081 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4, 7082 .fgt = FGT_DCCSW, 7083 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7084 { .name = "DC_CGDSW", 
.state = ARM_CP_STATE_AA64, 7085 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6, 7086 .fgt = FGT_DCCSW, 7087 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7088 { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64, 7089 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4, 7090 .fgt = FGT_DCCISW, 7091 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7092 { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64, 7093 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6, 7094 .fgt = FGT_DCCISW, 7095 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7096 }; 7097 7098 static const ARMCPRegInfo mte_tco_ro_reginfo[] = { 7099 { .name = "TCO", .state = ARM_CP_STATE_AA64, 7100 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7, 7101 .type = ARM_CP_CONST, .access = PL0_RW, }, 7102 }; 7103 7104 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = { 7105 { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64, 7106 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3, 7107 .type = ARM_CP_NOP, .access = PL0_W, 7108 .fgt = FGT_DCCVAC, 7109 .accessfn = aa64_cacheop_poc_access }, 7110 { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64, 7111 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5, 7112 .type = ARM_CP_NOP, .access = PL0_W, 7113 .fgt = FGT_DCCVAC, 7114 .accessfn = aa64_cacheop_poc_access }, 7115 { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64, 7116 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3, 7117 .type = ARM_CP_NOP, .access = PL0_W, 7118 .fgt = FGT_DCCVAP, 7119 .accessfn = aa64_cacheop_poc_access }, 7120 { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64, 7121 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5, 7122 .type = ARM_CP_NOP, .access = PL0_W, 7123 .fgt = FGT_DCCVAP, 7124 .accessfn = aa64_cacheop_poc_access }, 7125 { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64, 7126 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3, 7127 .type = ARM_CP_NOP, .access = PL0_W, 7128 .fgt = FGT_DCCVADP, 7129 .accessfn = aa64_cacheop_poc_access }, 7130 { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64, 7131 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5, 7132 .type = ARM_CP_NOP, .access = PL0_W, 7133 .fgt = FGT_DCCVADP, 7134 .accessfn = aa64_cacheop_poc_access }, 7135 { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64, 7136 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3, 7137 .type = ARM_CP_NOP, .access = PL0_W, 7138 .fgt = FGT_DCCIVAC, 7139 .accessfn = aa64_cacheop_poc_access }, 7140 { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64, 7141 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5, 7142 .type = ARM_CP_NOP, .access = PL0_W, 7143 .fgt = FGT_DCCIVAC, 7144 .accessfn = aa64_cacheop_poc_access }, 7145 { .name = "DC_GVA", .state = ARM_CP_STATE_AA64, 7146 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3, 7147 .access = PL0_W, .type = ARM_CP_DC_GVA, 7148 #ifndef CONFIG_USER_ONLY 7149 /* Avoid overhead of an access check that always passes in user-mode */ 7150 .accessfn = aa64_zva_access, 7151 .fgt = FGT_DCZVA, 7152 #endif 7153 }, 7154 { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64, 7155 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4, 7156 .access = PL0_W, .type = ARM_CP_DC_GZVA, 7157 #ifndef CONFIG_USER_ONLY 7158 /* Avoid overhead of an access check that always passes in user-mode */ 7159 .accessfn = aa64_zva_access, 7160 .fgt = FGT_DCZVA, 7161 #endif 7162 }, 7163 }; 7164 7165 static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri, 7166 bool isread) 7167 { 7168 uint64_t hcr = arm_hcr_el2_eff(env); 7169 int el = 
arm_current_el(env); 7170 7171 if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) { 7172 if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) { 7173 if (hcr & HCR_TGE) { 7174 return CP_ACCESS_TRAP_EL2; 7175 } 7176 return CP_ACCESS_TRAP_EL1; 7177 } 7178 } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) { 7179 return CP_ACCESS_TRAP_EL2; 7180 } 7181 if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) { 7182 return CP_ACCESS_TRAP_EL2; 7183 } 7184 if (el < 3 7185 && arm_feature(env, ARM_FEATURE_EL3) 7186 && !(env->cp15.scr_el3 & SCR_ENSCXT)) { 7187 return CP_ACCESS_TRAP_EL3; 7188 } 7189 return CP_ACCESS_OK; 7190 } 7191 7192 static CPAccessResult access_scxtnum_el1(CPUARMState *env, 7193 const ARMCPRegInfo *ri, 7194 bool isread) 7195 { 7196 CPAccessResult nv1 = access_nv1(env, ri, isread); 7197 7198 if (nv1 != CP_ACCESS_OK) { 7199 return nv1; 7200 } 7201 return access_scxtnum(env, ri, isread); 7202 } 7203 7204 static const ARMCPRegInfo scxtnum_reginfo[] = { 7205 { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64, 7206 .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7, 7207 .access = PL0_RW, .accessfn = access_scxtnum, 7208 .fgt = FGT_SCXTNUM_EL0, 7209 .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) }, 7210 { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64, 7211 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7, 7212 .access = PL1_RW, .accessfn = access_scxtnum_el1, 7213 .fgt = FGT_SCXTNUM_EL1, 7214 .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1, 7215 .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) }, 7216 { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64, 7217 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7, 7218 .access = PL2_RW, .accessfn = access_scxtnum, 7219 .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) }, 7220 { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64, 7221 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7, 7222 .access = PL3_RW, 7223 .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) }, 7224 }; 7225 7226 static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri, 7227 bool isread) 7228 { 7229 if (arm_current_el(env) == 2 && 7230 arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) { 7231 return CP_ACCESS_TRAP_EL3; 7232 } 7233 return CP_ACCESS_OK; 7234 } 7235 7236 static const ARMCPRegInfo fgt_reginfo[] = { 7237 { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64, 7238 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 7239 .nv2_redirect_offset = 0x1b8, 7240 .access = PL2_RW, .accessfn = access_fgt, 7241 .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) }, 7242 { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64, 7243 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5, 7244 .nv2_redirect_offset = 0x1c0, 7245 .access = PL2_RW, .accessfn = access_fgt, 7246 .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) }, 7247 { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64, 7248 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4, 7249 .nv2_redirect_offset = 0x1d0, 7250 .access = PL2_RW, .accessfn = access_fgt, 7251 .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) }, 7252 { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64, 7253 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5, 7254 .nv2_redirect_offset = 0x1d8, 7255 .access = PL2_RW, .accessfn = access_fgt, 7256 .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) }, 7257 { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64, 7258 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6, 7259 
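/* The nv2_redirect_offset fields in these entries give the offset of each
   register within the FEAT_NV2 register image that VNCR_EL2 points to */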
.nv2_redirect_offset = 0x1c8, 7260 .access = PL2_RW, .accessfn = access_fgt, 7261 .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) }, 7262 }; 7263 7264 static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri, 7265 uint64_t value) 7266 { 7267 /* 7268 * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee 7269 * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything 7270 * about the RESS bits at the top -- we choose the "generate an EL2 7271 * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let 7272 * the ptw.c code detect the resulting invalid address). 7273 */ 7274 env->cp15.vncr_el2 = value & ~0xfffULL; 7275 } 7276 7277 static const ARMCPRegInfo nv2_reginfo[] = { 7278 { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64, 7279 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0, 7280 .access = PL2_RW, 7281 .writefn = vncr_write, 7282 .nv2_redirect_offset = 0xb0, 7283 .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) }, 7284 }; 7285 7286 #endif /* TARGET_AARCH64 */ 7287 7288 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, 7289 bool isread) 7290 { 7291 int el = arm_current_el(env); 7292 7293 if (el == 0) { 7294 uint64_t sctlr = arm_sctlr(env, el); 7295 if (!(sctlr & SCTLR_EnRCTX)) { 7296 return CP_ACCESS_TRAP_EL1; 7297 } 7298 } else if (el == 1) { 7299 uint64_t hcr = arm_hcr_el2_eff(env); 7300 if (hcr & HCR_NV) { 7301 return CP_ACCESS_TRAP_EL2; 7302 } 7303 } 7304 return CP_ACCESS_OK; 7305 } 7306 7307 static const ARMCPRegInfo predinv_reginfo[] = { 7308 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64, 7309 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4, 7310 .fgt = FGT_CFPRCTX, 7311 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7312 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64, 7313 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5, 7314 .fgt = FGT_DVPRCTX, 7315 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7316 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64, 7317 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7, 7318 .fgt = FGT_CPPRCTX, 7319 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7320 /* 7321 * Note the AArch32 opcodes have a different OPC1. 
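* (The AArch64 CFP/DVP/CPP RCTX encodings above use opc1 = 3, while the
* AArch32 CFPRCTX/DVPRCTX/CPPRCTX encodings below use opc1 = 0.)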
7322 */ 7323 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32, 7324 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4, 7325 .fgt = FGT_CFPRCTX, 7326 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7327 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32, 7328 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5, 7329 .fgt = FGT_DVPRCTX, 7330 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7331 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, 7332 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, 7333 .fgt = FGT_CPPRCTX, 7334 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7335 }; 7336 7337 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri) 7338 { 7339 /* Read the high 32 bits of the current CCSIDR */ 7340 return extract64(ccsidr_read(env, ri), 32, 32); 7341 } 7342 7343 static const ARMCPRegInfo ccsidr2_reginfo[] = { 7344 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH, 7345 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2, 7346 .access = PL1_R, 7347 .accessfn = access_tid4, 7348 .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW }, 7349 }; 7350 7351 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 7352 bool isread) 7353 { 7354 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) { 7355 return CP_ACCESS_TRAP_EL2; 7356 } 7357 7358 return CP_ACCESS_OK; 7359 } 7360 7361 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 7362 bool isread) 7363 { 7364 if (arm_feature(env, ARM_FEATURE_V8)) { 7365 return access_aa64_tid3(env, ri, isread); 7366 } 7367 7368 return CP_ACCESS_OK; 7369 } 7370 7371 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, 7372 bool isread) 7373 { 7374 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) { 7375 return CP_ACCESS_TRAP_EL2; 7376 } 7377 7378 return CP_ACCESS_OK; 7379 } 7380 7381 static CPAccessResult access_joscr_jmcr(CPUARMState *env, 7382 const ARMCPRegInfo *ri, bool isread) 7383 { 7384 /* 7385 * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only 7386 * in v7A, not in v8A. 
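* Hence the check below only applies the HSTR_EL2.TJDBX trap when
* ARM_FEATURE_V8 is not set.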
7387 */ 7388 if (!arm_feature(env, ARM_FEATURE_V8) && 7389 arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && 7390 (env->cp15.hstr_el2 & HSTR_TJDBX)) { 7391 return CP_ACCESS_TRAP_EL2; 7392 } 7393 return CP_ACCESS_OK; 7394 } 7395 7396 static const ARMCPRegInfo jazelle_regs[] = { 7397 { .name = "JIDR", 7398 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, 7399 .access = PL1_R, .accessfn = access_jazelle, 7400 .type = ARM_CP_CONST, .resetvalue = 0 }, 7401 { .name = "JOSCR", 7402 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, 7403 .accessfn = access_joscr_jmcr, 7404 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 7405 { .name = "JMCR", 7406 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, 7407 .accessfn = access_joscr_jmcr, 7408 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 7409 }; 7410 7411 static const ARMCPRegInfo contextidr_el2 = { 7412 .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64, 7413 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, 7414 .access = PL2_RW, 7415 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) 7416 }; 7417 7418 static const ARMCPRegInfo vhe_reginfo[] = { 7419 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, 7420 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, 7421 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, 7422 .raw_writefn = raw_write, 7423 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) }, 7424 #ifndef CONFIG_USER_ONLY 7425 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64, 7426 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2, 7427 .fieldoffset = 7428 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval), 7429 .type = ARM_CP_IO, .access = PL2_RW, 7430 .writefn = gt_hv_cval_write, .raw_writefn = raw_write }, 7431 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 7432 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0, 7433 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 7434 .resetfn = gt_hv_timer_reset, 7435 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write }, 7436 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH, 7437 .type = ARM_CP_IO, 7438 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1, 7439 .access = PL2_RW, 7440 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl), 7441 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write }, 7442 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64, 7443 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, 7444 .type = ARM_CP_IO | ARM_CP_ALIAS, 7445 .access = PL2_RW, .accessfn = access_el1nvpct, 7446 .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1, 7447 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 7448 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, 7449 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, 7450 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, 7451 .type = ARM_CP_IO | ARM_CP_ALIAS, 7452 .access = PL2_RW, .accessfn = access_el1nvvct, 7453 .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1, 7454 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 7455 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, 7456 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, 7457 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0, 7458 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 7459 .access = PL2_RW, .accessfn = e2h_access, 7460 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write }, 7461 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64, 7462 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 
= 0, 7463 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 7464 .access = PL2_RW, .accessfn = e2h_access, 7465 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write }, 7466 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64, 7467 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, 7468 .type = ARM_CP_IO | ARM_CP_ALIAS, 7469 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 7470 .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1, 7471 .access = PL2_RW, .accessfn = access_el1nvpct, 7472 .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, 7473 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, 7474 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, 7475 .type = ARM_CP_IO | ARM_CP_ALIAS, 7476 .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1, 7477 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 7478 .access = PL2_RW, .accessfn = access_el1nvvct, 7479 .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, 7480 #endif 7481 }; 7482 7483 #ifndef CONFIG_USER_ONLY 7484 static const ARMCPRegInfo ats1e1_reginfo[] = { 7485 { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64, 7486 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 7487 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7488 .fgt = FGT_ATS1E1RP, 7489 .accessfn = at_s1e01_access, .writefn = ats_write64 }, 7490 { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64, 7491 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 7492 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7493 .fgt = FGT_ATS1E1WP, 7494 .accessfn = at_s1e01_access, .writefn = ats_write64 }, 7495 }; 7496 7497 static const ARMCPRegInfo ats1cp_reginfo[] = { 7498 { .name = "ATS1CPRP", 7499 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 7500 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7501 .writefn = ats_write }, 7502 { .name = "ATS1CPWP", 7503 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 7504 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7505 .writefn = ats_write }, 7506 }; 7507 #endif 7508 7509 /* 7510 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and 7511 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field 7512 * is non-zero, which is never for ARMv7, optionally in ARMv8 7513 * and mandatorily for ARMv8.2 and up. 7514 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's 7515 * implementation is RAZ/WI we can ignore this detail, as we 7516 * do for ACTLR. 7517 */ 7518 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { 7519 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32, 7520 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3, 7521 .access = PL1_RW, .accessfn = access_tacr, 7522 .type = ARM_CP_CONST, .resetvalue = 0 }, 7523 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32, 7524 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, 7525 .access = PL2_RW, .type = ARM_CP_CONST, 7526 .resetvalue = 0 }, 7527 }; 7528 7529 void register_cp_regs_for_features(ARMCPU *cpu) 7530 { 7531 /* Register all the coprocessor registers based on feature bits */ 7532 CPUARMState *env = &cpu->env; 7533 if (arm_feature(env, ARM_FEATURE_M)) { 7534 /* M profile has no coprocessor registers */ 7535 return; 7536 } 7537 7538 define_arm_cp_regs(cpu, cp_reginfo); 7539 if (!arm_feature(env, ARM_FEATURE_V8)) { 7540 /* 7541 * Must go early as it is full of wildcards that may be 7542 * overridden by later definitions. 
7543 */ 7544 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 7545 } 7546 7547 define_tlb_insn_regs(cpu); 7548 7549 if (arm_feature(env, ARM_FEATURE_V6)) { 7550 /* The ID registers all have impdef reset values */ 7551 ARMCPRegInfo v6_idregs[] = { 7552 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 7553 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 7554 .access = PL1_R, .type = ARM_CP_CONST, 7555 .accessfn = access_aa32_tid3, 7556 .resetvalue = cpu->isar.id_pfr0 }, 7557 /* 7558 * ID_PFR1 is not a plain ARM_CP_CONST because we don't know 7559 * the value of the GIC field until after we define these regs. 7560 */ 7561 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 7562 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 7563 .access = PL1_R, .type = ARM_CP_NO_RAW, 7564 .accessfn = access_aa32_tid3, 7565 #ifdef CONFIG_USER_ONLY 7566 .type = ARM_CP_CONST, 7567 .resetvalue = cpu->isar.id_pfr1, 7568 #else 7569 .type = ARM_CP_NO_RAW, 7570 .accessfn = access_aa32_tid3, 7571 .readfn = id_pfr1_read, 7572 .writefn = arm_cp_write_ignore 7573 #endif 7574 }, 7575 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 7576 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 7577 .access = PL1_R, .type = ARM_CP_CONST, 7578 .accessfn = access_aa32_tid3, 7579 .resetvalue = cpu->isar.id_dfr0 }, 7580 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 7581 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 7582 .access = PL1_R, .type = ARM_CP_CONST, 7583 .accessfn = access_aa32_tid3, 7584 .resetvalue = cpu->id_afr0 }, 7585 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 7586 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 7587 .access = PL1_R, .type = ARM_CP_CONST, 7588 .accessfn = access_aa32_tid3, 7589 .resetvalue = cpu->isar.id_mmfr0 }, 7590 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 7591 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 7592 .access = PL1_R, .type = ARM_CP_CONST, 7593 .accessfn = access_aa32_tid3, 7594 .resetvalue = cpu->isar.id_mmfr1 }, 7595 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 7596 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 7597 .access = PL1_R, .type = ARM_CP_CONST, 7598 .accessfn = access_aa32_tid3, 7599 .resetvalue = cpu->isar.id_mmfr2 }, 7600 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 7601 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 7602 .access = PL1_R, .type = ARM_CP_CONST, 7603 .accessfn = access_aa32_tid3, 7604 .resetvalue = cpu->isar.id_mmfr3 }, 7605 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 7606 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 7607 .access = PL1_R, .type = ARM_CP_CONST, 7608 .accessfn = access_aa32_tid3, 7609 .resetvalue = cpu->isar.id_isar0 }, 7610 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 7611 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 7612 .access = PL1_R, .type = ARM_CP_CONST, 7613 .accessfn = access_aa32_tid3, 7614 .resetvalue = cpu->isar.id_isar1 }, 7615 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 7616 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 7617 .access = PL1_R, .type = ARM_CP_CONST, 7618 .accessfn = access_aa32_tid3, 7619 .resetvalue = cpu->isar.id_isar2 }, 7620 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 7621 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 7622 .access = PL1_R, .type = ARM_CP_CONST, 7623 .accessfn = access_aa32_tid3, 7624 .resetvalue = cpu->isar.id_isar3 }, 7625 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 7626 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 7627 .access = PL1_R, .type = ARM_CP_CONST, 7628 
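/* As with the other ID registers, EL1 reads can be trapped to EL2 by
   HCR_EL2.TID3; access_aa32_tid3 applies that trap only on v8 CPUs */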
.accessfn = access_aa32_tid3, 7629 .resetvalue = cpu->isar.id_isar4 }, 7630 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 7631 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 7632 .access = PL1_R, .type = ARM_CP_CONST, 7633 .accessfn = access_aa32_tid3, 7634 .resetvalue = cpu->isar.id_isar5 }, 7635 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 7636 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 7637 .access = PL1_R, .type = ARM_CP_CONST, 7638 .accessfn = access_aa32_tid3, 7639 .resetvalue = cpu->isar.id_mmfr4 }, 7640 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 7641 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 7642 .access = PL1_R, .type = ARM_CP_CONST, 7643 .accessfn = access_aa32_tid3, 7644 .resetvalue = cpu->isar.id_isar6 }, 7645 }; 7646 define_arm_cp_regs(cpu, v6_idregs); 7647 define_arm_cp_regs(cpu, v6_cp_reginfo); 7648 } else { 7649 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 7650 } 7651 if (arm_feature(env, ARM_FEATURE_V6K)) { 7652 define_arm_cp_regs(cpu, v6k_cp_reginfo); 7653 } 7654 if (arm_feature(env, ARM_FEATURE_V7VE)) { 7655 define_arm_cp_regs(cpu, pmovsset_cp_reginfo); 7656 } 7657 if (arm_feature(env, ARM_FEATURE_V7)) { 7658 ARMCPRegInfo clidr = { 7659 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 7660 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 7661 .access = PL1_R, .type = ARM_CP_CONST, 7662 .accessfn = access_tid4, 7663 .fgt = FGT_CLIDR_EL1, 7664 .resetvalue = cpu->clidr 7665 }; 7666 define_one_arm_cp_reg(cpu, &clidr); 7667 define_arm_cp_regs(cpu, v7_cp_reginfo); 7668 define_debug_regs(cpu); 7669 define_pmu_regs(cpu); 7670 } else { 7671 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 7672 } 7673 if (arm_feature(env, ARM_FEATURE_V8)) { 7674 /* 7675 * v8 ID registers, which all have impdef reset values. 7676 * Note that within the ID register ranges the unused slots 7677 * must all RAZ, not UNDEF; future architecture versions may 7678 * define new registers here. 7679 * ID registers which are AArch64 views of the AArch32 ID registers 7680 * which already existed in v6 and v7 are handled elsewhere, 7681 * in v6_idregs[]. 7682 */ 7683 int i; 7684 ARMCPRegInfo v8_idregs[] = { 7685 /* 7686 * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system 7687 * emulation because we don't know the right value for the 7688 * GIC field until after we define these regs. 
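* Instead the system-emulation definition uses id_aa64pfr0_read(), which
* sets the GIC field (bits [27:24]) to 1 when a GICv3 is attached; see
* id_aa64pfr0_read() earlier in this file.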
7689 */ 7690 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 7691 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 7692 .access = PL1_R, 7693 #ifdef CONFIG_USER_ONLY 7694 .type = ARM_CP_CONST, 7695 .resetvalue = cpu->isar.id_aa64pfr0 7696 #else 7697 .type = ARM_CP_NO_RAW, 7698 .accessfn = access_aa64_tid3, 7699 .readfn = id_aa64pfr0_read, 7700 .writefn = arm_cp_write_ignore 7701 #endif 7702 }, 7703 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 7704 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 7705 .access = PL1_R, .type = ARM_CP_CONST, 7706 .accessfn = access_aa64_tid3, 7707 .resetvalue = cpu->isar.id_aa64pfr1}, 7708 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7709 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 7710 .access = PL1_R, .type = ARM_CP_CONST, 7711 .accessfn = access_aa64_tid3, 7712 .resetvalue = 0 }, 7713 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7714 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 7715 .access = PL1_R, .type = ARM_CP_CONST, 7716 .accessfn = access_aa64_tid3, 7717 .resetvalue = 0 }, 7718 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, 7719 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 7720 .access = PL1_R, .type = ARM_CP_CONST, 7721 .accessfn = access_aa64_tid3, 7722 .resetvalue = cpu->isar.id_aa64zfr0 }, 7723 { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64, 7724 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 7725 .access = PL1_R, .type = ARM_CP_CONST, 7726 .accessfn = access_aa64_tid3, 7727 .resetvalue = cpu->isar.id_aa64smfr0 }, 7728 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7729 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 7730 .access = PL1_R, .type = ARM_CP_CONST, 7731 .accessfn = access_aa64_tid3, 7732 .resetvalue = 0 }, 7733 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7734 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 7735 .access = PL1_R, .type = ARM_CP_CONST, 7736 .accessfn = access_aa64_tid3, 7737 .resetvalue = 0 }, 7738 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 7739 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 7740 .access = PL1_R, .type = ARM_CP_CONST, 7741 .accessfn = access_aa64_tid3, 7742 .resetvalue = cpu->isar.id_aa64dfr0 }, 7743 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 7744 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 7745 .access = PL1_R, .type = ARM_CP_CONST, 7746 .accessfn = access_aa64_tid3, 7747 .resetvalue = cpu->isar.id_aa64dfr1 }, 7748 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7749 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 7750 .access = PL1_R, .type = ARM_CP_CONST, 7751 .accessfn = access_aa64_tid3, 7752 .resetvalue = 0 }, 7753 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7754 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 7755 .access = PL1_R, .type = ARM_CP_CONST, 7756 .accessfn = access_aa64_tid3, 7757 .resetvalue = 0 }, 7758 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 7759 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 7760 .access = PL1_R, .type = ARM_CP_CONST, 7761 .accessfn = access_aa64_tid3, 7762 .resetvalue = cpu->id_aa64afr0 }, 7763 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 7764 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 7765 .access = PL1_R, .type = ARM_CP_CONST, 7766 .accessfn = access_aa64_tid3, 7767 .resetvalue = cpu->id_aa64afr1 }, 7768 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = 
ARM_CP_STATE_AA64, 7769 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 7770 .access = PL1_R, .type = ARM_CP_CONST, 7771 .accessfn = access_aa64_tid3, 7772 .resetvalue = 0 }, 7773 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7774 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 7775 .access = PL1_R, .type = ARM_CP_CONST, 7776 .accessfn = access_aa64_tid3, 7777 .resetvalue = 0 }, 7778 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 7779 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 7780 .access = PL1_R, .type = ARM_CP_CONST, 7781 .accessfn = access_aa64_tid3, 7782 .resetvalue = cpu->isar.id_aa64isar0 }, 7783 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 7784 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 7785 .access = PL1_R, .type = ARM_CP_CONST, 7786 .accessfn = access_aa64_tid3, 7787 .resetvalue = cpu->isar.id_aa64isar1 }, 7788 { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64, 7789 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 7790 .access = PL1_R, .type = ARM_CP_CONST, 7791 .accessfn = access_aa64_tid3, 7792 .resetvalue = cpu->isar.id_aa64isar2 }, 7793 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7794 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 7795 .access = PL1_R, .type = ARM_CP_CONST, 7796 .accessfn = access_aa64_tid3, 7797 .resetvalue = 0 }, 7798 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7799 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 7800 .access = PL1_R, .type = ARM_CP_CONST, 7801 .accessfn = access_aa64_tid3, 7802 .resetvalue = 0 }, 7803 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7804 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 7805 .access = PL1_R, .type = ARM_CP_CONST, 7806 .accessfn = access_aa64_tid3, 7807 .resetvalue = 0 }, 7808 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7809 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 7810 .access = PL1_R, .type = ARM_CP_CONST, 7811 .accessfn = access_aa64_tid3, 7812 .resetvalue = 0 }, 7813 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7814 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 7815 .access = PL1_R, .type = ARM_CP_CONST, 7816 .accessfn = access_aa64_tid3, 7817 .resetvalue = 0 }, 7818 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 7819 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 7820 .access = PL1_R, .type = ARM_CP_CONST, 7821 .accessfn = access_aa64_tid3, 7822 .resetvalue = cpu->isar.id_aa64mmfr0 }, 7823 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 7824 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 7825 .access = PL1_R, .type = ARM_CP_CONST, 7826 .accessfn = access_aa64_tid3, 7827 .resetvalue = cpu->isar.id_aa64mmfr1 }, 7828 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, 7829 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 7830 .access = PL1_R, .type = ARM_CP_CONST, 7831 .accessfn = access_aa64_tid3, 7832 .resetvalue = cpu->isar.id_aa64mmfr2 }, 7833 { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64, 7834 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 7835 .access = PL1_R, .type = ARM_CP_CONST, 7836 .accessfn = access_aa64_tid3, 7837 .resetvalue = cpu->isar.id_aa64mmfr3 }, 7838 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7839 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 7840 .access = PL1_R, .type = ARM_CP_CONST, 7841 .accessfn = access_aa64_tid3, 7842 .resetvalue = 0 }, 7843 { .name = 
"ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7844 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 7845 .access = PL1_R, .type = ARM_CP_CONST, 7846 .accessfn = access_aa64_tid3, 7847 .resetvalue = 0 }, 7848 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7849 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 7850 .access = PL1_R, .type = ARM_CP_CONST, 7851 .accessfn = access_aa64_tid3, 7852 .resetvalue = 0 }, 7853 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7854 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 7855 .access = PL1_R, .type = ARM_CP_CONST, 7856 .accessfn = access_aa64_tid3, 7857 .resetvalue = 0 }, 7858 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 7859 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 7860 .access = PL1_R, .type = ARM_CP_CONST, 7861 .accessfn = access_aa64_tid3, 7862 .resetvalue = cpu->isar.mvfr0 }, 7863 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 7864 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 7865 .access = PL1_R, .type = ARM_CP_CONST, 7866 .accessfn = access_aa64_tid3, 7867 .resetvalue = cpu->isar.mvfr1 }, 7868 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 7869 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 7870 .access = PL1_R, .type = ARM_CP_CONST, 7871 .accessfn = access_aa64_tid3, 7872 .resetvalue = cpu->isar.mvfr2 }, 7873 /* 7874 * "0, c0, c3, {0,1,2}" are the encodings corresponding to 7875 * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding 7876 * as RAZ, since it is in the "reserved for future ID 7877 * registers, RAZ" part of the AArch32 encoding space. 7878 */ 7879 { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32, 7880 .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 7881 .access = PL1_R, .type = ARM_CP_CONST, 7882 .accessfn = access_aa64_tid3, 7883 .resetvalue = 0 }, 7884 { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32, 7885 .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 7886 .access = PL1_R, .type = ARM_CP_CONST, 7887 .accessfn = access_aa64_tid3, 7888 .resetvalue = 0 }, 7889 { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32, 7890 .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 7891 .access = PL1_R, .type = ARM_CP_CONST, 7892 .accessfn = access_aa64_tid3, 7893 .resetvalue = 0 }, 7894 /* 7895 * Other encodings in "0, c0, c3, ..." are STATE_BOTH because 7896 * they're also RAZ for AArch64, and in v8 are gradually 7897 * being filled with AArch64-view-of-AArch32-ID-register 7898 * for new ID registers. 
7899 */ 7900 { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH, 7901 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, 7902 .access = PL1_R, .type = ARM_CP_CONST, 7903 .accessfn = access_aa64_tid3, 7904 .resetvalue = 0 }, 7905 { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH, 7906 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 7907 .access = PL1_R, .type = ARM_CP_CONST, 7908 .accessfn = access_aa64_tid3, 7909 .resetvalue = cpu->isar.id_pfr2 }, 7910 { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH, 7911 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 7912 .access = PL1_R, .type = ARM_CP_CONST, 7913 .accessfn = access_aa64_tid3, 7914 .resetvalue = cpu->isar.id_dfr1 }, 7915 { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH, 7916 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 7917 .access = PL1_R, .type = ARM_CP_CONST, 7918 .accessfn = access_aa64_tid3, 7919 .resetvalue = cpu->isar.id_mmfr5 }, 7920 { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH, 7921 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 7922 .access = PL1_R, .type = ARM_CP_CONST, 7923 .accessfn = access_aa64_tid3, 7924 .resetvalue = 0 }, 7925 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 7926 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 7927 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7928 .fgt = FGT_PMCEIDN_EL0, 7929 .resetvalue = extract64(cpu->pmceid0, 0, 32) }, 7930 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 7931 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 7932 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7933 .fgt = FGT_PMCEIDN_EL0, 7934 .resetvalue = cpu->pmceid0 }, 7935 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 7936 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 7937 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7938 .fgt = FGT_PMCEIDN_EL0, 7939 .resetvalue = extract64(cpu->pmceid1, 0, 32) }, 7940 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 7941 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 7942 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7943 .fgt = FGT_PMCEIDN_EL0, 7944 .resetvalue = cpu->pmceid1 }, 7945 }; 7946 #ifdef CONFIG_USER_ONLY 7947 static const ARMCPRegUserSpaceInfo v8_user_idregs[] = { 7948 { .name = "ID_AA64PFR0_EL1", 7949 .exported_bits = R_ID_AA64PFR0_FP_MASK | 7950 R_ID_AA64PFR0_ADVSIMD_MASK | 7951 R_ID_AA64PFR0_SVE_MASK | 7952 R_ID_AA64PFR0_DIT_MASK, 7953 .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) | 7954 (0x1u << R_ID_AA64PFR0_EL1_SHIFT) }, 7955 { .name = "ID_AA64PFR1_EL1", 7956 .exported_bits = R_ID_AA64PFR1_BT_MASK | 7957 R_ID_AA64PFR1_SSBS_MASK | 7958 R_ID_AA64PFR1_MTE_MASK | 7959 R_ID_AA64PFR1_SME_MASK }, 7960 { .name = "ID_AA64PFR*_EL1_RESERVED", 7961 .is_glob = true }, 7962 { .name = "ID_AA64ZFR0_EL1", 7963 .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK | 7964 R_ID_AA64ZFR0_AES_MASK | 7965 R_ID_AA64ZFR0_BITPERM_MASK | 7966 R_ID_AA64ZFR0_BFLOAT16_MASK | 7967 R_ID_AA64ZFR0_B16B16_MASK | 7968 R_ID_AA64ZFR0_SHA3_MASK | 7969 R_ID_AA64ZFR0_SM4_MASK | 7970 R_ID_AA64ZFR0_I8MM_MASK | 7971 R_ID_AA64ZFR0_F32MM_MASK | 7972 R_ID_AA64ZFR0_F64MM_MASK }, 7973 { .name = "ID_AA64SMFR0_EL1", 7974 .exported_bits = R_ID_AA64SMFR0_F32F32_MASK | 7975 R_ID_AA64SMFR0_BI32I32_MASK | 7976 R_ID_AA64SMFR0_B16F32_MASK | 7977 R_ID_AA64SMFR0_F16F32_MASK | 7978 R_ID_AA64SMFR0_I8I32_MASK | 7979 R_ID_AA64SMFR0_F16F16_MASK | 7980 R_ID_AA64SMFR0_B16B16_MASK | 7981 R_ID_AA64SMFR0_I16I32_MASK | 7982 R_ID_AA64SMFR0_F64F64_MASK | 7983 R_ID_AA64SMFR0_I16I64_MASK | 7984 
R_ID_AA64SMFR0_SMEVER_MASK | 7985 R_ID_AA64SMFR0_FA64_MASK }, 7986 { .name = "ID_AA64MMFR0_EL1", 7987 .exported_bits = R_ID_AA64MMFR0_ECV_MASK, 7988 .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) | 7989 (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) }, 7990 { .name = "ID_AA64MMFR1_EL1", 7991 .exported_bits = R_ID_AA64MMFR1_AFP_MASK }, 7992 { .name = "ID_AA64MMFR2_EL1", 7993 .exported_bits = R_ID_AA64MMFR2_AT_MASK }, 7994 { .name = "ID_AA64MMFR3_EL1", 7995 .exported_bits = 0 }, 7996 { .name = "ID_AA64MMFR*_EL1_RESERVED", 7997 .is_glob = true }, 7998 { .name = "ID_AA64DFR0_EL1", 7999 .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) }, 8000 { .name = "ID_AA64DFR1_EL1" }, 8001 { .name = "ID_AA64DFR*_EL1_RESERVED", 8002 .is_glob = true }, 8003 { .name = "ID_AA64AFR*", 8004 .is_glob = true }, 8005 { .name = "ID_AA64ISAR0_EL1", 8006 .exported_bits = R_ID_AA64ISAR0_AES_MASK | 8007 R_ID_AA64ISAR0_SHA1_MASK | 8008 R_ID_AA64ISAR0_SHA2_MASK | 8009 R_ID_AA64ISAR0_CRC32_MASK | 8010 R_ID_AA64ISAR0_ATOMIC_MASK | 8011 R_ID_AA64ISAR0_RDM_MASK | 8012 R_ID_AA64ISAR0_SHA3_MASK | 8013 R_ID_AA64ISAR0_SM3_MASK | 8014 R_ID_AA64ISAR0_SM4_MASK | 8015 R_ID_AA64ISAR0_DP_MASK | 8016 R_ID_AA64ISAR0_FHM_MASK | 8017 R_ID_AA64ISAR0_TS_MASK | 8018 R_ID_AA64ISAR0_RNDR_MASK }, 8019 { .name = "ID_AA64ISAR1_EL1", 8020 .exported_bits = R_ID_AA64ISAR1_DPB_MASK | 8021 R_ID_AA64ISAR1_APA_MASK | 8022 R_ID_AA64ISAR1_API_MASK | 8023 R_ID_AA64ISAR1_JSCVT_MASK | 8024 R_ID_AA64ISAR1_FCMA_MASK | 8025 R_ID_AA64ISAR1_LRCPC_MASK | 8026 R_ID_AA64ISAR1_GPA_MASK | 8027 R_ID_AA64ISAR1_GPI_MASK | 8028 R_ID_AA64ISAR1_FRINTTS_MASK | 8029 R_ID_AA64ISAR1_SB_MASK | 8030 R_ID_AA64ISAR1_BF16_MASK | 8031 R_ID_AA64ISAR1_DGH_MASK | 8032 R_ID_AA64ISAR1_I8MM_MASK }, 8033 { .name = "ID_AA64ISAR2_EL1", 8034 .exported_bits = R_ID_AA64ISAR2_WFXT_MASK | 8035 R_ID_AA64ISAR2_RPRES_MASK | 8036 R_ID_AA64ISAR2_GPA3_MASK | 8037 R_ID_AA64ISAR2_APA3_MASK | 8038 R_ID_AA64ISAR2_MOPS_MASK | 8039 R_ID_AA64ISAR2_BC_MASK | 8040 R_ID_AA64ISAR2_RPRFM_MASK | 8041 R_ID_AA64ISAR2_CSSC_MASK }, 8042 { .name = "ID_AA64ISAR*_EL1_RESERVED", 8043 .is_glob = true }, 8044 }; 8045 modify_arm_cp_regs(v8_idregs, v8_user_idregs); 8046 #endif 8047 /* 8048 * RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL. 8049 * TODO: For RMR, a write with bit 1 set should do something with 8050 * cpu_reset(). In the meantime, "the bit is strictly a request", 8051 * so we are in spec just ignoring writes. 8052 */ 8053 if (!arm_feature(env, ARM_FEATURE_EL3) && 8054 !arm_feature(env, ARM_FEATURE_EL2)) { 8055 ARMCPRegInfo el1_reset_regs[] = { 8056 { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH, 8057 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 8058 .access = PL1_R, 8059 .fieldoffset = offsetof(CPUARMState, cp15.rvbar) }, 8060 { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH, 8061 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2, 8062 .access = PL1_RW, .type = ARM_CP_CONST, 8063 .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) } 8064 }; 8065 define_arm_cp_regs(cpu, el1_reset_regs); 8066 } 8067 define_arm_cp_regs(cpu, v8_idregs); 8068 define_arm_cp_regs(cpu, v8_cp_reginfo); 8069 if (cpu_isar_feature(aa64_aa32_el1, cpu)) { 8070 define_arm_cp_regs(cpu, v8_aa32_el1_reginfo); 8071 } 8072 8073 for (i = 4; i < 16; i++) { 8074 /* 8075 * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32. 8076 * For pre-v8 cores there are RAZ patterns for these in 8077 * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here. 
8078 * v8 extends the "must RAZ" part of the ID register space 8079 * to also cover c0, 0, c{8-15}, {0-7}. 8080 * These are STATE_AA32 because in the AArch64 sysreg space 8081 * c4-c7 is where the AArch64 ID registers live (and we've 8082 * already defined those in v8_idregs[]), and c8-c15 are not 8083 * "must RAZ" for AArch64. 8084 */ 8085 g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i); 8086 ARMCPRegInfo v8_aa32_raz_idregs = { 8087 .name = name, 8088 .state = ARM_CP_STATE_AA32, 8089 .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY, 8090 .access = PL1_R, .type = ARM_CP_CONST, 8091 .accessfn = access_aa64_tid3, 8092 .resetvalue = 0 }; 8093 define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs); 8094 } 8095 } 8096 8097 /* 8098 * Register the base EL2 cpregs. 8099 * Pre v8, these registers are implemented only as part of the 8100 * Virtualization Extensions (EL2 present). Beginning with v8, 8101 * if EL2 is missing but EL3 is enabled, mostly these become 8102 * RES0 from EL3, with some specific exceptions. 8103 */ 8104 if (arm_feature(env, ARM_FEATURE_EL2) 8105 || (arm_feature(env, ARM_FEATURE_EL3) 8106 && arm_feature(env, ARM_FEATURE_V8))) { 8107 uint64_t vmpidr_def = mpidr_read_val(env); 8108 ARMCPRegInfo vpidr_regs[] = { 8109 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 8110 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 8111 .access = PL2_RW, .accessfn = access_el3_aa32ns, 8112 .resetvalue = cpu->midr, 8113 .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ, 8114 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 8115 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 8116 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 8117 .access = PL2_RW, .resetvalue = cpu->midr, 8118 .type = ARM_CP_EL3_NO_EL2_C_NZ, 8119 .nv2_redirect_offset = 0x88, 8120 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 8121 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 8122 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 8123 .access = PL2_RW, .accessfn = access_el3_aa32ns, 8124 .resetvalue = vmpidr_def, 8125 .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ, 8126 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 8127 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 8128 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 8129 .access = PL2_RW, .resetvalue = vmpidr_def, 8130 .type = ARM_CP_EL3_NO_EL2_C_NZ, 8131 .nv2_redirect_offset = 0x50, 8132 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 8133 }; 8134 /* 8135 * The only field of MDCR_EL2 that has a defined architectural reset 8136 * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N. 8137 */ 8138 ARMCPRegInfo mdcr_el2 = { 8139 .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO, 8140 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 8141 .writefn = mdcr_el2_write, 8142 .access = PL2_RW, .resetvalue = pmu_num_counters(env), 8143 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), 8144 }; 8145 define_one_arm_cp_reg(cpu, &mdcr_el2); 8146 define_arm_cp_regs(cpu, vpidr_regs); 8147 define_arm_cp_regs(cpu, el2_cp_reginfo); 8148 if (arm_feature(env, ARM_FEATURE_V8)) { 8149 define_arm_cp_regs(cpu, el2_v8_cp_reginfo); 8150 } 8151 if (cpu_isar_feature(aa64_sel2, cpu)) { 8152 define_arm_cp_regs(cpu, el2_sec_cp_reginfo); 8153 } 8154 /* 8155 * RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL. 8156 * See commentary near RMR_EL1. 
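* As there, RMR_EL2 is implemented as a constant, so reset-request
* writes are ignored; the reset value of 1 is the AA64 bit.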
8157 */ 8158 if (!arm_feature(env, ARM_FEATURE_EL3)) { 8159 static const ARMCPRegInfo el2_reset_regs[] = { 8160 { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 8161 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 8162 .access = PL2_R, 8163 .fieldoffset = offsetof(CPUARMState, cp15.rvbar) }, 8164 { .name = "RVBAR", .type = ARM_CP_ALIAS, 8165 .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 8166 .access = PL2_R, 8167 .fieldoffset = offsetof(CPUARMState, cp15.rvbar) }, 8168 { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64, 8169 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2, 8170 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 }, 8171 }; 8172 define_arm_cp_regs(cpu, el2_reset_regs); 8173 } 8174 } 8175 8176 /* Register the base EL3 cpregs. */ 8177 if (arm_feature(env, ARM_FEATURE_EL3)) { 8178 define_arm_cp_regs(cpu, el3_cp_reginfo); 8179 ARMCPRegInfo el3_regs[] = { 8180 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 8181 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 8182 .access = PL3_R, 8183 .fieldoffset = offsetof(CPUARMState, cp15.rvbar), }, 8184 { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64, 8185 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2, 8186 .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 }, 8187 { .name = "RMR", .state = ARM_CP_STATE_AA32, 8188 .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2, 8189 .access = PL3_RW, .type = ARM_CP_CONST, 8190 .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }, 8191 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 8192 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 8193 .access = PL3_RW, 8194 .raw_writefn = raw_write, .writefn = sctlr_write, 8195 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 8196 .resetvalue = cpu->reset_sctlr }, 8197 }; 8198 8199 define_arm_cp_regs(cpu, el3_regs); 8200 } 8201 /* 8202 * The behaviour of NSACR is sufficiently various that we don't 8203 * try to describe it in a single reginfo: 8204 * if EL3 is 64 bit, then trap to EL3 from S EL1, 8205 * reads as constant 0xc00 from NS EL1 and NS EL2 8206 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 8207 * if v7 without EL3, register doesn't exist 8208 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 8209 */ 8210 if (arm_feature(env, ARM_FEATURE_EL3)) { 8211 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 8212 static const ARMCPRegInfo nsacr = { 8213 .name = "NSACR", .type = ARM_CP_CONST, 8214 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 8215 .access = PL1_RW, .accessfn = nsacr_access, 8216 .resetvalue = 0xc00 8217 }; 8218 define_one_arm_cp_reg(cpu, &nsacr); 8219 } else { 8220 static const ARMCPRegInfo nsacr = { 8221 .name = "NSACR", 8222 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 8223 .access = PL3_RW | PL1_R, 8224 .resetvalue = 0, 8225 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 8226 }; 8227 define_one_arm_cp_reg(cpu, &nsacr); 8228 } 8229 } else { 8230 if (arm_feature(env, ARM_FEATURE_V8)) { 8231 static const ARMCPRegInfo nsacr = { 8232 .name = "NSACR", .type = ARM_CP_CONST, 8233 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 8234 .access = PL1_R, 8235 .resetvalue = 0xc00 8236 }; 8237 define_one_arm_cp_reg(cpu, &nsacr); 8238 } 8239 } 8240 8241 if (arm_feature(env, ARM_FEATURE_PMSA)) { 8242 if (arm_feature(env, ARM_FEATURE_V6)) { 8243 /* PMSAv6 not implemented */ 8244 assert(arm_feature(env, ARM_FEATURE_V7)); 8245 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 8246 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 8247 } else { 8248 define_arm_cp_regs(cpu, 
pmsav5_cp_reginfo); 8249 } 8250 } else { 8251 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 8252 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 8253 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */ 8254 if (cpu_isar_feature(aa32_hpd, cpu)) { 8255 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); 8256 } 8257 } 8258 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 8259 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 8260 } 8261 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 8262 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 8263 } 8264 if (cpu_isar_feature(aa64_ecv_traps, cpu)) { 8265 define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo); 8266 } 8267 #ifndef CONFIG_USER_ONLY 8268 if (cpu_isar_feature(aa64_ecv, cpu)) { 8269 define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo); 8270 } 8271 #endif 8272 if (arm_feature(env, ARM_FEATURE_VAPA)) { 8273 ARMCPRegInfo vapa_cp_reginfo[] = { 8274 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 8275 .access = PL1_RW, .resetvalue = 0, 8276 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 8277 offsetoflow32(CPUARMState, cp15.par_ns) }, 8278 .writefn = par_write}, 8279 #ifndef CONFIG_USER_ONLY 8280 /* This underdecoding is safe because the reginfo is NO_RAW. */ 8281 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 8282 .access = PL1_W, .accessfn = ats_access, 8283 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 8284 #endif 8285 }; 8286 8287 /* 8288 * When LPAE exists this 32-bit PAR register is an alias of the 8289 * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[] 8290 */ 8291 if (arm_feature(env, ARM_FEATURE_LPAE)) { 8292 vapa_cp_reginfo[0].type = ARM_CP_ALIAS | ARM_CP_NO_GDB; 8293 } 8294 define_arm_cp_regs(cpu, vapa_cp_reginfo); 8295 } 8296 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 8297 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 8298 } 8299 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 8300 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 8301 } 8302 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 8303 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 8304 } 8305 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 8306 define_arm_cp_regs(cpu, omap_cp_reginfo); 8307 } 8308 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 8309 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 8310 } 8311 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 8312 define_arm_cp_regs(cpu, xscale_cp_reginfo); 8313 } 8314 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 8315 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 8316 } 8317 if (arm_feature(env, ARM_FEATURE_LPAE)) { 8318 define_arm_cp_regs(cpu, lpae_cp_reginfo); 8319 } 8320 if (cpu_isar_feature(aa32_jazelle, cpu)) { 8321 define_arm_cp_regs(cpu, jazelle_regs); 8322 } 8323 /* 8324 * Slightly awkwardly, the OMAP and StrongARM cores need all of 8325 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 8326 * be read-only (ie write causes UNDEF exception). 8327 */ 8328 { 8329 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 8330 /* 8331 * Pre-v8 MIDR space. 8332 * Note that the MIDR isn't a simple constant register because 8333 * of the TI925 behaviour where writes to another register can 8334 * cause the MIDR value to change. 8335 * 8336 * Unimplemented registers in the c15 0 0 0 space default to 8337 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 8338 * and friends override accordingly.
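 * For example, until the CTR and TCMTR entries in id_cp_reginfo[] are
 * registered later, an access to 0, c0, c0, 1 or 0, c0, c0, 2 decodes to
 * the opc2 = CP_ANY MIDR entry below and therefore reads back as MIDR.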
8339 */ 8340 { .name = "MIDR", 8341 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 8342 .access = PL1_R, .resetvalue = cpu->midr, 8343 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 8344 .readfn = midr_read, 8345 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 8346 .type = ARM_CP_OVERRIDE }, 8347 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */ 8348 { .name = "DUMMY", 8349 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 8350 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8351 { .name = "DUMMY", 8352 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 8353 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8354 { .name = "DUMMY", 8355 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 8356 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8357 { .name = "DUMMY", 8358 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 8359 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8360 { .name = "DUMMY", 8361 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 8362 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8363 }; 8364 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 8365 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 8366 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 8367 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 8368 .fgt = FGT_MIDR_EL1, 8369 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 8370 .readfn = midr_read }, 8371 /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */ 8372 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 8373 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 8374 .access = PL1_R, .resetvalue = cpu->midr }, 8375 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 8376 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 8377 .access = PL1_R, 8378 .accessfn = access_aa64_tid1, 8379 .fgt = FGT_REVIDR_EL1, 8380 .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 8381 }; 8382 ARMCPRegInfo id_v8_midr_alias_cp_reginfo = { 8383 .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB, 8384 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 8385 .access = PL1_R, .resetvalue = cpu->midr 8386 }; 8387 ARMCPRegInfo id_cp_reginfo[] = { 8388 /* These are common to v8 and pre-v8 */ 8389 { .name = "CTR", 8390 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 8391 .access = PL1_R, .accessfn = ctr_el0_access, 8392 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 8393 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 8394 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 8395 .access = PL0_R, .accessfn = ctr_el0_access, 8396 .fgt = FGT_CTR_EL0, 8397 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 8398 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 8399 { .name = "TCMTR", 8400 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 8401 .access = PL1_R, 8402 .accessfn = access_aa32_tid1, 8403 .type = ARM_CP_CONST, .resetvalue = 0 }, 8404 }; 8405 /* TLBTR is specific to VMSA */ 8406 ARMCPRegInfo id_tlbtr_reginfo = { 8407 .name = "TLBTR", 8408 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 8409 .access = PL1_R, 8410 .accessfn = access_aa32_tid1, 8411 .type = ARM_CP_CONST, .resetvalue = 0, 8412 }; 8413 /* MPUIR is specific to PMSA V6+ */ 8414 ARMCPRegInfo id_mpuir_reginfo = { 8415 .name = "MPUIR", 8416 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 8417 .access = PL1_R, .type = ARM_CP_CONST, 8418 .resetvalue = cpu->pmsav7_dregion << 8 8419 }; 8420 /* HMPUIR is specific to PMSA V8 */ 8421 
ARMCPRegInfo id_hmpuir_reginfo = { 8422 .name = "HMPUIR", 8423 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4, 8424 .access = PL2_R, .type = ARM_CP_CONST, 8425 .resetvalue = cpu->pmsav8r_hdregion 8426 }; 8427 static const ARMCPRegInfo crn0_wi_reginfo = { 8428 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 8429 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 8430 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 8431 }; 8432 #ifdef CONFIG_USER_ONLY 8433 static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { 8434 { .name = "MIDR_EL1", 8435 .exported_bits = R_MIDR_EL1_REVISION_MASK | 8436 R_MIDR_EL1_PARTNUM_MASK | 8437 R_MIDR_EL1_ARCHITECTURE_MASK | 8438 R_MIDR_EL1_VARIANT_MASK | 8439 R_MIDR_EL1_IMPLEMENTER_MASK }, 8440 { .name = "REVIDR_EL1" }, 8441 }; 8442 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); 8443 #endif 8444 if (arm_feature(env, ARM_FEATURE_OMAPCP) || 8445 arm_feature(env, ARM_FEATURE_STRONGARM)) { 8446 size_t i; 8447 /* 8448 * Register the blanket "writes ignored" value first to cover the 8449 * whole space. Then update the specific ID registers to allow write 8450 * access, so that they ignore writes rather than causing them to 8451 * UNDEF. 8452 */ 8453 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 8454 for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) { 8455 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW; 8456 } 8457 for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) { 8458 id_cp_reginfo[i].access = PL1_RW; 8459 } 8460 id_mpuir_reginfo.access = PL1_RW; 8461 id_tlbtr_reginfo.access = PL1_RW; 8462 } 8463 if (arm_feature(env, ARM_FEATURE_V8)) { 8464 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 8465 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 8466 define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo); 8467 } 8468 } else { 8469 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 8470 } 8471 define_arm_cp_regs(cpu, id_cp_reginfo); 8472 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 8473 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 8474 } else if (arm_feature(env, ARM_FEATURE_PMSA) && 8475 arm_feature(env, ARM_FEATURE_V8)) { 8476 uint32_t i = 0; 8477 char *tmp_string; 8478 8479 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 8480 define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo); 8481 define_arm_cp_regs(cpu, pmsav8r_cp_reginfo); 8482 8483 /* Register alias is only valid for first 32 indexes */ 8484 for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) { 8485 uint8_t crm = 0b1000 | extract32(i, 1, 3); 8486 uint8_t opc1 = extract32(i, 4, 1); 8487 uint8_t opc2 = extract32(i, 0, 1) << 2; 8488 8489 tmp_string = g_strdup_printf("PRBAR%u", i); 8490 ARMCPRegInfo tmp_prbarn_reginfo = { 8491 .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW, 8492 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, 8493 .access = PL1_RW, .resetvalue = 0, 8494 .accessfn = access_tvm_trvm, 8495 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read 8496 }; 8497 define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo); 8498 g_free(tmp_string); 8499 8500 opc2 = extract32(i, 0, 1) << 2 | 0x1; 8501 tmp_string = g_strdup_printf("PRLAR%u", i); 8502 ARMCPRegInfo tmp_prlarn_reginfo = { 8503 .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW, 8504 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, 8505 .access = PL1_RW, .resetvalue = 0, 8506 .accessfn = access_tvm_trvm, 8507 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read 8508 }; 8509 define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo); 8510 g_free(tmp_string); 8511 } 8512 8513 /* Register alias is only 
valid for first 32 indexes */ 8514 for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) { 8515 uint8_t crm = 0b1000 | extract32(i, 1, 3); 8516 uint8_t opc1 = 0b100 | extract32(i, 4, 1); 8517 uint8_t opc2 = extract32(i, 0, 1) << 2; 8518 8519 tmp_string = g_strdup_printf("HPRBAR%u", i); 8520 ARMCPRegInfo tmp_hprbarn_reginfo = { 8521 .name = tmp_string, 8522 .type = ARM_CP_NO_RAW, 8523 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, 8524 .access = PL2_RW, .resetvalue = 0, 8525 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read 8526 }; 8527 define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo); 8528 g_free(tmp_string); 8529 8530 opc2 = extract32(i, 0, 1) << 2 | 0x1; 8531 tmp_string = g_strdup_printf("HPRLAR%u", i); 8532 ARMCPRegInfo tmp_hprlarn_reginfo = { 8533 .name = tmp_string, 8534 .type = ARM_CP_NO_RAW, 8535 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, 8536 .access = PL2_RW, .resetvalue = 0, 8537 .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read 8538 }; 8539 define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo); 8540 g_free(tmp_string); 8541 } 8542 } else if (arm_feature(env, ARM_FEATURE_V7)) { 8543 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 8544 } 8545 } 8546 8547 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 8548 ARMCPRegInfo mpidr_cp_reginfo[] = { 8549 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, 8550 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 8551 .fgt = FGT_MPIDR_EL1, 8552 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 8553 }; 8554 #ifdef CONFIG_USER_ONLY 8555 static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { 8556 { .name = "MPIDR_EL1", 8557 .fixed_bits = 0x0000000080000000 }, 8558 }; 8559 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); 8560 #endif 8561 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 8562 } 8563 8564 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 8565 ARMCPRegInfo auxcr_reginfo[] = { 8566 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 8567 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 8568 .access = PL1_RW, .accessfn = access_tacr, 8569 .nv2_redirect_offset = 0x118, 8570 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, 8571 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 8572 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 8573 .access = PL2_RW, .type = ARM_CP_CONST, 8574 .resetvalue = 0 }, 8575 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 8576 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 8577 .access = PL3_RW, .type = ARM_CP_CONST, 8578 .resetvalue = 0 }, 8579 }; 8580 define_arm_cp_regs(cpu, auxcr_reginfo); 8581 if (cpu_isar_feature(aa32_ac2, cpu)) { 8582 define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo); 8583 } 8584 } 8585 8586 if (arm_feature(env, ARM_FEATURE_CBAR)) { 8587 /* 8588 * CBAR is IMPDEF, but common on Arm Cortex-A implementations. 8589 * There are two flavours: 8590 * (1) older 32-bit only cores have a simple 32-bit CBAR 8591 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a 8592 * 32-bit register visible to AArch32 at a different encoding 8593 * to the "flavour 1" register and with the bits rearranged to 8594 * be able to squash a 64-bit address into the 32-bit view. 8595 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but 8596 * in future if we support AArch32-only configs of some of the 8597 * AArch64 cores we might need to add a specific feature flag 8598 * to indicate cores with "flavour 2" CBAR. 8599 */ 8600 if (arm_feature(env, ARM_FEATURE_V8)) { 8601 /* 32 bit view is [31:18] 0...0 [43:32]. 
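 * As a worked example: a (hypothetical) reset_cbar of 0x0abc_4004_0000
 * would give cbar32 == 0x40040abc below, i.e. bits [31:18] of the low
 * word pass through unchanged and bits [43:32] of the 64-bit address
 * are packed into bits [11:0].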
*/ 8602 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 8603 | extract64(cpu->reset_cbar, 32, 12); 8604 ARMCPRegInfo cbar_reginfo[] = { 8605 { .name = "CBAR", 8606 .type = ARM_CP_CONST, 8607 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, 8608 .access = PL1_R, .resetvalue = cbar32 }, 8609 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 8610 .type = ARM_CP_CONST, 8611 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 8612 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 8613 }; 8614 /* We don't implement a r/w 64 bit CBAR currently */ 8615 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 8616 define_arm_cp_regs(cpu, cbar_reginfo); 8617 } else { 8618 ARMCPRegInfo cbar = { 8619 .name = "CBAR", 8620 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 8621 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar, 8622 .fieldoffset = offsetof(CPUARMState, 8623 cp15.c15_config_base_address) 8624 }; 8625 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 8626 cbar.access = PL1_R; 8627 cbar.fieldoffset = 0; 8628 cbar.type = ARM_CP_CONST; 8629 } 8630 define_one_arm_cp_reg(cpu, &cbar); 8631 } 8632 } 8633 8634 if (arm_feature(env, ARM_FEATURE_VBAR)) { 8635 static const ARMCPRegInfo vbar_cp_reginfo[] = { 8636 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 8637 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 8638 .access = PL1_RW, .writefn = vbar_write, 8639 .accessfn = access_nv1, 8640 .fgt = FGT_VBAR_EL1, 8641 .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1, 8642 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 8643 offsetof(CPUARMState, cp15.vbar_ns) }, 8644 .resetvalue = 0 }, 8645 }; 8646 define_arm_cp_regs(cpu, vbar_cp_reginfo); 8647 } 8648 8649 /* Generic registers whose values depend on the implementation */ 8650 { 8651 ARMCPRegInfo sctlr = { 8652 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 8653 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 8654 .access = PL1_RW, .accessfn = access_tvm_trvm, 8655 .fgt = FGT_SCTLR_EL1, 8656 .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1, 8657 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 8658 offsetof(CPUARMState, cp15.sctlr_ns) }, 8659 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 8660 .raw_writefn = raw_write, 8661 }; 8662 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 8663 /* 8664 * Normally we would always end the TB on an SCTLR write, but Linux 8665 * arch/arm/mach-pxa/sleep.S expects two instructions following 8666 * an MMU enable to execute from cache. Imitate this behaviour. 
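 * Setting ARM_CP_SUPPRESS_TB_END below keeps the translator from ending
 * the TB after the SCTLR write, so the following insns in the current TB
 * still run, which is what stands in for "execute from cache" here.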
8667 */ 8668 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 8669 } 8670 define_one_arm_cp_reg(cpu, &sctlr); 8671 8672 if (arm_feature(env, ARM_FEATURE_PMSA) && 8673 arm_feature(env, ARM_FEATURE_V8)) { 8674 ARMCPRegInfo vsctlr = { 8675 .name = "VSCTLR", .state = ARM_CP_STATE_AA32, 8676 .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 8677 .access = PL2_RW, .resetvalue = 0x0, 8678 .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr), 8679 }; 8680 define_one_arm_cp_reg(cpu, &vsctlr); 8681 } 8682 } 8683 8684 if (cpu_isar_feature(aa64_lor, cpu)) { 8685 define_arm_cp_regs(cpu, lor_reginfo); 8686 } 8687 if (cpu_isar_feature(aa64_pan, cpu)) { 8688 define_one_arm_cp_reg(cpu, &pan_reginfo); 8689 } 8690 #ifndef CONFIG_USER_ONLY 8691 if (cpu_isar_feature(aa64_ats1e1, cpu)) { 8692 define_arm_cp_regs(cpu, ats1e1_reginfo); 8693 } 8694 if (cpu_isar_feature(aa32_ats1e1, cpu)) { 8695 define_arm_cp_regs(cpu, ats1cp_reginfo); 8696 } 8697 #endif 8698 if (cpu_isar_feature(aa64_uao, cpu)) { 8699 define_one_arm_cp_reg(cpu, &uao_reginfo); 8700 } 8701 8702 if (cpu_isar_feature(aa64_dit, cpu)) { 8703 define_one_arm_cp_reg(cpu, &dit_reginfo); 8704 } 8705 if (cpu_isar_feature(aa64_ssbs, cpu)) { 8706 define_one_arm_cp_reg(cpu, &ssbs_reginfo); 8707 } 8708 if (cpu_isar_feature(any_ras, cpu)) { 8709 define_arm_cp_regs(cpu, minimal_ras_reginfo); 8710 } 8711 8712 if (cpu_isar_feature(aa64_vh, cpu) || 8713 cpu_isar_feature(aa64_debugv8p2, cpu)) { 8714 define_one_arm_cp_reg(cpu, &contextidr_el2); 8715 } 8716 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 8717 define_arm_cp_regs(cpu, vhe_reginfo); 8718 } 8719 8720 if (cpu_isar_feature(aa64_sve, cpu)) { 8721 define_arm_cp_regs(cpu, zcr_reginfo); 8722 } 8723 8724 if (cpu_isar_feature(aa64_hcx, cpu)) { 8725 define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo); 8726 } 8727 8728 #ifdef TARGET_AARCH64 8729 if (cpu_isar_feature(aa64_sme, cpu)) { 8730 define_arm_cp_regs(cpu, sme_reginfo); 8731 } 8732 if (cpu_isar_feature(aa64_pauth, cpu)) { 8733 define_arm_cp_regs(cpu, pauth_reginfo); 8734 } 8735 if (cpu_isar_feature(aa64_rndr, cpu)) { 8736 define_arm_cp_regs(cpu, rndr_reginfo); 8737 } 8738 /* Data Cache clean instructions up to PoP */ 8739 if (cpu_isar_feature(aa64_dcpop, cpu)) { 8740 define_one_arm_cp_reg(cpu, dcpop_reg); 8741 8742 if (cpu_isar_feature(aa64_dcpodp, cpu)) { 8743 define_one_arm_cp_reg(cpu, dcpodp_reg); 8744 } 8745 } 8746 8747 /* 8748 * If full MTE is enabled, add all of the system registers. 8749 * If only "instructions available at EL0" are enabled, 8750 * then define only a RAZ/WI version of PSTATE.TCO. 
8751 */ 8752 if (cpu_isar_feature(aa64_mte, cpu)) { 8753 ARMCPRegInfo gmid_reginfo = { 8754 .name = "GMID_EL1", .state = ARM_CP_STATE_AA64, 8755 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4, 8756 .access = PL1_R, .accessfn = access_aa64_tid5, 8757 .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize, 8758 }; 8759 define_one_arm_cp_reg(cpu, &gmid_reginfo); 8760 define_arm_cp_regs(cpu, mte_reginfo); 8761 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo); 8762 } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) { 8763 define_arm_cp_regs(cpu, mte_tco_ro_reginfo); 8764 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo); 8765 } 8766 8767 if (cpu_isar_feature(aa64_scxtnum, cpu)) { 8768 define_arm_cp_regs(cpu, scxtnum_reginfo); 8769 } 8770 8771 if (cpu_isar_feature(aa64_fgt, cpu)) { 8772 define_arm_cp_regs(cpu, fgt_reginfo); 8773 } 8774 8775 if (cpu_isar_feature(aa64_rme, cpu)) { 8776 define_arm_cp_regs(cpu, rme_reginfo); 8777 if (cpu_isar_feature(aa64_mte, cpu)) { 8778 define_arm_cp_regs(cpu, rme_mte_reginfo); 8779 } 8780 } 8781 8782 if (cpu_isar_feature(aa64_nv2, cpu)) { 8783 define_arm_cp_regs(cpu, nv2_reginfo); 8784 } 8785 8786 if (cpu_isar_feature(aa64_nmi, cpu)) { 8787 define_arm_cp_regs(cpu, nmi_reginfo); 8788 } 8789 #endif 8790 8791 if (cpu_isar_feature(any_predinv, cpu)) { 8792 define_arm_cp_regs(cpu, predinv_reginfo); 8793 } 8794 8795 if (cpu_isar_feature(any_ccidx, cpu)) { 8796 define_arm_cp_regs(cpu, ccsidr2_reginfo); 8797 } 8798 8799 #ifndef CONFIG_USER_ONLY 8800 /* 8801 * Register redirections and aliases must be done last, 8802 * after the registers from the other extensions have been defined. 8803 */ 8804 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 8805 define_arm_vh_e2h_redirects_aliases(cpu); 8806 } 8807 #endif 8808 } 8809 8810 /* 8811 * Private utility function for define_one_arm_cp_reg_with_opaque(): 8812 * add a single reginfo struct to the hash table. 8813 */ 8814 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 8815 void *opaque, CPState state, 8816 CPSecureState secstate, 8817 int crm, int opc1, int opc2, 8818 const char *name) 8819 { 8820 CPUARMState *env = &cpu->env; 8821 uint32_t key; 8822 ARMCPRegInfo *r2; 8823 bool is64 = r->type & ARM_CP_64BIT; 8824 bool ns = secstate & ARM_CP_SECSTATE_NS; 8825 int cp = r->cp; 8826 size_t name_len; 8827 bool make_const; 8828 8829 switch (state) { 8830 case ARM_CP_STATE_AA32: 8831 /* We assume it is a cp15 register if the .cp field is left unset. */ 8832 if (cp == 0 && r->state == ARM_CP_STATE_BOTH) { 8833 cp = 15; 8834 } 8835 key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2); 8836 break; 8837 case ARM_CP_STATE_AA64: 8838 /* 8839 * To allow abbreviation of ARMCPRegInfo definitions, we treat 8840 * cp == 0 as equivalent to the value for "standard guest-visible 8841 * sysreg". STATE_BOTH definitions are also always "standard sysreg" 8842 * in their AArch64 view (the .cp value may be non-zero for the 8843 * benefit of the AArch32 view). 8844 */ 8845 if (cp == 0 || r->state == ARM_CP_STATE_BOTH) { 8846 cp = CP_REG_ARM64_SYSREG_CP; 8847 } 8848 key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2); 8849 break; 8850 default: 8851 g_assert_not_reached(); 8852 } 8853 8854 /* Overriding of an existing definition must be explicitly requested. 
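 * For example, the wildcarded pre-v8 MIDR entry is registered with
 * ARM_CP_OVERRIDE precisely so that the more specific CTR and TCMTR
 * definitions added afterwards may legally replace the slots it claimed.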
*/ 8855 if (!(r->type & ARM_CP_OVERRIDE)) { 8856 const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key); 8857 if (oldreg) { 8858 assert(oldreg->type & ARM_CP_OVERRIDE); 8859 } 8860 } 8861 8862 /* 8863 * Eliminate registers that are not present because the EL is missing. 8864 * Doing this here makes it easier to put all registers for a given 8865 * feature into the same ARMCPRegInfo array and define them all at once. 8866 */ 8867 make_const = false; 8868 if (arm_feature(env, ARM_FEATURE_EL3)) { 8869 /* 8870 * An EL2 register without EL2 but with EL3 is (usually) RES0. 8871 * See rule RJFFP in section D1.1.3 of DDI0487H.a. 8872 */ 8873 int min_el = ctz32(r->access) / 2; 8874 if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) { 8875 if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) { 8876 return; 8877 } 8878 make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP); 8879 } 8880 } else { 8881 CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2) 8882 ? PL2_RW : PL1_RW); 8883 if ((r->access & max_el) == 0) { 8884 return; 8885 } 8886 } 8887 8888 /* Combine cpreg and name into one allocation. */ 8889 name_len = strlen(name) + 1; 8890 r2 = g_malloc(sizeof(*r2) + name_len); 8891 *r2 = *r; 8892 r2->name = memcpy(r2 + 1, name, name_len); 8893 8894 /* 8895 * Update fields to match the instantiation, overwriting wildcards 8896 * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH. 8897 */ 8898 r2->cp = cp; 8899 r2->crm = crm; 8900 r2->opc1 = opc1; 8901 r2->opc2 = opc2; 8902 r2->state = state; 8903 r2->secure = secstate; 8904 if (opaque) { 8905 r2->opaque = opaque; 8906 } 8907 8908 if (make_const) { 8909 /* This should not have been a very special register to begin with. */ 8910 int old_special = r2->type & ARM_CP_SPECIAL_MASK; 8911 assert(old_special == 0 || old_special == ARM_CP_NOP); 8912 /* 8913 * Set the special function to CONST, retaining the other flags. 8914 * This is important for e.g. ARM_CP_SVE so that we still 8915 * take the SVE trap if CPTR_EL3.EZ == 0. 8916 */ 8917 r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST; 8918 /* 8919 * Usually, these registers become RES0, but there are a few 8920 * special cases like VPIDR_EL2 which have a constant non-zero 8921 * value with writes ignored. 8922 */ 8923 if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) { 8924 r2->resetvalue = 0; 8925 } 8926 /* 8927 * ARM_CP_CONST has precedence, so removing the callbacks and 8928 * offsets is not strictly necessary, but it is potentially 8929 * less confusing to debug later. 8930 */ 8931 r2->readfn = NULL; 8932 r2->writefn = NULL; 8933 r2->raw_readfn = NULL; 8934 r2->raw_writefn = NULL; 8935 r2->resetfn = NULL; 8936 r2->fieldoffset = 0; 8937 r2->bank_fieldoffsets[0] = 0; 8938 r2->bank_fieldoffsets[1] = 0; 8939 } else { 8940 bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]; 8941 8942 if (isbanked) { 8943 /* 8944 * Register is banked (using both entries in array). 8945 * Overwriting fieldoffset as the array is only used to define 8946 * banked registers but later only fieldoffset is used. 8947 */ 8948 r2->fieldoffset = r->bank_fieldoffsets[ns]; 8949 } 8950 if (state == ARM_CP_STATE_AA32) { 8951 if (isbanked) { 8952 /* 8953 * If the register is banked then we don't need to migrate or 8954 * reset the 32-bit instance in certain cases: 8955 * 8956 * 1) If the register has both 32-bit and 64-bit instances 8957 * then we can count on the 64-bit instance taking care 8958 * of the non-secure bank.
8959 * 2) If ARMv8 is enabled then we can count on a 64-bit 8960 * version taking care of the secure bank. This requires 8961 * that separate 32 and 64-bit definitions are provided. 8962 */ 8963 if ((r->state == ARM_CP_STATE_BOTH && ns) || 8964 (arm_feature(env, ARM_FEATURE_V8) && !ns)) { 8965 r2->type |= ARM_CP_ALIAS; 8966 } 8967 } else if ((secstate != r->secure) && !ns) { 8968 /* 8969 * The register is not banked so we only want to allow 8970 * migration of the non-secure instance. 8971 */ 8972 r2->type |= ARM_CP_ALIAS; 8973 } 8974 8975 if (HOST_BIG_ENDIAN && 8976 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) { 8977 r2->fieldoffset += sizeof(uint32_t); 8978 } 8979 } 8980 } 8981 8982 /* 8983 * By convention, for wildcarded registers only the first 8984 * entry is used for migration; the others are marked as 8985 * ALIAS so we don't try to transfer the register 8986 * multiple times. Special registers (ie NOP/WFI) are 8987 * never migratable and not even raw-accessible. 8988 */ 8989 if (r2->type & ARM_CP_SPECIAL_MASK) { 8990 r2->type |= ARM_CP_NO_RAW; 8991 } 8992 if (((r->crm == CP_ANY) && crm != 0) || 8993 ((r->opc1 == CP_ANY) && opc1 != 0) || 8994 ((r->opc2 == CP_ANY) && opc2 != 0)) { 8995 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 8996 } 8997 8998 /* 8999 * Check that raw accesses are either forbidden or handled. Note that 9000 * we can't assert this earlier because the setup of fieldoffset for 9001 * banked registers has to be done first. 9002 */ 9003 if (!(r2->type & ARM_CP_NO_RAW)) { 9004 assert(!raw_accessors_invalid(r2)); 9005 } 9006 9007 g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2); 9008 } 9009 9010 9011 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 9012 const ARMCPRegInfo *r, void *opaque) 9013 { 9014 /* 9015 * Define implementations of coprocessor registers. 9016 * We store these in a hashtable because typically 9017 * there are less than 150 registers in a space which 9018 * is 16*16*16*8*8 = 262144 in size. 9019 * Wildcarding is supported for the crm, opc1 and opc2 fields. 9020 * If a register is defined twice then the second definition is 9021 * used, so this can be used to define some generic registers and 9022 * then override them with implementation specific variations. 9023 * At least one of the original and the second definition should 9024 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 9025 * against accidental use. 9026 * 9027 * The state field defines whether the register is to be 9028 * visible in the AArch32 or AArch64 execution state. If the 9029 * state is set to ARM_CP_STATE_BOTH then we synthesise a 9030 * reginfo structure for the AArch32 view, which sees the lower 9031 * 32 bits of the 64 bit register. 9032 * 9033 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 9034 * be wildcarded. AArch64 registers are always considered to be 64 9035 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 9036 * the register, if any. 9037 */ 9038 int crm, opc1, opc2; 9039 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; 9040 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 9041 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 9042 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 9043 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 9044 int opc2max = (r->opc2 == CP_ANY) ? 
7 : r->opc2; 9045 CPState state; 9046 9047 /* 64 bit registers have only CRm and Opc1 fields */ 9048 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 9049 /* op0 only exists in the AArch64 encodings */ 9050 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 9051 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 9052 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 9053 /* 9054 * This API is only for Arm's system coprocessors (14 and 15) or 9055 * (M-profile or v7A-and-earlier only) for implementation defined 9056 * coprocessors in the range 0..7. Our decode assumes this, since 9057 * 8..13 can be used for other insns including VFP and Neon. See 9058 * valid_cp() in translate.c. Assert here that we haven't tried 9059 * to use an invalid coprocessor number. 9060 */ 9061 switch (r->state) { 9062 case ARM_CP_STATE_BOTH: 9063 /* 0 has a special meaning, but otherwise the same rules as AA32. */ 9064 if (r->cp == 0) { 9065 break; 9066 } 9067 /* fall through */ 9068 case ARM_CP_STATE_AA32: 9069 if (arm_feature(&cpu->env, ARM_FEATURE_V8) && 9070 !arm_feature(&cpu->env, ARM_FEATURE_M)) { 9071 assert(r->cp >= 14 && r->cp <= 15); 9072 } else { 9073 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15)); 9074 } 9075 break; 9076 case ARM_CP_STATE_AA64: 9077 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP); 9078 break; 9079 default: 9080 g_assert_not_reached(); 9081 } 9082 /* 9083 * The AArch64 pseudocode CheckSystemAccess() specifies that op1 9084 * encodes a minimum access level for the register. We roll this 9085 * runtime check into our general permission check code, so check 9086 * here that the reginfo's specified permissions are strict enough 9087 * to encompass the generic architectural permission check. 9088 */ 9089 if (r->state != ARM_CP_STATE_AA32) { 9090 CPAccessRights mask; 9091 switch (r->opc1) { 9092 case 0: 9093 /* min_EL EL1, but some accessible to EL0 via kernel ABI */ 9094 mask = PL0U_R | PL1_RW; 9095 break; 9096 case 1: case 2: 9097 /* min_EL EL1 */ 9098 mask = PL1_RW; 9099 break; 9100 case 3: 9101 /* min_EL EL0 */ 9102 mask = PL0_RW; 9103 break; 9104 case 4: 9105 case 5: 9106 /* min_EL EL2 */ 9107 mask = PL2_RW; 9108 break; 9109 case 6: 9110 /* min_EL EL3 */ 9111 mask = PL3_RW; 9112 break; 9113 case 7: 9114 /* min_EL EL1, secure mode only (we don't check the latter) */ 9115 mask = PL1_RW; 9116 break; 9117 default: 9118 /* broken reginfo with out-of-range opc1 */ 9119 g_assert_not_reached(); 9120 } 9121 /* assert our permissions are not too lax (stricter is fine) */ 9122 assert((r->access & ~mask) == 0); 9123 } 9124 9125 /* 9126 * Check that the register definition has enough info to handle 9127 * reads and writes if they are permitted. 
9128 */ 9129 if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) { 9130 if (r->access & PL3_R) { 9131 assert((r->fieldoffset || 9132 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 9133 r->readfn); 9134 } 9135 if (r->access & PL3_W) { 9136 assert((r->fieldoffset || 9137 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 9138 r->writefn); 9139 } 9140 } 9141 9142 for (crm = crmmin; crm <= crmmax; crm++) { 9143 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 9144 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 9145 for (state = ARM_CP_STATE_AA32; 9146 state <= ARM_CP_STATE_AA64; state++) { 9147 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 9148 continue; 9149 } 9150 if ((r->type & ARM_CP_ADD_TLBI_NXS) && 9151 cpu_isar_feature(aa64_xs, cpu)) { 9152 /* 9153 * This is a TLBI insn which has an NXS variant. The 9154 * NXS variant is at the same encoding except that 9155 * crn is +1, and has the same behaviour except for 9156 * fine-grained trapping. Add the NXS insn here and 9157 * then fall through to add the normal register. 9158 * add_cpreg_to_hashtable() copies the cpreg struct 9159 * and name that it is passed, so it's OK to use 9160 * a local struct here. 9161 */ 9162 ARMCPRegInfo nxs_ri = *r; 9163 g_autofree char *name = g_strdup_printf("%sNXS", r->name); 9164 9165 assert(state == ARM_CP_STATE_AA64); 9166 assert(nxs_ri.crn < 0xf); 9167 nxs_ri.crn++; 9168 if (nxs_ri.fgt) { 9169 nxs_ri.fgt |= R_FGT_NXS_MASK; 9170 } 9171 add_cpreg_to_hashtable(cpu, &nxs_ri, opaque, state, 9172 ARM_CP_SECSTATE_NS, 9173 crm, opc1, opc2, name); 9174 } 9175 if (state == ARM_CP_STATE_AA32) { 9176 /* 9177 * Under AArch32 CP registers can be common 9178 * (same for secure and non-secure world) or banked. 9179 */ 9180 char *name; 9181 9182 switch (r->secure) { 9183 case ARM_CP_SECSTATE_S: 9184 case ARM_CP_SECSTATE_NS: 9185 add_cpreg_to_hashtable(cpu, r, opaque, state, 9186 r->secure, crm, opc1, opc2, 9187 r->name); 9188 break; 9189 case ARM_CP_SECSTATE_BOTH: 9190 name = g_strdup_printf("%s_S", r->name); 9191 add_cpreg_to_hashtable(cpu, r, opaque, state, 9192 ARM_CP_SECSTATE_S, 9193 crm, opc1, opc2, name); 9194 g_free(name); 9195 add_cpreg_to_hashtable(cpu, r, opaque, state, 9196 ARM_CP_SECSTATE_NS, 9197 crm, opc1, opc2, r->name); 9198 break; 9199 default: 9200 g_assert_not_reached(); 9201 } 9202 } else { 9203 /* 9204 * AArch64 registers get mapped to non-secure instance 9205 * of AArch32 9206 */ 9207 add_cpreg_to_hashtable(cpu, r, opaque, state, 9208 ARM_CP_SECSTATE_NS, 9209 crm, opc1, opc2, r->name); 9210 } 9211 } 9212 } 9213 } 9214 } 9215 } 9216 9217 /* Define a whole list of registers */ 9218 void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs, 9219 void *opaque, size_t len) 9220 { 9221 size_t i; 9222 for (i = 0; i < len; ++i) { 9223 define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque); 9224 } 9225 } 9226 9227 /* 9228 * Modify ARMCPRegInfo for access from userspace. 9229 * 9230 * This is a data driven modification directed by 9231 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as 9232 * user-space cannot alter any values and dynamic values pertaining to 9233 * execution state are hidden from user space view anyway. 
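 * For example, the ID_AA64ISAR0_EL1 entry in v8_user_idregs keeps only
 * its exported_bits in the reset value, so ID fields that user-space is
 * not meant to see read as zero once the register is made constant.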
9234 */ 9235 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len, 9236 const ARMCPRegUserSpaceInfo *mods, 9237 size_t mods_len) 9238 { 9239 for (size_t mi = 0; mi < mods_len; ++mi) { 9240 const ARMCPRegUserSpaceInfo *m = mods + mi; 9241 GPatternSpec *pat = NULL; 9242 9243 if (m->is_glob) { 9244 pat = g_pattern_spec_new(m->name); 9245 } 9246 for (size_t ri = 0; ri < regs_len; ++ri) { 9247 ARMCPRegInfo *r = regs + ri; 9248 9249 if (pat && g_pattern_match_string(pat, r->name)) { 9250 r->type = ARM_CP_CONST; 9251 r->access = PL0U_R; 9252 r->resetvalue = 0; 9253 /* continue */ 9254 } else if (strcmp(r->name, m->name) == 0) { 9255 r->type = ARM_CP_CONST; 9256 r->access = PL0U_R; 9257 r->resetvalue &= m->exported_bits; 9258 r->resetvalue |= m->fixed_bits; 9259 break; 9260 } 9261 } 9262 if (pat) { 9263 g_pattern_spec_free(pat); 9264 } 9265 } 9266 } 9267 9268 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) 9269 { 9270 return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp); 9271 } 9272 9273 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, 9274 uint64_t value) 9275 { 9276 /* Helper coprocessor write function for write-ignore registers */ 9277 } 9278 9279 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) 9280 { 9281 /* Helper coprocessor read function for read-as-zero registers */ 9282 return 0; 9283 } 9284 9285 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) 9286 { 9287 /* Helper coprocessor reset function for do-nothing-on-reset registers */ 9288 } 9289 9290 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) 9291 { 9292 /* 9293 * Return true if it is not valid for us to switch to 9294 * this CPU mode (ie all the UNPREDICTABLE cases in 9295 * the ARM ARM CPSRWriteByInstr pseudocode). 9296 */ 9297 9298 /* Changes to or from Hyp via MSR and CPS are illegal. */ 9299 if (write_type == CPSRWriteByInstr && 9300 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || 9301 mode == ARM_CPU_MODE_HYP)) { 9302 return 1; 9303 } 9304 9305 switch (mode) { 9306 case ARM_CPU_MODE_USR: 9307 return 0; 9308 case ARM_CPU_MODE_SYS: 9309 case ARM_CPU_MODE_SVC: 9310 case ARM_CPU_MODE_ABT: 9311 case ARM_CPU_MODE_UND: 9312 case ARM_CPU_MODE_IRQ: 9313 case ARM_CPU_MODE_FIQ: 9314 /* 9315 * Note that we don't implement the IMPDEF NSACR.RFR which in v7 9316 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) 9317 */ 9318 /* 9319 * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR 9320 * and CPS are treated as illegal mode changes.
9321 */ 9322 if (write_type == CPSRWriteByInstr && 9323 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 9324 (arm_hcr_el2_eff(env) & HCR_TGE)) { 9325 return 1; 9326 } 9327 return 0; 9328 case ARM_CPU_MODE_HYP: 9329 return !arm_is_el2_enabled(env) || arm_current_el(env) < 2; 9330 case ARM_CPU_MODE_MON: 9331 return arm_current_el(env) < 3; 9332 default: 9333 return 1; 9334 } 9335 } 9336 9337 uint32_t cpsr_read(CPUARMState *env) 9338 { 9339 int ZF; 9340 ZF = (env->ZF == 0); 9341 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 9342 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 9343 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 9344 | ((env->condexec_bits & 0xfc) << 8) 9345 | (env->GE << 16) | (env->daif & CPSR_AIF); 9346 } 9347 9348 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 9349 CPSRWriteType write_type) 9350 { 9351 uint32_t changed_daif; 9352 bool rebuild_hflags = (write_type != CPSRWriteRaw) && 9353 (mask & (CPSR_M | CPSR_E | CPSR_IL)); 9354 9355 if (mask & CPSR_NZCV) { 9356 env->ZF = (~val) & CPSR_Z; 9357 env->NF = val; 9358 env->CF = (val >> 29) & 1; 9359 env->VF = (val << 3) & 0x80000000; 9360 } 9361 if (mask & CPSR_Q) { 9362 env->QF = ((val & CPSR_Q) != 0); 9363 } 9364 if (mask & CPSR_T) { 9365 env->thumb = ((val & CPSR_T) != 0); 9366 } 9367 if (mask & CPSR_IT_0_1) { 9368 env->condexec_bits &= ~3; 9369 env->condexec_bits |= (val >> 25) & 3; 9370 } 9371 if (mask & CPSR_IT_2_7) { 9372 env->condexec_bits &= 3; 9373 env->condexec_bits |= (val >> 8) & 0xfc; 9374 } 9375 if (mask & CPSR_GE) { 9376 env->GE = (val >> 16) & 0xf; 9377 } 9378 9379 /* 9380 * In a V7 implementation that includes the security extensions but does 9381 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 9382 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 9383 * bits respectively. 9384 * 9385 * In a V8 implementation, it is permitted for privileged software to 9386 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 9387 */ 9388 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 9389 arm_feature(env, ARM_FEATURE_EL3) && 9390 !arm_feature(env, ARM_FEATURE_EL2) && 9391 !arm_is_secure(env)) { 9392 9393 changed_daif = (env->daif ^ val) & mask; 9394 9395 if (changed_daif & CPSR_A) { 9396 /* 9397 * Check to see if we are allowed to change the masking of async 9398 * abort exceptions from a non-secure state. 9399 */ 9400 if (!(env->cp15.scr_el3 & SCR_AW)) { 9401 qemu_log_mask(LOG_GUEST_ERROR, 9402 "Ignoring attempt to switch CPSR_A flag from " 9403 "non-secure world with SCR.AW bit clear\n"); 9404 mask &= ~CPSR_A; 9405 } 9406 } 9407 9408 if (changed_daif & CPSR_F) { 9409 /* 9410 * Check to see if we are allowed to change the masking of FIQ 9411 * exceptions from a non-secure state. 9412 */ 9413 if (!(env->cp15.scr_el3 & SCR_FW)) { 9414 qemu_log_mask(LOG_GUEST_ERROR, 9415 "Ignoring attempt to switch CPSR_F flag from " 9416 "non-secure world with SCR.FW bit clear\n"); 9417 mask &= ~CPSR_F; 9418 } 9419 9420 /* 9421 * Check whether non-maskable FIQ (NMFI) support is enabled. 9422 * If this bit is set software is not allowed to mask 9423 * FIQs, but is allowed to set CPSR_F to 0. 
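 * In other words, with SCTLR.NMFI set an MSR that tries to set CPSR.F
 * is silently ignored (CPSR_F is dropped from the mask below), while a
 * write that clears CPSR.F still takes effect.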
9424 */ 9425 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 9426 (val & CPSR_F)) { 9427 qemu_log_mask(LOG_GUEST_ERROR, 9428 "Ignoring attempt to enable CPSR_F flag " 9429 "(non-maskable FIQ [NMFI] support enabled)\n"); 9430 mask &= ~CPSR_F; 9431 } 9432 } 9433 } 9434 9435 env->daif &= ~(CPSR_AIF & mask); 9436 env->daif |= val & CPSR_AIF & mask; 9437 9438 if (write_type != CPSRWriteRaw && 9439 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 9440 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 9441 /* 9442 * Note that we can only get here in USR mode if this is a 9443 * gdb stub write; for this case we follow the architectural 9444 * behaviour for guest writes in USR mode of ignoring an attempt 9445 * to switch mode. (Those are caught by translate.c for writes 9446 * triggered by guest instructions.) 9447 */ 9448 mask &= ~CPSR_M; 9449 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 9450 /* 9451 * Attempt to switch to an invalid mode: this is UNPREDICTABLE in 9452 * v7, and has defined behaviour in v8: 9453 * + leave CPSR.M untouched 9454 * + allow changes to the other CPSR fields 9455 * + set PSTATE.IL 9456 * For user changes via the GDB stub, we don't set PSTATE.IL, 9457 * as this would be unnecessarily harsh for a user error. 9458 */ 9459 mask &= ~CPSR_M; 9460 if (write_type != CPSRWriteByGDBStub && 9461 arm_feature(env, ARM_FEATURE_V8)) { 9462 mask |= CPSR_IL; 9463 val |= CPSR_IL; 9464 } 9465 qemu_log_mask(LOG_GUEST_ERROR, 9466 "Illegal AArch32 mode switch attempt from %s to %s\n", 9467 aarch32_mode_name(env->uncached_cpsr), 9468 aarch32_mode_name(val)); 9469 } else { 9470 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", 9471 write_type == CPSRWriteExceptionReturn ? 9472 "Exception return from AArch32" : 9473 "AArch32 mode switch from", 9474 aarch32_mode_name(env->uncached_cpsr), 9475 aarch32_mode_name(val), env->regs[15]); 9476 switch_mode(env, val & CPSR_M); 9477 } 9478 } 9479 mask &= ~CACHED_CPSR_BITS; 9480 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 9481 if (tcg_enabled() && rebuild_hflags) { 9482 arm_rebuild_hflags(env); 9483 } 9484 } 9485 9486 #ifdef CONFIG_USER_ONLY 9487 9488 static void switch_mode(CPUARMState *env, int mode) 9489 { 9490 ARMCPU *cpu = env_archcpu(env); 9491 9492 if (mode != ARM_CPU_MODE_USR) { 9493 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); 9494 } 9495 } 9496 9497 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 9498 uint32_t cur_el, bool secure) 9499 { 9500 return 1; 9501 } 9502 9503 void aarch64_sync_64_to_32(CPUARMState *env) 9504 { 9505 g_assert_not_reached(); 9506 } 9507 9508 #else 9509 9510 static void switch_mode(CPUARMState *env, int mode) 9511 { 9512 int old_mode; 9513 int i; 9514 9515 old_mode = env->uncached_cpsr & CPSR_M; 9516 if (mode == old_mode) { 9517 return; 9518 } 9519 9520 if (old_mode == ARM_CPU_MODE_FIQ) { 9521 memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); 9522 memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); 9523 } else if (mode == ARM_CPU_MODE_FIQ) { 9524 memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); 9525 memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); 9526 } 9527 9528 i = bank_number(old_mode); 9529 env->banked_r13[i] = env->regs[13]; 9530 env->banked_spsr[i] = env->spsr; 9531 9532 i = bank_number(mode); 9533 env->regs[13] = env->banked_r13[i]; 9534 env->spsr = env->banked_spsr[i]; 9535 9536 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; 9537 env->regs[14] = 
env->banked_r14[r14_bank_number(mode)]; 9538 } 9539 9540 /* 9541 * Physical Interrupt Target EL Lookup Table 9542 * 9543 * [ From ARM ARM section G1.13.4 (Table G1-15) ] 9544 * 9545 * The below multi-dimensional table is used for looking up the target 9546 * exception level given numerous condition criteria. Specifically, the 9547 * target EL is based on SCR and HCR routing controls as well as the 9548 * currently executing EL and secure state. 9549 * 9550 * Dimensions: 9551 * target_el_table[2][2][2][2][2][4] 9552 * | | | | | +--- Current EL 9553 * | | | | +------ Non-secure(0)/Secure(1) 9554 * | | | +--------- HCR mask override 9555 * | | +------------ SCR exec state control 9556 * | +--------------- SCR mask override 9557 * +------------------ 32-bit(0)/64-bit(1) EL3 9558 * 9559 * The table values are as such: 9560 * 0-3 = EL0-EL3 9561 * -1 = Cannot occur 9562 * 9563 * The ARM ARM target EL table includes entries indicating that an "exception 9564 * is not taken". The two cases where this is applicable are: 9565 * 1) An exception is taken from EL3 but the SCR does not have the exception 9566 * routed to EL3. 9567 * 2) An exception is taken from EL2 but the HCR does not have the exception 9568 * routed to EL2. 9569 * In these two cases, the below table contain a target of EL1. This value is 9570 * returned as it is expected that the consumer of the table data will check 9571 * for "target EL >= current EL" to ensure the exception is not taken. 9572 * 9573 * SCR HCR 9574 * 64 EA AMO From 9575 * BIT IRQ IMO Non-secure Secure 9576 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 9577 */ 9578 static const int8_t target_el_table[2][2][2][2][2][4] = { 9579 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 9580 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 9581 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 9582 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 9583 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 9584 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 9585 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 9586 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 9587 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 9588 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},}, 9589 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },}, 9590 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},}, 9591 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 9592 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 9593 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },}, 9594 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},}, 9595 }; 9596 9597 /* 9598 * Determine the target EL for physical exceptions 9599 */ 9600 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 9601 uint32_t cur_el, bool secure) 9602 { 9603 CPUARMState *env = cpu_env(cs); 9604 bool rw; 9605 bool scr; 9606 bool hcr; 9607 int target_el; 9608 /* Is the highest EL AArch64? */ 9609 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); 9610 uint64_t hcr_el2; 9611 9612 if (arm_feature(env, ARM_FEATURE_EL3)) { 9613 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 9614 } else { 9615 /* 9616 * Either EL2 is the highest EL (and so the EL2 register width 9617 * is given by is64); or there is no EL2 or EL3, in which case 9618 * the value of 'rw' does not affect the table lookup anyway. 
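 * As a worked lookup example: a non-secure IRQ taken from EL0 with
 * SCR_EL3.IRQ clear, SCR_EL3.RW set (64-bit EL3) and HCR_EL2.IMO set
 * resolves via target_el_table[1][0][1][1][0][0] to a target of EL2.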
9619 */ 9620 rw = is64; 9621 } 9622 9623 hcr_el2 = arm_hcr_el2_eff(env); 9624 switch (excp_idx) { 9625 case EXCP_IRQ: 9626 case EXCP_NMI: 9627 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 9628 hcr = hcr_el2 & HCR_IMO; 9629 break; 9630 case EXCP_FIQ: 9631 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 9632 hcr = hcr_el2 & HCR_FMO; 9633 break; 9634 default: 9635 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 9636 hcr = hcr_el2 & HCR_AMO; 9637 break; 9638 }; 9639 9640 /* 9641 * For these purposes, TGE and AMO/IMO/FMO both force the 9642 * interrupt to EL2. Fold TGE into the bit extracted above. 9643 */ 9644 hcr |= (hcr_el2 & HCR_TGE) != 0; 9645 9646 /* Perform a table-lookup for the target EL given the current state */ 9647 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 9648 9649 assert(target_el > 0); 9650 9651 return target_el; 9652 } 9653 9654 void arm_log_exception(CPUState *cs) 9655 { 9656 int idx = cs->exception_index; 9657 9658 if (qemu_loglevel_mask(CPU_LOG_INT)) { 9659 const char *exc = NULL; 9660 static const char * const excnames[] = { 9661 [EXCP_UDEF] = "Undefined Instruction", 9662 [EXCP_SWI] = "SVC", 9663 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 9664 [EXCP_DATA_ABORT] = "Data Abort", 9665 [EXCP_IRQ] = "IRQ", 9666 [EXCP_FIQ] = "FIQ", 9667 [EXCP_BKPT] = "Breakpoint", 9668 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 9669 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 9670 [EXCP_HVC] = "Hypervisor Call", 9671 [EXCP_HYP_TRAP] = "Hypervisor Trap", 9672 [EXCP_SMC] = "Secure Monitor Call", 9673 [EXCP_VIRQ] = "Virtual IRQ", 9674 [EXCP_VFIQ] = "Virtual FIQ", 9675 [EXCP_SEMIHOST] = "Semihosting call", 9676 [EXCP_NOCP] = "v7M NOCP UsageFault", 9677 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 9678 [EXCP_STKOF] = "v8M STKOF UsageFault", 9679 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking", 9680 [EXCP_LSERR] = "v8M LSERR UsageFault", 9681 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault", 9682 [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault", 9683 [EXCP_VSERR] = "Virtual SERR", 9684 [EXCP_GPC] = "Granule Protection Check", 9685 [EXCP_NMI] = "NMI", 9686 [EXCP_VINMI] = "Virtual IRQ NMI", 9687 [EXCP_VFNMI] = "Virtual FIQ NMI", 9688 [EXCP_MON_TRAP] = "Monitor Trap", 9689 }; 9690 9691 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 9692 exc = excnames[idx]; 9693 } 9694 if (!exc) { 9695 exc = "unknown"; 9696 } 9697 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n", 9698 idx, exc, cs->cpu_index); 9699 } 9700 } 9701 9702 /* 9703 * Function used to synchronize QEMU's AArch64 register set with AArch32 9704 * register set. This is necessary when switching between AArch32 and AArch64 9705 * execution state. 9706 */ 9707 void aarch64_sync_32_to_64(CPUARMState *env) 9708 { 9709 int i; 9710 uint32_t mode = env->uncached_cpsr & CPSR_M; 9711 9712 /* We can blanket copy R[0:7] to X[0:7] */ 9713 for (i = 0; i < 8; i++) { 9714 env->xregs[i] = env->regs[i]; 9715 } 9716 9717 /* 9718 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 9719 * Otherwise, they come from the banked user regs. 9720 */ 9721 if (mode == ARM_CPU_MODE_FIQ) { 9722 for (i = 8; i < 13; i++) { 9723 env->xregs[i] = env->usr_regs[i - 8]; 9724 } 9725 } else { 9726 for (i = 8; i < 13; i++) { 9727 env->xregs[i] = env->regs[i]; 9728 } 9729 } 9730 9731 /* 9732 * Registers x13-x23 are the various mode SP and FP registers. Registers 9733 * r13 and r14 are only copied if we are in that mode, otherwise we copy 9734 * from the mode banked register. 
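 * Concretely, the fixed mapping used below is: x13/x14 = SP/LR_usr,
 * x15 = SP_hyp, x16/x17 = LR/SP_irq, x18/x19 = LR/SP_svc,
 * x20/x21 = LR/SP_abt, x22/x23 = LR/SP_und and x24-x30 = r8-r14 of
 * FIQ mode.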
9735 */ 9736 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 9737 env->xregs[13] = env->regs[13]; 9738 env->xregs[14] = env->regs[14]; 9739 } else { 9740 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 9741 /* HYP is an exception in that it is copied from r14 */ 9742 if (mode == ARM_CPU_MODE_HYP) { 9743 env->xregs[14] = env->regs[14]; 9744 } else { 9745 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; 9746 } 9747 } 9748 9749 if (mode == ARM_CPU_MODE_HYP) { 9750 env->xregs[15] = env->regs[13]; 9751 } else { 9752 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 9753 } 9754 9755 if (mode == ARM_CPU_MODE_IRQ) { 9756 env->xregs[16] = env->regs[14]; 9757 env->xregs[17] = env->regs[13]; 9758 } else { 9759 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; 9760 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 9761 } 9762 9763 if (mode == ARM_CPU_MODE_SVC) { 9764 env->xregs[18] = env->regs[14]; 9765 env->xregs[19] = env->regs[13]; 9766 } else { 9767 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; 9768 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 9769 } 9770 9771 if (mode == ARM_CPU_MODE_ABT) { 9772 env->xregs[20] = env->regs[14]; 9773 env->xregs[21] = env->regs[13]; 9774 } else { 9775 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; 9776 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 9777 } 9778 9779 if (mode == ARM_CPU_MODE_UND) { 9780 env->xregs[22] = env->regs[14]; 9781 env->xregs[23] = env->regs[13]; 9782 } else { 9783 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; 9784 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 9785 } 9786 9787 /* 9788 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 9789 * mode, then we can copy from r8-r14. Otherwise, we copy from the 9790 * FIQ bank for r8-r14. 9791 */ 9792 if (mode == ARM_CPU_MODE_FIQ) { 9793 for (i = 24; i < 31; i++) { 9794 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 9795 } 9796 } else { 9797 for (i = 24; i < 29; i++) { 9798 env->xregs[i] = env->fiq_regs[i - 24]; 9799 } 9800 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 9801 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; 9802 } 9803 9804 env->pc = env->regs[15]; 9805 } 9806 9807 /* 9808 * Function used to synchronize QEMU's AArch32 register set with AArch64 9809 * register set. This is necessary when switching between AArch32 and AArch64 9810 * execution state. 9811 */ 9812 void aarch64_sync_64_to_32(CPUARMState *env) 9813 { 9814 int i; 9815 uint32_t mode = env->uncached_cpsr & CPSR_M; 9816 9817 /* We can blanket copy X[0:7] to R[0:7] */ 9818 for (i = 0; i < 8; i++) { 9819 env->regs[i] = env->xregs[i]; 9820 } 9821 9822 /* 9823 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 9824 * Otherwise, we copy x8-x12 into the banked user regs. 9825 */ 9826 if (mode == ARM_CPU_MODE_FIQ) { 9827 for (i = 8; i < 13; i++) { 9828 env->usr_regs[i - 8] = env->xregs[i]; 9829 } 9830 } else { 9831 for (i = 8; i < 13; i++) { 9832 env->regs[i] = env->xregs[i]; 9833 } 9834 } 9835 9836 /* 9837 * Registers r13 & r14 depend on the current mode. 9838 * If we are in a given mode, we copy the corresponding x registers to r13 9839 * and r14. Otherwise, we copy the x register to the banked r13 and r14 9840 * for the mode. 
9841 */ 9842 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 9843 env->regs[13] = env->xregs[13]; 9844 env->regs[14] = env->xregs[14]; 9845 } else { 9846 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 9847 9848 /* 9849 * HYP is an exception in that it does not have its own banked r14 but 9850 * shares the USR r14 9851 */ 9852 if (mode == ARM_CPU_MODE_HYP) { 9853 env->regs[14] = env->xregs[14]; 9854 } else { 9855 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 9856 } 9857 } 9858 9859 if (mode == ARM_CPU_MODE_HYP) { 9860 env->regs[13] = env->xregs[15]; 9861 } else { 9862 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 9863 } 9864 9865 if (mode == ARM_CPU_MODE_IRQ) { 9866 env->regs[14] = env->xregs[16]; 9867 env->regs[13] = env->xregs[17]; 9868 } else { 9869 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 9870 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 9871 } 9872 9873 if (mode == ARM_CPU_MODE_SVC) { 9874 env->regs[14] = env->xregs[18]; 9875 env->regs[13] = env->xregs[19]; 9876 } else { 9877 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 9878 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 9879 } 9880 9881 if (mode == ARM_CPU_MODE_ABT) { 9882 env->regs[14] = env->xregs[20]; 9883 env->regs[13] = env->xregs[21]; 9884 } else { 9885 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 9886 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 9887 } 9888 9889 if (mode == ARM_CPU_MODE_UND) { 9890 env->regs[14] = env->xregs[22]; 9891 env->regs[13] = env->xregs[23]; 9892 } else { 9893 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 9894 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 9895 } 9896 9897 /* 9898 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 9899 * mode, then we can copy to r8-r14. Otherwise, we copy to the 9900 * FIQ bank for r8-r14. 9901 */ 9902 if (mode == ARM_CPU_MODE_FIQ) { 9903 for (i = 24; i < 31; i++) { 9904 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 9905 } 9906 } else { 9907 for (i = 24; i < 29; i++) { 9908 env->fiq_regs[i - 24] = env->xregs[i]; 9909 } 9910 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 9911 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 9912 } 9913 9914 env->regs[15] = env->pc; 9915 } 9916 9917 static void take_aarch32_exception(CPUARMState *env, int new_mode, 9918 uint32_t mask, uint32_t offset, 9919 uint32_t newpc) 9920 { 9921 int new_el; 9922 9923 /* Change the CPU state so as to actually take the exception. */ 9924 switch_mode(env, new_mode); 9925 9926 /* 9927 * For exceptions taken to AArch32 we must clear the SS bit in both 9928 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 9929 */ 9930 env->pstate &= ~PSTATE_SS; 9931 env->spsr = cpsr_read(env); 9932 /* Clear IT bits. */ 9933 env->condexec_bits = 0; 9934 /* Switch to the new mode, and to the correct instruction set. */ 9935 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; 9936 9937 /* This must be after mode switching. 
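(Otherwise arm_current_el() below would report the exception level we came from rather than the one we are entering.)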
*/ 9938 new_el = arm_current_el(env); 9939 9940 /* Set new mode endianness */ 9941 env->uncached_cpsr &= ~CPSR_E; 9942 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) { 9943 env->uncached_cpsr |= CPSR_E; 9944 } 9945 /* J and IL must always be cleared for exception entry */ 9946 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J); 9947 env->daif |= mask; 9948 9949 if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) { 9950 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) { 9951 env->uncached_cpsr |= CPSR_SSBS; 9952 } else { 9953 env->uncached_cpsr &= ~CPSR_SSBS; 9954 } 9955 } 9956 9957 if (new_mode == ARM_CPU_MODE_HYP) { 9958 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0; 9959 env->elr_el[2] = env->regs[15]; 9960 } else { 9961 /* CPSR.PAN is normally preserved unless... */ 9962 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) { 9963 switch (new_el) { 9964 case 3: 9965 if (!arm_is_secure_below_el3(env)) { 9966 /* ... the target is EL3, from non-secure state. */ 9967 env->uncached_cpsr &= ~CPSR_PAN; 9968 break; 9969 } 9970 /* ... the target is EL3, from secure state ... */ 9971 /* fall through */ 9972 case 1: 9973 /* ... the target is EL1 and SCTLR.SPAN is 0. */ 9974 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) { 9975 env->uncached_cpsr |= CPSR_PAN; 9976 } 9977 break; 9978 } 9979 } 9980 /* 9981 * this is a lie, as there was no c1_sys on V4T/V5, but who cares 9982 * and we should just guard the thumb mode on V4 9983 */ 9984 if (arm_feature(env, ARM_FEATURE_V4T)) { 9985 env->thumb = 9986 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; 9987 } 9988 env->regs[14] = env->regs[15] + offset; 9989 } 9990 env->regs[15] = newpc; 9991 9992 if (tcg_enabled()) { 9993 arm_rebuild_hflags(env); 9994 } 9995 } 9996 9997 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) 9998 { 9999 /* 10000 * Handle exception entry to Hyp mode; this is sufficiently 10001 * different to entry to other AArch32 modes that we handle it 10002 * separately here. 10003 * 10004 * The vector table entry used is always the 0x14 Hyp mode entry point, 10005 * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp. 10006 * The offset applied to the preferred return address is always zero 10007 * (see DDI0487C.a section G1.12.3). 10008 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. 10009 */ 10010 uint32_t addr, mask; 10011 ARMCPU *cpu = ARM_CPU(cs); 10012 CPUARMState *env = &cpu->env; 10013 10014 switch (cs->exception_index) { 10015 case EXCP_UDEF: 10016 addr = 0x04; 10017 break; 10018 case EXCP_SWI: 10019 addr = 0x08; 10020 break; 10021 case EXCP_BKPT: 10022 /* Fall through to prefetch abort.
*/ 10023 case EXCP_PREFETCH_ABORT: 10024 env->cp15.ifar_s = env->exception.vaddress; 10025 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n", 10026 (uint32_t)env->exception.vaddress); 10027 addr = 0x0c; 10028 break; 10029 case EXCP_DATA_ABORT: 10030 env->cp15.dfar_s = env->exception.vaddress; 10031 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n", 10032 (uint32_t)env->exception.vaddress); 10033 addr = 0x10; 10034 break; 10035 case EXCP_IRQ: 10036 addr = 0x18; 10037 break; 10038 case EXCP_FIQ: 10039 addr = 0x1c; 10040 break; 10041 case EXCP_HVC: 10042 addr = 0x08; 10043 break; 10044 case EXCP_HYP_TRAP: 10045 addr = 0x14; 10046 break; 10047 default: 10048 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 10049 } 10050 10051 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { 10052 if (!arm_feature(env, ARM_FEATURE_V8)) { 10053 /* 10054 * QEMU syndrome values are v8-style. v7 has the IL bit 10055 * UNK/SBZP for "field not valid" cases, where v8 uses RES1. 10056 * If this is a v7 CPU, squash the IL bit in those cases. 10057 */ 10058 if (cs->exception_index == EXCP_PREFETCH_ABORT || 10059 (cs->exception_index == EXCP_DATA_ABORT && 10060 !(env->exception.syndrome & ARM_EL_ISV)) || 10061 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { 10062 env->exception.syndrome &= ~ARM_EL_IL; 10063 } 10064 } 10065 env->cp15.esr_el[2] = env->exception.syndrome; 10066 } 10067 10068 if (arm_current_el(env) != 2 && addr < 0x14) { 10069 addr = 0x14; 10070 } 10071 10072 mask = 0; 10073 if (!(env->cp15.scr_el3 & SCR_EA)) { 10074 mask |= CPSR_A; 10075 } 10076 if (!(env->cp15.scr_el3 & SCR_IRQ)) { 10077 mask |= CPSR_I; 10078 } 10079 if (!(env->cp15.scr_el3 & SCR_FIQ)) { 10080 mask |= CPSR_F; 10081 } 10082 10083 addr += env->cp15.hvbar; 10084 10085 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); 10086 } 10087 10088 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 10089 { 10090 ARMCPU *cpu = ARM_CPU(cs); 10091 CPUARMState *env = &cpu->env; 10092 uint32_t addr; 10093 uint32_t mask; 10094 int new_mode; 10095 uint32_t offset; 10096 uint32_t moe; 10097 10098 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 10099 switch (syn_get_ec(env->exception.syndrome)) { 10100 case EC_BREAKPOINT: 10101 case EC_BREAKPOINT_SAME_EL: 10102 moe = 1; 10103 break; 10104 case EC_WATCHPOINT: 10105 case EC_WATCHPOINT_SAME_EL: 10106 moe = 10; 10107 break; 10108 case EC_AA32_BKPT: 10109 moe = 3; 10110 break; 10111 case EC_VECTORCATCH: 10112 moe = 5; 10113 break; 10114 default: 10115 moe = 0; 10116 break; 10117 } 10118 10119 if (moe) { 10120 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 10121 } 10122 10123 if (env->exception.target_el == 2) { 10124 /* Debug exceptions are reported differently on AArch32 */ 10125 switch (syn_get_ec(env->exception.syndrome)) { 10126 case EC_BREAKPOINT: 10127 case EC_BREAKPOINT_SAME_EL: 10128 case EC_AA32_BKPT: 10129 case EC_VECTORCATCH: 10130 env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2, 10131 0, 0, 0x22); 10132 break; 10133 case EC_WATCHPOINT: 10134 env->exception.syndrome = syn_set_ec(env->exception.syndrome, 10135 EC_DATAABORT); 10136 break; 10137 case EC_WATCHPOINT_SAME_EL: 10138 env->exception.syndrome = syn_set_ec(env->exception.syndrome, 10139 EC_DATAABORT_SAME_EL); 10140 break; 10141 } 10142 arm_cpu_do_interrupt_aarch32_hyp(cs); 10143 return; 10144 } 10145 10146 switch (cs->exception_index) { 10147 case EXCP_UDEF: 10148 new_mode = ARM_CPU_MODE_UND; 10149 addr = 0x04; 10150 mask = 
CPSR_I; 10151 if (env->thumb) { 10152 offset = 2; 10153 } else { 10154 offset = 4; 10155 } 10156 break; 10157 case EXCP_SWI: 10158 new_mode = ARM_CPU_MODE_SVC; 10159 addr = 0x08; 10160 mask = CPSR_I; 10161 /* The PC already points to the next instruction. */ 10162 offset = 0; 10163 break; 10164 case EXCP_BKPT: 10165 /* Fall through to prefetch abort. */ 10166 case EXCP_PREFETCH_ABORT: 10167 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 10168 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 10169 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 10170 env->exception.fsr, (uint32_t)env->exception.vaddress); 10171 new_mode = ARM_CPU_MODE_ABT; 10172 addr = 0x0c; 10173 mask = CPSR_A | CPSR_I; 10174 offset = 4; 10175 break; 10176 case EXCP_DATA_ABORT: 10177 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 10178 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 10179 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 10180 env->exception.fsr, 10181 (uint32_t)env->exception.vaddress); 10182 new_mode = ARM_CPU_MODE_ABT; 10183 addr = 0x10; 10184 mask = CPSR_A | CPSR_I; 10185 offset = 8; 10186 break; 10187 case EXCP_IRQ: 10188 new_mode = ARM_CPU_MODE_IRQ; 10189 addr = 0x18; 10190 /* Disable IRQ and imprecise data aborts. */ 10191 mask = CPSR_A | CPSR_I; 10192 offset = 4; 10193 if (env->cp15.scr_el3 & SCR_IRQ) { 10194 /* IRQ routed to monitor mode */ 10195 new_mode = ARM_CPU_MODE_MON; 10196 mask |= CPSR_F; 10197 } 10198 break; 10199 case EXCP_FIQ: 10200 new_mode = ARM_CPU_MODE_FIQ; 10201 addr = 0x1c; 10202 /* Disable FIQ, IRQ and imprecise data aborts. */ 10203 mask = CPSR_A | CPSR_I | CPSR_F; 10204 if (env->cp15.scr_el3 & SCR_FIQ) { 10205 /* FIQ routed to monitor mode */ 10206 new_mode = ARM_CPU_MODE_MON; 10207 } 10208 offset = 4; 10209 break; 10210 case EXCP_VIRQ: 10211 new_mode = ARM_CPU_MODE_IRQ; 10212 addr = 0x18; 10213 /* Disable IRQ and imprecise data aborts. */ 10214 mask = CPSR_A | CPSR_I; 10215 offset = 4; 10216 break; 10217 case EXCP_VFIQ: 10218 new_mode = ARM_CPU_MODE_FIQ; 10219 addr = 0x1c; 10220 /* Disable FIQ, IRQ and imprecise data aborts. */ 10221 mask = CPSR_A | CPSR_I | CPSR_F; 10222 offset = 4; 10223 break; 10224 case EXCP_VSERR: 10225 { 10226 /* 10227 * Note that this is reported as a data abort, but the DFAR 10228 * has an UNKNOWN value. Construct the SError syndrome from 10229 * AET and ExT fields. 10230 */ 10231 ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, }; 10232 10233 if (extended_addresses_enabled(env)) { 10234 env->exception.fsr = arm_fi_to_lfsc(&fi); 10235 } else { 10236 env->exception.fsr = arm_fi_to_sfsc(&fi); 10237 } 10238 env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000; 10239 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 10240 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x\n", 10241 env->exception.fsr); 10242 10243 new_mode = ARM_CPU_MODE_ABT; 10244 addr = 0x10; 10245 mask = CPSR_A | CPSR_I; 10246 offset = 8; 10247 } 10248 break; 10249 case EXCP_SMC: 10250 new_mode = ARM_CPU_MODE_MON; 10251 addr = 0x08; 10252 mask = CPSR_A | CPSR_I | CPSR_F; 10253 offset = 0; 10254 break; 10255 case EXCP_MON_TRAP: 10256 new_mode = ARM_CPU_MODE_MON; 10257 addr = 0x04; 10258 mask = CPSR_A | CPSR_I | CPSR_F; 10259 if (env->thumb) { 10260 offset = 2; 10261 } else { 10262 offset = 4; 10263 } 10264 break; 10265 default: 10266 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 10267 return; /* Never happens. Keep compiler happy. 
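(cpu_abort() does not return.)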
*/ 10268 } 10269 10270 if (new_mode == ARM_CPU_MODE_MON) { 10271 addr += env->cp15.mvbar; 10272 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 10273 /* High vectors. When enabled, base address cannot be remapped. */ 10274 addr += 0xffff0000; 10275 } else { 10276 /* 10277 * ARM v7 architectures provide a vector base address register to remap 10278 * the interrupt vector table. 10279 * This register is only followed in non-monitor mode, and is banked. 10280 * Note: only bits 31:5 are valid. 10281 */ 10282 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 10283 } 10284 10285 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 10286 env->cp15.scr_el3 &= ~SCR_NS; 10287 } 10288 10289 take_aarch32_exception(env, new_mode, mask, offset, addr); 10290 } 10291 10292 static int aarch64_regnum(CPUARMState *env, int aarch32_reg) 10293 { 10294 /* 10295 * Return the register number of the AArch64 view of the AArch32 10296 * register @aarch32_reg. The CPUARMState CPSR is assumed to still 10297 * be that of the AArch32 mode the exception came from. 10298 */ 10299 int mode = env->uncached_cpsr & CPSR_M; 10300 10301 switch (aarch32_reg) { 10302 case 0 ... 7: 10303 return aarch32_reg; 10304 case 8 ... 12: 10305 return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg; 10306 case 13: 10307 switch (mode) { 10308 case ARM_CPU_MODE_USR: 10309 case ARM_CPU_MODE_SYS: 10310 return 13; 10311 case ARM_CPU_MODE_HYP: 10312 return 15; 10313 case ARM_CPU_MODE_IRQ: 10314 return 17; 10315 case ARM_CPU_MODE_SVC: 10316 return 19; 10317 case ARM_CPU_MODE_ABT: 10318 return 21; 10319 case ARM_CPU_MODE_UND: 10320 return 23; 10321 case ARM_CPU_MODE_FIQ: 10322 return 29; 10323 default: 10324 g_assert_not_reached(); 10325 } 10326 case 14: 10327 switch (mode) { 10328 case ARM_CPU_MODE_USR: 10329 case ARM_CPU_MODE_SYS: 10330 case ARM_CPU_MODE_HYP: 10331 return 14; 10332 case ARM_CPU_MODE_IRQ: 10333 return 16; 10334 case ARM_CPU_MODE_SVC: 10335 return 18; 10336 case ARM_CPU_MODE_ABT: 10337 return 20; 10338 case ARM_CPU_MODE_UND: 10339 return 22; 10340 case ARM_CPU_MODE_FIQ: 10341 return 30; 10342 default: 10343 g_assert_not_reached(); 10344 } 10345 case 15: 10346 return 31; 10347 default: 10348 g_assert_not_reached(); 10349 } 10350 } 10351 10352 static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env) 10353 { 10354 uint32_t ret = cpsr_read(env); 10355 10356 /* Move DIT to the correct location for SPSR_ELx */ 10357 if (ret & CPSR_DIT) { 10358 ret &= ~CPSR_DIT; 10359 ret |= PSTATE_DIT; 10360 } 10361 /* Merge PSTATE.SS into SPSR_ELx */ 10362 ret |= env->pstate & PSTATE_SS; 10363 10364 return ret; 10365 } 10366 10367 static bool syndrome_is_sync_extabt(uint32_t syndrome) 10368 { 10369 /* Return true if this syndrome value is a synchronous external abort */ 10370 switch (syn_get_ec(syndrome)) { 10371 case EC_INSNABORT: 10372 case EC_INSNABORT_SAME_EL: 10373 case EC_DATAABORT: 10374 case EC_DATAABORT_SAME_EL: 10375 /* Look at fault status code for all the synchronous ext abort cases */ 10376 switch (syndrome & 0x3f) { 10377 case 0x10: 10378 case 0x13: 10379 case 0x14: 10380 case 0x15: 10381 case 0x16: 10382 case 0x17: 10383 return true; 10384 default: 10385 return false; 10386 } 10387 default: 10388 return false; 10389 } 10390 } 10391 10392 /* Handle exception entry to a target EL which is using AArch64 */ 10393 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 10394 { 10395 ARMCPU *cpu = ARM_CPU(cs); 10396 CPUARMState *env = &cpu->env; 10397 unsigned int new_el = env->exception.target_el; 10398 target_ulong 
addr = env->cp15.vbar_el[new_el]; 10399 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 10400 unsigned int old_mode; 10401 unsigned int cur_el = arm_current_el(env); 10402 int rt; 10403 10404 if (tcg_enabled()) { 10405 /* 10406 * Note that new_el can never be 0. If cur_el is 0, then 10407 * el0_a64 is is_a64(), else el0_a64 is ignored. 10408 */ 10409 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); 10410 } 10411 10412 if (cur_el < new_el) { 10413 /* 10414 * Entry vector offset depends on whether the implemented EL 10415 * immediately lower than the target level is using AArch32 or AArch64 10416 */ 10417 bool is_aa64; 10418 uint64_t hcr; 10419 10420 switch (new_el) { 10421 case 3: 10422 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 10423 break; 10424 case 2: 10425 hcr = arm_hcr_el2_eff(env); 10426 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 10427 is_aa64 = (hcr & HCR_RW) != 0; 10428 break; 10429 } 10430 /* fall through */ 10431 case 1: 10432 is_aa64 = is_a64(env); 10433 break; 10434 default: 10435 g_assert_not_reached(); 10436 } 10437 10438 if (is_aa64) { 10439 addr += 0x400; 10440 } else { 10441 addr += 0x600; 10442 } 10443 } else if (pstate_read(env) & PSTATE_SP) { 10444 addr += 0x200; 10445 } 10446 10447 switch (cs->exception_index) { 10448 case EXCP_GPC: 10449 qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n", 10450 env->cp15.mfar_el3); 10451 /* fall through */ 10452 case EXCP_PREFETCH_ABORT: 10453 case EXCP_DATA_ABORT: 10454 /* 10455 * FEAT_DoubleFault allows synchronous external aborts taken to EL3 10456 * to be taken to the SError vector entrypoint. 10457 */ 10458 if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) && 10459 syndrome_is_sync_extabt(env->exception.syndrome)) { 10460 addr += 0x180; 10461 } 10462 env->cp15.far_el[new_el] = env->exception.vaddress; 10463 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 10464 env->cp15.far_el[new_el]); 10465 /* fall through */ 10466 case EXCP_BKPT: 10467 case EXCP_UDEF: 10468 case EXCP_SWI: 10469 case EXCP_HVC: 10470 case EXCP_HYP_TRAP: 10471 case EXCP_SMC: 10472 switch (syn_get_ec(env->exception.syndrome)) { 10473 case EC_ADVSIMDFPACCESSTRAP: 10474 /* 10475 * QEMU internal FP/SIMD syndromes from AArch32 include the 10476 * TA and coproc fields which are only exposed if the exception 10477 * is taken to AArch32 Hyp mode. Mask them out to get a valid 10478 * AArch64 format syndrome. 10479 */ 10480 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); 10481 break; 10482 case EC_CP14RTTRAP: 10483 case EC_CP15RTTRAP: 10484 case EC_CP14DTTRAP: 10485 /* 10486 * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently 10487 * the raw register field from the insn; when taking this to 10488 * AArch64 we must convert it to the AArch64 view of the register 10489 * number. Notice that we read a 4-bit AArch32 register number and 10490 * write back a 5-bit AArch64 one. 
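* For example, a trapped MCR/MRC that named r13 while the CPU was in SVC mode is reported with Rt = 19 (the AArch64 view of SP_svc) in the syndrome.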
10491 */ 10492 rt = extract32(env->exception.syndrome, 5, 4); 10493 rt = aarch64_regnum(env, rt); 10494 env->exception.syndrome = deposit32(env->exception.syndrome, 10495 5, 5, rt); 10496 break; 10497 case EC_CP15RRTTRAP: 10498 case EC_CP14RRTTRAP: 10499 /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */ 10500 rt = extract32(env->exception.syndrome, 5, 4); 10501 rt = aarch64_regnum(env, rt); 10502 env->exception.syndrome = deposit32(env->exception.syndrome, 10503 5, 5, rt); 10504 rt = extract32(env->exception.syndrome, 10, 4); 10505 rt = aarch64_regnum(env, rt); 10506 env->exception.syndrome = deposit32(env->exception.syndrome, 10507 10, 5, rt); 10508 break; 10509 } 10510 env->cp15.esr_el[new_el] = env->exception.syndrome; 10511 break; 10512 case EXCP_IRQ: 10513 case EXCP_VIRQ: 10514 case EXCP_NMI: 10515 case EXCP_VINMI: 10516 addr += 0x80; 10517 break; 10518 case EXCP_FIQ: 10519 case EXCP_VFIQ: 10520 case EXCP_VFNMI: 10521 addr += 0x100; 10522 break; 10523 case EXCP_VSERR: 10524 addr += 0x180; 10525 /* Construct the SError syndrome from IDS and ISS fields. */ 10526 env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff); 10527 env->cp15.esr_el[new_el] = env->exception.syndrome; 10528 break; 10529 default: 10530 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 10531 } 10532 10533 if (is_a64(env)) { 10534 old_mode = pstate_read(env); 10535 aarch64_save_sp(env, arm_current_el(env)); 10536 env->elr_el[new_el] = env->pc; 10537 10538 if (cur_el == 1 && new_el == 1) { 10539 uint64_t hcr = arm_hcr_el2_eff(env); 10540 if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV || 10541 (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) { 10542 /* 10543 * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR 10544 * by setting M[3:2] to 0b10. 10545 * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN) 10546 * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM) 10547 */ 10548 old_mode = deposit32(old_mode, 2, 2, 2); 10549 } 10550 } 10551 } else { 10552 old_mode = cpsr_read_for_spsr_elx(env); 10553 env->elr_el[new_el] = env->regs[15]; 10554 10555 aarch64_sync_32_to_64(env); 10556 10557 env->condexec_bits = 0; 10558 } 10559 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; 10560 10561 qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode); 10562 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 10563 env->elr_el[new_el]); 10564 10565 if (cpu_isar_feature(aa64_pan, cpu)) { 10566 /* The value of PSTATE.PAN is normally preserved, except when ... */ 10567 new_mode |= old_mode & PSTATE_PAN; 10568 switch (new_el) { 10569 case 2: 10570 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */ 10571 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) 10572 != (HCR_E2H | HCR_TGE)) { 10573 break; 10574 } 10575 /* fall through */ 10576 case 1: 10577 /* ... the target is EL1 ... */ 10578 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. 
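(This mirrors the AArch32 CPSR.PAN handling in take_aarch32_exception() above.)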
*/ 10579 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { 10580 new_mode |= PSTATE_PAN; 10581 } 10582 break; 10583 } 10584 } 10585 if (cpu_isar_feature(aa64_mte, cpu)) { 10586 new_mode |= PSTATE_TCO; 10587 } 10588 10589 if (cpu_isar_feature(aa64_ssbs, cpu)) { 10590 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) { 10591 new_mode |= PSTATE_SSBS; 10592 } else { 10593 new_mode &= ~PSTATE_SSBS; 10594 } 10595 } 10596 10597 if (cpu_isar_feature(aa64_nmi, cpu)) { 10598 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) { 10599 new_mode |= PSTATE_ALLINT; 10600 } else { 10601 new_mode &= ~PSTATE_ALLINT; 10602 } 10603 } 10604 10605 pstate_write(env, PSTATE_DAIF | new_mode); 10606 env->aarch64 = true; 10607 aarch64_restore_sp(env, new_el); 10608 10609 if (tcg_enabled()) { 10610 helper_rebuild_hflags_a64(env, new_el); 10611 } 10612 10613 env->pc = addr; 10614 10615 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 10616 new_el, env->pc, pstate_read(env)); 10617 } 10618 10619 /* 10620 * Do semihosting call and set the appropriate return value. All the 10621 * permission and validity checks have been done at translate time. 10622 * 10623 * We only see semihosting exceptions in TCG, as they are not 10624 * trapped to the hypervisor in KVM. 10625 */ 10626 #ifdef CONFIG_TCG 10627 static void tcg_handle_semihosting(CPUState *cs) 10628 { 10629 ARMCPU *cpu = ARM_CPU(cs); 10630 CPUARMState *env = &cpu->env; 10631 10632 if (is_a64(env)) { 10633 qemu_log_mask(CPU_LOG_INT, 10634 "...handling as semihosting call 0x%" PRIx64 "\n", 10635 env->xregs[0]); 10636 do_common_semihosting(cs); 10637 env->pc += 4; 10638 } else { 10639 qemu_log_mask(CPU_LOG_INT, 10640 "...handling as semihosting call 0x%x\n", 10641 env->regs[0]); 10642 do_common_semihosting(cs); 10643 env->regs[15] += env->thumb ? 2 : 4; 10644 } 10645 } 10646 #endif 10647 10648 /* 10649 * Handle a CPU exception for A and R profile CPUs. 10650 * Do any appropriate logging, handle PSCI calls, and then hand off 10651 * to the AArch64-entry or AArch32-entry function depending on the 10652 * target exception level's register width. 10653 * 10654 * Note: this is used by TCG (as the do_interrupt tcg op), and also 10655 * by KVM to re-inject guest debug exceptions and to 10656 * inject a Synchronous-External-Abort. 10657 */ 10658 void arm_cpu_do_interrupt(CPUState *cs) 10659 { 10660 ARMCPU *cpu = ARM_CPU(cs); 10661 CPUARMState *env = &cpu->env; 10662 unsigned int new_el = env->exception.target_el; 10663 10664 assert(!arm_feature(env, ARM_FEATURE_M)); 10665 10666 arm_log_exception(cs); 10667 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 10668 new_el); 10669 if (qemu_loglevel_mask(CPU_LOG_INT) 10670 && !excp_is_internal(cs->exception_index)) { 10671 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 10672 syn_get_ec(env->exception.syndrome), 10673 env->exception.syndrome); 10674 } 10675 10676 if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) { 10677 arm_handle_psci_call(cpu); 10678 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 10679 return; 10680 } 10681 10682 /* 10683 * Semihosting semantics depend on the register width of the code 10684 * that caused the exception, not the target exception level, so 10685 * it must be handled here.
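* (The two ABIs also differ in detail: the AArch32 call number is read from r0 and the return address is advanced by 2 or 4 bytes depending on Thumb state, while AArch64 uses x0 and a fixed 4-byte advance, as tcg_handle_semihosting() above shows.)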
10686 */ 10687 #ifdef CONFIG_TCG 10688 if (cs->exception_index == EXCP_SEMIHOST) { 10689 tcg_handle_semihosting(cs); 10690 return; 10691 } 10692 #endif 10693 10694 /* 10695 * Hooks may change global state so BQL should be held, also the 10696 * BQL needs to be held for any modification of 10697 * cs->interrupt_request. 10698 */ 10699 g_assert(bql_locked()); 10700 10701 arm_call_pre_el_change_hook(cpu); 10702 10703 assert(!excp_is_internal(cs->exception_index)); 10704 if (arm_el_is_aa64(env, new_el)) { 10705 arm_cpu_do_interrupt_aarch64(cs); 10706 } else { 10707 arm_cpu_do_interrupt_aarch32(cs); 10708 } 10709 10710 arm_call_el_change_hook(cpu); 10711 10712 if (!kvm_enabled()) { 10713 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 10714 } 10715 } 10716 #endif /* !CONFIG_USER_ONLY */ 10717 10718 uint64_t arm_sctlr(CPUARMState *env, int el) 10719 { 10720 /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0 */ 10721 if (el == 0) { 10722 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); 10723 switch (mmu_idx) { 10724 case ARMMMUIdx_E20_0: 10725 el = 2; 10726 break; 10727 case ARMMMUIdx_E30_0: 10728 el = 3; 10729 break; 10730 default: 10731 el = 1; 10732 break; 10733 } 10734 } 10735 return env->cp15.sctlr_el[el]; 10736 } 10737 10738 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) 10739 { 10740 if (regime_has_2_ranges(mmu_idx)) { 10741 return extract64(tcr, 37, 2); 10742 } else if (regime_is_stage2(mmu_idx)) { 10743 return 0; /* VTCR_EL2 */ 10744 } else { 10745 /* Replicate the single TBI bit so we always have 2 bits. */ 10746 return extract32(tcr, 20, 1) * 3; 10747 } 10748 } 10749 10750 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) 10751 { 10752 if (regime_has_2_ranges(mmu_idx)) { 10753 return extract64(tcr, 51, 2); 10754 } else if (regime_is_stage2(mmu_idx)) { 10755 return 0; /* VTCR_EL2 */ 10756 } else { 10757 /* Replicate the single TBID bit so we always have 2 bits. */ 10758 return extract32(tcr, 29, 1) * 3; 10759 } 10760 } 10761 10762 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx) 10763 { 10764 if (regime_has_2_ranges(mmu_idx)) { 10765 return extract64(tcr, 57, 2); 10766 } else { 10767 /* Replicate the single TCMA bit so we always have 2 bits. */ 10768 return extract32(tcr, 30, 1) * 3; 10769 } 10770 } 10771 10772 static ARMGranuleSize tg0_to_gran_size(int tg) 10773 { 10774 switch (tg) { 10775 case 0: 10776 return Gran4K; 10777 case 1: 10778 return Gran64K; 10779 case 2: 10780 return Gran16K; 10781 default: 10782 return GranInvalid; 10783 } 10784 } 10785 10786 static ARMGranuleSize tg1_to_gran_size(int tg) 10787 { 10788 switch (tg) { 10789 case 1: 10790 return Gran16K; 10791 case 2: 10792 return Gran4K; 10793 case 3: 10794 return Gran64K; 10795 default: 10796 return GranInvalid; 10797 } 10798 } 10799 10800 static inline bool have4k(ARMCPU *cpu, bool stage2) 10801 { 10802 return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu) 10803 : cpu_isar_feature(aa64_tgran4, cpu); 10804 } 10805 10806 static inline bool have16k(ARMCPU *cpu, bool stage2) 10807 { 10808 return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu) 10809 : cpu_isar_feature(aa64_tgran16, cpu); 10810 } 10811 10812 static inline bool have64k(ARMCPU *cpu, bool stage2) 10813 { 10814 return stage2 ? 
cpu_isar_feature(aa64_tgran64_2, cpu) 10815 : cpu_isar_feature(aa64_tgran64, cpu); 10816 } 10817 10818 static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran, 10819 bool stage2) 10820 { 10821 switch (gran) { 10822 case Gran4K: 10823 if (have4k(cpu, stage2)) { 10824 return gran; 10825 } 10826 break; 10827 case Gran16K: 10828 if (have16k(cpu, stage2)) { 10829 return gran; 10830 } 10831 break; 10832 case Gran64K: 10833 if (have64k(cpu, stage2)) { 10834 return gran; 10835 } 10836 break; 10837 case GranInvalid: 10838 break; 10839 } 10840 /* 10841 * If the guest selects a granule size that isn't implemented, 10842 * the architecture requires that we behave as if it selected one 10843 * that is (with an IMPDEF choice of which one to pick). We choose 10844 * to implement the smallest supported granule size. 10845 */ 10846 if (have4k(cpu, stage2)) { 10847 return Gran4K; 10848 } 10849 if (have16k(cpu, stage2)) { 10850 return Gran16K; 10851 } 10852 assert(have64k(cpu, stage2)); 10853 return Gran64K; 10854 } 10855 10856 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, 10857 ARMMMUIdx mmu_idx, bool data, 10858 bool el1_is_aa32) 10859 { 10860 uint64_t tcr = regime_tcr(env, mmu_idx); 10861 bool epd, hpd, tsz_oob, ds, ha, hd; 10862 int select, tsz, tbi, max_tsz, min_tsz, ps, sh; 10863 ARMGranuleSize gran; 10864 ARMCPU *cpu = env_archcpu(env); 10865 bool stage2 = regime_is_stage2(mmu_idx); 10866 10867 if (!regime_has_2_ranges(mmu_idx)) { 10868 select = 0; 10869 tsz = extract32(tcr, 0, 6); 10870 gran = tg0_to_gran_size(extract32(tcr, 14, 2)); 10871 if (stage2) { 10872 /* VTCR_EL2 */ 10873 hpd = false; 10874 } else { 10875 hpd = extract32(tcr, 24, 1); 10876 } 10877 epd = false; 10878 sh = extract32(tcr, 12, 2); 10879 ps = extract32(tcr, 16, 3); 10880 ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu); 10881 hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu); 10882 ds = extract64(tcr, 32, 1); 10883 } else { 10884 bool e0pd; 10885 10886 /* 10887 * Bit 55 is always between the two regions, and is canonical for 10888 * determining if address tagging is enabled. 10889 */ 10890 select = extract64(va, 55, 1); 10891 if (!select) { 10892 tsz = extract32(tcr, 0, 6); 10893 gran = tg0_to_gran_size(extract32(tcr, 14, 2)); 10894 epd = extract32(tcr, 7, 1); 10895 sh = extract32(tcr, 12, 2); 10896 hpd = extract64(tcr, 41, 1); 10897 e0pd = extract64(tcr, 55, 1); 10898 } else { 10899 tsz = extract32(tcr, 16, 6); 10900 gran = tg1_to_gran_size(extract32(tcr, 30, 2)); 10901 epd = extract32(tcr, 23, 1); 10902 sh = extract32(tcr, 28, 2); 10903 hpd = extract64(tcr, 42, 1); 10904 e0pd = extract64(tcr, 56, 1); 10905 } 10906 ps = extract64(tcr, 32, 3); 10907 ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu); 10908 hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu); 10909 ds = extract64(tcr, 59, 1); 10910 10911 if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) && 10912 regime_is_user(env, mmu_idx)) { 10913 epd = true; 10914 } 10915 } 10916 10917 gran = sanitize_gran_size(cpu, gran, stage2); 10918 10919 if (cpu_isar_feature(aa64_st, cpu)) { 10920 max_tsz = 48 - (gran == Gran64K); 10921 } else { 10922 max_tsz = 39; 10923 } 10924 10925 /* 10926 * DS is RES0 unless FEAT_LPA2 is supported for the given page size; 10927 * adjust the effective value of DS, as documented. 
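* (FEAT_LPA2 only applies to the 4K and 16K granules; with the 64K granule the extended VA range comes from FEAT_LVA instead, which is why min_tsz is lowered to 12 for Gran64K below without consulting DS.)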
10928 */ 10929 min_tsz = 16; 10930 if (gran == Gran64K) { 10931 if (cpu_isar_feature(aa64_lva, cpu)) { 10932 min_tsz = 12; 10933 } 10934 ds = false; 10935 } else if (ds) { 10936 if (regime_is_stage2(mmu_idx)) { 10937 if (gran == Gran16K) { 10938 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu); 10939 } else { 10940 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu); 10941 } 10942 } else { 10943 if (gran == Gran16K) { 10944 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu); 10945 } else { 10946 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu); 10947 } 10948 } 10949 if (ds) { 10950 min_tsz = 12; 10951 } 10952 } 10953 10954 if (stage2 && el1_is_aa32) { 10955 /* 10956 * For AArch32 EL1 the min txsz (and thus max IPA size) requirements 10957 * are loosened: a configured IPA of 40 bits is permitted even if 10958 * the implemented PA is less than that (and so a 40 bit IPA would 10959 * fault for an AArch64 EL1). See R_DTLMN. 10960 */ 10961 min_tsz = MIN(min_tsz, 24); 10962 } 10963 10964 if (tsz > max_tsz) { 10965 tsz = max_tsz; 10966 tsz_oob = true; 10967 } else if (tsz < min_tsz) { 10968 tsz = min_tsz; 10969 tsz_oob = true; 10970 } else { 10971 tsz_oob = false; 10972 } 10973 10974 /* Present TBI as a composite with TBID. */ 10975 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 10976 if (!data) { 10977 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 10978 } 10979 tbi = (tbi >> select) & 1; 10980 10981 return (ARMVAParameters) { 10982 .tsz = tsz, 10983 .ps = ps, 10984 .sh = sh, 10985 .select = select, 10986 .tbi = tbi, 10987 .epd = epd, 10988 .hpd = hpd, 10989 .tsz_oob = tsz_oob, 10990 .ds = ds, 10991 .ha = ha, 10992 .hd = ha && hd, 10993 .gran = gran, 10994 }; 10995 } 10996 10997 10998 /* 10999 * Return the exception level to which FP-disabled exceptions should 11000 * be taken, or 0 if FP is enabled. 11001 */ 11002 int fp_exception_el(CPUARMState *env, int cur_el) 11003 { 11004 #ifndef CONFIG_USER_ONLY 11005 uint64_t hcr_el2; 11006 11007 /* 11008 * CPACR and the CPTR registers don't exist before v6, so FP is 11009 * always accessible 11010 */ 11011 if (!arm_feature(env, ARM_FEATURE_V6)) { 11012 return 0; 11013 } 11014 11015 if (arm_feature(env, ARM_FEATURE_M)) { 11016 /* CPACR can cause a NOCP UsageFault taken to current security state */ 11017 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) { 11018 return 1; 11019 } 11020 11021 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) { 11022 if (!extract32(env->v7m.nsacr, 10, 1)) { 11023 /* FP insns cause a NOCP UsageFault taken to Secure */ 11024 return 3; 11025 } 11026 } 11027 11028 return 0; 11029 } 11030 11031 hcr_el2 = arm_hcr_el2_eff(env); 11032 11033 /* 11034 * The CPACR controls traps to EL1, or PL1 if we're 32 bit: 11035 * 0, 2 : trap EL0 and EL1/PL1 accesses 11036 * 1 : trap only EL0 accesses 11037 * 3 : trap no accesses 11038 * This register is ignored if E2H+TGE are both set. 11039 */ 11040 if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 11041 int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN); 11042 11043 switch (fpen) { 11044 case 1: 11045 if (cur_el != 0) { 11046 break; 11047 } 11048 /* fall through */ 11049 case 0: 11050 case 2: 11051 /* Trap from Secure PL0 or PL1 to Secure PL1. */ 11052 if (!arm_el_is_aa64(env, 3) 11053 && (cur_el == 3 || arm_is_secure_below_el3(env))) { 11054 return 3; 11055 } 11056 if (cur_el <= 1) { 11057 return 1; 11058 } 11059 break; 11060 } 11061 } 11062 11063 /* 11064 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode 11065 * to control non-secure access to the FPU. 
It doesn't have any 11066 * effect if EL3 is AArch64 or if EL3 doesn't exist at all. 11067 */ 11068 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 11069 cur_el <= 2 && !arm_is_secure_below_el3(env))) { 11070 if (!extract32(env->cp15.nsacr, 10, 1)) { 11071 /* FP insns act as UNDEF */ 11072 return cur_el == 2 ? 2 : 1; 11073 } 11074 } 11075 11076 /* 11077 * CPTR_EL2 is present in v7VE or v8, and changes format 11078 * with HCR_EL2.E2H (regardless of TGE). 11079 */ 11080 if (cur_el <= 2) { 11081 if (hcr_el2 & HCR_E2H) { 11082 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) { 11083 case 1: 11084 if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) { 11085 break; 11086 } 11087 /* fall through */ 11088 case 0: 11089 case 2: 11090 return 2; 11091 } 11092 } else if (arm_is_el2_enabled(env)) { 11093 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) { 11094 return 2; 11095 } 11096 } 11097 } 11098 11099 /* CPTR_EL3 : present in v8 */ 11100 if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) { 11101 /* Trap all FP ops to EL3 */ 11102 return 3; 11103 } 11104 #endif 11105 return 0; 11106 } 11107 11108 /* Return the exception level we're running at if this is our mmu_idx */ 11109 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) 11110 { 11111 if (mmu_idx & ARM_MMU_IDX_M) { 11112 return mmu_idx & ARM_MMU_IDX_M_PRIV; 11113 } 11114 11115 switch (mmu_idx) { 11116 case ARMMMUIdx_E10_0: 11117 case ARMMMUIdx_E20_0: 11118 case ARMMMUIdx_E30_0: 11119 return 0; 11120 case ARMMMUIdx_E10_1: 11121 case ARMMMUIdx_E10_1_PAN: 11122 return 1; 11123 case ARMMMUIdx_E2: 11124 case ARMMMUIdx_E20_2: 11125 case ARMMMUIdx_E20_2_PAN: 11126 return 2; 11127 case ARMMMUIdx_E3: 11128 case ARMMMUIdx_E30_3_PAN: 11129 return 3; 11130 default: 11131 g_assert_not_reached(); 11132 } 11133 } 11134 11135 #ifndef CONFIG_TCG 11136 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) 11137 { 11138 g_assert_not_reached(); 11139 } 11140 #endif 11141 11142 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) 11143 { 11144 ARMMMUIdx idx; 11145 uint64_t hcr; 11146 11147 if (arm_feature(env, ARM_FEATURE_M)) { 11148 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); 11149 } 11150 11151 /* See ARM pseudo-function ELIsInHost. */ 11152 switch (el) { 11153 case 0: 11154 hcr = arm_hcr_el2_eff(env); 11155 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 11156 idx = ARMMMUIdx_E20_0; 11157 } else if (arm_is_secure_below_el3(env) && 11158 !arm_el_is_aa64(env, 3)) { 11159 idx = ARMMMUIdx_E30_0; 11160 } else { 11161 idx = ARMMMUIdx_E10_0; 11162 } 11163 break; 11164 case 1: 11165 if (arm_pan_enabled(env)) { 11166 idx = ARMMMUIdx_E10_1_PAN; 11167 } else { 11168 idx = ARMMMUIdx_E10_1; 11169 } 11170 break; 11171 case 2: 11172 /* Note that TGE does not apply at EL2. */ 11173 if (arm_hcr_el2_eff(env) & HCR_E2H) { 11174 if (arm_pan_enabled(env)) { 11175 idx = ARMMMUIdx_E20_2_PAN; 11176 } else { 11177 idx = ARMMMUIdx_E20_2; 11178 } 11179 } else { 11180 idx = ARMMMUIdx_E2; 11181 } 11182 break; 11183 case 3: 11184 if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) { 11185 return ARMMMUIdx_E30_3_PAN; 11186 } 11187 return ARMMMUIdx_E3; 11188 default: 11189 g_assert_not_reached(); 11190 } 11191 11192 return idx; 11193 } 11194 11195 ARMMMUIdx arm_mmu_idx(CPUARMState *env) 11196 { 11197 return arm_mmu_idx_el(env, arm_current_el(env)); 11198 } 11199 11200 static bool mve_no_pred(CPUARMState *env) 11201 { 11202 /* 11203 * Return true if there is definitely no predication of MVE 11204 * instructions by VPR or LTPSIZE. 
(Returning false even if there 11205 * isn't any predication is OK; generated code will just be 11206 * a little worse.) 11207 * If the CPU does not implement MVE then this TB flag is always 0. 11208 * 11209 * NOTE: if you change this logic, the "recalculate s->mve_no_pred" 11210 * logic in gen_update_fp_context() needs to be updated to match. 11211 * 11212 * We do not include the effect of the ECI bits here -- they are 11213 * tracked in other TB flags. This simplifies the logic for 11214 * "when did we emit code that changes the MVE_NO_PRED TB flag 11215 * and thus need to end the TB?". 11216 */ 11217 if (cpu_isar_feature(aa32_mve, env_archcpu(env))) { 11218 return false; 11219 } 11220 if (env->v7m.vpr) { 11221 return false; 11222 } 11223 if (env->v7m.ltpsize < 4) { 11224 return false; 11225 } 11226 return true; 11227 } 11228 11229 void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc, 11230 uint64_t *cs_base, uint32_t *pflags) 11231 { 11232 CPUARMTBFlags flags; 11233 11234 assert_hflags_rebuild_correctly(env); 11235 flags = env->hflags; 11236 11237 if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) { 11238 *pc = env->pc; 11239 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { 11240 DP_TBFLAG_A64(flags, BTYPE, env->btype); 11241 } 11242 } else { 11243 *pc = env->regs[15]; 11244 11245 if (arm_feature(env, ARM_FEATURE_M)) { 11246 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 11247 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) 11248 != env->v7m.secure) { 11249 DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1); 11250 } 11251 11252 if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && 11253 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || 11254 (env->v7m.secure && 11255 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { 11256 /* 11257 * ASPEN is set, but FPCA/SFPA indicate that there is no 11258 * active FP context; we must create a new FP context before 11259 * executing any FP insn. 11260 */ 11261 DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1); 11262 } 11263 11264 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; 11265 if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { 11266 DP_TBFLAG_M32(flags, LSPACT, 1); 11267 } 11268 11269 if (mve_no_pred(env)) { 11270 DP_TBFLAG_M32(flags, MVE_NO_PRED, 1); 11271 } 11272 } else { 11273 /* 11274 * Note that XSCALE_CPAR shares bits with VECSTRIDE. 11275 * Note that VECLEN+VECSTRIDE are RES0 for M-profile. 11276 */ 11277 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 11278 DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar); 11279 } else { 11280 DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len); 11281 DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride); 11282 } 11283 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { 11284 DP_TBFLAG_A32(flags, VFPEN, 1); 11285 } 11286 } 11287 11288 DP_TBFLAG_AM32(flags, THUMB, env->thumb); 11289 DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits); 11290 } 11291 11292 /* 11293 * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine 11294 * states defined in the ARM ARM for software singlestep: 11295 * SS_ACTIVE PSTATE.SS State 11296 * 0 x Inactive (the TB flag for SS is always 0) 11297 * 1 0 Active-pending 11298 * 1 1 Active-not-pending 11299 * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB. 
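* (So the TB flag PSTATE__SS distinguishes Active-not-pending from Active-pending whenever SS_ACTIVE is set.)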
11300 */ 11301 if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) { 11302 DP_TBFLAG_ANY(flags, PSTATE__SS, 1); 11303 } 11304 11305 *pflags = flags.flags; 11306 *cs_base = flags.flags2; 11307 } 11308 11309 #ifdef TARGET_AARCH64 11310 /* 11311 * The manual says that when SVE is enabled and VQ is widened the 11312 * implementation is allowed to zero the previously inaccessible 11313 * portion of the registers. The corollary to that is that when 11314 * SVE is enabled and VQ is narrowed we are also allowed to zero 11315 * the now inaccessible portion of the registers. 11316 * 11317 * The intent of this is that no predicate bit beyond VQ is ever set. 11318 * Which means that some operations on predicate registers themselves 11319 * may operate on full uint64_t or even unrolled across the maximum 11320 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally 11321 * may well be cheaper than conditionals to restrict the operation 11322 * to the relevant portion of a uint16_t[16]. 11323 */ 11324 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) 11325 { 11326 int i, j; 11327 uint64_t pmask; 11328 11329 assert(vq >= 1 && vq <= ARM_MAX_VQ); 11330 assert(vq <= env_archcpu(env)->sve_max_vq); 11331 11332 /* Zap the high bits of the zregs. */ 11333 for (i = 0; i < 32; i++) { 11334 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); 11335 } 11336 11337 /* Zap the high bits of the pregs and ffr. */ 11338 pmask = 0; 11339 if (vq & 3) { 11340 pmask = ~(-1ULL << (16 * (vq & 3))); 11341 } 11342 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) { 11343 for (i = 0; i < 17; ++i) { 11344 env->vfp.pregs[i].p[j] &= pmask; 11345 } 11346 pmask = 0; 11347 } 11348 } 11349 11350 static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm) 11351 { 11352 int exc_el; 11353 11354 if (sm) { 11355 exc_el = sme_exception_el(env, el); 11356 } else { 11357 exc_el = sve_exception_el(env, el); 11358 } 11359 if (exc_el) { 11360 return 0; /* disabled */ 11361 } 11362 return sve_vqm1_for_el_sm(env, el, sm); 11363 } 11364 11365 /* 11366 * Notice a change in SVE vector size when changing EL. 11367 */ 11368 void aarch64_sve_change_el(CPUARMState *env, int old_el, 11369 int new_el, bool el0_a64) 11370 { 11371 ARMCPU *cpu = env_archcpu(env); 11372 int old_len, new_len; 11373 bool old_a64, new_a64, sm; 11374 11375 /* Nothing to do if no SVE. */ 11376 if (!cpu_isar_feature(aa64_sve, cpu)) { 11377 return; 11378 } 11379 11380 /* Nothing to do if FP is disabled in either EL. */ 11381 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) { 11382 return; 11383 } 11384 11385 old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; 11386 new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; 11387 11388 /* 11389 * Both AArch64.TakeException and AArch64.ExceptionReturn 11390 * invoke ResetSVEState when taking an exception from, or 11391 * returning to, AArch32 state when PSTATE.SM is enabled. 11392 */ 11393 sm = FIELD_EX64(env->svcr, SVCR, SM); 11394 if (old_a64 != new_a64 && sm) { 11395 arm_reset_sve_state(env); 11396 return; 11397 } 11398 11399 /* 11400 * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped 11401 * at ELx, or not available because the EL is in AArch32 state, then 11402 * for all purposes other than a direct read, the ZCR_ELx.LEN field 11403 * has an effective value of 0". 11404 * 11405 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). 11406 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition 11407 * from EL2->EL1. 
Thus we go ahead and narrow when entering aa32 so that 11408 * we already have the correct register contents when encountering the 11409 * vq0->vq0 transition between EL0->EL1. 11410 */ 11411 old_len = new_len = 0; 11412 if (old_a64) { 11413 old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm); 11414 } 11415 if (new_a64) { 11416 new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm); 11417 } 11418 11419 /* When changing vector length, clear inaccessible state. */ 11420 if (new_len < old_len) { 11421 aarch64_sve_narrow_vq(env, new_len + 1); 11422 } 11423 } 11424 #endif 11425 11426 #ifndef CONFIG_USER_ONLY 11427 ARMSecuritySpace arm_security_space(CPUARMState *env) 11428 { 11429 if (arm_feature(env, ARM_FEATURE_M)) { 11430 return arm_secure_to_space(env->v7m.secure); 11431 } 11432 11433 /* 11434 * If EL3 is not supported then the secure state is implementation 11435 * defined, in which case QEMU defaults to non-secure. 11436 */ 11437 if (!arm_feature(env, ARM_FEATURE_EL3)) { 11438 return ARMSS_NonSecure; 11439 } 11440 11441 /* Check for AArch64 EL3 or AArch32 Mon. */ 11442 if (is_a64(env)) { 11443 if (extract32(env->pstate, 2, 2) == 3) { 11444 if (cpu_isar_feature(aa64_rme, env_archcpu(env))) { 11445 return ARMSS_Root; 11446 } else { 11447 return ARMSS_Secure; 11448 } 11449 } 11450 } else { 11451 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 11452 return ARMSS_Secure; 11453 } 11454 } 11455 11456 return arm_security_space_below_el3(env); 11457 } 11458 11459 ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env) 11460 { 11461 assert(!arm_feature(env, ARM_FEATURE_M)); 11462 11463 /* 11464 * If EL3 is not supported then the secure state is implementation 11465 * defined, in which case QEMU defaults to non-secure. 11466 */ 11467 if (!arm_feature(env, ARM_FEATURE_EL3)) { 11468 return ARMSS_NonSecure; 11469 } 11470 11471 /* 11472 * Note NSE cannot be set without RME, and NSE & !NS is Reserved. 11473 * Ignoring NSE when !NS retains consistency without having to 11474 * modify other predicates. 11475 */ 11476 if (!(env->cp15.scr_el3 & SCR_NS)) { 11477 return ARMSS_Secure; 11478 } else if (env->cp15.scr_el3 & SCR_NSE) { 11479 return ARMSS_Realm; 11480 } else { 11481 return ARMSS_NonSecure; 11482 } 11483 } 11484 #endif /* !CONFIG_USER_ONLY */ 11485