Lines matching +full:attr +full:-cnt +full:-name
6 * SPDX-License-Identifier: GPL-2.0-or-later
14 #include "cpu-features.h"
15 #include "exec/page-protection.h"
16 #include "exec/mmap-lock.h"
17 #include "qemu/main-loop.h"
20 #include "qemu/qemu-print.h"
22 #include "exec/translation-block.h"
24 #include "system/cpu-timers.h"
29 #include "qemu/guest-random.h"
33 #include "semihosting/common-semi.h"
39 #include "exec/helper-proto.h.inc"
47 assert(ri->fieldoffset); in raw_read()
57 assert(ri->fieldoffset); in raw_write()
67 return (char *)env + ri->fieldoffset; in raw_ptr()
73 if (ri->type & ARM_CP_CONST) { in read_raw_cp_reg()
74 return ri->resetvalue; in read_raw_cp_reg()
75 } else if (ri->raw_readfn) { in read_raw_cp_reg()
76 return ri->raw_readfn(env, ri); in read_raw_cp_reg()
77 } else if (ri->readfn) { in read_raw_cp_reg()
78 return ri->readfn(env, ri); in read_raw_cp_reg()
89 * Note that constant registers are treated as write-ignored; the in write_raw_cp_reg()
93 if (ri->type & ARM_CP_CONST) { in write_raw_cp_reg()
95 } else if (ri->raw_writefn) { in write_raw_cp_reg()
96 ri->raw_writefn(env, ri, v); in write_raw_cp_reg()
97 } else if (ri->writefn) { in write_raw_cp_reg()
98 ri->writefn(env, ri, v); in write_raw_cp_reg()
118 if ((ri->type & ARM_CP_CONST) || in raw_accessors_invalid()
119 ri->fieldoffset || in raw_accessors_invalid()
120 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) { in raw_accessors_invalid()
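The raw accessors above form a fixed dispatch chain: ARM_CP_CONST registers short-circuit to resetvalue (reads) or are ignored (writes), a raw_readfn/raw_writefn hook takes precedence over the ordinary readfn/writefn, and the final fallback is the fieldoffset load/store done by raw_read()/raw_write(). A minimal standalone sketch of the read side, with simplified stand-in types (not QEMU's):

    #include <stddef.h>
    #include <stdint.h>

    #define CP_CONST 0x1                /* stand-in for ARM_CP_CONST */

    typedef struct Reg Reg;
    struct Reg {
        uint32_t type;
        uint64_t resetvalue;
        size_t fieldoffset;             /* offset of the backing field in env */
        uint64_t (*raw_readfn)(void *env, const Reg *ri);
        uint64_t (*readfn)(void *env, const Reg *ri);
    };

    /* Same priority order as read_raw_cp_reg(): constant value first,
     * then the raw hook, then the ordinary hook, else the raw field. */
    static uint64_t sketch_read(void *env, const Reg *ri)
    {
        if (ri->type & CP_CONST) {
            return ri->resetvalue;
        } else if (ri->raw_readfn) {
            return ri->raw_readfn(env, ri);
        } else if (ri->readfn) {
            return ri->readfn(env, ri);
        }
        return *(uint64_t *)((char *)env + ri->fieldoffset);
    }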
128 /* Write the coprocessor state from cpu->env to the (index,value) list. */ in write_cpustate_to_list()
132 for (i = 0; i < cpu->cpreg_array_len; i++) { in write_cpustate_to_list()
133 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); in write_cpustate_to_list()
137 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); in write_cpustate_to_list()
142 if (ri->type & ARM_CP_NO_RAW) { in write_cpustate_to_list()
146 newval = read_raw_cp_reg(&cpu->env, ri); in write_cpustate_to_list()
149 * Only sync if the previous list->cpustate sync succeeded. in write_cpustate_to_list()
154 uint64_t oldval = cpu->cpreg_values[i]; in write_cpustate_to_list()
160 write_raw_cp_reg(&cpu->env, ri, oldval); in write_cpustate_to_list()
161 if (read_raw_cp_reg(&cpu->env, ri) != oldval) { in write_cpustate_to_list()
165 write_raw_cp_reg(&cpu->env, ri, newval); in write_cpustate_to_list()
167 cpu->cpreg_values[i] = newval; in write_cpustate_to_list()
177 for (i = 0; i < cpu->cpreg_array_len; i++) { in write_list_to_cpustate()
178 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); in write_list_to_cpustate()
179 uint64_t v = cpu->cpreg_values[i]; in write_list_to_cpustate()
182 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); in write_list_to_cpustate()
187 if (ri->type & ARM_CP_NO_RAW) { in write_list_to_cpustate()
192 * (to catch read-only registers and partially read-only in write_list_to_cpustate()
195 write_raw_cp_reg(&cpu->env, ri, v); in write_list_to_cpustate()
196 if (read_raw_cp_reg(&cpu->env, ri) != v) { in write_list_to_cpustate()
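write_list_to_cpustate() above relies on a write-then-read-back probe: if reading the register back does not return the value just written, the register is (partially) read-only and the sync is reported as failed. A toy model of that probe, assuming a register whose low byte is wired read-only:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t backing = 0x11;     /* low byte is read-only */

    static void reg_write(uint64_t v) { backing = (v & ~0xffull) | (backing & 0xff); }
    static uint64_t reg_read(void)    { return backing; }

    /* Mirrors the pattern above: success means the value round-trips. */
    static bool sync_one(uint64_t v)
    {
        reg_write(v);
        return reg_read() == v;
    }

    int main(void)
    {
        assert(sync_one(0x2211));       /* low byte happens to match */
        assert(!sync_one(0x3344));      /* low byte stays 0x11: detected */
        return 0;
    }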
207 const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); in add_cpreg_to_list()
209 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { in add_cpreg_to_list()
210 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); in add_cpreg_to_list()
212 cpu->cpreg_array_len++; in add_cpreg_to_list()
221 ri = g_hash_table_lookup(cpu->cp_regs, key); in count_cpreg()
223 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { in count_cpreg()
224 cpu->cpreg_array_len++; in count_cpreg()
237 return -1; in cpreg_key_compare()
251 keys = g_hash_table_get_keys(cpu->cp_regs); in init_cpreg_list()
254 cpu->cpreg_array_len = 0; in init_cpreg_list()
258 arraylen = cpu->cpreg_array_len; in init_cpreg_list()
259 cpu->cpreg_indexes = g_new(uint64_t, arraylen); in init_cpreg_list()
260 cpu->cpreg_values = g_new(uint64_t, arraylen); in init_cpreg_list()
261 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); in init_cpreg_list()
262 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); in init_cpreg_list()
263 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; in init_cpreg_list()
264 cpu->cpreg_array_len = 0; in init_cpreg_list()
268 assert(cpu->cpreg_array_len == arraylen); in init_cpreg_list()
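init_cpreg_list() above uses the classic two-pass shape: count_cpreg() first sizes the arrays by bumping cpreg_array_len, the arrays are allocated, the counter is reset, and add_cpreg_to_list() repopulates it, with the final assert guarding that both passes saw the same set. Reduced to its skeleton (illustrative filter and data, not QEMU's):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    static int wanted(uint32_t flags) { return !(flags & 1); } /* stand-in filter */

    static uint64_t *build_list(const uint32_t *flags, int n, int *out_len)
    {
        int len = 0, i;
        for (i = 0; i < n; i++) {       /* pass 1: just count */
            len += wanted(flags[i]);
        }
        uint64_t *idx = calloc(len, sizeof(*idx));
        *out_len = 0;
        for (i = 0; i < n; i++) {       /* pass 2: populate */
            if (wanted(flags[i])) {
                idx[(*out_len)++] = i;
            }
        }
        assert(*out_len == len);        /* the two passes must agree */
        return idx;
    }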
279 return env->pstate & PSTATE_PAN; in arm_pan_enabled()
281 return env->uncached_cpsr & CPSR_PAN; in arm_pan_enabled()
300 * Some secure-only AArch32 registers trap to EL3 if used from
301 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
313 if (env->cp15.scr_el3 & SCR_EEL2) { in access_trap_aa32s_el1()
335 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { in access_tpm()
432 * Define the secure and non-secure FCSE identifier CP registers
435 * v8 EL1 version of the register so the non-secure instance stands alone.
437 { .name = "FCSEIDR",
442 { .name = "FCSEIDR_S",
448 * Define the secure and non-secure context identifier CP registers
451 * non-secure case, the 32-bit register will have reset and migration
452 * disabled during registration as it is handled by the 64-bit instance.
454 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
462 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
476 { .name = "DACR",
486 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
488 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
490 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
492 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
495 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
502 * Not all pre-v6 cores implemented this WFI, so this is slightly
503 * over-broad.
505 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
511 * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
514 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
521 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
524 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
528 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
532 * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
537 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
539 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
541 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
571 * registers (D0-D31). in cpacr_write()
574 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ in cpacr_write()
582 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 in cpacr_write()
586 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cpacr_write()
588 value = (value & ~mask) | (env->cp15.cpacr_el1 & mask); in cpacr_write()
591 env->cp15.cpacr_el1 = value; in cpacr_write()
597 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 in cpacr_read()
600 uint64_t value = env->cp15.cpacr_el1; in cpacr_read()
603 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cpacr_read()
625 FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) { in cpacr_access()
629 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { in cpacr_access()
642 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { in cptr_access()
651 { .name = "MVA_prefetch",
655 * We need to break the TB after ISB to execute self-modifying code
659 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
661 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
663 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
665 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
674 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
676 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
718 return -1; in swinc_ns_per()
781 return -1; in zero_event_ns_per()
840 * Empty supported_event_map and cpu->pmceid[01] before adding supported in pmu_init()
846 cpu->pmceid0 = 0; in pmu_init()
847 cpu->pmceid1 = 0; in pmu_init()
850 const pm_event *cnt = &pm_events[i]; in pmu_init()
851 assert(cnt->number <= MAX_EVENT_ID); in pmu_init()
853 assert(cnt->number <= 0x3f); in pmu_init()
855 if (cnt->supported(&cpu->env)) { in pmu_init()
856 supported_event_map[cnt->number] = i; in pmu_init()
857 uint64_t event_mask = 1ULL << (cnt->number & 0x1f); in pmu_init()
858 if (cnt->number & 0x20) { in pmu_init()
859 cpu->pmceid1 |= event_mask; in pmu_init()
861 cpu->pmceid0 |= event_mask; in pmu_init()
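The PMCEID update above spreads a 6-bit event number across the two supported-event bitmaps: bit 5 picks pmceid1 (events 0x20..0x3f) over pmceid0 (events 0x00..0x1f), and the low five bits pick the bit position within it. A worked check (event numbers chosen as examples):

    #include <assert.h>
    #include <stdint.h>

    static void mark_supported(uint16_t number, uint64_t *pmceid0, uint64_t *pmceid1)
    {
        uint64_t event_mask = 1ULL << (number & 0x1f);  /* bit within the word */
        if (number & 0x20) {
            *pmceid1 |= event_mask;                     /* events 0x20..0x3f */
        } else {
            *pmceid0 |= event_mask;                     /* events 0x00..0x1f */
        }
    }

    int main(void)
    {
        uint64_t id0 = 0, id1 = 0;
        mark_supported(0x11, &id0, &id1);   /* -> pmceid0 bit 17 */
        mark_supported(0x24, &id0, &id1);   /* -> pmceid1 bit 4  */
        assert(id0 == 1ULL << 0x11 && id1 == 1ULL << 0x04);
        return 0;
    }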
889 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { in pmreg_access()
895 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { in pmreg_access()
909 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 in pmreg_access_xevcntr()
924 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 in pmreg_access_swinc()
939 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { in pmreg_access_selr()
953 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 in pmreg_access_ccntr()
985 * We might be called for M-profile cores where MDCR_EL2 doesn't in pmu_counter_enabled()
986 * exist and arm_mdcr_el2_eff() will assert, so this early-exit check in pmu_counter_enabled()
998 e = env->cp15.c9_pmcr & PMCRE; in pmu_counter_enabled()
1002 enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); in pmu_counter_enabled()
1009 prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME); in pmu_counter_enabled()
1018 prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP; in pmu_counter_enabled()
1021 prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD); in pmu_counter_enabled()
1030 filter = env->cp15.pmccfiltr_el0; in pmu_counter_enabled()
1032 filter = env->cp15.c14_pmevtyper[counter]; in pmu_counter_enabled()
1070 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && in pmu_update_irq()
1071 (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); in pmu_update_irq()
1080 * (64-bit) cycle counter PMCR.D has no effect. in pmccntr_clockdiv_enabled()
1082 return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD; in pmccntr_clockdiv_enabled()
1101 bool hlp = env->cp15.mdcr_el2 & MDCR_HLP; in pmevcntr_is_64_bit()
1102 int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; in pmevcntr_is_64_bit()
1108 return env->cp15.c9_pmcr & PMCRLP; in pmevcntr_is_64_bit()
1112 * Ensure c15_ccnt is the guest-visible count so that operations such as
1114 * etc. can be done logically. This is essentially a no-op if the counter is
1127 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta; in pmccntr_op_start()
1129 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \ in pmccntr_op_start()
1131 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { in pmccntr_op_start()
1132 env->cp15.c9_pmovsr |= (1ULL << 31); in pmccntr_op_start()
1136 env->cp15.c15_ccnt = new_pmccntr; in pmccntr_op_start()
1138 env->cp15.c15_ccnt_delta = cycles; in pmccntr_op_start()
1143 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1151 uint64_t remaining_cycles = -env->cp15.c15_ccnt; in pmccntr_op_finish()
1152 if (!(env->cp15.c9_pmcr & PMCRLC)) { in pmccntr_op_finish()
1163 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); in pmccntr_op_finish()
1168 uint64_t prev_cycles = env->cp15.c15_ccnt_delta; in pmccntr_op_finish()
1172 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; in pmccntr_op_finish()
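The delta bookkeeping in pmccntr_op_start()/pmccntr_op_finish() above keeps one invariant: while the counter is live, c15_ccnt_delta holds (host cycles - guest count), so the guest value can be re-derived at any host time; between start and finish, c15_ccnt is authoritative and may be rewritten. A self-contained model of that invariant (enable checks, clock divider and overflow paths omitted):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t host;            /* stand-in for the host cycle source */
    static uint64_t ccnt, delta;     /* guest-visible count / conversion delta */

    static void op_start(void)  { ccnt = host - delta; delta = host; }
    static void op_finish(void) { delta -= ccnt; }   /* back to host - guest */

    int main(void)
    {
        host = 1000; op_start();     /* ccnt becomes authoritative */
        ccnt = 5;                    /* guest writes PMCCNTR while stopped */
        op_finish();                 /* delta = 1000 - 5 = 995 */
        host = 1300; op_start();     /* 300 host cycles elapsed */
        assert(ccnt == 305);         /* 5 + 300: the write was preserved */
        return 0;
    }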
1179 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; in pmevcntr_op_start()
1187 uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; in pmevcntr_op_start()
1191 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) { in pmevcntr_op_start()
1192 env->cp15.c9_pmovsr |= (1 << counter); in pmevcntr_op_start()
1195 env->cp15.c14_pmevcntr[counter] = new_pmevcntr; in pmevcntr_op_start()
1197 env->cp15.c14_pmevcntr_delta[counter] = count; in pmevcntr_op_start()
1204 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; in pmevcntr_op_finish()
1206 uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1); in pmevcntr_op_finish()
1220 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); in pmevcntr_op_finish()
1225 env->cp15.c14_pmevcntr_delta[counter] -= in pmevcntr_op_finish()
1226 env->cp15.c14_pmevcntr[counter]; in pmevcntr_op_finish()
1250 pmu_op_start(&cpu->env); in pmu_pre_el_change()
1255 pmu_op_finish(&cpu->env); in pmu_post_el_change()
1265 * has the effect of setting the cpu->pmu_timer to the next earliest time a in arm_pmu_timer_cb()
1268 pmu_op_start(&cpu->env); in arm_pmu_timer_cb()
1269 pmu_op_finish(&cpu->env); in arm_pmu_timer_cb()
1279 env->cp15.c15_ccnt = 0; in pmcr_write()
1285 env->cp15.c14_pmevcntr[i] = 0; in pmcr_write()
1289 env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK; in pmcr_write()
1290 env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK); in pmcr_write()
1297 uint64_t pmcr = env->cp15.c9_pmcr; in pmcr_read()
1305 pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT; in pmcr_read()
1323 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { in pmswinc_write()
1330 new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; in pmswinc_write()
1335 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) { in pmswinc_write()
1336 env->cp15.c9_pmovsr |= (1 << i); in pmswinc_write()
1340 env->cp15.c14_pmevcntr[i] = new_pmswinc; in pmswinc_write()
1351 ret = env->cp15.c15_ccnt; in pmccntr_read()
1365 env->cp15.c9_pmselr = value & 0x1f; in pmselr_write()
1372 env->cp15.c15_ccnt = value; in pmccntr_write()
1388 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; in pmccfiltr_write()
1397 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | in pmccfiltr_write_a32()
1405 return env->cp15.pmccfiltr_el0 & PMCCFILTR; in pmccfiltr_read_a32()
1413 env->cp15.c9_pmcnten |= value; in pmcntenset_write()
1422 env->cp15.c9_pmcnten &= ~value; in pmcntenclr_write()
1430 env->cp15.c9_pmovsr &= ~value; in pmovsr_write()
1438 env->cp15.c9_pmovsr |= value; in pmovsset_write()
1456 uint16_t old_event = env->cp15.c14_pmevtyper[counter] & in pmevtyper_write()
1465 env->cp15.c14_pmevcntr_delta[counter] = count; in pmevtyper_write()
1468 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; in pmevtyper_write()
1482 return env->cp15.pmccfiltr_el0; in pmevtyper_read()
1484 return env->cp15.c14_pmevtyper[counter]; in pmevtyper_read()
1497 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevtyper_writefn()
1504 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevtyper_rawwrite()
1505 env->cp15.c14_pmevtyper[counter] = value; in pmevtyper_rawwrite()
1519 env->cp15.c14_pmevcntr_delta[counter] = in pmevtyper_rawwrite()
1526 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevtyper_readfn()
1533 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31); in pmxevtyper_write()
1538 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31); in pmxevtyper_read()
1550 env->cp15.c14_pmevcntr[counter] = value; in pmevcntr_write()
1565 ret = env->cp15.c14_pmevcntr[counter]; in pmevcntr_read()
1584 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevcntr_writefn()
1590 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevcntr_readfn()
1597 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevcntr_rawwrite()
1599 env->cp15.c14_pmevcntr[counter] = value; in pmevcntr_rawwrite()
1605 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevcntr_rawread()
1607 return env->cp15.c14_pmevcntr[counter]; in pmevcntr_rawread()
1613 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); in pmxevcntr_write()
1618 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); in pmxevcntr_read()
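The ((crm & 3) << 3) | (opc2 & 7) expression that recurs in the pmevtyper/pmevcntr helpers above decodes the counter number n from the PMEV*<n> system-register encoding: CRm supplies n[4:3] and opc2 supplies n[2:0]. For instance:

    #include <assert.h>
    #include <stdint.h>

    static uint8_t evcntr_index(uint8_t crm, uint8_t opc2)
    {
        return ((crm & 3) << 3) | (opc2 & 7);
    }

    int main(void)
    {
        /* PMEVCNTR12_EL0: CRm = 0b1001, opc2 = 0b100 -> (1 << 3) | 4 == 12 */
        assert(evcntr_index(0x9, 0x4) == 12);
        return 0;
    }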
1625 env->cp15.c9_pmuserenr = value & 0xf; in pmuserenr_write()
1627 env->cp15.c9_pmuserenr = value & 1; in pmuserenr_write()
1636 env->cp15.c9_pminten |= value; in pmintenset_write()
1644 env->cp15.c9_pminten &= ~value; in pmintenclr_write()
1743 /* Clear all-context RES0 bits. */ in scr_write()
1745 changed = env->cp15.scr_el3 ^ value; in scr_write()
1746 env->cp15.scr_el3 = value; in scr_write()
1766 * scr_write will set the RES1 bits on an AArch64-only CPU. in scr_reset()
1767 * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise. in scr_reset()
1793 ri->secure & ARM_CP_SECSTATE_S); in ccsidr_read()
1795 return cpu->ccsidr[index]; in ccsidr_read()
1812 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { in isr_read()
1815 if (cs->interrupt_request & CPU_INTERRUPT_VINMI) { in isr_read()
1820 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { in isr_read()
1824 if (cs->interrupt_request & CPU_INTERRUPT_NMI) { in isr_read()
1831 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { in isr_read()
1834 if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) { in isr_read()
1839 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { in isr_read()
1845 if (cs->interrupt_request & CPU_INTERRUPT_VSERR) { in isr_read()
1875 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1889 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1896 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
1902 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1909 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1916 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
1923 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
1931 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
1936 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
1942 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
1948 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
1954 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
1959 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
1967 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
1973 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
1981 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
1986 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
1992 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
1997 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2003 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2008 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2014 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2021 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2029 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2035 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2042 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2048 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2060 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2070 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2076 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2083 * MAIR can just read-as-written because we don't implement caches
2086 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2093 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2098 * For non-long-descriptor page tables these are PRRR and NMRR;
2099 * regardless they still act as reads-as-written for QEMU.
2102 * MAIR0/1 are defined separately from their 64-bit counterpart which
2106 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2112 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2118 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2126 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2133 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2147 env->teecr = value; in teecr_write()
2158 (env->cp15.hstr_el2 & HSTR_TTEE)) { in teecr_access()
2167 if (arm_current_el(env) == 0 && (env->teecr & 1)) { in teehbr_access()
2174 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2178 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2184 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2189 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2195 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2201 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2207 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2212 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2223 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; in arm_gt_cntfrq_reset()
2243 cntkctl = env->cp15.cnthctl_el2; in gt_cntfrq_access()
2245 cntkctl = env->cp15.c14_cntkctl; in gt_cntfrq_access()
2252 if (!isread && ri->state == ARM_CP_STATE_AA32 && in gt_cntfrq_access()
2254 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ in gt_cntfrq_access()
2281 return (extract32(env->cp15.cnthctl_el2, timeridx, 1) in gt_counter_access()
2285 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ in gt_counter_access()
2286 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { in gt_counter_access()
2294 ? !extract32(env->cp15.cnthctl_el2, 10, 1) in gt_counter_access()
2295 : !extract32(env->cp15.cnthctl_el2, 0, 1))) { in gt_counter_access()
2299 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) { in gt_counter_access()
2319 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) in gt_timer_access()
2324 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from in gt_timer_access()
2327 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { in gt_timer_access()
2336 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { in gt_timer_access()
2341 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { in gt_timer_access()
2347 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) { in gt_timer_access()
2399 if (!(env->cp15.scr_el3 & SCR_ST)) { in gt_stimer_access()
2442 if (env->cp15.scr_el3 & SCR_EEL2) { in gt_sel2timer_access()
2461 CPUARMState *env = &cpu->env; in gt_update_irq()
2462 uint64_t cnthctl = env->cp15.cnthctl_el2; in gt_update_irq()
2465 int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4; in gt_update_irq()
2468 * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK. in gt_update_irq()
2477 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); in gt_update_irq()
2494 if ((env->cp15.scr_el3 & SCR_ECVEN) && in gt_phys_raw_cnt_offset()
2495 FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) && in gt_phys_raw_cnt_offset()
2498 return env->cp15.cntpoff_el2; in gt_phys_raw_cnt_offset()
2519 return env->cp15.cntvoff_el2; in gt_indirect_access_timer_offset()
2536 * to the CNT*_TVAL registers. in gt_direct_access_timer_offset()
2538 * This isn't exactly the same as the indirect-access offset, in gt_direct_access_timer_offset()
2567 return env->cp15.cntvoff_el2; in gt_direct_access_timer_offset()
2581 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; in gt_recalc_timer()
2583 if (gt->ctl & 1) { in gt_recalc_timer()
2588 uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx); in gt_recalc_timer()
2589 uint64_t count = gt_get_countervalue(&cpu->env); in gt_recalc_timer()
2591 int istatus = count - offset >= gt->cval; in gt_recalc_timer()
2594 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); in gt_recalc_timer()
2598 * Next transition is when (count - offset) rolls back over to 0. in gt_recalc_timer()
2611 * Next transition is when (count - offset) == cval, i.e. in gt_recalc_timer()
2616 if (uadd64_overflow(gt->cval, offset, &nexttick)) { in gt_recalc_timer()
2622 * signed-64-bit range of a QEMUTimer -- in this case we just in gt_recalc_timer()
2627 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); in gt_recalc_timer()
2629 timer_mod(cpu->gt_timer[timeridx], nexttick); in gt_recalc_timer()
2634 gt->ctl &= ~4; in gt_recalc_timer()
2635 timer_del(cpu->gt_timer[timeridx]); in gt_recalc_timer()
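gt_recalc_timer() above boils down to: the status condition is (count - offset) >= cval, and when the timer has not yet fired the next host deadline is at count == cval + offset, clamped when the unsigned add (or the later nanosecond conversion) would overflow. A condensed model of just the comparison and clamp (the real code uses QEMU's uadd64_overflow() and then converts ticks to ns):

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns whether the timer condition holds now; *next gets the counter
     * value of the next status change when it has not fired yet. */
    static bool timer_istatus(uint64_t count, uint64_t offset, uint64_t cval,
                              uint64_t *next)
    {
        bool istatus = count - offset >= cval;
        if (!istatus && __builtin_add_overflow(cval, offset, next)) {
            *next = UINT64_MAX;          /* too far out: clamp, fire "never" */
        }
        return istatus;
    }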
2646 timer_del(cpu->gt_timer[timeridx]); in gt_timer_reset()
2652 return gt_get_countervalue(env) - offset; in gt_cnt_read()
2658 return gt_get_countervalue(env) - offset; in gt_virt_cnt_read()
2666 env->cp15.c14_timer[timeridx].cval = value; in gt_cval_write()
2672 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - in do_tval_read()
2673 (gt_get_countervalue(env) - offset)); in do_tval_read()
2688 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + in do_tval_write()
2707 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; in gt_ctl_write()
2710 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); in gt_ctl_write()
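do_tval_read()/do_tval_write() above implement the architected identity TVAL = CVAL - (count - offset), truncated to 32 bits, with a TVAL write re-deriving CVAL via sign extension. A standalone round-trip check:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t cval;

    static uint32_t tval_read(uint64_t count, uint64_t offset)
    {
        return (uint32_t)(cval - (count - offset));
    }

    static void tval_write(uint64_t count, uint64_t offset, uint32_t value)
    {
        cval = count - offset + (int32_t)value;   /* sextract64(value, 0, 32) */
    }

    int main(void)
    {
        tval_write(1000, 64, 500);           /* fire 500 ticks from now */
        assert(tval_read(1000, 64) == 500);  /* immediate read-back */
        assert(tval_read(1200, 64) == 300);  /* counts down as time passes */
        return 0;
    }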
2780 return env->cp15.c14_timer[timeridx].cval; in gt_phys_redir_cval_read()
2808 return env->cp15.c14_timer[timeridx].ctl; in gt_phys_redir_ctl_read()
2835 * to re-detect that it's this register. in gt_virt_tval_read()
2838 return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2); in gt_virt_tval_read()
2845 do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2); in gt_virt_tval_write()
2858 uint32_t oldval = env->cp15.cnthctl_el2; in gt_cnthctl_write()
2911 return env->cp15.c14_timer[timeridx].cval; in gt_virt_redir_cval_read()
2939 return env->cp15.c14_timer[timeridx].ctl; in gt_virt_redir_ctl_read()
3140 * Note that CNTFRQ is purely reads-as-written for the benefit
3144 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3149 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3156 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
3162 /* per-timer control */
3163 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3172 { .name = "CNTP_CTL_S",
3181 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
3191 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
3199 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
3210 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3216 { .name = "CNTP_TVAL_S",
3223 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3229 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
3234 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3241 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3246 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3251 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3256 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3262 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3271 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3279 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3289 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3297 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3308 * Secure timer -- this is actually restricted to only EL3
3309 * and configurably Secure-EL1 via the accessfn.
3311 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3319 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3327 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3338 * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
3342 { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9,
3347 { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
3352 { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8,
3357 { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64,
3369 !(env->cp15.scr_el3 & SCR_ECVEN)) { in gt_cntpoff_access()
3386 .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64,
3396 * In user-mode most of the generic timer registers are inaccessible
3405 * Currently we have no support for QEMUTimer in linux-user so we in gt_virt_cnt_read()
3413 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3415 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3419 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3431 { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
3452 /* get_phys_addr() isn't present for user-mode-only targets */
3457 if (ri->opc2 & 4) { in ats_access()
3466 if (env->cp15.scr_el3 & SCR_EEL2) { in ats_access()
3481 * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC in par_el1_shareability()
3482 * memory -- see pseudocode PAREncodeShareability(). in par_el1_shareability()
3484 if (((res->cacheattrs.attrs & 0xf0) == 0) || in par_el1_shareability()
3485 res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) { in par_el1_shareability()
3488 return res->cacheattrs.shareability; in par_el1_shareability()
3536 (env->cp15.scr_el3 & SCR_EA)) { in do_ats_write()
3539 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; in do_ats_write()
3541 env->cp15.hpfar_el2 |= HPFAR_NS; in do_ats_write()
3579 env->exception.vaddress = value; in do_ats_write()
3580 env->exception.fsr = fsr; in do_ats_write()
3591 * 32-bit or the 64-bit PAR format in do_ats_write()
3595 * * The Non-secure TTBCR.EAE bit is set to 1 in do_ats_write()
3608 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); in do_ats_write()
3616 /* Create a 64-bit PAR */ in do_ats_write()
3623 par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */ in do_ats_write()
3641 * Convert it to a 32-bit PAR. in do_ats_write()
3668 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; in ats_write()
3674 switch (ri->opc2 & 6) { in ats_write()
3679 if (ri->crm == 9 && arm_pan_enabled(env)) { in ats_write()
3686 g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ in ats_write()
3689 if (ri->crm == 9 && arm_pan_enabled(env)) { in ats_write()
3706 g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ in ats_write()
3743 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; in ats1h_write()
3767 if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) { in at_e012_access()
3777 !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) { in at_s1e2_access()
3796 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; in ats_write64()
3803 switch (ri->opc2 & 6) { in ats_write64()
3805 switch (ri->opc1) { in ats_write64()
3807 if (ri->crm == 9 && arm_pan_enabled(env)) { in ats_write64()
3839 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss); in ats_write64()
3880 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); in pmsav5_data_ap_write()
3885 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); in pmsav5_data_ap_read()
3891 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); in pmsav5_insn_ap_write()
3896 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); in pmsav5_insn_ap_read()
3907 u32p += env->pmsav7.rnr[M_REG_NS]; in pmsav7_read()
3921 u32p += env->pmsav7.rnr[M_REG_NS]; in pmsav7_write()
3922 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in pmsav7_write()
3930 uint32_t nrgs = cpu->pmsav7_dregion; in pmsav7_rgnr_write()
3947 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in prbar_write()
3948 env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; in prbar_write()
3953 return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; in prbar_read()
3961 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in prlar_write()
3962 env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; in prlar_write()
3967 return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; in prlar_read()
3979 if (value >= cpu->pmsav7_dregion) { in prselr_write()
3983 env->pmsav7.rnr[M_REG_NS] = value; in prselr_write()
3991 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in hprbar_write()
3992 env->pmsav8.hprbar[env->pmsav8.hprselr] = value; in hprbar_write()
3997 return env->pmsav8.hprbar[env->pmsav8.hprselr]; in hprbar_read()
4005 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in hprlar_write()
4006 env->pmsav8.hprlar[env->pmsav8.hprselr] = value; in hprlar_write()
4011 return env->pmsav8.hprlar[env->pmsav8.hprselr]; in hprlar_read()
4022 int rmax = MIN(cpu->pmsav8r_hdregion, 32); in hprenr_write()
4025 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in hprenr_write()
4030 env->pmsav8.hprlar[n] = deposit32( in hprenr_write()
4031 env->pmsav8.hprlar[n], 0, 1, bit); in hprenr_write()
4042 for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) { in hprenr_read()
4043 if (env->pmsav8.hprlar[n] & 0x1) { in hprenr_read()
4059 if (value >= cpu->pmsav8r_hdregion) { in hprselr_write()
4063 env->pmsav8.hprselr = value; in hprselr_write()
4070 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | in pmsav8r_regn_write()
4071 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); in pmsav8r_regn_write()
4073 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in pmsav8r_regn_write()
4075 if (ri->opc1 & 4) { in pmsav8r_regn_write()
4076 if (index >= cpu->pmsav8r_hdregion) { in pmsav8r_regn_write()
4079 if (ri->opc2 & 0x1) { in pmsav8r_regn_write()
4080 env->pmsav8.hprlar[index] = value; in pmsav8r_regn_write()
4082 env->pmsav8.hprbar[index] = value; in pmsav8r_regn_write()
4085 if (index >= cpu->pmsav7_dregion) { in pmsav8r_regn_write()
4088 if (ri->opc2 & 0x1) { in pmsav8r_regn_write()
4089 env->pmsav8.rlar[M_REG_NS][index] = value; in pmsav8r_regn_write()
4091 env->pmsav8.rbar[M_REG_NS][index] = value; in pmsav8r_regn_write()
4099 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | in pmsav8r_regn_read()
4100 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); in pmsav8r_regn_read()
4102 if (ri->opc1 & 4) { in pmsav8r_regn_read()
4103 if (index >= cpu->pmsav8r_hdregion) { in pmsav8r_regn_read()
4106 if (ri->opc2 & 0x1) { in pmsav8r_regn_read()
4107 return env->pmsav8.hprlar[index]; in pmsav8r_regn_read()
4109 return env->pmsav8.hprbar[index]; in pmsav8r_regn_read()
4112 if (index >= cpu->pmsav7_dregion) { in pmsav8r_regn_read()
4115 if (ri->opc2 & 0x1) { in pmsav8r_regn_read()
4116 return env->pmsav8.rlar[M_REG_NS][index]; in pmsav8r_regn_read()
4118 return env->pmsav8.rbar[M_REG_NS][index]; in pmsav8r_regn_read()
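pmsav8r_regn_write()/pmsav8r_regn_read() above reassemble a 0..31 region number from three encoding fields: one opc0 bit becomes index[4], three crm bits become index[3:1], and opc2 bit 2 becomes index[0]. In isolation:

    #include <assert.h>
    #include <stdint.h>

    static uint8_t region_index(uint32_t opc0, uint32_t crm, uint32_t opc2)
    {
        return ((opc0 & 1) << 4) | ((crm & 7) << 1) | ((opc2 >> 2) & 1);
    }

    int main(void)
    {
        /* opc0 bit set, crm = 5, opc2 bit 2 set -> 16 + 10 + 1 = 27 */
        assert(region_index(1, 5, 4) == 27);
        return 0;
    }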
4124 { .name = "PRBAR",
4129 { .name = "PRLAR",
4134 { .name = "PRSELR", .resetvalue = 0,
4139 { .name = "HPRBAR", .resetvalue = 0,
4143 { .name = "HPRLAR",
4147 { .name = "HPRSELR", .resetvalue = 0,
4152 { .name = "HPRENR",
4161 * because the PMSAv7 is also used by M-profile CPUs, which do
4164 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
4169 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
4174 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
4179 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
4187 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4191 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4195 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
4199 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
4203 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4206 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
4210 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
4213 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
4216 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
4219 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
4222 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
4225 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
4228 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
4231 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
4245 * using Long-descriptor translation table format in vmsa_ttbcr_write()
4252 * Short-descriptor translation table format. in vmsa_ttbcr_write()
4283 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ in vmsa_ttbr_write()
4328 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4332 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4336 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
4340 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
4350 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
4356 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
4364 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
4372 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
4381 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4394 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
4406 env->cp15.c15_ticonfig = value & 0xe7; in omap_ticonfig_write()
4408 env->cp15.c0_cpuid = (value & (1 << 5)) ? in omap_ticonfig_write()
4415 env->cp15.c15_threadid = value & 0xffff; in omap_threadid_write()
4421 /* Wait-for-interrupt (deprecated) */ in omap_wfi_write()
4432 env->cp15.c15_i_max = 0x000; in omap_cachemaint_write()
4433 env->cp15.c15_i_min = 0xff0; in omap_cachemaint_write()
4437 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
4441 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
4443 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
4447 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
4450 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
4453 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
4457 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
4467 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
4471 { .name = "C9", .cp = 15, .crn = 9,
4479 env->cp15.c15_cpar = value & 0x3fff; in xscale_cpar_write()
4483 { .name = "XSCALE_CPAR",
4487 { .name = "XSCALE_AUXCR",
4492 * XScale specific cache-lockdown: since we have no cache we NOP these
4495 { .name = "XSCALE_LOCK_ICACHE_LINE",
4498 { .name = "XSCALE_UNLOCK_ICACHE",
4501 { .name = "XSCALE_DCACHE_LOCK",
4504 { .name = "XSCALE_UNLOCK_DCACHE",
4512 * implementation of this implementation-defined space.
4516 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
4525 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
4532 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
4536 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4538 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4540 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4542 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4544 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4546 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4552 * The cache test-and-clean instructions always return (1 << 30)
4555 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4558 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4565 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4576 return env->cp15.vpidr_el2; in midr_read()
4584 uint64_t mpidr = cpu->mp_affinity; in mpidr_read_val()
4589 * Cores which are uniprocessor (non-coherent) in mpidr_read_val()
4591 * bit 30. (For instance, Cortex-R5). in mpidr_read_val()
4593 if (cpu->mp_is_up) { in mpidr_read_val()
4605 return env->cp15.vmpidr_el2; in mpidr_read()
4612 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4619 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4622 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4626 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4632 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4674 env->daif = value & PSTATE_DAIF; in aa64_daif_write()
4679 return env->pstate & PSTATE_PAN; in aa64_pan_read()
4685 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); in aa64_pan_write()
4689 .name = "PAN", .state = ARM_CP_STATE_AA64,
4697 return env->pstate & PSTATE_UAO; in aa64_uao_read()
4703 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); in aa64_uao_write()
4707 .name = "UAO", .state = ARM_CP_STATE_AA64,
4715 return env->pstate & PSTATE_DIT; in aa64_dit_read()
4721 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT); in aa64_dit_write()
4725 .name = "DIT", .state = ARM_CP_STATE_AA64,
4733 return env->pstate & PSTATE_SSBS; in aa64_ssbs_read()
4739 env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS); in aa64_ssbs_write()
4743 .name = "SSBS", .state = ARM_CP_STATE_AA64,
4813 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { in aa64_zva_access()
4817 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { in aa64_zva_access()
4840 return cpu->dcz_blocksize | dzp_bit; in aa64_dczid_read()
4846 if (!(env->pstate & PSTATE_SP)) { in sp_el0_access()
4858 return env->pstate & PSTATE_SP; in spsel_read()
4871 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { in sctlr_write()
4878 if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) { in sctlr_write()
4879 if (ri->opc1 == 6) { /* SCTLR_EL3 */ in sctlr_write()
4900 if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) { in sctlr_write()
4919 bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS; in mdcr_el3_write()
4924 env->cp15.mdcr_el3 = value; in mdcr_el3_write()
4945 bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS; in mdcr_el2_write()
4950 env->cp15.mdcr_el2 = value; in mdcr_el2_write()
4971 * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
4987 icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1; in ic_ivau_write()
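The mask computed in ic_ivau_write() above comes from CTR_EL0.IminLine (bits [3:0]), which encodes log2 of the I-cache line length in words; the line length in bytes is therefore 4 << IminLine, and the mask rounds an address down to its line. For example:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t line_base(uint64_t vaddr, uint32_t ctr)
    {
        uint64_t icache_line_mask = (4u << (ctr & 0xf)) - 1;
        return vaddr & ~icache_line_mask;
    }

    int main(void)
    {
        /* IminLine = 4 -> 64-byte lines; 0x1234 rounds down to 0x1200 */
        assert(line_base(0x1234, 0x4) == 0x1200);
        return 0;
    }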
5001 * Minimal set of EL0-visible registers. This will need to be expanded
5004 { .name = "NZCV", .state = ARM_CP_STATE_AA64,
5007 { .name = "DAIF", .state = ARM_CP_STATE_AA64,
5013 { .name = "FPCR", .state = ARM_CP_STATE_AA64,
5017 { .name = "FPSR", .state = ARM_CP_STATE_AA64,
5021 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
5026 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
5030 /* Avoid overhead of an access check that always passes in user-mode */
5035 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
5042 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
5047 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
5052 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
5065 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
5070 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
5074 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
5079 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
5083 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
5088 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
5093 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
5099 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
5104 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
5109 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
5114 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
5119 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
5123 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
5127 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
5131 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
5136 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
5140 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
5144 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
5153 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5155 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
5157 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5159 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
5161 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
5163 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
5165 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5167 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5169 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
5171 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5173 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
5175 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
5177 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5180 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
5185 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
5191 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
5202 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
5207 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
5212 { .name = "SPSel", .state = ARM_CP_STATE_AA64,
5216 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
5221 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
5226 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
5231 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
5236 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
5243 { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
5252 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
5257 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
5262 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
5280 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { in do_hcr_write()
5342 * HCR_PTW forbids certain page-table setups in do_hcr_write()
5348 if ((env->cp15.hcr_el2 ^ value) & in do_hcr_write()
5352 env->cp15.hcr_el2 = value; in do_hcr_write()
5384 value = deposit64(env->cp15.hcr_el2, 32, 32, value); in hcr_writehigh()
5392 value = deposit64(env->cp15.hcr_el2, 0, 32, value); in hcr_writelow()
5398 /* hcr_write will set the RES1 bits on an AArch64-only CPU */ in hcr_reset()
5409 uint64_t ret = env->cp15.hcr_el2; in arm_hcr_el2_eff_secstate()
5416 * current Security state". This is ARMv8.4-SecEL2 speak for in arm_hcr_el2_eff_secstate()
5423 * on a per-field basis. In current QEMU, this is condition in arm_hcr_el2_eff_secstate()
5441 * These bits are up-to-date as of ARMv8.6. in arm_hcr_el2_eff_secstate()
5452 /* These bits are up-to-date as of ARMv8.6. */ in arm_hcr_el2_eff_secstate()
5500 if ((env->cp15.hcr_el2 & mask) != mask) { in el_is_in_host()
5533 env->cp15.hcrx_el2 = value & valid_mask; in hcrx_write()
5558 && !(env->cp15.scr_el3 & SCR_HXEN)) { in access_hxen()
5565 .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
5581 * For the moment, we treat the EL2-disabled case as taking in arm_hcrx_el2_eff()
5582 * priority over the HXEn-disabled case. This is true for the only in arm_hcrx_el2_eff()
5595 if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) { in arm_hcrx_el2_eff()
5598 return env->cp15.hcrx_el2; in arm_hcrx_el2_eff()
5605 * For A-profile AArch32 EL3, if NSACR.CP10 in cptr_el2_write()
5609 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cptr_el2_write()
5611 value = (value & ~mask) | (env->cp15.cptr_el[2] & mask); in cptr_el2_write()
5613 env->cp15.cptr_el[2] = value; in cptr_el2_write()
5619 * For A-profile AArch32 EL3, if NSACR.CP10 in cptr_el2_read()
5622 uint64_t value = env->cp15.cptr_el[2]; in cptr_el2_read()
5625 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cptr_el2_read()
5632 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
5639 { .name = "HCR", .state = ARM_CP_STATE_AA32,
5644 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5647 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
5652 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5656 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5660 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5665 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
5670 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5675 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
5679 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5684 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
5688 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
5692 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
5697 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5701 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5705 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5709 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5714 { .name = "VTCR", .state = ARM_CP_STATE_AA32,
5719 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
5725 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5731 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5736 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5740 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5745 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5750 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5755 * Unlike the other EL2-related AT operations, these must
5759 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
5764 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
5775 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5778 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5781 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5791 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5797 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5801 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5806 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5810 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5815 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5823 { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
5827 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
5831 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5839 { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5857 { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
5862 { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
5869 { .name = "CNTHPS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
5877 { .name = "CNTHPS_CTL_EL2", .state = ARM_CP_STATE_AA64,
5885 { .name = "CNTHPS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5893 { .name = "CNTHVS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
5901 { .name = "CNTHVS_CTL_EL2", .state = ARM_CP_STATE_AA64,
5909 { .name = "CNTHVS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5930 if (env->cp15.scr_el3 & SCR_EEL2) { in nsacr_access()
5943 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
5947 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
5952 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
5956 { .name = "SDER",
5960 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5964 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
5968 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
5974 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
5979 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
5982 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
5985 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
5990 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
5995 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
5999 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
6003 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
6007 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
6011 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
6037 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) { in access_el1nvpct()
6049 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) { in access_el1nvvct()
6068 ri = ri->opaque; in el2_e2h_read()
6069 readfn = ri->readfn; in el2_e2h_read()
6071 readfn = ri->orig_readfn; in el2_e2h_read()
6086 ri = ri->opaque; in el2_e2h_write()
6087 writefn = ri->writefn; in el2_e2h_write()
6089 writefn = ri->orig_writefn; in el2_e2h_write()
6100 return ri->orig_readfn(env, ri->opaque); in el2_e2h_e12_read()
6107 return ri->orig_writefn(env, ri->opaque, value); in el2_e2h_e12_write()
6127 if (ri->orig_accessfn) { in el2_e2h_e12_access()
6128 return ri->orig_accessfn(env, ri->opaque, isread); in el2_e2h_e12_access()
6195 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ in define_arm_vh_e2h_redirects_aliases()
6196 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ in define_arm_vh_e2h_redirects_aliases()
6207 if (a->feature && !a->feature(&cpu->isar)) { in define_arm_vh_e2h_redirects_aliases()
6211 src_reg = g_hash_table_lookup(cpu->cp_regs, in define_arm_vh_e2h_redirects_aliases()
6212 (gpointer)(uintptr_t)a->src_key); in define_arm_vh_e2h_redirects_aliases()
6213 dst_reg = g_hash_table_lookup(cpu->cp_regs, in define_arm_vh_e2h_redirects_aliases()
6214 (gpointer)(uintptr_t)a->dst_key); in define_arm_vh_e2h_redirects_aliases()
6218 /* Cross-compare names to detect typos in the keys. */ in define_arm_vh_e2h_redirects_aliases()
6219 g_assert(strcmp(src_reg->name, a->src_name) == 0); in define_arm_vh_e2h_redirects_aliases()
6220 g_assert(strcmp(dst_reg->name, a->dst_name) == 0); in define_arm_vh_e2h_redirects_aliases()
6223 g_assert(src_reg->opaque == NULL); in define_arm_vh_e2h_redirects_aliases()
6228 new_reg->name = a->new_name; in define_arm_vh_e2h_redirects_aliases()
6229 new_reg->type |= ARM_CP_ALIAS; in define_arm_vh_e2h_redirects_aliases()
6231 new_reg->access &= PL2_RW | PL3_RW; in define_arm_vh_e2h_redirects_aliases()
6233 new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK) in define_arm_vh_e2h_redirects_aliases()
6235 new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK) in define_arm_vh_e2h_redirects_aliases()
6237 new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK) in define_arm_vh_e2h_redirects_aliases()
6239 new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK) in define_arm_vh_e2h_redirects_aliases()
6241 new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK) in define_arm_vh_e2h_redirects_aliases()
6243 new_reg->opaque = src_reg; in define_arm_vh_e2h_redirects_aliases()
6244 new_reg->orig_readfn = src_reg->readfn ?: raw_read; in define_arm_vh_e2h_redirects_aliases()
6245 new_reg->orig_writefn = src_reg->writefn ?: raw_write; in define_arm_vh_e2h_redirects_aliases()
6246 new_reg->orig_accessfn = src_reg->accessfn; in define_arm_vh_e2h_redirects_aliases()
6247 if (!new_reg->raw_readfn) { in define_arm_vh_e2h_redirects_aliases()
6248 new_reg->raw_readfn = raw_read; in define_arm_vh_e2h_redirects_aliases()
6250 if (!new_reg->raw_writefn) { in define_arm_vh_e2h_redirects_aliases()
6251 new_reg->raw_writefn = raw_write; in define_arm_vh_e2h_redirects_aliases()
6253 new_reg->readfn = el2_e2h_e12_read; in define_arm_vh_e2h_redirects_aliases()
6254 new_reg->writefn = el2_e2h_e12_write; in define_arm_vh_e2h_redirects_aliases()
6255 new_reg->accessfn = el2_e2h_e12_access; in define_arm_vh_e2h_redirects_aliases()
6262 if (new_reg->nv2_redirect_offset) { in define_arm_vh_e2h_redirects_aliases()
6263 assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1); in define_arm_vh_e2h_redirects_aliases()
6264 new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1; in define_arm_vh_e2h_redirects_aliases()
6265 new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1; in define_arm_vh_e2h_redirects_aliases()
6268 ok = g_hash_table_insert(cpu->cp_regs, in define_arm_vh_e2h_redirects_aliases()
6269 (gpointer)(uintptr_t)a->new_key, new_reg); in define_arm_vh_e2h_redirects_aliases()
6272 src_reg->opaque = dst_reg; in define_arm_vh_e2h_redirects_aliases()
6273 src_reg->orig_readfn = src_reg->readfn ?: raw_read; in define_arm_vh_e2h_redirects_aliases()
6274 src_reg->orig_writefn = src_reg->writefn ?: raw_write; in define_arm_vh_e2h_redirects_aliases()
6275 if (!src_reg->raw_readfn) { in define_arm_vh_e2h_redirects_aliases()
6276 src_reg->raw_readfn = raw_read; in define_arm_vh_e2h_redirects_aliases()
6278 if (!src_reg->raw_writefn) { in define_arm_vh_e2h_redirects_aliases()
6279 src_reg->raw_writefn = raw_write; in define_arm_vh_e2h_redirects_aliases()
6281 src_reg->readfn = el2_e2h_read; in define_arm_vh_e2h_redirects_aliases()
6282 src_reg->writefn = el2_e2h_write; in define_arm_vh_e2h_redirects_aliases()
6297 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { in ctr_el0_access()
6301 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { in ctr_el0_access()
6332 if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) { in access_terr()
6343 return env->cp15.vdisr_el2; in disr_read()
6345 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { in disr_read()
6348 return env->cp15.disr_el1; in disr_read()
6356 env->cp15.vdisr_el2 = val; in disr_write()
6359 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { in disr_write()
6362 env->cp15.disr_el1 = val; in disr_write()
6384 * These registers have fine-grained trap bits, but UNDEF-to-EL1
6385 * is higher priority than FGT-to-EL2 so we do not need to list them
6389 { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
6393 { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
6398 { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
6402 { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
6422 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) { in sve_exception_el()
6436 if (env->cp15.hcr_el2 & HCR_E2H) { in sve_exception_el()
6437 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) { in sve_exception_el()
6439 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { in sve_exception_el()
6448 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) { in sve_exception_el()
6456 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) { in sve_exception_el()
6471 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) { in sme_exception_el()
6485 if (env->cp15.hcr_el2 & HCR_E2H) { in sme_exception_el()
6486 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) { in sme_exception_el()
6488 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { in sme_exception_el()
6497 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) { in sme_exception_el()
6505 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { in sme_exception_el()
6518 uint64_t *cr = env->vfp.zcr_el; in sve_vqm1_for_el_sm()
6519 uint32_t map = cpu->sve_vq.map; in sve_vqm1_for_el_sm()
6520 uint32_t len = ARM_MAX_VQ - 1; in sve_vqm1_for_el_sm()
6523 cr = env->vfp.smcr_el; in sve_vqm1_for_el_sm()
6524 map = cpu->sme_vq.map; in sve_vqm1_for_el_sm()
6539 return 31 - clz32(map); in sve_vqm1_for_el_sm()
6542 /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */ in sve_vqm1_for_el_sm()
6544 return ctz32(cpu->sme_vq.map); in sve_vqm1_for_el_sm()
6549 return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM)); in sve_vqm1_for_el()
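
sve_vqm1_for_el_sm treats the vq map as a bitmask of supported vector quanta: 31 - clz32(map) picks the largest supported quantum under the ZCR/SMCR length cap, and streaming mode falls back to ctz32 because bit 0 need not be set. An illustrative standalone version, assuming bit N set means quantum N+1 is supported (the GCC/Clang builtins stand in for QEMU's host-utils helpers):

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t clz32(uint32_t v) { return v ? __builtin_clz(v) : 32; }
    static inline uint32_t ctz32(uint32_t v) { return v ? __builtin_ctz(v) : 32; }

    int main(void)
    {
        uint32_t map = 0x0b;   /* bits 0,1,3: VQ 1, 2 and 4 supported */
        uint32_t len = 2;      /* cap from ZCR/SMCR.LEN, i.e. VQ-1 */

        uint32_t allowed = map & ((2u << len) - 1);  /* quanta within the cap */
        if (allowed) {
            printf("normal SVE: VQ-1 = %u\n", 31 - clz32(allowed));  /* 1 */
        } else {
            printf("streaming fallback: VQ-1 = %u\n", ctz32(map));
        }
        return 0;
    }
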
6574 { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
6580 { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6585 { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
6606 && !(env->cp15.scr_el3 & SCR_ENTP2)) { in access_tpidr2()
6618 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { in access_smprimap()
6629 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { in access_smpri()
6638 memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs)); in arm_reset_sve_state()
6640 memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs)); in arm_reset_sve_state()
6646 uint64_t change = (env->svcr ^ new) & mask; in aarch64_set_svcr()
6651 env->svcr ^= change; in aarch64_set_svcr()
6666 memset(env->zarray, 0, sizeof(env->zarray)); in aarch64_set_svcr()
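
The XOR pair in aarch64_set_svcr is a compact way to apply a masked update and learn which bits actually flipped, so follow-up work such as clearing ZA storage runs only on a real SM/ZA transition. A standalone sketch of the idiom:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t svcr = 1, new = 2, mask = 3;      /* SM set -> ZA set */

        uint64_t change = (svcr ^ new) & mask;     /* bits that will flip */
        svcr ^= change;                            /* apply the update */

        printf("svcr=%llu change=%llu\n",
               (unsigned long long)svcr, (unsigned long long)change);
        /* svcr=2 change=3: both SM and ZA toggled, so both need handling */
        return 0;
    }
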
6677 aarch64_set_svcr(env, value, -1); in svcr_write()
6695 * apply the narrower SVL to the Zregs and Pregs -- see the comment in smcr_write()
6705 { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
6710 { .name = "SVCR", .state = ARM_CP_STATE_AA64,
6715 { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
6721 { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
6726 { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
6731 { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
6744 { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
6749 { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
6764 env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask); in gpccr_write()
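
gpccr_write keeps the read-only fields of GPCCR_EL3 intact by masking: only bits set in rw_mask take the written value, everything else keeps its old state. The idiom in isolation (masked_write is a hypothetical name):

    #include <stdint.h>
    #include <assert.h>

    /* Only bits set in rw_mask take the new value; the rest keep old state. */
    static inline uint64_t masked_write(uint64_t old, uint64_t value,
                                        uint64_t rw_mask)
    {
        return (value & rw_mask) | (old & ~rw_mask);
    }

    int main(void)
    {
        /* e.g. low byte writable, upper bits read-only */
        assert(masked_write(0xAB00u, 0x12CDu, 0xFFu) == 0xABCDu);
        return 0;
    }
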
6769 env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ, in gpccr_reset()
6770 env_archcpu(env)->reset_l0gptsz); in gpccr_reset()
6774 { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
6778 { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
6781 { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
6784 { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
6790 { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
6798 env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT); in aa64_allint_write()
6803 return env->pstate & PSTATE_ALLINT; in aa64_allint_read()
6817 { .name = "ALLINT", .state = ARM_CP_STATE_AA64,
6833 unsigned int i, pmcrn = pmu_num_counters(&cpu->env); in define_pmu_regs()
6835 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, in define_pmu_regs()
6845 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, in define_pmu_regs()
6851 .resetvalue = cpu->isar.reset_pmcr_el0, in define_pmu_regs()
6864 { .name = pmevcntr_name, .cp = 15, .crn = 14, in define_pmu_regs()
6870 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, in define_pmu_regs()
6878 { .name = pmevtyper_name, .cp = 15, .crn = 14, in define_pmu_regs()
6884 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, in define_pmu_regs()
6900 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, in define_pmu_regs()
6904 .resetvalue = extract64(cpu->pmceid0, 32, 32) }, in define_pmu_regs()
6905 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, in define_pmu_regs()
6909 .resetvalue = extract64(cpu->pmceid1, 32, 32) }, in define_pmu_regs()
6915 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, in define_pmu_regs()
6935 uint64_t pfr1 = cpu->isar.id_pfr1; in id_pfr1_read()
6937 if (env->gicv3state) { in id_pfr1_read()
6946 uint64_t pfr0 = cpu->isar.id_aa64pfr0; in id_aa64pfr0_read()
6948 if (env->gicv3state) { in id_aa64pfr0_read()
6967 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { in access_lor_ns()
6984 * A trivial implementation of ARMv8.1-LOR leaves all of these
6989 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6994 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6999 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
7004 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
7009 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
7028 !(env->cp15.scr_el3 & SCR_APK)) { in access_pauth()
7035 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7040 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7045 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7050 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7055 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7060 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7065 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7070 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7075 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7080 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7093 env->NF = env->CF = env->VF = 0, env->ZF = 1; in rndr_readfn()
7099 * timed-out indication to the guest. There is no reason in rndr_readfn()
7104 ri->name, error_get_pretty(err)); in rndr_readfn()
7107 env->ZF = 0; /* NZCV = 0100 */ in rndr_readfn()
7113 /* We do not support re-seeding, so the two registers operate the same. */
7115 { .name = "RNDR", .state = ARM_CP_STATE_AA64,
7119 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
7130 /* CTR_EL0 System register -> DminLine, bits [19:16] */ in dccvap_writefn()
7131 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); in dccvap_writefn()
7133 uint64_t vaddr = vaddr_in & ~(dline_size - 1); in dccvap_writefn()
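
dccvap_writefn derives the data cache line size from CTR_EL0.DminLine and rounds the written address down to a line boundary before persisting. A self-contained sketch of that arithmetic (dc_line_base is an illustrative helper, not a QEMU function):

    #include <stdint.h>
    #include <stdio.h>

    /* CTR_EL0.DminLine (bits [19:16]) is the log2 of the smallest data
     * cache line in 4-byte words, so the line size in bytes is 4 << DminLine. */
    static uint64_t dc_line_base(uint64_t ctr_el0, uint64_t vaddr)
    {
        uint64_t dline_size = 4u << ((ctr_el0 >> 16) & 0xf);
        return vaddr & ~(dline_size - 1);   /* round down to the line start */
    }

    int main(void)
    {
        uint64_t ctr = 4u << 16;            /* DminLine = 4 -> 64-byte lines */
        printf("0x%llx\n", (unsigned long long)dc_line_base(ctr, 0x1234));
        /* prints 0x1200 */
        return 0;
    }
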
7160 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
7168 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
7197 !(env->cp15.scr_el3 & SCR_ATA)) { in access_mte()
7237 !(env->cp15.scr_el3 & SCR_ATA)) { in access_tfsr_el2()
7245 return env->pstate & PSTATE_TCO; in tco_read()
7250 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO); in tco_write()
7254 { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
7258 { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
7263 { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
7268 { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
7272 { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
7276 { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
7280 { .name = "TCO", .state = ARM_CP_STATE_AA64,
7284 { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
7289 { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
7293 { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
7298 { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
7302 { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
7306 { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
7310 { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
7314 { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
7321 { .name = "TCO", .state = ARM_CP_STATE_AA64,
7327 { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
7332 { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
7337 { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
7342 { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
7347 { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
7352 { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
7357 { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
7362 { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
7367 { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
7371 /* Avoid overhead of an access check that always passes in user-mode */
7376 { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
7380 /* Avoid overhead of an access check that always passes in user-mode */
7394 if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) { in access_scxtnum()
7400 } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) { in access_scxtnum()
7408 && !(env->cp15.scr_el3 & SCR_ENSCXT)) { in access_scxtnum()
7427 { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
7432 { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
7438 { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
7442 { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
7452 arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) { in access_fgt()
7459 { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
7464 { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
7469 { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
7474 { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
7479 { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
7491 * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything in vncr_write()
7492 * about the RESS bits at the top -- we choose the "generate an EL2 in vncr_write()
7496 env->cp15.vncr_el2 = value & ~0xfffULL; in vncr_write()
7500 { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64,
7528 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
7532 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
7536 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
7543 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
7547 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
7551 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
7564 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
7610 (env->cp15.hstr_el2 & HSTR_TJDBX)) { in access_joscr_jmcr()
7617 { .name = "JIDR",
7621 { .name = "JOSCR",
7625 { .name = "JMCR",
7632 .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
7639 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
7645 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
7651 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
7656 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
7662 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
7669 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
7676 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7681 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7686 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7693 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7705 { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
7710 { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
7718 { .name = "ATS1CPRP",
7722 { .name = "ATS1CPWP",
7732 * is non-zero, which is never the case for ARMv7, optional in ARMv8 in register_cp_regs_for_features()
7739 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
7743 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
7752 CPUARMState *env = &cpu->env; in register_cp_regs_for_features()
7774 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7778 .resetvalue = cpu->isar.id_pfr0 }, in register_cp_regs_for_features()
7783 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7789 .resetvalue = cpu->isar.id_pfr1, in register_cp_regs_for_features()
7797 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7801 .resetvalue = cpu->isar.id_dfr0 }, in register_cp_regs_for_features()
7802 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7806 .resetvalue = cpu->id_afr0 }, in register_cp_regs_for_features()
7807 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7811 .resetvalue = cpu->isar.id_mmfr0 }, in register_cp_regs_for_features()
7812 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7816 .resetvalue = cpu->isar.id_mmfr1 }, in register_cp_regs_for_features()
7817 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7821 .resetvalue = cpu->isar.id_mmfr2 }, in register_cp_regs_for_features()
7822 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7826 .resetvalue = cpu->isar.id_mmfr3 }, in register_cp_regs_for_features()
7827 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7831 .resetvalue = cpu->isar.id_isar0 }, in register_cp_regs_for_features()
7832 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7836 .resetvalue = cpu->isar.id_isar1 }, in register_cp_regs_for_features()
7837 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7841 .resetvalue = cpu->isar.id_isar2 }, in register_cp_regs_for_features()
7842 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7846 .resetvalue = cpu->isar.id_isar3 }, in register_cp_regs_for_features()
7847 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7851 .resetvalue = cpu->isar.id_isar4 }, in register_cp_regs_for_features()
7852 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7856 .resetvalue = cpu->isar.id_isar5 }, in register_cp_regs_for_features()
7857 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7861 .resetvalue = cpu->isar.id_mmfr4 }, in register_cp_regs_for_features()
7862 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7866 .resetvalue = cpu->isar.id_isar6 }, in register_cp_regs_for_features()
7881 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
7886 .resetvalue = cpu->clidr in register_cp_regs_for_features()
7912 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7917 .resetvalue = cpu->isar.id_aa64pfr0 in register_cp_regs_for_features()
7925 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7929 .resetvalue = cpu->isar.id_aa64pfr1}, in register_cp_regs_for_features()
7930 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7935 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7940 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7944 .resetvalue = cpu->isar.id_aa64zfr0 }, in register_cp_regs_for_features()
7945 { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7949 .resetvalue = cpu->isar.id_aa64smfr0 }, in register_cp_regs_for_features()
7950 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7955 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7960 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7964 .resetvalue = cpu->isar.id_aa64dfr0 }, in register_cp_regs_for_features()
7965 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7969 .resetvalue = cpu->isar.id_aa64dfr1 }, in register_cp_regs_for_features()
7970 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7975 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7980 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7984 .resetvalue = cpu->id_aa64afr0 }, in register_cp_regs_for_features()
7985 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7989 .resetvalue = cpu->id_aa64afr1 }, in register_cp_regs_for_features()
7990 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
7995 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8000 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8004 .resetvalue = cpu->isar.id_aa64isar0 }, in register_cp_regs_for_features()
8005 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8009 .resetvalue = cpu->isar.id_aa64isar1 }, in register_cp_regs_for_features()
8010 { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8014 .resetvalue = cpu->isar.id_aa64isar2 }, in register_cp_regs_for_features()
8015 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8020 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8025 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8030 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8035 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8040 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8044 .resetvalue = cpu->isar.id_aa64mmfr0 }, in register_cp_regs_for_features()
8045 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8049 .resetvalue = cpu->isar.id_aa64mmfr1 }, in register_cp_regs_for_features()
8050 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8054 .resetvalue = cpu->isar.id_aa64mmfr2 }, in register_cp_regs_for_features()
8055 { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8059 .resetvalue = cpu->isar.id_aa64mmfr3 }, in register_cp_regs_for_features()
8060 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8065 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8070 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8075 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8080 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8084 .resetvalue = cpu->isar.mvfr0 }, in register_cp_regs_for_features()
8085 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8089 .resetvalue = cpu->isar.mvfr1 }, in register_cp_regs_for_features()
8090 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8094 .resetvalue = cpu->isar.mvfr2 }, in register_cp_regs_for_features()
8101 { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32, in register_cp_regs_for_features()
8106 { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32, in register_cp_regs_for_features()
8111 { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32, in register_cp_regs_for_features()
8119 * being filled with AArch64-view-of-AArch32-ID-register in register_cp_regs_for_features()
8122 { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8127 { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8131 .resetvalue = cpu->isar.id_pfr2 }, in register_cp_regs_for_features()
8132 { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8136 .resetvalue = cpu->isar.id_dfr1 }, in register_cp_regs_for_features()
8137 { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8141 .resetvalue = cpu->isar.id_mmfr5 }, in register_cp_regs_for_features()
8142 { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8147 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, in register_cp_regs_for_features()
8151 .resetvalue = extract64(cpu->pmceid0, 0, 32) }, in register_cp_regs_for_features()
8152 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8156 .resetvalue = cpu->pmceid0 }, in register_cp_regs_for_features()
8157 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, in register_cp_regs_for_features()
8161 .resetvalue = extract64(cpu->pmceid1, 0, 32) }, in register_cp_regs_for_features()
8162 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8166 .resetvalue = cpu->pmceid1 }, in register_cp_regs_for_features()
8170 { .name = "ID_AA64PFR0_EL1", in register_cp_regs_for_features()
8177 { .name = "ID_AA64PFR1_EL1", in register_cp_regs_for_features()
8182 { .name = "ID_AA64PFR*_EL1_RESERVED", in register_cp_regs_for_features()
8184 { .name = "ID_AA64ZFR0_EL1", in register_cp_regs_for_features()
8195 { .name = "ID_AA64SMFR0_EL1", in register_cp_regs_for_features()
8208 { .name = "ID_AA64MMFR0_EL1", in register_cp_regs_for_features()
8212 { .name = "ID_AA64MMFR1_EL1", in register_cp_regs_for_features()
8214 { .name = "ID_AA64MMFR2_EL1", in register_cp_regs_for_features()
8216 { .name = "ID_AA64MMFR3_EL1", in register_cp_regs_for_features()
8218 { .name = "ID_AA64MMFR*_EL1_RESERVED", in register_cp_regs_for_features()
8220 { .name = "ID_AA64DFR0_EL1", in register_cp_regs_for_features()
8222 { .name = "ID_AA64DFR1_EL1" }, in register_cp_regs_for_features()
8223 { .name = "ID_AA64DFR*_EL1_RESERVED", in register_cp_regs_for_features()
8225 { .name = "ID_AA64AFR*", in register_cp_regs_for_features()
8227 { .name = "ID_AA64ISAR0_EL1", in register_cp_regs_for_features()
8241 { .name = "ID_AA64ISAR1_EL1", in register_cp_regs_for_features()
8255 { .name = "ID_AA64ISAR2_EL1", in register_cp_regs_for_features()
8264 { .name = "ID_AA64ISAR*_EL1_RESERVED", in register_cp_regs_for_features()
8278 { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8282 { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8297 * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32. in register_cp_regs_for_features()
8298 * For pre-v8 cores there are RAZ patterns for these in in register_cp_regs_for_features()
8301 * to also cover c0, 0, c{8-15}, {0-7}. in register_cp_regs_for_features()
8303 * c4-c7 is where the AArch64 ID registers live (and we've in register_cp_regs_for_features()
8304 * already defined those in v8_idregs[]), and c8-c15 are not in register_cp_regs_for_features()
8307 g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i); in register_cp_regs_for_features() local
8309 .name = name, in register_cp_regs_for_features()
8331 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, in register_cp_regs_for_features()
8334 .resetvalue = cpu->midr, in register_cp_regs_for_features()
8337 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8339 .access = PL2_RW, .resetvalue = cpu->midr, in register_cp_regs_for_features()
8343 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, in register_cp_regs_for_features()
8349 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8361 .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO, in register_cp_regs_for_features()
8382 { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8386 { .name = "RVBAR", .type = ARM_CP_ALIAS, in register_cp_regs_for_features()
8390 { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8402 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8406 { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8409 { .name = "RMR", .state = ARM_CP_STATE_AA32, in register_cp_regs_for_features()
8413 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8418 .resetvalue = cpu->reset_sctlr }, in register_cp_regs_for_features()
8435 .name = "NSACR", .type = ARM_CP_CONST, in register_cp_regs_for_features()
8443 .name = "NSACR", in register_cp_regs_for_features()
8454 .name = "NSACR", .type = ARM_CP_CONST, in register_cp_regs_for_features()
8475 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */ in register_cp_regs_for_features()
8496 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, in register_cp_regs_for_features()
8503 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, in register_cp_regs_for_features()
8510 * When LPAE exists this 32-bit PAR register is an alias of the in register_cp_regs_for_features()
8511 * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[] in register_cp_regs_for_features()
8547 * cp15 crn=0 to be write-ignored, whereas for other cores they should in register_cp_regs_for_features()
8548 * be read-only (i.e. a write causes an UNDEF exception). in register_cp_regs_for_features()
8553 * Pre-v8 MIDR space. in register_cp_regs_for_features()
8562 { .name = "MIDR", in register_cp_regs_for_features()
8564 .access = PL1_R, .resetvalue = cpu->midr, in register_cp_regs_for_features()
8570 { .name = "DUMMY", in register_cp_regs_for_features()
8573 { .name = "DUMMY", in register_cp_regs_for_features()
8576 { .name = "DUMMY", in register_cp_regs_for_features()
8579 { .name = "DUMMY", in register_cp_regs_for_features()
8582 { .name = "DUMMY", in register_cp_regs_for_features()
8587 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8589 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, in register_cp_regs_for_features()
8594 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, in register_cp_regs_for_features()
8596 .access = PL1_R, .resetvalue = cpu->midr }, in register_cp_regs_for_features()
8597 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8602 .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, in register_cp_regs_for_features()
8605 .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB, in register_cp_regs_for_features()
8607 .access = PL1_R, .resetvalue = cpu->midr in register_cp_regs_for_features()
8610 /* These are common to v8 and pre-v8 */ in register_cp_regs_for_features()
8611 { .name = "CTR", in register_cp_regs_for_features()
8614 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, in register_cp_regs_for_features()
8615 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8619 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, in register_cp_regs_for_features()
8620 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ in register_cp_regs_for_features()
8621 { .name = "TCMTR", in register_cp_regs_for_features()
8629 .name = "TLBTR", in register_cp_regs_for_features()
8637 .name = "MPUIR", in register_cp_regs_for_features()
8640 .resetvalue = cpu->pmsav7_dregion << 8 in register_cp_regs_for_features()
8644 .name = "HMPUIR", in register_cp_regs_for_features()
8647 .resetvalue = cpu->pmsav8r_hdregion in register_cp_regs_for_features()
8650 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, in register_cp_regs_for_features()
8656 { .name = "MIDR_EL1", in register_cp_regs_for_features()
8662 { .name = "REVIDR_EL1" }, in register_cp_regs_for_features()
8706 for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) { in register_cp_regs_for_features()
8713 .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW, in register_cp_regs_for_features()
8725 .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW, in register_cp_regs_for_features()
8736 for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) { in register_cp_regs_for_features()
8743 .name = tmp_string, in register_cp_regs_for_features()
8755 .name = tmp_string, in register_cp_regs_for_features()
8771 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8778 { .name = "MPIDR_EL1", in register_cp_regs_for_features()
8788 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8792 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, in register_cp_regs_for_features()
8793 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8797 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8810 * CBAR is IMPDEF, but common on Arm Cortex-A implementations. in register_cp_regs_for_features()
8812 * (1) older 32-bit only cores have a simple 32-bit CBAR in register_cp_regs_for_features()
8813 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a in register_cp_regs_for_features()
8814 * 32-bit register visible to AArch32 at a different encoding in register_cp_regs_for_features()
8816 * be able to squash a 64-bit address into the 32-bit view. in register_cp_regs_for_features()
8818 * in future if we support AArch32-only configs of some of the in register_cp_regs_for_features()
8824 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) in register_cp_regs_for_features()
8825 | extract64(cpu->reset_cbar, 32, 12); in register_cp_regs_for_features()
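
The cbar32 computation squashes the 64-bit reset CBAR into the legacy 32-bit view: per the arithmetic, bits [31:18] stay in place and bits [43:32] land in the low 12 bits. A standalone rendering, with extract64 reimplemented locally:

    #include <stdint.h>
    #include <stdio.h>

    static inline uint64_t extract64(uint64_t v, int start, int len)
    {
        return (v >> start) & (~0ull >> (64 - len));
    }

    static uint32_t cbar32_from_cbar64(uint64_t cbar)
    {
        return (extract64(cbar, 18, 14) << 18) | extract64(cbar, 32, 12);
    }

    int main(void)
    {
        printf("0x%x\n", cbar32_from_cbar64(0x0ABC40000000ull));
        /* [43:32] = 0xABC -> bits [11:0]; bit 30 stays -> 0x40000ABC */
        return 0;
    }
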
8827 { .name = "CBAR", in register_cp_regs_for_features()
8831 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8834 .access = PL1_R, .resetvalue = cpu->reset_cbar }, in register_cp_regs_for_features()
8841 .name = "CBAR", in register_cp_regs_for_features()
8843 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar, in register_cp_regs_for_features()
8858 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8874 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, in register_cp_regs_for_features()
8881 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, in register_cp_regs_for_features()
8887 * arch/arm/mach-pxa/sleep.S expects two instructions following in register_cp_regs_for_features()
8897 .name = "VSCTLR", .state = ARM_CP_STATE_AA32, in register_cp_regs_for_features()
8975 .name = "GMID_EL1", .state = ARM_CP_STATE_AA64, in register_cp_regs_for_features()
8978 .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize, in register_cp_regs_for_features()
9038 const char *name) in add_cpreg_to_hashtable() argument
9040 CPUARMState *env = &cpu->env; in add_cpreg_to_hashtable()
9043 bool is64 = r->type & ARM_CP_64BIT; in add_cpreg_to_hashtable()
9045 int cp = r->cp; in add_cpreg_to_hashtable()
9052 if (cp == 0 && r->state == ARM_CP_STATE_BOTH) { in add_cpreg_to_hashtable()
9055 key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2); in add_cpreg_to_hashtable()
9060 * cp == 0 as equivalent to the value for "standard guest-visible in add_cpreg_to_hashtable()
9062 * in their AArch64 view (the .cp value may be non-zero for the in add_cpreg_to_hashtable()
9065 if (cp == 0 || r->state == ARM_CP_STATE_BOTH) { in add_cpreg_to_hashtable()
9068 key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2); in add_cpreg_to_hashtable()
9075 if (!(r->type & ARM_CP_OVERRIDE)) { in add_cpreg_to_hashtable()
9076 const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key); in add_cpreg_to_hashtable()
9078 assert(oldreg->type & ARM_CP_OVERRIDE); in add_cpreg_to_hashtable()
9093 int min_el = ctz32(r->access) / 2; in add_cpreg_to_hashtable()
9095 if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) { in add_cpreg_to_hashtable()
9098 make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP); in add_cpreg_to_hashtable()
9103 if ((r->access & max_el) == 0) { in add_cpreg_to_hashtable()
9108 /* Combine cpreg and name into one allocation. */ in add_cpreg_to_hashtable()
9109 name_len = strlen(name) + 1; in add_cpreg_to_hashtable()
9112 r2->name = memcpy(r2 + 1, name, name_len); in add_cpreg_to_hashtable()
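
add_cpreg_to_hashtable makes a single allocation for the register struct plus its name, pointing ->name just past the struct, so the string's lifetime is exactly the struct's and one free releases both. The pattern in isolation (Reg and reg_dup are illustrative names):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct Reg {
        const char *name;
        int type;
    } Reg;

    /* One allocation holds the struct followed by its name. */
    static Reg *reg_dup(const Reg *tmpl, const char *name)
    {
        size_t name_len = strlen(name) + 1;
        Reg *r2 = malloc(sizeof(*r2) + name_len);
        *r2 = *tmpl;
        r2->name = memcpy(r2 + 1, name, name_len);
        return r2;
    }

    int main(void)
    {
        Reg tmpl = { .type = 1 };
        Reg *r = reg_dup(&tmpl, "MIDR_EL1");
        printf("%s\n", r->name);
        free(r);   /* frees the name too */
        return 0;
    }
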
9118 r2->cp = cp; in add_cpreg_to_hashtable()
9119 r2->crm = crm; in add_cpreg_to_hashtable()
9120 r2->opc1 = opc1; in add_cpreg_to_hashtable()
9121 r2->opc2 = opc2; in add_cpreg_to_hashtable()
9122 r2->state = state; in add_cpreg_to_hashtable()
9123 r2->secure = secstate; in add_cpreg_to_hashtable()
9125 r2->opaque = opaque; in add_cpreg_to_hashtable()
9130 int old_special = r2->type & ARM_CP_SPECIAL_MASK; in add_cpreg_to_hashtable()
9137 r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST; in add_cpreg_to_hashtable()
9140 * special cases like VPIDR_EL2 which have a constant non-zero in add_cpreg_to_hashtable()
9143 if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) { in add_cpreg_to_hashtable()
9144 r2->resetvalue = 0; in add_cpreg_to_hashtable()
9151 r2->readfn = NULL; in add_cpreg_to_hashtable()
9152 r2->writefn = NULL; in add_cpreg_to_hashtable()
9153 r2->raw_readfn = NULL; in add_cpreg_to_hashtable()
9154 r2->raw_writefn = NULL; in add_cpreg_to_hashtable()
9155 r2->resetfn = NULL; in add_cpreg_to_hashtable()
9156 r2->fieldoffset = 0; in add_cpreg_to_hashtable()
9157 r2->bank_fieldoffsets[0] = 0; in add_cpreg_to_hashtable()
9158 r2->bank_fieldoffsets[1] = 0; in add_cpreg_to_hashtable()
9160 bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]; in add_cpreg_to_hashtable()
9168 r2->fieldoffset = r->bank_fieldoffsets[ns]; in add_cpreg_to_hashtable()
9174 * reset the 32-bit instance in certain cases: in add_cpreg_to_hashtable()
9176 * 1) If the register has both 32-bit and 64-bit instances in add_cpreg_to_hashtable()
9177 * then we can count on the 64-bit instance taking care in add_cpreg_to_hashtable()
9178 * of the non-secure bank. in add_cpreg_to_hashtable()
9179 * 2) If ARMv8 is enabled then we can count on a 64-bit in add_cpreg_to_hashtable()
9181 * that separate 32 and 64-bit definitions are provided. in add_cpreg_to_hashtable()
9183 if ((r->state == ARM_CP_STATE_BOTH && ns) || in add_cpreg_to_hashtable()
9185 r2->type |= ARM_CP_ALIAS; in add_cpreg_to_hashtable()
9187 } else if ((secstate != r->secure) && !ns) { in add_cpreg_to_hashtable()
9190 * migration of the non-secure instance. in add_cpreg_to_hashtable()
9192 r2->type |= ARM_CP_ALIAS; in add_cpreg_to_hashtable()
9196 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) { in add_cpreg_to_hashtable()
9197 r2->fieldoffset += sizeof(uint32_t); in add_cpreg_to_hashtable()
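
The fieldoffset bump exists because, on a big-endian host, the low 32 bits of a uint64_t state field live at offset +4 rather than +0, so a 32-bit view of a shared 64-bit field must be shifted. A quick demonstration:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint64_t q = 0x1122334455667788ull;
        uint32_t lo;
        size_t off = 0;

    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        off = sizeof(uint32_t);   /* low word sits in the second half */
    #endif
        memcpy(&lo, (char *)&q + off, sizeof(lo));
        printf("low word = 0x%x\n", lo);   /* 0x55667788 on either host */
        return 0;
    }
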
9207 * never migratable and not even raw-accessible. in add_cpreg_to_hashtable()
9209 if (r2->type & ARM_CP_SPECIAL_MASK) { in add_cpreg_to_hashtable()
9210 r2->type |= ARM_CP_NO_RAW; in add_cpreg_to_hashtable()
9212 if (((r->crm == CP_ANY) && crm != 0) || in add_cpreg_to_hashtable()
9213 ((r->opc1 == CP_ANY) && opc1 != 0) || in add_cpreg_to_hashtable()
9214 ((r->opc2 == CP_ANY) && opc2 != 0)) { in add_cpreg_to_hashtable()
9215 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; in add_cpreg_to_hashtable()
9223 if (!(r2->type & ARM_CP_NO_RAW)) { in add_cpreg_to_hashtable()
9227 g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2); in add_cpreg_to_hashtable()
9244 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard in define_one_arm_cp_reg_with_opaque()
9253 * Only registers visible in AArch64 may set r->opc0; opc0 cannot in define_one_arm_cp_reg_with_opaque()
9259 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; in define_one_arm_cp_reg_with_opaque()
9260 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; in define_one_arm_cp_reg_with_opaque()
9261 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; in define_one_arm_cp_reg_with_opaque()
9262 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; in define_one_arm_cp_reg_with_opaque()
9263 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; in define_one_arm_cp_reg_with_opaque()
9264 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; in define_one_arm_cp_reg_with_opaque()
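
define_one_arm_cp_reg_with_opaque expands CP_ANY wildcards by collapsing each field to a [min, max] range: a wildcard spans the full architectural range, while a concrete value pins both bounds to itself, so one loop nest handles both cases. A standalone sketch (the CP_ANY value here is an illustrative sentinel):

    #include <stdio.h>

    #define CP_ANY 0xff   /* illustrative wildcard sentinel */

    static void define_range(int crm_spec, int opc1_spec, int opc2_spec)
    {
        int crmmin  = (crm_spec  == CP_ANY) ? 0  : crm_spec;
        int crmmax  = (crm_spec  == CP_ANY) ? 15 : crm_spec;
        int opc1min = (opc1_spec == CP_ANY) ? 0  : opc1_spec;
        int opc1max = (opc1_spec == CP_ANY) ? 7  : opc1_spec;
        int opc2min = (opc2_spec == CP_ANY) ? 0  : opc2_spec;
        int opc2max = (opc2_spec == CP_ANY) ? 7  : opc2_spec;

        for (int crm = crmmin; crm <= crmmax; crm++) {
            for (int opc1 = opc1min; opc1 <= opc1max; opc1++) {
                for (int opc2 = opc2min; opc2 <= opc2max; opc2++) {
                    printf("crm=%d opc1=%d opc2=%d\n", crm, opc1, opc2);
                }
            }
        }
    }

    int main(void)
    {
        define_range(12, 0, CP_ANY);   /* expands opc2 to 0..7 */
        return 0;
    }
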
9268 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); in define_one_arm_cp_reg_with_opaque()
9270 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); in define_one_arm_cp_reg_with_opaque()
9272 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); in define_one_arm_cp_reg_with_opaque()
9275 * (M-profile or v7A-and-earlier only) for implementation defined in define_one_arm_cp_reg_with_opaque()
9281 switch (r->state) { in define_one_arm_cp_reg_with_opaque()
9284 if (r->cp == 0) { in define_one_arm_cp_reg_with_opaque()
9289 if (arm_feature(&cpu->env, ARM_FEATURE_V8) && in define_one_arm_cp_reg_with_opaque()
9290 !arm_feature(&cpu->env, ARM_FEATURE_M)) { in define_one_arm_cp_reg_with_opaque()
9291 assert(r->cp >= 14 && r->cp <= 15); in define_one_arm_cp_reg_with_opaque()
9293 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15)); in define_one_arm_cp_reg_with_opaque()
9297 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP); in define_one_arm_cp_reg_with_opaque()
9309 if (r->state != ARM_CP_STATE_AA32) { in define_one_arm_cp_reg_with_opaque()
9311 switch (r->opc1) { in define_one_arm_cp_reg_with_opaque()
9338 /* broken reginfo with out-of-range opc1 */ in define_one_arm_cp_reg_with_opaque()
9342 assert((r->access & ~mask) == 0); in define_one_arm_cp_reg_with_opaque()
9349 if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) { in define_one_arm_cp_reg_with_opaque()
9350 if (r->access & PL3_R) { in define_one_arm_cp_reg_with_opaque()
9351 assert((r->fieldoffset || in define_one_arm_cp_reg_with_opaque()
9352 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || in define_one_arm_cp_reg_with_opaque()
9353 r->readfn); in define_one_arm_cp_reg_with_opaque()
9355 if (r->access & PL3_W) { in define_one_arm_cp_reg_with_opaque()
9356 assert((r->fieldoffset || in define_one_arm_cp_reg_with_opaque()
9357 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || in define_one_arm_cp_reg_with_opaque()
9358 r->writefn); in define_one_arm_cp_reg_with_opaque()
9367 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { in define_one_arm_cp_reg_with_opaque()
9370 if ((r->type & ARM_CP_ADD_TLBI_NXS) && in define_one_arm_cp_reg_with_opaque()
9376 * fine-grained trapping. Add the NXS insn here and in define_one_arm_cp_reg_with_opaque()
9379 * and name that it is passed, so it's OK to use in define_one_arm_cp_reg_with_opaque()
9383 g_autofree char *name = g_strdup_printf("%sNXS", r->name); in define_one_arm_cp_reg_with_opaque() local
9393 crm, opc1, opc2, name); in define_one_arm_cp_reg_with_opaque()
9398 * (same for secure and non-secure world) or banked. in define_one_arm_cp_reg_with_opaque()
9400 char *name; in define_one_arm_cp_reg_with_opaque() local
9402 switch (r->secure) { in define_one_arm_cp_reg_with_opaque()
9406 r->secure, crm, opc1, opc2, in define_one_arm_cp_reg_with_opaque()
9407 r->name); in define_one_arm_cp_reg_with_opaque()
9410 name = g_strdup_printf("%s_S", r->name); in define_one_arm_cp_reg_with_opaque()
9413 crm, opc1, opc2, name); in define_one_arm_cp_reg_with_opaque()
9414 g_free(name); in define_one_arm_cp_reg_with_opaque()
9417 crm, opc1, opc2, r->name); in define_one_arm_cp_reg_with_opaque()
9424 * AArch64 registers get mapped to non-secure instance in define_one_arm_cp_reg_with_opaque()
9429 crm, opc1, opc2, r->name); in define_one_arm_cp_reg_with_opaque()
9452 * user-space cannot alter any values and dynamic values pertaining to
9463 if (m->is_glob) { in modify_arm_cp_regs_with_len()
9464 pat = g_pattern_spec_new(m->name); in modify_arm_cp_regs_with_len()
9469 if (pat && g_pattern_match_string(pat, r->name)) { in modify_arm_cp_regs_with_len()
9470 r->type = ARM_CP_CONST; in modify_arm_cp_regs_with_len()
9471 r->access = PL0U_R; in modify_arm_cp_regs_with_len()
9472 r->resetvalue = 0; in modify_arm_cp_regs_with_len()
9474 } else if (strcmp(r->name, m->name) == 0) { in modify_arm_cp_regs_with_len()
9475 r->type = ARM_CP_CONST; in modify_arm_cp_regs_with_len()
9476 r->access = PL0U_R; in modify_arm_cp_regs_with_len()
9477 r->resetvalue &= m->exported_bits; in modify_arm_cp_regs_with_len()
9478 r->resetvalue |= m->fixed_bits; in modify_arm_cp_regs_with_len()
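
modify_arm_cp_regs_with_len treats m->name as a glob when is_glob is set and as an exact name otherwise, using GLib's pattern matcher. A condensed sketch of that split (g_pattern_match_string is the older GLib API used in the listing; name_matches is a hypothetical helper):

    #include <glib.h>
    #include <string.h>

    /* A pattern containing '*' is compiled as a glob; anything else is
     * compared exactly. E.g. "ID_AA64AFR*" matches "ID_AA64AFR0_EL1". */
    static gboolean name_matches(const char *pattern, const char *name)
    {
        if (strchr(pattern, '*')) {
            GPatternSpec *pat = g_pattern_spec_new(pattern);
            gboolean ok = g_pattern_match_string(pat, name);
            g_pattern_spec_free(pat);
            return ok;
        }
        return strcmp(pattern, name) == 0;
    }
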
9496 /* Helper coprocessor write function for write-ignore registers */ in arm_cp_write_ignore()
9501 /* Helper coprocessor read function for read-as-zero registers */ in arm_cp_read_zero()
9507 /* Helper coprocessor reset function for do-nothing-on-reset registers */ in arm_cp_reset_ignore()
9520 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || in bad_mode_switch()
9536 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) in bad_mode_switch()
9543 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && in bad_mode_switch()
9560 ZF = (env->ZF == 0); in cpsr_read()
9561 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | in cpsr_read()
9562 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) in cpsr_read()
9563 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) in cpsr_read()
9564 | ((env->condexec_bits & 0xfc) << 8) in cpsr_read()
9565 | (env->GE << 16) | (env->daif & CPSR_AIF); in cpsr_read()
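
cpsr_read reassembles CPSR from QEMU's split flag storage: N lives in bit 31 of NF, Z is encoded as "ZF == 0", C in bit 0 of CF, V in bit 31 of VF. Just the NZCV part, round-trippable against the cpsr_write lines below (nzcv_pack is an illustrative name):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t nzcv_pack(uint32_t NF, uint32_t ZF, uint32_t CF, uint32_t VF)
    {
        return (NF & 0x80000000u)           /* N -> bit 31 */
             | ((uint32_t)(ZF == 0) << 30)  /* Z -> bit 30 */
             | ((CF & 1) << 29)             /* C -> bit 29 */
             | ((VF & 0x80000000u) >> 3);   /* V -> bit 28 */
    }

    int main(void)
    {
        /* Z set (ZF == 0) and C set: expect 0x60000000 */
        printf("0x%x\n", nzcv_pack(0, 0, 1, 0));
        return 0;
    }
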
9576 env->ZF = (~val) & CPSR_Z; in cpsr_write()
9577 env->NF = val; in cpsr_write()
9578 env->CF = (val >> 29) & 1; in cpsr_write()
9579 env->VF = (val << 3) & 0x80000000; in cpsr_write()
9582 env->QF = ((val & CPSR_Q) != 0); in cpsr_write()
9585 env->thumb = ((val & CPSR_T) != 0); in cpsr_write()
9588 env->condexec_bits &= ~3; in cpsr_write()
9589 env->condexec_bits |= (val >> 25) & 3; in cpsr_write()
9592 env->condexec_bits &= 3; in cpsr_write()
9593 env->condexec_bits |= (val >> 8) & 0xfc; in cpsr_write()
9596 env->GE = (val >> 16) & 0xf; in cpsr_write()
9602 * whether non-secure software is allowed to change the CPSR_F and CPSR_A in cpsr_write()
9613 changed_daif = (env->daif ^ val) & mask; in cpsr_write()
9618 * abort exceptions from a non-secure state. in cpsr_write()
9620 if (!(env->cp15.scr_el3 & SCR_AW)) { in cpsr_write()
9623 "non-secure world with SCR.AW bit clear\n"); in cpsr_write()
9631 * exceptions from a non-secure state. in cpsr_write()
9633 if (!(env->cp15.scr_el3 & SCR_FW)) { in cpsr_write()
9636 "non-secure world with SCR.FW bit clear\n"); in cpsr_write()
9641 * Check whether non-maskable FIQ (NMFI) support is enabled. in cpsr_write()
9649 "(non-maskable FIQ [NMFI] support enabled)\n"); in cpsr_write()
9655 env->daif &= ~(CPSR_AIF & mask); in cpsr_write()
9656 env->daif |= val & CPSR_AIF & mask; in cpsr_write()
9659 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { in cpsr_write()
9660 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { in cpsr_write()
9687 aarch32_mode_name(env->uncached_cpsr), in cpsr_write()
9694 aarch32_mode_name(env->uncached_cpsr), in cpsr_write()
9695 aarch32_mode_name(val), env->regs[15]); in cpsr_write()
9700 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); in cpsr_write()
9735 old_mode = env->uncached_cpsr & CPSR_M; in switch_mode()
9741 memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); in switch_mode()
9742 memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); in switch_mode()
9744 memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); in switch_mode()
9745 memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); in switch_mode()
9749 env->banked_r13[i] = env->regs[13]; in switch_mode()
9750 env->banked_spsr[i] = env->spsr; in switch_mode()
9753 env->regs[13] = env->banked_r13[i]; in switch_mode()
9754 env->spsr = env->banked_spsr[i]; in switch_mode()
9756 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; in switch_mode()
9757 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; in switch_mode()
9763 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
9765 * The below multi-dimensional table is used for looking up the target
9772 * | | | | | +--- Current EL
9773 * | | | | +------ Non-secure(0)/Secure(1)
9774 * | | | +--------- HCR mask override
9775 * | | +------------ SCR exec state control
9776 * | +--------------- SCR mask override
9777 * +------------------ 32-bit(0)/64-bit(1) EL3
9780 * 0-3 = EL0-EL3
9781 * -1 = Cannot occur
9795 * BIT IRQ IMO Non-secure Secure
9799 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
9800 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
9801 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
9802 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
9803 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
9804 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
9805 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
9806 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
9807 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
9808 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},},
9809 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },},
9810 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},},
9811 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
9812 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
9813 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},
9814 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},},
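
With the table in place, arm_phys_excp_target_el reduces exception routing to pure array indexing: each state bit in the diagram becomes one dimension, the current EL is innermost, and -1 marks states that cannot occur. A cut-down two-dimensional sketch (row values borrowed from the first entries above, purely for illustration):

    #include <stdint.h>
    #include <stdio.h>

    static const int8_t demo_table[2][4] = {
        /* SCR route bit clear */ { 1, 1, 2, -1 },
        /* SCR route bit set   */ { 3, 3, 3, -1 },
    };

    int main(void)
    {
        int scr = 0, cur_el = 0;
        int target = demo_table[scr][cur_el];
        if (target >= 0) {
            printf("deliver to EL%d\n", target);   /* -> EL1 */
        }
        return 0;
    }
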
9847 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); in arm_phys_excp_target_el()
9851 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); in arm_phys_excp_target_el()
9855 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); in arm_phys_excp_target_el()
9866 /* Perform a table-lookup for the target EL given the current state */ in arm_phys_excp_target_el()
9876 int idx = cs->exception_index; in arm_log_exception()
9918 idx, exc, cs->cpu_index); in arm_log_exception()
9930 uint32_t mode = env->uncached_cpsr & CPSR_M; in aarch64_sync_32_to_64()
9934 env->xregs[i] = env->regs[i]; in aarch64_sync_32_to_64()
9938 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. in aarch64_sync_32_to_64()
9943 env->xregs[i] = env->usr_regs[i - 8]; in aarch64_sync_32_to_64()
9947 env->xregs[i] = env->regs[i]; in aarch64_sync_32_to_64()
9952 * Registers x13-x23 are the various mode SP and FP registers. Registers in aarch64_sync_32_to_64()
9957 env->xregs[13] = env->regs[13]; in aarch64_sync_32_to_64()
9958 env->xregs[14] = env->regs[14]; in aarch64_sync_32_to_64()
9960 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; in aarch64_sync_32_to_64()
9963 env->xregs[14] = env->regs[14]; in aarch64_sync_32_to_64()
9965 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; in aarch64_sync_32_to_64()
9970 env->xregs[15] = env->regs[13]; in aarch64_sync_32_to_64()
9972 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; in aarch64_sync_32_to_64()
9976 env->xregs[16] = env->regs[14]; in aarch64_sync_32_to_64()
9977 env->xregs[17] = env->regs[13]; in aarch64_sync_32_to_64()
9979 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; in aarch64_sync_32_to_64()
9980 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; in aarch64_sync_32_to_64()
9984 env->xregs[18] = env->regs[14]; in aarch64_sync_32_to_64()
9985 env->xregs[19] = env->regs[13]; in aarch64_sync_32_to_64()
9987 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; in aarch64_sync_32_to_64()
9988 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; in aarch64_sync_32_to_64()
9992 env->xregs[20] = env->regs[14]; in aarch64_sync_32_to_64()
9993 env->xregs[21] = env->regs[13]; in aarch64_sync_32_to_64()
9995 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; in aarch64_sync_32_to_64()
9996 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; in aarch64_sync_32_to_64()
10000 env->xregs[22] = env->regs[14]; in aarch64_sync_32_to_64()
10001 env->xregs[23] = env->regs[13]; in aarch64_sync_32_to_64()
10003 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; in aarch64_sync_32_to_64()
10004 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; in aarch64_sync_32_to_64()
10008 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ in aarch64_sync_32_to_64()
10009 * mode, then we can copy from r8-r14. Otherwise, we copy from the in aarch64_sync_32_to_64()
10010 * FIQ bank for r8-r14. in aarch64_sync_32_to_64()
10014 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ in aarch64_sync_32_to_64()
10018 env->xregs[i] = env->fiq_regs[i - 24]; in aarch64_sync_32_to_64()
10020 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; in aarch64_sync_32_to_64()
10021 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; in aarch64_sync_32_to_64()
10024 env->pc = env->regs[15]; in aarch64_sync_32_to_64()
10035 uint32_t mode = env->uncached_cpsr & CPSR_M; in aarch64_sync_64_to_32()
10039 env->regs[i] = env->xregs[i]; in aarch64_sync_64_to_32()
10043 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. in aarch64_sync_64_to_32()
10044 * Otherwise, we copy x8-x12 into the banked user regs. in aarch64_sync_64_to_32()
10048 env->usr_regs[i - 8] = env->xregs[i]; in aarch64_sync_64_to_32()
10052 env->regs[i] = env->xregs[i]; in aarch64_sync_64_to_32()
10063 env->regs[13] = env->xregs[13]; in aarch64_sync_64_to_32()
10064 env->regs[14] = env->xregs[14]; in aarch64_sync_64_to_32()
10066 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; in aarch64_sync_64_to_32()
10073 env->regs[14] = env->xregs[14]; in aarch64_sync_64_to_32()
10075 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; in aarch64_sync_64_to_32()
10080 env->regs[13] = env->xregs[15]; in aarch64_sync_64_to_32()
10082 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; in aarch64_sync_64_to_32()
10086 env->regs[14] = env->xregs[16]; in aarch64_sync_64_to_32()
10087 env->regs[13] = env->xregs[17]; in aarch64_sync_64_to_32()
10089 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; in aarch64_sync_64_to_32()
10090 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; in aarch64_sync_64_to_32()
10094 env->regs[14] = env->xregs[18]; in aarch64_sync_64_to_32()
10095 env->regs[13] = env->xregs[19]; in aarch64_sync_64_to_32()
10097 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; in aarch64_sync_64_to_32()
10098 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; in aarch64_sync_64_to_32()
10102 env->regs[14] = env->xregs[20]; in aarch64_sync_64_to_32()
10103 env->regs[13] = env->xregs[21]; in aarch64_sync_64_to_32()
10105 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; in aarch64_sync_64_to_32()
10106 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; in aarch64_sync_64_to_32()
10110 env->regs[14] = env->xregs[22]; in aarch64_sync_64_to_32()
10111 env->regs[13] = env->xregs[23]; in aarch64_sync_64_to_32()
10113 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; in aarch64_sync_64_to_32()
10114 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; in aarch64_sync_64_to_32()
10118 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ in aarch64_sync_64_to_32()
10119 * mode, then we can copy to r8-r14. Otherwise, we copy to the in aarch64_sync_64_to_32()
10120 * FIQ bank for r8-r14. in aarch64_sync_64_to_32()
10124 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ in aarch64_sync_64_to_32()
10128 env->fiq_regs[i - 24] = env->xregs[i]; in aarch64_sync_64_to_32()
10130 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; in aarch64_sync_64_to_32()
10131 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; in aarch64_sync_64_to_32()
10134 env->regs[15] = env->pc; in aarch64_sync_64_to_32()
10148 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. in take_aarch32_exception()
10150 env->pstate &= ~PSTATE_SS; in take_aarch32_exception()
10151 env->spsr = cpsr_read(env); in take_aarch32_exception()
10153 env->condexec_bits = 0; in take_aarch32_exception()
10155 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; in take_aarch32_exception()
10161 env->uncached_cpsr &= ~CPSR_E; in take_aarch32_exception()
10162 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) { in take_aarch32_exception()
10163 env->uncached_cpsr |= CPSR_E; in take_aarch32_exception()
10166 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J); in take_aarch32_exception()
10167 env->daif |= mask; in take_aarch32_exception()
10170 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) { in take_aarch32_exception()
10171 env->uncached_cpsr |= CPSR_SSBS; in take_aarch32_exception()
10173 env->uncached_cpsr &= ~CPSR_SSBS; in take_aarch32_exception()
10178 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0; in take_aarch32_exception()
10179 env->elr_el[2] = env->regs[15]; in take_aarch32_exception()
10186 /* ... the target is EL3, from non-secure state. */ in take_aarch32_exception()
10187 env->uncached_cpsr &= ~CPSR_PAN; in take_aarch32_exception()
10194 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) { in take_aarch32_exception()
10195 env->uncached_cpsr |= CPSR_PAN; in take_aarch32_exception()
10205 env->thumb = in take_aarch32_exception()
10208 env->regs[14] = env->regs[15] + offset; in take_aarch32_exception()
10210 env->regs[15] = newpc; in take_aarch32_exception()
10232 CPUARMState *env = &cpu->env; in arm_cpu_do_interrupt_aarch32_hyp()
10234 switch (cs->exception_index) { in arm_cpu_do_interrupt_aarch32_hyp()
10244 env->cp15.ifar_s = env->exception.vaddress; in arm_cpu_do_interrupt_aarch32_hyp()
10246 (uint32_t)env->exception.vaddress); in arm_cpu_do_interrupt_aarch32_hyp()
10250 env->cp15.dfar_s = env->exception.vaddress; in arm_cpu_do_interrupt_aarch32_hyp()
10252 (uint32_t)env->exception.vaddress); in arm_cpu_do_interrupt_aarch32_hyp()
10268 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); in arm_cpu_do_interrupt_aarch32_hyp()
10271 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { in arm_cpu_do_interrupt_aarch32_hyp()
10274 * QEMU syndrome values are v8-style. v7 has the IL bit in arm_cpu_do_interrupt_aarch32_hyp()
10278 if (cs->exception_index == EXCP_PREFETCH_ABORT || in arm_cpu_do_interrupt_aarch32_hyp()
10279 (cs->exception_index == EXCP_DATA_ABORT && in arm_cpu_do_interrupt_aarch32_hyp()
10280 !(env->exception.syndrome & ARM_EL_ISV)) || in arm_cpu_do_interrupt_aarch32_hyp()
10281 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { in arm_cpu_do_interrupt_aarch32_hyp()
10282 env->exception.syndrome &= ~ARM_EL_IL; in arm_cpu_do_interrupt_aarch32_hyp()
10285 env->cp15.esr_el[2] = env->exception.syndrome; in arm_cpu_do_interrupt_aarch32_hyp()
10293 if (!(env->cp15.scr_el3 & SCR_EA)) { in arm_cpu_do_interrupt_aarch32_hyp()
10296 if (!(env->cp15.scr_el3 & SCR_IRQ)) { in arm_cpu_do_interrupt_aarch32_hyp()
10299 if (!(env->cp15.scr_el3 & SCR_FIQ)) { in arm_cpu_do_interrupt_aarch32_hyp()
10303 addr += env->cp15.hvbar; in arm_cpu_do_interrupt_aarch32_hyp()
10311 CPUARMState *env = &cpu->env; in arm_cpu_do_interrupt_aarch32()
10319 switch (syn_get_ec(env->exception.syndrome)) { in arm_cpu_do_interrupt_aarch32()
10340 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); in arm_cpu_do_interrupt_aarch32()
10343 if (env->exception.target_el == 2) { in arm_cpu_do_interrupt_aarch32()
10345 switch (syn_get_ec(env->exception.syndrome)) { in arm_cpu_do_interrupt_aarch32()
10350 env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2, in arm_cpu_do_interrupt_aarch32()
10354 env->exception.syndrome = syn_set_ec(env->exception.syndrome, in arm_cpu_do_interrupt_aarch32()
10358 env->exception.syndrome = syn_set_ec(env->exception.syndrome, in arm_cpu_do_interrupt_aarch32()
10366 switch (cs->exception_index) { in arm_cpu_do_interrupt_aarch32()
10371 if (env->thumb) { in arm_cpu_do_interrupt_aarch32()
10387 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); in arm_cpu_do_interrupt_aarch32()
10388 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); in arm_cpu_do_interrupt_aarch32()
10390 env->exception.fsr, (uint32_t)env->exception.vaddress); in arm_cpu_do_interrupt_aarch32()
10397 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); in arm_cpu_do_interrupt_aarch32()
10398 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); in arm_cpu_do_interrupt_aarch32()
10400 env->exception.fsr, in arm_cpu_do_interrupt_aarch32()
10401 (uint32_t)env->exception.vaddress); in arm_cpu_do_interrupt_aarch32()
10413 if (env->cp15.scr_el3 & SCR_IRQ) { in arm_cpu_do_interrupt_aarch32()
10424 if (env->cp15.scr_el3 & SCR_FIQ) { in arm_cpu_do_interrupt_aarch32()
10454 env->exception.fsr = arm_fi_to_lfsc(&fi); in arm_cpu_do_interrupt_aarch32()
10456 env->exception.fsr = arm_fi_to_sfsc(&fi); in arm_cpu_do_interrupt_aarch32()
10458 env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000; in arm_cpu_do_interrupt_aarch32()
10459 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); in arm_cpu_do_interrupt_aarch32()
10461 env->exception.fsr); in arm_cpu_do_interrupt_aarch32()
10479 if (env->thumb) { in arm_cpu_do_interrupt_aarch32()
10486 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); in arm_cpu_do_interrupt_aarch32()
10491 addr += env->cp15.mvbar; in arm_cpu_do_interrupt_aarch32()
10499 * This register is only honoured in non-monitor mode, and is banked. in arm_cpu_do_interrupt_aarch32()
10505 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { in arm_cpu_do_interrupt_aarch32()
10506 env->cp15.scr_el3 &= ~SCR_NS; in arm_cpu_do_interrupt_aarch32()
10519 int mode = env->uncached_cpsr & CPSR_M; in aarch64_regnum()
10582 ret |= env->pstate & PSTATE_SS; in cpsr_read_for_spsr_elx()
10616 CPUARMState *env = &cpu->env; in arm_cpu_do_interrupt_aarch64()
10617 unsigned int new_el = env->exception.target_el; in arm_cpu_do_interrupt_aarch64()
10618 vaddr addr = env->cp15.vbar_el[new_el]; in arm_cpu_do_interrupt_aarch64()
10667 switch (cs->exception_index) { in arm_cpu_do_interrupt_aarch64()
10670 env->cp15.mfar_el3); in arm_cpu_do_interrupt_aarch64()
10678 if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) && in arm_cpu_do_interrupt_aarch64()
10679 syndrome_is_sync_extabt(env->exception.syndrome)) { in arm_cpu_do_interrupt_aarch64()
10682 env->cp15.far_el[new_el] = env->exception.vaddress; in arm_cpu_do_interrupt_aarch64()
10684 env->cp15.far_el[new_el]); in arm_cpu_do_interrupt_aarch64()
10692 switch (syn_get_ec(env->exception.syndrome)) { in arm_cpu_do_interrupt_aarch64()
10700 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); in arm_cpu_do_interrupt_aarch64()
10709 * number. Notice that we read a 4-bit AArch32 register number and in arm_cpu_do_interrupt_aarch64()
10710 * write back a 5-bit AArch64 one. in arm_cpu_do_interrupt_aarch64()
10712 rt = extract32(env->exception.syndrome, 5, 4); in arm_cpu_do_interrupt_aarch64()
10714 env->exception.syndrome = deposit32(env->exception.syndrome, in arm_cpu_do_interrupt_aarch64()
10720 rt = extract32(env->exception.syndrome, 5, 4); in arm_cpu_do_interrupt_aarch64()
10722 env->exception.syndrome = deposit32(env->exception.syndrome, in arm_cpu_do_interrupt_aarch64()
10724 rt = extract32(env->exception.syndrome, 10, 4); in arm_cpu_do_interrupt_aarch64()
10726 env->exception.syndrome = deposit32(env->exception.syndrome, in arm_cpu_do_interrupt_aarch64()
10730 env->cp15.esr_el[new_el] = env->exception.syndrome; in arm_cpu_do_interrupt_aarch64()
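
Taken together, the hits above implement the "4-bit AArch32 register number in, 5-bit AArch64 number out" rule from the comment: the register number is pulled out of the syndrome, remapped through the banked-mode table, and deposited back before ESR_ELx is written. A condensed sketch of the pattern (field offsets as in the hits; aarch64_regnum() is the QEMU helper that appears earlier in this listing):

    uint32_t syn = env->exception.syndrome;
    int rt;

    rt = extract32(syn, 5, 4);        /* 4-bit AArch32 Rt at syndrome[8:5] */
    rt = aarch64_regnum(env, rt);     /* remap via the current CPSR mode */
    syn = deposit32(syn, 5, 5, rt);   /* write back as a 5-bit field */
    env->cp15.esr_el[new_el] = syn;
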
10746 env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff); in arm_cpu_do_interrupt_aarch64()
10747 env->cp15.esr_el[new_el] = env->exception.syndrome; in arm_cpu_do_interrupt_aarch64()
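
The syndrome ISS field is 25 bits wide, which is why only VSESR_EL2[24:0] survives into the injected SError syndrome: 0x1ffffff is exactly that mask. The same line written with QEMU's mask helper, as seen in other hits in this listing:

    /* 0x1ffffff == MAKE_64BIT_MASK(0, 25): the 25-bit ISS portion. */
    env->exception.syndrome =
        syn_serror(env->cp15.vsesr_el2 & MAKE_64BIT_MASK(0, 25));
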
10750 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); in arm_cpu_do_interrupt_aarch64()
10756 env->elr_el[new_el] = env->pc; in arm_cpu_do_interrupt_aarch64()
10773 env->elr_el[new_el] = env->regs[15]; in arm_cpu_do_interrupt_aarch64()
10777 env->condexec_bits = 0; in arm_cpu_do_interrupt_aarch64()
10779 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; in arm_cpu_do_interrupt_aarch64()
10783 env->elr_el[new_el]); in arm_cpu_do_interrupt_aarch64()
10799 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { in arm_cpu_do_interrupt_aarch64()
10810 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) { in arm_cpu_do_interrupt_aarch64()
10818 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) { in arm_cpu_do_interrupt_aarch64()
10826 env->aarch64 = true; in arm_cpu_do_interrupt_aarch64()
10833 env->pc = addr; in arm_cpu_do_interrupt_aarch64()
10836 new_el, env->pc, pstate_read(env)); in arm_cpu_do_interrupt_aarch64()
10850 CPUARMState *env = &cpu->env; in tcg_handle_semihosting()
10855 env->xregs[0]); in tcg_handle_semihosting()
10857 env->pc += 4; in tcg_handle_semihosting()
10861 env->regs[0]); in tcg_handle_semihosting()
10863 env->regs[15] += env->thumb ? 2 : 4; in tcg_handle_semihosting()
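
Semihosting returns to the instruction after the trapping one, so the PC advance depends on the instruction set: always 4 bytes in AArch64, and 4 or 2 bytes in AArch32 depending on Thumb state. The two hits above condense to (a sketch; is_a64() is QEMU's AArch64-state predicate):

    if (is_a64(env)) {
        env->pc += 4;                          /* A64 encodings are 4 bytes */
    } else {
        env->regs[15] += env->thumb ? 2 : 4;   /* Thumb: 2 bytes, A32: 4 */
    }
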
10871 * to the AArch64-entry or AArch32-entry function depending on the target exception level's register width.
10875 * and KVM to re-inject guest debug exceptions, and to
10876 * inject a Synchronous-External-Abort.
10881 CPUARMState *env = &cpu->env; in arm_cpu_do_interrupt()
10882 unsigned int new_el = env->exception.target_el; in arm_cpu_do_interrupt()
10890 && !excp_is_internal(cs->exception_index)) { in arm_cpu_do_interrupt()
10892 syn_get_ec(env->exception.syndrome), in arm_cpu_do_interrupt()
10893 env->exception.syndrome); in arm_cpu_do_interrupt()
10896 if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) { in arm_cpu_do_interrupt()
10908 if (cs->exception_index == EXCP_SEMIHOST) { in arm_cpu_do_interrupt()
10917 * cs->interrupt_request. in arm_cpu_do_interrupt()
10923 assert(!excp_is_internal(cs->exception_index)); in arm_cpu_do_interrupt()
10933 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; in arm_cpu_do_interrupt()
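
Per the doc-comment hits above, arm_cpu_do_interrupt() logs the exception, filters out PSCI and semihosting calls, and then hands off by the target exception level's register width. Schematically (a sketch of the dispatch shape, not the verbatim body; arm_el_is_aa64() is QEMU's per-EL width predicate):

    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }
    /* Make TCG abandon the current translation-block chain so the new
     * CPU state takes effect (the final hit above). */
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
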
10955 return env->cp15.sctlr_el[el]; in arm_sctlr()
11140 max_tsz = 48 - (gran == Gran64K); in aa64_va_parameters()
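
The comparison in the hit above is the usual C 0-or-1 idiom: on this path the maximum TxSZ is 48, except with the 64K granule where it is 47. (From the surrounding function, not visible in the hits: QEMU applies this ceiling when the CPU supports small translation tables, and otherwise caps TxSZ at 39.) In isolation:

    /* (gran == Gran64K) evaluates to 0 or 1, so:
     *   4K/16K granule -> max_tsz = 48
     *   64K granule    -> max_tsz = 47
     */
    max_tsz = 48 - (gran == Gran64K);
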
11219 * Return the exception level to which FP-disabled exceptions should be taken, or 0 if FP access is enabled.
11237 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) { in fp_exception_el()
11241 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) { in fp_exception_el()
11242 if (!extract32(env->v7m.nsacr, 10, 1)) { in fp_exception_el()
11261 int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN); in fp_exception_el()
11284 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode in fp_exception_el()
11285 * to control non-secure access to the FPU. It doesn't have any in fp_exception_el()
11290 if (!extract32(env->cp15.nsacr, 10, 1)) { in fp_exception_el()
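
NSACR bit 10 is the cp10 (FPU) enable for the Non-secure world: when it is clear while EL3 is AArch32, Non-secure FP instructions act as UNDEF. Annotated sketch of the hit above (the returned EL is an assumption about the surrounding function, which reports the level that takes the FP-disabled exception):

    /* NSACR.CP10 (bit 10) == 0 -> FPU reserved to the Secure world. */
    if (!extract32(env->cp15.nsacr, 10, 1)) {
        return cur_el == 2 ? 2 : 1;   /* take the UNDEF locally, not at EL3 */
    }
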
11302 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) { in fp_exception_el()
11313 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) { in fp_exception_el()
11320 if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) { in fp_exception_el()
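
FIELD_EX64() extracts a bitfield declared with QEMU's FIELD() macro, so the hits above read CPACR_EL1.FPEN, CPTR_EL2.FPEN/TFP and CPTR_EL3.TFP in turn, walking up the exception levels that can disable FP. A sketch of the EL1 gate, assuming the architectural FPEN encoding (0b11: no trap; 0b01: trap EL0 only; 0b00/0b10: trap EL0 and EL1):

    int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
    bool trapped = !(fpen == 3 || (fpen == 1 && cur_el != 0));
    /* trapped -> the FP-disabled exception is taken at EL1 (or at EL2
     * where EL1 exceptions are re-routed there). */
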
11368 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); in arm_mmu_idx_el()
11371 /* See ARM pseudo-function ELIsInHost. */ in arm_mmu_idx_el()
11440 assert(vq <= env_archcpu(env)->sve_max_vq); in aarch64_sve_narrow_vq()
11444 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); in aarch64_sve_narrow_vq()
11450 pmask = ~(-1ULL << (16 * (vq & 3))); in aarch64_sve_narrow_vq()
11454 env->vfp.pregs[i].p[j] &= pmask; in aarch64_sve_narrow_vq()
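
Two invariants explain the narrowing hits above: each 128-bit vector quadword owns 16 predicate bits, so one 64-bit predicate word covers four quadwords, and vq & 3 is the number of quadwords still live in a partial final word. A self-contained worked example of the mask (sve_pred_tail_mask is a hypothetical name):

    #include <stdint.h>

    /* Keep the low 16*(vq % 4) bits of a 64-bit predicate word.
     * vq = 5 -> vq & 3 == 1 -> returns 0x000000000000ffff
     * vq & 3 == 0 -> returns 0 (that word holds no live quadwords)
     */
    static uint64_t sve_pred_tail_mask(unsigned vq)
    {
        return ~(-1ULL << (16 * (vq & 3)));
    }
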
11503 sm = FIELD_EX64(env->svcr, SVCR, SM); in aarch64_sve_change_el()
11515 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). in aarch64_sve_change_el()
11516 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition in aarch64_sve_change_el()
11517 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that in aarch64_sve_change_el()
11519 * vq0->vq0 transition between EL0->EL1. in aarch64_sve_change_el()
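
The policy in this comment reduces to one comparison: compute the effective vector length before and after the EL change, treating an AArch32 EL as length 0, and narrow only when the length shrank. A schematic sketch (old_a64/new_a64 are assumed flags for the register width at each EL; sve_vqm1_for_el() is QEMU's per-EL "VQ minus one" helper):

    /* An EL in AArch32 state counts as vector length 0, per the comment. */
    int old_len = old_a64 ? sve_vqm1_for_el(env, old_el) : 0;
    int new_len = new_a64 ? sve_vqm1_for_el(env, new_el) : 0;

    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);   /* zero the dropped tail */
    }
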
11539 return arm_secure_to_space(env->v7m.secure); in arm_security_space()
11544 * defined, in which case QEMU defaults to non-secure. in arm_security_space()
11552 if (extract32(env->pstate, 2, 2) == 3) { in arm_security_space()
11560 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { in arm_security_space()
11574 * defined, in which case QEMU defaults to non-secure. in arm_security_space_below_el3()
11585 if (!(env->cp15.scr_el3 & SCR_NS)) { in arm_security_space_below_el3()
11587 } else if (env->cp15.scr_el3 & SCR_NSE) { in arm_security_space_below_el3()
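
These last hits decode SCR_EL3.{NSE,NS} into a security space as defined by FEAT_RME: NS=0 selects Secure, NS=1 alone Non-secure, and NSE=1 with NS=1 Realm. Filled out with the return values the hits imply (ARMSS_* are QEMU's ARMSecuritySpace constants), the decode reads:

    if (!(env->cp15.scr_el3 & SCR_NS)) {
        return ARMSS_Secure;          /* NSE=0, NS=0 */
    } else if (env->cp15.scr_el3 & SCR_NSE) {
        return ARMSS_Realm;           /* NSE=1, NS=1 */
    } else {
        return ARMSS_NonSecure;       /* NSE=0, NS=1 */
    }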