Lines matching "--enable-sparse" (search tokens: +full:- +full:- +full:enable +full:- +full:sparse)
6 * SPDX-License-Identifier: GPL-2.0-or-later
14 #include "cpu-features.h"
15 #include "exec/page-protection.h"
16 #include "exec/mmap-lock.h"
17 #include "qemu/main-loop.h"
20 #include "qemu/qemu-print.h"
22 #include "exec/translation-block.h"
24 #include "system/cpu-timers.h"
29 #include "qemu/guest-random.h"
33 #include "semihosting/common-semi.h"
39 #include "exec/helper-proto.h.inc"
47 assert(ri->fieldoffset);
57 assert(ri->fieldoffset);
67 return (char *)env + ri->fieldoffset;
73 if (ri->type & ARM_CP_CONST) {
74 return ri->resetvalue;
75 } else if (ri->raw_readfn) {
76 return ri->raw_readfn(env, ri);
77 } else if (ri->readfn) {
78 return ri->readfn(env, ri);
89 * Note that constant registers are treated as write-ignored; the
93 if (ri->type & ARM_CP_CONST) {
95 } else if (ri->raw_writefn) {
96 ri->raw_writefn(env, ri, v);
97 } else if (ri->writefn) {
98 ri->writefn(env, ri, v);
118 if ((ri->type & ARM_CP_CONST) ||
119 ri->fieldoffset ||
120 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
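Read together, the accessor fragments above define a strict precedence for raw sysreg access: an ARM_CP_CONST register returns its resetvalue, then a raw_readfn/raw_writefn wins, then the ordinary readfn/writefn, with the field at fieldoffset as the final fallback; the last predicate decides whether a register is raw-accessible at all. Below is a standalone mimic of the read-side dispatch; every type and name in it is invented for illustration rather than taken from QEMU.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for CPUARMState and ARMCPRegInfo. */
typedef struct { uint64_t some_field; } Env;
typedef struct RegInfo RegInfo;
struct RegInfo {
    int is_const;                               /* plays the ARM_CP_CONST role */
    uint64_t resetvalue;
    uint64_t (*raw_readfn)(Env *, const RegInfo *);
    uint64_t (*readfn)(Env *, const RegInfo *);
    size_t fieldoffset;                         /* offset into Env */
};

static uint64_t read_raw(Env *env, const RegInfo *ri)
{
    if (ri->is_const) {
        return ri->resetvalue;                  /* constants read as reset value */
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);         /* side-effect-free accessor */
    } else if (ri->readfn) {
        return ri->readfn(env, ri);             /* fall back to the normal hook */
    }
    return *(uint64_t *)((char *)env + ri->fieldoffset);
}

int main(void)
{
    Env env = { .some_field = 0x1234 };
    RegInfo plain = { .fieldoffset = offsetof(Env, some_field) };
    RegInfo constant = { .is_const = 1, .resetvalue = 42 };
    printf("%#llx %llu\n",
           (unsigned long long)read_raw(&env, &plain),
           (unsigned long long)read_raw(&env, &constant));
    return 0;
}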
128 /* Write the coprocessor state from cpu->env to the (index,value) list. */
132 for (i = 0; i < cpu->cpreg_array_len; i++) {
133 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
137 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
142 if (ri->type & ARM_CP_NO_RAW) {
146 newval = read_raw_cp_reg(&cpu->env, ri);
149 * Only sync if the previous list->cpustate sync succeeded.
154 uint64_t oldval = cpu->cpreg_values[i];
160 write_raw_cp_reg(&cpu->env, ri, oldval);
161 if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
165 write_raw_cp_reg(&cpu->env, ri, newval);
167 cpu->cpreg_values[i] = newval;
177 for (i = 0; i < cpu->cpreg_array_len; i++) {
178 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
179 uint64_t v = cpu->cpreg_values[i];
182 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
187 if (ri->type & ARM_CP_NO_RAW) {
192 * (to catch read-only registers and partially read-only
195 write_raw_cp_reg(&cpu->env, ri, v);
196 if (read_raw_cp_reg(&cpu->env, ri) != v) {
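The loop ending here relies on a write-then-read-back check: after writing the saved value it reads the register again and flags a failure if the value did not stick, which is how read-only and partially read-only registers are caught. A toy version of that pattern, using an invented register whose low nibble is read-only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented register: bits [3:0] are read-only, the rest are writable. */
static uint64_t reg_storage = 0x5;

static void reg_write(uint64_t v)
{
    reg_storage = (reg_storage & 0xf) | (v & ~(uint64_t)0xf);
}

static uint64_t reg_read(void)
{
    return reg_storage;
}

/* Same shape as the sync loop: write, read back, compare. */
static bool sync_one(uint64_t v)
{
    reg_write(v);
    return reg_read() == v;
}

int main(void)
{
    printf("0x35 stuck: %d\n", sync_one(0x35));  /* 1: low nibble matches */
    printf("0x12 stuck: %d\n", sync_one(0x12));  /* 0: low nibble is RO */
    return 0;
}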
207 const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
209 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
210 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
212 cpu->cpreg_array_len++;
221 ri = g_hash_table_lookup(cpu->cp_regs, key);
223 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
224 cpu->cpreg_array_len++;
237 return -1;
251 keys = g_hash_table_get_keys(cpu->cp_regs);
254 cpu->cpreg_array_len = 0;
258 arraylen = cpu->cpreg_array_len;
259 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
260 cpu->cpreg_values = g_new(uint64_t, arraylen);
261 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
262 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
263 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
264 cpu->cpreg_array_len = 0;
268 assert(cpu->cpreg_array_len == arraylen);
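init_cpreg_list sizes its arrays with a count-then-fill idiom: one pass over the register hash table only increments cpreg_array_len, the four arrays are allocated at that length, the counter is reset, and a second pass stores the entries, with the assert above guarding that the two passes agreed. A minimal skeleton of the idiom, names invented:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static int len;
static uint64_t *indexes;          /* NULL during the counting pass */

static void add_key(uint64_t key)  /* shared visitor for both passes */
{
    if (indexes) {
        indexes[len] = key;
    }
    len++;
}

int main(void)
{
    static const uint64_t keys[] = { 3, 1, 4, 1, 5 };
    const int nkeys = sizeof(keys) / sizeof(keys[0]);

    for (int i = 0; i < nkeys; i++) add_key(keys[i]);  /* pass 1: count */
    int arraylen = len;
    indexes = malloc(sizeof(*indexes) * arraylen);
    len = 0;
    for (int i = 0; i < nkeys; i++) add_key(keys[i]);  /* pass 2: fill */
    assert(len == arraylen);       /* the invariant checked above */

    free(indexes);
    return 0;
}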
279 return env->pstate & PSTATE_PAN;
281 return env->uncached_cpsr & CPSR_PAN;
300 * Some secure-only AArch32 registers trap to EL3 if used from
301 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
313 if (env->cp15.scr_el3 & SCR_EEL2) {
335 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
432 * Define the secure and non-secure FCSE identifier CP registers
435 * v8 EL1 version of the register so the non-secure instance stands alone.
448 * Define the secure and non-secure context identifier CP registers
451 * non-secure case, the 32-bit register will have reset and migration
452 * disabled during registration as it is handled by the 64-bit instance.
502 * Not all pre-v6 cores implemented this WFI, so this is slightly
503 * over-broad.
511 * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
532 * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
571 * registers (D0-D31).
574 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
582 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
586 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
588 value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
591 env->cp15.cpacr_el1 = value;
597 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
600 uint64_t value = env->cp15.cpacr_el1;
603 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
625 FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
629 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
642 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
655 * We need to break the TB after ISB to execute self-modifying code
718 return -1;
781 return -1;
822 * should first be updated to something sparse instead of the current
840 * Empty supported_event_map and cpu->pmceid[01] before adding supported
846 cpu->pmceid0 = 0;
847 cpu->pmceid1 = 0;
851 assert(cnt->number <= MAX_EVENT_ID);
853 assert(cnt->number <= 0x3f);
855 if (cnt->supported(&cpu->env)) {
856 supported_event_map[cnt->number] = i;
857 uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
858 if (cnt->number & 0x20) {
859 cpu->pmceid1 |= event_mask;
861 cpu->pmceid0 |= event_mask;
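The PMCEID construction above packs each supported event number into one of two 32-bit halves: bit (number mod 32) of PMCEID1 if the number is in 32..63, of PMCEID0 otherwise. A runnable check with made-up event numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical sample of supported event numbers. */
    static const unsigned events[] = { 0x00, 0x11, 0x24 };
    uint64_t pmceid0 = 0, pmceid1 = 0;

    for (unsigned i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
        uint64_t event_mask = 1ULL << (events[i] & 0x1f);
        if (events[i] & 0x20) {
            pmceid1 |= event_mask;   /* events 32..63 */
        } else {
            pmceid0 |= event_mask;   /* events 0..31 */
        }
    }
    printf("PMCEID0=%#llx PMCEID1=%#llx\n",   /* 0x20001 and 0x10 */
           (unsigned long long)pmceid0, (unsigned long long)pmceid1);
    return 0;
}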
889 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
895 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
909 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
924 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
939 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
953 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
985 * We might be called for M-profile cores where MDCR_EL2 doesn't
986 * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
998 e = env->cp15.c9_pmcr & PMCRE;
1002 enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1009 prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
1018 prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
1021 prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
1030 filter = env->cp15.pmccfiltr_el0;
1032 filter = env->cp15.c14_pmevtyper[counter];
1070 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1071 (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1079 * controlled by PMCR.D, but if PMCR.LC is set to enable the long
1080 * (64-bit) cycle counter PMCR.D has no effect.
1082 return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
1101 bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
1102 int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1108 return env->cp15.c9_pmcr & PMCRLP;
1112 * Ensure c15_ccnt is the guest-visible count so that operations such as
1114 * etc. can be done logically. This is essentially a no-op if the counter is
1127 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1129 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
1131 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1132 env->cp15.c9_pmovsr |= (1ULL << 31);
1136 env->cp15.c15_ccnt = new_pmccntr;
1138 env->cp15.c15_ccnt_delta = cycles;
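pmccntr_op_start keeps the cycle counter virtual: the guest-visible value is the raw cycle count minus a saved delta, and with PMCR.LC clear the counter is 32 bits wide, so a carry out of bit 31 is detected as old & ~new & (1 << 31) and latched into PMOVSR. The same arithmetic in isolation, with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t delta = 100;                        /* raw count at the last sync */
    uint64_t old_count = 0xfffffff0ULL;          /* guest-visible value cached then */
    uint64_t cycles = delta + old_count + 0x20;  /* raw count now */

    uint64_t new_count = cycles - delta;
    uint64_t overflow_mask = 1ULL << 31;         /* PMCR.LC clear: 32-bit counter */
    if (old_count & ~new_count & overflow_mask) {
        printf("bit 31 fell: set PMOVSR[31]\n");
    }
    printf("count=%#llx\n", (unsigned long long)new_count);
    return 0;
}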
1143 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1151 uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1152 if (!(env->cp15.c9_pmcr & PMCRLC)) {
1163 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1168 uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1172 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1179 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1187 uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1191 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
1192 env->cp15.c9_pmovsr |= (1 << counter);
1195 env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1197 env->cp15.c14_pmevcntr_delta[counter] = count;
1204 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1206 uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
1220 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1225 env->cp15.c14_pmevcntr_delta[counter] -=
1226 env->cp15.c14_pmevcntr[counter];
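The event-counter finish path above computes the distance to a 32-bit wrap with two's-complement arithmetic: -(count + 1) modulo 2^32 equals 2^32 - count - 1, which is then scaled into a host-clock deadline for the overflow timer. In isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t count = 0xfffffffe;
    uint32_t remaining = -(count + 1);   /* wraps to 2^32 - count - 1 */
    printf("%u\n", remaining);           /* 1 */
    return 0;
}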
1250 pmu_op_start(&cpu->env);
1255 pmu_op_finish(&cpu->env);
1265 * has the effect of setting the cpu->pmu_timer to the next earliest time a
1268 pmu_op_start(&cpu->env);
1269 pmu_op_finish(&cpu->env);
1279 env->cp15.c15_ccnt = 0;
1285 env->cp15.c14_pmevcntr[i] = 0;
1289 env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
1290 env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
1297 uint64_t pmcr = env->cp15.c9_pmcr;
1305 pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
1323 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1330 new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1335 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
1336 env->cp15.c9_pmovsr |= (1 << i);
1340 env->cp15.c14_pmevcntr[i] = new_pmswinc;
1351 ret = env->cp15.c15_ccnt;
1365 env->cp15.c9_pmselr = value & 0x1f;
1372 env->cp15.c15_ccnt = value;
1388 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1397 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1405 return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1413 env->cp15.c9_pmcnten |= value;
1422 env->cp15.c9_pmcnten &= ~value;
1430 env->cp15.c9_pmovsr &= ~value;
1438 env->cp15.c9_pmovsr |= value;
1456 uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1465 env->cp15.c14_pmevcntr_delta[counter] = count;
1468 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1482 return env->cp15.pmccfiltr_el0;
1484 return env->cp15.c14_pmevtyper[counter];
1497 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1504 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1505 env->cp15.c14_pmevtyper[counter] = value;
1519 env->cp15.c14_pmevcntr_delta[counter] =
1526 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1533 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1538 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1550 env->cp15.c14_pmevcntr[counter] = value;
1565 ret = env->cp15.c14_pmevcntr[counter];
1584 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1590 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1597 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1599 env->cp15.c14_pmevcntr[counter] = value;
1605 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1607 return env->cp15.c14_pmevcntr[counter];
1613 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1618 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
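The expression ((crm & 3) << 3) | (opc2 & 7), repeated throughout the fragments above, recovers the event-counter index n from a PMEVCNTR<n>/PMEVTYPER<n> encoding: CRm carries the top two bits of n and opc2 the low three (PMEVCNTRn sits at CRm 8..11 and PMEVTYPERn at CRm 12..15, so only CRm's low two bits matter). A quick enumeration:

#include <stdio.h>

int main(void)
{
    for (unsigned crm = 8; crm <= 11; crm++) {       /* PMEVCNTR CRm range */
        for (unsigned opc2 = 0; opc2 <= 7; opc2++) {
            unsigned counter = ((crm & 3) << 3) | (opc2 & 7);
            printf("crm=%2u opc2=%u -> PMEVCNTR%u\n", crm, opc2, counter);
        }
    }
    return 0;
}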
1625 env->cp15.c9_pmuserenr = value & 0xf;
1627 env->cp15.c9_pmuserenr = value & 1;
1636 env->cp15.c9_pminten |= value;
1644 env->cp15.c9_pminten &= ~value;
1743 /* Clear all-context RES0 bits. */
1745 changed = env->cp15.scr_el3 ^ value;
1746 env->cp15.scr_el3 = value;
1766 * scr_write will set the RES1 bits on an AArch64-only CPU.
1767 * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
1793 ri->secure & ARM_CP_SECSTATE_S);
1795 return cpu->ccsidr[index];
1812 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1815 if (cs->interrupt_request & CPU_INTERRUPT_VINMI) {
1820 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1824 if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
1831 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1834 if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) {
1839 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1845 if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
2083 * MAIR can just read-as-written because we don't implement caches
2098 * For non-long-descriptor page tables these are PRRR and NMRR;
2099 * regardless they still act as reads-as-written for QEMU.
2102 * MAIR0/1 are defined separately from their 64-bit counterpart which
2147 env->teecr = value;
2158 (env->cp15.hstr_el2 & HSTR_TTEE)) {
2167 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2223 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
2243 cntkctl = env->cp15.cnthctl_el2;
2245 cntkctl = env->cp15.c14_cntkctl;
2252 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2254 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2281 return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2286 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2294 ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2295 : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2299 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) {
2319 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2327 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2336 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2341 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2347 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) {
2399 if (!(env->cp15.scr_el3 & SCR_ST)) {
2442 if (env->cp15.scr_el3 & SCR_EEL2) {
2461 CPUARMState *env = &cpu->env;
2462 uint64_t cnthctl = env->cp15.cnthctl_el2;
2465 int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;
2477 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2494 if ((env->cp15.scr_el3 & SCR_ECVEN) &&
2495 FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) &&
2498 return env->cp15.cntpoff_el2;
2519 return env->cp15.cntvoff_el2;
2538 * This isn't exactly the same as the indirect-access offset,
2567 return env->cp15.cntvoff_el2;
2581 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2583 if (gt->ctl & 1) {
2588 uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx);
2589 uint64_t count = gt_get_countervalue(&cpu->env);
2591 int istatus = count - offset >= gt->cval;
2594 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2598 * Next transition is when (count - offset) rolls back over to 0.
2611 * Next transition is when (count - offset) == cval, i.e.
2616 if (uadd64_overflow(gt->cval, offset, &nexttick)) {
2622 * signed-64-bit range of a QEMUTimer -- in this case we just
2627 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2629 timer_mod(cpu->gt_timer[timeridx], nexttick);
2634 gt->ctl &= ~4;
2635 timer_del(cpu->gt_timer[timeridx]);
2646 timer_del(cpu->gt_timer[timeridx]);
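gt_recalc_timer's arithmetic above works in absolute counter units: the timer output is asserted when (count - offset) >= cval, and while the timer is not yet firing the next status change comes at absolute count cval + offset, which may overflow 64 bits, in which case the host deadline is clamped. A sketch with invented values, using the GCC/Clang builtin in place of QEMU's uadd64_overflow:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t count = 1000, offset = 200, cval = 900;

    int istatus = (count - offset) >= cval;     /* 800 >= 900: not yet firing */
    printf("istatus=%d\n", istatus);

    uint64_t nexttick;
    if (__builtin_add_overflow(cval, offset, &nexttick)) {
        printf("deadline overflows: clamp timer to INT64_MAX\n");
    } else {
        /* istatus == 0: next transition when (count - offset) == cval */
        printf("next transition at absolute count %llu\n",
               (unsigned long long)nexttick);
    }
    return 0;
}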
2652 return gt_get_countervalue(env) - offset;
2658 return gt_get_countervalue(env) - offset;
2666 env->cp15.c14_timer[timeridx].cval = value;
2672 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2673 (gt_get_countervalue(env) - offset));
2688 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2707 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2710 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2712 /* Enable toggled */
2780 return env->cp15.c14_timer[timeridx].cval;
2808 return env->cp15.c14_timer[timeridx].ctl;
2835 * to re-detect that it's this register.
2838 return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2);
2845 do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2);
2858 uint32_t oldval = env->cp15.cnthctl_el2;
2911 return env->cp15.c14_timer[timeridx].cval;
2939 return env->cp15.c14_timer[timeridx].ctl;
3140 * Note that CNTFRQ is purely reads-as-written for the benefit
3162 /* per-timer control */
3308 * Secure timer -- this is actually restricted to only EL3
3309 * and configurably Secure-EL1 via the accessfn.
3338 * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
3369 !(env->cp15.scr_el3 & SCR_ECVEN)) {
3396 * In user-mode most of the generic timer registers are inaccessible
3405 * Currently we have no support for QEMUTimer in linux-user so we
3415 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3452 /* get_phys_addr() isn't present for user-mode-only targets */
3457 if (ri->opc2 & 4) {
3466 if (env->cp15.scr_el3 & SCR_EEL2) {
3481 * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
3482 * memory -- see pseudocode PAREncodeShareability().
3484 if (((res->cacheattrs.attrs & 0xf0) == 0) ||
3485 res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
3488 return res->cacheattrs.shareability;
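The shareability fix-up above implements the PAR encoding rule: Device memory (high attribute nibble zero) and the Normal Non-cacheable encodings checked there must report SH = 0b10 regardless of the translation's real shareability. As a small pure function:

#include <stdint.h>
#include <stdio.h>

static unsigned par_shareability(uint8_t attrs, unsigned sh)
{
    if ((attrs & 0xf0) == 0 || attrs == 0x44 || attrs == 0x40) {
        return 2;    /* 0b10, as required for Device / Normal-NC */
    }
    return sh;       /* otherwise report the real shareability */
}

int main(void)
{
    printf("%u\n", par_shareability(0x04, 3));  /* Device: forced to 2 */
    printf("%u\n", par_shareability(0xff, 3));  /* Normal WB: stays 3 */
    return 0;
}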
3536 (env->cp15.scr_el3 & SCR_EA)) {
3539 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3541 env->cp15.hpfar_el2 |= HPFAR_NS;
3579 env->exception.vaddress = value;
3580 env->exception.fsr = fsr;
3591 * 32-bit or the 64-bit PAR format
3595 * * The Non-secure TTBCR.EAE bit is set to 1
3608 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3616 /* Create a 64-bit PAR */
3641 * Convert it to a 32-bit PAR.
3668 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3674 switch (ri->opc2 & 6) {
3679 if (ri->crm == 9 && arm_pan_enabled(env)) {
3686 g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
3689 if (ri->crm == 9 && arm_pan_enabled(env)) {
3706 g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
3743 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3767 if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
3777 !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
3796 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3803 switch (ri->opc2 & 6) {
3805 switch (ri->opc1) {
3807 if (ri->crm == 9 && arm_pan_enabled(env)) {
3839 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
3880 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3885 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3891 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3896 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3907 u32p += env->pmsav7.rnr[M_REG_NS];
3921 u32p += env->pmsav7.rnr[M_REG_NS];
3922 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3930 uint32_t nrgs = cpu->pmsav7_dregion;
3947 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3948 env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
3953 return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
3961 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3962 env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
3967 return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
3979 if (value >= cpu->pmsav7_dregion) {
3983 env->pmsav7.rnr[M_REG_NS] = value;
3991 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3992 env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
3997 return env->pmsav8.hprbar[env->pmsav8.hprselr];
4005 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4006 env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
4011 return env->pmsav8.hprlar[env->pmsav8.hprselr];
4022 int rmax = MIN(cpu->pmsav8r_hdregion, 32);
4025 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4030 env->pmsav8.hprlar[n] = deposit32(
4031 env->pmsav8.hprlar[n], 0, 1, bit);
4042 for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
4043 if (env->pmsav8.hprlar[n] & 0x1) {
4059 if (value >= cpu->pmsav8r_hdregion) {
4063 env->pmsav8.hprselr = value;
4070 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
4071 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
4073 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4075 if (ri->opc1 & 4) {
4076 if (index >= cpu->pmsav8r_hdregion) {
4079 if (ri->opc2 & 0x1) {
4080 env->pmsav8.hprlar[index] = value;
4082 env->pmsav8.hprbar[index] = value;
4085 if (index >= cpu->pmsav7_dregion) {
4088 if (ri->opc2 & 0x1) {
4089 env->pmsav8.rlar[M_REG_NS][index] = value;
4091 env->pmsav8.rbar[M_REG_NS][index] = value;
4099 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
4100 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
4102 if (ri->opc1 & 4) {
4103 if (index >= cpu->pmsav8r_hdregion) {
4106 if (ri->opc2 & 0x1) {
4107 return env->pmsav8.hprlar[index];
4109 return env->pmsav8.hprbar[index];
4112 if (index >= cpu->pmsav7_dregion) {
4115 if (ri->opc2 & 0x1) {
4116 return env->pmsav8.rlar[M_REG_NS][index];
4118 return env->pmsav8.rbar[M_REG_NS][index];
4161 * because the PMSAv7 is also used by M-profile CPUs, which do
4245 * using Long-descriptor translation table format
4252 * Short-descriptor translation table format.
4283 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
4406 env->cp15.c15_ticonfig = value & 0xe7;
4408 env->cp15.c0_cpuid = (value & (1 << 5)) ?
4415 env->cp15.c15_threadid = value & 0xffff;
4421 /* Wait-for-interrupt (deprecated) */
4432 env->cp15.c15_i_max = 0x000;
4433 env->cp15.c15_i_min = 0xff0;
4479 env->cp15.c15_cpar = value & 0x3fff;
4492 * XScale specific cache-lockdown: since we have no cache we NOP these
4512 * implementation of this implementation-defined space.
4552 * The cache test-and-clean instructions always return (1 << 30)
4576 return env->cp15.vpidr_el2;
4584 uint64_t mpidr = cpu->mp_affinity;
4589 * Cores which are uniprocessor (non-coherent)
4591 * bit 30. (For instance, Cortex-R5).
4593 if (cpu->mp_is_up) {
4605 return env->cp15.vmpidr_el2;
4674 env->daif = value & PSTATE_DAIF;
4679 return env->pstate & PSTATE_PAN;
4685 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4697 return env->pstate & PSTATE_UAO;
4703 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4715 return env->pstate & PSTATE_DIT;
4721 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
4733 return env->pstate & PSTATE_SSBS;
4739 env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
4813 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
4817 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4840 return cpu->dcz_blocksize | dzp_bit;
4846 if (!(env->pstate & PSTATE_SP)) {
4858 return env->pstate & PSTATE_SP;
4871 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4878 if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
4879 if (ri->opc1 == 6) { /* SCTLR_EL3 */
4897 /* This may enable/disable the MMU, so do a TLB flush. */
4900 if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) {
4919 bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;
4924 env->cp15.mdcr_el3 = value;
4945 bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;
4950 env->cp15.mdcr_el2 = value;
4971 * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
4987 icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
5001 * Minimal set of EL0-visible registers. This will need to be expanded
5030 /* Avoid overhead of an access check that always passes in user-mode */
5280 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5342 * HCR_PTW forbids certain page-table setups
5348 if ((env->cp15.hcr_el2 ^ value) &
5352 env->cp15.hcr_el2 = value;
5384 value = deposit64(env->cp15.hcr_el2, 32, 32, value);
5392 value = deposit64(env->cp15.hcr_el2, 0, 32, value);
5398 /* hcr_write will set the RES1 bits on an AArch64-only CPU */
5409 uint64_t ret = env->cp15.hcr_el2;
5416 * current Security state". This is ARMv8.4-SecEL2 speak for
5423 * on a per-field basis. In current QEMU, this is condition
5441 * These bits are up-to-date as of ARMv8.6.
5452 /* These bits are up-to-date as of ARMv8.6. */
5500 if ((env->cp15.hcr_el2 & mask) != mask) {
5533 env->cp15.hcrx_el2 = value & valid_mask;
5558 && !(env->cp15.scr_el3 & SCR_HXEN)) {
5581 * For the moment, we treat the EL2-disabled case as taking
5582 * priority over the HXEn-disabled case. This is true for the only
5595 if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
5598 return env->cp15.hcrx_el2;
5605 * For A-profile AArch32 EL3, if NSACR.CP10
5609 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5611 value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
5613 env->cp15.cptr_el[2] = value;
5619 * For A-profile AArch32 EL3, if NSACR.CP10
5622 uint64_t value = env->cp15.cptr_el[2];
5625 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5755 * Unlike the other EL2-related AT operations, these must
5930 if (env->cp15.scr_el3 & SCR_EEL2) {
6037 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) {
6049 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) {
6068 ri = ri->opaque;
6069 readfn = ri->readfn;
6071 readfn = ri->orig_readfn;
6086 ri = ri->opaque;
6087 writefn = ri->writefn;
6089 writefn = ri->orig_writefn;
6100 return ri->orig_readfn(env, ri->opaque);
6107 return ri->orig_writefn(env, ri->opaque, value);
6127 if (ri->orig_accessfn) {
6128 return ri->orig_accessfn(env, ri->opaque, isread);
6195 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
6196 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
6207 if (a->feature && !a->feature(&cpu->isar)) {
6211 src_reg = g_hash_table_lookup(cpu->cp_regs,
6212 (gpointer)(uintptr_t)a->src_key);
6213 dst_reg = g_hash_table_lookup(cpu->cp_regs,
6214 (gpointer)(uintptr_t)a->dst_key);
6218 /* Cross-compare names to detect typos in the keys. */
6219 g_assert(strcmp(src_reg->name, a->src_name) == 0);
6220 g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
6223 g_assert(src_reg->opaque == NULL);
6228 new_reg->name = a->new_name;
6229 new_reg->type |= ARM_CP_ALIAS;
6231 new_reg->access &= PL2_RW | PL3_RW;
6233 new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK)
6235 new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK)
6237 new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK)
6239 new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK)
6241 new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK)
6243 new_reg->opaque = src_reg;
6244 new_reg->orig_readfn = src_reg->readfn ?: raw_read;
6245 new_reg->orig_writefn = src_reg->writefn ?: raw_write;
6246 new_reg->orig_accessfn = src_reg->accessfn;
6247 if (!new_reg->raw_readfn) {
6248 new_reg->raw_readfn = raw_read;
6250 if (!new_reg->raw_writefn) {
6251 new_reg->raw_writefn = raw_write;
6253 new_reg->readfn = el2_e2h_e12_read;
6254 new_reg->writefn = el2_e2h_e12_write;
6255 new_reg->accessfn = el2_e2h_e12_access;
6262 if (new_reg->nv2_redirect_offset) {
6263 assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1);
6264 new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1;
6265 new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
6268 ok = g_hash_table_insert(cpu->cp_regs,
6269 (gpointer)(uintptr_t)a->new_key, new_reg);
6272 src_reg->opaque = dst_reg;
6273 src_reg->orig_readfn = src_reg->readfn ?: raw_read;
6274 src_reg->orig_writefn = src_reg->writefn ?: raw_write;
6275 if (!src_reg->raw_readfn) {
6276 src_reg->raw_readfn = raw_read;
6278 if (!src_reg->raw_writefn) {
6279 src_reg->raw_writefn = raw_write;
6281 src_reg->readfn = el2_e2h_read;
6282 src_reg->writefn = el2_e2h_write;
6297 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
6301 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
6332 if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) {
6343 return env->cp15.vdisr_el2;
6345 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
6348 return env->cp15.disr_el1;
6356 env->cp15.vdisr_el2 = val;
6359 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
6362 env->cp15.disr_el1 = val;
6384 * These registers have fine-grained trap bits, but UNDEF-to-EL1
6385 * is higher priority than FGT-to-EL2 so we do not need to list them
6422 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
6436 if (env->cp15.hcr_el2 & HCR_E2H) {
6437 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
6439 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
6448 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
6456 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
6471 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
6485 if (env->cp15.hcr_el2 & HCR_E2H) {
6486 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
6488 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
6497 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
6505 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6518 uint64_t *cr = env->vfp.zcr_el;
6519 uint32_t map = cpu->sve_vq.map;
6520 uint32_t len = ARM_MAX_VQ - 1;
6523 cr = env->vfp.smcr_el;
6524 map = cpu->sme_vq.map;
6539 return 31 - clz32(map);
6542 /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
6544 return ctz32(cpu->sme_vq.map);
6549 return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
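sve_vqm1_for_el_sm above treats the supported-vector-length map as a bitmask with bit (vq - 1) set for each supported quadword count, so the widest length is the highest set bit (31 - clz32) and, for Streaming SVE where bit 0 need not be set, the narrowest is the lowest set bit (ctz32). A sketch with the GCC/Clang builtins standing in for QEMU's clz32/ctz32:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t map = 0x8b;    /* hypothetical: vq 1, 2, 4 and 8 supported */

    /* Both builtins are undefined for 0; a real map is never empty. */
    printf("max vq-1 = %d\n", 31 - __builtin_clz(map));  /* 7 */
    printf("min vq-1 = %d\n", __builtin_ctz(map));       /* 0 */
    return 0;
}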
6606 && !(env->cp15.scr_el3 & SCR_ENTP2)) {
6618 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6629 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6638 memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
6640 memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
6646 uint64_t change = (env->svcr ^ new) & mask;
6651 env->svcr ^= change;
6660 * SetPSTATE_ZA zeros on enable and disable. We can zero this only
6661 * on enable: while disabled, the storage is inaccessible and the
6666 memset(&env->za_state, 0, sizeof(env->za_state));
6677 aarch64_set_svcr(env, value, -1);
6699 * apply the narrower SVL to the Zregs and Pregs -- see the comment
6768 env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
6773 env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
6774 env_archcpu(env)->reset_l0gptsz);
6802 env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT);
6807 return env->pstate & PSTATE_ALLINT;
6837 unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
6855 .resetvalue = cpu->isar.reset_pmcr_el0,
6908 .resetvalue = extract64(cpu->pmceid0, 32, 32) },
6913 .resetvalue = extract64(cpu->pmceid1, 32, 32) },
6939 uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1);
6941 if (env->gicv3state) {
6950 uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0);
6952 if (env->gicv3state) {
6971 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
6988 * A trivial implementation of ARMv8.1-LOR leaves all of these
7032 !(env->cp15.scr_el3 & SCR_APK)) {
7097 env->NF = env->CF = env->VF = 0, env->ZF = 1;
7103 * timed-out indication to the guest. There is no reason
7108 ri->name, error_get_pretty(err));
7111 env->ZF = 0; /* NZCF = 0100 */
7117 /* We do not support re-seeding, so the two registers operate the same. */
7134 /* CTR_EL0 System register -> DminLine, bits [19:16] */
7135 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
7137 uint64_t vaddr = vaddr_in & ~(dline_size - 1);
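Both cache-maintenance fragments decode CTR_EL0 the same way: IminLine (bits [3:0]) and DminLine (bits [19:16]) hold log2 of the line size in 4-byte words, so 4 << field is the line size in bytes and size - 1 is the mask that aligns an address down to a line boundary. A runnable check with a made-up CTR value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t ctr = 0x00040004;   /* hypothetical: both fields = 4 */

    uint64_t iline_size = 4u << (ctr & 0xf);          /* 64 bytes */
    uint64_t dline_size = 4u << ((ctr >> 16) & 0xf);  /* 64 bytes */
    uint64_t vaddr_in = 0x12345;

    printf("I=%llu D=%llu aligned=%#llx\n",
           (unsigned long long)iline_size,
           (unsigned long long)dline_size,
           (unsigned long long)(vaddr_in & ~(dline_size - 1)));
    return 0;
}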
7201 !(env->cp15.scr_el3 & SCR_ATA)) {
7241 !(env->cp15.scr_el3 & SCR_ATA)) {
7249 return env->pstate & PSTATE_TCO;
7254 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
7375 /* Avoid overhead of an access check that always passes in user-mode */
7384 /* Avoid overhead of an access check that always passes in user-mode */
7398 if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
7404 } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
7412 && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
7456 arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
7495 * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything
7496 * about the RESS bits at the top -- we choose the "generate an EL2
7500 env->cp15.vncr_el2 = value & ~0xfffULL;
7614 (env->cp15.hstr_el2 & HSTR_TJDBX)) {
7736 * is non-zero, which is never for ARMv7, optionally in ARMv8
7756 CPUARMState *env = &cpu->env;
7757 ARMISARegisters *isar = &cpu->isar;
7812 .resetvalue = cpu->id_afr0 },
7892 .resetvalue = cpu->clidr
7990 .resetvalue = cpu->id_aa64afr0 },
7995 .resetvalue = cpu->id_aa64afr1 },
8090 .resetvalue = cpu->isar.mvfr0 },
8095 .resetvalue = cpu->isar.mvfr1 },
8100 .resetvalue = cpu->isar.mvfr2 },
8125 * being filled with AArch64-view-of-AArch32-ID-register
8157 .resetvalue = extract64(cpu->pmceid0, 0, 32) },
8162 .resetvalue = cpu->pmceid0 },
8167 .resetvalue = extract64(cpu->pmceid1, 0, 32) },
8172 .resetvalue = cpu->pmceid1 },
8303 * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
8304 * For pre-v8 cores there are RAZ patterns for these in
8307 * to also cover c0, 0, c{8-15}, {0-7}.
8309 * c4-c7 is where the AArch64 ID registers live (and we've
8310 * already defined those in v8_idregs[]), and c8-c15 are not
8340 .resetvalue = cpu->midr,
8345 .access = PL2_RW, .resetvalue = cpu->midr,
8424 .resetvalue = cpu->reset_sctlr },
8481     /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
8516 * When LPAE exists this 32-bit PAR register is an alias of the
8517 * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[]
8553 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
8554 * be read-only (ie write causes UNDEF exception).
8559 * Pre-v8 MIDR space.
8570 .access = PL1_R, .resetvalue = cpu->midr,
8595 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
8602 .access = PL1_R, .resetvalue = cpu->midr },
8608 .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
8613 .access = PL1_R, .resetvalue = cpu->midr
8616 /* These are common to v8 and pre-v8 */
8620 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
8625 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
8626 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
8646 .resetvalue = cpu->pmsav7_dregion << 8
8653 .resetvalue = cpu->pmsav8r_hdregion
8712 for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
8742 for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
8798 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
8816 * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
8818 * (1) older 32-bit only cores have a simple 32-bit CBAR
8819 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
8820 * 32-bit register visible to AArch32 at a different encoding
8822 * be able to squash a 64-bit address into the 32-bit view.
8824 * in future if we support AArch32-only configs of some of the
8830 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
8831 | extract64(cpu->reset_cbar, 32, 12);
8840 .access = PL1_R, .resetvalue = cpu->reset_cbar },
8849 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
8887 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
8893 * arch/arm/mach-pxa/sleep.S expects two instructions following
8894 * an MMU enable to execute from cache. Imitate this behaviour.
8984 .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
9046 CPUARMState *env = &cpu->env;
9049 bool is64 = r->type & ARM_CP_64BIT;
9051 int cp = r->cp;
9058 if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
9061 key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
9066 * cp == 0 as equivalent to the value for "standard guest-visible
9068 * in their AArch64 view (the .cp value may be non-zero for the
9071 if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
9074 key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
9081 if (!(r->type & ARM_CP_OVERRIDE)) {
9082 const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
9084 assert(oldreg->type & ARM_CP_OVERRIDE);
9099 int min_el = ctz32(r->access) / 2;
9101 if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
9104 make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
9109 if ((r->access & max_el) == 0) {
9118 r2->name = memcpy(r2 + 1, name, name_len);
9124 r2->cp = cp;
9125 r2->crm = crm;
9126 r2->opc1 = opc1;
9127 r2->opc2 = opc2;
9128 r2->state = state;
9129 r2->secure = secstate;
9131 r2->opaque = opaque;
9136 int old_special = r2->type & ARM_CP_SPECIAL_MASK;
9143 r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
9146 * special cases like VPIDR_EL2 which have a constant non-zero
9149 if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
9150 r2->resetvalue = 0;
9157 r2->readfn = NULL;
9158 r2->writefn = NULL;
9159 r2->raw_readfn = NULL;
9160 r2->raw_writefn = NULL;
9161 r2->resetfn = NULL;
9162 r2->fieldoffset = 0;
9163 r2->bank_fieldoffsets[0] = 0;
9164 r2->bank_fieldoffsets[1] = 0;
9166 bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
9174 r2->fieldoffset = r->bank_fieldoffsets[ns];
9180 * reset the 32-bit instance in certain cases:
9182 * 1) If the register has both 32-bit and 64-bit instances
9183 * then we can count on the 64-bit instance taking care
9184 * of the non-secure bank.
9185 * 2) If ARMv8 is enabled then we can count on a 64-bit
9187 * that separate 32 and 64-bit definitions are provided.
9189 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
9191 r2->type |= ARM_CP_ALIAS;
9193 } else if ((secstate != r->secure) && !ns) {
9196 * migration of the non-secure instance.
9198 r2->type |= ARM_CP_ALIAS;
9202 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
9203 r2->fieldoffset += sizeof(uint32_t);
9213 * never migratable and not even raw-accessible.
9215 if (r2->type & ARM_CP_SPECIAL_MASK) {
9216 r2->type |= ARM_CP_NO_RAW;
9218 if (((r->crm == CP_ANY) && crm != 0) ||
9219 ((r->opc1 == CP_ANY) && opc1 != 0) ||
9220 ((r->opc2 == CP_ANY) && opc2 != 0)) {
9221 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
9229 if (!(r2->type & ARM_CP_NO_RAW)) {
9233 g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
9250 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
9259 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
9265 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
9266 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
9267 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
9268 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
9269 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
9270 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
9274 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
9276 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
9278 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
9281 * (M-profile or v7A-and-earlier only) for implementation defined
9287 switch (r->state) {
9290 if (r->cp == 0) {
9295 if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
9296 !arm_feature(&cpu->env, ARM_FEATURE_M)) {
9297 assert(r->cp >= 14 && r->cp <= 15);
9299 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
9303 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
9315 if (r->state != ARM_CP_STATE_AA32) {
9317 switch (r->opc1) {
9344 /* broken reginfo with out-of-range opc1 */
9348 assert((r->access & ~mask) == 0);
9355 if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
9356 if (r->access & PL3_R) {
9357 assert((r->fieldoffset ||
9358 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
9359 r->readfn);
9361 if (r->access & PL3_W) {
9362 assert((r->fieldoffset ||
9363 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
9364 r->writefn);
9373 if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
9376 if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
9382 * fine-grained trapping. Add the NXS insn here and
9389 g_autofree char *name = g_strdup_printf("%sNXS", r->name);
9404 * (same for secure and non-secure world) or banked.
9408 switch (r->secure) {
9412 r->secure, crm, opc1, opc2,
9413 r->name);
9416 name = g_strdup_printf("%s_S", r->name);
9423 crm, opc1, opc2, r->name);
9430 * AArch64 registers get mapped to non-secure instance
9435 crm, opc1, opc2, r->name);
9458 * user-space cannot alter any values and dynamic values pertaining to
9469 if (m->is_glob) {
9470 pat = g_pattern_spec_new(m->name);
9475 if (pat && g_pattern_match_string(pat, r->name)) {
9476 r->type = ARM_CP_CONST;
9477 r->access = PL0U_R;
9478 r->resetvalue = 0;
9480 } else if (strcmp(r->name, m->name) == 0) {
9481 r->type = ARM_CP_CONST;
9482 r->access = PL0U_R;
9483 r->resetvalue &= m->exported_bits;
9484 r->resetvalue |= m->fixed_bits;
9502 /* Helper coprocessor write function for write-ignore registers */
9507 /* Helper coprocessor write function for read-as-zero registers */
9513 /* Helper coprocessor reset function for do-nothing-on-reset registers */
9526 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
9542 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
9549 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
9566 ZF = (env->ZF == 0);
9567 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
9568 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
9569 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
9570 | ((env->condexec_bits & 0xfc) << 8)
9571 | (env->GE << 16) | (env->daif & CPSR_AIF);
9582 env->ZF = (~val) & CPSR_Z;
9583 env->NF = val;
9584 env->CF = (val >> 29) & 1;
9585 env->VF = (val << 3) & 0x80000000;
9588 env->QF = ((val & CPSR_Q) != 0);
9591 env->thumb = ((val & CPSR_T) != 0);
9594 env->condexec_bits &= ~3;
9595 env->condexec_bits |= (val >> 25) & 3;
9598 env->condexec_bits &= 3;
9599 env->condexec_bits |= (val >> 8) & 0xfc;
9602 env->GE = (val >> 16) & 0xf;
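cpsr_read and cpsr_write above translate between the architectural CPSR and QEMU's unpacked flag storage: NF keeps N in its own bit 31, ZF is zero exactly when Z is set, CF is a plain 0/1, and VF keeps V in its bit 31. A round-trip of just those flag bits, with the globals invented for the sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t NF, ZF = 1, CF, VF, QF;   /* unpacked state, Z clear */

static uint32_t flags_read(void)
{
    return (NF & 0x80000000) | ((ZF == 0) << 30) | (CF << 29)
           | ((VF & 0x80000000) >> 3) | (QF << 27);
}

static void flags_write(uint32_t val)
{
    ZF = (~val) & (1u << 30);          /* nonzero means "Z clear" */
    NF = val;                          /* only bit 31 is meaningful */
    CF = (val >> 29) & 1;
    VF = (val << 3) & 0x80000000;
    QF = (val >> 27) & 1;
}

int main(void)
{
    flags_write(0xf8000000);           /* N, Z, C, V and Q all set */
    printf("%#x\n", flags_read());     /* 0xf8000000 */
    return 0;
}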
9608 * whether non-secure software is allowed to change the CPSR_F and CPSR_A
9619 changed_daif = (env->daif ^ val) & mask;
9624 * abort exceptions from a non-secure state.
9626 if (!(env->cp15.scr_el3 & SCR_AW)) {
9629 "non-secure world with SCR.AW bit clear\n");
9637 * exceptions from a non-secure state.
9639 if (!(env->cp15.scr_el3 & SCR_FW)) {
9642 "non-secure world with SCR.FW bit clear\n");
9647 * Check whether non-maskable FIQ (NMFI) support is enabled.
9654 "Ignoring attempt to enable CPSR_F flag "
9655 "(non-maskable FIQ [NMFI] support enabled)\n");
9661 env->daif &= ~(CPSR_AIF & mask);
9662 env->daif |= val & CPSR_AIF & mask;
9665 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
9666 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
9693 aarch32_mode_name(env->uncached_cpsr),
9700 aarch32_mode_name(env->uncached_cpsr),
9701 aarch32_mode_name(val), env->regs[15]);
9706 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
9741 old_mode = env->uncached_cpsr & CPSR_M;
9747 memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
9748 memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
9750 memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
9751 memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
9755 env->banked_r13[i] = env->regs[13];
9756 env->banked_spsr[i] = env->spsr;
9759 env->regs[13] = env->banked_r13[i];
9760 env->spsr = env->banked_spsr[i];
9762 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
9763 env->regs[14] = env->banked_r14[r14_bank_number(mode)];
9769 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
9771 * The below multi-dimensional table is used for looking up the target
9778 * | | | | | +--- Current EL
9779 * | | | | +------ Non-secure(0)/Secure(1)
9780 * | | | +--------- HCR mask override
9781 * | | +------------ SCR exec state control
9782 * | +--------------- SCR mask override
9783 * +------------------ 32-bit(0)/64-bit(1) EL3
9786 * 0-3 = EL0-EL3
9787 * -1 = Cannot occur
9801 * BIT IRQ IMO Non-secure Secure
9805 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
9806 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
9807 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
9808 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
9809 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
9810 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
9811 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
9812 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
9813 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
9814 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},},
9815 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },},
9816 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},},
9817 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
9818 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
9819 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},
9820 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},},
9853 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
9857 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
9861 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
9872 /* Perform a table-lookup for the target EL given the current state */
9882 int idx = cs->exception_index;
9924 idx, exc, cs->cpu_index);
9936 uint32_t mode = env->uncached_cpsr & CPSR_M;
9940 env->xregs[i] = env->regs[i];
9944 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
9949 env->xregs[i] = env->usr_regs[i - 8];
9953 env->xregs[i] = env->regs[i];
9958      * Registers x13-x23 are the various mode SP and LR registers. Registers
9963 env->xregs[13] = env->regs[13];
9964 env->xregs[14] = env->regs[14];
9966 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
9969 env->xregs[14] = env->regs[14];
9971 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
9976 env->xregs[15] = env->regs[13];
9978 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
9982 env->xregs[16] = env->regs[14];
9983 env->xregs[17] = env->regs[13];
9985 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
9986 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
9990 env->xregs[18] = env->regs[14];
9991 env->xregs[19] = env->regs[13];
9993 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
9994 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
9998 env->xregs[20] = env->regs[14];
9999 env->xregs[21] = env->regs[13];
10001 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
10002 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
10006 env->xregs[22] = env->regs[14];
10007 env->xregs[23] = env->regs[13];
10009 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
10010 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
10014 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
10015 * mode, then we can copy from r8-r14. Otherwise, we copy from the
10016 * FIQ bank for r8-r14.
10020 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
10024 env->xregs[i] = env->fiq_regs[i - 24];
10026 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
10027 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
10030 env->pc = env->regs[15];
10041 uint32_t mode = env->uncached_cpsr & CPSR_M;
10045 env->regs[i] = env->xregs[i];
10049 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
10050 * Otherwise, we copy x8-x12 into the banked user regs.
10054 env->usr_regs[i - 8] = env->xregs[i];
10058 env->regs[i] = env->xregs[i];
10069 env->regs[13] = env->xregs[13];
10070 env->regs[14] = env->xregs[14];
10072 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
10079 env->regs[14] = env->xregs[14];
10081 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
10086 env->regs[13] = env->xregs[15];
10088 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
10092 env->regs[14] = env->xregs[16];
10093 env->regs[13] = env->xregs[17];
10095 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
10096 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
10100 env->regs[14] = env->xregs[18];
10101 env->regs[13] = env->xregs[19];
10103 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
10104 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
10108 env->regs[14] = env->xregs[20];
10109 env->regs[13] = env->xregs[21];
10111 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
10112 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
10116 env->regs[14] = env->xregs[22];
10117 env->regs[13] = env->xregs[23];
10119 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
10120 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
10124 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
10125 * mode, then we can copy to r8-r14. Otherwise, we copy to the
10126 * FIQ bank for r8-r14.
10130 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
10134 env->fiq_regs[i - 24] = env->xregs[i];
10136 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
10137 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
10140 env->regs[15] = env->pc;
10154 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
10156 env->pstate &= ~PSTATE_SS;
10157 env->spsr = cpsr_read(env);
10159 env->condexec_bits = 0;
10161 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
10167 env->uncached_cpsr &= ~CPSR_E;
10168 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
10169 env->uncached_cpsr |= CPSR_E;
10172 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
10173 env->daif |= mask;
10176 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
10177 env->uncached_cpsr |= CPSR_SSBS;
10179 env->uncached_cpsr &= ~CPSR_SSBS;
10184 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
10185 env->elr_el[2] = env->regs[15];
10192 /* ... the target is EL3, from non-secure state. */
10193 env->uncached_cpsr &= ~CPSR_PAN;
10200 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
10201 env->uncached_cpsr |= CPSR_PAN;
10211 env->thumb =
10214 env->regs[14] = env->regs[15] + offset;
10216 env->regs[15] = newpc;
10238 CPUARMState *env = &cpu->env;
10240 switch (cs->exception_index) {
10250 env->cp15.ifar_s = env->exception.vaddress;
10252 (uint32_t)env->exception.vaddress);
10256 env->cp15.dfar_s = env->exception.vaddress;
10258 (uint32_t)env->exception.vaddress);
10274 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10277 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
10280 * QEMU syndrome values are v8-style. v7 has the IL bit
10284 if (cs->exception_index == EXCP_PREFETCH_ABORT ||
10285 (cs->exception_index == EXCP_DATA_ABORT &&
10286 !(env->exception.syndrome & ARM_EL_ISV)) ||
10287 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
10288 env->exception.syndrome &= ~ARM_EL_IL;
10291 env->cp15.esr_el[2] = env->exception.syndrome;
10299 if (!(env->cp15.scr_el3 & SCR_EA)) {
10302 if (!(env->cp15.scr_el3 & SCR_IRQ)) {
10305 if (!(env->cp15.scr_el3 & SCR_FIQ)) {
10309 addr += env->cp15.hvbar;
10317 CPUARMState *env = &cpu->env;
10325 switch (syn_get_ec(env->exception.syndrome)) {
10346 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
10349 if (env->exception.target_el == 2) {
10351 switch (syn_get_ec(env->exception.syndrome)) {
10356 env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2,
10360 env->exception.syndrome = syn_set_ec(env->exception.syndrome,
10364 env->exception.syndrome = syn_set_ec(env->exception.syndrome,
10372 switch (cs->exception_index) {
10377 if (env->thumb) {
10393 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
10394 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
10396 env->exception.fsr, (uint32_t)env->exception.vaddress);
10403 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
10404 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
10406 env->exception.fsr,
10407 (uint32_t)env->exception.vaddress);
10419 if (env->cp15.scr_el3 & SCR_IRQ) {
10430 if (env->cp15.scr_el3 & SCR_FIQ) {
10460 env->exception.fsr = arm_fi_to_lfsc(&fi);
10462 env->exception.fsr = arm_fi_to_sfsc(&fi);
10464 env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
10465 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
10467 env->exception.fsr);
10485 if (env->thumb) {
10492 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10497 addr += env->cp15.mvbar;
10505          * This register is only honoured in non-monitor mode, and is banked.
10511 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
10512 env->cp15.scr_el3 &= ~SCR_NS;
10525 int mode = env->uncached_cpsr & CPSR_M;
10588 ret |= env->pstate & PSTATE_SS;
10622 CPUARMState *env = &cpu->env;
10623 unsigned int new_el = env->exception.target_el;
10624 vaddr addr = env->cp15.vbar_el[new_el];
10673 switch (cs->exception_index) {
10676 env->cp15.mfar_el3);
10684 if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
10685 syndrome_is_sync_extabt(env->exception.syndrome)) {
10688 env->cp15.far_el[new_el] = env->exception.vaddress;
10690 env->cp15.far_el[new_el]);
10698 switch (syn_get_ec(env->exception.syndrome)) {
10706 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
10715 * number. Notice that we read a 4-bit AArch32 register number and
10716 * write back a 5-bit AArch64 one.
10718 rt = extract32(env->exception.syndrome, 5, 4);
10720 env->exception.syndrome = deposit32(env->exception.syndrome,
10726 rt = extract32(env->exception.syndrome, 5, 4);
10728 env->exception.syndrome = deposit32(env->exception.syndrome,
10730 rt = extract32(env->exception.syndrome, 10, 4);
10732 env->exception.syndrome = deposit32(env->exception.syndrome,
10736 env->cp15.esr_el[new_el] = env->exception.syndrome;
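The syndrome fix-ups above use QEMU's extract32/deposit32 bit-field helpers to widen the 4-bit AArch32 register number at [8:5] into the 5-bit AArch64 field at [9:5], in place. Self-contained versions of those helpers with the same semantics:

#include <stdint.h>
#include <stdio.h>

static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

static uint32_t deposit32(uint32_t value, int start, int length,
                          uint32_t field)
{
    uint32_t mask = (~0u >> (32 - length)) << start;
    return (value & ~mask) | ((field << start) & mask);
}

int main(void)
{
    uint32_t syndrome = 0x123;                   /* made-up syndrome bits */
    uint32_t rt = extract32(syndrome, 5, 4);     /* 4-bit AArch32 Rt */
    syndrome = deposit32(syndrome, 5, 5, rt);    /* rewrite as 5-bit Rt */
    printf("rt=%u syndrome=%#x\n", rt, syndrome);
    return 0;
}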
10752 env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
10753 env->cp15.esr_el[new_el] = env->exception.syndrome;
10756 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10762 env->elr_el[new_el] = env->pc;
10779 env->elr_el[new_el] = env->regs[15];
10783 env->condexec_bits = 0;
10785 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
10789 env->elr_el[new_el]);
10805 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
10816 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
10824 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) {
10832 env->aarch64 = true;
10839 env->pc = addr;
10842 new_el, env->pc, pstate_read(env));
10856 CPUARMState *env = &cpu->env;
10861 env->xregs[0]);
10863 env->pc += 4;
10867 env->regs[0]);
10869 env->regs[15] += env->thumb ? 2 : 4;
10877 * to the AArch64-entry or AArch32-entry function depending on the
10881 * and KVM to re-inject guest debug exceptions, and to
10882 * inject a Synchronous-External-Abort.
10887 CPUARMState *env = &cpu->env;
10888 unsigned int new_el = env->exception.target_el;
10896 && !excp_is_internal(cs->exception_index)) {
10898 syn_get_ec(env->exception.syndrome),
10899 env->exception.syndrome);
10902 if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
10914 if (cs->exception_index == EXCP_SEMIHOST) {
10923 * cs->interrupt_request.
10929 assert(!excp_is_internal(cs->exception_index));
10939 cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
10961 return env->cp15.sctlr_el[el];
11146 max_tsz = 48 - (gran == Gran64K);
11225 * Return the exception level to which FP-disabled exceptions should
11243 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
11247 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
11248 if (!extract32(env->v7m.nsacr, 10, 1)) {
11267 int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
11290 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
11291 * to control non-secure access to the FPU. It doesn't have any
11296 if (!extract32(env->cp15.nsacr, 10, 1)) {
11308 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
11319 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
11326 if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
11374 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
11377 /* See ARM pseudo-function ELIsInHost. */
11446 assert(vq <= env_archcpu(env)->sve_max_vq);
11450 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
11456 pmask = ~(-1ULL << (16 * (vq & 3)));
11460 env->vfp.pregs[i].p[j] &= pmask;
11509 sm = FIELD_EX64(env->svcr, SVCR, SM);
11521 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
11522 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
11523 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
11525 * vq0->vq0 transition between EL0->EL1.
11545 return arm_secure_to_space(env->v7m.secure);
11550 * defined, in which case QEMU defaults to non-secure.
11558 if (extract32(env->pstate, 2, 2) == 3) {
11566 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
11580 * defined, in which case QEMU defaults to non-secure.
11591 if (!(env->cp15.scr_el3 & SCR_NS)) {
11593 } else if (env->cp15.scr_el3 & SCR_NSE) {