xref: /qemu/target/arm/cpu.c (revision ccc76731aee7d3efb16a679fa5bf3cc3f57e9f2d)
/*
 * QEMU ARM CPU
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/log.h"
#include "exec/page-vary.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "cpu.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#include "hw/boards.h"
#ifdef CONFIG_TCG
#include "hw/intc/armv7m_nvic.h"
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
#include "sysemu/hw_accel.h"
#include "kvm_arm.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
#include "cpregs.h"
#include "target/arm/cpu-qom.h"
#include "target/arm/gtimer.h"

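/*
 * CPUClass::set_pc hook. For AArch32, bit 0 of the new PC value selects
 * the Thumb instruction set, as it does for an interworking branch.
 */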
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        env->pc = value;
        env->thumb = false;
    } else {
        env->regs[15] = value & ~1;
        env->thumb = value & 1;
    }
}

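/* CPUClass::get_pc hook: return the PC for the current execution state. */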
static vaddr arm_cpu_get_pc(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        return env->pc;
    } else {
        return env->regs[15];
    }
}

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
                                 const TranslationBlock *tb)
{
    /* The program counter is always up to date with CF_PCREL. */
    if (!(tb_cflags(tb) & CF_PCREL)) {
        CPUARMState *env = cpu_env(cs);
        /*
         * It's OK to look at env for the current mode here, because it's
         * never possible for an AArch64 TB to chain to an AArch32 TB.
         */
        if (is_a64(env)) {
            env->pc = tb->pc;
        } else {
            env->regs[15] = tb->pc;
        }
    }
}

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data)
{
    CPUARMState *env = cpu_env(cs);

    if (is_a64(env)) {
        if (tb_cflags(tb) & CF_PCREL) {
            env->pc = (env->pc & TARGET_PAGE_MASK) | data[0];
        } else {
            env->pc = data[0];
        }
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        if (tb_cflags(tb) & CF_PCREL) {
            env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0];
        } else {
            env->regs[15] = data[0];
        }
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}
#endif /* CONFIG_TCG */

/*
 * With SCTLR_ELx.NMI == 0, IRQ with Superpriority is masked identically to
 * IRQ without Superpriority. Moreover, if the GIC is configured so that
 * FEAT_GICv3_NMI is only set if FEAT_NMI is set, then we won't ever see
 * CPU_INTERRUPT_*NMI anyway. So we might as well accept NMI here
 * unconditionally.
 */
static bool arm_cpu_has_work(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return (cpu->power_state != PSCI_OFF)
        && cs->interrupt_request &
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
         | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VINMI | CPU_INTERRUPT_VFNMI
         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
         | CPU_INTERRUPT_EXITTB);
}

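/* CPUClass::mmu_index hook; the ARM MMU index is the same for code and data accesses. */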
static int arm_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return arm_env_mmu_index(cpu_env(cs));
}

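/*
 * Register hooks to be called on exception level changes: the "pre"
 * hooks run before the CPU state is changed to the new EL, and the
 * plain EL-change hooks run after it.
 */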
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
}

void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
}

/*
 * Set the float_status behaviour to match the Arm defaults:
 *  * tininess-before-rounding
 *  * 2-input NaN propagation prefers SNaN over QNaN, and then
 *    operand A over operand B (see FPProcessNaNs() pseudocode)
 *  * 3-input NaN propagation prefers SNaN over QNaN, and then
 *    operand C over A over B (see FPProcessNaNs3() pseudocode,
 *    but note that for QEMU muladd is a * b + c, whereas for
 *    the pseudocode function the arguments are in the order c, a, b)
 *  * 0 * Inf + NaN returns the default NaN if the input NaN is quiet,
 *    and the input NaN if it is signalling
 *  * Default NaN has sign bit clear, msb frac bit set
 */
static void arm_set_default_fp_behaviours(float_status *s)
{
    set_float_detect_tininess(float_tininess_before_rounding, s);
    set_float_2nan_prop_rule(float_2nan_prop_s_ab, s);
    set_float_3nan_prop_rule(float_3nan_prop_s_cab, s);
    set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, s);
    set_float_default_nan_pattern(0b01000000, s);
}

static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS)) {
        return;
    }

    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /*
     * A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}

static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /*
     * Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}

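/* Resettable "hold" phase handler: put the CPU into its architectural reset state. */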
static void arm_cpu_reset_hold(Object *obj, ResetType type)
{
    CPUState *cs = CPU(obj);
    ARMCPU *cpu = ARM_CPU(cs);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
    CPUARMState *env = &cpu->env;

    if (acc->parent_phases.hold) {
        acc->parent_phases.hold(obj, type);
    }

    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);

    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = cs->start_powered_off ? PSCI_OFF : PSCI_ON;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
        env->aarch64 = true;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTR_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* Enable all PAC keys.  */
        env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                  SCTLR_EnDA | SCTLR_EnDB);
        /* Trap on btype=3 for PACIxSP. */
        env->cp15.sctlr_el[1] |= SCTLR_BT0;
        /* Trap on implementation defined registers. */
        if (cpu_isar_feature(aa64_tidcp1, cpu)) {
            env->cp15.sctlr_el[1] |= SCTLR_TIDCP;
        }
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR_EL1, FPEN, 3);
        /* and to the SVE instructions, with default vector length */
        if (cpu_isar_feature(aa64_sve, cpu)) {
            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                             CPACR_EL1, ZEN, 3);
            env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
        }
        /* and for SME instructions, with default vector length, and TPIDR2 */
        if (cpu_isar_feature(aa64_sme, cpu)) {
            env->cp15.sctlr_el[1] |= SCTLR_EnTP2;
            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                             CPACR_EL1, SMEN, 3);
            env->vfp.smcr_el[1] = cpu->sme_default_vq - 1;
            if (cpu_isar_feature(aa64_sme_fa64, cpu)) {
                env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1],
                                                 SMCR, FA64, 1);
            }
        }
        /*
         * Enable 48-bit address space (TODO: take reserved_va into account).
         * Enable TBI0 but not TBI1.
         * Note that this must match useronly_clean_ptr.
         */
        env->cp15.tcr_el[1] = 5 | (1ULL << 37);

        /* Enable MTE */
        if (cpu_isar_feature(aa64_mte, cpu)) {
            /* Enable tag access, but leave TCF0 as No Effect (0). */
            env->cp15.sctlr_el[1] |= SCTLR_ATA0;
            /*
             * Exclude all tags, so that tag 0 is always used.
             * This corresponds to Linux current->thread.gcr_incl = 0.
             *
             * Set RRND, so that helper_irg() will generate a seed later.
             * Here in cpu_reset(), the crypto subsystem has not yet been
             * initialized.
             */
            env->cp15.gcr_el1 = 0x1ffff;
        }
        /*
         * Disable access to SCXTNUM_EL0 from CSV2_1p2.
         * This is not yet exposed from the Linux kernel in any way.
         */
        env->cp15.sctlr_el[1] |= SCTLR_TSCXT;
        /* Disable access to Debug Communication Channel (DCC). */
        env->cp15.mdscr_el1 |= 1 << 12;
        /* Enable FEAT_MOPS */
        env->cp15.sctlr_el[1] |= SCTLR_MSCEN;
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }

        /* Sample rvbar at reset.  */
        env->cp15.rvbar = cpu->rvbar_prop;
        env->pc = env->cp15.rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR, CP10, 3);
        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                         CPACR, CP11, 3);
#endif
        if (arm_feature(env, ARM_FEATURE_V8)) {
            env->cp15.rvbar = cpu->rvbar_prop;
            env->regs[15] = cpu->rvbar_prop;
        }
    }

#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else

    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;

    /*
     * AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif

    if (arm_feature(env, ARM_FEATURE_M)) {
#ifndef CONFIG_USER_ONLY
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        uint8_t *rom;
        uint32_t vecbase;
#endif

        if (cpu_isar_feature(aa32_lob, cpu)) {
            /*
             * LTPSIZE is constant 4 if MVE not implemented, and resets
             * to an UNKNOWN value if MVE is implemented. We choose to
             * always reset to 4.
             */
            env->v7m.ltpsize = 4;
            /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
            env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
            env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /*
             * This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
            /*
             * Set NSACR to indicate "NS access permitted to everything";
             * this avoids having to have all the tests of it being
             * conditional on ARM_FEATURE_M_SECURITY. Note also that from
             * v8.1M the guest-visible value of NSACR in a CPU without the
             * Security Extension is 0xcff.
             */
            env->v7m.nsacr = 0xcff;
        }

        /*
         * In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
            env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
                R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
        }

#ifndef CONFIG_USER_ONLY
        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
        env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr_for_as(cs->as, vecbase, 8);
        if (rom) {
            /*
             * Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /*
             * Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(cs->as, vecbase);
            initial_pc = ldl_phys(cs->as, vecbase + 4);
        }

        qemu_log_mask(CPU_LOG_INT,
                      "Loaded reset SP 0x%x PC 0x%x from vector table\n",
                      initial_msp, initial_pc);

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
#else
        /*
         * For user mode we run non-secure and with access to the FPU.
         * The FPU context is active (i.e. does not need further setup)
         * and is owned by non-secure.
         */
        env->v7m.secure = false;
        env->v7m.nsacr = 0xcff;
        env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
        env->v7m.fpccr[M_REG_S] &=
            ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
#endif
    }

    /*
     * M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }

        if (cpu->pmsav8r_hdregion > 0) {
            memset(env->pmsav8.hprbar, 0,
                   sizeof(*env->pmsav8.hprbar) * cpu->pmsav8r_hdregion);
            memset(env->pmsav8.hprlar, 0,
                   sizeof(*env->pmsav8.hprlar) * cpu->pmsav8r_hdregion);
        }

        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        env->sau.rnr = 0;
        /*
         * SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }

    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status_f16);
    arm_set_default_fp_behaviours(&env->vfp.fp_status);
    arm_set_default_fp_behaviours(&env->vfp.standard_fp_status);
    arm_set_default_fp_behaviours(&env->vfp.fp_status_f16);
    arm_set_default_fp_behaviours(&env->vfp.standard_fp_status_f16);

#ifndef CONFIG_USER_ONLY
    if (kvm_enabled()) {
        kvm_arm_reset_vcpu(cpu);
    }
#endif

    if (tcg_enabled()) {
        hw_breakpoint_update_all(cpu);
        hw_watchpoint_update_all(cpu);

        arm_rebuild_hflags(env);
    }
}

void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
{
    ARMCPU *cpu = ARM_CPU(cpustate);
    CPUARMState *env = &cpu->env;
    bool have_el3 = arm_feature(env, ARM_FEATURE_EL3);
    bool have_el2 = arm_feature(env, ARM_FEATURE_EL2);

    /*
     * Check we have the EL we're aiming for. If that is the
     * highest implemented EL, then cpu_reset has already done
     * all the work.
     */
    switch (target_el) {
    case 3:
        assert(have_el3);
        return;
    case 2:
        assert(have_el2);
        if (!have_el3) {
            return;
        }
        break;
    case 1:
        if (!have_el3 && !have_el2) {
            return;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (have_el3) {
        /*
         * Set the EL3 state so code can run at EL2. This should match
         * the requirements set by Linux in its booting spec.
         */
        if (env->aarch64) {
            env->cp15.scr_el3 |= SCR_RW;
            if (cpu_isar_feature(aa64_pauth, cpu)) {
                env->cp15.scr_el3 |= SCR_API | SCR_APK;
            }
            if (cpu_isar_feature(aa64_mte, cpu)) {
                env->cp15.scr_el3 |= SCR_ATA;
            }
            if (cpu_isar_feature(aa64_sve, cpu)) {
                env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
                env->vfp.zcr_el[3] = 0xf;
            }
            if (cpu_isar_feature(aa64_sme, cpu)) {
                env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
                env->cp15.scr_el3 |= SCR_ENTP2;
                env->vfp.smcr_el[3] = 0xf;
            }
            if (cpu_isar_feature(aa64_hcx, cpu)) {
                env->cp15.scr_el3 |= SCR_HXEN;
            }
            if (cpu_isar_feature(aa64_fgt, cpu)) {
                env->cp15.scr_el3 |= SCR_FGTEN;
            }
        }

        if (target_el == 2) {
            /* If the guest is at EL2 then Linux expects the HVC insn to work */
            env->cp15.scr_el3 |= SCR_HCE;
        }

        /* Put CPU into non-secure state */
        env->cp15.scr_el3 |= SCR_NS;
        /* Set NSACR.{CP11,CP10} so NS can access the FPU */
        env->cp15.nsacr |= 3 << 10;
    }

    if (have_el2 && target_el < 2) {
        /* Set EL2 state so code can run at EL1. */
        if (env->aarch64) {
            env->cp15.hcr_el2 |= HCR_RW;
        }
    }

    /* Set the CPU to the desired state */
    if (env->aarch64) {
        env->pstate = aarch64_pstate_mode(target_el, true);
    } else {
        static const uint32_t mode_for_el[] = {
            0,
            ARM_CPU_MODE_SVC,
            ARM_CPU_MODE_HYP,
            ARM_CPU_MODE_SVC,
        };

        cpsr_write(env, mode_for_el[target_el], CPSR_M, CPSRWriteRaw);
    }
}


#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)

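/*
 * Return true if the exception @excp_idx, which targets @target_el,
 * would not be masked if taken from @cur_el with the current
 * PSTATE/CPSR bits, given the routing controls in SCR_EL3 and @hcr_el2.
 */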
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el,
                                     unsigned int cur_el, bool secure,
                                     uint64_t hcr_el2)
{
    CPUARMState *env = cpu_env(cs);
    bool pstate_unmasked;
    bool unmasked = false;
    bool allIntMask = false;

    /*
     * Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken
     * but left pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
        env->cp15.sctlr_el[target_el] & SCTLR_NMI && cur_el == target_el) {
        allIntMask = env->pstate & PSTATE_ALLINT ||
                     ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
                      (env->pstate & PSTATE_SP));
    }

    switch (excp_idx) {
    case EXCP_NMI:
        pstate_unmasked = !allIntMask;
        break;

    case EXCP_VINMI:
        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
            /* VINMIs are only taken when hypervized.  */
            return false;
        }
        return !allIntMask;
    case EXCP_VFNMI:
        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
            /* VFNMIs are only taken when hypervized.  */
            return false;
        }
        return !allIntMask;
    case EXCP_FIQ:
        pstate_unmasked = (!(env->daif & PSTATE_F)) && (!allIntMask);
        break;

    case EXCP_IRQ:
        pstate_unmasked = (!(env->daif & PSTATE_I)) && (!allIntMask);
        break;

    case EXCP_VFIQ:
        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
            /* VFIQs are only taken when hypervized.  */
            return false;
        }
        return !(env->daif & PSTATE_F) && (!allIntMask);
    case EXCP_VIRQ:
        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
            /* VIRQs are only taken when hypervized.  */
            return false;
        }
        return !(env->daif & PSTATE_I) && (!allIntMask);
    case EXCP_VSERR:
        if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
            /* VSErrs are only taken when hypervized.  */
            return false;
        }
        return !(env->daif & PSTATE_A);
    default:
        g_assert_not_reached();
    }

    /*
     * Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            switch (target_el) {
            case 2:
                /*
                 * According to ARM DDI 0487H.a, an interrupt can be masked
                 * when HCR_E2H and HCR_TGE are both set regardless of the
                 * current Security state. Note that we need to revisit this
                 * part again once we need to support NMI.
                 */
                if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                    unmasked = true;
                }
                break;
            case 3:
                /* Interrupt cannot be masked when the target EL is 3 */
                unmasked = true;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /*
             * The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /*
                 * If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these are set then we fall back
                 * to the CPSR.F setting otherwise we further assess the state
                 * below.
                 */
                hcr = hcr_el2 & HCR_FMO;
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /*
                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /*
                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * effect here.
                 */
                hcr = hcr_el2 & HCR_IMO;
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = true;
            }
        }
    }

    /*
     * The PSTATE bits only mask the interrupt if we have not overridden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}

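/*
 * TCG hook: check the pending interrupt lines in a fixed priority order
 * and deliver the first one that is unmasked, if any.
 */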
static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = cpu_env(cs);
    uint32_t cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint32_t target_el;
    uint32_t excp_idx;

    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */

    if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
        (arm_sctlr(env, cur_el) & SCTLR_NMI)) {
        if (interrupt_request & CPU_INTERRUPT_NMI) {
            excp_idx = EXCP_NMI;
            target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
            if (arm_excp_unmasked(cs, excp_idx, target_el,
                                  cur_el, secure, hcr_el2)) {
                goto found;
            }
        }
        if (interrupt_request & CPU_INTERRUPT_VINMI) {
            excp_idx = EXCP_VINMI;
            target_el = 1;
            if (arm_excp_unmasked(cs, excp_idx, target_el,
                                  cur_el, secure, hcr_el2)) {
                goto found;
            }
        }
        if (interrupt_request & CPU_INTERRUPT_VFNMI) {
            excp_idx = EXCP_VFNMI;
            target_el = 1;
            if (arm_excp_unmasked(cs, excp_idx, target_el,
                                  cur_el, secure, hcr_el2)) {
                goto found;
            }
        }
    } else {
        /*
         * NMI disabled: interrupts with superpriority are handled
         * as if they didn't have it
         */
        if (interrupt_request & CPU_INTERRUPT_NMI) {
            interrupt_request |= CPU_INTERRUPT_HARD;
        }
        if (interrupt_request & CPU_INTERRUPT_VINMI) {
            interrupt_request |= CPU_INTERRUPT_VIRQ;
        }
        if (interrupt_request & CPU_INTERRUPT_VFNMI) {
            interrupt_request |= CPU_INTERRUPT_VFIQ;
        }
    }

    if (interrupt_request & CPU_INTERRUPT_FIQ) {
        excp_idx = EXCP_FIQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        excp_idx = EXCP_IRQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
        excp_idx = EXCP_VIRQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
        excp_idx = EXCP_VFIQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VSERR) {
        excp_idx = EXCP_VSERR;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            /* Taking a virtual abort clears HCR_EL2.VSE */
            env->cp15.hcr_el2 &= ~HCR_VSE;
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
            goto found;
        }
    }
    return false;

 found:
    cs->exception_index = excp_idx;
    env->exception.target_el = target_el;
    cc->tcg_ops->do_interrupt(cs);
    return true;
}

#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */

void arm_cpu_update_virq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VIRQ, which is the logical OR of
     * the HCR_EL2.VI bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
        !(arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
        (env->irq_line_state & CPU_INTERRUPT_VIRQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
    }
}

void arm_cpu_update_vfiq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VFIQ, which is the logical OR of
     * the HCR_EL2.VF bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VF) &&
        !(arm_hcrx_el2_eff(env) & HCRX_VFNMI)) ||
        (env->irq_line_state & CPU_INTERRUPT_VFIQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
        }
    }
}

void arm_cpu_update_vinmi(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VINMI, which is the logical OR of
     * the HCRX_EL2.VINMI bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
                      (arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
        (env->irq_line_state & CPU_INTERRUPT_VINMI);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VINMI) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VINMI);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VINMI);
        }
    }
}

void arm_cpu_update_vfnmi(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VFNMI, which is the HCRX_EL2.VFNMI bit.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (arm_hcr_el2_eff(env) & HCR_VF) &&
                      (arm_hcrx_el2_eff(env) & HCRX_VFNMI);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFNMI) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VFNMI);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFNMI);
        }
    }
}

void arm_cpu_update_vserr(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = env->cp15.hcr_el2 & HCR_VSE;

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
        }
    }
}

#ifndef CONFIG_USER_ONLY
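/* qemu_irq input handler for the CPU's interrupt lines when not using KVM */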
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    static const int mask[] = {
        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
        [ARM_CPU_NMI] = CPU_INTERRUPT_NMI,
        [ARM_CPU_VINMI] = CPU_INTERRUPT_VINMI,
    };

    if (!arm_feature(env, ARM_FEATURE_EL2) &&
        (irq == ARM_CPU_VIRQ || irq == ARM_CPU_VFIQ)) {
        /*
         * The GIC might tell us about VIRQ and VFIQ state, but if we don't
         * have EL2 support we don't care. (Unless the guest is doing something
         * silly this will only be calls saying "level is still 0".)
         */
        return;
    }

    if (level) {
        env->irq_line_state |= mask[irq];
    } else {
        env->irq_line_state &= ~mask[irq];
    }

    switch (irq) {
    case ARM_CPU_VIRQ:
        arm_cpu_update_virq(cpu);
        break;
    case ARM_CPU_VFIQ:
        arm_cpu_update_vfiq(cpu);
        break;
    case ARM_CPU_VINMI:
        arm_cpu_update_vinmi(cpu);
        break;
    case ARM_CPU_IRQ:
    case ARM_CPU_FIQ:
    case ARM_CPU_NMI:
        if (level) {
            cpu_interrupt(cs, mask[irq]);
        } else {
            cpu_reset_interrupt(cs, mask[irq]);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

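/* qemu_irq input handler when using KVM: forward the IRQ/FIQ line state to the kernel */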
static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
#ifdef CONFIG_KVM
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t linestate_bit;
    int irq_id;

    switch (irq) {
    case ARM_CPU_IRQ:
        irq_id = KVM_ARM_IRQ_CPU_IRQ;
        linestate_bit = CPU_INTERRUPT_HARD;
        break;
    case ARM_CPU_FIQ:
        irq_id = KVM_ARM_IRQ_CPU_FIQ;
        linestate_bit = CPU_INTERRUPT_FIQ;
        break;
    default:
        g_assert_not_reached();
    }

    if (level) {
        env->irq_line_state |= linestate_bit;
    } else {
        env->irq_line_state &= ~linestate_bit;
    }
    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
#endif
}

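/* Report whether the vCPU currently sees big-endian data, for virtio device emulation */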
static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    cpu_synchronize_state(cs);
    return arm_cpu_data_is_big_endian(env);
}

#ifdef CONFIG_TCG
bool arm_cpu_exec_halt(CPUState *cs)
{
    bool leave_halt = cpu_has_work(cs);

    if (leave_halt) {
        /* We're about to come out of WFI/WFE: disable the WFxT timer */
        ARMCPU *cpu = ARM_CPU(cs);
        if (cpu->wfxt_timer) {
            timer_del(cpu->wfxt_timer);
        }
    }
    return leave_halt;
}
#endif

static void arm_wfxt_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUState *cs = CPU(cpu);

    /*
     * We expect the CPU to be halted; this will cause arm_cpu_has_work()
     * to return true (so we will come out of halt even with no other
     * pending interrupt), and the TCG accelerator's cpu_exec_interrupt()
     * function auto-clears the CPU_INTERRUPT_EXITTB flag for us.
     */
    cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
}
#endif

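/* Configure the Capstone disassembler for the CPU's current instruction set and endianness */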
static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;
        if (env->thumb) {
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#if TARGET_BIG_ENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}

#ifdef TARGET_AARCH64

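/* Print the AArch64 CPU state, including SVE/SME vector state when enabled */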
static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i, j;
    int el = arm_current_el(env);
    uint64_t hcr = arm_hcr_el2_eff(env);
    const char *ns_status;
    bool sve;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    for (i = 0; i < 32; i++) {
        if (i == 31) {
            qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
        } else {
            qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
                         (i + 2) % 3 ? " " : "\n");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }
    qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
                 psr,
                 psr & PSTATE_N ? 'N' : '-',
                 psr & PSTATE_Z ? 'Z' : '-',
                 psr & PSTATE_C ? 'C' : '-',
                 psr & PSTATE_V ? 'V' : '-',
                 ns_status,
                 el,
                 psr & PSTATE_SP ? 'h' : 't');

    if (cpu_isar_feature(aa64_sme, cpu)) {
        qemu_fprintf(f, "  SVCR=%08" PRIx64 " %c%c",
                     env->svcr,
                     (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'),
                     (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
    }
    if (cpu_isar_feature(aa64_bti, cpu)) {
        qemu_fprintf(f, "  BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
    }
    qemu_fprintf(f, "%s%s%s",
                 (hcr & HCR_NV) ? " NV" : "",
                 (hcr & HCR_NV1) ? " NV1" : "",
                 (hcr & HCR_NV2) ? " NV2" : "");
    if (!(flags & CPU_DUMP_FPU)) {
        qemu_fprintf(f, "\n");
        return;
    }
    if (fp_exception_el(env, el) != 0) {
        qemu_fprintf(f, "    FPU disabled\n");
        return;
    }
    qemu_fprintf(f, "     FPCR=%08x FPSR=%08x\n",
                 vfp_get_fpcr(env), vfp_get_fpsr(env));

    if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) {
        sve = sme_exception_el(env, el) == 0;
    } else if (cpu_isar_feature(aa64_sve, cpu)) {
        sve = sve_exception_el(env, el) == 0;
    } else {
        sve = false;
    }

    if (sve) {
        int zcr_len = sve_vqm1_for_el(env, el);

        for (i = 0; i <= FFR_PRED_NUM; i++) {
            bool eol;
            if (i == FFR_PRED_NUM) {
                qemu_fprintf(f, "FFR=");
                /* It's last, so end the line.  */
                eol = true;
            } else {
                qemu_fprintf(f, "P%02d=", i);
                switch (zcr_len) {
                case 0:
                    eol = i % 8 == 7;
                    break;
                case 1:
                    eol = i % 6 == 5;
                    break;
                case 2:
                case 3:
                    eol = i % 3 == 2;
                    break;
                default:
                    /* More than one quadword per predicate.  */
                    eol = true;
                    break;
                }
            }
            for (j = zcr_len / 4; j >= 0; j--) {
                int digits;
                if (j * 4 + 4 <= zcr_len + 1) {
                    digits = 16;
                } else {
                    digits = (zcr_len % 4 + 1) * 4;
                }
                qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
                             env->vfp.pregs[i].p[j],
                             j ? ":" : eol ? "\n" : " ");
            }
        }

        if (zcr_len == 0) {
            /*
             * With vl=16, there are only 37 columns per register,
             * so output two registers per line.
             */
            for (i = 0; i < 32; i++) {
                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                             i, env->vfp.zregs[i].d[1],
                             env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
            }
        } else {
            for (i = 0; i < 32; i++) {
                qemu_fprintf(f, "Z%02d=", i);
                for (j = zcr_len; j >= 0; j--) {
                    qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
                                 env->vfp.zregs[i].d[j * 2 + 1],
                                 env->vfp.zregs[i].d[j * 2 + 0],
                                 j ? ":" : "\n");
                }
            }
        }
    } else {
        for (i = 0; i < 32; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                         i, q[1], q[0], (i & 1 ? "\n" : " "));
        }
    }

    if (cpu_isar_feature(aa64_sme, cpu) &&
        FIELD_EX64(env->svcr, SVCR, ZA) &&
        sme_exception_el(env, el) == 0) {
        int zcr_len = sve_vqm1_for_el_sm(env, el, true);
        int svl = (zcr_len + 1) * 16;
        int svl_lg10 = svl < 100 ? 2 : 3;

        for (i = 0; i < svl; i++) {
            qemu_fprintf(f, "ZA[%0*d]=", svl_lg10, i);
            for (j = zcr_len; j >= 0; --j) {
                qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%c",
                             env->zarray[i].d[2 * j + 1],
                             env->zarray[i].d[2 * j],
                             j ? ':' : '\n');
            }
        }
    }
}

#else

static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    g_assert_not_reached();
}

#endif

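/* CPUClass::dump_state hook: dispatch to the AArch64 dumper in 64-bit mode */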
static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
                     ns_status,
                     mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                     psr,
                     psr & CPSR_N ? 'N' : '-',
                     psr & CPSR_Z ? 'Z' : '-',
                     psr & CPSR_C ? 'C' : '-',
                     psr & CPSR_V ? 'V' : '-',
                     psr & CPSR_T ? 'T' : 'A',
                     ns_status,
                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            numvfpregs = 32;
        } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            numvfpregs = 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                         i * 2, (uint32_t)v,
                         i * 2 + 1, (uint32_t)(v >> 32),
                         i, v);
        }
        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
        if (cpu_isar_feature(aa32_mve, cpu)) {
            qemu_fprintf(f, "VPR: %08x\n", env->v7m.vpr);
        }
    }
}

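/* Construct an MPIDR-style affinity value from a linear CPU index and cluster size */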
uint64_t arm_build_mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t Aff1 = idx / clustersz;
    uint32_t Aff0 = idx % clustersz;
    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}

uint64_t arm_cpu_mp_affinity(ARMCPU *cpu)
{
    return cpu->mp_affinity;
}

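/* QOM instance_init: set up per-CPU state that does not depend on property values */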
1501 static void arm_cpu_initfn(Object *obj)
1502 {
1503     ARMCPU *cpu = ARM_CPU(obj);
1504 
1505     cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
1506                                          NULL, g_free);
1507 
1508     QLIST_INIT(&cpu->pre_el_change_hooks);
1509     QLIST_INIT(&cpu->el_change_hooks);
1510 
1511 #ifdef CONFIG_USER_ONLY
1512 # ifdef TARGET_AARCH64
1513     /*
1514      * The linux kernel defaults to 512-bit for SVE, and 256-bit for SME.
1515      * These values were chosen to fit within the default signal frame.
1516      * See documentation for /proc/sys/abi/{sve,sme}_default_vector_length,
1517      * and our corresponding cpu property.
1518      */
1519     cpu->sve_default_vq = 4;
1520     cpu->sme_default_vq = 2;
1521 # endif
1522 #else
1523     /* Our inbound IRQ and FIQ lines */
1524     if (kvm_enabled()) {
1525         /*
1526          * VIRQ, VFIQ, NMI, VINMI are unused with KVM but we add
1527          * them to maintain the same interface as non-KVM CPUs.
1528          */
1529         qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 6);
1530     } else {
1531         qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 6);
1532     }
1533 
1534     qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
1535                        ARRAY_SIZE(cpu->gt_timer_outputs));
1536 
1537     qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
1538                              "gicv3-maintenance-interrupt", 1);
1539     qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
1540                              "pmu-interrupt", 1);
1541 #endif
1542 
1543     /* DTB consumers generally don't in fact care what the 'compatible'
1544      * string is, so always provide some string and trust that a hypothetical
1545      * picky DTB consumer will also provide a helpful error message.
1546      */
1547     cpu->dtb_compatible = "qemu,unknown";
1548     cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */
1549     cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
1550 
1551     if (tcg_enabled() || hvf_enabled()) {
1552         /* TCG and HVF implement PSCI 1.1 */
1553         cpu->psci_version = QEMU_PSCI_VERSION_1_1;
1554     }
1555 }
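
/*
 * Illustrative sketch, not part of this file: a board model would
 * typically wire its interrupt controller outputs to the GPIO inputs
 * created in arm_cpu_initfn() above, along the lines of:
 *
 *     qemu_irq irq = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ);
 *     sysbus_connect_irq(SYS_BUS_DEVICE(gic), n, irq);
 *
 * where ARM_CPU_IRQ/ARM_CPU_FIQ (etc.) index the six lines registered
 * with qdev_init_gpio_in(), and "gic" and "n" stand in for whatever
 * interrupt controller and output number the board uses.
 */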
1556 
1557 /*
1558  * 0 means "unset, use the default value". That default might vary depending
1559  * on the CPU type, and is set in the realize fn.
1560  */
1561 static Property arm_cpu_gt_cntfrq_property =
1562             DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz, 0);
1563 
1564 static Property arm_cpu_reset_cbar_property =
1565             DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);
1566 
1567 static Property arm_cpu_reset_hivecs_property =
1568             DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
1569 
1570 #ifndef CONFIG_USER_ONLY
1571 static Property arm_cpu_has_el2_property =
1572             DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
1573 
1574 static Property arm_cpu_has_el3_property =
1575             DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
1576 #endif
1577 
1578 static Property arm_cpu_cfgend_property =
1579             DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);
1580 
1581 static Property arm_cpu_has_vfp_property =
1582             DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);
1583 
1584 static Property arm_cpu_has_vfp_d32_property =
1585             DEFINE_PROP_BOOL("vfp-d32", ARMCPU, has_vfp_d32, true);
1586 
1587 static Property arm_cpu_has_neon_property =
1588             DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);
1589 
1590 static Property arm_cpu_has_dsp_property =
1591             DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);
1592 
1593 static Property arm_cpu_has_mpu_property =
1594             DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
1595 
1596 /* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
1597  * because the CPU initfn will have already set cpu->pmsav7_dregion to
1598  * the right value for that particular CPU type, and we don't want
1599  * to override that with an incorrect constant value.
1600  */
1601 static Property arm_cpu_pmsav7_dregion_property =
1602             DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
1603                                            pmsav7_dregion,
1604                                            qdev_prop_uint32, uint32_t);
1605 
1606 static bool arm_get_pmu(Object *obj, Error **errp)
1607 {
1608     ARMCPU *cpu = ARM_CPU(obj);
1609 
1610     return cpu->has_pmu;
1611 }
1612 
1613 static void arm_set_pmu(Object *obj, bool value, Error **errp)
1614 {
1615     ARMCPU *cpu = ARM_CPU(obj);
1616 
1617     if (value) {
1618         if (kvm_enabled() && !kvm_arm_pmu_supported()) {
1619             error_setg(errp, "'pmu' feature not supported by KVM on this host");
1620             return;
1621         }
1622         set_feature(&cpu->env, ARM_FEATURE_PMU);
1623     } else {
1624         unset_feature(&cpu->env, ARM_FEATURE_PMU);
1625     }
1626     cpu->has_pmu = value;
1627 }
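
/*
 * Usage sketch: these accessors back the "pmu" bool property added in
 * arm_cpu_post_init() below, so the PMU can be disabled either on the
 * command line ("-cpu max,pmu=off") or programmatically, e.g.:
 *
 *     object_property_set_bool(OBJECT(cpu), "pmu", false, &error_fatal);
 */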
1628 
1629 unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
1630 {
1631     /*
1632      * The exact approach to calculating guest ticks is:
1633      *
1634      *     muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz,
1635      *              NANOSECONDS_PER_SECOND);
1636      *
1637      * We don't do that. Rather we intentionally use integer division
1638      * truncation below and in the caller for the conversion of host monotonic
1639      * time to guest ticks to provide the exact inverse for the semantics of
1640      * the QEMUTimer scale factor. QEMUTimer's scale factor is an integer, so
1641      * it loses precision when representing frequencies where
1642      * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq_hz) > 0` holds. Failing to
1643      * provide an exact inverse leads to scheduling timers with negative
1644      * periods, which in turn leads to sticky behaviour in the guest.
1645      *
1646      * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor
1647      * cannot become zero.
1648      */
1649     return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ?
1650       NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
1651 }
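
/*
 * Worked example of the truncation discussed above: the back-compat
 * 62.5MHz CNTFRQ divides 1e9 exactly, giving a lossless 16ns period.
 * A hypothetical 300MHz CNTFRQ would truncate to a 3ns period (not
 * 3.33ns), and any frequency of 1GHz or above yields the minimum
 * period of 1ns.
 */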
1652 
1653 static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
1654 {
1655     CPUARMState *env = &cpu->env;
1656     bool no_aa32 = false;
1657 
1658     /*
1659      * Some features automatically imply others: set the feature
1660      * bits explicitly for these cases.
1661      */
1662 
1663     if (arm_feature(env, ARM_FEATURE_M)) {
1664         set_feature(env, ARM_FEATURE_PMSA);
1665     }
1666 
1667     if (arm_feature(env, ARM_FEATURE_V8)) {
1668         if (arm_feature(env, ARM_FEATURE_M)) {
1669             set_feature(env, ARM_FEATURE_V7);
1670         } else {
1671             set_feature(env, ARM_FEATURE_V7VE);
1672         }
1673     }
1674 
1675     /*
1676      * There exist AArch64 cpus without AArch32 support.  When KVM
1677      * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
1678      * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
1679      * As a general principle, we also do not make ID register
1680      * consistency checks anywhere unless using TCG, because only
1681      * for TCG would a consistency-check failure be a QEMU bug.
1682      */
1683     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1684         no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
1685     }
1686 
1687     if (arm_feature(env, ARM_FEATURE_V7VE)) {
1688         /*
1689          * v7 Virtualization Extensions. In real hardware this implies
1690          * EL2 and also the presence of the Security Extensions.
1691          * For QEMU, for backwards-compatibility we implement some
1692          * CPUs or CPU configs which have no actual EL2 or EL3 but do
1693          * include the various other features that V7VE implies.
1694          * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
1695          * Security Extensions is ARM_FEATURE_EL3.
1696          */
1697         assert(!tcg_enabled() || no_aa32 ||
1698                cpu_isar_feature(aa32_arm_div, cpu));
1699         set_feature(env, ARM_FEATURE_LPAE);
1700         set_feature(env, ARM_FEATURE_V7);
1701     }
1702     if (arm_feature(env, ARM_FEATURE_V7)) {
1703         set_feature(env, ARM_FEATURE_VAPA);
1704         set_feature(env, ARM_FEATURE_THUMB2);
1705         set_feature(env, ARM_FEATURE_MPIDR);
1706         if (!arm_feature(env, ARM_FEATURE_M)) {
1707             set_feature(env, ARM_FEATURE_V6K);
1708         } else {
1709             set_feature(env, ARM_FEATURE_V6);
1710         }
1711 
1712         /*
1713          * Always define VBAR for V7 CPUs even if it doesn't exist in
1714          * non-EL3 configs. This is needed by some legacy boards.
1715          */
1716         set_feature(env, ARM_FEATURE_VBAR);
1717     }
1718     if (arm_feature(env, ARM_FEATURE_V6K)) {
1719         set_feature(env, ARM_FEATURE_V6);
1720         set_feature(env, ARM_FEATURE_MVFR);
1721     }
1722     if (arm_feature(env, ARM_FEATURE_V6)) {
1723         set_feature(env, ARM_FEATURE_V5);
1724         if (!arm_feature(env, ARM_FEATURE_M)) {
1725             assert(!tcg_enabled() || no_aa32 ||
1726                    cpu_isar_feature(aa32_jazelle, cpu));
1727             set_feature(env, ARM_FEATURE_AUXCR);
1728         }
1729     }
1730     if (arm_feature(env, ARM_FEATURE_V5)) {
1731         set_feature(env, ARM_FEATURE_V4T);
1732     }
1733     if (arm_feature(env, ARM_FEATURE_LPAE)) {
1734         set_feature(env, ARM_FEATURE_V7MP);
1735     }
1736     if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
1737         set_feature(env, ARM_FEATURE_CBAR);
1738     }
1739     if (arm_feature(env, ARM_FEATURE_THUMB2) &&
1740         !arm_feature(env, ARM_FEATURE_M)) {
1741         set_feature(env, ARM_FEATURE_THUMB_DSP);
1742     }
1743 }
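
/*
 * Example of the transitive closure computed above: an A-profile CPU
 * that sets only ARM_FEATURE_V8 ends up with V7VE, V7, V6K, V6, V5 and
 * V4T all set, plus LPAE (and hence V7MP), VAPA, THUMB2 (and hence
 * THUMB_DSP), MPIDR, VBAR, MVFR and AUXCR.
 */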
1744 
1745 void arm_cpu_post_init(Object *obj)
1746 {
1747     ARMCPU *cpu = ARM_CPU(obj);
1748 
1749     /*
1750      * Some features imply others. Figure this out now, because we
1751      * are going to look at the feature bits in deciding which
1752      * properties to add.
1753      */
1754     arm_cpu_propagate_feature_implications(cpu);
1755 
1756     if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
1757         arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
1758         qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
1759     }
1760 
1761     if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
1762         qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property);
1763     }
1764 
1765     if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1766         object_property_add_uint64_ptr(obj, "rvbar",
1767                                        &cpu->rvbar_prop,
1768                                        OBJ_PROP_FLAG_READWRITE);
1769     }
1770 
1771 #ifndef CONFIG_USER_ONLY
1772     if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
1773         /* Add the has_el3 state CPU property only if EL3 is allowed.  This will
1774          * prevent "has_el3" from existing on CPUs which cannot support EL3.
1775          */
1776         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property);
1777 
1778         object_property_add_link(obj, "secure-memory",
1779                                  TYPE_MEMORY_REGION,
1780                                  (Object **)&cpu->secure_memory,
1781                                  qdev_prop_allow_set_link_before_realize,
1782                                  OBJ_PROP_LINK_STRONG);
1783     }
1784 
1785     if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
1786         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property);
1787     }
1788 #endif
1789 
1790     if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
1791         cpu->has_pmu = true;
1792         object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu);
1793     }
1794 
1795     /*
1796      * Allow user to turn off VFP and Neon support, but only for TCG --
1797      * KVM does not currently allow us to lie to the guest about its
1798      * ID/feature registers, so the guest always sees what the host has.
1799      */
1800     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1801         if (cpu_isar_feature(aa64_fp_simd, cpu)) {
1802             cpu->has_vfp = true;
1803             cpu->has_vfp_d32 = true;
1804             if (tcg_enabled() || qtest_enabled()) {
1805                 qdev_property_add_static(DEVICE(obj),
1806                                          &arm_cpu_has_vfp_property);
1807             }
1808         }
1809     } else if (cpu_isar_feature(aa32_vfp, cpu)) {
1810         cpu->has_vfp = true;
1811         if (tcg_enabled() || qtest_enabled()) {
1812             qdev_property_add_static(DEVICE(obj),
1813                                      &arm_cpu_has_vfp_property);
1814         }
1815         if (cpu_isar_feature(aa32_simd_r32, cpu)) {
1816             cpu->has_vfp_d32 = true;
1817             /*
1818              * The permitted values of the SIMDReg bits [3:0] on
1819              * Armv8-A are either 0b0000 or 0b0010. On such CPUs,
1820              * make sure that has_vfp_d32 cannot be set to false.
1821              */
1822             if ((tcg_enabled() || qtest_enabled())
1823                 && !(arm_feature(&cpu->env, ARM_FEATURE_V8)
1824                      && !arm_feature(&cpu->env, ARM_FEATURE_M))) {
1825                 qdev_property_add_static(DEVICE(obj),
1826                                          &arm_cpu_has_vfp_d32_property);
1827             }
1828         }
1829     }
1830 
1831     if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
1832         cpu->has_neon = true;
1833         if (!kvm_enabled()) {
1834             qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property);
1835         }
1836     }
1837 
1838     if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
1839         arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
1840         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
1841     }
1842 
1843     if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
1844         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
1845         if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1846             qdev_property_add_static(DEVICE(obj),
1847                                      &arm_cpu_pmsav7_dregion_property);
1848         }
1849     }
1850 
1851     if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
1852         object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
1853                                  qdev_prop_allow_set_link_before_realize,
1854                                  OBJ_PROP_LINK_STRONG);
1855         /*
1856          * M profile: initial value of the Secure VTOR. We can't just use
1857          * a simple DEFINE_PROP_UINT32 for this because we want to permit
1858          * the property to be set after realize.
1859          */
1860         object_property_add_uint32_ptr(obj, "init-svtor",
1861                                        &cpu->init_svtor,
1862                                        OBJ_PROP_FLAG_READWRITE);
1863     }
1864     if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
1865         /*
1866          * Initial value of the NS VTOR (for cores without the Security
1867          * extension, this is the only VTOR)
1868          */
1869         object_property_add_uint32_ptr(obj, "init-nsvtor",
1870                                        &cpu->init_nsvtor,
1871                                        OBJ_PROP_FLAG_READWRITE);
1872     }
1873 
1874     /* Not DEFINE_PROP_UINT32: we want this to be settable after realize */
1875     object_property_add_uint32_ptr(obj, "psci-conduit",
1876                                    &cpu->psci_conduit,
1877                                    OBJ_PROP_FLAG_READWRITE);
1878 
1879     qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
1880 
1881     if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
1882         qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
1883     }
1884 
1885     if (kvm_enabled()) {
1886         kvm_arm_add_vcpu_properties(cpu);
1887     }
1888 
1889 #ifndef CONFIG_USER_ONLY
1890     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
1891         cpu_isar_feature(aa64_mte, cpu)) {
1892         object_property_add_link(obj, "tag-memory",
1893                                  TYPE_MEMORY_REGION,
1894                                  (Object **)&cpu->tag_memory,
1895                                  qdev_prop_allow_set_link_before_realize,
1896                                  OBJ_PROP_LINK_STRONG);
1897 
1898         if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
1899             object_property_add_link(obj, "secure-tag-memory",
1900                                      TYPE_MEMORY_REGION,
1901                                      (Object **)&cpu->secure_tag_memory,
1902                                      qdev_prop_allow_set_link_before_realize,
1903                                      OBJ_PROP_LINK_STRONG);
1904         }
1905     }
1906 #endif
1907 }
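
/*
 * Sketch of how a board consumes the properties added above (compare
 * hw/arm/virt.c, which does something very similar before realize):
 *
 *     object_property_set_bool(cpuobj, "has_el3", false, NULL);
 *     object_property_set_link(cpuobj, "secure-memory",
 *                              OBJECT(secure_sysmem), &error_abort);
 *
 * Here "cpuobj" and "secure_sysmem" stand in for the board's own
 * variables.
 */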
1908 
1909 static void arm_cpu_finalizefn(Object *obj)
1910 {
1911     ARMCPU *cpu = ARM_CPU(obj);
1912     ARMELChangeHook *hook, *next;
1913 
1914     g_hash_table_destroy(cpu->cp_regs);
1915 
1916     QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
1917         QLIST_REMOVE(hook, node);
1918         g_free(hook);
1919     }
1920     QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
1921         QLIST_REMOVE(hook, node);
1922         g_free(hook);
1923     }
1924 #ifndef CONFIG_USER_ONLY
1925     if (cpu->pmu_timer) {
1926         timer_free(cpu->pmu_timer);
1927     }
1928     if (cpu->wfxt_timer) {
1929         timer_free(cpu->wfxt_timer);
1930     }
1931 #endif
1932 }
1933 
1934 void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
1935 {
1936     Error *local_err = NULL;
1937 
1938 #ifdef TARGET_AARCH64
1939     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1940         arm_cpu_sve_finalize(cpu, &local_err);
1941         if (local_err != NULL) {
1942             error_propagate(errp, local_err);
1943             return;
1944         }
1945 
1946         /*
1947          * FEAT_SME is not architecturally dependent on FEAT_SVE (unless
1948          * FEAT_SME_FA64 is present). However our implementation currently
1949          * assumes it, so if the user asked for sve=off then turn off SME also.
1950          * (KVM doesn't currently support SME at all.)
1951          */
1952         if (cpu_isar_feature(aa64_sme, cpu) && !cpu_isar_feature(aa64_sve, cpu)) {
1953             object_property_set_bool(OBJECT(cpu), "sme", false, &error_abort);
1954         }
1955 
1956         arm_cpu_sme_finalize(cpu, &local_err);
1957         if (local_err != NULL) {
1958             error_propagate(errp, local_err);
1959             return;
1960         }
1961 
1962         arm_cpu_pauth_finalize(cpu, &local_err);
1963         if (local_err != NULL) {
1964             error_propagate(errp, local_err);
1965             return;
1966         }
1967 
1968         arm_cpu_lpa2_finalize(cpu, &local_err);
1969         if (local_err != NULL) {
1970             error_propagate(errp, local_err);
1971             return;
1972         }
1973     }
1974 #endif
1975 
1976     if (kvm_enabled()) {
1977         kvm_arm_steal_time_finalize(cpu, &local_err);
1978         if (local_err != NULL) {
1979             error_propagate(errp, local_err);
1980             return;
1981         }
1982     }
1983 }
1984 
1985 static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
1986 {
1987     CPUState *cs = CPU(dev);
1988     ARMCPU *cpu = ARM_CPU(dev);
1989     ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
1990     CPUARMState *env = &cpu->env;
1991     Error *local_err = NULL;
1992 
1993 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
1994     /* Use pc-relative instructions in system-mode */
1995     tcg_cflags_set(cs, CF_PCREL);
1996 #endif
1997 
1998     /* If we needed to query the host kernel for the CPU features
1999      * then it's possible that it might have failed in the initfn, but
2000      * this is the first point where we can report it.
2001      */
2002     if (cpu->host_cpu_probe_failed) {
2003         if (!kvm_enabled() && !hvf_enabled()) {
2004             error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF");
2005         } else {
2006             error_setg(errp, "Failed to retrieve host CPU features");
2007         }
2008         return;
2009     }
2010 
2011     if (!cpu->gt_cntfrq_hz) {
2012         /*
2013          * 0 means "the board didn't set a value, use the default". (We also
2014          * get here for the CONFIG_USER_ONLY case.)
2015          * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
2016          * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
2017          * which gives a 16ns tick period.
2018          *
2019          * We will use the back-compat value:
2020          *  - for QEMU CPU types added before we standardized on 1GHz
2021          *  - for versioned machine types with a version of 9.0 or earlier
2022          */
2023         if (arm_feature(env, ARM_FEATURE_BACKCOMPAT_CNTFRQ) ||
2024             cpu->backcompat_cntfrq) {
2025             cpu->gt_cntfrq_hz = GTIMER_BACKCOMPAT_HZ;
2026         } else {
2027             cpu->gt_cntfrq_hz = GTIMER_DEFAULT_HZ;
2028         }
2029     }
2030 
2031 #ifndef CONFIG_USER_ONLY
2032     /* The NVIC and M-profile CPU are two halves of a single piece of
2033      * hardware; trying to use one without the other is a command line
2034      * error and will result in segfaults if not caught here.
2035      */
2036     if (arm_feature(env, ARM_FEATURE_M)) {
2037         if (!env->nvic) {
2038             error_setg(errp, "This board cannot be used with Cortex-M CPUs");
2039             return;
2040         }
2041     } else {
2042         if (env->nvic) {
2043             error_setg(errp, "This board can only be used with Cortex-M CPUs");
2044             return;
2045         }
2046     }
2047 
2048     if (!tcg_enabled() && !qtest_enabled()) {
2049         /*
2050          * We assume that no accelerator except TCG (and the "not really an
2051          * accelerator" qtest) can handle these features, because Arm hardware
2052          * virtualization can't virtualize them.
2053          *
2054          * Catch all the cases which might cause us to create more than one
2055          * address space for the CPU (otherwise we will assert() later in
2056          * cpu_address_space_init()).
2057          */
2058         if (arm_feature(env, ARM_FEATURE_M)) {
2059             error_setg(errp,
2060                        "Cannot enable %s when using an M-profile guest CPU",
2061                        current_accel_name());
2062             return;
2063         }
2064         if (cpu->has_el3) {
2065             error_setg(errp,
2066                        "Cannot enable %s when guest CPU has EL3 enabled",
2067                        current_accel_name());
2068             return;
2069         }
2070         if (cpu->tag_memory) {
2071             error_setg(errp,
2072                        "Cannot enable %s when guest CPUs has MTE enabled",
2073                        current_accel_name());
2074             return;
2075         }
2076     }
2077 
2078     {
2079         uint64_t scale = gt_cntfrq_period_ns(cpu);
2080 
2081         cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
2082                                                arm_gt_ptimer_cb, cpu);
2083         cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
2084                                                arm_gt_vtimer_cb, cpu);
2085         cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
2086                                               arm_gt_htimer_cb, cpu);
2087         cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
2088                                               arm_gt_stimer_cb, cpu);
2089         cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
2090                                                   arm_gt_hvtimer_cb, cpu);
2091     }
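
    /*
     * Note on units (assuming the usual QEMUTimer semantics): since each
     * timer above is created with the tick period as its scale, deadlines
     * passed to timer_mod() are expressed in generic-timer ticks, and the
     * timer core multiplies by the scale to get the QEMU_CLOCK_VIRTUAL
     * nanosecond deadline.
     */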
2092 #endif
2093 
2094     cpu_exec_realizefn(cs, &local_err);
2095     if (local_err != NULL) {
2096         error_propagate(errp, local_err);
2097         return;
2098     }
2099 
2100     arm_cpu_finalize_features(cpu, &local_err);
2101     if (local_err != NULL) {
2102         error_propagate(errp, local_err);
2103         return;
2104     }
2105 
2106 #ifdef CONFIG_USER_ONLY
2107     /*
2108      * User mode relies on IC IVAU instructions to catch modification of
2109      * dual-mapped code.
2110      *
2111      * Clear CTR_EL0.DIC to ensure that software that honors these flags uses
2112      * IC IVAU even if the emulated processor does not normally require it.
2113      */
2114     cpu->ctr = FIELD_DP64(cpu->ctr, CTR_EL0, DIC, 0);
2115 #endif
2116 
2117     if (arm_feature(env, ARM_FEATURE_AARCH64) &&
2118         cpu->has_vfp != cpu->has_neon) {
2119         /*
2120          * This is an architectural requirement for AArch64; AArch32 is
2121          * more flexible and permits VFP-no-Neon and Neon-no-VFP.
2122          */
2123         error_setg(errp,
2124                    "AArch64 CPUs must have both VFP and Neon or neither");
2125         return;
2126     }
2127 
2128     if (cpu->has_vfp_d32 != cpu->has_neon) {
2129         error_setg(errp, "ARM CPUs must have both VFP-D32 and Neon or neither");
2130         return;
2131     }
2132 
2133     if (!cpu->has_vfp_d32) {
2134         uint32_t u;
2135 
2136         u = cpu->isar.mvfr0;
2137         u = FIELD_DP32(u, MVFR0, SIMDREG, 1); /* 16 registers */
2138         cpu->isar.mvfr0 = u;
2139     }
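
    /*
     * A note on the FIELD_DP32() pattern used here and below: it returns
     * its first argument with the named bitfield replaced by the given
     * value. For example, the deposit above rewrites MVFR0.SIMDReg
     * (bits [3:0]) to 1, advertising 16 rather than 32 doubleword
     * registers while leaving every other MVFR0 field untouched.
     */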
2140 
2141     if (!cpu->has_vfp) {
2142         uint64_t t;
2143         uint32_t u;
2144 
2145         t = cpu->isar.id_aa64isar1;
2146         t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
2147         cpu->isar.id_aa64isar1 = t;
2148 
2149         t = cpu->isar.id_aa64pfr0;
2150         t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
2151         cpu->isar.id_aa64pfr0 = t;
2152 
2153         u = cpu->isar.id_isar6;
2154         u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
2155         u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
2156         cpu->isar.id_isar6 = u;
2157 
2158         u = cpu->isar.mvfr0;
2159         u = FIELD_DP32(u, MVFR0, FPSP, 0);
2160         u = FIELD_DP32(u, MVFR0, FPDP, 0);
2161         u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
2162         u = FIELD_DP32(u, MVFR0, FPSQRT, 0);
2163         u = FIELD_DP32(u, MVFR0, FPROUND, 0);
2164         if (!arm_feature(env, ARM_FEATURE_M)) {
2165             u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
2166             u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
2167         }
2168         cpu->isar.mvfr0 = u;
2169 
2170         u = cpu->isar.mvfr1;
2171         u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
2172         u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
2173         u = FIELD_DP32(u, MVFR1, FPHP, 0);
2174         if (arm_feature(env, ARM_FEATURE_M)) {
2175             u = FIELD_DP32(u, MVFR1, FP16, 0);
2176         }
2177         cpu->isar.mvfr1 = u;
2178 
2179         u = cpu->isar.mvfr2;
2180         u = FIELD_DP32(u, MVFR2, FPMISC, 0);
2181         cpu->isar.mvfr2 = u;
2182     }
2183 
2184     if (!cpu->has_neon) {
2185         uint64_t t;
2186         uint32_t u;
2187 
2188         unset_feature(env, ARM_FEATURE_NEON);
2189 
2190         t = cpu->isar.id_aa64isar0;
2191         t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0);
2192         t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0);
2193         t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0);
2194         t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 0);
2195         t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0);
2196         t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0);
2197         t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
2198         cpu->isar.id_aa64isar0 = t;
2199 
2200         t = cpu->isar.id_aa64isar1;
2201         t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
2202         t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
2203         t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
2204         cpu->isar.id_aa64isar1 = t;
2205 
2206         t = cpu->isar.id_aa64pfr0;
2207         t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
2208         cpu->isar.id_aa64pfr0 = t;
2209 
2210         u = cpu->isar.id_isar5;
2211         u = FIELD_DP32(u, ID_ISAR5, AES, 0);
2212         u = FIELD_DP32(u, ID_ISAR5, SHA1, 0);
2213         u = FIELD_DP32(u, ID_ISAR5, SHA2, 0);
2214         u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
2215         u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
2216         cpu->isar.id_isar5 = u;
2217 
2218         u = cpu->isar.id_isar6;
2219         u = FIELD_DP32(u, ID_ISAR6, DP, 0);
2220         u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
2221         u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
2222         u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
2223         cpu->isar.id_isar6 = u;
2224 
2225         if (!arm_feature(env, ARM_FEATURE_M)) {
2226             u = cpu->isar.mvfr1;
2227             u = FIELD_DP32(u, MVFR1, SIMDLS, 0);
2228             u = FIELD_DP32(u, MVFR1, SIMDINT, 0);
2229             u = FIELD_DP32(u, MVFR1, SIMDSP, 0);
2230             u = FIELD_DP32(u, MVFR1, SIMDHP, 0);
2231             cpu->isar.mvfr1 = u;
2232 
2233             u = cpu->isar.mvfr2;
2234             u = FIELD_DP32(u, MVFR2, SIMDMISC, 0);
2235             cpu->isar.mvfr2 = u;
2236         }
2237     }
2238 
2239     if (!cpu->has_neon && !cpu->has_vfp) {
2240         uint64_t t;
2241         uint32_t u;
2242 
2243         t = cpu->isar.id_aa64isar0;
2244         t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
2245         cpu->isar.id_aa64isar0 = t;
2246 
2247         t = cpu->isar.id_aa64isar1;
2248         t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
2249         cpu->isar.id_aa64isar1 = t;
2250 
2251         u = cpu->isar.mvfr0;
2252         u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
2253         cpu->isar.mvfr0 = u;
2254 
2255         /* Despite the name, this field covers both VFP and Neon */
2256         u = cpu->isar.mvfr1;
2257         u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0);
2258         cpu->isar.mvfr1 = u;
2259     }
2260 
2261     if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) {
2262         uint32_t u;
2263 
2264         unset_feature(env, ARM_FEATURE_THUMB_DSP);
2265 
2266         u = cpu->isar.id_isar1;
2267         u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
2268         cpu->isar.id_isar1 = u;
2269 
2270         u = cpu->isar.id_isar2;
2271         u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
2272         u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
2273         cpu->isar.id_isar2 = u;
2274 
2275         u = cpu->isar.id_isar3;
2276         u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
2277         u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
2278         cpu->isar.id_isar3 = u;
2279     }
2280 
2281 
2283      * We rely on no XScale CPU having VFP so we can use the same bits in the
2284      * TB flags field for VECSTRIDE and XSCALE_CPAR.
2285      */
2286     assert(arm_feature(env, ARM_FEATURE_AARCH64) ||
2287            !cpu_isar_feature(aa32_vfp_simd, cpu) ||
2288            !arm_feature(env, ARM_FEATURE_XSCALE));
2289 
2290 #ifndef CONFIG_USER_ONLY
2291     {
2292         int pagebits;
2293         if (arm_feature(env, ARM_FEATURE_V7) &&
2294             !arm_feature(env, ARM_FEATURE_M) &&
2295             !arm_feature(env, ARM_FEATURE_PMSA)) {
2296             /*
2297              * v7VMSA drops support for the old ARMv5 tiny pages,
2298              * so we can use 4K pages.
2299              */
2300             pagebits = 12;
2301         } else {
2302             /*
2303              * For CPUs which might have tiny 1K pages, or which have an
2304              * MPU and might have small region sizes, stick with 1K pages.
2305              */
2306             pagebits = 10;
2307         }
2308         if (!set_preferred_target_page_bits(pagebits)) {
2309             /*
2310              * This can only ever happen for hotplugging a CPU, or if
2311              * the board code incorrectly creates a CPU which it has
2312              * promised via minimum_page_size that it will not.
2313              */
2314             error_setg(errp, "This CPU requires a smaller page size "
2315                        "than the system is using");
2316             return;
2317         }
2318     }
2319 #endif
2320 
2321     /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
2322      * We don't support setting cluster ID ([16..23]) (known as Aff2
2323      * in later ARM ARM versions), or any of the higher affinity level fields,
2324      * so these bits always RAZ.
2325      */
2326     if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
2327         cpu->mp_affinity = arm_build_mp_affinity(cs->cpu_index,
2328                                                  ARM_DEFAULT_CPUS_PER_CLUSTER);
2329     }
2330 
2331     if (cpu->reset_hivecs) {
2332         cpu->reset_sctlr |= (1 << 13);
2333     }
2334 
2335     if (cpu->cfgend) {
2336         if (arm_feature(env, ARM_FEATURE_V7)) {
2337             cpu->reset_sctlr |= SCTLR_EE;
2338         } else {
2339             cpu->reset_sctlr |= SCTLR_B;
2340         }
2341     }
2342 
2343     if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) {
2344         /* If the has_el3 CPU property is disabled then we need to disable the
2345          * feature.
2346          */
2347         unset_feature(env, ARM_FEATURE_EL3);
2348 
2349         /*
2350          * Disable the security extension feature bits in the processor
2351          * feature registers as well.
2352          */
2353         cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
2354         cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
2355         cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
2356                                            ID_AA64PFR0, EL3, 0);
2357 
2358         /* Disable the realm management extension, which requires EL3. */
2359         cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
2360                                            ID_AA64PFR0, RME, 0);
2361     }
2362 
2363     if (!cpu->has_el2) {
2364         unset_feature(env, ARM_FEATURE_EL2);
2365     }
2366 
2367     if (!cpu->has_pmu) {
2368         unset_feature(env, ARM_FEATURE_PMU);
2369     }
2370     if (arm_feature(env, ARM_FEATURE_PMU)) {
2371         pmu_init(cpu);
2372 
2373         if (!kvm_enabled()) {
2374             arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
2375             arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
2376         }
2377 
2378 #ifndef CONFIG_USER_ONLY
2379         cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
2380                 cpu);
2381 #endif
2382     } else {
2383         cpu->isar.id_aa64dfr0 =
2384             FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
2385         cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
2386         cpu->pmceid0 = 0;
2387         cpu->pmceid1 = 0;
2388     }
2389 
2390     if (!arm_feature(env, ARM_FEATURE_EL2)) {
2391         /*
2392          * Disable the hypervisor feature bits in the processor feature
2393          * registers if we don't have EL2.
2394          */
2395         cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
2396                                            ID_AA64PFR0, EL2, 0);
2397         cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
2398                                        ID_PFR1, VIRTUALIZATION, 0);
2399     }
2400 
2401     if (cpu_isar_feature(aa64_mte, cpu)) {
2402         /*
2403          * The architectural range of GM blocksize is 2-6; however, QEMU
2404          * doesn't support a blocksize of 2 (see HELPER(ldgm)).
2405          */
2406         if (tcg_enabled()) {
2407             assert(cpu->gm_blocksize >= 3 && cpu->gm_blocksize <= 6);
2408         }
2409 
2410 #ifndef CONFIG_USER_ONLY
2411         /*
2412          * If we run with TCG and do not have tag-memory provided by
2413          * the machine, then reduce MTE support to instructions enabled at EL0.
2414          * This matches Cortex-A710 BROADCASTMTE input being LOW.
2415          */
2416         if (tcg_enabled() && cpu->tag_memory == NULL) {
2417             cpu->isar.id_aa64pfr1 =
2418                 FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
2419         }
2420 
2421         /*
2422          * If MTE is supported by the host but should not be enabled
2423          * on the guest (i.e. mte=off), clear the guest's MTE bits.
2424          */
2425         if (kvm_enabled() && !cpu->kvm_mte) {
2426             cpu->isar.id_aa64pfr1 = FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
2427         }
2428 #endif
2429     }
2430 
2431 #ifndef CONFIG_USER_ONLY
2432     if (tcg_enabled() && cpu_isar_feature(aa64_wfxt, cpu)) {
2433         cpu->wfxt_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2434                                        arm_wfxt_timer_cb, cpu);
2435     }
2436 #endif
2437 
2438     if (tcg_enabled()) {
2439         /*
2440          * Don't report some architectural features in the ID registers
2441          * where TCG does not yet implement it (not even a minimal
2442          * stub version). This avoids guests falling over when they
2443          * try to access the non-existent system registers for them.
2444          */
2445         /* FEAT_SPE (Statistical Profiling Extension) */
2446         cpu->isar.id_aa64dfr0 =
2447             FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
2448         /* FEAT_TRBE (Trace Buffer Extension) */
2449         cpu->isar.id_aa64dfr0 =
2450             FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0);
2451         /* FEAT_TRF (Self-hosted Trace Extension) */
2452         cpu->isar.id_aa64dfr0 =
2453             FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
2454         cpu->isar.id_dfr0 =
2455             FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, TRACEFILT, 0);
2456         /* Trace Macrocell system register access */
2457         cpu->isar.id_aa64dfr0 =
2458             FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEVER, 0);
2459         cpu->isar.id_dfr0 =
2460             FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPTRC, 0);
2461         /* Memory mapped trace */
2462         cpu->isar.id_dfr0 =
2463             FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, MMAPTRC, 0);
2464         /* FEAT_AMU (Activity Monitors Extension) */
2465         cpu->isar.id_aa64pfr0 =
2466             FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, AMU, 0);
2467         cpu->isar.id_pfr0 =
2468             FIELD_DP32(cpu->isar.id_pfr0, ID_PFR0, AMU, 0);
2469         /* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */
2470         cpu->isar.id_aa64pfr0 =
2471             FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0);
2472     }
2473 
2474     /* MPU can be configured out of a PMSA CPU either by setting has-mpu
2475      * to false or by setting pmsav7-dregion to 0.
2476      */
2477     if (!cpu->has_mpu || cpu->pmsav7_dregion == 0) {
2478         cpu->has_mpu = false;
2479         cpu->pmsav7_dregion = 0;
2480         cpu->pmsav8r_hdregion = 0;
2481     }
2482 
2483     if (arm_feature(env, ARM_FEATURE_PMSA) &&
2484         arm_feature(env, ARM_FEATURE_V7)) {
2485         uint32_t nr = cpu->pmsav7_dregion;
2486 
2487         if (nr > 0xff) {
2488             error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
2489             return;
2490         }
2491 
2492         if (nr) {
2493             if (arm_feature(env, ARM_FEATURE_V8)) {
2494                 /* PMSAv8 */
2495                 env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
2496                 env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
2497                 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2498                     env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
2499                     env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
2500                 }
2501             } else {
2502                 env->pmsav7.drbar = g_new0(uint32_t, nr);
2503                 env->pmsav7.drsr = g_new0(uint32_t, nr);
2504                 env->pmsav7.dracr = g_new0(uint32_t, nr);
2505             }
2506         }
2507 
2508         if (cpu->pmsav8r_hdregion > 0xff) {
2509             error_setg(errp, "PMSAv8 MPU EL2 #regions invalid %" PRIu32,
2510                               cpu->pmsav8r_hdregion);
2511             return;
2512         }
2513 
2514         if (cpu->pmsav8r_hdregion) {
2515             env->pmsav8.hprbar = g_new0(uint32_t,
2516                                         cpu->pmsav8r_hdregion);
2517             env->pmsav8.hprlar = g_new0(uint32_t,
2518                                         cpu->pmsav8r_hdregion);
2519         }
2520     }
2521 
2522     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2523         uint32_t nr = cpu->sau_sregion;
2524 
2525         if (nr > 0xff) {
2526             error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
2527             return;
2528         }
2529 
2530         if (nr) {
2531             env->sau.rbar = g_new0(uint32_t, nr);
2532             env->sau.rlar = g_new0(uint32_t, nr);
2533         }
2534     }
2535 
2536     if (arm_feature(env, ARM_FEATURE_EL3)) {
2537         set_feature(env, ARM_FEATURE_VBAR);
2538     }
2539 
2540 #ifndef CONFIG_USER_ONLY
2541     if (tcg_enabled() && cpu_isar_feature(aa64_rme, cpu)) {
2542         arm_register_el_change_hook(cpu, &gt_rme_post_el_change, 0);
2543     }
2544 #endif
2545 
2546     register_cp_regs_for_features(cpu);
2547     arm_cpu_register_gdb_regs_for_features(cpu);
2548     arm_cpu_register_gdb_commands(cpu);
2549 
2550     init_cpreg_list(cpu);
2551 
2552 #ifndef CONFIG_USER_ONLY
2553     MachineState *ms = MACHINE(qdev_get_machine());
2554     unsigned int smp_cpus = ms->smp.cpus;
2555     bool has_secure = cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY);
2556 
2557     /*
2558      * We must set cs->num_ases to the final value before
2559      * the first call to cpu_address_space_init.
2560      */
2561     if (cpu->tag_memory != NULL) {
2562         cs->num_ases = 3 + has_secure;
2563     } else {
2564         cs->num_ases = 1 + has_secure;
2565     }
2566 
2567     if (has_secure) {
2568         if (!cpu->secure_memory) {
2569             cpu->secure_memory = cs->memory;
2570         }
2571         cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
2572                                cpu->secure_memory);
2573     }
2574 
2575     if (cpu->tag_memory != NULL) {
2576         cpu_address_space_init(cs, ARMASIdx_TagNS, "cpu-tag-memory",
2577                                cpu->tag_memory);
2578         if (has_secure) {
2579             cpu_address_space_init(cs, ARMASIdx_TagS, "cpu-tag-memory",
2580                                    cpu->secure_tag_memory);
2581         }
2582     }
2583 
2584     cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);
2585 
2586     /* No core_count specified, default to smp_cpus. */
2587     if (cpu->core_count == -1) {
2588         cpu->core_count = smp_cpus;
2589     }
2590 #endif
2591 
2592     if (tcg_enabled()) {
2593         int dcz_blocklen = 4 << cpu->dcz_blocksize;
2594 
2595         /*
2596          * We only support DCZ blocklen that fits on one page.
2597          *
2598      * Architecturally this is always true.  However TARGET_PAGE_SIZE
2599          * is variable and, for compatibility with -machine virt-2.7,
2600          * is only 1KiB, as an artifact of legacy ARMv5 subpage support.
2601          * But even then, while the largest architectural DCZ blocklen
2602          * is 2KiB, no cpu actually uses such a large blocklen.
2603          */
2604         assert(dcz_blocklen <= TARGET_PAGE_SIZE);
2605 
2606         /*
2607          * We only support DCZ blocksize >= 2*TAG_GRANULE, which is to say
2608          * both nibbles of each byte storing tag data may be written at once.
2609          * Since TAG_GRANULE is 16, this means that blocklen must be >= 32.
2610          */
2611         if (cpu_isar_feature(aa64_mte, cpu)) {
2612             assert(dcz_blocklen >= 2 * TAG_GRANULE);
2613         }
2614     }
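
    /*
     * Worked example: the common dcz_blocksize of 4 gives a blocklen of
     * 4 << 4 = 64 bytes, which fits in any page size QEMU uses and is
     * comfortably above the 2 * TAG_GRANULE = 32 byte minimum that MTE
     * requires.
     */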
2615 
2616     qemu_init_vcpu(cs);
2617     cpu_reset(cs);
2618 
2619     acc->parent_realize(dev, errp);
2620 }
2621 
2622 static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
2623 {
2624     ObjectClass *oc;
2625     char *typename;
2626     char **cpuname;
2627     const char *cpunamestr;
2628 
2629     cpuname = g_strsplit(cpu_model, ",", 1);
2630     cpunamestr = cpuname[0];
2631 #ifdef CONFIG_USER_ONLY
2632     /* For backwards compatibility usermode emulation allows "-cpu any",
2633      * which has the same semantics as "-cpu max".
2634      */
2635     if (!strcmp(cpunamestr, "any")) {
2636         cpunamestr = "max";
2637     }
2638 #endif
2639     typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
2640     oc = object_class_by_name(typename);
2641     g_strfreev(cpuname);
2642     g_free(typename);
2643 
2644     return oc;
2645 }
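
/*
 * Example: for "-cpu cortex-a15" the generic option parser hands this
 * function the model name "cortex-a15", and ARM_CPU_TYPE_NAME() turns it
 * into the QOM typename "cortex-a15-arm-cpu" used for the class lookup.
 */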
2646 
2647 static Property arm_cpu_properties[] = {
2648     DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
2649     DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
2650                         mp_affinity, ARM64_AFFINITY_INVALID),
2651     DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
2652     DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
2653     /* True to default to the backward-compat old CNTFRQ rather than 1GHz */
2654     DEFINE_PROP_BOOL("backcompat-cntfrq", ARMCPU, backcompat_cntfrq, false),
2655     DEFINE_PROP_END_OF_LIST()
2656 };
2657 
2658 static const gchar *arm_gdb_arch_name(CPUState *cs)
2659 {
2660     ARMCPU *cpu = ARM_CPU(cs);
2661     CPUARMState *env = &cpu->env;
2662 
2663     if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2664         return "iwmmxt";
2665     }
2666     return "arm";
2667 }
2668 
2669 #ifndef CONFIG_USER_ONLY
2670 #include "hw/core/sysemu-cpu-ops.h"
2671 
2672 static const struct SysemuCPUOps arm_sysemu_ops = {
2673     .get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug,
2674     .asidx_from_attrs = arm_asidx_from_attrs,
2675     .write_elf32_note = arm_cpu_write_elf32_note,
2676     .write_elf64_note = arm_cpu_write_elf64_note,
2677     .virtio_is_big_endian = arm_cpu_virtio_is_big_endian,
2678     .legacy_vmsd = &vmstate_arm_cpu,
2679 };
2680 #endif
2681 
2682 #ifdef CONFIG_TCG
2683 static const TCGCPUOps arm_tcg_ops = {
2684     .initialize = arm_translate_init,
2685     .synchronize_from_tb = arm_cpu_synchronize_from_tb,
2686     .debug_excp_handler = arm_debug_excp_handler,
2687     .restore_state_to_opc = arm_restore_state_to_opc,
2688 
2689 #ifdef CONFIG_USER_ONLY
2690     .record_sigsegv = arm_cpu_record_sigsegv,
2691     .record_sigbus = arm_cpu_record_sigbus,
2692 #else
2693     .tlb_fill_align = arm_cpu_tlb_fill_align,
2694     .cpu_exec_interrupt = arm_cpu_exec_interrupt,
2695     .cpu_exec_halt = arm_cpu_exec_halt,
2696     .do_interrupt = arm_cpu_do_interrupt,
2697     .do_transaction_failed = arm_cpu_do_transaction_failed,
2698     .do_unaligned_access = arm_cpu_do_unaligned_access,
2699     .adjust_watchpoint_address = arm_adjust_watchpoint_address,
2700     .debug_check_watchpoint = arm_debug_check_watchpoint,
2701     .debug_check_breakpoint = arm_debug_check_breakpoint,
2702 #endif /* !CONFIG_USER_ONLY */
2703 };
2704 #endif /* CONFIG_TCG */
2705 
2706 static void arm_cpu_class_init(ObjectClass *oc, void *data)
2707 {
2708     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
2709     CPUClass *cc = CPU_CLASS(acc);
2710     DeviceClass *dc = DEVICE_CLASS(oc);
2711     ResettableClass *rc = RESETTABLE_CLASS(oc);
2712 
2713     device_class_set_parent_realize(dc, arm_cpu_realizefn,
2714                                     &acc->parent_realize);
2715 
2716     device_class_set_props(dc, arm_cpu_properties);
2717 
2718     resettable_class_set_parent_phases(rc, NULL, arm_cpu_reset_hold, NULL,
2719                                        &acc->parent_phases);
2720 
2721     cc->class_by_name = arm_cpu_class_by_name;
2722     cc->has_work = arm_cpu_has_work;
2723     cc->mmu_index = arm_cpu_mmu_index;
2724     cc->dump_state = arm_cpu_dump_state;
2725     cc->set_pc = arm_cpu_set_pc;
2726     cc->get_pc = arm_cpu_get_pc;
2727     cc->gdb_read_register = arm_cpu_gdb_read_register;
2728     cc->gdb_write_register = arm_cpu_gdb_write_register;
2729 #ifndef CONFIG_USER_ONLY
2730     cc->sysemu_ops = &arm_sysemu_ops;
2731 #endif
2732     cc->gdb_arch_name = arm_gdb_arch_name;
2733     cc->gdb_stop_before_watchpoint = true;
2734     cc->disas_set_info = arm_disas_set_info;
2735 
2736 #ifdef CONFIG_TCG
2737     cc->tcg_ops = &arm_tcg_ops;
2738 #endif /* CONFIG_TCG */
2739 }
2740 
2741 static void arm_cpu_instance_init(Object *obj)
2742 {
2743     ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
2744 
2745     acc->info->initfn(obj);
2746     arm_cpu_post_init(obj);
2747 }
2748 
2749 static void cpu_register_class_init(ObjectClass *oc, void *data)
2750 {
2751     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
2752     CPUClass *cc = CPU_CLASS(acc);
2753 
2754     acc->info = data;
2755     cc->gdb_core_xml_file = "arm-core.xml";
2756 }
2757 
2758 void arm_cpu_register(const ARMCPUInfo *info)
2759 {
2760     TypeInfo type_info = {
2761         .parent = TYPE_ARM_CPU,
2762         .instance_init = arm_cpu_instance_init,
2763         .class_init = info->class_init ?: cpu_register_class_init,
2764         .class_data = (void *)info,
2765     };
2766 
2767     type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
2768     type_register_static(&type_info);
2769     g_free((void *)type_info.name);
2770 }
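
/*
 * Usage sketch (the real tables live with each CPU model's code; the
 * entry below is hypothetical):
 *
 *     static const ARMCPUInfo sample_cpu = {
 *         .name = "sample-cpu",
 *         .initfn = sample_cpu_initfn,
 *     };
 *
 *     arm_cpu_register(&sample_cpu);
 *
 * This registers a QOM type named "sample-cpu-arm-cpu" whose instances
 * run sample_cpu_initfn() followed by arm_cpu_post_init().
 */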
2771 
2772 static const TypeInfo arm_cpu_type_info = {
2773     .name = TYPE_ARM_CPU,
2774     .parent = TYPE_CPU,
2775     .instance_size = sizeof(ARMCPU),
2776     .instance_align = __alignof__(ARMCPU),
2777     .instance_init = arm_cpu_initfn,
2778     .instance_finalize = arm_cpu_finalizefn,
2779     .abstract = true,
2780     .class_size = sizeof(ARMCPUClass),
2781     .class_init = arm_cpu_class_init,
2782 };
2783 
2784 static void arm_cpu_register_types(void)
2785 {
2786     type_register_static(&arm_cpu_type_info);
2787 }
2788 
2789 type_init(arm_cpu_register_types)
2790