/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "exec/page-protection.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include <zlib.h> /* for crc32 */
#include "hw/irq.h"
#include "system/cpu-timers.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
#include "target/arm/gtimer.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

static void switch_mode(CPUARMState *env, int mode);

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /*
     * Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
    /*
     * Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (i.e. if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
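
/*
 * Illustrative example (not a regdef from this file): a definition with
 * only a side-effecting write hook, no fieldoffset and no raw accessors
 * would make raw_accessors_invalid() return true, so it must carry
 * ARM_CP_NO_RAW:
 *
 *   { .name = "EXAMPLE_OP", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0,
 *     .opc2 = 0, .access = PL1_W, .type = ARM_CP_NO_RAW,
 *     .writefn = example_op_write },
 */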

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /*
         * Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
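
/*
 * Worked example of the readback check above (illustrative values): if
 * an incoming migration stream carries 0x3 for a register whose low bit
 * is read-only zero, write_raw_cp_reg() stores it, the readback returns
 * 0x2, and write_list_to_cpustate() returns false so the caller can
 * fail the migration instead of silently losing state.
 */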

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /*
     * Initialise the cpreg_indexes[] and cpreg_values[] arrays based on
     * the cp_regs hash. Note that we require the index array to be
     * sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

static bool arm_pan_enabled(CPUARMState *env)
{
    if (is_a64(env)) {
        if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
            return false;
        }
        return env->pstate & PSTATE_PAN;
    } else {
        return env->uncached_cpsr & CPSR_PAN;
    }
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/*
 * Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/*
 * Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                               bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW.  */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR.  */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /*
         * Unlike real hardware, the QEMU TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /*
         * For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     *
     * For AArch32 this is only used for TLBIALLNSNH and VTTBR
     * writes, so only needs to apply to NS PL1&0, not S PL1&0.
     */
    return (ARMMMUIdxBit_E10_1 |
            ARMMMUIdxBit_E10_1_PAN |
            ARMMMUIdxBit_E10_0 |
            ARMMMUIdxBit_Stage2 |
            ARMMMUIdxBit_Stage2_S);
}
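
/*
 * Example (illustrative): a TLBI ALLE1 style handler would consume this
 * mask as
 *   tlb_flush_by_mmuidx(env_cpu(env), alle1_tlbmask(env));
 * flushing the stage 1 EL1&0 MMU indexes and both stage 2 indexes in
 * one call.
 */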

static const ARMCPRegInfo cp_reginfo[] = {
    /*
     * Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in v8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /*
     * Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in v8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_CONTEXTIDR_EL1,
      .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /*
     * NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /*
     * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /*
     * Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /*
     * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /*
     * L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /*
     * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /*
             * VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        /* Only clear the CP10/CP11 fields; other bits read back as stored */
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}
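
/*
 * Example (illustrative): with CPTR_EL2.TCPAC set, an EL1 access to
 * CPACR_EL1 yields CP_ACCESS_TRAP_EL2 here even if CPTR_EL3.TCPAC is
 * also set; the EL2 check is deliberately performed first.
 */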

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /*
     * Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .fgt = FGT_CPACR_EL1,
      .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
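
/*
 * Sketch of the pm_event contract (illustrative, not a registered
 * event): if get_count() returns a free-running value N, the
 * guest-visible counter is maintained as N - delta, where delta is
 * (re)computed by the pmevcntr_op_start()/pmevcntr_op_finish() pairs
 * below; programming the counter therefore adjusts delta, never N.
 */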

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're
 * in usermode, return the host tick count instead (there is no virtual
 * clock to derive a cycle count from there).
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    /* At the fixed 1 GHz ARM_CPU_FREQ the ratio below is exactly 1 */
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    /* Precise instruction counting */
    return icount_enabled() == ICOUNT_PRECISE;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmuv3p1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
}

static bool pmuv3p4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmuv3p4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
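
/*
 * Worked example of the PMCEID mapping above (illustrative): event
 * 0x011 (CPU_CYCLES) has bit 5 (0x20) clear, so it sets bit 17 of
 * PMCEID0; event 0x023 (STALL_FRONTEND) has bit 5 set, so it lands in
 * PMCEID1 at bit 0x23 & 0x1f = 3.
 */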

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * User accessibility of the performance monitor registers is
     * controlled by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow
     * configurable trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
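
/*
 * Example (illustrative): PMUSERENR.{EN,SW,CR,ER} are bits 0..3, so
 * writing 0x4 lets EL0 read PMCCNTR via the CR check above while every
 * other PMU access from EL0 still traps through pmreg_access().
 */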

/*
 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
#define MDCR_EL2_PMU_ENABLE_BITS \
    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)

/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited = false, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2;
    uint8_t hpmn;

    /*
     * We might be called for M-profile cores where MDCR_EL2 doesn't
     * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
     * must be before we read that value.
     */
    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    mdcr_el2 = arm_mdcr_el2_eff(env);
    hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    /* Is event counting prohibited? */
    if (el == 2 && (counter < hpmn || counter == 31)) {
        prohibited = mdcr_el2 & MDCR_HPMD;
    }
    if (secure) {
        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (counter == 31) {
        /*
         * The cycle counter defaults to running. PMCR.DP says "disable
         * the cycle counter when event counting is prohibited".
         * Some MDCR bits disable the cycle counter specifically.
         */
        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            if (secure) {
                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
            }
            if (el == 2) {
                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
            }
        }
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is set up with an
         * event we support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
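
/*
 * Worked example of the filter logic (illustrative): with
 * PMXEVTYPER.U = 1 and NSU = 0 on an EL3-capable CPU, "filtered" is
 * true at any EL0 (Secure: u; Non-secure: u != nsu), so the counter
 * ignores EL0 entirely. Setting NSU = 1 as well makes u == nsu, so the
 * counter then counts at Non-secure EL0 while still ignoring Secure EL0.
 */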

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

static bool pmccntr_clockdiv_enabled(CPUARMState *env)
{
    /*
     * Return true if the clock divider is enabled and the cycle counter
     * is supposed to tick only once every 64 clock cycles. This is
     * controlled by PMCR.D, but if PMCR.LC is set to enable the long
     * (64-bit) cycle counter PMCR.D has no effect.
     */
    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
}
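
/*
 * Truth table for the test above (illustrative): only PMCR.D = 1 with
 * PMCR.LC = 0 selects divide-by-64; any other combination, including
 * D = 1 together with LC = 1, ticks the cycle counter every cycle.
 */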

static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
{
    /* Return true if the specified event counter is configured to be 64 bit */

    /* This isn't intended to be used with the cycle counter */
    assert(counter < 31);

    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_EL2)) {
        /*
         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
         * current security state, so we don't use arm_mdcr_el2_eff() here.
         */
        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

        if (counter >= hpmn) {
            return hlp;
        }
    }
    return env->cp15.c9_pmcr & PMCRLP;
}
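
/*
 * Example (illustrative): with 8 event counters and MDCR_EL2.HPMN = 4,
 * counters 0..3 take their width from PMCR.LP while counters 4..7
 * follow MDCR_EL2.HLP, matching the usual HPMN split between guest-
 * and hypervisor-owned counters.
 */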

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (pmccntr_clockdiv_enabled(env)) {
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1ULL << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
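
/*
 * Numeric sketch of the delta scheme (illustrative): if the underlying
 * cycle count is 1000 when the guest writes PMCCNTR = 0, op_finish
 * records c15_ccnt_delta = 1000; when op_start next runs with the
 * underlying count at 1500, the guest-visible PMCCNTR becomes
 * 1500 - 1000 = 500, i.e. the cycles elapsed since the write.
 */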

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (pmccntr_clockdiv_enabled(env)) {
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
            1ULL << 63 : 1ULL << 31;

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
        int64_t overflow_in;

        if (!pmevcntr_is_64_bit(env, counter)) {
            delta = (uint32_t)delta;
        }
        overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);

    pmu_op_finish(env);
}

static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t pmcr = env->cp15.c9_pmcr;

    /*
     * If EL2 is implemented and enabled for the current security state, reads
     * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
     */
    if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
        pmcr &= ~PMCRN_MASK;
        pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
    }

    return pmcr;
}
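
/*
 * Example (illustrative): on a CPU with 8 counters and MDCR_EL2.HPMN = 4,
 * PMCR.N reads as 8 at EL2 and EL3 but as 4 at EL1/EL0 while EL2 is
 * enabled, hiding the hypervisor-reserved counters from the guest.
 */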

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    uint64_t overflow_mask, new_pmswinc;

    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            overflow_mask = pmevcntr_is_64_bit(env, i) ?
                1ULL << 63 : 1ULL << 31;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /*
     * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; instead
     * of validating the selector at write time, we check PMSELR.SEL when
     * PMXEVTYPER and PMXEVCNTR are accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmu_op_start(env);
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
    pmu_op_finish(env);
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmu_op_start(env);
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
    pmu_op_finish(env);
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}
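
/*
 * Example of the counter decode above (illustrative): PMEVTYPER3_EL0 is
 * encoded with crm = 0xc, opc2 = 3, so ((crm & 3) << 3) | (opc2 & 7)
 * = (0 << 3) | 3 = counter 3; PMEVTYPER10_EL0 has crm = 0xd, opc2 = 2,
 * giving (1 << 3) | 2 = counter 10.
 */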
1496 
1497 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1498                                uint64_t value)
1499 {
1500     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1501     env->cp15.c14_pmevtyper[counter] = value;
1502 
1503     /*
1504      * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1505      * pmu_op_finish calls when loading saved state for a migration. Because
1506      * we're potentially updating the type of event here, the value written to
1507      * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1508      * different counter type. Therefore, we need to set this value to the
1509      * current count for the counter type we're writing so that pmu_op_finish
1510      * has the correct count for its calculation.
1511      */
1512     uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1513     if (event_supported(event)) {
1514         uint16_t event_idx = supported_event_map[event];
1515         env->cp15.c14_pmevcntr_delta[counter] =
1516             pm_events[event_idx].get_count(env);
1517     }
1518 }
1519 
1520 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1521 {
1522     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1523     return pmevtyper_read(env, ri, counter);
1524 }
1525 
1526 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1527                              uint64_t value)
1528 {
1529     pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1530 }
1531 
1532 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1533 {
1534     return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1535 }
1536 
1537 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1538                              uint64_t value, uint8_t counter)
1539 {
1540     if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
1541         /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
1542         value &= MAKE_64BIT_MASK(0, 32);
1543     }
1544     if (counter < pmu_num_counters(env)) {
1545         pmevcntr_op_start(env, counter);
1546         env->cp15.c14_pmevcntr[counter] = value;
1547         pmevcntr_op_finish(env, counter);
1548     }
1549     /*
1550      * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1551      * are CONSTRAINED UNPREDICTABLE.
1552      */
1553 }
1554 
1555 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1556                               uint8_t counter)
1557 {
1558     if (counter < pmu_num_counters(env)) {
1559         uint64_t ret;
1560         pmevcntr_op_start(env, counter);
1561         ret = env->cp15.c14_pmevcntr[counter];
1562         pmevcntr_op_finish(env, counter);
1563         if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
1564             /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
1565             ret &= MAKE_64BIT_MASK(0, 32);
1566         }
1567         return ret;
1568     } else {
1569         /*
1570          * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1571          * are CONSTRAINED UNPREDICTABLE.
1572          */
1573         return 0;
1574     }
1575 }
1576 
1577 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1578                              uint64_t value)
1579 {
1580     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1581     pmevcntr_write(env, ri, value, counter);
1582 }
1583 
1584 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1585 {
1586     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1587     return pmevcntr_read(env, ri, counter);
1588 }
1589 
1590 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1591                              uint64_t value)
1592 {
1593     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1594     assert(counter < pmu_num_counters(env));
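    /*
     * Raw writes are only used when loading migration state, which is
     * bracketed by pmu_op_start()/pmu_op_finish(); store the value
     * directly rather than going through pmevcntr_write() and its
     * counter start/finish hooks.
     */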
1595     env->cp15.c14_pmevcntr[counter] = value;
1597 }
1598 
1599 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1600 {
1601     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1602     assert(counter < pmu_num_counters(env));
1603     return env->cp15.c14_pmevcntr[counter];
1604 }
1605 
1606 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1607                              uint64_t value)
1608 {
1609     pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1610 }
1611 
1612 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1613 {
1614     return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1615 }
1616 
1617 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1618                             uint64_t value)
1619 {
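    /*
     * v8 defines the ER, CR, SW and EN bits (bits [3:0]); earlier
     * architecture versions implement only the EN bit (bit [0]).
     */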
1620     if (arm_feature(env, ARM_FEATURE_V8)) {
1621         env->cp15.c9_pmuserenr = value & 0xf;
1622     } else {
1623         env->cp15.c9_pmuserenr = value & 1;
1624     }
1625 }
1626 
1627 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1628                              uint64_t value)
1629 {
1630     /* Only bits for counters implemented on this CPU (including C) can be set */
1631     value &= pmu_counter_mask(env);
1632     env->cp15.c9_pminten |= value;
1633     pmu_update_irq(env);
1634 }
1635 
1636 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1637                              uint64_t value)
1638 {
1639     value &= pmu_counter_mask(env);
1640     env->cp15.c9_pminten &= ~value;
1641     pmu_update_irq(env);
1642 }
1643 
1644 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1645                        uint64_t value)
1646 {
1647     /*
1648      * Note that even though the AArch64 view of this register has bits
1649      * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1650      * architectural requirements for bits which are RES0 only in some
1651      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1652      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1653      */
1654     raw_write(env, ri, value & ~0x1FULL);
1655 }
1656 
1657 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1658 {
1659     /* Begin with base v8.0 state.  */
1660     uint64_t valid_mask = 0x3fff;
1661     ARMCPU *cpu = env_archcpu(env);
1662     uint64_t changed;
1663 
1664     /*
1665      * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
1666      * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
1667      * Instead, choose the format based on the mode of EL3.
1668      */
1669     if (arm_el_is_aa64(env, 3)) {
1670         value |= SCR_FW | SCR_AW;      /* RES1 */
1671         valid_mask &= ~SCR_NET;        /* RES0 */
1672 
1673         if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
1674             !cpu_isar_feature(aa64_aa32_el2, cpu)) {
1675             value |= SCR_RW;           /* RAO/WI */
1676         }
1677         if (cpu_isar_feature(aa64_ras, cpu)) {
1678             valid_mask |= SCR_TERR;
1679         }
1680         if (cpu_isar_feature(aa64_lor, cpu)) {
1681             valid_mask |= SCR_TLOR;
1682         }
1683         if (cpu_isar_feature(aa64_pauth, cpu)) {
1684             valid_mask |= SCR_API | SCR_APK;
1685         }
1686         if (cpu_isar_feature(aa64_sel2, cpu)) {
1687             valid_mask |= SCR_EEL2;
1688         } else if (cpu_isar_feature(aa64_rme, cpu)) {
1689             /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
1690             value |= SCR_NS;
1691         }
1692         if (cpu_isar_feature(aa64_mte, cpu)) {
1693             valid_mask |= SCR_ATA;
1694         }
1695         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
1696             valid_mask |= SCR_ENSCXT;
1697         }
1698         if (cpu_isar_feature(aa64_doublefault, cpu)) {
1699             valid_mask |= SCR_EASE | SCR_NMEA;
1700         }
1701         if (cpu_isar_feature(aa64_sme, cpu)) {
1702             valid_mask |= SCR_ENTP2;
1703         }
1704         if (cpu_isar_feature(aa64_hcx, cpu)) {
1705             valid_mask |= SCR_HXEN;
1706         }
1707         if (cpu_isar_feature(aa64_fgt, cpu)) {
1708             valid_mask |= SCR_FGTEN;
1709         }
1710         if (cpu_isar_feature(aa64_rme, cpu)) {
1711             valid_mask |= SCR_NSE | SCR_GPF;
1712         }
1713         if (cpu_isar_feature(aa64_ecv, cpu)) {
1714             valid_mask |= SCR_ECVEN;
1715         }
1716     } else {
1717         valid_mask &= ~(SCR_RW | SCR_ST);
1718         if (cpu_isar_feature(aa32_ras, cpu)) {
1719             valid_mask |= SCR_TERR;
1720         }
1721     }
1722 
1723     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1724         valid_mask &= ~SCR_HCE;
1725 
1726         /*
1727          * On ARMv7, SMD (or SCD as it is called in v7) is only
1728          * supported if EL2 exists. The bit is UNK/SBZP when
1729          * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1730          * when EL2 is unavailable.
1731          * On ARMv8, this bit is always available.
1732          */
1733         if (arm_feature(env, ARM_FEATURE_V7) &&
1734             !arm_feature(env, ARM_FEATURE_V8)) {
1735             valid_mask &= ~SCR_SMD;
1736         }
1737     }
1738 
1739     /* Clear all-context RES0 bits.  */
1740     value &= valid_mask;
1741     changed = env->cp15.scr_el3 ^ value;
1742     env->cp15.scr_el3 = value;
1743 
1744     /*
1745      * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
1746      * we must invalidate all TLBs below EL3.
1747      */
1748     if (changed & (SCR_NS | SCR_NSE)) {
1749         tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
1750                                            ARMMMUIdxBit_E20_0 |
1751                                            ARMMMUIdxBit_E10_1 |
1752                                            ARMMMUIdxBit_E20_2 |
1753                                            ARMMMUIdxBit_E10_1_PAN |
1754                                            ARMMMUIdxBit_E20_2_PAN |
1755                                            ARMMMUIdxBit_E2));
1756     }
1757 }
1758 
1759 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1760 {
1761     /*
1762      * scr_write will set the RES1 bits on an AArch64-only CPU (at least
1763      * FW, AW and RW), so the reset value is nonzero there and 0 otherwise.
1764      */
1765     scr_write(env, ri, 0);
1766 }
1767 
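/*
 * HCR_EL2.TID2 and HCR_EL2.TID4 (the latter added by FEAT_EVT) trap EL1
 * accesses to the cache ID registers (CCSIDR, CSSELR, ...) to EL2.
 */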
1768 static CPAccessResult access_tid4(CPUARMState *env,
1769                                   const ARMCPRegInfo *ri,
1770                                   bool isread)
1771 {
1772     if (arm_current_el(env) == 1 &&
1773         (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) {
1774         return CP_ACCESS_TRAP_EL2;
1775     }
1776 
1777     return CP_ACCESS_OK;
1778 }
1779 
1780 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1781 {
1782     ARMCPU *cpu = env_archcpu(env);
1783 
1784     /*
1785      * Acquire the CSSELR index from the bank corresponding to the CCSIDR
1786      * access.
1787      */
1788     uint32_t index = A32_BANKED_REG_GET(env, csselr,
1789                                         ri->secure & ARM_CP_SECSTATE_S);
1790 
1791     return cpu->ccsidr[index];
1792 }
1793 
1794 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1795                          uint64_t value)
1796 {
1797     raw_write(env, ri, value & 0xf);
1798 }
1799 
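/*
 * Compute ISR_EL1 (and the AArch32 ISR): report the pending A, I and F
 * interrupt bits, honouring HCR_EL2.{AMO,IMO,FMO} routing of virtual
 * interrupts, plus the IS/FS superpriority bits when FEAT_NMI is
 * implemented.
 */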
1800 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1801 {
1802     CPUState *cs = env_cpu(env);
1803     bool el1 = arm_current_el(env) == 1;
1804     uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
1805     uint64_t ret = 0;
1806 
1807     if (hcr_el2 & HCR_IMO) {
1808         if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1809             ret |= CPSR_I;
1810         }
1811         if (cs->interrupt_request & CPU_INTERRUPT_VINMI) {
1812             ret |= ISR_IS;
1813             ret |= CPSR_I;
1814         }
1815     } else {
1816         if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1817             ret |= CPSR_I;
1818         }
1819 
1820         if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
1821             ret |= ISR_IS;
1822             ret |= CPSR_I;
1823         }
1824     }
1825 
1826     if (hcr_el2 & HCR_FMO) {
1827         if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1828             ret |= CPSR_F;
1829         }
1830         if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) {
1831             ret |= ISR_FS;
1832             ret |= CPSR_F;
1833         }
1834     } else {
1835         if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1836             ret |= CPSR_F;
1837         }
1838     }
1839 
1840     if (hcr_el2 & HCR_AMO) {
1841         if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
1842             ret |= CPSR_A;
1843         }
1844     }
1845 
1846     return ret;
1847 }
1848 
1849 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
1850                                        bool isread)
1851 {
1852     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
1853         return CP_ACCESS_TRAP_EL2;
1854     }
1855 
1856     return CP_ACCESS_OK;
1857 }
1858 
1859 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
1860                                        bool isread)
1861 {
1862     if (arm_feature(env, ARM_FEATURE_V8)) {
1863         return access_aa64_tid1(env, ri, isread);
1864     }
1865 
1866     return CP_ACCESS_OK;
1867 }
1868 
1869 static const ARMCPRegInfo v7_cp_reginfo[] = {
1870     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1871     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1872       .access = PL1_W, .type = ARM_CP_NOP },
1873     /*
1874      * Performance monitors are implementation defined in v7,
1875      * but with an ARM recommended set of registers, which we
1876      * follow.
1877      *
1878      * Performance registers fall into three categories:
1879      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1880      *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1881      *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1882      * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1883      * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1884      */
1885     { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1886       .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
1887       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1888       .writefn = pmcntenset_write,
1889       .accessfn = pmreg_access,
1890       .fgt = FGT_PMCNTEN,
1891       .raw_writefn = raw_write },
1892     { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
1893       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1894       .access = PL0_RW, .accessfn = pmreg_access,
1895       .fgt = FGT_PMCNTEN,
1896       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1897       .writefn = pmcntenset_write, .raw_writefn = raw_write },
1898     { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1899       .access = PL0_RW,
1900       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1901       .accessfn = pmreg_access,
1902       .fgt = FGT_PMCNTEN,
1903       .writefn = pmcntenclr_write,
1904       .type = ARM_CP_ALIAS | ARM_CP_IO },
1905     { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1906       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
1907       .access = PL0_RW, .accessfn = pmreg_access,
1908       .fgt = FGT_PMCNTEN,
1909       .type = ARM_CP_ALIAS | ARM_CP_IO,
1910       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
1911       .writefn = pmcntenclr_write },
1912     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
1913       .access = PL0_RW, .type = ARM_CP_IO,
1914       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
1915       .accessfn = pmreg_access,
1916       .fgt = FGT_PMOVS,
1917       .writefn = pmovsr_write,
1918       .raw_writefn = raw_write },
1919     { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
1920       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
1921       .access = PL0_RW, .accessfn = pmreg_access,
1922       .fgt = FGT_PMOVS,
1923       .type = ARM_CP_ALIAS | ARM_CP_IO,
1924       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1925       .writefn = pmovsr_write,
1926       .raw_writefn = raw_write },
1927     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
1928       .access = PL0_W, .accessfn = pmreg_access_swinc,
1929       .fgt = FGT_PMSWINC_EL0,
1930       .type = ARM_CP_NO_RAW | ARM_CP_IO,
1931       .writefn = pmswinc_write },
1932     { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
1933       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
1934       .access = PL0_W, .accessfn = pmreg_access_swinc,
1935       .fgt = FGT_PMSWINC_EL0,
1936       .type = ARM_CP_NO_RAW | ARM_CP_IO,
1937       .writefn = pmswinc_write },
1938     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
1939       .access = PL0_RW, .type = ARM_CP_ALIAS,
1940       .fgt = FGT_PMSELR_EL0,
1941       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
1942       .accessfn = pmreg_access_selr, .writefn = pmselr_write,
1943       .raw_writefn = raw_write},
1944     { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
1945       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
1946       .access = PL0_RW, .accessfn = pmreg_access_selr,
1947       .fgt = FGT_PMSELR_EL0,
1948       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
1949       .writefn = pmselr_write, .raw_writefn = raw_write, },
1950     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
1951       .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
1952       .fgt = FGT_PMCCNTR_EL0,
1953       .readfn = pmccntr_read, .writefn = pmccntr_write32,
1954       .accessfn = pmreg_access_ccntr },
1955     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
1956       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
1957       .access = PL0_RW, .accessfn = pmreg_access_ccntr,
1958       .fgt = FGT_PMCCNTR_EL0,
1959       .type = ARM_CP_IO,
1960       .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
1961       .readfn = pmccntr_read, .writefn = pmccntr_write,
1962       .raw_readfn = raw_read, .raw_writefn = raw_write, },
1963     { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
1964       .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
1965       .access = PL0_RW, .accessfn = pmreg_access,
1966       .fgt = FGT_PMCCFILTR_EL0,
1967       .type = ARM_CP_ALIAS | ARM_CP_IO,
1968       .resetvalue = 0, },
1969     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
1970       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
1971       .writefn = pmccfiltr_write, .raw_writefn = raw_write,
1972       .access = PL0_RW, .accessfn = pmreg_access,
1973       .fgt = FGT_PMCCFILTR_EL0,
1974       .type = ARM_CP_IO,
1975       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
1976       .resetvalue = 0, },
1977     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
1978       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1979       .accessfn = pmreg_access,
1980       .fgt = FGT_PMEVTYPERN_EL0,
1981       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1982     { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
1983       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
1984       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1985       .accessfn = pmreg_access,
1986       .fgt = FGT_PMEVTYPERN_EL0,
1987       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1988     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
1989       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1990       .accessfn = pmreg_access_xevcntr,
1991       .fgt = FGT_PMEVCNTRN_EL0,
1992       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
1993     { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
1994       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
1995       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1996       .accessfn = pmreg_access_xevcntr,
1997       .fgt = FGT_PMEVCNTRN_EL0,
1998       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
1999     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2000       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2001       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2002       .resetvalue = 0,
2003       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2004     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2005       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2006       .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2007       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2008       .resetvalue = 0,
2009       .writefn = pmuserenr_write, .raw_writefn = raw_write },
2010     { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2011       .access = PL1_RW, .accessfn = access_tpm,
2012       .fgt = FGT_PMINTEN,
2013       .type = ARM_CP_ALIAS | ARM_CP_IO,
2014       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2015       .resetvalue = 0,
2016       .writefn = pmintenset_write, .raw_writefn = raw_write },
2017     { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2018       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2019       .access = PL1_RW, .accessfn = access_tpm,
2020       .fgt = FGT_PMINTEN,
2021       .type = ARM_CP_IO,
2022       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2023       .writefn = pmintenset_write, .raw_writefn = raw_write,
2024       .resetvalue = 0x0 },
2025     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2026       .access = PL1_RW, .accessfn = access_tpm,
2027       .fgt = FGT_PMINTEN,
2028       .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2029       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2030       .writefn = pmintenclr_write, },
2031     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2032       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2033       .access = PL1_RW, .accessfn = access_tpm,
2034       .fgt = FGT_PMINTEN,
2035       .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2036       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2037       .writefn = pmintenclr_write },
2038     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2039       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2040       .access = PL1_R,
2041       .accessfn = access_tid4,
2042       .fgt = FGT_CCSIDR_EL1,
2043       .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2044     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2045       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2046       .access = PL1_RW,
2047       .accessfn = access_tid4,
2048       .fgt = FGT_CSSELR_EL1,
2049       .writefn = csselr_write, .resetvalue = 0,
2050       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2051                              offsetof(CPUARMState, cp15.csselr_ns) } },
2052     /*
2053      * Auxiliary ID register: this actually has an IMPDEF value but for now
2054      * just RAZ for all cores.
2055      */
2056     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2057       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2058       .access = PL1_R, .type = ARM_CP_CONST,
2059       .accessfn = access_aa64_tid1,
2060       .fgt = FGT_AIDR_EL1,
2061       .resetvalue = 0 },
2062     /*
2063      * Auxiliary fault status registers: these also are IMPDEF, and we
2064      * choose to RAZ/WI for all cores.
2065      */
2066     { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2067       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2068       .access = PL1_RW, .accessfn = access_tvm_trvm,
2069       .fgt = FGT_AFSR0_EL1,
2070       .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
2071       .type = ARM_CP_CONST, .resetvalue = 0 },
2072     { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2073       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2074       .access = PL1_RW, .accessfn = access_tvm_trvm,
2075       .fgt = FGT_AFSR1_EL1,
2076       .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
2077       .type = ARM_CP_CONST, .resetvalue = 0 },
2078     /*
2079      * MAIR can just read-as-written because we don't implement caches
2080      * and so don't need to care about memory attributes.
2081      */
2082     { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2083       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2084       .access = PL1_RW, .accessfn = access_tvm_trvm,
2085       .fgt = FGT_MAIR_EL1,
2086       .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
2087       .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2088       .resetvalue = 0 },
2089     { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2090       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2091       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2092       .resetvalue = 0 },
2093     /*
2094      * For non-long-descriptor page tables these are PRRR and NMRR;
2095      * regardless they still act as reads-as-written for QEMU.
2096      */
2097     /*
2098      * MAIR0/1 are defined separately from their 64-bit counterpart so that
2099      * each can be assigned the correct fieldoffset based on the endianness
2100      * handled in the field definitions.
2101      */
2102     { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2103       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2104       .access = PL1_RW, .accessfn = access_tvm_trvm,
2105       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2106                              offsetof(CPUARMState, cp15.mair0_ns) },
2107       .resetfn = arm_cp_reset_ignore },
2108     { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2109       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2110       .access = PL1_RW, .accessfn = access_tvm_trvm,
2111       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2112                              offsetof(CPUARMState, cp15.mair1_ns) },
2113       .resetfn = arm_cp_reset_ignore },
2114     { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2115       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2116       .fgt = FGT_ISR_EL1,
2117       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2118 };
2119 
2120 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2121     /* PMOVSSET is not implemented in v7 before v7ve */
2122     { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2123       .access = PL0_RW, .accessfn = pmreg_access,
2124       .fgt = FGT_PMOVS,
2125       .type = ARM_CP_ALIAS | ARM_CP_IO,
2126       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2127       .writefn = pmovsset_write,
2128       .raw_writefn = raw_write },
2129     { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2130       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2131       .access = PL0_RW, .accessfn = pmreg_access,
2132       .fgt = FGT_PMOVS,
2133       .type = ARM_CP_ALIAS | ARM_CP_IO,
2134       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2135       .writefn = pmovsset_write,
2136       .raw_writefn = raw_write },
2137 };
2138 
2139 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2140                         uint64_t value)
2141 {
2142     value &= 1;
2143     env->teecr = value;
2144 }
2145 
2146 static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2147                                    bool isread)
2148 {
2149     /*
2150      * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
2151      * at all, so we don't need to check whether we're v8A.
2152      */
2153     if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
2154         (env->cp15.hstr_el2 & HSTR_TTEE)) {
2155         return CP_ACCESS_TRAP_EL2;
2156     }
2157     return CP_ACCESS_OK;
2158 }
2159 
2160 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2161                                     bool isread)
2162 {
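    /* When TEECR.XED (bit 0) is set, unprivileged TEEHBR accesses trap */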
2163     if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2164         return CP_ACCESS_TRAP;
2165     }
2166     return teecr_access(env, ri, isread);
2167 }
2168 
2169 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2170     { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2171       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2172       .resetvalue = 0,
2173       .writefn = teecr_write, .accessfn = teecr_access },
2174     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2175       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2176       .accessfn = teehbr_access, .resetvalue = 0 },
2177 };
2178 
2179 static const ARMCPRegInfo v6k_cp_reginfo[] = {
2180     { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2181       .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2182       .access = PL0_RW,
2183       .fgt = FGT_TPIDR_EL0,
2184       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2185     { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2186       .access = PL0_RW,
2187       .fgt = FGT_TPIDR_EL0,
2188       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2189                              offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2190       .resetfn = arm_cp_reset_ignore },
2191     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2192       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2193       .access = PL0_R | PL1_W,
2194       .fgt = FGT_TPIDRRO_EL0,
2195       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2196       .resetvalue = 0},
2197     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2198       .access = PL0_R | PL1_W,
2199       .fgt = FGT_TPIDRRO_EL0,
2200       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2201                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2202       .resetfn = arm_cp_reset_ignore },
2203     { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2204       .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2205       .access = PL1_RW,
2206       .fgt = FGT_TPIDR_EL1,
2207       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2208     { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2209       .access = PL1_RW,
2210       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2211                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2212       .resetvalue = 0 },
2213 };
2214 
2215 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
2216 {
2217     ARMCPU *cpu = env_archcpu(env);
2218 
2219     cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
2220 }
2221 
2222 #ifndef CONFIG_USER_ONLY
2223 
2224 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2225                                        bool isread)
2226 {
2227     /*
2228      * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2229      * Writable only at the highest implemented exception level.
2230      */
2231     int el = arm_current_el(env);
2232     uint64_t hcr;
2233     uint32_t cntkctl;
2234 
2235     switch (el) {
2236     case 0:
2237         hcr = arm_hcr_el2_eff(env);
2238         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2239             cntkctl = env->cp15.cnthctl_el2;
2240         } else {
2241             cntkctl = env->cp15.c14_cntkctl;
2242         }
2243         if (!extract32(cntkctl, 0, 2)) {
2244             return CP_ACCESS_TRAP;
2245         }
2246         break;
2247     case 1:
2248         if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2249             arm_is_secure_below_el3(env)) {
2250             /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2251             return CP_ACCESS_TRAP_UNCATEGORIZED;
2252         }
2253         break;
2254     case 2:
2255     case 3:
2256         break;
2257     }
2258 
2259     if (!isread && el < arm_highest_el(env)) {
2260         return CP_ACCESS_TRAP_UNCATEGORIZED;
2261     }
2262 
2263     return CP_ACCESS_OK;
2264 }
2265 
2266 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2267                                         bool isread)
2268 {
2269     unsigned int cur_el = arm_current_el(env);
2270     bool has_el2 = arm_is_el2_enabled(env);
2271     uint64_t hcr = arm_hcr_el2_eff(env);
2272 
2273     switch (cur_el) {
2274     case 0:
2275         /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2276         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2277             return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2278                     ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2279         }
2280 
2281         /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2282         if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2283             return CP_ACCESS_TRAP;
2284         }
2285         /* fall through */
2286     case 1:
2287         /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2288         if (has_el2 && timeridx == GTIMER_PHYS &&
2289             (hcr & HCR_E2H
2290              ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2291              : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2292             return CP_ACCESS_TRAP_EL2;
2293         }
2294         if (has_el2 && timeridx == GTIMER_VIRT) {
2295             if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) {
2296                 return CP_ACCESS_TRAP_EL2;
2297             }
2298         }
2299         break;
2300     }
2301     return CP_ACCESS_OK;
2302 }
2303 
2304 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2305                                       bool isread)
2306 {
2307     unsigned int cur_el = arm_current_el(env);
2308     bool has_el2 = arm_is_el2_enabled(env);
2309     uint64_t hcr = arm_hcr_el2_eff(env);
2310 
2311     switch (cur_el) {
2312     case 0:
2313         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2314             /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2315             return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2316                     ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2317         }
2318 
2319         /*
2320          * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2321          * EL0 if EL0[PV]TEN is zero.
2322          */
2323         if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2324             return CP_ACCESS_TRAP;
2325         }
2326         /* fall through */
2327 
2328     case 1:
2329         if (has_el2 && timeridx == GTIMER_PHYS) {
2330             if (hcr & HCR_E2H) {
2331                 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2332                 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2333                     return CP_ACCESS_TRAP_EL2;
2334                 }
2335             } else {
2336                 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2337                 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2338                     return CP_ACCESS_TRAP_EL2;
2339                 }
2340             }
2341         }
2342         if (has_el2 && timeridx == GTIMER_VIRT) {
2343             if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) {
2344                 return CP_ACCESS_TRAP_EL2;
2345             }
2346         }
2347         break;
2348     }
2349     return CP_ACCESS_OK;
2350 }
2351 
2352 static CPAccessResult gt_pct_access(CPUARMState *env,
2353                                     const ARMCPRegInfo *ri,
2354                                     bool isread)
2355 {
2356     return gt_counter_access(env, GTIMER_PHYS, isread);
2357 }
2358 
2359 static CPAccessResult gt_vct_access(CPUARMState *env,
2360                                     const ARMCPRegInfo *ri,
2361                                     bool isread)
2362 {
2363     return gt_counter_access(env, GTIMER_VIRT, isread);
2364 }
2365 
2366 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2367                                        bool isread)
2368 {
2369     return gt_timer_access(env, GTIMER_PHYS, isread);
2370 }
2371 
2372 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2373                                        bool isread)
2374 {
2375     return gt_timer_access(env, GTIMER_VIRT, isread);
2376 }
2377 
2378 static CPAccessResult gt_stimer_access(CPUARMState *env,
2379                                        const ARMCPRegInfo *ri,
2380                                        bool isread)
2381 {
2382     /*
2383      * The AArch64 register view of the secure physical timer is
2384      * always accessible from EL3, and configurably accessible from
2385      * Secure EL1.
2386      */
2387     switch (arm_current_el(env)) {
2388     case 1:
2389         if (!arm_is_secure(env)) {
2390             return CP_ACCESS_TRAP;
2391         }
2392         if (!(env->cp15.scr_el3 & SCR_ST)) {
2393             return CP_ACCESS_TRAP_EL3;
2394         }
2395         return CP_ACCESS_OK;
2396     case 0:
2397     case 2:
2398         return CP_ACCESS_TRAP;
2399     case 3:
2400         return CP_ACCESS_OK;
2401     default:
2402         g_assert_not_reached();
2403     }
2404 }
2405 
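/*
 * Return the current generic timer count: the virtual-clock time in ns
 * divided by the tick period for the configured counter frequency
 * (e.g. with a 62.5MHz CNTFRQ, one tick every 16ns).
 */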
2406 uint64_t gt_get_countervalue(CPUARMState *env)
2407 {
2408     ARMCPU *cpu = env_archcpu(env);
2409 
2410     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2411 }
2412 
2413 static void gt_update_irq(ARMCPU *cpu, int timeridx)
2414 {
2415     CPUARMState *env = &cpu->env;
2416     uint64_t cnthctl = env->cp15.cnthctl_el2;
2417     ARMSecuritySpace ss = arm_security_space(env);
2418     /* ISTATUS && !IMASK */
2419     int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;
2420 
2421     /*
2422      * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK.
2423      * It is RES0 in Secure and NonSecure state.
2424      */
2425     if ((ss == ARMSS_Root || ss == ARMSS_Realm) &&
2426         ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) ||
2427          (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) {
2428         irqstate = 0;
2429     }
2430 
2431     qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2432     trace_arm_gt_update_irq(timeridx, irqstate);
2433 }
2434 
2435 void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
2436 {
2437     /*
2438      * Changing security state between Root and Secure/NonSecure, which may
2439      * happen when switching EL, can change the effective value of CNTHCTL_EL2
2440      * mask bits. Update the IRQ state accordingly.
2441      */
2442     gt_update_irq(cpu, GTIMER_VIRT);
2443     gt_update_irq(cpu, GTIMER_PHYS);
2444 }
2445 
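/*
 * Return the raw offset applied to the physical counter: CNTPOFF_EL2
 * when FEAT_ECV offsetting is enabled (SCR_EL3.ECVEN and CNTHCTL_EL2.ECV
 * set, EL2 enabled, and not in the E2H+TGE regime), otherwise 0.
 */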
2446 static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
2447 {
2448     if ((env->cp15.scr_el3 & SCR_ECVEN) &&
2449         FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) &&
2450         arm_is_el2_enabled(env) &&
2451         (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
2452         return env->cp15.cntpoff_el2;
2453     }
2454     return 0;
2455 }
2456 
2457 static uint64_t gt_phys_cnt_offset(CPUARMState *env)
2458 {
2459     if (arm_current_el(env) >= 2) {
2460         return 0;
2461     }
2462     return gt_phys_raw_cnt_offset(env);
2463 }
2464 
2465 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2466 {
2467     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2468 
2469     if (gt->ctl & 1) {
2470         /*
2471          * Timer enabled: calculate and set current ISTATUS, irq, and
2472          * reset timer to when ISTATUS next has to change
2473          */
2474         uint64_t offset = timeridx == GTIMER_VIRT ?
2475             cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env);
2476         uint64_t count = gt_get_countervalue(&cpu->env);
2477         /* Note that this must be unsigned 64 bit arithmetic: */
2478         int istatus = count - offset >= gt->cval;
2479         uint64_t nexttick;
2480 
2481         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2482 
2483         if (istatus) {
2484             /*
2485              * Next transition is when (count - offset) rolls back over to 0.
2486              * If offset > count then this is when count == offset;
2487              * if offset <= count then this is when count == offset + 2^64
2488      * if offset <= count then this is when count == offset + 2^64.
2489      * For the latter case we set nexttick to an "as far in the future
2490              */
2491             if (offset > count) {
2492                 nexttick = offset;
2493             } else {
2494                 nexttick = UINT64_MAX;
2495             }
2496         } else {
2497             /*
2498              * Next transition is when (count - offset) == cval, i.e.
2499              * when count == (cval + offset).
2500              * If that would overflow, then again we set up the next interrupt
2501              * for "as far in the future as possible" for the code below.
2502              */
2503             if (uadd64_overflow(gt->cval, offset, &nexttick)) {
2504                 nexttick = UINT64_MAX;
2505             }
2506         }
2507         /*
2508          * Note that the desired next expiry time might be beyond the
2509          * signed-64-bit range of a QEMUTimer -- in this case we just
2510          * set the timer for as far in the future as possible. When the
2511          * timer expires we will reset the timer for any remaining period.
2512          */
2513         if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2514             timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2515         } else {
2516             timer_mod(cpu->gt_timer[timeridx], nexttick);
2517         }
2518         trace_arm_gt_recalc(timeridx, nexttick);
2519     } else {
2520         /* Timer disabled: ISTATUS and timer output always clear */
2521         gt->ctl &= ~4;
2522         timer_del(cpu->gt_timer[timeridx]);
2523         trace_arm_gt_recalc_disabled(timeridx);
2524     }
2525     gt_update_irq(cpu, timeridx);
2526 }
2527 
2528 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2529                            int timeridx)
2530 {
2531     ARMCPU *cpu = env_archcpu(env);
2532 
2533     timer_del(cpu->gt_timer[timeridx]);
2534 }
2535 
2536 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2537 {
2538     return gt_get_countervalue(env) - gt_phys_cnt_offset(env);
2539 }
2540 
2541 uint64_t gt_virt_cnt_offset(CPUARMState *env)
2542 {
2543     uint64_t hcr;
2544 
2545     switch (arm_current_el(env)) {
2546     case 2:
2547         hcr = arm_hcr_el2_eff(env);
2548         if (hcr & HCR_E2H) {
2549             return 0;
2550         }
2551         break;
2552     case 0:
2553         hcr = arm_hcr_el2_eff(env);
2554         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2555             return 0;
2556         }
2557         break;
2558     }
2559 
2560     return env->cp15.cntvoff_el2;
2561 }
2562 
2563 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2564 {
2565     return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2566 }
2567 
2568 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2569                           int timeridx,
2570                           uint64_t value)
2571 {
2572     trace_arm_gt_cval_write(timeridx, value);
2573     env->cp15.c14_timer[timeridx].cval = value;
2574     gt_recalc_timer(env_archcpu(env), timeridx);
2575 }
2576 
2577 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2578                              int timeridx)
2579 {
2580     uint64_t offset = 0;
2581 
2582     switch (timeridx) {
2583     case GTIMER_VIRT:
2584     case GTIMER_HYPVIRT:
2585         offset = gt_virt_cnt_offset(env);
2586         break;
2587     case GTIMER_PHYS:
2588         offset = gt_phys_cnt_offset(env);
2589         break;
2590     }
2591 
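    /*
     * TVAL is a signed 32-bit downcounter: it reads as CVAL minus the
     * current (offset-adjusted) count, going negative once the counter
     * passes CVAL.
     */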
2592     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2593                       (gt_get_countervalue(env) - offset));
2594 }
2595 
2596 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2597                           int timeridx,
2598                           uint64_t value)
2599 {
2600     uint64_t offset = 0;
2601 
2602     switch (timeridx) {
2603     case GTIMER_VIRT:
2604     case GTIMER_HYPVIRT:
2605         offset = gt_virt_cnt_offset(env);
2606         break;
2607     case GTIMER_PHYS:
2608         offset = gt_phys_cnt_offset(env);
2609         break;
2610     }
2611 
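    /*
     * Writing TVAL sets CVAL so that the timer fires 'value' ticks from
     * now: CVAL = (current count - offset) + sign-extended 32-bit TVAL.
     */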
2612     trace_arm_gt_tval_write(timeridx, value);
2613     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2614                                          sextract64(value, 0, 32);
2615     gt_recalc_timer(env_archcpu(env), timeridx);
2616 }
2617 
2618 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2619                          int timeridx,
2620                          uint64_t value)
2621 {
2622     ARMCPU *cpu = env_archcpu(env);
2623     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2624 
2625     trace_arm_gt_ctl_write(timeridx, value);
2626     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2627     if ((oldval ^ value) & 1) {
2628         /* Enable toggled */
2629         gt_recalc_timer(cpu, timeridx);
2630     } else if ((oldval ^ value) & 2) {
2631         /*
2632          * IMASK toggled: don't need to recalculate,
2633          * just set the interrupt line based on ISTATUS
2634          */
2635         trace_arm_gt_imask_toggle(timeridx);
2636         gt_update_irq(cpu, timeridx);
2637     }
2638 }
2639 
2640 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2641 {
2642     gt_timer_reset(env, ri, GTIMER_PHYS);
2643 }
2644 
2645 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2646                                uint64_t value)
2647 {
2648     gt_cval_write(env, ri, GTIMER_PHYS, value);
2649 }
2650 
2651 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2652 {
2653     return gt_tval_read(env, ri, GTIMER_PHYS);
2654 }
2655 
2656 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2657                                uint64_t value)
2658 {
2659     gt_tval_write(env, ri, GTIMER_PHYS, value);
2660 }
2661 
2662 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2663                               uint64_t value)
2664 {
2665     gt_ctl_write(env, ri, GTIMER_PHYS, value);
2666 }
2667 
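/*
 * With HCR_EL2.E2H set, accesses to the CNTP_* and CNTV_* registers from
 * the EL2&0 translation regimes are redirected to the EL2 timers
 * (CNTHP_*, CNTHV_*); pick the timer a CNT[PV]_* access really targets
 * from the current mmu_idx.
 */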
2668 static int gt_phys_redir_timeridx(CPUARMState *env)
2669 {
2670     switch (arm_mmu_idx(env)) {
2671     case ARMMMUIdx_E20_0:
2672     case ARMMMUIdx_E20_2:
2673     case ARMMMUIdx_E20_2_PAN:
2674         return GTIMER_HYP;
2675     default:
2676         return GTIMER_PHYS;
2677     }
2678 }
2679 
2680 static int gt_virt_redir_timeridx(CPUARMState *env)
2681 {
2682     switch (arm_mmu_idx(env)) {
2683     case ARMMMUIdx_E20_0:
2684     case ARMMMUIdx_E20_2:
2685     case ARMMMUIdx_E20_2_PAN:
2686         return GTIMER_HYPVIRT;
2687     default:
2688         return GTIMER_VIRT;
2689     }
2690 }
2691 
2692 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2693                                         const ARMCPRegInfo *ri)
2694 {
2695     int timeridx = gt_phys_redir_timeridx(env);
2696     return env->cp15.c14_timer[timeridx].cval;
2697 }
2698 
2699 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2700                                      uint64_t value)
2701 {
2702     int timeridx = gt_phys_redir_timeridx(env);
2703     gt_cval_write(env, ri, timeridx, value);
2704 }
2705 
2706 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2707                                         const ARMCPRegInfo *ri)
2708 {
2709     int timeridx = gt_phys_redir_timeridx(env);
2710     return gt_tval_read(env, ri, timeridx);
2711 }
2712 
2713 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2714                                      uint64_t value)
2715 {
2716     int timeridx = gt_phys_redir_timeridx(env);
2717     gt_tval_write(env, ri, timeridx, value);
2718 }
2719 
2720 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2721                                        const ARMCPRegInfo *ri)
2722 {
2723     int timeridx = gt_phys_redir_timeridx(env);
2724     return env->cp15.c14_timer[timeridx].ctl;
2725 }
2726 
2727 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2728                                     uint64_t value)
2729 {
2730     int timeridx = gt_phys_redir_timeridx(env);
2731     gt_ctl_write(env, ri, timeridx, value);
2732 }
2733 
2734 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2735 {
2736     gt_timer_reset(env, ri, GTIMER_VIRT);
2737 }
2738 
2739 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2740                                uint64_t value)
2741 {
2742     gt_cval_write(env, ri, GTIMER_VIRT, value);
2743 }
2744 
2745 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2746 {
2747     return gt_tval_read(env, ri, GTIMER_VIRT);
2748 }
2749 
2750 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2751                                uint64_t value)
2752 {
2753     gt_tval_write(env, ri, GTIMER_VIRT, value);
2754 }
2755 
2756 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2757                               uint64_t value)
2758 {
2759     gt_ctl_write(env, ri, GTIMER_VIRT, value);
2760 }
2761 
2762 static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2763                              uint64_t value)
2764 {
2765     ARMCPU *cpu = env_archcpu(env);
2766     uint32_t oldval = env->cp15.cnthctl_el2;
2767     uint32_t valid_mask =
2768         R_CNTHCTL_EL0PCTEN_E2H1_MASK |
2769         R_CNTHCTL_EL0VCTEN_E2H1_MASK |
2770         R_CNTHCTL_EVNTEN_MASK |
2771         R_CNTHCTL_EVNTDIR_MASK |
2772         R_CNTHCTL_EVNTI_MASK |
2773         R_CNTHCTL_EL0VTEN_MASK |
2774         R_CNTHCTL_EL0PTEN_MASK |
2775         R_CNTHCTL_EL1PCTEN_E2H1_MASK |
2776         R_CNTHCTL_EL1PTEN_MASK;
2777 
2778     if (cpu_isar_feature(aa64_rme, cpu)) {
2779         valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK;
2780     }
2781     if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
2782         valid_mask |=
2783             R_CNTHCTL_EL1TVT_MASK |
2784             R_CNTHCTL_EL1TVCT_MASK |
2785             R_CNTHCTL_EL1NVPCT_MASK |
2786             R_CNTHCTL_EL1NVVCT_MASK |
2787             R_CNTHCTL_EVNTIS_MASK;
2788     }
2789     if (cpu_isar_feature(aa64_ecv, cpu)) {
2790         valid_mask |= R_CNTHCTL_ECV_MASK;
2791     }
2792 
2793     /* Clear RES0 bits */
2794     value &= valid_mask;
2795 
2796     raw_write(env, ri, value);
2797 
2798     if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) {
2799         gt_update_irq(cpu, GTIMER_VIRT);
2800     } else if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) {
2801         gt_update_irq(cpu, GTIMER_PHYS);
2802     }
2803 }
2804 
2805 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2806                               uint64_t value)
2807 {
2808     ARMCPU *cpu = env_archcpu(env);
2809 
2810     trace_arm_gt_cntvoff_write(value);
2811     raw_write(env, ri, value);
2812     gt_recalc_timer(cpu, GTIMER_VIRT);
2813 }
2814 
2815 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2816                                         const ARMCPRegInfo *ri)
2817 {
2818     int timeridx = gt_virt_redir_timeridx(env);
2819     return env->cp15.c14_timer[timeridx].cval;
2820 }
2821 
2822 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2823                                      uint64_t value)
2824 {
2825     int timeridx = gt_virt_redir_timeridx(env);
2826     gt_cval_write(env, ri, timeridx, value);
2827 }
2828 
2829 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2830                                         const ARMCPRegInfo *ri)
2831 {
2832     int timeridx = gt_virt_redir_timeridx(env);
2833     return gt_tval_read(env, ri, timeridx);
2834 }
2835 
2836 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2837                                      uint64_t value)
2838 {
2839     int timeridx = gt_virt_redir_timeridx(env);
2840     gt_tval_write(env, ri, timeridx, value);
2841 }
2842 
2843 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2844                                        const ARMCPRegInfo *ri)
2845 {
2846     int timeridx = gt_virt_redir_timeridx(env);
2847     return env->cp15.c14_timer[timeridx].ctl;
2848 }
2849 
2850 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2851                                     uint64_t value)
2852 {
2853     int timeridx = gt_virt_redir_timeridx(env);
2854     gt_ctl_write(env, ri, timeridx, value);
2855 }
2856 
2857 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2858 {
2859     gt_timer_reset(env, ri, GTIMER_HYP);
2860 }
2861 
2862 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2863                               uint64_t value)
2864 {
2865     gt_cval_write(env, ri, GTIMER_HYP, value);
2866 }
2867 
2868 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2869 {
2870     return gt_tval_read(env, ri, GTIMER_HYP);
2871 }
2872 
2873 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2874                               uint64_t value)
2875 {
2876     gt_tval_write(env, ri, GTIMER_HYP, value);
2877 }
2878 
2879 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2880                               uint64_t value)
2881 {
2882     gt_ctl_write(env, ri, GTIMER_HYP, value);
2883 }
2884 
2885 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2886 {
2887     gt_timer_reset(env, ri, GTIMER_SEC);
2888 }
2889 
2890 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2891                               uint64_t value)
2892 {
2893     gt_cval_write(env, ri, GTIMER_SEC, value);
2894 }
2895 
2896 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2897 {
2898     return gt_tval_read(env, ri, GTIMER_SEC);
2899 }
2900 
2901 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2902                               uint64_t value)
2903 {
2904     gt_tval_write(env, ri, GTIMER_SEC, value);
2905 }
2906 
2907 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2908                               uint64_t value)
2909 {
2910     gt_ctl_write(env, ri, GTIMER_SEC, value);
2911 }
2912 
2913 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2914 {
2915     gt_timer_reset(env, ri, GTIMER_HYPVIRT);
2916 }
2917 
2918 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2919                              uint64_t value)
2920 {
2921     gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
2922 }
2923 
2924 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2925 {
2926     return gt_tval_read(env, ri, GTIMER_HYPVIRT);
2927 }
2928 
2929 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2930                              uint64_t value)
2931 {
2932     gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
2933 }
2934 
2935 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2936                             uint64_t value)
2937 {
2938     gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
2939 }
2940 
2941 void arm_gt_ptimer_cb(void *opaque)
2942 {
2943     ARMCPU *cpu = opaque;
2944 
2945     gt_recalc_timer(cpu, GTIMER_PHYS);
2946 }
2947 
2948 void arm_gt_vtimer_cb(void *opaque)
2949 {
2950     ARMCPU *cpu = opaque;
2951 
2952     gt_recalc_timer(cpu, GTIMER_VIRT);
2953 }
2954 
2955 void arm_gt_htimer_cb(void *opaque)
2956 {
2957     ARMCPU *cpu = opaque;
2958 
2959     gt_recalc_timer(cpu, GTIMER_HYP);
2960 }
2961 
2962 void arm_gt_stimer_cb(void *opaque)
2963 {
2964     ARMCPU *cpu = opaque;
2965 
2966     gt_recalc_timer(cpu, GTIMER_SEC);
2967 }
2968 
2969 void arm_gt_hvtimer_cb(void *opaque)
2970 {
2971     ARMCPU *cpu = opaque;
2972 
2973     gt_recalc_timer(cpu, GTIMER_HYPVIRT);
2974 }
2975 
2976 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2977     /*
2978      * Note that CNTFRQ is purely reads-as-written for the benefit
2979      * of software; writing it doesn't actually change the timer frequency.
2980      * Our reset value matches the fixed frequency we implement the timer at.
2981      */
2982     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
2983       .type = ARM_CP_ALIAS,
2984       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2985       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
2986     },
2987     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2988       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2989       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2990       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2991       .resetfn = arm_gt_cntfrq_reset,
2992     },
2993     /* overall control: mostly access permissions */
2994     { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
2995       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
2996       .access = PL1_RW,
2997       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2998       .resetvalue = 0,
2999     },
3000     /* per-timer control */
3001     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3002       .secure = ARM_CP_SECSTATE_NS,
3003       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3004       .accessfn = gt_ptimer_access,
3005       .fieldoffset = offsetoflow32(CPUARMState,
3006                                    cp15.c14_timer[GTIMER_PHYS].ctl),
3007       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3008       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3009     },
3010     { .name = "CNTP_CTL_S",
3011       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3012       .secure = ARM_CP_SECSTATE_S,
3013       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3014       .accessfn = gt_ptimer_access,
3015       .fieldoffset = offsetoflow32(CPUARMState,
3016                                    cp15.c14_timer[GTIMER_SEC].ctl),
3017       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3018     },
3019     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
3020       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
3021       .type = ARM_CP_IO, .access = PL0_RW,
3022       .accessfn = gt_ptimer_access,
3023       .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1,
3024       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
3025       .resetvalue = 0,
3026       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3027       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3028     },
3029     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
3030       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3031       .accessfn = gt_vtimer_access,
3032       .fieldoffset = offsetoflow32(CPUARMState,
3033                                    cp15.c14_timer[GTIMER_VIRT].ctl),
3034       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3035       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3036     },
3037     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
3038       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
3039       .type = ARM_CP_IO, .access = PL0_RW,
3040       .accessfn = gt_vtimer_access,
3041       .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1,
3042       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
3043       .resetvalue = 0,
3044       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3045       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3046     },
3047     /* TimerValue views: a 32-bit downcounting view of the underlying state */
3048     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3049       .secure = ARM_CP_SECSTATE_NS,
3050       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3051       .accessfn = gt_ptimer_access,
3052       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3053     },
3054     { .name = "CNTP_TVAL_S",
3055       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3056       .secure = ARM_CP_SECSTATE_S,
3057       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3058       .accessfn = gt_ptimer_access,
3059       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
3060     },
3061     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3062       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
3063       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3064       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
3065       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3066     },
3067     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
3068       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3069       .accessfn = gt_vtimer_access,
3070       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3071     },
3072     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3073       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
3074       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3075       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
3076       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3077     },
3078     /* The counter itself */
3079     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3080       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3081       .accessfn = gt_pct_access,
3082       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3083     },
3084     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3085       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
3086       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3087       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3088     },
3089     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3090       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3091       .accessfn = gt_vct_access,
3092       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3093     },
3094     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3095       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3096       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3097       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3098     },
3099     /* Comparison value, indicating when the timer goes off */
3100     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3101       .secure = ARM_CP_SECSTATE_NS,
3102       .access = PL0_RW,
3103       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3104       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3105       .accessfn = gt_ptimer_access,
3106       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3107       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3108     },
3109     { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3110       .secure = ARM_CP_SECSTATE_S,
3111       .access = PL0_RW,
3112       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3113       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3114       .accessfn = gt_ptimer_access,
3115       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3116     },
3117     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3118       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
3119       .access = PL0_RW,
3120       .type = ARM_CP_IO,
3121       .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1,
3122       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3123       .resetvalue = 0, .accessfn = gt_ptimer_access,
3124       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3125       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3126     },
3127     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3128       .access = PL0_RW,
3129       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3130       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3131       .accessfn = gt_vtimer_access,
3132       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3133       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3134     },
3135     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3136       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
3137       .access = PL0_RW,
3138       .type = ARM_CP_IO,
3139       .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1,
3140       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3141       .resetvalue = 0, .accessfn = gt_vtimer_access,
3142       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3143       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3144     },
3145     /*
3146      * Secure timer -- access is restricted to EL3, and configurably
3147      * to Secure EL1, via the accessfn.
3148      */
3149     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3150       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3151       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3152       .accessfn = gt_stimer_access,
3153       .readfn = gt_sec_tval_read,
3154       .writefn = gt_sec_tval_write,
3155       .resetfn = gt_sec_timer_reset,
3156     },
3157     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3158       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3159       .type = ARM_CP_IO, .access = PL1_RW,
3160       .accessfn = gt_stimer_access,
3161       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3162       .resetvalue = 0,
3163       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3164     },
3165     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3166       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3167       .type = ARM_CP_IO, .access = PL1_RW,
3168       .accessfn = gt_stimer_access,
3169       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3170       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3171     },
3172 };
3173 
3174 /*
3175  * FEAT_ECV adds extra views of CNTVCT_EL0 and CNTPCT_EL0 which
3176  * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
3177  * so our implementations here are identical to the normal registers.
3178  */
3179 static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
3180     { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9,
3181       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3182       .accessfn = gt_vct_access,
3183       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3184     },
3185     { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
3186       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
3187       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3188       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3189     },
3190     { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8,
3191       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3192       .accessfn = gt_pct_access,
3193       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3194     },
3195     { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64,
3196       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 5,
3197       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3198       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3199     },
3200 };
3201 
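/*
 * CNTPOFF_EL2 accesses from EL2 trap to EL3 when SCR_EL3.ECVEn is
 * clear (if EL3 is implemented); accesses from EL3 are always OK.
 */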
3202 static CPAccessResult gt_cntpoff_access(CPUARMState *env,
3203                                         const ARMCPRegInfo *ri,
3204                                         bool isread)
3205 {
3206     if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) &&
3207         !(env->cp15.scr_el3 & SCR_ECVEN)) {
3208         return CP_ACCESS_TRAP_EL3;
3209     }
3210     return CP_ACCESS_OK;
3211 }
3212 
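/*
 * CNTPOFF_EL2 offsets the view of the physical counter seen by the
 * EL1 physical timer, so a write requires the physical timer to be
 * recalculated.
 */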
3213 static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
3214                               uint64_t value)
3215 {
3216     ARMCPU *cpu = env_archcpu(env);
3217 
3218     trace_arm_gt_cntpoff_write(value);
3219     raw_write(env, ri, value);
3220     gt_recalc_timer(cpu, GTIMER_PHYS);
3221 }
3222 
3223 static const ARMCPRegInfo gen_timer_cntpoff_reginfo = {
3224     .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64,
3225     .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6,
3226     .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
3227     .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write,
3228     .nv2_redirect_offset = 0x1a8,
3229     .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2),
3230 };
3231 #else
3232 
3233 /*
3234  * In user-mode most of the generic timer registers are inaccessible;
3235  * however, modern kernels (4.12+) allow access to cntvct_el0.
3236  */
3237 
3238 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3239 {
3240     ARMCPU *cpu = env_archcpu(env);
3241 
3242     /*
3243      * Currently we have no support for QEMUTimer in linux-user, so we
3244      * can't call gt_get_countervalue(env); instead we directly
3245      * call the lower-level functions.
3246      */
3247     return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
3248 }
3249 
3250 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3251     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3252       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3253       .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3254       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3255       .resetfn = arm_gt_cntfrq_reset,
3256     },
3257     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3258       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3259       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3260       .readfn = gt_virt_cnt_read,
3261     },
3262 };
3263 
3264 /*
3265  * CNTVCTSS_EL0 has the same trap conditions as CNTVCT_EL0, so it is
3266  * also exposed to userspace by Linux.
3267  */
3268 static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
3269     { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
3270       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
3271       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3272       .readfn = gt_virt_cnt_read,
3273     },
3274 };
3275 
3276 #endif
3277 
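/*
 * PAR is written as-is on LPAE implementations; otherwise the bits
 * that are reserved in the 32-bit format (which differ between v7 and
 * earlier cores) are masked off.
 */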
3278 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3279 {
3280     if (arm_feature(env, ARM_FEATURE_LPAE)) {
3281         raw_write(env, ri, value);
3282     } else if (arm_feature(env, ARM_FEATURE_V7)) {
3283         raw_write(env, ri, value & 0xfffff6ff);
3284     } else {
3285         raw_write(env, ri, value & 0xfffff1ff);
3286     }
3287 }
3288 
3289 #ifndef CONFIG_USER_ONLY
3290 /* get_phys_addr() isn't present for user-mode-only targets */
3291 
3292 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3293                                  bool isread)
3294 {
3295     if (ri->opc2 & 4) {
3296         /*
3297          * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
3298          * Secure EL1 (which can only happen if EL3 is AArch64).
3299          * They are simply UNDEF if executed from NS EL1.
3300          * They function normally from EL2 or EL3.
3301          */
3302         if (arm_current_el(env) == 1) {
3303             if (arm_is_secure_below_el3(env)) {
3304                 if (env->cp15.scr_el3 & SCR_EEL2) {
3305                     return CP_ACCESS_TRAP_EL2;
3306                 }
3307                 return CP_ACCESS_TRAP_EL3;
3308             }
3309             return CP_ACCESS_TRAP_UNCATEGORIZED;
3310         }
3311     }
3312     return CP_ACCESS_OK;
3313 }
3314 
3315 #ifdef CONFIG_TCG
3316 static int par_el1_shareability(GetPhysAddrResult *res)
3317 {
3318     /*
3319      * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
3320      * memory -- see pseudocode PAREncodeShareability().
3321      */
3322     if (((res->cacheattrs.attrs & 0xf0) == 0) ||
3323         res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
3324         return 2;
3325     }
3326     return res->cacheattrs.shareability;
3327 }
3328 
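/*
 * Perform the translation for an AT (address translation) operation
 * and return the value to be written to the PAR: either the 64-bit
 * LPAE format or the 32-bit short-descriptor format, as appropriate.
 */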
3329 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
3330                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
3331                              ARMSecuritySpace ss)
3332 {
3333     bool ret;
3334     uint64_t par64;
3335     bool format64 = false;
3336     ARMMMUFaultInfo fi = {};
3337     GetPhysAddrResult res = {};
3338 
3339     /*
3340      * I_MXTJT: Granule protection checks are not performed on the final
3341      * address of a successful translation.  This is a translation, not
3342      * a memory reference, so "memop = none = 0".
3343      */
3344     ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
3345                                          mmu_idx, ss, &res, &fi);
3346 
3347     /*
3348      * ATS operations only do S1 or S1+S2 translations, so we never
3349      * have to deal with the ARMCacheAttrs format for S2 only.
3350      */
3351     assert(!res.cacheattrs.is_s2_format);
3352 
3353     if (ret) {
3354         /*
3355          * Some kinds of translation fault must cause exceptions rather
3356          * than being reported in the PAR.
3357          */
3358         int current_el = arm_current_el(env);
3359         int target_el;
3360         uint32_t syn, fsr, fsc;
3361         bool take_exc = false;
3362 
3363         if (fi.s1ptw && current_el == 1
3364             && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
3365             /*
3366              * Synchronous stage 2 fault on an access made as part of the
3367              * translation table walk for an AT S1E0* or AT S1E1* insn
3368              * executed from NS EL1. If this is a synchronous external abort
3369              * and SCR_EL3.EA == 1, then we take a synchronous external abort
3370              * to EL3. Otherwise the fault is taken as an exception to EL2,
3371              * and HPFAR_EL2 holds the faulting IPA.
3372              */
3373             if (fi.type == ARMFault_SyncExternalOnWalk &&
3374                 (env->cp15.scr_el3 & SCR_EA)) {
3375                 target_el = 3;
3376             } else {
3377                 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3378                 if (arm_is_secure_below_el3(env) && fi.s1ns) {
3379                     env->cp15.hpfar_el2 |= HPFAR_NS;
3380                 }
3381                 target_el = 2;
3382             }
3383             take_exc = true;
3384         } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3385             /*
3386              * Synchronous external aborts during a translation table walk
3387              * are taken as Data Abort exceptions.
3388              */
3389             if (fi.stage2) {
3390                 if (current_el == 3) {
3391                     target_el = 3;
3392                 } else {
3393                     target_el = 2;
3394                 }
3395             } else {
3396                 target_el = exception_target_el(env);
3397             }
3398             take_exc = true;
3399         }
3400 
3401         if (take_exc) {
3402             /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3403             if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3404                 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3405                 fsr = arm_fi_to_lfsc(&fi);
3406                 fsc = extract32(fsr, 0, 6);
3407             } else {
3408                 fsr = arm_fi_to_sfsc(&fi);
3409                 fsc = 0x3f;
3410             }
3411             /*
3412              * Report exception with ESR indicating a fault due to a
3413              * translation table walk for a cache maintenance instruction.
3414              */
3415             syn = syn_data_abort_no_iss(current_el == target_el, 0,
3416                                         fi.ea, 1, fi.s1ptw, 1, fsc);
3417             env->exception.vaddress = value;
3418             env->exception.fsr = fsr;
3419             raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3420         }
3421     }
3422 
3423     if (is_a64(env)) {
3424         format64 = true;
3425     } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3426         /*
3427          * ATS1Cxx:
3428          * * TTBCR.EAE determines whether the result is returned using the
3429          *   32-bit or the 64-bit PAR format
3430          * * Instructions executed in Hyp mode always use the 64-bit format
3431          *
3432          * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
3433          * * The Non-secure TTBCR.EAE bit is set to 1
3434          * * The implementation includes EL2, and the value of HCR.VM is 1
3435          *
3436          * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3437          *
3438          * ATS1Hx always uses the 64-bit format.
3439          */
3440         format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3441 
3442         if (arm_feature(env, ARM_FEATURE_EL2)) {
3443             if (mmu_idx == ARMMMUIdx_E10_0 ||
3444                 mmu_idx == ARMMMUIdx_E10_1 ||
3445                 mmu_idx == ARMMMUIdx_E10_1_PAN) {
3446                 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3447             } else {
3448                 format64 |= arm_current_el(env) == 2;
3449             }
3450         }
3451     }
3452 
3453     if (format64) {
3454         /* Create a 64-bit PAR */
3455         par64 = (1 << 11); /* LPAE bit always set */
3456         if (!ret) {
3457             par64 |= res.f.phys_addr & ~0xfffULL;
3458             if (!res.f.attrs.secure) {
3459                 par64 |= (1 << 9); /* NS */
3460             }
3461             par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
3462             par64 |= par_el1_shareability(&res) << 7; /* SH */
3463         } else {
3464             uint32_t fsr = arm_fi_to_lfsc(&fi);
3465 
3466             par64 |= 1; /* F */
3467             par64 |= (fsr & 0x3f) << 1; /* FS */
3468             if (fi.stage2) {
3469                 par64 |= (1 << 9); /* S */
3470             }
3471             if (fi.s1ptw) {
3472                 par64 |= (1 << 8); /* PTW */
3473             }
3474         }
3475     } else {
3476         /*
3477          * fsr is a DFSR/IFSR value for the short-descriptor
3478          * translation table format (with WnR always clear).
3479          * Convert it to a 32-bit PAR.
3480          */
3481         if (!ret) {
3482             /* We do not set any attribute bits in the PAR */
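            /*
             * lg_page_size == 24 is a 16MB v7 supersection: the 32-bit
             * PAR then holds PA[31:24] with the SS bit (bit 1) set.
             */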
3483             if (res.f.lg_page_size == 24
3484                 && arm_feature(env, ARM_FEATURE_V7)) {
3485                 par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
3486             } else {
3487                 par64 = res.f.phys_addr & 0xfffff000;
3488             }
3489             if (!res.f.attrs.secure) {
3490                 par64 |= (1 << 9); /* NS */
3491             }
3492         } else {
3493             uint32_t fsr = arm_fi_to_sfsc(&fi);
3494 
3495             par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3496                     ((fsr & 0xf) << 1) | 1;
3497         }
3498     }
3499     return par64;
3500 }
3501 #endif /* CONFIG_TCG */
3502 
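/*
 * Write handler for the AArch32 ATS1C* and ATS12NSO* operations:
 * select the translation regime from the opcode and the current EL,
 * perform the translation, and store the result in the banked PAR.
 */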
3503 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3504 {
3505 #ifdef CONFIG_TCG
3506     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3507     uint64_t par64;
3508     ARMMMUIdx mmu_idx;
3509     int el = arm_current_el(env);
3510     ARMSecuritySpace ss = arm_security_space(env);
3511 
3512     switch (ri->opc2 & 6) {
3513     case 0:
3514         /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
3515         switch (el) {
3516         case 3:
3517             if (ri->crm == 9 && arm_pan_enabled(env)) {
3518                 mmu_idx = ARMMMUIdx_E30_3_PAN;
3519             } else {
3520                 mmu_idx = ARMMMUIdx_E3;
3521             }
3522             break;
3523         case 2:
3524             g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3525             /* fall through */
3526         case 1:
3527             if (ri->crm == 9 && arm_pan_enabled(env)) {
3528                 mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
3529             } else {
3530                 mmu_idx = ARMMMUIdx_Stage1_E1;
3531             }
3532             break;
3533         default:
3534             g_assert_not_reached();
3535         }
3536         break;
3537     case 2:
3538         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3539         switch (el) {
3540         case 3:
3541             mmu_idx = ARMMMUIdx_E30_0;
3542             break;
3543         case 2:
3544             g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3545             mmu_idx = ARMMMUIdx_Stage1_E0;
3546             break;
3547         case 1:
3548             mmu_idx = ARMMMUIdx_Stage1_E0;
3549             break;
3550         default:
3551             g_assert_not_reached();
3552         }
3553         break;
3554     case 4:
3555         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3556         mmu_idx = ARMMMUIdx_E10_1;
3557         ss = ARMSS_NonSecure;
3558         break;
3559     case 6:
3560         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3561         mmu_idx = ARMMMUIdx_E10_0;
3562         ss = ARMSS_NonSecure;
3563         break;
3564     default:
3565         g_assert_not_reached();
3566     }
3567 
3568     par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
3569 
3570     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3571 #else
3572     /* Handled by hardware accelerator. */
3573     g_assert_not_reached();
3574 #endif /* CONFIG_TCG */
3575 }
3576 
3577 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3578                         uint64_t value)
3579 {
3580 #ifdef CONFIG_TCG
3581     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3582     uint64_t par64;
3583 
3584     /* There is no SecureEL2 for AArch32. */
3585     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
3586                          ARMSS_NonSecure);
3587 
3588     A32_BANKED_CURRENT_REG_SET(env, par, par64);
3589 #else
3590     /* Handled by hardware accelerator. */
3591     g_assert_not_reached();
3592 #endif /* CONFIG_TCG */
3593 }
3594 
3595 static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
3596                                      bool isread)
3597 {
3598     /*
3599      * R_NYXTL: the instruction is UNDEFINED if it applies to an Exception level
3600      * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
3601      * only happen when executing at EL3 because that combination also causes an
3602      * illegal exception return. We don't need to check FEAT_RME either, because
3603      * scr_write() ensures that the NSE bit is not set otherwise.
3604      */
3605     if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
3606         return CP_ACCESS_TRAP;
3607     }
3608     return CP_ACCESS_OK;
3609 }
3610 
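/*
 * AT S1E2R/S1E2W are trapped when executed at EL3 with EL2 disabled
 * in the current Security state (SCR_EL3.NS and SCR_EL3.EEL2 both 0).
 */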
3611 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3612                                      bool isread)
3613 {
3614     if (arm_current_el(env) == 3 &&
3615         !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
3616         return CP_ACCESS_TRAP;
3617     }
3618     return at_e012_access(env, ri, isread);
3619 }
3620 
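/* AT S1E0x and S1E1x operations trap to EL2 from EL1 when HCR_EL2.AT is set */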
3621 static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
3622                                       bool isread)
3623 {
3624     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
3625         return CP_ACCESS_TRAP_EL2;
3626     }
3627     return at_e012_access(env, ri, isread);
3628 }
3629 
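/*
 * Write handler for the AArch64 AT S1Ex and S12Ex operations; the
 * result is always written to PAR_EL1.
 */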
3630 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3631                         uint64_t value)
3632 {
3633 #ifdef CONFIG_TCG
3634     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3635     ARMMMUIdx mmu_idx;
3636     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
3637     bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
3638     bool for_el3 = false;
3639     ARMSecuritySpace ss;
3640 
3641     switch (ri->opc2 & 6) {
3642     case 0:
3643         switch (ri->opc1) {
3644         case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3645             if (ri->crm == 9 && arm_pan_enabled(env)) {
3646                 mmu_idx = regime_e20 ?
3647                           ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
3648             } else {
3649                 mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
3650             }
3651             break;
3652         case 4: /* AT S1E2R, AT S1E2W */
3653             mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
3654             break;
3655         case 6: /* AT S1E3R, AT S1E3W */
3656             mmu_idx = ARMMMUIdx_E3;
3657             for_el3 = true;
3658             break;
3659         default:
3660             g_assert_not_reached();
3661         }
3662         break;
3663     case 2: /* AT S1E0R, AT S1E0W */
3664         mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
3665         break;
3666     case 4: /* AT S12E1R, AT S12E1W */
3667         mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
3668         break;
3669     case 6: /* AT S12E0R, AT S12E0W */
3670         mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
3671         break;
3672     default:
3673         g_assert_not_reached();
3674     }
3675 
3676     ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
3677     env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
3678 #else
3679     /* Handled by hardware accelerator. */
3680     g_assert_not_reached();
3681 #endif /* CONFIG_TCG */
3682 }
3683 #endif
3684 
3685 /* Return basic MPU access permission bits.  */
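/* (Packs the low two bits of each 4-bit extended-format field.) */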
3686 static uint32_t simple_mpu_ap_bits(uint32_t val)
3687 {
3688     uint32_t ret;
3689     uint32_t mask;
3690     int i;
3691     ret = 0;
3692     mask = 3;
3693     for (i = 0; i < 16; i += 2) {
3694         ret |= (val >> i) & mask;
3695         mask <<= 2;
3696     }
3697     return ret;
3698 }
3699 
3700 /* Pad basic MPU access permission bits to extended format.  */
3701 static uint32_t extended_mpu_ap_bits(uint32_t val)
3702 {
3703     uint32_t ret;
3704     uint32_t mask;
3705     int i;
3706     ret = 0;
3707     mask = 3;
3708     for (i = 0; i < 16; i += 2) {
3709         ret |= (val & mask) << i;
3710         mask <<= 2;
3711     }
3712     return ret;
3713 }
3714 
3715 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3716                                  uint64_t value)
3717 {
3718     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3719 }
3720 
3721 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3722 {
3723     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3724 }
3725 
3726 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3727                                  uint64_t value)
3728 {
3729     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3730 }
3731 
3732 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3733 {
3734     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3735 }
3736 
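/*
 * PMSAv7 DRBAR/DRSR/DRACR accessors: fieldoffset holds a pointer to
 * the per-region array, which is indexed by the current RGNR value.
 */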
3737 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3738 {
3739     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3740 
3741     if (!u32p) {
3742         return 0;
3743     }
3744 
3745     u32p += env->pmsav7.rnr[M_REG_NS];
3746     return *u32p;
3747 }
3748 
3749 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3750                          uint64_t value)
3751 {
3752     ARMCPU *cpu = env_archcpu(env);
3753     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3754 
3755     if (!u32p) {
3756         return;
3757     }
3758 
3759     u32p += env->pmsav7.rnr[M_REG_NS];
3760     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3761     *u32p = value;
3762 }
3763 
3764 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3765                               uint64_t value)
3766 {
3767     ARMCPU *cpu = env_archcpu(env);
3768     uint32_t nrgs = cpu->pmsav7_dregion;
3769 
3770     if (value >= nrgs) {
3771         qemu_log_mask(LOG_GUEST_ERROR,
3772                       "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3773                       " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
3774         return;
3775     }
3776 
3777     raw_write(env, ri, value);
3778 }
3779 
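/*
 * PMSAv8 PRBAR/PRLAR accessors: these operate on the region currently
 * selected by PRSELR (pmsav7.rnr).
 */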
3780 static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3781                           uint64_t value)
3782 {
3783     ARMCPU *cpu = env_archcpu(env);
3784 
3785     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3786     env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
3787 }
3788 
3789 static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3790 {
3791     return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
3792 }
3793 
3794 static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3795                           uint64_t value)
3796 {
3797     ARMCPU *cpu = env_archcpu(env);
3798 
3799     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3800     env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
3801 }
3802 
3803 static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3804 {
3805     return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
3806 }
3807 
3808 static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3809                            uint64_t value)
3810 {
3811     ARMCPU *cpu = env_archcpu(env);
3812 
3813     /*
3814      * Ignore writes that would select an unimplemented region.
3815      * This is architecturally UNPREDICTABLE.
3816      */
3817     if (value >= cpu->pmsav7_dregion) {
3818         return;
3819     }
3820 
3821     env->pmsav7.rnr[M_REG_NS] = value;
3822 }
3823 
3824 static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3825                           uint64_t value)
3826 {
3827     ARMCPU *cpu = env_archcpu(env);
3828 
3829     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3830     env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
3831 }
3832 
3833 static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3834 {
3835     return env->pmsav8.hprbar[env->pmsav8.hprselr];
3836 }
3837 
3838 static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3839                           uint64_t value)
3840 {
3841     ARMCPU *cpu = env_archcpu(env);
3842 
3843     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3844     env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
3845 }
3846 
3847 static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3848 {
3849     return env->pmsav8.hprlar[env->pmsav8.hprselr];
3850 }
3851 
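/*
 * HPRENR is an aliased bitmap view of the region enable bits, i.e.
 * bit 0 of each of the first 32 HPRLAR registers.
 */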
3852 static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3853                           uint64_t value)
3854 {
3855     uint32_t n;
3856     uint32_t bit;
3857     ARMCPU *cpu = env_archcpu(env);
3858 
3859     /* Ignore writes to unimplemented regions */
3860     int rmax = MIN(cpu->pmsav8r_hdregion, 32);
3861     value &= MAKE_64BIT_MASK(0, rmax);
3862 
3863     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3864 
3865     /* Register alias is only valid for the first 32 indexes */
3866     for (n = 0; n < rmax; ++n) {
3867         bit = extract32(value, n, 1);
3868         env->pmsav8.hprlar[n] = deposit32(
3869                     env->pmsav8.hprlar[n], 0, 1, bit);
3870     }
3871 }
3872 
3873 static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3874 {
3875     uint32_t n;
3876     uint32_t result = 0x0;
3877     ARMCPU *cpu = env_archcpu(env);
3878 
3879     /* Register alias is only valid for first 32 indexes */
3880     for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
3881         if (env->pmsav8.hprlar[n] & 0x1) {
3882             result |= (0x1 << n);
3883         }
3884     }
3885     return result;
3886 }
3887 
3888 static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3889                            uint64_t value)
3890 {
3891     ARMCPU *cpu = env_archcpu(env);
3892 
3893     /*
3894      * Ignore writes that would select an unimplemented region.
3895      * This is architecturally UNPREDICTABLE.
3896      */
3897     if (value >= cpu->pmsav8r_hdregion) {
3898         return;
3899     }
3900 
3901     env->pmsav8.hprselr = value;
3902 }
3903 
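/*
 * PMSAv8-R direct region accessors: the region index is decoded from
 * the opcode fields rather than taken from PRSELR/HPRSELR, and
 * out-of-range regions are RAZ/WI.
 */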
3904 static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri,
3905                           uint64_t value)
3906 {
3907     ARMCPU *cpu = env_archcpu(env);
3908     uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
3909                     (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
3910 
3911     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3912 
3913     if (ri->opc1 & 4) {
3914         if (index >= cpu->pmsav8r_hdregion) {
3915             return;
3916         }
3917         if (ri->opc2 & 0x1) {
3918             env->pmsav8.hprlar[index] = value;
3919         } else {
3920             env->pmsav8.hprbar[index] = value;
3921         }
3922     } else {
3923         if (index >= cpu->pmsav7_dregion) {
3924             return;
3925         }
3926         if (ri->opc2 & 0x1) {
3927             env->pmsav8.rlar[M_REG_NS][index] = value;
3928         } else {
3929             env->pmsav8.rbar[M_REG_NS][index] = value;
3930         }
3931     }
3932 }
3933 
3934 static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri)
3935 {
3936     ARMCPU *cpu = env_archcpu(env);
3937     uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
3938                     (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
3939 
3940     if (ri->opc1 & 4) {
3941         if (index >= cpu->pmsav8r_hdregion) {
3942             return 0x0;
3943         }
3944         if (ri->opc2 & 0x1) {
3945             return env->pmsav8.hprlar[index];
3946         } else {
3947             return env->pmsav8.hprbar[index];
3948         }
3949     } else {
3950         if (index >= cpu->pmsav7_dregion) {
3951             return 0x0;
3952         }
3953         if (ri->opc2 & 0x1) {
3954             return env->pmsav8.rlar[M_REG_NS][index];
3955         } else {
3956             return env->pmsav8.rbar[M_REG_NS][index];
3957         }
3958     }
3959 }
3960 
3961 static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
3962     { .name = "PRBAR",
3963       .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
3964       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3965       .accessfn = access_tvm_trvm,
3966       .readfn = prbar_read, .writefn = prbar_write },
3967     { .name = "PRLAR",
3968       .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
3969       .access = PL1_RW, .type = ARM_CP_NO_RAW,
3970       .accessfn = access_tvm_trvm,
3971       .readfn = prlar_read, .writefn = prlar_write },
3972     { .name = "PRSELR", .resetvalue = 0,
3973       .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
3974       .access = PL1_RW, .accessfn = access_tvm_trvm,
3975       .writefn = prselr_write,
3976       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) },
3977     { .name = "HPRBAR", .resetvalue = 0,
3978       .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
3979       .access = PL2_RW, .type = ARM_CP_NO_RAW,
3980       .readfn = hprbar_read, .writefn = hprbar_write },
3981     { .name = "HPRLAR",
3982       .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
3983       .access = PL2_RW, .type = ARM_CP_NO_RAW,
3984       .readfn = hprlar_read, .writefn = hprlar_write },
3985     { .name = "HPRSELR", .resetvalue = 0,
3986       .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
3987       .access = PL2_RW,
3988       .writefn = hprselr_write,
3989       .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) },
3990     { .name = "HPRENR",
3991       .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
3992       .access = PL2_RW, .type = ARM_CP_NO_RAW,
3993       .readfn = hprenr_read, .writefn = hprenr_write },
3994 };
3995 
3996 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
3997     /*
3998      * Reset for all these registers is handled in arm_cpu_reset(),
3999      * because the PMSAv7 is also used by M-profile CPUs, which do
4000      * not register cpregs but still need the state to be reset.
4001      */
4002     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
4003       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4004       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
4005       .readfn = pmsav7_read, .writefn = pmsav7_write,
4006       .resetfn = arm_cp_reset_ignore },
4007     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
4008       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4009       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
4010       .readfn = pmsav7_read, .writefn = pmsav7_write,
4011       .resetfn = arm_cp_reset_ignore },
4012     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
4013       .access = PL1_RW, .type = ARM_CP_NO_RAW,
4014       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
4015       .readfn = pmsav7_read, .writefn = pmsav7_write,
4016       .resetfn = arm_cp_reset_ignore },
4017     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
4018       .access = PL1_RW,
4019       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
4020       .writefn = pmsav7_rgnr_write,
4021       .resetfn = arm_cp_reset_ignore },
4022 };
4023 
4024 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
4025     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4026       .access = PL1_RW, .type = ARM_CP_ALIAS,
4027       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
4028       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
4029     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4030       .access = PL1_RW, .type = ARM_CP_ALIAS,
4031       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
4032       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
4033     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
4034       .access = PL1_RW,
4035       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
4036       .resetvalue = 0, },
4037     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
4038       .access = PL1_RW,
4039       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
4040       .resetvalue = 0, },
4041     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4042       .access = PL1_RW,
4043       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
4044     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
4045       .access = PL1_RW,
4046       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
4047     /* Protection region base and size registers */
4048     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
4049       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4050       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
4051     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
4052       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4053       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
4054     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
4055       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4056       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
4057     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
4058       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4059       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
4060     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
4061       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4062       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
4063     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
4064       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4065       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
4066     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
4067       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4068       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
4069     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
4070       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4071       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
4072 };
4073 
4074 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4075                              uint64_t value)
4076 {
4077     ARMCPU *cpu = env_archcpu(env);
4078 
4079     if (!arm_feature(env, ARM_FEATURE_V8)) {
4080         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
4081             /*
4082              * Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
4083              * using the Long-descriptor translation table format.
4084              */
4085             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
4086         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
4087             /*
4088              * In an implementation that includes the Security Extensions
4089              * TTBCR has additional fields PD0 [4] and PD1 [5] for
4090              * Short-descriptor translation table format.
4091              */
4092             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
4093         } else {
4094             value &= TTBCR_N;
4095         }
4096     }
4097 
4098     if (arm_feature(env, ARM_FEATURE_LPAE)) {
4099         /*
4100          * With LPAE the TTBCR could result in a change of ASID
4101          * via the TTBCR.A1 bit, so do a TLB flush.
4102          */
4103         tlb_flush(CPU(cpu));
4104     }
4105     raw_write(env, ri, value);
4106 }
4107 
4108 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
4109                                uint64_t value)
4110 {
4111     ARMCPU *cpu = env_archcpu(env);
4112 
4113     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
4114     tlb_flush(CPU(cpu));
4115     raw_write(env, ri, value);
4116 }
4117 
4118 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4119                             uint64_t value)
4120 {
4121     /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
4122     if (cpreg_field_is_64bit(ri) &&
4123         extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4124         ARMCPU *cpu = env_archcpu(env);
4125         tlb_flush(CPU(cpu));
4126     }
4127     raw_write(env, ri, value);
4128 }
4129 
4130 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4131                                     uint64_t value)
4132 {
4133     /*
4134      * If we are running with the E2&0 regime, then an ASID is active.
4135      * Flush if that might be changing.  Note we're not checking
4136      * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
4137      * holds the active ASID, only checking the field that might.
4138      */
4139     if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
4140         (arm_hcr_el2_eff(env) & HCR_E2H)) {
4141         uint16_t mask = ARMMMUIdxBit_E20_2 |
4142                         ARMMMUIdxBit_E20_2_PAN |
4143                         ARMMMUIdxBit_E20_0;
4144         tlb_flush_by_mmuidx(env_cpu(env), mask);
4145     }
4146     raw_write(env, ri, value);
4147 }
4148 
4149 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4150                         uint64_t value)
4151 {
4152     ARMCPU *cpu = env_archcpu(env);
4153     CPUState *cs = CPU(cpu);
4154 
4155     /*
4156      * A change in the VMID of the stage 2 page table invalidates
4157      * the stage 2 and combined stage 1&2 TLBs (EL10_1 and EL10_0).
4158      */
4159     if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4160         tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
4161     }
4162     raw_write(env, ri, value);
4163 }
4164 
4165 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
4166     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4167       .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
4168       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
4169                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
4170     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4171       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4172       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
4173                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
4174     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
4175       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4176       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
4177                              offsetof(CPUARMState, cp15.dfar_ns) } },
4178     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
4179       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
4180       .access = PL1_RW, .accessfn = access_tvm_trvm,
4181       .fgt = FGT_FAR_EL1,
4182       .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1,
4183       .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
4184       .resetvalue = 0, },
4185 };
4186 
4187 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
4188     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
4189       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
4190       .access = PL1_RW, .accessfn = access_tvm_trvm,
4191       .fgt = FGT_ESR_EL1,
4192       .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1,
4193       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
4194     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
4195       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
4196       .access = PL1_RW, .accessfn = access_tvm_trvm,
4197       .fgt = FGT_TTBR0_EL1,
4198       .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1,
4199       .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
4200       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4201                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
4202     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
4203       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
4204       .access = PL1_RW, .accessfn = access_tvm_trvm,
4205       .fgt = FGT_TTBR1_EL1,
4206       .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1,
4207       .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
4208       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4209                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
4210     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
4211       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4212       .access = PL1_RW, .accessfn = access_tvm_trvm,
4213       .fgt = FGT_TCR_EL1,
4214       .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1,
4215       .writefn = vmsa_tcr_el12_write,
4216       .raw_writefn = raw_write,
4217       .resetvalue = 0,
4218       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
4219     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4220       .access = PL1_RW, .accessfn = access_tvm_trvm,
4221       .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
4222       .raw_writefn = raw_write,
4223       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
4224                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
4225 };
4226 
4227 /*
4228  * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
4229  * QEMU's TLBs or adjusting cached masks.
4230  */
4231 static const ARMCPRegInfo ttbcr2_reginfo = {
4232     .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
4233     .access = PL1_RW, .accessfn = access_tvm_trvm,
4234     .type = ARM_CP_ALIAS,
4235     .bank_fieldoffsets = {
4236         offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
4237         offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
4238     },
4239 };
4240 
4241 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
4242                                 uint64_t value)
4243 {
4244     env->cp15.c15_ticonfig = value & 0xe7;
4245     /* The OS_TYPE bit in this register changes the reported CPUID! */
4246     env->cp15.c0_cpuid = (value & (1 << 5)) ?
4247         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
4248 }
4249 
4250 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
4251                                 uint64_t value)
4252 {
4253     env->cp15.c15_threadid = value & 0xffff;
4254 }
4255 
4256 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
4257                            uint64_t value)
4258 {
4259     /* Wait-for-interrupt (deprecated) */
4260     cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
4261 }
4262 
4263 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
4264                                   uint64_t value)
4265 {
4266     /*
4267      * On OMAP there are registers indicating the max/min index of dcache lines
4268      * containing a dirty line; cache flush operations have to reset these.
4269      */
4270     env->cp15.c15_i_max = 0x000;
4271     env->cp15.c15_i_min = 0xff0;
4272 }
4273 
4274 static const ARMCPRegInfo omap_cp_reginfo[] = {
4275     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
4276       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
4277       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
4278       .resetvalue = 0, },
4279     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
4280       .access = PL1_RW, .type = ARM_CP_NOP },
4281     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
4282       .access = PL1_RW,
4283       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
4284       .writefn = omap_ticonfig_write },
4285     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
4286       .access = PL1_RW,
4287       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
4288     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
4289       .access = PL1_RW, .resetvalue = 0xff0,
4290       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
4291     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
4292       .access = PL1_RW,
4293       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
4294       .writefn = omap_threadid_write },
4295     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
4296       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4297       .type = ARM_CP_NO_RAW,
4298       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
4299     /*
4300      * TODO: Peripheral port remap register:
4301      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
4302      * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
4303      * when MMU is off.
4304      */
4305     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
4306       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
4307       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
4308       .writefn = omap_cachemaint_write },
4309     { .name = "C9", .cp = 15, .crn = 9,
4310       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
4311       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
4312 };
4313 
4314 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4315                               uint64_t value)
4316 {
4317     env->cp15.c15_cpar = value & 0x3fff;
4318 }
4319 
4320 static const ARMCPRegInfo xscale_cp_reginfo[] = {
4321     { .name = "XSCALE_CPAR",
4322       .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4323       .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
4324       .writefn = xscale_cpar_write, },
4325     { .name = "XSCALE_AUXCR",
4326       .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
4327       .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
4328       .resetvalue = 0, },
4329     /*
4330      * XScale-specific cache lockdown: since we have no cache we NOP these
4331      * and hope the guest does not really rely on cache behaviour.
4332      */
4333     { .name = "XSCALE_LOCK_ICACHE_LINE",
4334       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
4335       .access = PL1_W, .type = ARM_CP_NOP },
4336     { .name = "XSCALE_UNLOCK_ICACHE",
4337       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
4338       .access = PL1_W, .type = ARM_CP_NOP },
4339     { .name = "XSCALE_DCACHE_LOCK",
4340       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
4341       .access = PL1_RW, .type = ARM_CP_NOP },
4342     { .name = "XSCALE_UNLOCK_DCACHE",
4343       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
4344       .access = PL1_W, .type = ARM_CP_NOP },
4345 };
4346 
4347 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
4348     /*
4349      * RAZ/WI the whole crn=15 space, when we don't have a more specific
4350      * implementation of this implementation-defined space.
4351      * Ideally this should eventually disappear in favour of actually
4352      * implementing the correct behaviour for all cores.
4353      */
4354     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
4355       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4356       .access = PL1_RW,
4357       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
4358       .resetvalue = 0 },
4359 };
4360 
4361 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
4362     /* Cache status: RAZ because we have no cache so it's always clean */
4363     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
4364       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4365       .resetvalue = 0 },
4366 };
4367 
4368 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
4369     /* We never have a block transfer operation in progress */
4370     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
4371       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4372       .resetvalue = 0 },
4373     /* The cache ops themselves: these all NOP for QEMU */
4374     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4375       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4376     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4377       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4378     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4379       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4380     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4381       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4382     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4383       .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4384     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4385       .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4386 };
4387 
4388 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
4389     /*
4390      * The cache test-and-clean instructions always return (1 << 30)
4391      * to indicate that there are no dirty cache lines.
4392      */
4393     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4394       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4395       .resetvalue = (1 << 30) },
4396     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4397       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4398       .resetvalue = (1 << 30) },
4399 };
4400 
4401 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
4402     /* Ignore ReadBuffer accesses */
4403     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4404       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4405       .access = PL1_RW, .resetvalue = 0,
4406       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
4407 };
4408 
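     /*
      * MIDR reads from EL1 are subject to virtualization: when EL2 is
      * enabled the guest sees VPIDR_EL2 rather than the real register.
      */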
4409 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4410 {
4411     unsigned int cur_el = arm_current_el(env);
4412 
4413     if (arm_is_el2_enabled(env) && cur_el == 1) {
4414         return env->cp15.vpidr_el2;
4415     }
4416     return raw_read(env, ri);
4417 }
4418 
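     /*
      * Construct the physical MPIDR value: the affinity fields come
      * from the CPU's mp_affinity property, and bit 31 is RES1 when
      * the multiprocessing extensions are implemented.
      */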
4419 static uint64_t mpidr_read_val(CPUARMState *env)
4420 {
4421     ARMCPU *cpu = env_archcpu(env);
4422     uint64_t mpidr = cpu->mp_affinity;
4423 
4424     if (arm_feature(env, ARM_FEATURE_V7MP)) {
4425         mpidr |= (1U << 31);
4426         /*
4427          * Cores which are uniprocessor (non-coherent)
4428          * but still implement the MP extensions set
4429          * bit 30. (For instance, Cortex-R5).
4430          */
4431         if (cpu->mp_is_up) {
4432             mpidr |= (1U << 30);
4433         }
4434     }
4435     return mpidr;
4436 }
4437 
4438 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4439 {
4440     unsigned int cur_el = arm_current_el(env);
4441 
4442     if (arm_is_el2_enabled(env) && cur_el == 1) {
4443         return env->cp15.vmpidr_el2;
4444     }
4445     return mpidr_read_val(env);
4446 }
4447 
4448 static const ARMCPRegInfo lpae_cp_reginfo[] = {
4449     /* NOP AMAIR0/1 */
4450     { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4451       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
4452       .access = PL1_RW, .accessfn = access_tvm_trvm,
4453       .fgt = FGT_AMAIR_EL1,
4454       .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1,
4455       .type = ARM_CP_CONST, .resetvalue = 0 },
4456     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4457     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4458       .access = PL1_RW, .accessfn = access_tvm_trvm,
4459       .type = ARM_CP_CONST, .resetvalue = 0 },
4460     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4461       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4462       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4463                              offsetof(CPUARMState, cp15.par_ns)} },
4464     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4465       .access = PL1_RW, .accessfn = access_tvm_trvm,
4466       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4467       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4468                              offsetof(CPUARMState, cp15.ttbr0_ns) },
4469       .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
4470     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4471       .access = PL1_RW, .accessfn = access_tvm_trvm,
4472       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4473       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4474                              offsetof(CPUARMState, cp15.ttbr1_ns) },
4475       .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
4476 };
4477 
4478 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4479 {
4480     return vfp_get_fpcr(env);
4481 }
4482 
4483 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4484                             uint64_t value)
4485 {
4486     vfp_set_fpcr(env, value);
4487 }
4488 
4489 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4490 {
4491     return vfp_get_fpsr(env);
4492 }
4493 
4494 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4495                             uint64_t value)
4496 {
4497     vfp_set_fpsr(env, value);
4498 }
4499 
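     /*
      * DAIF is only accessible from EL0 when SCTLR_EL1.UMA (User Mask
      * Access) is set; otherwise the access traps.
      */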
4500 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4501                                        bool isread)
4502 {
4503     if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
4504         return CP_ACCESS_TRAP;
4505     }
4506     return CP_ACCESS_OK;
4507 }
4508 
4509 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4510                             uint64_t value)
4511 {
4512     env->daif = value & PSTATE_DAIF;
4513 }
4514 
4515 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4516 {
4517     return env->pstate & PSTATE_PAN;
4518 }
4519 
4520 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4521                            uint64_t value)
4522 {
4523     env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4524 }
4525 
4526 static const ARMCPRegInfo pan_reginfo = {
4527     .name = "PAN", .state = ARM_CP_STATE_AA64,
4528     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4529     .type = ARM_CP_NO_RAW, .access = PL1_RW,
4530     .readfn = aa64_pan_read, .writefn = aa64_pan_write
4531 };
4532 
4533 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4534 {
4535     return env->pstate & PSTATE_UAO;
4536 }
4537 
4538 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4539                            uint64_t value)
4540 {
4541     env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4542 }
4543 
4544 static const ARMCPRegInfo uao_reginfo = {
4545     .name = "UAO", .state = ARM_CP_STATE_AA64,
4546     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4547     .type = ARM_CP_NO_RAW, .access = PL1_RW,
4548     .readfn = aa64_uao_read, .writefn = aa64_uao_write
4549 };
4550 
4551 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
4552 {
4553     return env->pstate & PSTATE_DIT;
4554 }
4555 
4556 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
4557                            uint64_t value)
4558 {
4559     env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
4560 }
4561 
4562 static const ARMCPRegInfo dit_reginfo = {
4563     .name = "DIT", .state = ARM_CP_STATE_AA64,
4564     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
4565     .type = ARM_CP_NO_RAW, .access = PL0_RW,
4566     .readfn = aa64_dit_read, .writefn = aa64_dit_write
4567 };
4568 
4569 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
4570 {
4571     return env->pstate & PSTATE_SSBS;
4572 }
4573 
4574 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
4575                            uint64_t value)
4576 {
4577     env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
4578 }
4579 
4580 static const ARMCPRegInfo ssbs_reginfo = {
4581     .name = "SSBS", .state = ARM_CP_STATE_AA64,
4582     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
4583     .type = ARM_CP_NO_RAW, .access = PL0_RW,
4584     .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
4585 };
4586 
4587 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
4588                                               const ARMCPRegInfo *ri,
4589                                               bool isread)
4590 {
4591     /* Cache invalidate/clean to Point of Coherency or Persistence...  */
4592     switch (arm_current_el(env)) {
4593     case 0:
4594         /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4595         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4596             return CP_ACCESS_TRAP;
4597         }
4598         /* fall through */
4599     case 1:
4600         /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
4601         if (arm_hcr_el2_eff(env) & HCR_TPCP) {
4602             return CP_ACCESS_TRAP_EL2;
4603         }
4604         break;
4605     }
4606     return CP_ACCESS_OK;
4607 }
4608 
4609 static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags)
4610 {
4611     /* Cache invalidate/clean to Point of Unification... */
4612     switch (arm_current_el(env)) {
4613     case 0:
4614         /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4615         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4616             return CP_ACCESS_TRAP;
4617         }
4618         /* fall through */
4619     case 1:
4620         /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set.  */
4621         if (arm_hcr_el2_eff(env) & hcrflags) {
4622             return CP_ACCESS_TRAP_EL2;
4623         }
4624         break;
4625     }
4626     return CP_ACCESS_OK;
4627 }
4628 
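     /*
      * HCR_EL2.TPU traps all EL1 cache maintenance to the Point of
      * Unification; TICAB additionally traps IC IALLUIS, and TOCU the
      * other PoU operations.
      */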
4629 static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri,
4630                                    bool isread)
4631 {
4632     return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU);
4633 }
4634 
4635 static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
4636                                   bool isread)
4637 {
4638     return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
4639 }
4640 
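     /*
      * DC ZVA access checks: EL0 accesses are gated by SCTLR_ELx.DZE,
      * and both EL0 and EL1 accesses can be trapped to EL2 by
      * HCR_EL2.TDZ.
      */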
4641 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4642                                       bool isread)
4643 {
4644     int cur_el = arm_current_el(env);
4645 
4646     if (cur_el < 2) {
4647         uint64_t hcr = arm_hcr_el2_eff(env);
4648 
4649         if (cur_el == 0) {
4650             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4651                 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
4652                     return CP_ACCESS_TRAP_EL2;
4653                 }
4654             } else {
4655                 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4656                     return CP_ACCESS_TRAP;
4657                 }
4658                 if (hcr & HCR_TDZ) {
4659                     return CP_ACCESS_TRAP_EL2;
4660                 }
4661             }
4662         } else if (hcr & HCR_TDZ) {
4663             return CP_ACCESS_TRAP_EL2;
4664         }
4665     }
4666     return CP_ACCESS_OK;
4667 }
4668 
4669 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4670 {
4671     ARMCPU *cpu = env_archcpu(env);
4672     int dzp_bit = 1 << 4;
4673 
4674     /* DZP (bit 4) reads as 1 when DC ZVA accesses are prohibited */
4675     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
4676         dzp_bit = 0;
4677     }
4678     return cpu->dcz_blocksize | dzp_bit;
4679 }
4680 
4681 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4682                                     bool isread)
4683 {
4684     if (!(env->pstate & PSTATE_SP)) {
4685         /*
4686          * Access to SP_EL0 is undefined if it's being used as
4687          * the stack pointer.
4688          */
4689         return CP_ACCESS_TRAP_UNCATEGORIZED;
4690     }
4691     return CP_ACCESS_OK;
4692 }
4693 
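     /* SPSel: PSTATE.SP selects between SP_EL0 and the current EL's SP_ELx. */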
4694 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
4695 {
4696     return env->pstate & PSTATE_SP;
4697 }
4698 
4699 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4700 {
4701     update_spsel(env, val);
4702 }
4703 
4704 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4705                         uint64_t value)
4706 {
4707     ARMCPU *cpu = env_archcpu(env);
4708 
4709     if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4710         /* M bit is RAZ/WI for PMSA with no MPU implemented */
4711         value &= ~SCTLR_M;
4712     }
4713 
4714     /* ??? Lots of these bits are not implemented.  */
4715 
4716     if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
4717         if (ri->opc1 == 6) { /* SCTLR_EL3 */
4718             value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
4719         } else {
4720             value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
4721                        SCTLR_ATA0 | SCTLR_ATA);
4722         }
4723     }
4724 
4725     if (raw_read(env, ri) == value) {
4726         /*
4727          * Skip the TLB flush if nothing actually changed; Linux likes
4728          * to do a lot of pointless SCTLR writes.
4729          */
4730         return;
4731     }
4732 
4733     raw_write(env, ri, value);
4734 
4735     /* This may enable/disable the MMU, so do a TLB flush.  */
4736     tlb_flush(CPU(cpu));
4737 
4738     if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) {
4739         /*
4740          * Normally we would always end the TB on an SCTLR write; see the
4741          * comment in ARMCPRegInfo sctlr initialization below for why XScale
4742          * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
4743          * of hflags from the translator, so do it here.
4744          */
4745         arm_rebuild_hflags(env);
4746     }
4747 }
4748 
4749 static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4750                            uint64_t value)
4751 {
4752     /*
4753      * Some MDCR_EL3 bits affect whether PMU counters are running:
4754      * if we are trying to change any of those then we must
4755      * bracket this update with PMU start/finish calls.
4756      */
4757     bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;
4758 
4759     if (pmu_op) {
4760         pmu_op_start(env);
4761     }
4762     env->cp15.mdcr_el3 = value;
4763     if (pmu_op) {
4764         pmu_op_finish(env);
4765     }
4766 }
4767 
4768 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4769                        uint64_t value)
4770 {
4771     /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
4772     mdcr_el3_write(env, ri, value & SDCR_VALID_MASK);
4773 }
4774 
4775 static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4776                            uint64_t value)
4777 {
4778     /*
4779      * Some MDCR_EL2 bits affect whether PMU counters are running:
4780      * if we are trying to change any of those then we must
4781      * bracket this update with PMU start/finish calls.
4782      */
4783     bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;
4784 
4785     if (pmu_op) {
4786         pmu_op_start(env);
4787     }
4788     env->cp15.mdcr_el2 = value;
4789     if (pmu_op) {
4790         pmu_op_finish(env);
4791     }
4792 }
4793 
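     /*
      * For FEAT_NV, EL1 accesses to these registers trap to EL2 when
      * HCR_EL2.{NV,NV1} == {1,1} and NV2 is clear.
      */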
4794 static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
4795                                  bool isread)
4796 {
4797     if (arm_current_el(env) == 1) {
4798         uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2);
4799 
4800         if (hcr_nv == (HCR_NV | HCR_NV1)) {
4801             return CP_ACCESS_TRAP_EL2;
4802         }
4803     }
4804     return CP_ACCESS_OK;
4805 }
4806 
4807 #ifdef CONFIG_USER_ONLY
4808 /*
4809  * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
4810  * code to get around W^X restrictions, where one region is writable and the
4811  * other is executable.
4812  *
4813  * Since the executable region is never written to we cannot detect code
4814  * changes when running in user mode, and rely on the emulated JIT telling us
4815  * that the code has changed by executing this instruction.
4816  */
4817 static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
4818                           uint64_t value)
4819 {
4820     uint64_t icache_line_mask, start_address, end_address;
4821     const ARMCPU *cpu;
4822 
4823     cpu = env_archcpu(env);
4824 
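         /*
          * CTR_EL0.IminLine [3:0] is the log2 of the number of 4-byte
          * words in the smallest I-cache line, so the line size in
          * bytes is 4 << IminLine.
          */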
4825     icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
4826     start_address = value & ~icache_line_mask;
4827     end_address = value | icache_line_mask;
4828 
4829     mmap_lock();
4830 
4831     tb_invalidate_phys_range(start_address, end_address);
4832 
4833     mmap_unlock();
4834 }
4835 #endif
4836 
4837 static const ARMCPRegInfo v8_cp_reginfo[] = {
4838     /*
4839      * Minimal set of EL0-visible registers. This will need to be expanded
4840      * significantly for system emulation of AArch64 CPUs.
4841      */
4842     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
4843       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
4844       .access = PL0_RW, .type = ARM_CP_NZCV },
4845     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
4846       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
4847       .type = ARM_CP_NO_RAW,
4848       .access = PL0_RW, .accessfn = aa64_daif_access,
4849       .fieldoffset = offsetof(CPUARMState, daif),
4850       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
4851     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
4852       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
4853       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4854       .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
4855     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
4856       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
4857       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4858       .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
4859     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
4860       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
4861       .access = PL0_R, .type = ARM_CP_NO_RAW,
4862       .fgt = FGT_DCZID_EL0,
4863       .readfn = aa64_dczid_read },
4864     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
4865       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
4866       .access = PL0_W, .type = ARM_CP_DC_ZVA,
4867 #ifndef CONFIG_USER_ONLY
4868       /* Avoid overhead of an access check that always passes in user-mode */
4869       .accessfn = aa64_zva_access,
4870       .fgt = FGT_DCZVA,
4871 #endif
4872     },
4873     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
4874       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
4875       .access = PL1_R, .type = ARM_CP_CURRENTEL },
4876     /*
4877      * Instruction cache ops. All of these except `IC IVAU` NOP because we
4878      * don't emulate caches.
4879      */
4880     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
4881       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4882       .access = PL1_W, .type = ARM_CP_NOP,
4883       .fgt = FGT_ICIALLUIS,
4884       .accessfn = access_ticab },
4885     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
4886       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4887       .access = PL1_W, .type = ARM_CP_NOP,
4888       .fgt = FGT_ICIALLU,
4889       .accessfn = access_tocu },
4890     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
4891       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
4892       .access = PL0_W,
4893       .fgt = FGT_ICIVAU,
4894       .accessfn = access_tocu,
4895 #ifdef CONFIG_USER_ONLY
4896       .type = ARM_CP_NO_RAW,
4897       .writefn = ic_ivau_write
4898 #else
4899       .type = ARM_CP_NOP
4900 #endif
4901     },
4902     /* Cache ops: all NOPs since we don't emulate caches */
4903     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
4904       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4905       .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
4906       .fgt = FGT_DCIVAC,
4907       .type = ARM_CP_NOP },
4908     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
4909       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4910       .fgt = FGT_DCISW,
4911       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4912     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
4913       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
4914       .access = PL0_W, .type = ARM_CP_NOP,
4915       .fgt = FGT_DCCVAC,
4916       .accessfn = aa64_cacheop_poc_access },
4917     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
4918       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4919       .fgt = FGT_DCCSW,
4920       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4921     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
4922       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
4923       .access = PL0_W, .type = ARM_CP_NOP,
4924       .fgt = FGT_DCCVAU,
4925       .accessfn = access_tocu },
4926     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
4927       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
4928       .access = PL0_W, .type = ARM_CP_NOP,
4929       .fgt = FGT_DCCIVAC,
4930       .accessfn = aa64_cacheop_poc_access },
4931     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
4932       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4933       .fgt = FGT_DCCISW,
4934       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4935 #ifndef CONFIG_USER_ONLY
4936     /* 64 bit address translation operations */
4937     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
4938       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
4939       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4940       .fgt = FGT_ATS1E1R,
4941       .accessfn = at_s1e01_access, .writefn = ats_write64 },
4942     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
4943       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
4944       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4945       .fgt = FGT_ATS1E1W,
4946       .accessfn = at_s1e01_access, .writefn = ats_write64 },
4947     { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
4948       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
4949       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4950       .fgt = FGT_ATS1E0R,
4951       .accessfn = at_s1e01_access, .writefn = ats_write64 },
4952     { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
4953       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
4954       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4955       .fgt = FGT_ATS1E0W,
4956       .accessfn = at_s1e01_access, .writefn = ats_write64 },
4957     { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
4958       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
4959       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4960       .accessfn = at_e012_access, .writefn = ats_write64 },
4961     { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
4962       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
4963       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4964       .accessfn = at_e012_access, .writefn = ats_write64 },
4965     { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
4966       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
4967       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4968       .accessfn = at_e012_access, .writefn = ats_write64 },
4969     { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
4970       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
4971       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4972       .accessfn = at_e012_access, .writefn = ats_write64 },
4973     /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
4974     { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
4975       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
4976       .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4977       .writefn = ats_write64 },
4978     { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
4979       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
4980       .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4981       .writefn = ats_write64 },
4982     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
4983       .type = ARM_CP_ALIAS,
4984       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
4985       .access = PL1_RW, .resetvalue = 0,
4986       .fgt = FGT_PAR_EL1,
4987       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
4988       .writefn = par_write },
4989 #endif
4990     /* 32 bit cache operations */
4991     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4992       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
4993     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
4994       .type = ARM_CP_NOP, .access = PL1_W },
4995     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4996       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
4997     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
4998       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
4999     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
5000       .type = ARM_CP_NOP, .access = PL1_W },
5001     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
5002       .type = ARM_CP_NOP, .access = PL1_W },
5003     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5004       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5005     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5006       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5007     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
5008       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5009     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5010       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5011     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
5012       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5013     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
5014       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5015     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5016       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5017     /* MMU Domain access control / MPU write buffer control */
5018     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
5019       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
5020       .writefn = dacr_write, .raw_writefn = raw_write,
5021       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
5022                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
5023     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
5024       .type = ARM_CP_ALIAS,
5025       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
5026       .access = PL1_RW, .accessfn = access_nv1,
5027       .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
5028       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
5029     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
5030       .type = ARM_CP_ALIAS,
5031       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
5032       .access = PL1_RW, .accessfn = access_nv1,
5033       .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
5034       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
5035     /*
5036      * We rely on the access checks not allowing the guest to write to the
5037      * state field when SPSel indicates that it's being used as the stack
5038      * pointer.
5039      */
5040     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
5041       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
5042       .access = PL1_RW, .accessfn = sp_el0_access,
5043       .type = ARM_CP_ALIAS,
5044       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
5045     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
5046       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
5047       .nv2_redirect_offset = 0x240,
5048       .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
5049       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
5050     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
5051       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
5052       .type = ARM_CP_NO_RAW,
5053       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
5054     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
5055       .type = ARM_CP_ALIAS,
5056       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
5057       .access = PL2_RW,
5058       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
5059     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
5060       .type = ARM_CP_ALIAS,
5061       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
5062       .access = PL2_RW,
5063       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
5064     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
5065       .type = ARM_CP_ALIAS,
5066       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
5067       .access = PL2_RW,
5068       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
5069     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
5070       .type = ARM_CP_ALIAS,
5071       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
5072       .access = PL2_RW,
5073       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
5074     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
5075       .type = ARM_CP_IO,
5076       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
5077       .resetvalue = 0,
5078       .access = PL3_RW,
5079       .writefn = mdcr_el3_write,
5080       .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
5081     { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
5082       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
5083       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5084       .writefn = sdcr_write,
5085       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
5086 };
5087 
5088 /* These are present only when EL1 supports AArch32 */
5089 static const ARMCPRegInfo v8_aa32_el1_reginfo[] = {
5090     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
5091       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
5092       .access = PL2_RW,
5093       .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
5094       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
5095     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
5096       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
5097       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5098       .writefn = dacr_write, .raw_writefn = raw_write,
5099       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
5100     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
5101       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
5102       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5103       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
5104 };
5105 
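     /*
      * Write HCR_EL2, clearing bits that are RES0 for the features this
      * CPU implements. The caller passes in valid_mask any extra bits
      * to keep as-is, e.g. the untouched half of the register for the
      * 32-bit HCR/HCR2 writes.
      */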
5106 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
5107 {
5108     ARMCPU *cpu = env_archcpu(env);
5109 
5110     if (arm_feature(env, ARM_FEATURE_V8)) {
5111         valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
5112     } else {
5113         valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
5114     }
5115 
5116     if (arm_feature(env, ARM_FEATURE_EL3)) {
5117         valid_mask &= ~HCR_HCD;
5118     } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5119         /*
5120          * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
5121          * However, if we're using the SMC PSCI conduit then QEMU is
5122          * effectively acting like EL3 firmware and so the guest at
5123          * EL2 should retain the ability to prevent EL1 from being
5124          * able to make SMC calls into the ersatz firmware, so in
5125          * that case HCR.TSC should be read/write.
5126          */
5127         valid_mask &= ~HCR_TSC;
5128     }
5129 
5130     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5131         if (cpu_isar_feature(aa64_vh, cpu)) {
5132             valid_mask |= HCR_E2H;
5133         }
5134         if (cpu_isar_feature(aa64_ras, cpu)) {
5135             valid_mask |= HCR_TERR | HCR_TEA;
5136         }
5137         if (cpu_isar_feature(aa64_lor, cpu)) {
5138             valid_mask |= HCR_TLOR;
5139         }
5140         if (cpu_isar_feature(aa64_pauth, cpu)) {
5141             valid_mask |= HCR_API | HCR_APK;
5142         }
5143         if (cpu_isar_feature(aa64_mte, cpu)) {
5144             valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
5145         }
5146         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
5147             valid_mask |= HCR_ENSCXT;
5148         }
5149         if (cpu_isar_feature(aa64_fwb, cpu)) {
5150             valid_mask |= HCR_FWB;
5151         }
5152         if (cpu_isar_feature(aa64_rme, cpu)) {
5153             valid_mask |= HCR_GPF;
5154         }
5155         if (cpu_isar_feature(aa64_nv, cpu)) {
5156             valid_mask |= HCR_NV | HCR_NV1 | HCR_AT;
5157         }
5158         if (cpu_isar_feature(aa64_nv2, cpu)) {
5159             valid_mask |= HCR_NV2;
5160         }
5161     }
5162 
5163     if (cpu_isar_feature(any_evt, cpu)) {
5164         valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4;
5165     } else if (cpu_isar_feature(any_half_evt, cpu)) {
5166         valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4;
5167     }
5168 
5169     /* Clear RES0 bits.  */
5170     value &= valid_mask;
5171 
5172     /*
5173      * These bits change the MMU setup:
5174      * HCR_VM enables stage 2 translation
5175      * HCR_PTW forbids certain page-table setups
5176      * HCR_DC disables stage1 and enables stage2 translation
5177      * HCR_DCT enables tagging on (disabled) stage1 translation
5178      * HCR_FWB changes the interpretation of stage2 descriptor bits
5179      * HCR_NV and HCR_NV1 affect interpretation of descriptor bits
5180      */
5181     if ((env->cp15.hcr_el2 ^ value) &
5182         (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) {
5183         tlb_flush(CPU(cpu));
5184     }
5185     env->cp15.hcr_el2 = value;
5186 
5187     /*
5188      * Updates to VI and VF require us to update the status of
5189      * virtual interrupts, which are the logical OR of these bits
5190      * and the state of the input lines from the GIC. (This requires
5191      * that we have the BQL, which is done by marking the
5192      * reginfo structs as ARM_CP_IO.)
5193      * Note that if a write to HCR pends a VIRQ or VFIQ or VINMI or
5194      * VFNMI, it is never possible for it to be taken immediately
5195      * because VIRQ, VFIQ, VINMI and VFNMI are masked unless running
5196      * at EL0 or EL1, and HCR can only be written at EL2.
5197      */
5198     g_assert(bql_locked());
5199     arm_cpu_update_virq(cpu);
5200     arm_cpu_update_vfiq(cpu);
5201     arm_cpu_update_vserr(cpu);
5202     if (cpu_isar_feature(aa64_nmi, cpu)) {
5203         arm_cpu_update_vinmi(cpu);
5204         arm_cpu_update_vfnmi(cpu);
5205     }
5206 }
5207 
5208 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
5209 {
5210     do_hcr_write(env, value, 0);
5211 }
5212 
5213 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
5214                           uint64_t value)
5215 {
5216     /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
5217     value = deposit64(env->cp15.hcr_el2, 32, 32, value);
5218     do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
5219 }
5220 
5221 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
5222                          uint64_t value)
5223 {
5224     /* Handle HCR write, i.e. write to low half of HCR_EL2 */
5225     value = deposit64(env->cp15.hcr_el2, 0, 32, value);
5226     do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
5227 }
5228 
5229 /*
5230  * Return the effective value of HCR_EL2, at the given security state.
5231  * Bits that are not included here:
5232  * RW       (read from SCR_EL3.RW as needed)
5233  */
5234 uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space)
5235 {
5236     uint64_t ret = env->cp15.hcr_el2;
5237 
5238     assert(space != ARMSS_Root);
5239 
5240     if (!arm_is_el2_enabled_secstate(env, space)) {
5241         /*
5242          * "This register has no effect if EL2 is not enabled in the
5243          * current Security state".  This is ARMv8.4-SecEL2 speak for
5244          * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
5245          *
5246          * Prior to that, the language was "In an implementation that
5247          * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
5248          * as if this field is 0 for all purposes other than a direct
5249          * read or write access of HCR_EL2".  With lots of enumeration
5250          * on a per-field basis.  In current QEMU, this condition
5251          * is arm_is_secure_below_el3.
5252          *
5253          * Since the v8.4 language applies to the entire register, and
5254          * appears to be backward compatible, use that.
5255          */
5256         return 0;
5257     }
5258 
5259     /*
5260      * For a cpu that supports both aarch64 and aarch32, we can set bits
5261      * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
5262      * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
5263      */
5264     if (!arm_el_is_aa64(env, 2)) {
5265         uint64_t aa32_valid;
5266 
5267         /*
5268          * These bits are up-to-date as of ARMv8.6.
5269          * For HCR, it's easiest to list just the 2 bits that are invalid.
5270          * For HCR2, list those that are valid.
5271          */
5272         aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
5273         aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
5274                        HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
5275         ret &= aa32_valid;
5276     }
5277 
5278     if (ret & HCR_TGE) {
5279         /* These bits are up-to-date as of ARMv8.6.  */
5280         if (ret & HCR_E2H) {
5281             ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
5282                      HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
5283                      HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
5284                      HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
5285                      HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
5286                      HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
5287         } else {
5288             ret |= HCR_FMO | HCR_IMO | HCR_AMO;
5289         }
5290         ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
5291                  HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
5292                  HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
5293                  HCR_TLOR);
5294     }
5295 
5296     return ret;
5297 }
5298 
5299 uint64_t arm_hcr_el2_eff(CPUARMState *env)
5300 {
5301     if (arm_feature(env, ARM_FEATURE_M)) {
5302         return 0;
5303     }
5304     return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
5305 }
5306 
5307 /*
5308  * Corresponds to ARM pseudocode function ELIsInHost().
5309  */
5310 bool el_is_in_host(CPUARMState *env, int el)
5311 {
5312     uint64_t mask;
5313 
5314     /*
5315      * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
5316      * Perform the simplest bit tests first, and validate EL2 afterward.
5317      */
5318     if (el & 1) {
5319         return false; /* EL1 or EL3 */
5320     }
5321 
5322     /*
5323      * Note that hcr_write() checks isar_feature_aa64_vh(),
5324      * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
5325      */
5326     mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
5327     if ((env->cp15.hcr_el2 & mask) != mask) {
5328         return false;
5329     }
5330 
5331     /* TGE and/or E2H set: double check those bits are currently legal. */
5332     return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
5333 }
5334 
5335 static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
5336                        uint64_t value)
5337 {
5338     ARMCPU *cpu = env_archcpu(env);
5339     uint64_t valid_mask = 0;
5340 
5341     /* FEAT_MOPS adds MSCEn and MCE2 */
5342     if (cpu_isar_feature(aa64_mops, cpu)) {
5343         valid_mask |= HCRX_MSCEN | HCRX_MCE2;
5344     }
5345 
5346     /* FEAT_NMI adds TALLINT, VINMI and VFNMI */
5347     if (cpu_isar_feature(aa64_nmi, cpu)) {
5348         valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI;
5349     }
5350     /* FEAT_CMOW adds CMOW */
5351     if (cpu_isar_feature(aa64_cmow, cpu)) {
5352         valid_mask |= HCRX_CMOW;
5353     }
5354     /* FEAT_XS adds FGTnXS, FnXS */
5355     if (cpu_isar_feature(aa64_xs, cpu)) {
5356         valid_mask |= HCRX_FGTNXS | HCRX_FNXS;
5357     }
5358 
5359     /* Clear RES0 bits.  */
5360     env->cp15.hcrx_el2 = value & valid_mask;
5361 
5362     /*
5363      * Updates to VINMI and VFNMI require us to update the status of
5364      * the virtual NMIs, which are the logical OR of these bits
5365      * and the state of the input lines from the GIC. (This requires
5366      * that we have the BQL, which is done by marking the
5367      * reginfo structs as ARM_CP_IO.)
5368      * Note that if a write to HCRX pends a VINMI or VFNMI it is never
5369      * possible for it to be taken immediately, because VINMI and
5370      * VFNMI are masked unless running at EL0 or EL1, and HCRX
5371      * can only be written at EL2.
5372      */
5373     if (cpu_isar_feature(aa64_nmi, cpu)) {
5374         g_assert(bql_locked());
5375         arm_cpu_update_vinmi(cpu);
5376         arm_cpu_update_vfnmi(cpu);
5377     }
5378 }
5379 
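     /* Accesses to HCRX_EL2 from EL2 trap to EL3 while SCR_EL3.HXEn is 0. */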
5380 static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
5381                                   bool isread)
5382 {
5383     if (arm_current_el(env) == 2
5384         && arm_feature(env, ARM_FEATURE_EL3)
5385         && !(env->cp15.scr_el3 & SCR_HXEN)) {
5386         return CP_ACCESS_TRAP_EL3;
5387     }
5388     return CP_ACCESS_OK;
5389 }
5390 
5391 static const ARMCPRegInfo hcrx_el2_reginfo = {
5392     .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
5393     .type = ARM_CP_IO,
5394     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
5395     .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
5396     .nv2_redirect_offset = 0xa0,
5397     .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
5398 };
5399 
5400 /* Return the effective value of HCRX_EL2.  */
5401 uint64_t arm_hcrx_el2_eff(CPUARMState *env)
5402 {
5403     /*
5404      * The bits in this register behave as 0 for all purposes other than
5405      * direct reads of the register if SCR_EL3.HXEn is 0.
5406      * If EL2 is not enabled in the current security state, then each
5407      * bit may behave as if 0 or as if 1, depending on the bit.
5408      * For the moment, we treat the EL2-disabled case as taking
5409      * priority over the HXEn-disabled case. MSCEn (for FEAT_MOPS) is
5410      * currently the only bit we implement for which the two cases give
5411      * different answers.
5412      * This may need to be revisited for future bits.
5413      */
5414     if (!arm_is_el2_enabled(env)) {
5415         uint64_t hcrx = 0;
5416         if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
5417             /* MSCEn behaves as 1 if EL2 is not enabled */
5418             hcrx |= HCRX_MSCEN;
5419         }
5420         return hcrx;
5421     }
5422     if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
5423         return 0;
5424     }
5425     return env->cp15.hcrx_el2;
5426 }
5427 
5428 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5429                            uint64_t value)
5430 {
5431     /*
5432      * For A-profile AArch32 EL3, if NSACR.CP10
5433      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5434      */
5435     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5436         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5437         uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
5438         value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
5439     }
5440     env->cp15.cptr_el[2] = value;
5441 }
5442 
5443 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
5444 {
5445     /*
5446      * For A-profile AArch32 EL3, if NSACR.CP10
5447      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5448      */
5449     uint64_t value = env->cp15.cptr_el[2];
5450 
5451     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5452         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5453         value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
5454     }
5455     return value;
5456 }
5457 
5458 static const ARMCPRegInfo el2_cp_reginfo[] = {
5459     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
5460       .type = ARM_CP_IO,
5461       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5462       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5463       .nv2_redirect_offset = 0x78,
5464       .writefn = hcr_write, .raw_writefn = raw_write },
5465     { .name = "HCR", .state = ARM_CP_STATE_AA32,
5466       .type = ARM_CP_ALIAS | ARM_CP_IO,
5467       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5468       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5469       .writefn = hcr_writelow },
5470     { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5471       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
5472       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5473     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
5474       .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
5475       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
5476       .access = PL2_RW,
5477       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
5478     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5479       .type = ARM_CP_NV2_REDIRECT,
5480       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
5481       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
5482     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5483       .type = ARM_CP_NV2_REDIRECT,
5484       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5485       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
5486     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5487       .type = ARM_CP_ALIAS,
5488       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5489       .access = PL2_RW,
5490       .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
5491     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
5492       .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
5493       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
5494       .access = PL2_RW,
5495       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
5496     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5497       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
5498       .access = PL2_RW, .writefn = vbar_write,
5499       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
5500       .resetvalue = 0 },
5501     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
5502       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
5503       .access = PL3_RW, .type = ARM_CP_ALIAS,
5504       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
5505     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5506       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
5507       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
5508       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
5509       .readfn = cptr_el2_read, .writefn = cptr_el2_write },
5510     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
5511       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
5512       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
5513       .resetvalue = 0 },
5514     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
5515       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
5516       .access = PL2_RW, .type = ARM_CP_ALIAS,
5517       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
5518     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
5519       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
5520       .access = PL2_RW, .type = ARM_CP_CONST,
5521       .resetvalue = 0 },
5522     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
5523     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5524       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
5525       .access = PL2_RW, .type = ARM_CP_CONST,
5526       .resetvalue = 0 },
5527     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5528       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
5529       .access = PL2_RW, .type = ARM_CP_CONST,
5530       .resetvalue = 0 },
5531     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5532       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
5533       .access = PL2_RW, .type = ARM_CP_CONST,
5534       .resetvalue = 0 },
5535     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5536       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
5537       .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
5538       .raw_writefn = raw_write,
5539       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
5540     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
5541       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5542       .type = ARM_CP_ALIAS,
5543       .access = PL2_RW, .accessfn = access_el3_aa32ns,
5544       .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
5545     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
5546       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5547       .access = PL2_RW,
5548       .nv2_redirect_offset = 0x40,
5549       /* no .writefn needed as this can't cause an ASID change */
5550       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5551     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5552       .cp = 15, .opc1 = 6, .crm = 2,
5553       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5554       .access = PL2_RW, .accessfn = access_el3_aa32ns,
5555       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
5556       .writefn = vttbr_write, .raw_writefn = raw_write },
5557     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5558       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
5559       .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
5560       .nv2_redirect_offset = 0x20,
5561       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
5562     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5563       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
5564       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
5565       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
5566     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5567       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
5568       .access = PL2_RW, .resetvalue = 0,
5569       .nv2_redirect_offset = 0x90,
5570       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
5571     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5572       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
5573       .access = PL2_RW, .resetvalue = 0,
5574       .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
5575       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5576     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5577       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5578       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5579 #ifndef CONFIG_USER_ONLY
5580     /*
5581      * Unlike the other EL2-related AT operations, these must
5582      * UNDEF from EL3 if EL2 is not implemented, which is why we
5583      * define them here rather than with the rest of the AT ops.
5584      */
5585     { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
5586       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5587       .access = PL2_W, .accessfn = at_s1e2_access,
5588       .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
5589       .writefn = ats_write64 },
5590     { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
5591       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5592       .access = PL2_W, .accessfn = at_s1e2_access,
5593       .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
5594       .writefn = ats_write64 },
5595     /*
5596      * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
5597      * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
5598      * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
5599      * to behave as if SCR.NS was 1.
5600      */
5601     { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5602       .access = PL2_W,
5603       .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5604     { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5605       .access = PL2_W,
5606       .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5607     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5608       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
5609       /*
5610        * ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
5611        * reset values as IMPDEF. We choose to reset to 3 to comply with
5612        * both ARMv7 and ARMv8.
5613        */
5614       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3,
5615       .writefn = gt_cnthctl_write, .raw_writefn = raw_write,
5616       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
5617     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5618       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
5619       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
5620       .writefn = gt_cntvoff_write,
5621       .nv2_redirect_offset = 0x60,
5622       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5623     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5624       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
5625       .writefn = gt_cntvoff_write,
5626       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5627     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5628       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
5629       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5630       .type = ARM_CP_IO, .access = PL2_RW,
5631       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5632     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5633       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5634       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
5635       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5636     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5637       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
5638       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
5639       .resetfn = gt_hyp_timer_reset,
5640       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
5641     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5642       .type = ARM_CP_IO,
5643       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
5644       .access = PL2_RW,
5645       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
5646       .resetvalue = 0,
5647       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
5648 #endif
5649     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
5650       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5651       .access = PL2_RW, .accessfn = access_el3_aa32ns,
5652       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5653     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
5654       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5655       .access = PL2_RW,
5656       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5657     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5658       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
5659       .access = PL2_RW,
5660       .nv2_redirect_offset = 0x80,
5661       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
5662 };
5663 
5664 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
5665     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5666       .type = ARM_CP_ALIAS | ARM_CP_IO,
5667       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5668       .access = PL2_RW,
5669       .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
5670       .writefn = hcr_writehigh },
5671 };
5672 
5673 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
5674                                   bool isread)
5675 {
5676     if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
5677         return CP_ACCESS_OK;
5678     }
5679     return CP_ACCESS_TRAP_UNCATEGORIZED;
5680 }
5681 
5682 static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
5683     { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
5684       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
5685       .access = PL2_RW, .accessfn = sel2_access,
5686       .nv2_redirect_offset = 0x30,
5687       .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
5688     { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
5689       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
5690       .access = PL2_RW, .accessfn = sel2_access,
5691       .nv2_redirect_offset = 0x48,
5692       .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
5693 };
5694 
5695 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
5696                                    bool isread)
5697 {
5698     /*
5699      * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
5700      * At Secure EL1 it traps to EL3 or EL2.
5701      */
5702     if (arm_current_el(env) == 3) {
5703         return CP_ACCESS_OK;
5704     }
5705     if (arm_is_secure_below_el3(env)) {
5706         if (env->cp15.scr_el3 & SCR_EEL2) {
5707             return CP_ACCESS_TRAP_EL2;
5708         }
5709         return CP_ACCESS_TRAP_EL3;
5710     }
5711     /* Accesses from NS EL1 and NS EL2 allow reads but writes are UNDEF. */
5712     if (isread) {
5713         return CP_ACCESS_OK;
5714     }
5715     return CP_ACCESS_TRAP_UNCATEGORIZED;
5716 }
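
     /*
      * Summary table (illustrative, not in the original source) of the
      * outcomes nsacr_access() produces:
      *
      *   EL3                          -> OK (read/write)
      *   Secure EL1, SCR_EL3.EEL2 = 1 -> CP_ACCESS_TRAP_EL2
      *   Secure EL1, SCR_EL3.EEL2 = 0 -> CP_ACCESS_TRAP_EL3
      *   NS EL1/EL2, read             -> OK
      *   NS EL1/EL2, write            -> CP_ACCESS_TRAP_UNCATEGORIZED (UNDEF)
      */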
5717 
5718 static const ARMCPRegInfo el3_cp_reginfo[] = {
5719     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
5720       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
5721       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
5722       .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
5723     { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
5724       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
5725       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5726       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
5727       .writefn = scr_write, .raw_writefn = raw_write },
5728     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
5729       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
5730       .access = PL3_RW, .resetvalue = 0,
5731       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
5732     { .name = "SDER",
5733       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
5734       .access = PL3_RW, .resetvalue = 0,
5735       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
5736     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5737       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5738       .writefn = vbar_write, .resetvalue = 0,
5739       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
5740     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
5741       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
5742       .access = PL3_RW, .resetvalue = 0,
5743       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
5744     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
5745       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
5746       .access = PL3_RW,
5747       /* no .writefn needed as this can't cause an ASID change */
5748       .resetvalue = 0,
5749       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
5750     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
5751       .type = ARM_CP_ALIAS,
5752       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
5753       .access = PL3_RW,
5754       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
5755     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
5756       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
5757       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
5758     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
5759       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
5760       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
5761     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
5762       .type = ARM_CP_ALIAS,
5763       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
5764       .access = PL3_RW,
5765       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
5766     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
5767       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
5768       .access = PL3_RW, .writefn = vbar_write,
5769       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
5770       .resetvalue = 0 },
5771     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
5772       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
5773       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
5774       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
5775     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
5776       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
5777       .access = PL3_RW, .resetvalue = 0,
5778       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
5779     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
5780       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
5781       .access = PL3_RW, .type = ARM_CP_CONST,
5782       .resetvalue = 0 },
5783     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
5784       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
5785       .access = PL3_RW, .type = ARM_CP_CONST,
5786       .resetvalue = 0 },
5787     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
5788       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
5789       .access = PL3_RW, .type = ARM_CP_CONST,
5790       .resetvalue = 0 },
5791 };
5792 
5793 #ifndef CONFIG_USER_ONLY
5794 
5795 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
5796                                  bool isread)
5797 {
5798     if (arm_current_el(env) == 1) {
5799         /* This must be a FEAT_NV access */
5800         return CP_ACCESS_OK;
5801     }
5802     if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
5803         return CP_ACCESS_TRAP_UNCATEGORIZED;
5804     }
5805     return CP_ACCESS_OK;
5806 }
5807 
5808 static CPAccessResult access_el1nvpct(CPUARMState *env, const ARMCPRegInfo *ri,
5809                                       bool isread)
5810 {
5811     if (arm_current_el(env) == 1) {
5812         /* This must be a FEAT_NV access with NVx == 101 */
5813         if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) {
5814             return CP_ACCESS_TRAP_EL2;
5815         }
5816     }
5817     return e2h_access(env, ri, isread);
5818 }
5819 
5820 static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri,
5821                                       bool isread)
5822 {
5823     if (arm_current_el(env) == 1) {
5824         /* This must be a FEAT_NV access with NVx == 101 */
5825         if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) {
5826             return CP_ACCESS_TRAP_EL2;
5827         }
5828     }
5829     return e2h_access(env, ri, isread);
5830 }
5831 
5832 /* Test if system register redirection is to occur in the current state.  */
5833 static bool redirect_for_e2h(CPUARMState *env)
5834 {
5835     return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
5836 }
5837 
5838 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
5839 {
5840     CPReadFn *readfn;
5841 
5842     if (redirect_for_e2h(env)) {
5843         /* Switch to the saved EL2 version of the register.  */
5844         ri = ri->opaque;
5845         readfn = ri->readfn;
5846     } else {
5847         readfn = ri->orig_readfn;
5848     }
5849     if (readfn == NULL) {
5850         readfn = raw_read;
5851     }
5852     return readfn(env, ri);
5853 }
5854 
5855 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
5856                           uint64_t value)
5857 {
5858     CPWriteFn *writefn;
5859 
5860     if (redirect_for_e2h(env)) {
5861         /* Switch to the saved EL2 version of the register.  */
5862         ri = ri->opaque;
5863         writefn = ri->writefn;
5864     } else {
5865         writefn = ri->orig_writefn;
5866     }
5867     if (writefn == NULL) {
5868         writefn = raw_write;
5869     }
5870     writefn(env, ri, value);
5871 }
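
     /*
      * Worked example (illustrative, not from the original file): with
      * HCR_EL2.E2H = 1, executing at EL2
      *
      *     mrs x0, sctlr_el1
      *
      * lands in el2_e2h_read() via the redirected SCTLR_EL1 regdef;
      * redirect_for_e2h() is true, so ri->opaque -- wired up below in
      * define_arm_vh_e2h_redirects_aliases() to point at the SCTLR_EL2
      * regdef -- supplies the register actually read.  The same access
      * from EL1 falls back to orig_readfn and reads the real SCTLR_EL1.
      */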
5872 
5873 static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri)
5874 {
5875     /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
5876     return ri->orig_readfn(env, ri->opaque);
5877 }
5878 
5879 static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri,
5880                               uint64_t value)
5881 {
5882     /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
5883     return ri->orig_writefn(env, ri->opaque, value);
5884 }
5885 
5886 static CPAccessResult el2_e2h_e12_access(CPUARMState *env,
5887                                          const ARMCPRegInfo *ri,
5888                                          bool isread)
5889 {
5890     if (arm_current_el(env) == 1) {
5891         /*
5892          * This must be a FEAT_NV access (will either trap or redirect
5893          * to memory). None of the registers with _EL12 aliases want to
5894          * apply their trap controls for this kind of access, so don't
5895          * call the orig_accessfn or do the "UNDEF when E2H is 0" check.
5896          */
5897         return CP_ACCESS_OK;
5898     }
5899     /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */
5900     if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
5901         return CP_ACCESS_TRAP_UNCATEGORIZED;
5902     }
5903     if (ri->orig_accessfn) {
5904         return ri->orig_accessfn(env, ri->opaque, isread);
5905     }
5906     return CP_ACCESS_OK;
5907 }
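
     /*
      * Example (illustrative): the _EL12 aliases give EL2 a window onto
      * the real EL1 register once E2H redirection is active, so at EL2
      * with HCR_EL2.E2H = 1
      *
      *     mrs x0, sctlr_el12
      *
      * reads the genuine SCTLR_EL1 via el2_e2h_e12_read(), while the
      * plain SCTLR_EL1 encoding is redirected to SCTLR_EL2 as above.
      * With E2H clear, the _EL12 encodings UNDEF.
      */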
5908 
5909 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
5910 {
5911     struct E2HAlias {
5912         uint32_t src_key, dst_key, new_key;
5913         const char *src_name, *dst_name, *new_name;
5914         bool (*feature)(const ARMISARegisters *id);
5915     };
5916 
5917 #define K(op0, op1, crn, crm, op2) \
5918     ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
5919 
5920     static const struct E2HAlias aliases[] = {
5921         { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
5922           "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
5923         { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
5924           "CPACR", "CPTR_EL2", "CPACR_EL12" },
5925         { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
5926           "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
5927         { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
5928           "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
5929         { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
5930           "TCR_EL1", "TCR_EL2", "TCR_EL12" },
5931         { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
5932           "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
5933         { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
5934           "ELR_EL1", "ELR_EL2", "ELR_EL12" },
5935         { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
5936           "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
5937         { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
5938           "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
5939         { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
5940           "ESR_EL1", "ESR_EL2", "ESR_EL12" },
5941         { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
5942           "FAR_EL1", "FAR_EL2", "FAR_EL12" },
5943         { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
5944           "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
5945         { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
5946           "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
5947         { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
5948           "VBAR", "VBAR_EL2", "VBAR_EL12" },
5949         { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
5950           "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
5951         { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
5952           "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
5953 
5954         /*
5955          * Note that redirection of ZCR is mentioned in the description
5956          * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
5957          * not in the summary table.
5958          */
5959         { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
5960           "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
5961         { K(3, 0,  1, 2, 6), K(3, 4,  1, 2, 6), K(3, 5, 1, 2, 6),
5962           "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
5963 
5964         { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
5965           "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
5966 
5967         { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
5968           "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
5969           isar_feature_aa64_scxtnum },
5970 
5971         /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
5972         /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
5973     };
5974 #undef K
5975 
5976     size_t i;
5977 
5978     for (i = 0; i < ARRAY_SIZE(aliases); i++) {
5979         const struct E2HAlias *a = &aliases[i];
5980         ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
5981         bool ok;
5982 
5983         if (a->feature && !a->feature(&cpu->isar)) {
5984             continue;
5985         }
5986 
5987         src_reg = g_hash_table_lookup(cpu->cp_regs,
5988                                       (gpointer)(uintptr_t)a->src_key);
5989         dst_reg = g_hash_table_lookup(cpu->cp_regs,
5990                                       (gpointer)(uintptr_t)a->dst_key);
5991         g_assert(src_reg != NULL);
5992         g_assert(dst_reg != NULL);
5993 
5994         /* Cross-compare names to detect typos in the keys.  */
5995         g_assert(strcmp(src_reg->name, a->src_name) == 0);
5996         g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
5997 
5998         /* None of the core system registers use opaque; we will.  */
5999         g_assert(src_reg->opaque == NULL);
6000 
6001         /* Create alias before redirection so we dup the right data. */
6002         new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
6003 
6004         new_reg->name = a->new_name;
6005         new_reg->type |= ARM_CP_ALIAS;
6006         /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
6007         new_reg->access &= PL2_RW | PL3_RW;
6008         /* The new_reg op fields are as per new_key, not the target reg */
6009         new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK)
6010             >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
6011         new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK)
6012             >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
6013         new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK)
6014             >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
6015         new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK)
6016             >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
6017         new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK)
6018             >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
6019         new_reg->opaque = src_reg;
6020         new_reg->orig_readfn = src_reg->readfn ?: raw_read;
6021         new_reg->orig_writefn = src_reg->writefn ?: raw_write;
6022         new_reg->orig_accessfn = src_reg->accessfn;
6023         if (!new_reg->raw_readfn) {
6024             new_reg->raw_readfn = raw_read;
6025         }
6026         if (!new_reg->raw_writefn) {
6027             new_reg->raw_writefn = raw_write;
6028         }
6029         new_reg->readfn = el2_e2h_e12_read;
6030         new_reg->writefn = el2_e2h_e12_write;
6031         new_reg->accessfn = el2_e2h_e12_access;
6032 
6033         /*
6034          * If the _EL1 register is redirected to memory by FEAT_NV2,
6035          * then it shares the offset with the _EL12 register,
6036          * and which one is redirected depends on HCR_EL2.NV1.
6037          */
6038         if (new_reg->nv2_redirect_offset) {
6039             assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1);
6040             new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1;
6041             new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
6042         }
6043 
6044         ok = g_hash_table_insert(cpu->cp_regs,
6045                                  (gpointer)(uintptr_t)a->new_key, new_reg);
6046         g_assert(ok);
6047 
6048         src_reg->opaque = dst_reg;
6049         src_reg->orig_readfn = src_reg->readfn ?: raw_read;
6050         src_reg->orig_writefn = src_reg->writefn ?: raw_write;
6051         if (!src_reg->raw_readfn) {
6052             src_reg->raw_readfn = raw_read;
6053         }
6054         if (!src_reg->raw_writefn) {
6055             src_reg->raw_writefn = raw_write;
6056         }
6057         src_reg->readfn = el2_e2h_read;
6058         src_reg->writefn = el2_e2h_write;
6059     }
6060 }
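
     /*
      * Worked key example (illustrative; note K() is #undef'd above):
      * the SCTLR row uses K(3, 5, 1, 0, 0) as new_key, i.e. op0 = 3,
      * op1 = 5, crn = 1, crm = 0, op2 = 0 -- the SCTLR_EL12 encoding.
      * The crn/crm/opc* unpacking above recovers exactly those fields
      * from the key, so the new regdef always matches its hash slot.
      */
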
6061 #endif
6062 
6063 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
6064                                      bool isread)
6065 {
6066     int cur_el = arm_current_el(env);
6067 
6068     if (cur_el < 2) {
6069         uint64_t hcr = arm_hcr_el2_eff(env);
6070 
6071         if (cur_el == 0) {
6072             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
6073                 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
6074                     return CP_ACCESS_TRAP_EL2;
6075                 }
6076             } else {
6077                 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
6078                     return CP_ACCESS_TRAP;
6079                 }
6080                 if (hcr & HCR_TID2) {
6081                     return CP_ACCESS_TRAP_EL2;
6082                 }
6083             }
6084         } else if (hcr & HCR_TID2) {
6085             return CP_ACCESS_TRAP_EL2;
6086         }
6087     }
6088 
6093     return CP_ACCESS_OK;
6094 }
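
     /*
      * Decision summary (illustrative) for the CTR_EL0 checks above:
      *
      *   EL0, HCR_EL2.{E2H,TGE} = {1,1} -> trap to EL2 unless SCTLR_EL2.UCT
      *   EL0, otherwise                 -> trap unless SCTLR_EL1.UCT, then
      *                                     trap to EL2 if HCR_EL2.TID2
      *   EL1                            -> trap to EL2 if HCR_EL2.TID2
      *   EL2/EL3                        -> OK
      */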
6095 
6096 /*
6097  * Check for traps to RAS registers, which are controlled
6098  * by HCR_EL2.TERR and SCR_EL3.TERR.
6099  */
6100 static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
6101                                   bool isread)
6102 {
6103     int el = arm_current_el(env);
6104 
6105     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
6106         return CP_ACCESS_TRAP_EL2;
6107     }
6108     if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
6109         return CP_ACCESS_TRAP_EL3;
6110     }
6111     return CP_ACCESS_OK;
6112 }
6113 
6114 static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
6115 {
6116     int el = arm_current_el(env);
6117 
6118     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
6119         return env->cp15.vdisr_el2;
6120     }
6121     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
6122         return 0; /* RAZ/WI */
6123     }
6124     return env->cp15.disr_el1;
6125 }
6126 
6127 static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
6128 {
6129     int el = arm_current_el(env);
6130 
6131     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
6132         env->cp15.vdisr_el2 = val;
6133         return;
6134     }
6135     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
6136         return; /* RAZ/WI */
6137     }
6138     env->cp15.disr_el1 = val;
6139 }
6140 
6141 /*
6142  * Minimal RAS implementation with no Error Records.
6143  * Which means that all of the Error Record registers:
6144  *   ERXADDR_EL1
6145  *   ERXCTLR_EL1
6146  *   ERXFR_EL1
6147  *   ERXMISC0_EL1
6148  *   ERXMISC1_EL1
6149  *   ERXMISC2_EL1
6150  *   ERXMISC3_EL1
6151  *   ERXPFGCDN_EL1  (RASv1p1)
6152  *   ERXPFGCTL_EL1  (RASv1p1)
6153  *   ERXPFGF_EL1    (RASv1p1)
6154  *   ERXSTATUS_EL1
6155  * and
6156  *   ERRSELR_EL1
6157  * may generate UNDEFINED, which is the effect we get by not
6158  * listing them at all.
6159  *
6160  * These registers have fine-grained trap bits, but UNDEF-to-EL1
6161  * is higher priority than FGT-to-EL2 so we do not need to list them
6162  * in order to check for an FGT.
6163  */
6164 static const ARMCPRegInfo minimal_ras_reginfo[] = {
6165     { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
6166       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
6167       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
6168       .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
6169     { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
6170       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
6171       .access = PL1_R, .accessfn = access_terr,
6172       .fgt = FGT_ERRIDR_EL1,
6173       .type = ARM_CP_CONST, .resetvalue = 0 },
6174     { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
6175       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
6176       .nv2_redirect_offset = 0x500,
6177       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
6178     { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
6179       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
6180       .nv2_redirect_offset = 0x508,
6181       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
6182 };
6183 
6184 /*
6185  * Return the exception level to which exceptions should be taken
6186  * via SVEAccessTrap.  This excludes the check for whether the exception
6187  * should be routed through AArch64.AdvSIMDFPAccessTrap.  That can easily
6188  * be found by testing 0 < fp_exception_el < sve_exception_el.
6189  *
6190  * C.f. the ARM pseudocode function CheckSVEEnabled.  Note that the
6191  * pseudocode does *not* separate out the FP trap checks, but has them
6192  * all in one function.
6193  */
6194 int sve_exception_el(CPUARMState *env, int el)
6195 {
6196 #ifndef CONFIG_USER_ONLY
6197     if (el <= 1 && !el_is_in_host(env, el)) {
6198         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
6199         case 1:
6200             if (el != 0) {
6201                 break;
6202             }
6203             /* fall through */
6204         case 0:
6205         case 2:
6206             return 1;
6207         }
6208     }
6209 
6210     if (el <= 2 && arm_is_el2_enabled(env)) {
6211         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
6212         if (env->cp15.hcr_el2 & HCR_E2H) {
6213             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
6214             case 1:
6215                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
6216                     break;
6217                 }
6218                 /* fall through */
6219             case 0:
6220             case 2:
6221                 return 2;
6222             }
6223         } else {
6224             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
6225                 return 2;
6226             }
6227         }
6228     }
6229 
6230     /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
6231     if (arm_feature(env, ARM_FEATURE_EL3)
6232         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
6233         return 3;
6234     }
6235 #endif
6236     return 0;
6237 }
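
     /*
      * Usage sketch (illustrative; assumes fp_exception_el() has the
      * analogous contract described in the comment above):
      *
      *     int sve_el = sve_exception_el(env, el);
      *     int fp_el = fp_exception_el(env, el);
      *
      *     if (fp_el != 0 && fp_el < sve_el) {
      *         ... route via AArch64.AdvSIMDFPAccessTrap to fp_el ...
      *     } else if (sve_el != 0) {
      *         ... route via SVEAccessTrap to sve_el ...
      *     }
      */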
6238 
6239 /*
6240  * Return the exception level to which exceptions should be taken for SME.
6241  * C.f. the ARM pseudocode function CheckSMEAccess.
6242  */
6243 int sme_exception_el(CPUARMState *env, int el)
6244 {
6245 #ifndef CONFIG_USER_ONLY
6246     if (el <= 1 && !el_is_in_host(env, el)) {
6247         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
6248         case 1:
6249             if (el != 0) {
6250                 break;
6251             }
6252             /* fall through */
6253         case 0:
6254         case 2:
6255             return 1;
6256         }
6257     }
6258 
6259     if (el <= 2 && arm_is_el2_enabled(env)) {
6260         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
6261         if (env->cp15.hcr_el2 & HCR_E2H) {
6262             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
6263             case 1:
6264                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
6265                     break;
6266                 }
6267                 /* fall through */
6268             case 0:
6269             case 2:
6270                 return 2;
6271             }
6272         } else {
6273             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
6274                 return 2;
6275             }
6276         }
6277     }
6278 
6279     /* CPTR_EL3.  Since ESM is negative we must check for EL3.  */
6280     if (arm_feature(env, ARM_FEATURE_EL3)
6281         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6282         return 3;
6283     }
6284 #endif
6285     return 0;
6286 }
6287 
6288 /*
6289  * Given that SVE is enabled, return the vector length for EL.
6290  */
6291 uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
6292 {
6293     ARMCPU *cpu = env_archcpu(env);
6294     uint64_t *cr = env->vfp.zcr_el;
6295     uint32_t map = cpu->sve_vq.map;
6296     uint32_t len = ARM_MAX_VQ - 1;
6297 
6298     if (sm) {
6299         cr = env->vfp.smcr_el;
6300         map = cpu->sme_vq.map;
6301     }
6302 
6303     if (el <= 1 && !el_is_in_host(env, el)) {
6304         len = MIN(len, 0xf & (uint32_t)cr[1]);
6305     }
6306     if (el <= 2 && arm_is_el2_enabled(env)) {
6307         len = MIN(len, 0xf & (uint32_t)cr[2]);
6308     }
6309     if (arm_feature(env, ARM_FEATURE_EL3)) {
6310         len = MIN(len, 0xf & (uint32_t)cr[3]);
6311     }
6312 
6313     map &= MAKE_64BIT_MASK(0, len + 1);
6314     if (map != 0) {
6315         return 31 - clz32(map);
6316     }
6317 
6318     /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
6319     assert(sm);
6320     return ctz32(cpu->sme_vq.map);
6321 }
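
     /*
      * Worked example (illustrative): at EL1 (not in host) with EL2
      * enabled, EL3 absent, ZCR_EL1.LEN = 5 and ZCR_EL2.LEN = 3, len is
      * clamped to MIN(15, 5, 3) = 3, so map keeps only bits [3:0].  If
      * the CPU supports VQ 1, 2 and 4 (map = 0b1011), the result is
      * 31 - clz32(0b1011) = 3, i.e. VQ 4: 512-bit vectors.
      */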
6322 
6323 uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
6324 {
6325     return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
6326 }
6327 
6328 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6329                       uint64_t value)
6330 {
6331     int cur_el = arm_current_el(env);
6332     int old_len = sve_vqm1_for_el(env, cur_el);
6333     int new_len;
6334 
6335     /* Bits other than [3:0] are RAZ/WI.  */
6336     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
6337     raw_write(env, ri, value & 0xf);
6338 
6339     /*
6340      * Because we arrived here, we know both FP and SVE are enabled;
6341      * otherwise we would have trapped access to the ZCR_ELn register.
6342      */
6343     new_len = sve_vqm1_for_el(env, cur_el);
6344     if (new_len < old_len) {
6345         aarch64_sve_narrow_vq(env, new_len + 1);
6346     }
6347 }
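
     /*
      * Example (illustrative): at EL1 with an effective VQ of 4,
      *
      *     msr zcr_el1, xzr
      *
      * stores 0 into vfp.zcr_el[1]; sve_vqm1_for_el() then reports VQ 1
      * (assuming VQ 1 is in sve_vq.map), and aarch64_sve_narrow_vq(env, 1)
      * discards the Z/P register bits beyond the new 128-bit width.
      */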
6348 
6349 static const ARMCPRegInfo zcr_reginfo[] = {
6350     { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
6351       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
6352       .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
6353       .access = PL1_RW, .type = ARM_CP_SVE,
6354       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
6355       .writefn = zcr_write, .raw_writefn = raw_write },
6356     { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6357       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6358       .access = PL2_RW, .type = ARM_CP_SVE,
6359       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
6360       .writefn = zcr_write, .raw_writefn = raw_write },
6361     { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
6362       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
6363       .access = PL3_RW, .type = ARM_CP_SVE,
6364       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
6365       .writefn = zcr_write, .raw_writefn = raw_write },
6366 };
6367 
6368 #ifdef TARGET_AARCH64
6369 static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
6370                                     bool isread)
6371 {
6372     int el = arm_current_el(env);
6373 
6374     if (el == 0) {
6375         uint64_t sctlr = arm_sctlr(env, el);
6376         if (!(sctlr & SCTLR_EnTP2)) {
6377             return CP_ACCESS_TRAP;
6378         }
6379     }
6380     /* TODO: FEAT_FGT */
6381     if (el < 3
6382         && arm_feature(env, ARM_FEATURE_EL3)
6383         && !(env->cp15.scr_el3 & SCR_ENTP2)) {
6384         return CP_ACCESS_TRAP_EL3;
6385     }
6386     return CP_ACCESS_OK;
6387 }
6388 
6389 static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri,
6390                                       bool isread)
6391 {
6392     /* If EL1 this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */
6393     if (arm_current_el(env) == 2
6394         && arm_feature(env, ARM_FEATURE_EL3)
6395         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6396         return CP_ACCESS_TRAP_EL3;
6397     }
6398     return CP_ACCESS_OK;
6399 }
6400 
6401 static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri,
6402                                    bool isread)
6403 {
6404     if (arm_current_el(env) < 3
6405         && arm_feature(env, ARM_FEATURE_EL3)
6406         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6407         return CP_ACCESS_TRAP_EL3;
6408     }
6409     return CP_ACCESS_OK;
6410 }
6411 
6412 /* ResetSVEState */
6413 static void arm_reset_sve_state(CPUARMState *env)
6414 {
6415     memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
6416     /* Recall that FFR is stored as pregs[16]. */
6417     memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
6418     vfp_set_fpsr(env, 0x0800009f); /* FPSR value from ResetSVEState */
6419 }
6420 
6421 void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
6422 {
6423     uint64_t change = (env->svcr ^ new) & mask;
6424 
6425     if (change == 0) {
6426         return;
6427     }
6428     env->svcr ^= change;
6429 
6430     if (change & R_SVCR_SM_MASK) {
6431         arm_reset_sve_state(env);
6432     }
6433 
6434     /*
6435      * ResetSMEState.
6436      *
6437      * SetPSTATE_ZA zeros on enable and disable.  We can zero this only
6438      * on enable: while disabled, the storage is inaccessible and the
6439      * value does not matter.  We're not saving the storage in vmstate
6440      * when disabled either.
6441      */
6442     if (change & new & R_SVCR_ZA_MASK) {
6443         memset(env->zarray, 0, sizeof(env->zarray));
6444     }
6445 
6446     if (tcg_enabled()) {
6447         arm_rebuild_hflags(env);
6448     }
6449 }
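
     /*
      * Usage sketch (illustrative): SMSTART ZA amounts to
      *
      *     aarch64_set_svcr(env, R_SVCR_ZA_MASK, R_SVCR_ZA_MASK);
      *
      * ZA goes 0 -> 1, so "change & new & R_SVCR_ZA_MASK" is nonzero and
      * zarray is zeroed; SM is untouched, so SVE state survives.  Toggling
      * SM instead resets the SVE state via arm_reset_sve_state().
      */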
6450 
6451 static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6452                        uint64_t value)
6453 {
6454     aarch64_set_svcr(env, value, -1);
6455 }
6456 
6457 static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6458                        uint64_t value)
6459 {
6460     int cur_el = arm_current_el(env);
6461     int old_len = sve_vqm1_for_el(env, cur_el);
6462     int new_len;
6463 
6464     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
6465     value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
6466     raw_write(env, ri, value);
6467 
6468     /*
6469      * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
6470      * when SVL is widened (old values kept, or zeros).  Choose to keep the
6471      * current values for simplicity.  But for QEMU internals, we must still
6472      * apply the narrower SVL to the Zregs and Pregs -- see the comment
6473      * above aarch64_sve_narrow_vq.
6474      */
6475     new_len = sve_vqm1_for_el(env, cur_el);
6476     if (new_len < old_len) {
6477         aarch64_sve_narrow_vq(env, new_len + 1);
6478     }
6479 }
6480 
6481 static const ARMCPRegInfo sme_reginfo[] = {
6482     { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
6483       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
6484       .access = PL0_RW, .accessfn = access_tpidr2,
6485       .fgt = FGT_NTPIDR2_EL0,
6486       .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
6487     { .name = "SVCR", .state = ARM_CP_STATE_AA64,
6488       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
6489       .access = PL0_RW, .type = ARM_CP_SME,
6490       .fieldoffset = offsetof(CPUARMState, svcr),
6491       .writefn = svcr_write, .raw_writefn = raw_write },
6492     { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
6493       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
6494       .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
6495       .access = PL1_RW, .type = ARM_CP_SME,
6496       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
6497       .writefn = smcr_write, .raw_writefn = raw_write },
6498     { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
6499       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
6500       .access = PL2_RW, .type = ARM_CP_SME,
6501       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
6502       .writefn = smcr_write, .raw_writefn = raw_write },
6503     { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
6504       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
6505       .access = PL3_RW, .type = ARM_CP_SME,
6506       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
6507       .writefn = smcr_write, .raw_writefn = raw_write },
6508     { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
6509       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
6510       .access = PL1_R, .accessfn = access_aa64_tid1,
6511       /*
6512        * IMPLEMENTOR = 0 (software)
6513        * REVISION    = 0 (implementation defined)
6514        * SMPS        = 0 (no streaming execution priority in QEMU)
6515        * AFFINITY    = 0 (streaming sve mode not shared with other PEs)
6516        */
6517       .type = ARM_CP_CONST, .resetvalue = 0, },
6518     /*
6519      * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES 0.
6520      */
6521     { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
6522       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
6523       .access = PL1_RW, .accessfn = access_smpri,
6524       .fgt = FGT_NSMPRI_EL1,
6525       .type = ARM_CP_CONST, .resetvalue = 0 },
6526     { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
6527       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
6528       .nv2_redirect_offset = 0x1f8,
6529       .access = PL2_RW, .accessfn = access_smprimap,
6530       .type = ARM_CP_CONST, .resetvalue = 0 },
6531 };
6532 
6533 static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6534                         uint64_t value)
6535 {
6536     /* L0GPTSZ is RO; other bits not mentioned are RES0. */
6537     uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
6538         R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
6539         R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
6540 
6541     env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
6542 }
6543 
6544 static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
6545 {
6546     env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
6547                                      env_archcpu(env)->reset_l0gptsz);
6548 }
6549 
6550 static const ARMCPRegInfo rme_reginfo[] = {
6551     { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
6552       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
6553       .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
6554       .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
6555     { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
6556       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
6557       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
6558     { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
6559       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
6560       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
6561     { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
6562       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
6563       .access = PL3_W, .type = ARM_CP_NOP },
6564 };
6565 
6566 static const ARMCPRegInfo rme_mte_reginfo[] = {
6567     { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
6568       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
6569       .access = PL3_W, .type = ARM_CP_NOP },
6570 };
6571 
6572 static void aa64_allint_write(CPUARMState *env, const ARMCPRegInfo *ri,
6573                               uint64_t value)
6574 {
6575     env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT);
6576 }
6577 
6578 static uint64_t aa64_allint_read(CPUARMState *env, const ARMCPRegInfo *ri)
6579 {
6580     return env->pstate & PSTATE_ALLINT;
6581 }
6582 
6583 static CPAccessResult aa64_allint_access(CPUARMState *env,
6584                                          const ARMCPRegInfo *ri, bool isread)
6585 {
6586     if (!isread && arm_current_el(env) == 1 &&
6587         (arm_hcrx_el2_eff(env) & HCRX_TALLINT)) {
6588         return CP_ACCESS_TRAP_EL2;
6589     }
6590     return CP_ACCESS_OK;
6591 }
6592 
6593 static const ARMCPRegInfo nmi_reginfo[] = {
6594     { .name = "ALLINT", .state = ARM_CP_STATE_AA64,
6595       .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 4, .crm = 3,
6596       .type = ARM_CP_NO_RAW,
6597       .access = PL1_RW, .accessfn = aa64_allint_access,
6598       .fieldoffset = offsetof(CPUARMState, pstate),
6599       .writefn = aa64_allint_write, .readfn = aa64_allint_read,
6600       .resetfn = arm_cp_reset_ignore },
6601 };
6602 #endif /* TARGET_AARCH64 */
6603 
6604 static void define_pmu_regs(ARMCPU *cpu)
6605 {
6606     /*
6607      * v7 performance monitor control register: same implementor
6608      * field as main ID register, and we implement a configurable
6609      * number of event counters in addition to the cycle count register.
6610      */
6611     unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
6612     ARMCPRegInfo pmcr = {
6613         .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
6614         .access = PL0_RW,
6615         .fgt = FGT_PMCR_EL0,
6616         .type = ARM_CP_IO | ARM_CP_ALIAS,
6617         .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
6618         .accessfn = pmreg_access,
6619         .readfn = pmcr_read, .raw_readfn = raw_read,
6620         .writefn = pmcr_write, .raw_writefn = raw_write,
6621     };
6622     ARMCPRegInfo pmcr64 = {
6623         .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
6624         .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
6625         .access = PL0_RW, .accessfn = pmreg_access,
6626         .fgt = FGT_PMCR_EL0,
6627         .type = ARM_CP_IO,
6628         .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
6629         .resetvalue = cpu->isar.reset_pmcr_el0,
6630         .readfn = pmcr_read, .raw_readfn = raw_read,
6631         .writefn = pmcr_write, .raw_writefn = raw_write,
6632     };
6633 
6634     define_one_arm_cp_reg(cpu, &pmcr);
6635     define_one_arm_cp_reg(cpu, &pmcr64);
6636     for (i = 0; i < pmcrn; i++) {
6637         char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
6638         char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
6639         char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
6640         char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
6641         ARMCPRegInfo pmev_regs[] = {
6642             { .name = pmevcntr_name, .cp = 15, .crn = 14,
6643               .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6644               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6645               .fgt = FGT_PMEVCNTRN_EL0,
6646               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6647               .accessfn = pmreg_access_xevcntr },
6648             { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
6649               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
6650               .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
6651               .type = ARM_CP_IO,
6652               .fgt = FGT_PMEVCNTRN_EL0,
6653               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6654               .raw_readfn = pmevcntr_rawread,
6655               .raw_writefn = pmevcntr_rawwrite },
6656             { .name = pmevtyper_name, .cp = 15, .crn = 14,
6657               .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6658               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6659               .fgt = FGT_PMEVTYPERN_EL0,
6660               .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6661               .accessfn = pmreg_access },
6662             { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
6663               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
6664               .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6665               .fgt = FGT_PMEVTYPERN_EL0,
6666               .type = ARM_CP_IO,
6667               .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6668               .raw_writefn = pmevtyper_rawwrite },
6669         };
6670         define_arm_cp_regs(cpu, pmev_regs);
6671         g_free(pmevcntr_name);
6672         g_free(pmevcntr_el0_name);
6673         g_free(pmevtyper_name);
6674         g_free(pmevtyper_el0_name);
6675     }
6676     if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
6677         ARMCPRegInfo v81_pmu_regs[] = {
6678             { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
6679               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
6680               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6681               .fgt = FGT_PMCEIDN_EL0,
6682               .resetvalue = extract64(cpu->pmceid0, 32, 32) },
6683             { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
6684               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
6685               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6686               .fgt = FGT_PMCEIDN_EL0,
6687               .resetvalue = extract64(cpu->pmceid1, 32, 32) },
6688         };
6689         define_arm_cp_regs(cpu, v81_pmu_regs);
6690     }
6691     if (cpu_isar_feature(any_pmuv3p4, cpu)) {
6692         static const ARMCPRegInfo v84_pmmir = {
6693             .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
6694             .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
6695             .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6696             .fgt = FGT_PMMIR_EL1,
6697             .resetvalue = 0
6698         };
6699         define_one_arm_cp_reg(cpu, &v84_pmmir);
6700     }
6701 }
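
     /*
      * Worked encoding example (illustrative): the loop above packs the
      * counter index n as crm = base | (n >> 3) and opc2 = n & 7, with
      * base 8 for PMEVCNTRn and 12 for PMEVTYPERn.  So PMEVCNTR10_EL0
      * gets crm = 8 | 1 = 9, opc2 = 2, and PMEVTYPER10_EL0 gets
      * crm = 12 | 1 = 13, opc2 = 2, matching the architected encodings.
      */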
6702 
6703 #ifndef CONFIG_USER_ONLY
6704 /*
6705  * We don't know until after realize whether there's a GICv3
6706  * attached, and that is what registers the gicv3 sysregs.
6707  * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1/ID_AA64PFR0_EL1
6708  * at runtime.
6709  */
6710 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
6711 {
6712     ARMCPU *cpu = env_archcpu(env);
6713     uint64_t pfr1 = cpu->isar.id_pfr1;
6714 
6715     if (env->gicv3state) {
6716         pfr1 |= 1 << 28;
6717     }
6718     return pfr1;
6719 }
6720 
6721 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
6722 {
6723     ARMCPU *cpu = env_archcpu(env);
6724     uint64_t pfr0 = cpu->isar.id_aa64pfr0;
6725 
6726     if (env->gicv3state) {
6727         pfr0 |= 1 << 24;
6728     }
6729     return pfr0;
6730 }
6731 #endif
6732 
6733 /*
6734  * Shared logic between LORID and the rest of the LOR* registers.
6735  * Secure state exclusion has already been dealt with.
6736  */
6737 static CPAccessResult access_lor_ns(CPUARMState *env,
6738                                     const ARMCPRegInfo *ri, bool isread)
6739 {
6740     int el = arm_current_el(env);
6741 
6742     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
6743         return CP_ACCESS_TRAP_EL2;
6744     }
6745     if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
6746         return CP_ACCESS_TRAP_EL3;
6747     }
6748     return CP_ACCESS_OK;
6749 }
6750 
6751 static CPAccessResult access_lor_other(CPUARMState *env,
6752                                        const ARMCPRegInfo *ri, bool isread)
6753 {
6754     if (arm_is_secure_below_el3(env)) {
6755         /* Access denied in secure mode.  */
6756         return CP_ACCESS_TRAP;
6757     }
6758     return access_lor_ns(env, ri, isread);
6759 }
6760 
6761 /*
6762  * A trivial implementation of ARMv8.1-LOR leaves all of these
6763  * registers fixed at 0, which indicates that there are zero
6764  * supported Limited Ordering regions.
6765  */
6766 static const ARMCPRegInfo lor_reginfo[] = {
6767     { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6768       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
6769       .access = PL1_RW, .accessfn = access_lor_other,
6770       .fgt = FGT_LORSA_EL1,
6771       .type = ARM_CP_CONST, .resetvalue = 0 },
6772     { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6773       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
6774       .access = PL1_RW, .accessfn = access_lor_other,
6775       .fgt = FGT_LOREA_EL1,
6776       .type = ARM_CP_CONST, .resetvalue = 0 },
6777     { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
6778       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
6779       .access = PL1_RW, .accessfn = access_lor_other,
6780       .fgt = FGT_LORN_EL1,
6781       .type = ARM_CP_CONST, .resetvalue = 0 },
6782     { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
6783       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
6784       .access = PL1_RW, .accessfn = access_lor_other,
6785       .fgt = FGT_LORC_EL1,
6786       .type = ARM_CP_CONST, .resetvalue = 0 },
6787     { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
6788       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
6789       .access = PL1_R, .accessfn = access_lor_ns,
6790       .fgt = FGT_LORID_EL1,
6791       .type = ARM_CP_CONST, .resetvalue = 0 },
6792 };
6793 
6794 #ifdef TARGET_AARCH64
6795 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
6796                                    bool isread)
6797 {
6798     int el = arm_current_el(env);
6799 
6800     if (el < 2 &&
6801         arm_is_el2_enabled(env) &&
6802         !(arm_hcr_el2_eff(env) & HCR_APK)) {
6803         return CP_ACCESS_TRAP_EL2;
6804     }
6805     if (el < 3 &&
6806         arm_feature(env, ARM_FEATURE_EL3) &&
6807         !(env->cp15.scr_el3 & SCR_APK)) {
6808         return CP_ACCESS_TRAP_EL3;
6809     }
6810     return CP_ACCESS_OK;
6811 }
6812 
6813 static const ARMCPRegInfo pauth_reginfo[] = {
6814     { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6815       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
6816       .access = PL1_RW, .accessfn = access_pauth,
6817       .fgt = FGT_APDAKEY,
6818       .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
6819     { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6820       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
6821       .access = PL1_RW, .accessfn = access_pauth,
6822       .fgt = FGT_APDAKEY,
6823       .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
6824     { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6825       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
6826       .access = PL1_RW, .accessfn = access_pauth,
6827       .fgt = FGT_APDBKEY,
6828       .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
6829     { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6830       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
6831       .access = PL1_RW, .accessfn = access_pauth,
6832       .fgt = FGT_APDBKEY,
6833       .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
6834     { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6835       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
6836       .access = PL1_RW, .accessfn = access_pauth,
6837       .fgt = FGT_APGAKEY,
6838       .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
6839     { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6840       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
6841       .access = PL1_RW, .accessfn = access_pauth,
6842       .fgt = FGT_APGAKEY,
6843       .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
6844     { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6845       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
6846       .access = PL1_RW, .accessfn = access_pauth,
6847       .fgt = FGT_APIAKEY,
6848       .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
6849     { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6850       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
6851       .access = PL1_RW, .accessfn = access_pauth,
6852       .fgt = FGT_APIAKEY,
6853       .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
6854     { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6855       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
6856       .access = PL1_RW, .accessfn = access_pauth,
6857       .fgt = FGT_APIBKEY,
6858       .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
6859     { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6860       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
6861       .access = PL1_RW, .accessfn = access_pauth,
6862       .fgt = FGT_APIBKEY,
6863       .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
6864 };
6865 
6866 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
6867 {
6868     Error *err = NULL;
6869     uint64_t ret;
6870 
6871     /* Success sets NZCV = 0000.  */
6872     env->NF = env->CF = env->VF = 0, env->ZF = 1;
6873 
6874     if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
6875         /*
6876          * ??? Failed, for unknown reasons in the crypto subsystem.
6877          * The best we can do is log the reason and return the
6878          * timed-out indication to the guest.  There is no reason
6879          * we know to expect this failure to be transitory, so the
6880          * guest may well hang retrying the operation.
6881          */
6882         qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
6883                       ri->name, error_get_pretty(err));
6884         error_free(err);
6885 
6886         env->ZF = 0; /* NZCV = 0100 */
6887         return 0;
6888     }
6889     return ret;
6890 }
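
     /*
      * Note (illustrative): QEMU stores the Z flag inverted -- Z reads as
      * set exactly when env->ZF == 0.  So NZCV = 0000 (success) is
      * NF = CF = VF = 0 with ZF nonzero, while the failure path encodes
      * NZCV = 0100 by clearing ZF.
      */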
6891 
6892 /* We do not support re-seeding, so the two registers operate the same.  */
6893 static const ARMCPRegInfo rndr_reginfo[] = {
6894     { .name = "RNDR", .state = ARM_CP_STATE_AA64,
6895       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
6896       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
6897       .access = PL0_R, .readfn = rndr_readfn },
6898     { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
6899       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
6900       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
6901       .access = PL0_R, .readfn = rndr_readfn },
6902 };
6903 
6904 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
6905                           uint64_t value)
6906 {
6907 #ifdef CONFIG_TCG
6908     ARMCPU *cpu = env_archcpu(env);
6909     /* CTR_EL0 System register -> DminLine, bits [19:16] */
6910     uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
6911     uint64_t vaddr_in = (uint64_t) value;
6912     uint64_t vaddr = vaddr_in & ~(dline_size - 1);
6913     void *haddr;
6914     int mem_idx = arm_env_mmu_index(env);
6915 
6916     /* This won't be crossing page boundaries */
6917     haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
6918     if (haddr) {
6919 #ifndef CONFIG_USER_ONLY
6920 
6921         ram_addr_t offset;
6922         MemoryRegion *mr;
6923 
6924         /* RCU lock is already being held */
6925         mr = memory_region_from_host(haddr, &offset);
6926 
6927         if (mr) {
6928             memory_region_writeback(mr, offset, dline_size);
6929         }
6930 #endif /*CONFIG_USER_ONLY*/
6931     }
6932 #else
6933     /* Handled by hardware accelerator. */
6934     g_assert_not_reached();
6935 #endif /* CONFIG_TCG */
6936 }
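
     /*
      * Worked example (illustrative): CTR_EL0.DminLine (bits [19:16]) is
      * the log2 of the smallest D-cache line size in 4-byte words, so
      * DminLine = 4 gives dline_size = 4 << 4 = 64 bytes, and vaddr_in is
      * rounded down to a 64-byte boundary before the writeback.
      */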
6937 
6938 static const ARMCPRegInfo dcpop_reg[] = {
6939     { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
6940       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
6941       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
6942       .fgt = FGT_DCCVAP,
6943       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
6944 };
6945 
6946 static const ARMCPRegInfo dcpodp_reg[] = {
6947     { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
6948       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
6949       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
6950       .fgt = FGT_DCCVADP,
6951       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
6952 };
6953 
6954 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
6955                                        bool isread)
6956 {
6957     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
6958         return CP_ACCESS_TRAP_EL2;
6959     }
6960 
6961     return CP_ACCESS_OK;
6962 }
6963 
6964 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
6965                                  bool isread)
6966 {
6967     int el = arm_current_el(env);
6968     if (el < 2 && arm_is_el2_enabled(env)) {
6969         uint64_t hcr = arm_hcr_el2_eff(env);
6970         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
6971             return CP_ACCESS_TRAP_EL2;
6972         }
6973     }
6974     if (el < 3 &&
6975         arm_feature(env, ARM_FEATURE_EL3) &&
6976         !(env->cp15.scr_el3 & SCR_ATA)) {
6977         return CP_ACCESS_TRAP_EL3;
6978     }
6979     return CP_ACCESS_OK;
6980 }
6981 
6982 static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri,
6983                                       bool isread)
6984 {
6985     CPAccessResult nv1 = access_nv1(env, ri, isread);
6986 
6987     if (nv1 != CP_ACCESS_OK) {
6988         return nv1;
6989     }
6990     return access_mte(env, ri, isread);
6991 }
6992 
6993 static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri,
6994                                       bool isread)
6995 {
6996     /*
6997      * TFSR_EL2: similar to generic access_mte(), but we need to
6998      * account for FEAT_NV. At EL1 this must be a FEAT_NV access;
6999      * if NV2 is enabled then we will redirect this to TFSR_EL1
7000      * after doing the HCR and SCR ATA traps; otherwise this will
7001      * be a trap to EL2 and the HCR/SCR traps do not apply.
7002      */
7003     int el = arm_current_el(env);
7004 
7005     if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) {
7006         return CP_ACCESS_OK;
7007     }
7008     if (el < 2 && arm_is_el2_enabled(env)) {
7009         uint64_t hcr = arm_hcr_el2_eff(env);
7010         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
7011             return CP_ACCESS_TRAP_EL2;
7012         }
7013     }
7014     if (el < 3 &&
7015         arm_feature(env, ARM_FEATURE_EL3) &&
7016         !(env->cp15.scr_el3 & SCR_ATA)) {
7017         return CP_ACCESS_TRAP_EL3;
7018     }
7019     return CP_ACCESS_OK;
7020 }
7021 
7022 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
7023 {
7024     return env->pstate & PSTATE_TCO;
7025 }
7026 
7027 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
7028 {
7029     env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
7030 }
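
     /*
      * Worked example (illustrative only): TCO is a single PSTATE bit
      * (bit 25, PSTATE_TCO), and the accessors above move just that bit:
      *
      *     env->pstate = 0x3c5;                 hypothetical flag state
      *     tco_write(env, ri, PSTATE_TCO);      pstate becomes 0x20003c5
      *     tco_read(env, ri);                   returns 0x2000000
      *
      * Note that the read returns the bit in place rather than shifted
      * down to bit 0, matching the MRS view of PSTATE.TCO.
      */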
7031 
7032 static const ARMCPRegInfo mte_reginfo[] = {
7033     { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
7034       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
7035       .access = PL1_RW, .accessfn = access_mte,
7036       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
7037     { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
7038       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
7039       .access = PL1_RW, .accessfn = access_tfsr_el1,
7040       .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
7041       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
7042     { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
7043       .type = ARM_CP_NV2_REDIRECT,
7044       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
7045       .access = PL2_RW, .accessfn = access_tfsr_el2,
7046       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
7047     { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
7048       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
7049       .access = PL3_RW,
7050       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
7051     { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
7052       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
7053       .access = PL1_RW, .accessfn = access_mte,
7054       .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
7055     { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
7056       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
7057       .access = PL1_RW, .accessfn = access_mte,
7058       .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
7059     { .name = "TCO", .state = ARM_CP_STATE_AA64,
7060       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
7061       .type = ARM_CP_NO_RAW,
7062       .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
7063     { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
7064       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
7065       .type = ARM_CP_NOP, .access = PL1_W,
7066       .fgt = FGT_DCIVAC,
7067       .accessfn = aa64_cacheop_poc_access },
7068     { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
7069       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
7070       .fgt = FGT_DCISW,
7071       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7072     { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
7073       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
7074       .type = ARM_CP_NOP, .access = PL1_W,
7075       .fgt = FGT_DCIVAC,
7076       .accessfn = aa64_cacheop_poc_access },
7077     { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
7078       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
7079       .fgt = FGT_DCISW,
7080       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7081     { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
7082       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
7083       .fgt = FGT_DCCSW,
7084       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7085     { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
7086       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
7087       .fgt = FGT_DCCSW,
7088       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7089     { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
7090       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
7091       .fgt = FGT_DCCISW,
7092       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7093     { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
7094       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
7095       .fgt = FGT_DCCISW,
7096       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7097 };
7098 
7099 static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
7100     { .name = "TCO", .state = ARM_CP_STATE_AA64,
7101       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
7102       .type = ARM_CP_CONST, .access = PL0_RW, },
7103 };
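
     /*
      * Usage sketch (assumption: the caller registers this table when the
      * CPU reports only the MTE instructions, without full tag checking).
      * Since the reg is ARM_CP_CONST with the default resetvalue of 0,
      * TCO becomes RAZ/WI in that configuration:
      *
      *     define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
      *     guest "msr tco, #1"   - write ignored
      *     guest "mrs x0, tco"   - x0 reads back as 0
      */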
7104 
7105 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
7106     { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
7107       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
7108       .type = ARM_CP_NOP, .access = PL0_W,
7109       .fgt = FGT_DCCVAC,
7110       .accessfn = aa64_cacheop_poc_access },
7111     { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
7112       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
7113       .type = ARM_CP_NOP, .access = PL0_W,
7114       .fgt = FGT_DCCVAC,
7115       .accessfn = aa64_cacheop_poc_access },
7116     { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
7117       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
7118       .type = ARM_CP_NOP, .access = PL0_W,
7119       .fgt = FGT_DCCVAP,
7120       .accessfn = aa64_cacheop_poc_access },
7121     { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
7122       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
7123       .type = ARM_CP_NOP, .access = PL0_W,
7124       .fgt = FGT_DCCVAP,
7125       .accessfn = aa64_cacheop_poc_access },
7126     { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
7127       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
7128       .type = ARM_CP_NOP, .access = PL0_W,
7129       .fgt = FGT_DCCVADP,
7130       .accessfn = aa64_cacheop_poc_access },
7131     { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
7132       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
7133       .type = ARM_CP_NOP, .access = PL0_W,
7134       .fgt = FGT_DCCVADP,
7135       .accessfn = aa64_cacheop_poc_access },
7136     { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
7137       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
7138       .type = ARM_CP_NOP, .access = PL0_W,
7139       .fgt = FGT_DCCIVAC,
7140       .accessfn = aa64_cacheop_poc_access },
7141     { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
7142       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
7143       .type = ARM_CP_NOP, .access = PL0_W,
7144       .fgt = FGT_DCCIVAC,
7145       .accessfn = aa64_cacheop_poc_access },
7146     { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
7147       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
7148       .access = PL0_W, .type = ARM_CP_DC_GVA,
7149 #ifndef CONFIG_USER_ONLY
7150       /* Avoid overhead of an access check that always passes in user-mode */
7151       .accessfn = aa64_zva_access,
7152       .fgt = FGT_DCZVA,
7153 #endif
7154     },
7155     { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
7156       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
7157       .access = PL0_W, .type = ARM_CP_DC_GZVA,
7158 #ifndef CONFIG_USER_ONLY
7159       /* Avoid overhead of an access check that always passes in user-mode */
7160       .accessfn = aa64_zva_access,
7161       .fgt = FGT_DCZVA,
7162 #endif
7163     },
7164 };
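
     /*
      * Unlike the ARM_CP_NOP entries above, DC_GVA and DC_GZVA do real
      * work: the ARM_CP_DC_GVA / ARM_CP_DC_GZVA types are special-cased
      * by the translator, which writes allocation tags (GVA) or zeroes
      * data and writes tags (GZVA) across a whole DCZID-sized block, so
      * they cannot be folded into a NOP.
      */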
7165 
7166 static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
7167                                      bool isread)
7168 {
7169     uint64_t hcr = arm_hcr_el2_eff(env);
7170     int el = arm_current_el(env);
7171 
7172     if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
7173         if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
7174             if (hcr & HCR_TGE) {
7175                 return CP_ACCESS_TRAP_EL2;
7176             }
7177             return CP_ACCESS_TRAP;
7178         }
7179     } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
7180         return CP_ACCESS_TRAP_EL2;
7181     }
7182     if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
7183         return CP_ACCESS_TRAP_EL2;
7184     }
7185     if (el < 3
7186         && arm_feature(env, ARM_FEATURE_EL3)
7187         && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
7188         return CP_ACCESS_TRAP_EL3;
7189     }
7190     return CP_ACCESS_OK;
7191 }
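
     /*
      * Worked example (illustrative): an EL0 access to SCXTNUM_EL0 with
      * SCTLR_EL1.TSCXT set, outside an HCR_EL2.{E2H,TGE} == {1,1} regime,
      * takes the first branch above and traps to EL2 when HCR_EL2.TGE is
      * set, otherwise to EL1 (CP_ACCESS_TRAP). The HCR_EL2.EnSCXT and
      * SCR_EL3.EnSCXT checks further down only matter once the access
      * has survived those SCTLR.TSCXT filters.
      */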
7192 
7193 static CPAccessResult access_scxtnum_el1(CPUARMState *env,
7194                                          const ARMCPRegInfo *ri,
7195                                          bool isread)
7196 {
7197     CPAccessResult nv1 = access_nv1(env, ri, isread);
7198 
7199     if (nv1 != CP_ACCESS_OK) {
7200         return nv1;
7201     }
7202     return access_scxtnum(env, ri, isread);
7203 }
7204 
7205 static const ARMCPRegInfo scxtnum_reginfo[] = {
7206     { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
7207       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
7208       .access = PL0_RW, .accessfn = access_scxtnum,
7209       .fgt = FGT_SCXTNUM_EL0,
7210       .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
7211     { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
7212       .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
7213       .access = PL1_RW, .accessfn = access_scxtnum_el1,
7214       .fgt = FGT_SCXTNUM_EL1,
7215       .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
7216       .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
7217     { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
7218       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
7219       .access = PL2_RW, .accessfn = access_scxtnum,
7220       .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
7221     { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
7222       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
7223       .access = PL3_RW,
7224       .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
7225 };
7226 
7227 static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri,
7228                                  bool isread)
7229 {
7230     if (arm_current_el(env) == 2 &&
7231         arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
7232         return CP_ACCESS_TRAP_EL3;
7233     }
7234     return CP_ACCESS_OK;
7235 }
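
     /*
      * Sketch of the gating (illustrative): with EL3 present, the FGT
      * registers below are unusable from EL2 until SCR_EL3.FGTEn is set:
      *
      *     env->cp15.scr_el3 &= ~SCR_FGTEN;
      *     at EL2, "mrs x0, hfgrtr_el2"  -> CP_ACCESS_TRAP_EL3
      *     env->cp15.scr_el3 |= SCR_FGTEN;
      *     at EL2, "mrs x0, hfgrtr_el2"  -> CP_ACCESS_OK
      */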
7236 
7237 static const ARMCPRegInfo fgt_reginfo[] = {
7238     { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
7239       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
7240       .nv2_redirect_offset = 0x1b8,
7241       .access = PL2_RW, .accessfn = access_fgt,
7242       .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) },
7243     { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
7244       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5,
7245       .nv2_redirect_offset = 0x1c0,
7246       .access = PL2_RW, .accessfn = access_fgt,
7247       .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) },
7248     { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
7249       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4,
7250       .nv2_redirect_offset = 0x1d0,
7251       .access = PL2_RW, .accessfn = access_fgt,
7252       .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) },
7253     { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
7254       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5,
7255       .nv2_redirect_offset = 0x1d8,
7256       .access = PL2_RW, .accessfn = access_fgt,
7257       .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) },
7258     { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
7259       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
7260       .nv2_redirect_offset = 0x1c8,
7261       .access = PL2_RW, .accessfn = access_fgt,
7262       .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) },
7263 };
7264 
7265 static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7266                        uint64_t value)
7267 {
7268     /*
7269      * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee
7270      * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything
7271      * about the RESS bits at the top -- we choose the "generate an EL2
7272      * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let
7273      * the ptw.c code detect the resulting invalid address).
7274      */
7275     env->cp15.vncr_el2 = value & ~0xfffULL;
7276 }
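
     /*
      * Worked example (not part of the build): the low 12 bits are
      * cleared on write, so
      *
      *     vncr_write(env, ri, 0x0000aaaabbbbcfffULL);
      *     env->cp15.vncr_el2 is now 0x0000aaaabbbbc000
      *
      * guaranteeing that vncr_el2 plus any nv2_redirect_offset stays
      * 64-bit aligned for the memory-backed register accesses.
      */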
7277 
7278 static const ARMCPRegInfo nv2_reginfo[] = {
7279     { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64,
7280       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0,
7281       .access = PL2_RW,
7282       .writefn = vncr_write,
7283       .nv2_redirect_offset = 0xb0,
7284       .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
7285 };
7286 
7287 #endif /* TARGET_AARCH64 */
7288 
7289 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
7290                                      bool isread)
7291 {
7292     int el = arm_current_el(env);
7293 
7294     if (el == 0) {
7295         uint64_t sctlr = arm_sctlr(env, el);
7296         if (!(sctlr & SCTLR_EnRCTX)) {
7297             return CP_ACCESS_TRAP;
7298         }
7299     } else if (el == 1) {
7300         uint64_t hcr = arm_hcr_el2_eff(env);
7301         if (hcr & HCR_NV) {
7302             return CP_ACCESS_TRAP_EL2;
7303         }
7304     }
7305     return CP_ACCESS_OK;
7306 }
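
     /*
      * Illustrative example: at EL0 the prediction-restriction ops are
      * gated on SCTLR.EnRCTX for the current translation regime, e.g.
      *
      *     env->cp15.sctlr_el[1] &= ~SCTLR_EnRCTX;
      *     EL0 "cfp rctx, x0"  -> CP_ACCESS_TRAP
      *
      * while at EL1 only HCR_EL2.NV forces a trap to EL2. The operations
      * themselves are ARM_CP_NOP below, since TCG has no predictor state
      * to restrict.
      */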
7307 
7308 static const ARMCPRegInfo predinv_reginfo[] = {
7309     { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
7310       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
7311       .fgt = FGT_CFPRCTX,
7312       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7313     { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
7314       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
7315       .fgt = FGT_DVPRCTX,
7316       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7317     { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
7318       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
7319       .fgt = FGT_CPPRCTX,
7320       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7321     /*
7322      * Note the AArch32 opcodes have a different OPC1.
7323      */
7324     { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
7325       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
7326       .fgt = FGT_CFPRCTX,
7327       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7328     { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
7329       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
7330       .fgt = FGT_DVPRCTX,
7331       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7332     { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
7333       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
7334       .fgt = FGT_CPPRCTX,
7335       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7336 };
7337 
7338 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
7339 {
7340     /* Read the high 32 bits of the current CCSIDR */
7341     return extract64(ccsidr_read(env, ri), 32, 32);
7342 }
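
     /*
      * Small sketch (hypothetical value): CCSIDR2 is the 32-bit window
      * onto the upper half of the 64-bit CCSIDR value, so
      *
      *     extract64(0x0123456789abcdefULL, 32, 32) == 0x01234567
      *
      * matching the FEAT_CCIDX layout, where NumSets lives in the high
      * word of the 64-bit register value.
      */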
7343 
7344 static const ARMCPRegInfo ccsidr2_reginfo[] = {
7345     { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
7346       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
7347       .access = PL1_R,
7348       .accessfn = access_tid4,
7349       .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
7350 };
7351 
7352 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
7353                                        bool isread)
7354 {
7355     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
7356         return CP_ACCESS_TRAP_EL2;
7357     }
7358 
7359     return CP_ACCESS_OK;
7360 }
7361 
7362 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
7363                                        bool isread)
7364 {
7365     if (arm_feature(env, ARM_FEATURE_V8)) {
7366         return access_aa64_tid3(env, ri, isread);
7367     }
7368 
7369     return CP_ACCESS_OK;
7370 }
7371 
7372 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
7373                                      bool isread)
7374 {
7375     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
7376         return CP_ACCESS_TRAP_EL2;
7377     }
7378 
7379     return CP_ACCESS_OK;
7380 }
7381 
7382 static CPAccessResult access_joscr_jmcr(CPUARMState *env,
7383                                         const ARMCPRegInfo *ri, bool isread)
7384 {
7385     /*
7386      * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
7387      * in v7A, not in v8A.
7388      */
7389     if (!arm_feature(env, ARM_FEATURE_V8) &&
7390         arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
7391         (env->cp15.hstr_el2 & HSTR_TJDBX)) {
7392         return CP_ACCESS_TRAP_EL2;
7393     }
7394     return CP_ACCESS_OK;
7395 }
7396 
7397 static const ARMCPRegInfo jazelle_regs[] = {
7398     { .name = "JIDR",
7399       .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
7400       .access = PL1_R, .accessfn = access_jazelle,
7401       .type = ARM_CP_CONST, .resetvalue = 0 },
7402     { .name = "JOSCR",
7403       .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
7404       .accessfn = access_joscr_jmcr,
7405       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
7406     { .name = "JMCR",
7407       .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
7408       .accessfn = access_joscr_jmcr,
7409       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
7410 };
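
     /*
      * Together these three RAZ/WI registers form a "trivial" Jazelle
      * implementation: JIDR reads as zero to report that no Jazelle
      * hardware is present, while JOSCR and JMCR accept and discard
      * writes, which is an architecturally valid way to satisfy the
      * cp14 accesses.
      */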
7411 
7412 static const ARMCPRegInfo contextidr_el2 = {
7413     .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
7414     .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
7415     .access = PL2_RW,
7416     .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
7417 };
7418 
7419 static const ARMCPRegInfo vhe_reginfo[] = {
7420     { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
7421       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
7422       .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
7423       .raw_writefn = raw_write,
7424       .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
7425 #ifndef CONFIG_USER_ONLY
7426     { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
7427       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
7428       .fieldoffset =
7429         offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
7430       .type = ARM_CP_IO, .access = PL2_RW,
7431       .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
7432     { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
7433       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
7434       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
7435       .resetfn = gt_hv_timer_reset,
7436       .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
7437     { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
7438       .type = ARM_CP_IO,
7439       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
7440       .access = PL2_RW,
7441       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
7442       .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
7443     { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
7444       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
7445       .type = ARM_CP_IO | ARM_CP_ALIAS,
7446       .access = PL2_RW, .accessfn = access_el1nvpct,
7447       .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1,
7448       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
7449       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
7450     { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
7451       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
7452       .type = ARM_CP_IO | ARM_CP_ALIAS,
7453       .access = PL2_RW, .accessfn = access_el1nvvct,
7454       .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1,
7455       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
7456       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
7457     { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7458       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
7459       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7460       .access = PL2_RW, .accessfn = e2h_access,
7461       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
7462     { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7463       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
7464       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7465       .access = PL2_RW, .accessfn = e2h_access,
7466       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
7467     { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7468       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
7469       .type = ARM_CP_IO | ARM_CP_ALIAS,
7470       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
7471       .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1,
7472       .access = PL2_RW, .accessfn = access_el1nvpct,
7473       .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
7474     { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7475       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
7476       .type = ARM_CP_IO | ARM_CP_ALIAS,
7477       .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1,
7478       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
7479       .access = PL2_RW, .accessfn = access_el1nvvct,
7480       .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
7481 #endif
7482 };
7483 
7484 #ifndef CONFIG_USER_ONLY
7485 static const ARMCPRegInfo ats1e1_reginfo[] = {
7486     { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
7487       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7488       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7489       .fgt = FGT_ATS1E1RP,
7490       .accessfn = at_s1e01_access, .writefn = ats_write64 },
7491     { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
7492       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7493       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7494       .fgt = FGT_ATS1E1WP,
7495       .accessfn = at_s1e01_access, .writefn = ats_write64 },
7496 };
7497 
7498 static const ARMCPRegInfo ats1cp_reginfo[] = {
7499     { .name = "ATS1CPRP",
7500       .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7501       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7502       .writefn = ats_write },
7503     { .name = "ATS1CPWP",
7504       .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7505       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7506       .writefn = ats_write },
7507 };
7508 #endif
7509 
7510 /*
7511  * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
7512  * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
7513  * is non-zero: never the case for ARMv7, optional in ARMv8,
7514  * and mandatory for ARMv8.2 and later.
7515  * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
7516  * implementation is RAZ/WI we can ignore this detail, as we
7517  * do for ACTLR.
7518  */
7519 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
7520     { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
7521       .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
7522       .access = PL1_RW, .accessfn = access_tacr,
7523       .type = ARM_CP_CONST, .resetvalue = 0 },
7524     { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
7525       .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
7526       .access = PL2_RW, .type = ARM_CP_CONST,
7527       .resetvalue = 0 },
7528 };
7529 
7530 void register_cp_regs_for_features(ARMCPU *cpu)
7531 {
7532     /* Register all the coprocessor registers based on feature bits */
7533     CPUARMState *env = &cpu->env;
7534     if (arm_feature(env, ARM_FEATURE_M)) {
7535         /* M profile has no coprocessor registers */
7536         return;
7537     }
7538 
7539     define_arm_cp_regs(cpu, cp_reginfo);
7540     if (!arm_feature(env, ARM_FEATURE_V8)) {
7541         /*
7542          * Must go early as it is full of wildcards that may be
7543          * overridden by later definitions.
7544          */
7545         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
7546     }
7547 
7548     define_tlb_insn_regs(cpu);
7549 
7550     if (arm_feature(env, ARM_FEATURE_V6)) {
7551         /* The ID registers all have impdef reset values */
7552         ARMCPRegInfo v6_idregs[] = {
7553             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
7554               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
7555               .access = PL1_R, .type = ARM_CP_CONST,
7556               .accessfn = access_aa32_tid3,
7557               .resetvalue = cpu->isar.id_pfr0 },
7558             /*
7559              * ID_PFR1 is not a plain ARM_CP_CONST because we don't know
7560              * the value of the GIC field until after we define these regs.
7561              */
7562             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
7563               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
7564               .access = PL1_R,
7566 #ifdef CONFIG_USER_ONLY
7567               .type = ARM_CP_CONST,
7568               .resetvalue = cpu->isar.id_pfr1,
7569 #else
7570               .type = ARM_CP_NO_RAW,
7571               .accessfn = access_aa32_tid3,
7572               .readfn = id_pfr1_read,
7573               .writefn = arm_cp_write_ignore
7574 #endif
7575             },
7576             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
7577               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
7578               .access = PL1_R, .type = ARM_CP_CONST,
7579               .accessfn = access_aa32_tid3,
7580               .resetvalue = cpu->isar.id_dfr0 },
7581             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
7582               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
7583               .access = PL1_R, .type = ARM_CP_CONST,
7584               .accessfn = access_aa32_tid3,
7585               .resetvalue = cpu->id_afr0 },
7586             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
7587               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
7588               .access = PL1_R, .type = ARM_CP_CONST,
7589               .accessfn = access_aa32_tid3,
7590               .resetvalue = cpu->isar.id_mmfr0 },
7591             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
7592               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
7593               .access = PL1_R, .type = ARM_CP_CONST,
7594               .accessfn = access_aa32_tid3,
7595               .resetvalue = cpu->isar.id_mmfr1 },
7596             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
7597               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
7598               .access = PL1_R, .type = ARM_CP_CONST,
7599               .accessfn = access_aa32_tid3,
7600               .resetvalue = cpu->isar.id_mmfr2 },
7601             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
7602               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
7603               .access = PL1_R, .type = ARM_CP_CONST,
7604               .accessfn = access_aa32_tid3,
7605               .resetvalue = cpu->isar.id_mmfr3 },
7606             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
7607               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
7608               .access = PL1_R, .type = ARM_CP_CONST,
7609               .accessfn = access_aa32_tid3,
7610               .resetvalue = cpu->isar.id_isar0 },
7611             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
7612               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
7613               .access = PL1_R, .type = ARM_CP_CONST,
7614               .accessfn = access_aa32_tid3,
7615               .resetvalue = cpu->isar.id_isar1 },
7616             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
7617               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
7618               .access = PL1_R, .type = ARM_CP_CONST,
7619               .accessfn = access_aa32_tid3,
7620               .resetvalue = cpu->isar.id_isar2 },
7621             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
7622               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
7623               .access = PL1_R, .type = ARM_CP_CONST,
7624               .accessfn = access_aa32_tid3,
7625               .resetvalue = cpu->isar.id_isar3 },
7626             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
7627               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
7628               .access = PL1_R, .type = ARM_CP_CONST,
7629               .accessfn = access_aa32_tid3,
7630               .resetvalue = cpu->isar.id_isar4 },
7631             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
7632               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
7633               .access = PL1_R, .type = ARM_CP_CONST,
7634               .accessfn = access_aa32_tid3,
7635               .resetvalue = cpu->isar.id_isar5 },
7636             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
7637               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
7638               .access = PL1_R, .type = ARM_CP_CONST,
7639               .accessfn = access_aa32_tid3,
7640               .resetvalue = cpu->isar.id_mmfr4 },
7641             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
7642               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
7643               .access = PL1_R, .type = ARM_CP_CONST,
7644               .accessfn = access_aa32_tid3,
7645               .resetvalue = cpu->isar.id_isar6 },
7646         };
7647         define_arm_cp_regs(cpu, v6_idregs);
7648         define_arm_cp_regs(cpu, v6_cp_reginfo);
7649     } else {
7650         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
7651     }
7652     if (arm_feature(env, ARM_FEATURE_V6K)) {
7653         define_arm_cp_regs(cpu, v6k_cp_reginfo);
7654     }
7655     if (arm_feature(env, ARM_FEATURE_V7VE)) {
7656         define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
7657     }
7658     if (arm_feature(env, ARM_FEATURE_V7)) {
7659         ARMCPRegInfo clidr = {
7660             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
7661             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
7662             .access = PL1_R, .type = ARM_CP_CONST,
7663             .accessfn = access_tid4,
7664             .fgt = FGT_CLIDR_EL1,
7665             .resetvalue = cpu->clidr
7666         };
7667         define_one_arm_cp_reg(cpu, &clidr);
7668         define_arm_cp_regs(cpu, v7_cp_reginfo);
7669         define_debug_regs(cpu);
7670         define_pmu_regs(cpu);
7671     } else {
7672         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
7673     }
7674     if (arm_feature(env, ARM_FEATURE_V8)) {
7675         /*
7676          * v8 ID registers, which all have impdef reset values.
7677          * Note that within the ID register ranges the unused slots
7678          * must all be RAZ, not UNDEF; future architecture versions may
7679          * define new registers here.
7680          * ID registers which are AArch64 views of the AArch32 ID registers
7681          * which already existed in v6 and v7 are handled elsewhere,
7682          * in v6_idregs[].
7683          */
7684         int i;
7685         ARMCPRegInfo v8_idregs[] = {
7686             /*
7687              * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
7688              * emulation because we don't know the right value for the
7689              * GIC field until after we define these regs.
7690              */
7691             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
7692               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
7693               .access = PL1_R,
7694 #ifdef CONFIG_USER_ONLY
7695               .type = ARM_CP_CONST,
7696               .resetvalue = cpu->isar.id_aa64pfr0
7697 #else
7698               .type = ARM_CP_NO_RAW,
7699               .accessfn = access_aa64_tid3,
7700               .readfn = id_aa64pfr0_read,
7701               .writefn = arm_cp_write_ignore
7702 #endif
7703             },
7704             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
7705               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
7706               .access = PL1_R, .type = ARM_CP_CONST,
7707               .accessfn = access_aa64_tid3,
7708               .resetvalue = cpu->isar.id_aa64pfr1},
7709             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7710               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
7711               .access = PL1_R, .type = ARM_CP_CONST,
7712               .accessfn = access_aa64_tid3,
7713               .resetvalue = 0 },
7714             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7715               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
7716               .access = PL1_R, .type = ARM_CP_CONST,
7717               .accessfn = access_aa64_tid3,
7718               .resetvalue = 0 },
7719             { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
7720               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
7721               .access = PL1_R, .type = ARM_CP_CONST,
7722               .accessfn = access_aa64_tid3,
7723               .resetvalue = cpu->isar.id_aa64zfr0 },
7724             { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
7725               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
7726               .access = PL1_R, .type = ARM_CP_CONST,
7727               .accessfn = access_aa64_tid3,
7728               .resetvalue = cpu->isar.id_aa64smfr0 },
7729             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7730               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
7731               .access = PL1_R, .type = ARM_CP_CONST,
7732               .accessfn = access_aa64_tid3,
7733               .resetvalue = 0 },
7734             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7735               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
7736               .access = PL1_R, .type = ARM_CP_CONST,
7737               .accessfn = access_aa64_tid3,
7738               .resetvalue = 0 },
7739             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
7740               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
7741               .access = PL1_R, .type = ARM_CP_CONST,
7742               .accessfn = access_aa64_tid3,
7743               .resetvalue = cpu->isar.id_aa64dfr0 },
7744             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
7745               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
7746               .access = PL1_R, .type = ARM_CP_CONST,
7747               .accessfn = access_aa64_tid3,
7748               .resetvalue = cpu->isar.id_aa64dfr1 },
7749             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7750               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
7751               .access = PL1_R, .type = ARM_CP_CONST,
7752               .accessfn = access_aa64_tid3,
7753               .resetvalue = 0 },
7754             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7755               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
7756               .access = PL1_R, .type = ARM_CP_CONST,
7757               .accessfn = access_aa64_tid3,
7758               .resetvalue = 0 },
7759             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
7760               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
7761               .access = PL1_R, .type = ARM_CP_CONST,
7762               .accessfn = access_aa64_tid3,
7763               .resetvalue = cpu->id_aa64afr0 },
7764             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
7765               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
7766               .access = PL1_R, .type = ARM_CP_CONST,
7767               .accessfn = access_aa64_tid3,
7768               .resetvalue = cpu->id_aa64afr1 },
7769             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7770               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
7771               .access = PL1_R, .type = ARM_CP_CONST,
7772               .accessfn = access_aa64_tid3,
7773               .resetvalue = 0 },
7774             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7775               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
7776               .access = PL1_R, .type = ARM_CP_CONST,
7777               .accessfn = access_aa64_tid3,
7778               .resetvalue = 0 },
7779             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
7780               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
7781               .access = PL1_R, .type = ARM_CP_CONST,
7782               .accessfn = access_aa64_tid3,
7783               .resetvalue = cpu->isar.id_aa64isar0 },
7784             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
7785               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
7786               .access = PL1_R, .type = ARM_CP_CONST,
7787               .accessfn = access_aa64_tid3,
7788               .resetvalue = cpu->isar.id_aa64isar1 },
7789             { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
7790               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
7791               .access = PL1_R, .type = ARM_CP_CONST,
7792               .accessfn = access_aa64_tid3,
7793               .resetvalue = cpu->isar.id_aa64isar2 },
7794             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7795               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
7796               .access = PL1_R, .type = ARM_CP_CONST,
7797               .accessfn = access_aa64_tid3,
7798               .resetvalue = 0 },
7799             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7800               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
7801               .access = PL1_R, .type = ARM_CP_CONST,
7802               .accessfn = access_aa64_tid3,
7803               .resetvalue = 0 },
7804             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7805               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
7806               .access = PL1_R, .type = ARM_CP_CONST,
7807               .accessfn = access_aa64_tid3,
7808               .resetvalue = 0 },
7809             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7810               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
7811               .access = PL1_R, .type = ARM_CP_CONST,
7812               .accessfn = access_aa64_tid3,
7813               .resetvalue = 0 },
7814             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7815               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
7816               .access = PL1_R, .type = ARM_CP_CONST,
7817               .accessfn = access_aa64_tid3,
7818               .resetvalue = 0 },
7819             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
7820               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
7821               .access = PL1_R, .type = ARM_CP_CONST,
7822               .accessfn = access_aa64_tid3,
7823               .resetvalue = cpu->isar.id_aa64mmfr0 },
7824             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
7825               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
7826               .access = PL1_R, .type = ARM_CP_CONST,
7827               .accessfn = access_aa64_tid3,
7828               .resetvalue = cpu->isar.id_aa64mmfr1 },
7829             { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
7830               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
7831               .access = PL1_R, .type = ARM_CP_CONST,
7832               .accessfn = access_aa64_tid3,
7833               .resetvalue = cpu->isar.id_aa64mmfr2 },
7834             { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64,
7835               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
7836               .access = PL1_R, .type = ARM_CP_CONST,
7837               .accessfn = access_aa64_tid3,
7838               .resetvalue = cpu->isar.id_aa64mmfr3 },
7839             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7840               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
7841               .access = PL1_R, .type = ARM_CP_CONST,
7842               .accessfn = access_aa64_tid3,
7843               .resetvalue = 0 },
7844             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7845               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
7846               .access = PL1_R, .type = ARM_CP_CONST,
7847               .accessfn = access_aa64_tid3,
7848               .resetvalue = 0 },
7849             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7850               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
7851               .access = PL1_R, .type = ARM_CP_CONST,
7852               .accessfn = access_aa64_tid3,
7853               .resetvalue = 0 },
7854             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7855               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
7856               .access = PL1_R, .type = ARM_CP_CONST,
7857               .accessfn = access_aa64_tid3,
7858               .resetvalue = 0 },
7859             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
7860               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
7861               .access = PL1_R, .type = ARM_CP_CONST,
7862               .accessfn = access_aa64_tid3,
7863               .resetvalue = cpu->isar.mvfr0 },
7864             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
7865               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
7866               .access = PL1_R, .type = ARM_CP_CONST,
7867               .accessfn = access_aa64_tid3,
7868               .resetvalue = cpu->isar.mvfr1 },
7869             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
7870               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
7871               .access = PL1_R, .type = ARM_CP_CONST,
7872               .accessfn = access_aa64_tid3,
7873               .resetvalue = cpu->isar.mvfr2 },
7874             /*
7875              * "0, c0, c3, {0,1,2}" are the encodings corresponding to
7876              * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
7877              * as RAZ, since it is in the "reserved for future ID
7878              * registers, RAZ" part of the AArch32 encoding space.
7879              */
7880             { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
7881               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
7882               .access = PL1_R, .type = ARM_CP_CONST,
7883               .accessfn = access_aa64_tid3,
7884               .resetvalue = 0 },
7885             { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
7886               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
7887               .access = PL1_R, .type = ARM_CP_CONST,
7888               .accessfn = access_aa64_tid3,
7889               .resetvalue = 0 },
7890             { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
7891               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
7892               .access = PL1_R, .type = ARM_CP_CONST,
7893               .accessfn = access_aa64_tid3,
7894               .resetvalue = 0 },
7895             /*
7896              * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
7897              * they're also RAZ for AArch64, and in v8 they are gradually
7898              * being filled in with AArch64 views of the new AArch32
7899              * ID registers.
7900              */
7901             { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
7902               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
7903               .access = PL1_R, .type = ARM_CP_CONST,
7904               .accessfn = access_aa64_tid3,
7905               .resetvalue = 0 },
7906             { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
7907               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
7908               .access = PL1_R, .type = ARM_CP_CONST,
7909               .accessfn = access_aa64_tid3,
7910               .resetvalue = cpu->isar.id_pfr2 },
7911             { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
7912               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
7913               .access = PL1_R, .type = ARM_CP_CONST,
7914               .accessfn = access_aa64_tid3,
7915               .resetvalue = cpu->isar.id_dfr1 },
7916             { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
7917               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
7918               .access = PL1_R, .type = ARM_CP_CONST,
7919               .accessfn = access_aa64_tid3,
7920               .resetvalue = cpu->isar.id_mmfr5 },
7921             { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
7922               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
7923               .access = PL1_R, .type = ARM_CP_CONST,
7924               .accessfn = access_aa64_tid3,
7925               .resetvalue = 0 },
7926             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
7927               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
7928               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7929               .fgt = FGT_PMCEIDN_EL0,
7930               .resetvalue = extract64(cpu->pmceid0, 0, 32) },
7931             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
7932               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
7933               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7934               .fgt = FGT_PMCEIDN_EL0,
7935               .resetvalue = cpu->pmceid0 },
7936             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
7937               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
7938               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7939               .fgt = FGT_PMCEIDN_EL0,
7940               .resetvalue = extract64(cpu->pmceid1, 0, 32) },
7941             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
7942               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
7943               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7944               .fgt = FGT_PMCEIDN_EL0,
7945               .resetvalue = cpu->pmceid1 },
7946         };
7947 #ifdef CONFIG_USER_ONLY
7948         static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
7949             { .name = "ID_AA64PFR0_EL1",
7950               .exported_bits = R_ID_AA64PFR0_FP_MASK |
7951                                R_ID_AA64PFR0_ADVSIMD_MASK |
7952                                R_ID_AA64PFR0_SVE_MASK |
7953                                R_ID_AA64PFR0_DIT_MASK,
7954               .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
7955                             (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
7956             { .name = "ID_AA64PFR1_EL1",
7957               .exported_bits = R_ID_AA64PFR1_BT_MASK |
7958                                R_ID_AA64PFR1_SSBS_MASK |
7959                                R_ID_AA64PFR1_MTE_MASK |
7960                                R_ID_AA64PFR1_SME_MASK },
7961             { .name = "ID_AA64PFR*_EL1_RESERVED",
7962               .is_glob = true },
7963             { .name = "ID_AA64ZFR0_EL1",
7964               .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
7965                                R_ID_AA64ZFR0_AES_MASK |
7966                                R_ID_AA64ZFR0_BITPERM_MASK |
7967                                R_ID_AA64ZFR0_BFLOAT16_MASK |
7968                                R_ID_AA64ZFR0_B16B16_MASK |
7969                                R_ID_AA64ZFR0_SHA3_MASK |
7970                                R_ID_AA64ZFR0_SM4_MASK |
7971                                R_ID_AA64ZFR0_I8MM_MASK |
7972                                R_ID_AA64ZFR0_F32MM_MASK |
7973                                R_ID_AA64ZFR0_F64MM_MASK },
7974             { .name = "ID_AA64SMFR0_EL1",
7975               .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
7976                                R_ID_AA64SMFR0_BI32I32_MASK |
7977                                R_ID_AA64SMFR0_B16F32_MASK |
7978                                R_ID_AA64SMFR0_F16F32_MASK |
7979                                R_ID_AA64SMFR0_I8I32_MASK |
7980                                R_ID_AA64SMFR0_F16F16_MASK |
7981                                R_ID_AA64SMFR0_B16B16_MASK |
7982                                R_ID_AA64SMFR0_I16I32_MASK |
7983                                R_ID_AA64SMFR0_F64F64_MASK |
7984                                R_ID_AA64SMFR0_I16I64_MASK |
7985                                R_ID_AA64SMFR0_SMEVER_MASK |
7986                                R_ID_AA64SMFR0_FA64_MASK },
7987             { .name = "ID_AA64MMFR0_EL1",
7988               .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
7989               .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
7990                             (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
7991             { .name = "ID_AA64MMFR1_EL1",
7992               .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
7993             { .name = "ID_AA64MMFR2_EL1",
7994               .exported_bits = R_ID_AA64MMFR2_AT_MASK },
7995             { .name = "ID_AA64MMFR3_EL1",
7996               .exported_bits = 0 },
7997             { .name = "ID_AA64MMFR*_EL1_RESERVED",
7998               .is_glob = true },
7999             { .name = "ID_AA64DFR0_EL1",
8000               .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
8001             { .name = "ID_AA64DFR1_EL1" },
8002             { .name = "ID_AA64DFR*_EL1_RESERVED",
8003               .is_glob = true },
8004             { .name = "ID_AA64AFR*",
8005               .is_glob = true },
8006             { .name = "ID_AA64ISAR0_EL1",
8007               .exported_bits = R_ID_AA64ISAR0_AES_MASK |
8008                                R_ID_AA64ISAR0_SHA1_MASK |
8009                                R_ID_AA64ISAR0_SHA2_MASK |
8010                                R_ID_AA64ISAR0_CRC32_MASK |
8011                                R_ID_AA64ISAR0_ATOMIC_MASK |
8012                                R_ID_AA64ISAR0_RDM_MASK |
8013                                R_ID_AA64ISAR0_SHA3_MASK |
8014                                R_ID_AA64ISAR0_SM3_MASK |
8015                                R_ID_AA64ISAR0_SM4_MASK |
8016                                R_ID_AA64ISAR0_DP_MASK |
8017                                R_ID_AA64ISAR0_FHM_MASK |
8018                                R_ID_AA64ISAR0_TS_MASK |
8019                                R_ID_AA64ISAR0_RNDR_MASK },
8020             { .name = "ID_AA64ISAR1_EL1",
8021               .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
8022                                R_ID_AA64ISAR1_APA_MASK |
8023                                R_ID_AA64ISAR1_API_MASK |
8024                                R_ID_AA64ISAR1_JSCVT_MASK |
8025                                R_ID_AA64ISAR1_FCMA_MASK |
8026                                R_ID_AA64ISAR1_LRCPC_MASK |
8027                                R_ID_AA64ISAR1_GPA_MASK |
8028                                R_ID_AA64ISAR1_GPI_MASK |
8029                                R_ID_AA64ISAR1_FRINTTS_MASK |
8030                                R_ID_AA64ISAR1_SB_MASK |
8031                                R_ID_AA64ISAR1_BF16_MASK |
8032                                R_ID_AA64ISAR1_DGH_MASK |
8033                                R_ID_AA64ISAR1_I8MM_MASK },
8034             { .name = "ID_AA64ISAR2_EL1",
8035               .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
8036                                R_ID_AA64ISAR2_RPRES_MASK |
8037                                R_ID_AA64ISAR2_GPA3_MASK |
8038                                R_ID_AA64ISAR2_APA3_MASK |
8039                                R_ID_AA64ISAR2_MOPS_MASK |
8040                                R_ID_AA64ISAR2_BC_MASK |
8041                                R_ID_AA64ISAR2_RPRFM_MASK |
8042                                R_ID_AA64ISAR2_CSSC_MASK },
8043             { .name = "ID_AA64ISAR*_EL1_RESERVED",
8044               .is_glob = true },
8045         };
8046         modify_arm_cp_regs(v8_idregs, v8_user_idregs);
8047 #endif
8048         /*
8049          * RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL.
8050          * TODO: For RMR, a write with bit 1 set should do something with
8051          * cpu_reset(). In the meantime, "the bit is strictly a request",
8052          * so ignoring writes keeps us within the spec.
8053          */
8054         if (!arm_feature(env, ARM_FEATURE_EL3) &&
8055             !arm_feature(env, ARM_FEATURE_EL2)) {
8056             ARMCPRegInfo el1_reset_regs[] = {
8057                 { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
8058                   .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
8059                   .access = PL1_R,
8060                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
8061                 { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
8062                   .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
8063                   .access = PL1_RW, .type = ARM_CP_CONST,
8064                   .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
8065             };
8066             define_arm_cp_regs(cpu, el1_reset_regs);
8067         }
8068         define_arm_cp_regs(cpu, v8_idregs);
8069         define_arm_cp_regs(cpu, v8_cp_reginfo);
8070         if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
8071             define_arm_cp_regs(cpu, v8_aa32_el1_reginfo);
8072         }
8073 
8074         for (i = 4; i < 16; i++) {
8075             /*
8076              * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
8077              * For pre-v8 cores there are RAZ patterns for these in
8078              * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
8079              * v8 extends the "must RAZ" part of the ID register space
8080              * to also cover c0, 0, c{8-15}, {0-7}.
8081              * These are STATE_AA32 because in the AArch64 sysreg space
8082              * c4-c7 is where the AArch64 ID registers live (and we've
8083              * already defined those in v8_idregs[]), and c8-c15 are not
8084              * "must RAZ" for AArch64.
8085              */
8086             g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
8087             ARMCPRegInfo v8_aa32_raz_idregs = {
8088                 .name = name,
8089                 .state = ARM_CP_STATE_AA32,
8090                 .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
8091                 .access = PL1_R, .type = ARM_CP_CONST,
8092                 .accessfn = access_aa64_tid3,
8093                 .resetvalue = 0 };
8094             define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
8095         }
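
             /*
              * Illustrative expansion (not part of the build): the loop
              * above registers twelve wildcard entries, RES_0_C0_C4_X
              * through RES_0_C0_C15_X, each matching every opc2 value via
              * CP_ANY, so e.g. "mrc p15, 0, r0, c0, c9, 3" hits
              * RES_0_C0_C9_X and reads as zero.
              */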
8096     }
8097 
8098     /*
8099      * Register the base EL2 cpregs.
8100      * Pre v8, these registers are implemented only as part of the
8101      * Virtualization Extensions (EL2 present).  Beginning with v8,
8102      * if EL2 is missing but EL3 is enabled, mostly these become
8103      * RES0 from EL3, with some specific exceptions.
8104      */
8105     if (arm_feature(env, ARM_FEATURE_EL2)
8106         || (arm_feature(env, ARM_FEATURE_EL3)
8107             && arm_feature(env, ARM_FEATURE_V8))) {
8108         uint64_t vmpidr_def = mpidr_read_val(env);
8109         ARMCPRegInfo vpidr_regs[] = {
8110             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
8111               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
8112               .access = PL2_RW, .accessfn = access_el3_aa32ns,
8113               .resetvalue = cpu->midr,
8114               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
8115               .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
8116             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
8117               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
8118               .access = PL2_RW, .resetvalue = cpu->midr,
8119               .type = ARM_CP_EL3_NO_EL2_C_NZ,
8120               .nv2_redirect_offset = 0x88,
8121               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
8122             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
8123               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
8124               .access = PL2_RW, .accessfn = access_el3_aa32ns,
8125               .resetvalue = vmpidr_def,
8126               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
8127               .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
8128             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
8129               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
8130               .access = PL2_RW, .resetvalue = vmpidr_def,
8131               .type = ARM_CP_EL3_NO_EL2_C_NZ,
8132               .nv2_redirect_offset = 0x50,
8133               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
8134         };
8135         /*
8136          * The only field of MDCR_EL2 that has a defined architectural reset
8137          * value is MDCR_EL2.HPMN, which should reset to the value of PMCR_EL0.N.
8138          */
8139         ARMCPRegInfo mdcr_el2 = {
8140             .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
8141             .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
8142             .writefn = mdcr_el2_write,
8143             .access = PL2_RW, .resetvalue = pmu_num_counters(env),
8144             .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
8145         };
8146         define_one_arm_cp_reg(cpu, &mdcr_el2);
8147         define_arm_cp_regs(cpu, vpidr_regs);
8148         define_arm_cp_regs(cpu, el2_cp_reginfo);
8149         if (arm_feature(env, ARM_FEATURE_V8)) {
8150             define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
8151         }
8152         if (cpu_isar_feature(aa64_sel2, cpu)) {
8153             define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
8154         }
8155         /*
8156          * RVBAR_EL2 and RMR_EL2 are only implemented if EL2 is the
8157          * highest EL.  See commentary near RMR_EL1.
8158          */
8159         if (!arm_feature(env, ARM_FEATURE_EL3)) {
8160             static const ARMCPRegInfo el2_reset_regs[] = {
8161                 { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
8162                   .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
8163                   .access = PL2_R,
8164                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
8165                 { .name = "RVBAR", .type = ARM_CP_ALIAS,
8166                   .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
8167                   .access = PL2_R,
8168                   .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
8169                 { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
8170                   .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
8171                   .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
8172             };
8173             define_arm_cp_regs(cpu, el2_reset_regs);
8174         }
8175     }
8176 
8177     /* Register the base EL3 cpregs. */
8178     if (arm_feature(env, ARM_FEATURE_EL3)) {
8179         define_arm_cp_regs(cpu, el3_cp_reginfo);
8180         ARMCPRegInfo el3_regs[] = {
8181             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
8182               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
8183               .access = PL3_R,
8184               .fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
8185             { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
8186               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
8187               .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
8188             { .name = "RMR", .state = ARM_CP_STATE_AA32,
8189               .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
8190               .access = PL3_RW, .type = ARM_CP_CONST,
8191               .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
8192             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
8193               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
8194               .access = PL3_RW,
8195               .raw_writefn = raw_write, .writefn = sctlr_write,
8196               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
8197               .resetvalue = cpu->reset_sctlr },
8198         };
8199 
8200         define_arm_cp_regs(cpu, el3_regs);
8201     }
8202     /*
8203      * The behaviour of NSACR is sufficiently various that we don't
8204      * try to describe it in a single reginfo:
8205      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
8206      *     reads as constant 0xc00 from NS EL1 and NS EL2
8207      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
8208      *  if v7 without EL3, register doesn't exist
8209      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
8210      */
8211     if (arm_feature(env, ARM_FEATURE_EL3)) {
8212         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
8213             static const ARMCPRegInfo nsacr = {
8214                 .name = "NSACR", .type = ARM_CP_CONST,
8215                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
8216                 .access = PL1_RW, .accessfn = nsacr_access,
8217                 .resetvalue = 0xc00
8218             };
8219             define_one_arm_cp_reg(cpu, &nsacr);
8220         } else {
8221             static const ARMCPRegInfo nsacr = {
8222                 .name = "NSACR",
8223                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
8224                 .access = PL3_RW | PL1_R,
8225                 .resetvalue = 0,
8226                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
8227             };
8228             define_one_arm_cp_reg(cpu, &nsacr);
8229         }
8230     } else {
8231         if (arm_feature(env, ARM_FEATURE_V8)) {
8232             static const ARMCPRegInfo nsacr = {
8233                 .name = "NSACR", .type = ARM_CP_CONST,
8234                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
8235                 .access = PL1_R,
8236                 .resetvalue = 0xc00
8237             };
8238             define_one_arm_cp_reg(cpu, &nsacr);
8239         }
8240     }
8241 
8242     if (arm_feature(env, ARM_FEATURE_PMSA)) {
8243         if (arm_feature(env, ARM_FEATURE_V6)) {
8244             /* PMSAv6 not implemented */
8245             assert(arm_feature(env, ARM_FEATURE_V7));
8246             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
8247             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
8248         } else {
8249             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
8250         }
8251     } else {
8252         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
8253         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
8254         /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
8255         if (cpu_isar_feature(aa32_hpd, cpu)) {
8256             define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
8257         }
8258     }
8259     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
8260         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
8261     }
8262     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
8263         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
8264     }
8265     if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
8266         define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo);
8267     }
8268 #ifndef CONFIG_USER_ONLY
8269     if (cpu_isar_feature(aa64_ecv, cpu)) {
8270         define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo);
8271     }
8272 #endif
8273     if (arm_feature(env, ARM_FEATURE_VAPA)) {
8274         ARMCPRegInfo vapa_cp_reginfo[] = {
8275             { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
8276               .access = PL1_RW, .resetvalue = 0,
8277               .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
8278                                      offsetoflow32(CPUARMState, cp15.par_ns) },
8279               .writefn = par_write},
8280 #ifndef CONFIG_USER_ONLY
8281             /* This underdecoding is safe because the reginfo is NO_RAW. */
8282             { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
8283               .access = PL1_W, .accessfn = ats_access,
8284               .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
8285 #endif
8286         };
8287 
8288         /*
8289          * When LPAE exists, this 32-bit PAR register is an alias of the
8290          * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[]
8291          */
8292         if (arm_feature(env, ARM_FEATURE_LPAE)) {
8293             vapa_cp_reginfo[0].type = ARM_CP_ALIAS | ARM_CP_NO_GDB;
8294         }
8295         define_arm_cp_regs(cpu, vapa_cp_reginfo);
8296     }
8297     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
8298         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
8299     }
8300     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
8301         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
8302     }
8303     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
8304         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
8305     }
8306     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
8307         define_arm_cp_regs(cpu, omap_cp_reginfo);
8308     }
8309     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
8310         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
8311     }
8312     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
8313         define_arm_cp_regs(cpu, xscale_cp_reginfo);
8314     }
8315     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
8316         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
8317     }
8318     if (arm_feature(env, ARM_FEATURE_LPAE)) {
8319         define_arm_cp_regs(cpu, lpae_cp_reginfo);
8320     }
8321     if (cpu_isar_feature(aa32_jazelle, cpu)) {
8322         define_arm_cp_regs(cpu, jazelle_regs);
8323     }
8324     /*
8325      * Slightly awkwardly, the OMAP and StrongARM cores need all of
8326      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
8327      * be read-only (ie write causes UNDEF exception).
8328      */
8329     {
8330         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
8331             /*
8332              * Pre-v8 MIDR space.
8333              * Note that the MIDR isn't a simple constant register because
8334              * of the TI925 behaviour where writes to another register can
8335              * cause the MIDR value to change.
8336              *
8337              * Unimplemented registers in the c15 0 0 0 space default to
8338              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
8339              * and friends override accordingly.
8340              */
8341             { .name = "MIDR",
8342               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
8343               .access = PL1_R, .resetvalue = cpu->midr,
8344               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
8345               .readfn = midr_read,
8346               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
8347               .type = ARM_CP_OVERRIDE },
8348             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
8349             { .name = "DUMMY",
8350               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
8351               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8352             { .name = "DUMMY",
8353               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
8354               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8355             { .name = "DUMMY",
8356               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
8357               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8358             { .name = "DUMMY",
8359               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
8360               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8361             { .name = "DUMMY",
8362               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
8363               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
8364         };
8365         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
8366             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
8367               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
8368               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
8369               .fgt = FGT_MIDR_EL1,
8370               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
8371               .readfn = midr_read },
8372             /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
8373             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
8374               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
8375               .access = PL1_R, .resetvalue = cpu->midr },
8376             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
8377               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
8378               .access = PL1_R,
8379               .accessfn = access_aa64_tid1,
8380               .fgt = FGT_REVIDR_EL1,
8381               .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
8382         };
8383         ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
8384             .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB,
8385             .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
8386             .access = PL1_R, .resetvalue = cpu->midr
8387         };
8388         ARMCPRegInfo id_cp_reginfo[] = {
8389             /* These are common to v8 and pre-v8 */
8390             { .name = "CTR",
8391               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
8392               .access = PL1_R, .accessfn = ctr_el0_access,
8393               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
8394             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
8395               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
8396               .access = PL0_R, .accessfn = ctr_el0_access,
8397               .fgt = FGT_CTR_EL0,
8398               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
8399             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
8400             { .name = "TCMTR",
8401               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
8402               .access = PL1_R,
8403               .accessfn = access_aa32_tid1,
8404               .type = ARM_CP_CONST, .resetvalue = 0 },
8405         };
8406         /* TLBTR is specific to VMSA */
8407         ARMCPRegInfo id_tlbtr_reginfo = {
8408               .name = "TLBTR",
8409               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
8410               .access = PL1_R,
8411               .accessfn = access_aa32_tid1,
8412               .type = ARM_CP_CONST, .resetvalue = 0,
8413         };
8414         /* MPUIR is specific to PMSA V6+ */
8415         ARMCPRegInfo id_mpuir_reginfo = {
8416               .name = "MPUIR",
8417               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
8418               .access = PL1_R, .type = ARM_CP_CONST,
8419               .resetvalue = cpu->pmsav7_dregion << 8
8420         };
8421         /* HMPUIR is specific to PMSA V8 */
8422         ARMCPRegInfo id_hmpuir_reginfo = {
8423             .name = "HMPUIR",
8424             .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
8425             .access = PL2_R, .type = ARM_CP_CONST,
8426             .resetvalue = cpu->pmsav8r_hdregion
8427         };
8428         static const ARMCPRegInfo crn0_wi_reginfo = {
8429             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
8430             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
8431             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
8432         };
8433 #ifdef CONFIG_USER_ONLY
8434         static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
8435             { .name = "MIDR_EL1",
8436               .exported_bits = R_MIDR_EL1_REVISION_MASK |
8437                                R_MIDR_EL1_PARTNUM_MASK |
8438                                R_MIDR_EL1_ARCHITECTURE_MASK |
8439                                R_MIDR_EL1_VARIANT_MASK |
8440                                R_MIDR_EL1_IMPLEMENTER_MASK },
8441             { .name = "REVIDR_EL1" },
8442         };
8443         modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
8444 #endif
8445         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
8446             arm_feature(env, ARM_FEATURE_STRONGARM)) {
8447             size_t i;
8448             /*
8449              * Register the blanket "writes ignored" value first to cover the
8450              * whole space. Then update the specific ID registers to allow write
8451              * access, so that they ignore writes rather than causing them to
8452              * UNDEF.
8453              */
8454             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
8455             for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
8456                 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
8457             }
8458             for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
8459                 id_cp_reginfo[i].access = PL1_RW;
8460             }
8461             id_mpuir_reginfo.access = PL1_RW;
8462             id_tlbtr_reginfo.access = PL1_RW;
8463         }
8464         if (arm_feature(env, ARM_FEATURE_V8)) {
8465             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
8466             if (!arm_feature(env, ARM_FEATURE_PMSA)) {
8467                 define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo);
8468             }
8469         } else {
8470             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
8471         }
8472         define_arm_cp_regs(cpu, id_cp_reginfo);
8473         if (!arm_feature(env, ARM_FEATURE_PMSA)) {
8474             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
8475         } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
8476                    arm_feature(env, ARM_FEATURE_V8)) {
8477             uint32_t i = 0;
8478             char *tmp_string;
8479 
8480             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
8481             define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo);
8482             define_arm_cp_regs(cpu, pmsav8r_cp_reginfo);
8483 
8484             /* Register alias is only valid for the first 32 indexes */
8485             for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
8486                 uint8_t crm = 0b1000 | extract32(i, 1, 3);
8487                 uint8_t opc1 = extract32(i, 4, 1);
8488                 uint8_t opc2 = extract32(i, 0, 1) << 2;
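                /*
                 * Worked example: region i = 5 yields crm = 0b1010, opc1 = 0,
                 * opc2 = 0b100, i.e. PRBAR5 at p15, 0, c6, c10, 4, with
                 * PRLAR5 at the same encoding but opc2 | 1.
                 */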
8489 
8490                 tmp_string = g_strdup_printf("PRBAR%u", i);
8491                 ARMCPRegInfo tmp_prbarn_reginfo = {
8492                     .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
8493                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
8494                     .access = PL1_RW, .resetvalue = 0,
8495                     .accessfn = access_tvm_trvm,
8496                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
8497                 };
8498                 define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
8499                 g_free(tmp_string);
8500 
8501                 opc2 = extract32(i, 0, 1) << 2 | 0x1;
8502                 tmp_string = g_strdup_printf("PRLAR%u", i);
8503                 ARMCPRegInfo tmp_prlarn_reginfo = {
8504                     .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
8505                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
8506                     .access = PL1_RW, .resetvalue = 0,
8507                     .accessfn = access_tvm_trvm,
8508                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
8509                 };
8510                 define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
8511                 g_free(tmp_string);
8512             }
8513 
8514             /* Register alias is only valid for the first 32 indexes */
8515             for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
8516                 uint8_t crm = 0b1000 | extract32(i, 1, 3);
8517                 uint8_t opc1 = 0b100 | extract32(i, 4, 1);
8518                 uint8_t opc2 = extract32(i, 0, 1) << 2;
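                /*
                 * As for PRBAR/PRLAR above, but with opc1 = 0b100 | i[4]:
                 * region i = 5 puts HPRBAR5 at p15, 4, c6, c10, 4.
                 */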
8519 
8520                 tmp_string = g_strdup_printf("HPRBAR%u", i);
8521                 ARMCPRegInfo tmp_hprbarn_reginfo = {
8522                     .name = tmp_string,
8523                     .type = ARM_CP_NO_RAW,
8524                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
8525                     .access = PL2_RW, .resetvalue = 0,
8526                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
8527                 };
8528                 define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
8529                 g_free(tmp_string);
8530 
8531                 opc2 = extract32(i, 0, 1) << 2 | 0x1;
8532                 tmp_string = g_strdup_printf("HPRLAR%u", i);
8533                 ARMCPRegInfo tmp_hprlarn_reginfo = {
8534                     .name = tmp_string,
8535                     .type = ARM_CP_NO_RAW,
8536                     .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
8537                     .access = PL2_RW, .resetvalue = 0,
8538                     .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
8539                 };
8540                 define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
8541                 g_free(tmp_string);
8542             }
8543         } else if (arm_feature(env, ARM_FEATURE_V7)) {
8544             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
8545         }
8546     }
8547 
8548     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
8549         ARMCPRegInfo mpidr_cp_reginfo[] = {
8550             { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
8551               .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
8552               .fgt = FGT_MPIDR_EL1,
8553               .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
8554         };
8555 #ifdef CONFIG_USER_ONLY
8556         static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
8557             { .name = "MPIDR_EL1",
8558               .fixed_bits = 0x0000000080000000 },
8559         };
8560         modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
8561 #endif
8562         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
8563     }
8564 
8565     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
8566         ARMCPRegInfo auxcr_reginfo[] = {
8567             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
8568               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
8569               .access = PL1_RW, .accessfn = access_tacr,
8570               .nv2_redirect_offset = 0x118,
8571               .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
8572             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
8573               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
8574               .access = PL2_RW, .type = ARM_CP_CONST,
8575               .resetvalue = 0 },
8576             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
8577               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
8578               .access = PL3_RW, .type = ARM_CP_CONST,
8579               .resetvalue = 0 },
8580         };
8581         define_arm_cp_regs(cpu, auxcr_reginfo);
8582         if (cpu_isar_feature(aa32_ac2, cpu)) {
8583             define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
8584         }
8585     }
8586 
8587     if (arm_feature(env, ARM_FEATURE_CBAR)) {
8588         /*
8589          * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
8590          * There are two flavours:
8591          *  (1) older 32-bit only cores have a simple 32-bit CBAR
8592          *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
8593          *      32-bit register visible to AArch32 at a different encoding
8594          *      to the "flavour 1" register and with the bits rearranged to
8595          *      be able to squash a 64-bit address into the 32-bit view.
8596          * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
8597          * in future if we support AArch32-only configs of some of the
8598          * AArch64 cores we might need to add a specific feature flag
8599          * to indicate cores with "flavour 2" CBAR.
8600          */
8601         if (arm_feature(env, ARM_FEATURE_V8)) {
8602             /* 32 bit view is [31:18] 0...0 [43:32]. */
8603             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
8604                 | extract64(cpu->reset_cbar, 32, 12);
8605             ARMCPRegInfo cbar_reginfo[] = {
8606                 { .name = "CBAR",
8607                   .type = ARM_CP_CONST,
8608                   .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
8609                   .access = PL1_R, .resetvalue = cbar32 },
8610                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
8611                   .type = ARM_CP_CONST,
8612                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
8613                   .access = PL1_R, .resetvalue = cpu->reset_cbar },
8614             };
8615             /* We don't currently implement a r/w 64 bit CBAR */
8616             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
8617             define_arm_cp_regs(cpu, cbar_reginfo);
8618         } else {
8619             ARMCPRegInfo cbar = {
8620                 .name = "CBAR",
8621                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
8622                 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
8623                 .fieldoffset = offsetof(CPUARMState,
8624                                         cp15.c15_config_base_address)
8625             };
8626             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
8627                 cbar.access = PL1_R;
8628                 cbar.fieldoffset = 0;
8629                 cbar.type = ARM_CP_CONST;
8630             }
8631             define_one_arm_cp_reg(cpu, &cbar);
8632         }
8633     }
8634 
8635     if (arm_feature(env, ARM_FEATURE_VBAR)) {
8636         static const ARMCPRegInfo vbar_cp_reginfo[] = {
8637             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
8638               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
8639               .access = PL1_RW, .writefn = vbar_write,
8640               .accessfn = access_nv1,
8641               .fgt = FGT_VBAR_EL1,
8642               .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
8643               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
8644                                      offsetof(CPUARMState, cp15.vbar_ns) },
8645               .resetvalue = 0 },
8646         };
8647         define_arm_cp_regs(cpu, vbar_cp_reginfo);
8648     }
8649 
8650     /* Generic registers whose values depend on the implementation */
8651     {
8652         ARMCPRegInfo sctlr = {
8653             .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
8654             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
8655             .access = PL1_RW, .accessfn = access_tvm_trvm,
8656             .fgt = FGT_SCTLR_EL1,
8657             .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
8658             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
8659                                    offsetof(CPUARMState, cp15.sctlr_ns) },
8660             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
8661             .raw_writefn = raw_write,
8662         };
8663         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
8664             /*
8665              * Normally we would always end the TB on an SCTLR write, but Linux
8666              * arch/arm/mach-pxa/sleep.S expects two instructions following
8667              * an MMU enable to execute from cache.  Imitate this behaviour.
8668              */
8669             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
8670         }
8671         define_one_arm_cp_reg(cpu, &sctlr);
8672 
8673         if (arm_feature(env, ARM_FEATURE_PMSA) &&
8674             arm_feature(env, ARM_FEATURE_V8)) {
8675             ARMCPRegInfo vsctlr = {
8676                 .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
8677                 .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
8678                 .access = PL2_RW, .resetvalue = 0x0,
8679                 .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
8680             };
8681             define_one_arm_cp_reg(cpu, &vsctlr);
8682         }
8683     }
8684 
8685     if (cpu_isar_feature(aa64_lor, cpu)) {
8686         define_arm_cp_regs(cpu, lor_reginfo);
8687     }
8688     if (cpu_isar_feature(aa64_pan, cpu)) {
8689         define_one_arm_cp_reg(cpu, &pan_reginfo);
8690     }
8691 #ifndef CONFIG_USER_ONLY
8692     if (cpu_isar_feature(aa64_ats1e1, cpu)) {
8693         define_arm_cp_regs(cpu, ats1e1_reginfo);
8694     }
8695     if (cpu_isar_feature(aa32_ats1e1, cpu)) {
8696         define_arm_cp_regs(cpu, ats1cp_reginfo);
8697     }
8698 #endif
8699     if (cpu_isar_feature(aa64_uao, cpu)) {
8700         define_one_arm_cp_reg(cpu, &uao_reginfo);
8701     }
8702 
8703     if (cpu_isar_feature(aa64_dit, cpu)) {
8704         define_one_arm_cp_reg(cpu, &dit_reginfo);
8705     }
8706     if (cpu_isar_feature(aa64_ssbs, cpu)) {
8707         define_one_arm_cp_reg(cpu, &ssbs_reginfo);
8708     }
8709     if (cpu_isar_feature(any_ras, cpu)) {
8710         define_arm_cp_regs(cpu, minimal_ras_reginfo);
8711     }
8712 
8713     if (cpu_isar_feature(aa64_vh, cpu) ||
8714         cpu_isar_feature(aa64_debugv8p2, cpu)) {
8715         define_one_arm_cp_reg(cpu, &contextidr_el2);
8716     }
8717     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
8718         define_arm_cp_regs(cpu, vhe_reginfo);
8719     }
8720 
8721     if (cpu_isar_feature(aa64_sve, cpu)) {
8722         define_arm_cp_regs(cpu, zcr_reginfo);
8723     }
8724 
8725     if (cpu_isar_feature(aa64_hcx, cpu)) {
8726         define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
8727     }
8728 
8729 #ifdef TARGET_AARCH64
8730     if (cpu_isar_feature(aa64_sme, cpu)) {
8731         define_arm_cp_regs(cpu, sme_reginfo);
8732     }
8733     if (cpu_isar_feature(aa64_pauth, cpu)) {
8734         define_arm_cp_regs(cpu, pauth_reginfo);
8735     }
8736     if (cpu_isar_feature(aa64_rndr, cpu)) {
8737         define_arm_cp_regs(cpu, rndr_reginfo);
8738     }
8739     /* Data Cache clean instructions up to PoP */
8740     if (cpu_isar_feature(aa64_dcpop, cpu)) {
8741         define_one_arm_cp_reg(cpu, dcpop_reg);
8742 
8743         if (cpu_isar_feature(aa64_dcpodp, cpu)) {
8744             define_one_arm_cp_reg(cpu, dcpodp_reg);
8745         }
8746     }
8747 
8748     /*
8749      * If full MTE is enabled, add all of the system registers.
8750      * If only "instructions available at EL0" are enabled,
8751      * then define only a RAZ/WI version of PSTATE.TCO.
8752      */
8753     if (cpu_isar_feature(aa64_mte, cpu)) {
8754         ARMCPRegInfo gmid_reginfo = {
8755             .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
8756             .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
8757             .access = PL1_R, .accessfn = access_aa64_tid5,
8758             .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
8759         };
8760         define_one_arm_cp_reg(cpu, &gmid_reginfo);
8761         define_arm_cp_regs(cpu, mte_reginfo);
8762         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
8763     } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
8764         define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
8765         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
8766     }
8767 
8768     if (cpu_isar_feature(aa64_scxtnum, cpu)) {
8769         define_arm_cp_regs(cpu, scxtnum_reginfo);
8770     }
8771 
8772     if (cpu_isar_feature(aa64_fgt, cpu)) {
8773         define_arm_cp_regs(cpu, fgt_reginfo);
8774     }
8775 
8776     if (cpu_isar_feature(aa64_rme, cpu)) {
8777         define_arm_cp_regs(cpu, rme_reginfo);
8778         if (cpu_isar_feature(aa64_mte, cpu)) {
8779             define_arm_cp_regs(cpu, rme_mte_reginfo);
8780         }
8781     }
8782 
8783     if (cpu_isar_feature(aa64_nv2, cpu)) {
8784         define_arm_cp_regs(cpu, nv2_reginfo);
8785     }
8786 
8787     if (cpu_isar_feature(aa64_nmi, cpu)) {
8788         define_arm_cp_regs(cpu, nmi_reginfo);
8789     }
8790 #endif
8791 
8792     if (cpu_isar_feature(any_predinv, cpu)) {
8793         define_arm_cp_regs(cpu, predinv_reginfo);
8794     }
8795 
8796     if (cpu_isar_feature(any_ccidx, cpu)) {
8797         define_arm_cp_regs(cpu, ccsidr2_reginfo);
8798     }
8799 
8800 #ifndef CONFIG_USER_ONLY
8801     /*
8802      * Register redirections and aliases must be done last,
8803      * after the registers from the other extensions have been defined.
8804      */
8805     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
8806         define_arm_vh_e2h_redirects_aliases(cpu);
8807     }
8808 #endif
8809 }
8810 
8811 /*
8812  * Private utility function for define_one_arm_cp_reg_with_opaque():
8813  * add a single reginfo struct to the hash table.
8814  */
8815 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
8816                                    void *opaque, CPState state,
8817                                    CPSecureState secstate,
8818                                    int crm, int opc1, int opc2,
8819                                    const char *name)
8820 {
8821     CPUARMState *env = &cpu->env;
8822     uint32_t key;
8823     ARMCPRegInfo *r2;
8824     bool is64 = r->type & ARM_CP_64BIT;
8825     bool ns = secstate & ARM_CP_SECSTATE_NS;
8826     int cp = r->cp;
8827     size_t name_len;
8828     bool make_const;
8829 
8830     switch (state) {
8831     case ARM_CP_STATE_AA32:
8832         /* We assume it is a cp15 register if the .cp field is left unset. */
8833         if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
8834             cp = 15;
8835         }
8836         key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
8837         break;
8838     case ARM_CP_STATE_AA64:
8839         /*
8840          * To allow abbreviation of ARMCPRegInfo definitions, we treat
8841          * cp == 0 as equivalent to the value for "standard guest-visible
8842          * sysreg".  STATE_BOTH definitions are also always "standard sysreg"
8843          * in their AArch64 view (the .cp value may be non-zero for the
8844          * benefit of the AArch32 view).
8845          */
8846         if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
8847             cp = CP_REG_ARM64_SYSREG_CP;
8848         }
8849         key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
8850         break;
8851     default:
8852         g_assert_not_reached();
8853     }
8854 
8855     /* Overriding of an existing definition must be explicitly requested. */
8856     if (!(r->type & ARM_CP_OVERRIDE)) {
8857         const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
8858         if (oldreg) {
8859             assert(oldreg->type & ARM_CP_OVERRIDE);
8860         }
8861     }
8862 
8863     /*
8864      * Eliminate registers that are not present because the EL is missing.
8865      * Doing this here makes it easier to put all registers for a given
8866      * feature into the same ARMCPRegInfo array and define them all at once.
8867      */
8868     make_const = false;
8869     if (arm_feature(env, ARM_FEATURE_EL3)) {
8870         /*
8871          * An EL2 register without EL2 but with EL3 is (usually) RES0.
8872          * See rule RJFFP in section D1.1.3 of DDI0487H.a.
8873          */
8874         int min_el = ctz32(r->access) / 2;
8875         if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
8876             if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
8877                 return;
8878             }
8879             make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
8880         }
8881     } else {
8882         CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
8883                                  ? PL2_RW : PL1_RW);
8884         if ((r->access & max_el) == 0) {
8885             return;
8886         }
8887     }
8888 
8889     /* Combine cpreg and name into one allocation. */
8890     name_len = strlen(name) + 1;
8891     r2 = g_malloc(sizeof(*r2) + name_len);
8892     *r2 = *r;
8893     r2->name = memcpy(r2 + 1, name, name_len);
8894 
8895     /*
8896      * Update fields to match the instantiation, overwriting wildcards
8897      * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
8898      */
8899     r2->cp = cp;
8900     r2->crm = crm;
8901     r2->opc1 = opc1;
8902     r2->opc2 = opc2;
8903     r2->state = state;
8904     r2->secure = secstate;
8905     if (opaque) {
8906         r2->opaque = opaque;
8907     }
8908 
8909     if (make_const) {
8910         /* This should not have been a very special register to begin with. */
8911         int old_special = r2->type & ARM_CP_SPECIAL_MASK;
8912         assert(old_special == 0 || old_special == ARM_CP_NOP);
8913         /*
8914          * Set the special function to CONST, retaining the other flags.
8915          * This is important for e.g. ARM_CP_SVE so that we still
8916          * take the SVE trap if CPTR_EL3.EZ == 0.
8917          */
8918         r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
8919         /*
8920          * Usually, these registers become RES0, but there are a few
8921          * special cases like VPIDR_EL2 which have a constant non-zero
8922          * value with writes ignored.
8923          */
8924         if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
8925             r2->resetvalue = 0;
8926         }
8927         /*
8928          * ARM_CP_CONST has precedence, so removing the callbacks and
8929          * offsets is not strictly necessary, but it makes things
8930          * less confusing to debug later.
8931          */
8932         r2->readfn = NULL;
8933         r2->writefn = NULL;
8934         r2->raw_readfn = NULL;
8935         r2->raw_writefn = NULL;
8936         r2->resetfn = NULL;
8937         r2->fieldoffset = 0;
8938         r2->bank_fieldoffsets[0] = 0;
8939         r2->bank_fieldoffsets[1] = 0;
8940     } else {
8941         bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
8942 
8943         if (isbanked) {
8944             /*
8945              * Register is banked (using both entries in the array).
8946              * Overwrite fieldoffset with the entry for this security
8947              * state; later code consults only fieldoffset.
8948              */
8949             r2->fieldoffset = r->bank_fieldoffsets[ns];
8950         }
8951         if (state == ARM_CP_STATE_AA32) {
8952             if (isbanked) {
8953                 /*
8954                  * If the register is banked then we don't need to migrate or
8955                  * reset the 32-bit instance in certain cases:
8956                  *
8957                  * 1) If the register has both 32-bit and 64-bit instances
8958                  *    then we can count on the 64-bit instance taking care
8959                  *    of the non-secure bank.
8960                  * 2) If ARMv8 is enabled then we can count on a 64-bit
8961                  *    version taking care of the secure bank.  This requires
8962                  *    that separate 32 and 64-bit definitions are provided.
8963                  */
8964                 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
8965                     (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
8966                     r2->type |= ARM_CP_ALIAS;
8967                 }
8968             } else if ((secstate != r->secure) && !ns) {
8969                 /*
8970                  * The register is not banked so we only want to allow
8971                  * migration of the non-secure instance.
8972                  */
8973                 r2->type |= ARM_CP_ALIAS;
8974             }
8975 
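            /*
             * The AArch32 view of a STATE_BOTH register is the low 32 bits
             * of the underlying 64-bit field; on a big-endian host those
             * live at fieldoffset + 4.
             */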
8976             if (HOST_BIG_ENDIAN &&
8977                 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
8978                 r2->fieldoffset += sizeof(uint32_t);
8979             }
8980         }
8981     }
8982 
8983     /*
8984      * By convention, for wildcarded registers only the first
8985      * entry is used for migration; the others are marked as
8986      * ALIAS so we don't try to transfer the register
8987      * multiple times. Special registers (ie NOP/WFI) are
8988      * never migratable and not even raw-accessible.
8989      */
8990     if (r2->type & ARM_CP_SPECIAL_MASK) {
8991         r2->type |= ARM_CP_NO_RAW;
8992     }
8993     if (((r->crm == CP_ANY) && crm != 0) ||
8994         ((r->opc1 == CP_ANY) && opc1 != 0) ||
8995         ((r->opc2 == CP_ANY) && opc2 != 0)) {
8996         r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
8997     }
8998 
8999     /*
9000      * Check that raw accesses are either forbidden or handled. Note that
9001      * we can't assert this earlier because the setup of fieldoffset for
9002      * banked registers has to be done first.
9003      */
9004     if (!(r2->type & ARM_CP_NO_RAW)) {
9005         assert(!raw_accessors_invalid(r2));
9006     }
9007 
9008     g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
9009 }
9010 
9011 
9012 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
9013                                        const ARMCPRegInfo *r, void *opaque)
9014 {
9015     /*
9016      * Define implementations of coprocessor registers.
9017      * We store these in a hashtable because typically
9018      * there are fewer than 150 registers in a space which
9019      * is 16*16*16*8*8 = 262144 in size.
9020      * Wildcarding is supported for the crm, opc1 and opc2 fields.
9021      * If a register is defined twice then the second definition is
9022      * used, so this can be used to define some generic registers and
9023      * then override them with implementation specific variations.
9024      * At least one of the original and the second definition should
9025      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
9026      * against accidental use.
9027      *
9028      * The state field defines whether the register is to be
9029      * visible in the AArch32 or AArch64 execution state. If the
9030      * state is set to ARM_CP_STATE_BOTH then we synthesise a
9031      * reginfo structure for the AArch32 view, which sees the lower
9032      * 32 bits of the 64 bit register.
9033      *
9034      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
9035      * be wildcarded. AArch64 registers are always considered to be 64
9036      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
9037      * the register, if any.
9038      */
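    /*
     * Illustrative example: a reginfo with .crm = CP_ANY and
     * .opc2 = CP_ANY expands to 16 * 8 = 128 encodings; every instance
     * other than crm = 0, opc2 = 0 is marked ARM_CP_ALIAS | ARM_CP_NO_GDB
     * by add_cpreg_to_hashtable() so the register migrates only once.
     */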
9039     int crm, opc1, opc2;
9040     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
9041     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
9042     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
9043     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
9044     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
9045     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
9046     CPState state;
9047 
9048     /* 64 bit registers have only CRm and Opc1 fields */
9049     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
9050     /* op0 only exists in the AArch64 encodings */
9051     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
9052     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
9053     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
9054     /*
9055      * This API is only for Arm's system coprocessors (14 and 15) or
9056      * (M-profile or v7A-and-earlier only) for implementation defined
9057      * coprocessors in the range 0..7.  Our decode assumes this, since
9058      * 8..13 can be used for other insns including VFP and Neon. See
9059      * valid_cp() in translate.c.  Assert here that we haven't tried
9060      * to use an invalid coprocessor number.
9061      */
9062     switch (r->state) {
9063     case ARM_CP_STATE_BOTH:
9064         /* 0 has a special meaning, but otherwise the same rules as AA32. */
9065         if (r->cp == 0) {
9066             break;
9067         }
9068         /* fall through */
9069     case ARM_CP_STATE_AA32:
9070         if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
9071             !arm_feature(&cpu->env, ARM_FEATURE_M)) {
9072             assert(r->cp >= 14 && r->cp <= 15);
9073         } else {
9074             assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
9075         }
9076         break;
9077     case ARM_CP_STATE_AA64:
9078         assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
9079         break;
9080     default:
9081         g_assert_not_reached();
9082     }
9083     /*
9084      * The AArch64 pseudocode CheckSystemAccess() specifies that op1
9085      * encodes a minimum access level for the register. We roll this
9086      * runtime check into our general permission check code, so check
9087      * here that the reginfo's specified permissions are strict enough
9088      * to encompass the generic architectural permission check.
9089      */
9090     if (r->state != ARM_CP_STATE_AA32) {
9091         CPAccessRights mask;
9092         switch (r->opc1) {
9093         case 0:
9094             /* min_EL EL1, but some accessible to EL0 via kernel ABI */
9095             mask = PL0U_R | PL1_RW;
9096             break;
9097         case 1: case 2:
9098             /* min_EL EL1 */
9099             mask = PL1_RW;
9100             break;
9101         case 3:
9102             /* min_EL EL0 */
9103             mask = PL0_RW;
9104             break;
9105         case 4:
9106         case 5:
9107             /* min_EL EL2 */
9108             mask = PL2_RW;
9109             break;
9110         case 6:
9111             /* min_EL EL3 */
9112             mask = PL3_RW;
9113             break;
9114         case 7:
9115             /* min_EL EL1, secure mode only (we don't check the latter) */
9116             mask = PL1_RW;
9117             break;
9118         default:
9119             /* broken reginfo with out-of-range opc1 */
9120             g_assert_not_reached();
9121         }
9122         /* assert our permissions are not too lax (stricter is fine) */
9123         assert((r->access & ~mask) == 0);
9124     }
9125 
9126     /*
9127      * Check that the register definition has enough info to handle
9128      * reads and writes if they are permitted.
9129      */
9130     if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
9131         if (r->access & PL3_R) {
9132             assert((r->fieldoffset ||
9133                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
9134                    r->readfn);
9135         }
9136         if (r->access & PL3_W) {
9137             assert((r->fieldoffset ||
9138                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
9139                    r->writefn);
9140         }
9141     }
9142 
9143     for (crm = crmmin; crm <= crmmax; crm++) {
9144         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
9145             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
9146                 for (state = ARM_CP_STATE_AA32;
9147                      state <= ARM_CP_STATE_AA64; state++) {
9148                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
9149                         continue;
9150                     }
9151                     if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
9152                         cpu_isar_feature(aa64_xs, cpu)) {
9153                         /*
9154                          * This is a TLBI insn which has an NXS variant. The
9155                          * NXS variant is at the same encoding except that
9156                          * crn is +1, and has the same behaviour except for
9157                          * fine-grained trapping. Add the NXS insn here and
9158                          * then fall through to add the normal register.
9159                          * add_cpreg_to_hashtable() copies the cpreg struct
9160                          * and name that it is passed, so it's OK to use
9161                          * a local struct here.
9162                          */
9163                         ARMCPRegInfo nxs_ri = *r;
9164                         g_autofree char *name = g_strdup_printf("%sNXS", r->name);
9165 
9166                         assert(state == ARM_CP_STATE_AA64);
9167                         assert(nxs_ri.crn < 0xf);
9168                         nxs_ri.crn++;
9169                         if (nxs_ri.fgt) {
9170                             nxs_ri.fgt |= R_FGT_NXS_MASK;
9171                         }
9172                         add_cpreg_to_hashtable(cpu, &nxs_ri, opaque, state,
9173                                                ARM_CP_SECSTATE_NS,
9174                                                crm, opc1, opc2, name);
9175                     }
9176                     if (state == ARM_CP_STATE_AA32) {
9177                         /*
9178                          * Under AArch32 CP registers can be common
9179                          * (same for secure and non-secure world) or banked.
9180                          */
9181                         char *name;
9182 
9183                         switch (r->secure) {
9184                         case ARM_CP_SECSTATE_S:
9185                         case ARM_CP_SECSTATE_NS:
9186                             add_cpreg_to_hashtable(cpu, r, opaque, state,
9187                                                    r->secure, crm, opc1, opc2,
9188                                                    r->name);
9189                             break;
9190                         case ARM_CP_SECSTATE_BOTH:
9191                             name = g_strdup_printf("%s_S", r->name);
9192                             add_cpreg_to_hashtable(cpu, r, opaque, state,
9193                                                    ARM_CP_SECSTATE_S,
9194                                                    crm, opc1, opc2, name);
9195                             g_free(name);
9196                             add_cpreg_to_hashtable(cpu, r, opaque, state,
9197                                                    ARM_CP_SECSTATE_NS,
9198                                                    crm, opc1, opc2, r->name);
9199                             break;
9200                         default:
9201                             g_assert_not_reached();
9202                         }
9203                     } else {
9204                         /*
9205                          * AArch64 registers get mapped to the non-secure
9206                          * instance of AArch32
9207                          */
9208                         add_cpreg_to_hashtable(cpu, r, opaque, state,
9209                                                ARM_CP_SECSTATE_NS,
9210                                                crm, opc1, opc2, r->name);
9211                     }
9212                 }
9213             }
9214         }
9215     }
9216 }
9217 
9218 /* Define a whole list of registers */
9219 void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
9220                                         void *opaque, size_t len)
9221 {
9222     size_t i;
9223     for (i = 0; i < len; ++i) {
9224         define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
9225     }
9226 }
9227 
9228 /*
9229  * Modify ARMCPRegInfo for access from userspace.
9230  *
9231  * This is a data driven modification directed by
9232  * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
9233  * user-space cannot alter any values, and dynamic values pertaining
9234  * to execution state are hidden from the user-space view anyway.
9235  */
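/*
 * For instance (illustrative), a mods entry { .name = "ID_AA64*",
 * .is_glob = true } would turn every matching reginfo into a RAZ constant,
 * while an exact-name entry can expose selected fields through
 * .exported_bits and .fixed_bits, as id_v8_user_midr_cp_reginfo does for
 * MIDR_EL1.
 */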
9236 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
9237                                  const ARMCPRegUserSpaceInfo *mods,
9238                                  size_t mods_len)
9239 {
9240     for (size_t mi = 0; mi < mods_len; ++mi) {
9241         const ARMCPRegUserSpaceInfo *m = mods + mi;
9242         GPatternSpec *pat = NULL;
9243 
9244         if (m->is_glob) {
9245             pat = g_pattern_spec_new(m->name);
9246         }
9247         for (size_t ri = 0; ri < regs_len; ++ri) {
9248             ARMCPRegInfo *r = regs + ri;
9249 
9250             if (pat && g_pattern_match_string(pat, r->name)) {
9251                 r->type = ARM_CP_CONST;
9252                 r->access = PL0U_R;
9253                 r->resetvalue = 0;
9254                 /* continue */
9255             } else if (strcmp(r->name, m->name) == 0) {
9256                 r->type = ARM_CP_CONST;
9257                 r->access = PL0U_R;
9258                 r->resetvalue &= m->exported_bits;
9259                 r->resetvalue |= m->fixed_bits;
9260                 break;
9261             }
9262         }
9263         if (pat) {
9264             g_pattern_spec_free(pat);
9265         }
9266     }
9267 }
9268 
9269 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
9270 {
9271     return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
9272 }
9273 
9274 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
9275                          uint64_t value)
9276 {
9277     /* Helper coprocessor write function for write-ignore registers */
9278 }
9279 
9280 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
9281 {
9282     /* Helper coprocessor read function for read-as-zero registers */
9283     return 0;
9284 }
9285 
9286 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
9287 {
9288     /* Helper coprocessor reset function for do-nothing-on-reset registers */
9289 }
9290 
9291 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
9292 {
9293     /*
9294      * Return true if it is not valid for us to switch to
9295      * this CPU mode (ie all the UNPREDICTABLE cases in
9296      * the ARM ARM CPSRWriteByInstr pseudocode).
9297      */
9298 
9299     /* Changes to or from Hyp via MSR and CPS are illegal. */
9300     if (write_type == CPSRWriteByInstr &&
9301         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
9302          mode == ARM_CPU_MODE_HYP)) {
9303         return 1;
9304     }
9305 
9306     switch (mode) {
9307     case ARM_CPU_MODE_USR:
9308         return 0;
9309     case ARM_CPU_MODE_SYS:
9310     case ARM_CPU_MODE_SVC:
9311     case ARM_CPU_MODE_ABT:
9312     case ARM_CPU_MODE_UND:
9313     case ARM_CPU_MODE_IRQ:
9314     case ARM_CPU_MODE_FIQ:
9315         /*
9316          * Note that we don't implement the IMPDEF NSACR.RFR which in v7
9317          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
9318          */
9319         /*
9320          * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
9321          * and CPS are treated as illegal mode changes.
9322          */
9323         if (write_type == CPSRWriteByInstr &&
9324             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
9325             (arm_hcr_el2_eff(env) & HCR_TGE)) {
9326             return 1;
9327         }
9328         return 0;
9329     case ARM_CPU_MODE_HYP:
9330         return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
9331     case ARM_CPU_MODE_MON:
9332         return arm_current_el(env) < 3;
9333     default:
9334         return 1;
9335     }
9336 }
9337 
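/*
 * Reassemble the guest-visible CPSR from the fields QEMU keeps unpacked:
 * NF and VF hold their flag in bit 31, CF holds it in bit 0, ZF is stored
 * inverted (ZF == 0 means the Z flag is set), and Q, IT, GE, T and AIF
 * live in their own fields; anything else comes from uncached_cpsr.
 */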
9338 uint32_t cpsr_read(CPUARMState *env)
9339 {
9340     int ZF;
9341     ZF = (env->ZF == 0);
9342     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
9343         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
9344         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
9345         | ((env->condexec_bits & 0xfc) << 8)
9346         | (env->GE << 16) | (env->daif & CPSR_AIF);
9347 }
9348 
9349 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
9350                 CPSRWriteType write_type)
9351 {
9352     uint32_t changed_daif;
9353     bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
9354         (mask & (CPSR_M | CPSR_E | CPSR_IL));
9355 
9356     if (mask & CPSR_NZCV) {
9357         env->ZF = (~val) & CPSR_Z;
9358         env->NF = val;
9359         env->CF = (val >> 29) & 1;
9360         env->VF = (val << 3) & 0x80000000;
9361     }
9362     if (mask & CPSR_Q) {
9363         env->QF = ((val & CPSR_Q) != 0);
9364     }
9365     if (mask & CPSR_T) {
9366         env->thumb = ((val & CPSR_T) != 0);
9367     }
9368     if (mask & CPSR_IT_0_1) {
9369         env->condexec_bits &= ~3;
9370         env->condexec_bits |= (val >> 25) & 3;
9371     }
9372     if (mask & CPSR_IT_2_7) {
9373         env->condexec_bits &= 3;
9374         env->condexec_bits |= (val >> 8) & 0xfc;
9375     }
9376     if (mask & CPSR_GE) {
9377         env->GE = (val >> 16) & 0xf;
9378     }
9379 
9380     /*
9381      * In a V7 implementation that includes the security extensions but does
9382      * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
9383      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
9384      * bits respectively.
9385      *
9386      * In a V8 implementation, it is permitted for privileged software to
9387      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
9388      */
9389     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
9390         arm_feature(env, ARM_FEATURE_EL3) &&
9391         !arm_feature(env, ARM_FEATURE_EL2) &&
9392         !arm_is_secure(env)) {
9393 
9394         changed_daif = (env->daif ^ val) & mask;
9395 
9396         if (changed_daif & CPSR_A) {
9397             /*
9398              * Check to see if we are allowed to change the masking of async
9399              * abort exceptions from a non-secure state.
9400              */
9401             if (!(env->cp15.scr_el3 & SCR_AW)) {
9402                 qemu_log_mask(LOG_GUEST_ERROR,
9403                               "Ignoring attempt to switch CPSR_A flag from "
9404                               "non-secure world with SCR.AW bit clear\n");
9405                 mask &= ~CPSR_A;
9406             }
9407         }
9408 
9409         if (changed_daif & CPSR_F) {
9410             /*
9411              * Check to see if we are allowed to change the masking of FIQ
9412              * exceptions from a non-secure state.
9413              */
9414             if (!(env->cp15.scr_el3 & SCR_FW)) {
9415                 qemu_log_mask(LOG_GUEST_ERROR,
9416                               "Ignoring attempt to switch CPSR_F flag from "
9417                               "non-secure world with SCR.FW bit clear\n");
9418                 mask &= ~CPSR_F;
9419             }
9420 
9421             /*
9422              * Check whether non-maskable FIQ (NMFI) support is enabled.
9423              * If this bit is set software is not allowed to mask
9424              * FIQs, but is allowed to set CPSR_F to 0.
9425              */
9426             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
9427                 (val & CPSR_F)) {
9428                 qemu_log_mask(LOG_GUEST_ERROR,
9429                               "Ignoring attempt to enable CPSR_F flag "
9430                               "(non-maskable FIQ [NMFI] support enabled)\n");
9431                 mask &= ~CPSR_F;
9432             }
9433         }
9434     }
9435 
9436     env->daif &= ~(CPSR_AIF & mask);
9437     env->daif |= val & CPSR_AIF & mask;
9438 
9439     if (write_type != CPSRWriteRaw &&
9440         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
9441         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
9442             /*
9443              * Note that we can only get here in USR mode if this is a
9444              * gdb stub write; for this case we follow the architectural
9445              * behaviour for guest writes in USR mode of ignoring an attempt
9446              * to switch mode. (Those are caught by translate.c for writes
9447              * triggered by guest instructions.)
9448              */
9449             mask &= ~CPSR_M;
9450         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
9451             /*
9452              * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
9453              * v7, and has defined behaviour in v8:
9454              *  + leave CPSR.M untouched
9455              *  + allow changes to the other CPSR fields
9456              *  + set PSTATE.IL
9457              * For user changes via the GDB stub, we don't set PSTATE.IL,
9458              * as this would be unnecessarily harsh for a user error.
9459              */
9460             mask &= ~CPSR_M;
9461             if (write_type != CPSRWriteByGDBStub &&
9462                 arm_feature(env, ARM_FEATURE_V8)) {
9463                 mask |= CPSR_IL;
9464                 val |= CPSR_IL;
9465             }
9466             qemu_log_mask(LOG_GUEST_ERROR,
9467                           "Illegal AArch32 mode switch attempt from %s to %s\n",
9468                           aarch32_mode_name(env->uncached_cpsr),
9469                           aarch32_mode_name(val));
9470         } else {
9471             qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
9472                           write_type == CPSRWriteExceptionReturn ?
9473                           "Exception return from AArch32" :
9474                           "AArch32 mode switch from",
9475                           aarch32_mode_name(env->uncached_cpsr),
9476                           aarch32_mode_name(val), env->regs[15]);
9477             switch_mode(env, val & CPSR_M);
9478         }
9479     }
9480     mask &= ~CACHED_CPSR_BITS;
9481     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
9482     if (tcg_enabled() && rebuild_hflags) {
9483         arm_rebuild_hflags(env);
9484     }
9485 }
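
/*
 * Callers pass a mask of the CPSR bits they intend to update; for
 * example, migration restores the whole register with
 * cpsr_write(env, val, 0xffffffff, CPSRWriteRaw), which bypasses the
 * mode-switch and hflags handling above.
 */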
9486 
9487 #ifdef CONFIG_USER_ONLY
9488 
9489 static void switch_mode(CPUARMState *env, int mode)
9490 {
9491     ARMCPU *cpu = env_archcpu(env);
9492 
9493     if (mode != ARM_CPU_MODE_USR) {
9494         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
9495     }
9496 }
9497 
9498 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
9499                                  uint32_t cur_el, bool secure)
9500 {
9501     return 1;
9502 }
9503 
9504 void aarch64_sync_64_to_32(CPUARMState *env)
9505 {
9506     g_assert_not_reached();
9507 }
9508 
9509 #else
9510 
9511 static void switch_mode(CPUARMState *env, int mode)
9512 {
9513     int old_mode;
9514     int i;
9515 
9516     old_mode = env->uncached_cpsr & CPSR_M;
9517     if (mode == old_mode) {
9518         return;
9519     }
9520 
9521     if (old_mode == ARM_CPU_MODE_FIQ) {
9522         memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
9523         memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
9524     } else if (mode == ARM_CPU_MODE_FIQ) {
9525         memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
9526         memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
9527     }
9528 
9529     i = bank_number(old_mode);
9530     env->banked_r13[i] = env->regs[13];
9531     env->banked_spsr[i] = env->spsr;
9532 
9533     i = bank_number(mode);
9534     env->regs[13] = env->banked_r13[i];
9535     env->spsr = env->banked_spsr[i];
9536 
9537     env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
9538     env->regs[14] = env->banked_r14[r14_bank_number(mode)];
9539 }
9540 
9541 /*
9542  * Physical Interrupt Target EL Lookup Table
9543  *
9544  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
9545  *
9546  * The multi-dimensional table below is used to look up the target
9547  * exception level given several criteria.  Specifically, the
9548  * target EL is based on SCR and HCR routing controls as well as the
9549  * currently executing EL and secure state.
9550  *
9551  *    Dimensions:
9552  *    target_el_table[2][2][2][2][2][4]
9553  *                    |  |  |  |  |  +--- Current EL
9554  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
9555  *                    |  |  |  +--------- HCR mask override
9556  *                    |  |  +------------ SCR exec state control
9557  *                    |  +--------------- SCR mask override
9558  *                    +------------------ 32-bit(0)/64-bit(1) EL3
9559  *
9560  *    The table values are as such:
9561  *    0-3 = EL0-EL3
9562  *     -1 = Cannot occur
9563  *
9564  * The ARM ARM target EL table includes entries indicating that an "exception
9565  * is not taken".  The two cases where this is applicable are:
9566  *    1) An exception is taken from EL3 but the SCR does not have the exception
9567  *    routed to EL3.
9568  *    2) An exception is taken from EL2 but the HCR does not have the exception
9569  *    routed to EL2.
9570  * In these two cases, the table below contains a target of EL1.  This value is
9571  * returned as it is expected that the consumer of the table data will check
9572  * for "target EL >= current EL" to ensure the exception is not taken.
9573  *
9574  *            SCR     HCR
9575  *         64  EA     AMO                 From
9576  *        BIT IRQ     IMO      Non-secure         Secure
9577  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
9578  */
9579 static const int8_t target_el_table[2][2][2][2][2][4] = {
9580     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
9581        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
9582       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
9583        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
9584      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
9585        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
9586       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
9587        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
9588     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
9589        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
9590       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
9591        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
9592      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
9593        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
9594       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
9595        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
9596 };
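
/*
 * For example, an IRQ taken from non-secure EL0 with a 64-bit EL3,
 * SCR_EL3.{IRQ,RW} = {0,1} and HCR_EL2.{IMO,TGE} = {0,0} indexes
 * target_el_table[1][0][1][0][0][0] and so targets EL1.
 */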
9597 
9598 /*
9599  * Determine the target EL for physical exceptions
9600  */
9601 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
9602                                  uint32_t cur_el, bool secure)
9603 {
9604     CPUARMState *env = cpu_env(cs);
9605     bool rw;
9606     bool scr;
9607     bool hcr;
9608     int target_el;
9609     /* Is the highest EL AArch64? */
9610     bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
9611     uint64_t hcr_el2;
9612 
9613     if (arm_feature(env, ARM_FEATURE_EL3)) {
9614         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
9615     } else {
9616         /*
9617          * Either EL2 is the highest EL (and so the EL2 register width
9618          * is given by is64); or there is no EL2 or EL3, in which case
9619          * the value of 'rw' does not affect the table lookup anyway.
9620          */
9621         rw = is64;
9622     }
9623 
9624     hcr_el2 = arm_hcr_el2_eff(env);
9625     switch (excp_idx) {
9626     case EXCP_IRQ:
9627     case EXCP_NMI:
9628         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
9629         hcr = hcr_el2 & HCR_IMO;
9630         break;
9631     case EXCP_FIQ:
9632         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
9633         hcr = hcr_el2 & HCR_FMO;
9634         break;
9635     default:
9636         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
9637         hcr = hcr_el2 & HCR_AMO;
9638         break;
9639     }
9640 
9641     /*
9642      * For these purposes, TGE and AMO/IMO/FMO both force the
9643      * interrupt to EL2.  Fold TGE into the bit extracted above.
9644      */
9645     hcr |= (hcr_el2 & HCR_TGE) != 0;
9646 
9647     /* Perform a table-lookup for the target EL given the current state */
9648     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
9649 
9650     assert(target_el > 0);
9651 
9652     return target_el;
9653 }
9654 
9655 void arm_log_exception(CPUState *cs)
9656 {
9657     int idx = cs->exception_index;
9658 
9659     if (qemu_loglevel_mask(CPU_LOG_INT)) {
9660         const char *exc = NULL;
9661         static const char * const excnames[] = {
9662             [EXCP_UDEF] = "Undefined Instruction",
9663             [EXCP_SWI] = "SVC",
9664             [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
9665             [EXCP_DATA_ABORT] = "Data Abort",
9666             [EXCP_IRQ] = "IRQ",
9667             [EXCP_FIQ] = "FIQ",
9668             [EXCP_BKPT] = "Breakpoint",
9669             [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
9670             [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
9671             [EXCP_HVC] = "Hypervisor Call",
9672             [EXCP_HYP_TRAP] = "Hypervisor Trap",
9673             [EXCP_SMC] = "Secure Monitor Call",
9674             [EXCP_VIRQ] = "Virtual IRQ",
9675             [EXCP_VFIQ] = "Virtual FIQ",
9676             [EXCP_SEMIHOST] = "Semihosting call",
9677             [EXCP_NOCP] = "v7M NOCP UsageFault",
9678             [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
9679             [EXCP_STKOF] = "v8M STKOF UsageFault",
9680             [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
9681             [EXCP_LSERR] = "v8M LSERR UsageFault",
9682             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
9683             [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
9684             [EXCP_VSERR] = "Virtual SERR",
9685             [EXCP_GPC] = "Granule Protection Check",
9686             [EXCP_NMI] = "NMI",
9687             [EXCP_VINMI] = "Virtual IRQ NMI",
9688             [EXCP_VFNMI] = "Virtual FIQ NMI",
9689         };
9690 
9691         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
9692             exc = excnames[idx];
9693         }
9694         if (!exc) {
9695             exc = "unknown";
9696         }
9697         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
9698                       idx, exc, cs->cpu_index);
9699     }
9700 }
9701 
9702 /*
9703  * Function used to synchronize QEMU's AArch64 register set with AArch32
9704  * register set.  This is necessary when switching between AArch32 and AArch64
9705  * execution state.
9706  */
9707 void aarch64_sync_32_to_64(CPUARMState *env)
9708 {
9709     int i;
9710     uint32_t mode = env->uncached_cpsr & CPSR_M;
9711 
9712     /* We can blanket copy R[0:7] to X[0:7] */
9713     for (i = 0; i < 8; i++) {
9714         env->xregs[i] = env->regs[i];
9715     }
9716 
9717     /*
9718      * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
9719      * Otherwise, they come from the banked user regs.
9720      */
9721     if (mode == ARM_CPU_MODE_FIQ) {
9722         for (i = 8; i < 13; i++) {
9723             env->xregs[i] = env->usr_regs[i - 8];
9724         }
9725     } else {
9726         for (i = 8; i < 13; i++) {
9727             env->xregs[i] = env->regs[i];
9728         }
9729     }
9730 
9731     /*
9732      * Registers x13-x23 are the various mode SP and FP registers. Registers
9733      * r13 and r14 are only copied if we are in that mode, otherwise we copy
9734      * from the mode banked register.
9735      */
9736     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9737         env->xregs[13] = env->regs[13];
9738         env->xregs[14] = env->regs[14];
9739     } else {
9740         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
9741         /* HYP is an exception in that it is copied from r14 */
9742         if (mode == ARM_CPU_MODE_HYP) {
9743             env->xregs[14] = env->regs[14];
9744         } else {
9745             env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
9746         }
9747     }
9748 
9749     if (mode == ARM_CPU_MODE_HYP) {
9750         env->xregs[15] = env->regs[13];
9751     } else {
9752         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
9753     }
9754 
9755     if (mode == ARM_CPU_MODE_IRQ) {
9756         env->xregs[16] = env->regs[14];
9757         env->xregs[17] = env->regs[13];
9758     } else {
9759         env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
9760         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
9761     }
9762 
9763     if (mode == ARM_CPU_MODE_SVC) {
9764         env->xregs[18] = env->regs[14];
9765         env->xregs[19] = env->regs[13];
9766     } else {
9767         env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
9768         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
9769     }
9770 
9771     if (mode == ARM_CPU_MODE_ABT) {
9772         env->xregs[20] = env->regs[14];
9773         env->xregs[21] = env->regs[13];
9774     } else {
9775         env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
9776         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
9777     }
9778 
9779     if (mode == ARM_CPU_MODE_UND) {
9780         env->xregs[22] = env->regs[14];
9781         env->xregs[23] = env->regs[13];
9782     } else {
9783         env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
9784         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
9785     }
9786 
9787     /*
9788      * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
9789      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
9790      * FIQ bank for r8-r14.
9791      */
9792     if (mode == ARM_CPU_MODE_FIQ) {
9793         for (i = 24; i < 31; i++) {
9794             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
9795         }
9796     } else {
9797         for (i = 24; i < 29; i++) {
9798             env->xregs[i] = env->fiq_regs[i - 24];
9799         }
9800         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
9801         env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
9802     }
9803 
9804     env->pc = env->regs[15];
9805 }
9806 
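/*
 * The fixed mapping used by aarch64_sync_32_to_64() above and
 * aarch64_sync_64_to_32() below (the AArch64 view of the AArch32
 * banked registers):
 *   X0-X7    <-> R0-R7
 *   X8-X12   <-> R8-R12 (usr); the FIQ-banked copies live in X24-X28
 *   X13, X14 <-> SP_usr, LR_usr
 *   X15      <-> SP_hyp (Hyp mode shares LR with usr)
 *   X16, X17 <-> LR_irq, SP_irq
 *   X18, X19 <-> LR_svc, SP_svc
 *   X20, X21 <-> LR_abt, SP_abt
 *   X22, X23 <-> LR_und, SP_und
 *   X24-X30  <-> R8_fiq-R12_fiq, SP_fiq, LR_fiq
 */
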
9807 /*
9808  * Function used to synchronize QEMU's AArch32 register set with AArch64
9809  * register set.  This is necessary when switching between AArch32 and AArch64
9810  * execution state.
9811  */
9812 void aarch64_sync_64_to_32(CPUARMState *env)
9813 {
9814     int i;
9815     uint32_t mode = env->uncached_cpsr & CPSR_M;
9816 
9817     /* We can blanket copy X[0:7] to R[0:7] */
9818     for (i = 0; i < 8; i++) {
9819         env->regs[i] = env->xregs[i];
9820     }
9821 
9822     /*
9823      * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
9824      * Otherwise, we copy x8-x12 into the banked user regs.
9825      */
9826     if (mode == ARM_CPU_MODE_FIQ) {
9827         for (i = 8; i < 13; i++) {
9828             env->usr_regs[i - 8] = env->xregs[i];
9829         }
9830     } else {
9831         for (i = 8; i < 13; i++) {
9832             env->regs[i] = env->xregs[i];
9833         }
9834     }
9835 
9836     /*
9837      * Registers r13 & r14 depend on the current mode.
9838      * If we are in a given mode, we copy the corresponding x registers to r13
9839      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
9840      * for the mode.
9841      */
9842     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9843         env->regs[13] = env->xregs[13];
9844         env->regs[14] = env->xregs[14];
9845     } else {
9846         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
9847 
9848         /*
9849          * HYP is an exception in that it does not have its own banked r14 but
9850          * shares the USR r14
9851          */
9852         if (mode == ARM_CPU_MODE_HYP) {
9853             env->regs[14] = env->xregs[14];
9854         } else {
9855             env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
9856         }
9857     }
9858 
9859     if (mode == ARM_CPU_MODE_HYP) {
9860         env->regs[13] = env->xregs[15];
9861     } else {
9862         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
9863     }
9864 
9865     if (mode == ARM_CPU_MODE_IRQ) {
9866         env->regs[14] = env->xregs[16];
9867         env->regs[13] = env->xregs[17];
9868     } else {
9869         env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
9870         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
9871     }
9872 
9873     if (mode == ARM_CPU_MODE_SVC) {
9874         env->regs[14] = env->xregs[18];
9875         env->regs[13] = env->xregs[19];
9876     } else {
9877         env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
9878         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
9879     }
9880 
9881     if (mode == ARM_CPU_MODE_ABT) {
9882         env->regs[14] = env->xregs[20];
9883         env->regs[13] = env->xregs[21];
9884     } else {
9885         env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
9886         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
9887     }
9888 
9889     if (mode == ARM_CPU_MODE_UND) {
9890         env->regs[14] = env->xregs[22];
9891         env->regs[13] = env->xregs[23];
9892     } else {
9893         env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
9894         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
9895     }
9896 
9897     /*
9898      * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
9899      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
9900      * FIQ bank for r8-r14.
9901      */
9902     if (mode == ARM_CPU_MODE_FIQ) {
9903         for (i = 24; i < 31; i++) {
9904             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
9905         }
9906     } else {
9907         for (i = 24; i < 29; i++) {
9908             env->fiq_regs[i - 24] = env->xregs[i];
9909         }
9910         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
9911         env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
9912     }
9913 
9914     env->regs[15] = env->pc;
9915 }
9916 
9917 static void take_aarch32_exception(CPUARMState *env, int new_mode,
9918                                    uint32_t mask, uint32_t offset,
9919                                    uint32_t newpc)
9920 {
9921     int new_el;
9922 
9923     /* Change the CPU state so as to actually take the exception. */
9924     switch_mode(env, new_mode);
9925 
9926     /*
9927      * For exceptions taken to AArch32 we must clear the SS bit in both
9928      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
9929      */
9930     env->pstate &= ~PSTATE_SS;
9931     env->spsr = cpsr_read(env);
9932     /* Clear IT bits.  */
9933     env->condexec_bits = 0;
9934     /* Switch to the new mode, and to the correct instruction set.  */
9935     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
9936 
9937     /* This must be after mode switching. */
9938     new_el = arm_current_el(env);
9939 
9940     /* Set new mode endianness */
9941     env->uncached_cpsr &= ~CPSR_E;
9942     if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
9943         env->uncached_cpsr |= CPSR_E;
9944     }
9945     /* J and IL must always be cleared for exception entry */
9946     env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
9947     env->daif |= mask;
9948 
9949     if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
9950         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
9951             env->uncached_cpsr |= CPSR_SSBS;
9952         } else {
9953             env->uncached_cpsr &= ~CPSR_SSBS;
9954         }
9955     }
9956 
9957     if (new_mode == ARM_CPU_MODE_HYP) {
9958         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
9959         env->elr_el[2] = env->regs[15];
9960     } else {
9961         /* CPSR.PAN is normally preserved unless...  */
9962         if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
9963             switch (new_el) {
9964             case 3:
9965                 if (!arm_is_secure_below_el3(env)) {
9966                     /* ... the target is EL3, from non-secure state.  */
9967                     env->uncached_cpsr &= ~CPSR_PAN;
9968                     break;
9969                 }
9970                 /* ... the target is EL3, from secure state ... */
9971                 /* fall through */
9972             case 1:
9973                 /* ... the target is EL1 and SCTLR.SPAN is 0.  */
9974                 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
9975                     env->uncached_cpsr |= CPSR_PAN;
9976                 }
9977                 break;
9978             }
9979         }
9980         /*
9981          * This is a lie, as there was no c1_sys on V4T/V5, but who cares;
9982          * we should really just guard the Thumb check on V4.
9983          */
9984         if (arm_feature(env, ARM_FEATURE_V4T)) {
9985             env->thumb =
9986                 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
9987         }
9988         env->regs[14] = env->regs[15] + offset;
9989     }
9990     env->regs[15] = newpc;
9991 
9992     if (tcg_enabled()) {
9993         arm_rebuild_hflags(env);
9994     }
9995 }
9996 
9997 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
9998 {
9999     /*
10000      * Handle exception entry to Hyp mode; this is sufficiently
10001      * different to entry to other AArch32 modes that we handle it
10002      * separately here.
10003      *
10004      * The vector table entry used is always the 0x14 Hyp mode entry point,
10005      * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
10006      * The offset applied to the preferred return address is always zero
10007      * (see DDI0487C.a section G1.12.3).
10008      * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
10009      */
10010     uint32_t addr, mask;
10011     ARMCPU *cpu = ARM_CPU(cs);
10012     CPUARMState *env = &cpu->env;
10013 
10014     switch (cs->exception_index) {
10015     case EXCP_UDEF:
10016         addr = 0x04;
10017         break;
10018     case EXCP_SWI:
10019         addr = 0x08;
10020         break;
10021     case EXCP_BKPT:
10022         /* Fall through to prefetch abort.  */
10023     case EXCP_PREFETCH_ABORT:
10024         env->cp15.ifar_s = env->exception.vaddress;
10025         qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
10026                       (uint32_t)env->exception.vaddress);
10027         addr = 0x0c;
10028         break;
10029     case EXCP_DATA_ABORT:
10030         env->cp15.dfar_s = env->exception.vaddress;
10031         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
10032                       (uint32_t)env->exception.vaddress);
10033         addr = 0x10;
10034         break;
10035     case EXCP_IRQ:
10036         addr = 0x18;
10037         break;
10038     case EXCP_FIQ:
10039         addr = 0x1c;
10040         break;
10041     case EXCP_HVC:
10042         addr = 0x08;
10043         break;
10044     case EXCP_HYP_TRAP:
10045         addr = 0x14;
10046         break;
10047     default:
10048         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10049     }
10050 
10051     if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
10052         if (!arm_feature(env, ARM_FEATURE_V8)) {
10053             /*
10054              * QEMU syndrome values are v8-style. v7 has the IL bit
10055              * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
10056              * If this is a v7 CPU, squash the IL bit in those cases.
10057              */
10058             if (cs->exception_index == EXCP_PREFETCH_ABORT ||
10059                 (cs->exception_index == EXCP_DATA_ABORT &&
10060                  !(env->exception.syndrome & ARM_EL_ISV)) ||
10061                 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
10062                 env->exception.syndrome &= ~ARM_EL_IL;
10063             }
10064         }
10065         env->cp15.esr_el[2] = env->exception.syndrome;
10066     }
10067 
10068     if (arm_current_el(env) != 2 && addr < 0x14) {
10069         addr = 0x14;
10070     }
10071 
10072     mask = 0;
10073     if (!(env->cp15.scr_el3 & SCR_EA)) {
10074         mask |= CPSR_A;
10075     }
10076     if (!(env->cp15.scr_el3 & SCR_IRQ)) {
10077         mask |= CPSR_I;
10078     }
10079     if (!(env->cp15.scr_el3 & SCR_FIQ)) {
10080         mask |= CPSR_F;
10081     }
10082 
10083     addr += env->cp15.hvbar;
10084 
10085     take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
10086 }
10087 
10088 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
10089 {
10090     ARMCPU *cpu = ARM_CPU(cs);
10091     CPUARMState *env = &cpu->env;
10092     uint32_t addr;
10093     uint32_t mask;
10094     int new_mode;
10095     uint32_t offset;
10096     uint32_t moe;
10097 
10098     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
10099     switch (syn_get_ec(env->exception.syndrome)) {
10100     case EC_BREAKPOINT:
10101     case EC_BREAKPOINT_SAME_EL:
10102         moe = 1;
10103         break;
10104     case EC_WATCHPOINT:
10105     case EC_WATCHPOINT_SAME_EL:
10106         moe = 10;
10107         break;
10108     case EC_AA32_BKPT:
10109         moe = 3;
10110         break;
10111     case EC_VECTORCATCH:
10112         moe = 5;
10113         break;
10114     default:
10115         moe = 0;
10116         break;
10117     }
10118 
10119     if (moe) {
10120         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
10121     }
10122 
10123     if (env->exception.target_el == 2) {
10124         /* Debug exceptions are reported differently on AArch32 */
10125         switch (syn_get_ec(env->exception.syndrome)) {
10126         case EC_BREAKPOINT:
10127         case EC_BREAKPOINT_SAME_EL:
10128         case EC_AA32_BKPT:
10129         case EC_VECTORCATCH:
10130             env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2,
10131                                                      0, 0, 0x22);
10132             break;
10133         case EC_WATCHPOINT:
10134             env->exception.syndrome = syn_set_ec(env->exception.syndrome,
10135                                                  EC_DATAABORT);
10136             break;
10137         case EC_WATCHPOINT_SAME_EL:
10138             env->exception.syndrome = syn_set_ec(env->exception.syndrome,
10139                                                  EC_DATAABORT_SAME_EL);
10140             break;
10141         }
10142         arm_cpu_do_interrupt_aarch32_hyp(cs);
10143         return;
10144     }
10145 
10146     switch (cs->exception_index) {
10147     case EXCP_UDEF:
10148         new_mode = ARM_CPU_MODE_UND;
10149         addr = 0x04;
10150         mask = CPSR_I;
10151         if (env->thumb) {
10152             offset = 2;
10153         } else {
10154             offset = 4;
10155         }
10156         break;
10157     case EXCP_SWI:
10158         new_mode = ARM_CPU_MODE_SVC;
10159         addr = 0x08;
10160         mask = CPSR_I;
10161         /* The PC already points to the next instruction.  */
10162         offset = 0;
10163         break;
10164     case EXCP_BKPT:
10165         /* Fall through to prefetch abort.  */
10166     case EXCP_PREFETCH_ABORT:
10167         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
10168         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
10169         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
10170                       env->exception.fsr, (uint32_t)env->exception.vaddress);
10171         new_mode = ARM_CPU_MODE_ABT;
10172         addr = 0x0c;
10173         mask = CPSR_A | CPSR_I;
10174         offset = 4;
10175         break;
10176     case EXCP_DATA_ABORT:
10177         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
10178         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
10179         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
10180                       env->exception.fsr,
10181                       (uint32_t)env->exception.vaddress);
10182         new_mode = ARM_CPU_MODE_ABT;
10183         addr = 0x10;
10184         mask = CPSR_A | CPSR_I;
10185         offset = 8;
10186         break;
10187     case EXCP_IRQ:
10188         new_mode = ARM_CPU_MODE_IRQ;
10189         addr = 0x18;
10190         /* Disable IRQ and imprecise data aborts.  */
10191         mask = CPSR_A | CPSR_I;
10192         offset = 4;
10193         if (env->cp15.scr_el3 & SCR_IRQ) {
10194             /* IRQ routed to monitor mode */
10195             new_mode = ARM_CPU_MODE_MON;
10196             mask |= CPSR_F;
10197         }
10198         break;
10199     case EXCP_FIQ:
10200         new_mode = ARM_CPU_MODE_FIQ;
10201         addr = 0x1c;
10202         /* Disable FIQ, IRQ and imprecise data aborts.  */
10203         mask = CPSR_A | CPSR_I | CPSR_F;
10204         if (env->cp15.scr_el3 & SCR_FIQ) {
10205             /* FIQ routed to monitor mode */
10206             new_mode = ARM_CPU_MODE_MON;
10207         }
10208         offset = 4;
10209         break;
10210     case EXCP_VIRQ:
10211         new_mode = ARM_CPU_MODE_IRQ;
10212         addr = 0x18;
10213         /* Disable IRQ and imprecise data aborts.  */
10214         mask = CPSR_A | CPSR_I;
10215         offset = 4;
10216         break;
10217     case EXCP_VFIQ:
10218         new_mode = ARM_CPU_MODE_FIQ;
10219         addr = 0x1c;
10220         /* Disable FIQ, IRQ and imprecise data aborts.  */
10221         mask = CPSR_A | CPSR_I | CPSR_F;
10222         offset = 4;
10223         break;
10224     case EXCP_VSERR:
10225         {
10226             /*
10227              * Note that this is reported as a data abort, but the DFAR
10228              * has an UNKNOWN value.  Construct the SError syndrome from
10229              * AET and ExT fields.
10230              */
10231             ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
10232 
10233             if (extended_addresses_enabled(env)) {
10234                 env->exception.fsr = arm_fi_to_lfsc(&fi);
10235             } else {
10236                 env->exception.fsr = arm_fi_to_sfsc(&fi);
10237             }
10238             env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
10239             A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
10240             qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x\n",
10241                           env->exception.fsr);
10242 
10243             new_mode = ARM_CPU_MODE_ABT;
10244             addr = 0x10;
10245             mask = CPSR_A | CPSR_I;
10246             offset = 8;
10247         }
10248         break;
10249     case EXCP_SMC:
10250         new_mode = ARM_CPU_MODE_MON;
10251         addr = 0x08;
10252         mask = CPSR_A | CPSR_I | CPSR_F;
10253         offset = 0;
10254         break;
10255     default:
10256         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10257         return; /* Never happens.  Keep compiler happy.  */
10258     }
10259 
10260     if (new_mode == ARM_CPU_MODE_MON) {
10261         addr += env->cp15.mvbar;
10262     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
10263         /* High vectors. When enabled, base address cannot be remapped. */
10264         addr += 0xffff0000;
10265     } else {
10266         /*
10267          * ARM v7 architectures provide a vector base address register to remap
10268          * the interrupt vector table.
10269          * This register is only honoured outside monitor mode, and is banked.
10270          * Note: only bits 31:5 are valid.
10271          */
10272         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
10273     }
10274 
10275     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
10276         env->cp15.scr_el3 &= ~SCR_NS;
10277     }
10278 
10279     take_aarch32_exception(env, new_mode, mask, offset, addr);
10280 }
10281 
10282 static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
10283 {
10284     /*
10285      * Return the register number of the AArch64 view of the AArch32
10286      * register @aarch32_reg. The CPUARMState CPSR is assumed to still
10287      * be that of the AArch32 mode the exception came from.
10288      */
10289     int mode = env->uncached_cpsr & CPSR_M;
10290 
10291     switch (aarch32_reg) {
10292     case 0 ... 7:
10293         return aarch32_reg;
10294     case 8 ... 12:
10295         return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
10296     case 13:
10297         switch (mode) {
10298         case ARM_CPU_MODE_USR:
10299         case ARM_CPU_MODE_SYS:
10300             return 13;
10301         case ARM_CPU_MODE_HYP:
10302             return 15;
10303         case ARM_CPU_MODE_IRQ:
10304             return 17;
10305         case ARM_CPU_MODE_SVC:
10306             return 19;
10307         case ARM_CPU_MODE_ABT:
10308             return 21;
10309         case ARM_CPU_MODE_UND:
10310             return 23;
10311         case ARM_CPU_MODE_FIQ:
10312             return 29;
10313         default:
10314             g_assert_not_reached();
10315         }
10316     case 14:
10317         switch (mode) {
10318         case ARM_CPU_MODE_USR:
10319         case ARM_CPU_MODE_SYS:
10320         case ARM_CPU_MODE_HYP:
10321             return 14;
10322         case ARM_CPU_MODE_IRQ:
10323             return 16;
10324         case ARM_CPU_MODE_SVC:
10325             return 18;
10326         case ARM_CPU_MODE_ABT:
10327             return 20;
10328         case ARM_CPU_MODE_UND:
10329             return 22;
10330         case ARM_CPU_MODE_FIQ:
10331             return 30;
10332         default:
10333             g_assert_not_reached();
10334         }
10335     case 15:
10336         return 31;
10337     default:
10338         g_assert_not_reached();
10339     }
10340 }
10341 
10342 static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
10343 {
10344     uint32_t ret = cpsr_read(env);
10345 
10346     /* Move DIT to the correct location for SPSR_ELx */
10347     if (ret & CPSR_DIT) {
10348         ret &= ~CPSR_DIT;
10349         ret |= PSTATE_DIT;
10350     }
10351     /* Merge PSTATE.SS into SPSR_ELx */
10352     ret |= env->pstate & PSTATE_SS;
10353 
10354     return ret;
10355 }
10356 
10357 static bool syndrome_is_sync_extabt(uint32_t syndrome)
10358 {
10359     /* Return true if this syndrome value is a synchronous external abort */
10360     switch (syn_get_ec(syndrome)) {
10361     case EC_INSNABORT:
10362     case EC_INSNABORT_SAME_EL:
10363     case EC_DATAABORT:
10364     case EC_DATAABORT_SAME_EL:
10365         /* Look at fault status code for all the synchronous ext abort cases */
10366         switch (syndrome & 0x3f) {
10367         case 0x10: /* Synchronous external abort, not on table walk */
10368         case 0x13: /* Synchronous external abort on table walk, level -1 */
10369         case 0x14: /* Synchronous external abort on table walk, level 0 */
10370         case 0x15: /* Synchronous external abort on table walk, level 1 */
10371         case 0x16: /* Synchronous external abort on table walk, level 2 */
10372         case 0x17: /* Synchronous external abort on table walk, level 3 */
10373             return true;
10374         default:
10375             return false;
10376         }
10377     default:
10378         return false;
10379     }
10380 }
10381 
10382 /* Handle exception entry to a target EL which is using AArch64 */
10383 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
10384 {
10385     ARMCPU *cpu = ARM_CPU(cs);
10386     CPUARMState *env = &cpu->env;
10387     unsigned int new_el = env->exception.target_el;
10388     target_ulong addr = env->cp15.vbar_el[new_el];
10389     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
10390     unsigned int old_mode;
10391     unsigned int cur_el = arm_current_el(env);
10392     int rt;
10393 
10394     if (tcg_enabled()) {
10395         /*
10396          * Note that new_el can never be 0.  If cur_el is 0, then
10397          * el0_a64 is is_a64(), else el0_a64 is ignored.
10398          */
10399         aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
10400     }
10401 
10402     if (cur_el < new_el) {
10403         /*
10404          * Entry vector offset depends on whether the implemented EL
10405          * immediately lower than the target level is using AArch32 or AArch64
10406          */
10407         bool is_aa64;
10408         uint64_t hcr;
10409 
10410         switch (new_el) {
10411         case 3:
10412             is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
10413             break;
10414         case 2:
10415             hcr = arm_hcr_el2_eff(env);
10416             if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
10417                 is_aa64 = (hcr & HCR_RW) != 0;
10418                 break;
10419             }
10420             /* fall through */
10421         case 1:
10422             is_aa64 = is_a64(env);
10423             break;
10424         default:
10425             g_assert_not_reached();
10426         }
10427 
10428         if (is_aa64) {
10429             addr += 0x400;
10430         } else {
10431             addr += 0x600;
10432         }
10433     } else if (pstate_read(env) & PSTATE_SP) {
10434         addr += 0x200;
10435     }
10436 
10437     switch (cs->exception_index) {
10438     case EXCP_GPC:
10439         qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
10440                       env->cp15.mfar_el3);
10441         /* fall through */
10442     case EXCP_PREFETCH_ABORT:
10443     case EXCP_DATA_ABORT:
10444         /*
10445          * FEAT_DoubleFault allows synchronous external aborts taken to EL3
10446          * to be taken to the SError vector entrypoint.
10447          */
10448         if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
10449             syndrome_is_sync_extabt(env->exception.syndrome)) {
10450             addr += 0x180;
10451         }
10452         env->cp15.far_el[new_el] = env->exception.vaddress;
10453         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
10454                       env->cp15.far_el[new_el]);
10455         /* fall through */
10456     case EXCP_BKPT:
10457     case EXCP_UDEF:
10458     case EXCP_SWI:
10459     case EXCP_HVC:
10460     case EXCP_HYP_TRAP:
10461     case EXCP_SMC:
10462         switch (syn_get_ec(env->exception.syndrome)) {
10463         case EC_ADVSIMDFPACCESSTRAP:
10464             /*
10465              * QEMU internal FP/SIMD syndromes from AArch32 include the
10466              * TA and coproc fields which are only exposed if the exception
10467              * is taken to AArch32 Hyp mode. Mask them out to get a valid
10468              * AArch64 format syndrome.
10469              */
10470             env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
10471             break;
10472         case EC_CP14RTTRAP:
10473         case EC_CP15RTTRAP:
10474         case EC_CP14DTTRAP:
10475             /*
10476              * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
10477              * the raw register field from the insn; when taking this to
10478              * AArch64 we must convert it to the AArch64 view of the register
10479              * number. Notice that we read a 4-bit AArch32 register number and
10480              * write back a 5-bit AArch64 one.
10481              */
10482             rt = extract32(env->exception.syndrome, 5, 4);
10483             rt = aarch64_regnum(env, rt);
10484             env->exception.syndrome = deposit32(env->exception.syndrome,
10485                                                 5, 5, rt);
10486             break;
10487         case EC_CP15RRTTRAP:
10488         case EC_CP14RRTTRAP:
10489             /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
10490             rt = extract32(env->exception.syndrome, 5, 4);
10491             rt = aarch64_regnum(env, rt);
10492             env->exception.syndrome = deposit32(env->exception.syndrome,
10493                                                 5, 5, rt);
10494             rt = extract32(env->exception.syndrome, 10, 4);
10495             rt = aarch64_regnum(env, rt);
10496             env->exception.syndrome = deposit32(env->exception.syndrome,
10497                                                 10, 5, rt);
10498             break;
10499         }
10500         env->cp15.esr_el[new_el] = env->exception.syndrome;
10501         break;
10502     case EXCP_IRQ:
10503     case EXCP_VIRQ:
10504     case EXCP_NMI:
10505     case EXCP_VINMI:
10506         addr += 0x80;
10507         break;
10508     case EXCP_FIQ:
10509     case EXCP_VFIQ:
10510     case EXCP_VFNMI:
10511         addr += 0x100;
10512         break;
10513     case EXCP_VSERR:
10514         addr += 0x180;
10515         /* Construct the SError syndrome from IDS and ISS fields. */
10516         env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
10517         env->cp15.esr_el[new_el] = env->exception.syndrome;
10518         break;
10519     default:
10520         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10521     }
10522 
10523     if (is_a64(env)) {
10524         old_mode = pstate_read(env);
10525         aarch64_save_sp(env, arm_current_el(env));
10526         env->elr_el[new_el] = env->pc;
10527 
10528         if (cur_el == 1 && new_el == 1) {
10529             uint64_t hcr = arm_hcr_el2_eff(env);
10530             if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV ||
10531                 (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) {
10532                 /*
10533                  * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR
10534                  * by setting M[3:2] to 0b10.
10535                  * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
10536                  * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
10537                  */
10538                 old_mode = deposit32(old_mode, 2, 2, 2);
10539             }
10540         }
10541     } else {
10542         old_mode = cpsr_read_for_spsr_elx(env);
10543         env->elr_el[new_el] = env->regs[15];
10544 
10545         aarch64_sync_32_to_64(env);
10546 
10547         env->condexec_bits = 0;
10548     }
10549     env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
10550 
10551     qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode);
10552     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
10553                   env->elr_el[new_el]);
10554 
10555     if (cpu_isar_feature(aa64_pan, cpu)) {
10556         /* The value of PSTATE.PAN is normally preserved, except when ... */
10557         new_mode |= old_mode & PSTATE_PAN;
10558         switch (new_el) {
10559         case 2:
10560             /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
10561             if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
10562                 != (HCR_E2H | HCR_TGE)) {
10563                 break;
10564             }
10565             /* fall through */
10566         case 1:
10567             /* ... the target is EL1 ... */
10568             /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
10569             if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
10570                 new_mode |= PSTATE_PAN;
10571             }
10572             break;
10573         }
10574     }
10575     if (cpu_isar_feature(aa64_mte, cpu)) {
10576         new_mode |= PSTATE_TCO;
10577     }
10578 
10579     if (cpu_isar_feature(aa64_ssbs, cpu)) {
10580         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
10581             new_mode |= PSTATE_SSBS;
10582         } else {
10583             new_mode &= ~PSTATE_SSBS;
10584         }
10585     }
10586 
10587     if (cpu_isar_feature(aa64_nmi, cpu)) {
10588         if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) {
10589             new_mode |= PSTATE_ALLINT;
10590         } else {
10591             new_mode &= ~PSTATE_ALLINT;
10592         }
10593     }
10594 
10595     pstate_write(env, PSTATE_DAIF | new_mode);
10596     env->aarch64 = true;
10597     aarch64_restore_sp(env, new_el);
10598 
10599     if (tcg_enabled()) {
10600         helper_rebuild_hflags_a64(env, new_el);
10601     }
10602 
10603     env->pc = addr;
10604 
10605     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
10606                   new_el, env->pc, pstate_read(env));
10607 }
10608 
10609 /*
10610  * Do semihosting call and set the appropriate return value. All the
10611  * permission and validity checks have been done at translate time.
10612  *
10613  * We only see semihosting exceptions in TCG, as they are not
10614  * trapped to the hypervisor in KVM.
10615  */
10616 #ifdef CONFIG_TCG
10617 static void tcg_handle_semihosting(CPUState *cs)
10618 {
10619     ARMCPU *cpu = ARM_CPU(cs);
10620     CPUARMState *env = &cpu->env;
10621 
10622     if (is_a64(env)) {
10623         qemu_log_mask(CPU_LOG_INT,
10624                       "...handling as semihosting call 0x%" PRIx64 "\n",
10625                       env->xregs[0]);
10626         do_common_semihosting(cs);
10627         env->pc += 4;
10628     } else {
10629         qemu_log_mask(CPU_LOG_INT,
10630                       "...handling as semihosting call 0x%x\n",
10631                       env->regs[0]);
10632         do_common_semihosting(cs);
10633         env->regs[15] += env->thumb ? 2 : 4;
10634     }
10635 }
10636 #endif
10637 
10638 /*
10639  * Handle a CPU exception for A and R profile CPUs.
10640  * Do any appropriate logging, handle PSCI calls, and then hand off
10641  * to the AArch64-entry or AArch32-entry function depending on the
10642  * target exception level's register width.
10643  *
10644  * Note: this is used for both TCG (as the do_interrupt tcg op),
10645  *       and KVM to re-inject guest debug exceptions, and to
10646  *       inject a Synchronous-External-Abort.
10647  */
10648 void arm_cpu_do_interrupt(CPUState *cs)
10649 {
10650     ARMCPU *cpu = ARM_CPU(cs);
10651     CPUARMState *env = &cpu->env;
10652     unsigned int new_el = env->exception.target_el;
10653 
10654     assert(!arm_feature(env, ARM_FEATURE_M));
10655 
10656     arm_log_exception(cs);
10657     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
10658                   new_el);
10659     if (qemu_loglevel_mask(CPU_LOG_INT)
10660         && !excp_is_internal(cs->exception_index)) {
10661         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
10662                       syn_get_ec(env->exception.syndrome),
10663                       env->exception.syndrome);
10664     }
10665 
10666     if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
10667         arm_handle_psci_call(cpu);
10668         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
10669         return;
10670     }
10671 
10672     /*
10673      * Semihosting semantics depend on the register width of the code
10674      * that caused the exception, not the target exception level, so
10675      * must be handled here.
10676      */
10677 #ifdef CONFIG_TCG
10678     if (cs->exception_index == EXCP_SEMIHOST) {
10679         tcg_handle_semihosting(cs);
10680         return;
10681     }
10682 #endif
10683 
10684     /*
10685      * Hooks may change global state, so the BQL must be held; it is
10686      * also required for any modification of
10687      * cs->interrupt_request.
10688      */
10689     g_assert(bql_locked());
10690 
10691     arm_call_pre_el_change_hook(cpu);
10692 
10693     assert(!excp_is_internal(cs->exception_index));
10694     if (arm_el_is_aa64(env, new_el)) {
10695         arm_cpu_do_interrupt_aarch64(cs);
10696     } else {
10697         arm_cpu_do_interrupt_aarch32(cs);
10698     }
10699 
10700     arm_call_el_change_hook(cpu);
10701 
10702     if (!kvm_enabled()) {
10703         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
10704     }
10705 }
10706 #endif /* !CONFIG_USER_ONLY */
10707 
10708 uint64_t arm_sctlr(CPUARMState *env, int el)
10709 {
10710     /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0 */
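    /*
     * For example, with HCR_EL2.{E2H,TGE} == {1,1} an EL0 access runs
     * in the EL2&0 regime (ARMMMUIdx_E20_0), so SCTLR_EL2 applies.
     */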
10711     if (el == 0) {
10712         ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
10713         switch (mmu_idx) {
10714         case ARMMMUIdx_E20_0:
10715             el = 2;
10716             break;
10717         case ARMMMUIdx_E30_0:
10718             el = 3;
10719             break;
10720         default:
10721             el = 1;
10722             break;
10723         }
10724     }
10725     return env->cp15.sctlr_el[el];
10726 }
10727 
10728 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
10729 {
10730     if (regime_has_2_ranges(mmu_idx)) {
10731         return extract64(tcr, 37, 2);
10732     } else if (regime_is_stage2(mmu_idx)) {
10733         return 0; /* VTCR_EL2 */
10734     } else {
10735         /* Replicate the single TBI bit so we always have 2 bits.  */
10736         return extract32(tcr, 20, 1) * 3;
10737     }
10738 }
10739 
10740 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
10741 {
10742     if (regime_has_2_ranges(mmu_idx)) {
10743         return extract64(tcr, 51, 2);
10744     } else if (regime_is_stage2(mmu_idx)) {
10745         return 0; /* VTCR_EL2 */
10746     } else {
10747         /* Replicate the single TBID bit so we always have 2 bits.  */
10748         return extract32(tcr, 29, 1) * 3;
10749     }
10750 }
10751 
10752 int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
10753 {
10754     if (regime_has_2_ranges(mmu_idx)) {
10755         return extract64(tcr, 57, 2);
10756     } else {
10757         /* Replicate the single TCMA bit so we always have 2 bits.  */
10758         return extract32(tcr, 30, 1) * 3;
10759     }
10760 }
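
/*
 * A minimal sketch (compiled out, illustrative only) of how the 2-bit
 * TBI field returned by aa64_va_parameter_tbi() above is typically
 * consumed: bit 55 of the VA selects the TTBR0 vs TTBR1 half of the
 * address space, and the matching TBI bit says whether the top byte is
 * ignored, i.e. treated as a replication of bit 55.
 */
#if 0
static uint64_t example_strip_tbi(uint64_t va, int tbi)
{
    int select = extract64(va, 55, 1);    /* which half of the VA space */

    if (extract32(tbi, select, 1)) {
        /* Top byte ignored: sign-extend from bit 55 */
        va = sextract64(va, 0, 56);
    }
    return va;
}
#endif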
10761 
10762 static ARMGranuleSize tg0_to_gran_size(int tg)
10763 {
10764     switch (tg) {
10765     case 0:
10766         return Gran4K;
10767     case 1:
10768         return Gran64K;
10769     case 2:
10770         return Gran16K;
10771     default:
10772         return GranInvalid;
10773     }
10774 }
10775 
10776 static ARMGranuleSize tg1_to_gran_size(int tg)
10777 {
10778     switch (tg) {
10779     case 1:
10780         return Gran16K;
10781     case 2:
10782         return Gran4K;
10783     case 3:
10784         return Gran64K;
10785     default:
10786         return GranInvalid;
10787     }
10788 }
10789 
10790 static inline bool have4k(ARMCPU *cpu, bool stage2)
10791 {
10792     return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
10793         : cpu_isar_feature(aa64_tgran4, cpu);
10794 }
10795 
10796 static inline bool have16k(ARMCPU *cpu, bool stage2)
10797 {
10798     return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
10799         : cpu_isar_feature(aa64_tgran16, cpu);
10800 }
10801 
10802 static inline bool have64k(ARMCPU *cpu, bool stage2)
10803 {
10804     return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
10805         : cpu_isar_feature(aa64_tgran64, cpu);
10806 }
10807 
10808 static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
10809                                          bool stage2)
10810 {
10811     switch (gran) {
10812     case Gran4K:
10813         if (have4k(cpu, stage2)) {
10814             return gran;
10815         }
10816         break;
10817     case Gran16K:
10818         if (have16k(cpu, stage2)) {
10819             return gran;
10820         }
10821         break;
10822     case Gran64K:
10823         if (have64k(cpu, stage2)) {
10824             return gran;
10825         }
10826         break;
10827     case GranInvalid:
10828         break;
10829     }
10830     /*
10831      * If the guest selects a granule size that isn't implemented,
10832      * the architecture requires that we behave as if it selected one
10833      * that is (with an IMPDEF choice of which one to pick). We choose
10834      * to implement the smallest supported granule size.
10835      */
10836     if (have4k(cpu, stage2)) {
10837         return Gran4K;
10838     }
10839     if (have16k(cpu, stage2)) {
10840         return Gran16K;
10841     }
10842     assert(have64k(cpu, stage2));
10843     return Gran64K;
10844 }
10845 
10846 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
10847                                    ARMMMUIdx mmu_idx, bool data,
10848                                    bool el1_is_aa32)
10849 {
10850     uint64_t tcr = regime_tcr(env, mmu_idx);
10851     bool epd, hpd, tsz_oob, ds, ha, hd;
10852     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
10853     ARMGranuleSize gran;
10854     ARMCPU *cpu = env_archcpu(env);
10855     bool stage2 = regime_is_stage2(mmu_idx);
10856 
10857     if (!regime_has_2_ranges(mmu_idx)) {
10858         select = 0;
10859         tsz = extract32(tcr, 0, 6);
10860         gran = tg0_to_gran_size(extract32(tcr, 14, 2));
10861         if (stage2) {
10862             /* VTCR_EL2 */
10863             hpd = false;
10864         } else {
10865             hpd = extract32(tcr, 24, 1);
10866         }
10867         epd = false;
10868         sh = extract32(tcr, 12, 2);
10869         ps = extract32(tcr, 16, 3);
10870         ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
10871         hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
10872         ds = extract64(tcr, 32, 1);
10873     } else {
10874         bool e0pd;
10875 
10876         /*
10877          * Bit 55 is always between the two regions, and is canonical for
10878          * determining if address tagging is enabled.
10879          */
10880         select = extract64(va, 55, 1);
10881         if (!select) {
10882             tsz = extract32(tcr, 0, 6);
10883             gran = tg0_to_gran_size(extract32(tcr, 14, 2));
10884             epd = extract32(tcr, 7, 1);
10885             sh = extract32(tcr, 12, 2);
10886             hpd = extract64(tcr, 41, 1);
10887             e0pd = extract64(tcr, 55, 1);
10888         } else {
10889             tsz = extract32(tcr, 16, 6);
10890             gran = tg1_to_gran_size(extract32(tcr, 30, 2));
10891             epd = extract32(tcr, 23, 1);
10892             sh = extract32(tcr, 28, 2);
10893             hpd = extract64(tcr, 42, 1);
10894             e0pd = extract64(tcr, 56, 1);
10895         }
10896         ps = extract64(tcr, 32, 3);
10897         ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
10898         hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
10899         ds = extract64(tcr, 59, 1);
10900 
10901         if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
10902             regime_is_user(env, mmu_idx)) {
10903             epd = true;
10904         }
10905     }
10906 
10907     gran = sanitize_gran_size(cpu, gran, stage2);
10908 
10909     if (cpu_isar_feature(aa64_st, cpu)) {
10910         max_tsz = 48 - (gran == Gran64K);
10911     } else {
10912         max_tsz = 39;
10913     }
10914 
10915     /*
10916      * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
10917      * adjust the effective value of DS, as documented.
10918      */
10919     min_tsz = 16;
10920     if (gran == Gran64K) {
10921         if (cpu_isar_feature(aa64_lva, cpu)) {
10922             min_tsz = 12;
10923         }
10924         ds = false;
10925     } else if (ds) {
10926         if (regime_is_stage2(mmu_idx)) {
10927             if (gran == Gran16K) {
10928                 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
10929             } else {
10930                 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
10931             }
10932         } else {
10933             if (gran == Gran16K) {
10934                 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
10935             } else {
10936                 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
10937             }
10938         }
10939         if (ds) {
10940             min_tsz = 12;
10941         }
10942     }
10943 
10944     if (stage2 && el1_is_aa32) {
10945         /*
10946          * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
10947          * are loosened: a configured IPA of 40 bits is permitted even if
10948          * the implemented PA is less than that (and so a 40 bit IPA would
10949          * fault for an AArch64 EL1). See R_DTLMN.
10950          */
10951         min_tsz = MIN(min_tsz, 24);
10952     }
10953 
10954     if (tsz > max_tsz) {
10955         tsz = max_tsz;
10956         tsz_oob = true;
10957     } else if (tsz < min_tsz) {
10958         tsz = min_tsz;
10959         tsz_oob = true;
10960     } else {
10961         tsz_oob = false;
10962     }
10963 
10964     /* Present TBI as a composite with TBID.  */
10965     tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
10966     if (!data) {
10967         tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
10968     }
10969     tbi = (tbi >> select) & 1;
10970 
10971     return (ARMVAParameters) {
10972         .tsz = tsz,
10973         .ps = ps,
10974         .sh = sh,
10975         .select = select,
10976         .tbi = tbi,
10977         .epd = epd,
10978         .hpd = hpd,
10979         .tsz_oob = tsz_oob,
10980         .ds = ds,
10981         .ha = ha,
10982         .hd = ha && hd,
10983         .gran = gran,
10984     };
10985 }
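
/*
 * Worked example (illustrative): for a typical kernel mapping with
 * TCR_EL1.T1SZ == 16, a VA with bit 55 set selects the upper range and
 * yields tsz == 16, i.e. a 2^(64-16) byte region; the returned tbi bit
 * already folds in TBID for instruction fetches, so callers see a
 * single "top byte ignored" decision.
 */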
10986 
10987 /*
10988  * Note that signed overflow is undefined in C.  The following routines are
10989  * careful to use unsigned types where modulo arithmetic is required.
10990  * Failure to do so _will_ break on newer gcc.
10991  */
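
/*
 * For example, add16_sat() below computes the wrapped sum first and
 * only then detects overflow from the sign bits; that pattern is only
 * sound when the wraparound itself is well defined, hence the
 * uint16_t/uint8_t operands throughout.
 */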
10992 
10993 /* Signed saturating arithmetic.  */
10994 
10995 /* Perform 16-bit signed saturating addition.  */
10996 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
10997 {
10998     uint16_t res;
10999 
11000     res = a + b;
11001     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
11002         if (a & 0x8000) {
11003             res = 0x8000;
11004         } else {
11005             res = 0x7fff;
11006         }
11007     }
11008     return res;
11009 }
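
/*
 * Worked examples (illustrative):
 *   add16_sat(0x7fff, 0x0001) == 0x7fff  (positive overflow saturates)
 *   add16_sat(0x8000, 0xffff) == 0x8000  (-32768 + -1 saturates)
 *   add16_sat(0x7fff, 0x8000) == 0xffff  (mixed signs never saturate)
 */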
11010 
11011 /* Perform 8-bit signed saturating addition.  */
11012 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
11013 {
11014     uint8_t res;
11015 
11016     res = a + b;
11017     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
11018         if (a & 0x80) {
11019             res = 0x80;
11020         } else {
11021             res = 0x7f;
11022         }
11023     }
11024     return res;
11025 }
11026 
11027 /* Perform 16-bit signed saturating subtraction.  */
11028 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
11029 {
11030     uint16_t res;
11031 
11032     res = a - b;
11033     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
11034         if (a & 0x8000) {
11035             res = 0x8000;
11036         } else {
11037             res = 0x7fff;
11038         }
11039     }
11040     return res;
11041 }
11042 
11043 /* Perform 8-bit signed saturating subtraction.  */
11044 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
11045 {
11046     uint8_t res;
11047 
11048     res = a - b;
11049     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
11050         if (a & 0x80) {
11051             res = 0x80;
11052         } else {
11053             res = 0x7f;
11054         }
11055     }
11056     return res;
11057 }
11058 
11059 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
11060 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
11061 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
11062 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
11063 #define PFX q
11064 
11065 #include "op_addsub.h"
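
/*
 * op_addsub.h is included once per arithmetic flavour. Each inclusion
 * consumes the ADD16/SUB16/ADD8/SUB8 and PFX macros defined just above
 * and undefines them again, which is why every block below can simply
 * redefine them. The "q" prefix here produces the helpers behind the
 * A32 signed saturating parallel ops (QADD16, QSUB8 and friends).
 */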
11066 
11067 /* Unsigned saturating arithmetic.  */
11068 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
11069 {
11070     uint16_t res;
11071     res = a + b;
11072     if (res < a) {
11073         res = 0xffff;
11074     }
11075     return res;
11076 }
11077 
11078 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
11079 {
11080     if (a > b) {
11081         return a - b;
11082     } else {
11083         return 0;
11084     }
11085 }
11086 
11087 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
11088 {
11089     uint8_t res;
11090     res = a + b;
11091     if (res < a) {
11092         res = 0xff;
11093     }
11094     return res;
11095 }
11096 
11097 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
11098 {
11099     if (a > b) {
11100         return a - b;
11101     } else {
11102         return 0;
11103     }
11104 }
11105 
11106 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
11107 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
11108 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
11109 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
11110 #define PFX uq
11111 
11112 #include "op_addsub.h"
11113 
11114 /* Signed modulo arithmetic.  */
11115 #define SARITH16(a, b, n, op) do { \
11116     int32_t sum; \
11117     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
11118     RESULT(sum, n, 16); \
11119     if (sum >= 0) \
11120         ge |= 3 << (n * 2); \
11121     } while (0)
11122 
11123 #define SARITH8(a, b, n, op) do { \
11124     int32_t sum; \
11125     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
11126     RESULT(sum, n, 8); \
11127     if (sum >= 0) \
11128         ge |= 1 << n; \
11129     } while (0)
11130 
11131 
11132 #define ADD16(a, b, n) SARITH16(a, b, n, +)
11133 #define SUB16(a, b, n) SARITH16(a, b, n, -)
11134 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
11135 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
11136 #define PFX s
11137 #define ARITH_GE
11138 
11139 #include "op_addsub.h"
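
/*
 * With ARITH_GE defined, op_addsub.h additionally stores the per-lane
 * "ge" bits accumulated by the macros above; architecturally these are
 * the CPSR.GE flags that the SEL instruction (see sel_flags() below)
 * consumes to pick bytes from its two operands.
 */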
11140 
11141 /* Unsigned modulo arithmetic.  */
11142 #define ADD16(a, b, n) do { \
11143     uint32_t sum; \
11144     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
11145     RESULT(sum, n, 16); \
11146     if ((sum >> 16) == 1) \
11147         ge |= 3 << (n * 2); \
11148     } while (0)
11149 
11150 #define ADD8(a, b, n) do { \
11151     uint32_t sum; \
11152     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
11153     RESULT(sum, n, 8); \
11154     if ((sum >> 8) == 1) \
11155         ge |= 1 << n; \
11156     } while (0)
11157 
11158 #define SUB16(a, b, n) do { \
11159     uint32_t sum; \
11160     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
11161     RESULT(sum, n, 16); \
11162     if ((sum >> 16) == 0) \
11163         ge |= 3 << (n * 2); \
11164     } while (0)
11165 
11166 #define SUB8(a, b, n) do { \
11167     uint32_t sum; \
11168     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
11169     RESULT(sum, n, 8); \
11170     if ((sum >> 8) == 0) \
11171         ge |= 1 << n; \
11172     } while (0)
11173 
11174 #define PFX u
11175 #define ARITH_GE
11176 
11177 #include "op_addsub.h"
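
/*
 * Worked example of the unsigned GE rules (illustrative): for UADD16,
 * 0xffff + 0x0001 gives sum == 0x10000, so (sum >> 16) == 1: the lane
 * result is 0x0000 and its GE bits are set, flagging the carry out.
 * The SUB forms set GE on "no borrow", i.e. when a >= b.
 */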
11178 
11179 /* Halved signed arithmetic.  */
11180 #define ADD16(a, b, n) \
11181   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
11182 #define SUB16(a, b, n) \
11183   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
11184 #define ADD8(a, b, n) \
11185   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
11186 #define SUB8(a, b, n) \
11187   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
11188 #define PFX sh
11189 
11190 #include "op_addsub.h"
11191 
11192 /* Halved unsigned arithmetic.  */
11193 #define ADD16(a, b, n) \
11194   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
11195 #define SUB16(a, b, n) \
11196   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
11197 #define ADD8(a, b, n) \
11198   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
11199 #define SUB8(a, b, n) \
11200   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
11201 #define PFX uh
11202 
11203 #include "op_addsub.h"
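
/*
 * The halving forms cannot overflow: the sum or difference is computed
 * in 32 bits and shifted right before truncation, e.g. for UHADD8
 * (0xff + 0xff) >> 1 == 0xff.
 */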
11204 
11205 static inline uint8_t do_usad(uint8_t a, uint8_t b)
11206 {
11207     if (a > b) {
11208         return a - b;
11209     } else {
11210         return b - a;
11211     }
11212 }
11213 
11214 /* Unsigned sum of absolute byte differences.  */
11215 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
11216 {
11217     uint32_t sum;
11218     sum = do_usad(a, b);
11219     sum += do_usad(a >> 8, b >> 8);
11220     sum += do_usad(a >> 16, b >> 16);
11221     sum += do_usad(a >> 24, b >> 24);
11222     return sum;
11223 }
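
/*
 * Worked example (illustrative):
 *   usad8(0x01020304, 0x04030201) == |1-4| + |2-3| + |3-2| + |4-1| == 8
 */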
11224 
11225 /* For ARMv6 SEL instruction.  */
11226 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
11227 {
11228     uint32_t mask;
11229 
11230     mask = 0;
11231     if (flags & 1) {
11232         mask |= 0xff;
11233     }
11234     if (flags & 2) {
11235         mask |= 0xff00;
11236     }
11237     if (flags & 4) {
11238         mask |= 0xff0000;
11239     }
11240     if (flags & 8) {
11241         mask |= 0xff000000;
11242     }
11243     return (a & mask) | (b & ~mask);
11244 }
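
/*
 * Worked example (illustrative): with flags == 0b0101 the mask becomes
 * 0x00ff00ff, so bytes 0 and 2 of the result come from 'a' and bytes
 * 1 and 3 come from 'b'.
 */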
11245 
11246 /*
11247  * CRC helpers.
11248  * The upper bytes of val (above the number specified by 'bytes') must have
11249  * been zeroed out by the caller.
11250  */
11251 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
11252 {
11253     uint8_t buf[4];
11254 
11255     stl_le_p(buf, val);
11256 
11257     /* zlib crc32 converts the accumulator and output to one's complement.  */
11258     return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
11259 }
11260 
11261 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
11262 {
11263     uint8_t buf[4];
11264 
11265     stl_le_p(buf, val);
11266 
11267     /* Linux crc32c converts the output to one's complement.  */
11268     return crc32c(acc, buf, bytes) ^ 0xffffffff;
11269 }
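
/*
 * Illustrative use: the byte form of the instruction (CRC32B) passes a
 * single zero-extended byte with bytes == 1, so only buf[0] of the
 * little-endian buffer is fed to the underlying CRC routine; the word
 * form passes the full value with bytes == 4.
 */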
11270 
11271 /*
11272  * Return the exception level to which FP-disabled exceptions should
11273  * be taken, or 0 if FP is enabled.
11274  */
11275 int fp_exception_el(CPUARMState *env, int cur_el)
11276 {
11277 #ifndef CONFIG_USER_ONLY
11278     uint64_t hcr_el2;
11279 
11280     /*
11281      * CPACR and the CPTR registers don't exist before v6, so FP is
11282      * always accessible
11283      */
11284     if (!arm_feature(env, ARM_FEATURE_V6)) {
11285         return 0;
11286     }
11287 
11288     if (arm_feature(env, ARM_FEATURE_M)) {
11289         /* CPACR can cause a NOCP UsageFault taken to current security state */
11290         if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
11291             return 1;
11292         }
11293 
11294         if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
11295             if (!extract32(env->v7m.nsacr, 10, 1)) {
11296                 /* FP insns cause a NOCP UsageFault taken to Secure */
11297                 return 3;
11298             }
11299         }
11300 
11301         return 0;
11302     }
11303 
11304     hcr_el2 = arm_hcr_el2_eff(env);
11305 
11306     /*
11307      * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
11308      * 0, 2 : trap EL0 and EL1/PL1 accesses
11309      * 1    : trap only EL0 accesses
11310      * 3    : trap no accesses
11311      * This register is ignored if E2H+TGE are both set.
11312      */
11313     if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
11314         int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
11315 
11316         switch (fpen) {
11317         case 1:
11318             if (cur_el != 0) {
11319                 break;
11320             }
11321             /* fall through */
11322         case 0:
11323         case 2:
11324             /* Trap from Secure PL0 or PL1 to Secure PL1. */
11325             if (!arm_el_is_aa64(env, 3)
11326                 && (cur_el == 3 || arm_is_secure_below_el3(env))) {
11327                 return 3;
11328             }
11329             if (cur_el <= 1) {
11330                 return 1;
11331             }
11332             break;
11333         }
11334     }
11335 
11336     /*
11337      * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
11338      * to control non-secure access to the FPU. It doesn't have any
11339      * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
11340      */
11341     if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
11342          cur_el <= 2 && !arm_is_secure_below_el3(env))) {
11343         if (!extract32(env->cp15.nsacr, 10, 1)) {
11344             /* FP insns act as UNDEF */
11345             return cur_el == 2 ? 2 : 1;
11346         }
11347     }
11348 
11349     /*
11350      * CPTR_EL2 is present in v7VE or v8, and changes format
11351      * with HCR_EL2.E2H (regardless of TGE).
11352      */
11353     if (cur_el <= 2) {
11354         if (hcr_el2 & HCR_E2H) {
11355             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
11356             case 1:
11357                 if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
11358                     break;
11359                 }
11360                 /* fall through */
11361             case 0:
11362             case 2:
11363                 return 2;
11364             }
11365         } else if (arm_is_el2_enabled(env)) {
11366             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
11367                 return 2;
11368             }
11369         }
11370     }
11371 
11372     /* CPTR_EL3 : present in v8 */
11373     if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
11374         /* Trap all FP ops to EL3 */
11375         return 3;
11376     }
11377 #endif
11378     return 0;
11379 }
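
/*
 * Example (illustrative, assuming Non-secure state, EL3 AArch64, and no
 * EL2/EL3 traps configured): an FP access from EL0 with
 * CPACR_EL1.FPEN == 0 returns 1 (trap to EL1); the same access with
 * HCR_EL2.{E2H,TGE} == {1,1} skips the CPACR check and is governed by
 * CPTR_EL2.FPEN instead, returning 2 when trapped.
 */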
11380 
11381 /* Return the exception level we're running at if this is our mmu_idx */
11382 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
11383 {
11384     if (mmu_idx & ARM_MMU_IDX_M) {
11385         return mmu_idx & ARM_MMU_IDX_M_PRIV;
11386     }
11387 
11388     switch (mmu_idx) {
11389     case ARMMMUIdx_E10_0:
11390     case ARMMMUIdx_E20_0:
11391     case ARMMMUIdx_E30_0:
11392         return 0;
11393     case ARMMMUIdx_E10_1:
11394     case ARMMMUIdx_E10_1_PAN:
11395         return 1;
11396     case ARMMMUIdx_E2:
11397     case ARMMMUIdx_E20_2:
11398     case ARMMMUIdx_E20_2_PAN:
11399         return 2;
11400     case ARMMMUIdx_E3:
11401     case ARMMMUIdx_E30_3_PAN:
11402         return 3;
11403     default:
11404         g_assert_not_reached();
11405     }
11406 }
11407 
11408 #ifndef CONFIG_TCG
11409 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
11410 {
11411     g_assert_not_reached();
11412 }
11413 #endif
11414 
11415 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
11416 {
11417     ARMMMUIdx idx;
11418     uint64_t hcr;
11419 
11420     if (arm_feature(env, ARM_FEATURE_M)) {
11421         return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
11422     }
11423 
11424     /* See ARM pseudo-function ELIsInHost.  */
11425     switch (el) {
11426     case 0:
11427         hcr = arm_hcr_el2_eff(env);
11428         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
11429             idx = ARMMMUIdx_E20_0;
11430         } else if (arm_is_secure_below_el3(env) &&
11431                    !arm_el_is_aa64(env, 3)) {
11432             idx = ARMMMUIdx_E30_0;
11433         } else {
11434             idx = ARMMMUIdx_E10_0;
11435         }
11436         break;
11437     case 1:
11438         if (arm_pan_enabled(env)) {
11439             idx = ARMMMUIdx_E10_1_PAN;
11440         } else {
11441             idx = ARMMMUIdx_E10_1;
11442         }
11443         break;
11444     case 2:
11445         /* Note that TGE does not apply at EL2.  */
11446         if (arm_hcr_el2_eff(env) & HCR_E2H) {
11447             if (arm_pan_enabled(env)) {
11448                 idx = ARMMMUIdx_E20_2_PAN;
11449             } else {
11450                 idx = ARMMMUIdx_E20_2;
11451             }
11452         } else {
11453             idx = ARMMMUIdx_E2;
11454         }
11455         break;
11456     case 3:
11457         if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) {
11458             return ARMMMUIdx_E30_3_PAN;
11459         }
11460         return ARMMMUIdx_E3;
11461     default:
11462         g_assert_not_reached();
11463     }
11464 
11465     return idx;
11466 }
11467 
11468 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
11469 {
11470     return arm_mmu_idx_el(env, arm_current_el(env));
11471 }
11472 
11473 static bool mve_no_pred(CPUARMState *env)
11474 {
11475     /*
11476      * Return true if there is definitely no predication of MVE
11477      * instructions by VPR or LTPSIZE. (Returning false even if there
11478      * isn't any predication is OK; generated code will just be
11479      * a little worse.)
11480      * If the CPU does not implement MVE then this TB flag is always 0.
11481      *
11482      * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
11483      * logic in gen_update_fp_context() needs to be updated to match.
11484      *
11485      * We do not include the effect of the ECI bits here -- they are
11486      * tracked in other TB flags. This simplifies the logic for
11487      * "when did we emit code that changes the MVE_NO_PRED TB flag
11488      * and thus need to end the TB?".
11489      */
11490     if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
11491         return false;
11492     }
11493     if (env->v7m.vpr) {
11494         return false;
11495     }
11496     if (env->v7m.ltpsize < 4) {
11497         return false;
11498     }
11499     return true;
11500 }
11501 
11502 void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
11503                           uint64_t *cs_base, uint32_t *pflags)
11504 {
11505     CPUARMTBFlags flags;
11506 
11507     assert_hflags_rebuild_correctly(env);
11508     flags = env->hflags;
11509 
11510     if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
11511         *pc = env->pc;
11512         if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
11513             DP_TBFLAG_A64(flags, BTYPE, env->btype);
11514         }
11515     } else {
11516         *pc = env->regs[15];
11517 
11518         if (arm_feature(env, ARM_FEATURE_M)) {
11519             if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11520                 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
11521                 != env->v7m.secure) {
11522                 DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
11523             }
11524 
11525             if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
11526                 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
11527                  (env->v7m.secure &&
11528                   !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
11529                 /*
11530                  * ASPEN is set, but FPCA/SFPA indicate that there is no
11531                  * active FP context; we must create a new FP context before
11532                  * executing any FP insn.
11533                  */
11534                 DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
11535             }
11536 
11537             bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
11538             if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
11539                 DP_TBFLAG_M32(flags, LSPACT, 1);
11540             }
11541 
11542             if (mve_no_pred(env)) {
11543                 DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
11544             }
11545         } else {
11546             /*
11547              * Note that XSCALE_CPAR shares bits with VECSTRIDE.
11548              * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
11549              */
11550             if (arm_feature(env, ARM_FEATURE_XSCALE)) {
11551                 DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
11552             } else {
11553                 DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
11554                 DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
11555             }
11556             if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
11557                 DP_TBFLAG_A32(flags, VFPEN, 1);
11558             }
11559         }
11560 
11561         DP_TBFLAG_AM32(flags, THUMB, env->thumb);
11562         DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
11563     }
11564 
11565     /*
11566      * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
11567      * states defined in the ARM ARM for software singlestep:
11568      *  SS_ACTIVE   PSTATE.SS   State
11569      *     0            x       Inactive (the TB flag for SS is always 0)
11570      *     1            0       Active-pending
11571      *     1            1       Active-not-pending
11572      * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
11573      */
11574     if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
11575         DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
11576     }
11577 
11578     *pflags = flags.flags;
11579     *cs_base = flags.flags2;
11580 }
11581 
11582 #ifdef TARGET_AARCH64
11583 /*
11584  * The manual says that when SVE is enabled and VQ is widened the
11585  * implementation is allowed to zero the previously inaccessible
11586  * portion of the registers.  The corollary to that is that when
11587  * SVE is enabled and VQ is narrowed we are also allowed to zero
11588  * the now inaccessible portion of the registers.
11589  *
11590  * The intent of this is that no predicate bit beyond VQ is ever set.
11591  * Which means that some operations on predicate registers themselves
11592  * may operate on full uint64_t or even unrolled across the maximum
11593  * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
11594  * may well be cheaper than conditionals to restrict the operation
11595  * to the relevant portion of a uint16_t[16].
11596  */
11597 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
11598 {
11599     int i, j;
11600     uint64_t pmask;
11601 
11602     assert(vq >= 1 && vq <= ARM_MAX_VQ);
11603     assert(vq <= env_archcpu(env)->sve_max_vq);
11604 
11605     /* Zap the high bits of the zregs.  */
11606     for (i = 0; i < 32; i++) {
11607         memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
11608     }
11609 
11610     /* Zap the high bits of the pregs and ffr.  */
11611     pmask = 0;
11612     if (vq & 3) {
11613         pmask = ~(-1ULL << (16 * (vq & 3)));
11614     }
11615     for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
11616         for (i = 0; i < 17; ++i) {
11617             env->vfp.pregs[i].p[j] &= pmask;
11618         }
11619         pmask = 0;
11620     }
11621 }
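
/*
 * Worked example (illustrative): narrowing to vq == 1 (128-bit vectors)
 * zeroes zregs[i].d[2] upwards, and with (vq & 3) == 1 the first pmask
 * is ~(-1ULL << 16) == 0xffff, so p[0] keeps only its low 16 predicate
 * bits while every higher word is cleared on the later iterations.
 */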
11622 
11623 static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
11624 {
11625     int exc_el;
11626 
11627     if (sm) {
11628         exc_el = sme_exception_el(env, el);
11629     } else {
11630         exc_el = sve_exception_el(env, el);
11631     }
11632     if (exc_el) {
11633         return 0; /* disabled */
11634     }
11635     return sve_vqm1_for_el_sm(env, el, sm);
11636 }
11637 
11638 /*
11639  * Notice a change in SVE vector size when changing EL.
11640  */
11641 void aarch64_sve_change_el(CPUARMState *env, int old_el,
11642                            int new_el, bool el0_a64)
11643 {
11644     ARMCPU *cpu = env_archcpu(env);
11645     int old_len, new_len;
11646     bool old_a64, new_a64, sm;
11647 
11648     /* Nothing to do if no SVE.  */
11649     if (!cpu_isar_feature(aa64_sve, cpu)) {
11650         return;
11651     }
11652 
11653     /* Nothing to do if FP is disabled in either EL.  */
11654     if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
11655         return;
11656     }
11657 
11658     old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
11659     new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
11660 
11661     /*
11662      * Both AArch64.TakeException and AArch64.ExceptionReturn
11663      * invoke ResetSVEState when taking an exception from, or
11664      * returning to, AArch32 state when PSTATE.SM is enabled.
11665      */
11666     sm = FIELD_EX64(env->svcr, SVCR, SM);
11667     if (old_a64 != new_a64 && sm) {
11668         arm_reset_sve_state(env);
11669         return;
11670     }
11671 
11672     /*
11673      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
11674      * at ELx, or not available because the EL is in AArch32 state, then
11675      * for all purposes other than a direct read, the ZCR_ELx.LEN field
11676      * has an effective value of 0".
11677      *
11678      * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
11679      * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
11680      * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
11681      * we already have the correct register contents when encountering the
11682      * vq0->vq0 transition between EL0->EL1.
11683      */
11684     old_len = new_len = 0;
11685     if (old_a64) {
11686         old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
11687     }
11688     if (new_a64) {
11689         new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
11690     }
11691 
11692     /* When changing vector length, clear inaccessible state.  */
11693     if (new_len < old_len) {
11694         aarch64_sve_narrow_vq(env, new_len + 1);
11695     }
11696 }
11697 #endif
11698 
11699 #ifndef CONFIG_USER_ONLY
11700 ARMSecuritySpace arm_security_space(CPUARMState *env)
11701 {
11702     if (arm_feature(env, ARM_FEATURE_M)) {
11703         return arm_secure_to_space(env->v7m.secure);
11704     }
11705 
11706     /*
11707      * If EL3 is not supported then the secure state is implementation
11708      * defined, in which case QEMU defaults to non-secure.
11709      */
11710     if (!arm_feature(env, ARM_FEATURE_EL3)) {
11711         return ARMSS_NonSecure;
11712     }
11713 
11714     /* Check for AArch64 EL3 or AArch32 Mon. */
11715     if (is_a64(env)) {
11716         if (extract32(env->pstate, 2, 2) == 3) {
11717             if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
11718                 return ARMSS_Root;
11719             } else {
11720                 return ARMSS_Secure;
11721             }
11722         }
11723     } else {
11724         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
11725             return ARMSS_Secure;
11726         }
11727     }
11728 
11729     return arm_security_space_below_el3(env);
11730 }
11731 
11732 ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
11733 {
11734     assert(!arm_feature(env, ARM_FEATURE_M));
11735 
11736     /*
11737      * If EL3 is not supported then the secure state is implementation
11738      * defined, in which case QEMU defaults to non-secure.
11739      */
11740     if (!arm_feature(env, ARM_FEATURE_EL3)) {
11741         return ARMSS_NonSecure;
11742     }
11743 
11744     /*
11745      * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
11746      * Ignoring NSE when !NS retains consistency without having to
11747      * modify other predicates.
11748      */
11749     if (!(env->cp15.scr_el3 & SCR_NS)) {
11750         return ARMSS_Secure;
11751     } else if (env->cp15.scr_el3 & SCR_NSE) {
11752         return ARMSS_Realm;
11753     } else {
11754         return ARMSS_NonSecure;
11755     }
11756 }
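
/*
 * Summary of the SCR_EL3 decode above:
 *   {NSE,NS} == {0,0} -> Secure       {0,1} -> Non-secure
 *   {NSE,NS} == {1,1} -> Realm        {1,0} -> Reserved (Secure here)
 */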
11757 #endif /* !CONFIG_USER_ONLY */
11758