xref: /qemu/target/arm/cpregs.h (revision 68df8c8dba57f539d24f1a92a8699a179d9bb6fb)
1 /*
2  * QEMU ARM CP Register access and descriptions
3  *
4  * Copyright (c) 2022 Linaro Ltd
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  */
20 
21 #ifndef TARGET_ARM_CPREGS_H
22 #define TARGET_ARM_CPREGS_H
23 
24 #include "hw/registerfields.h"
25 #include "target/arm/kvm-consts.h"
26 
27 /*
28  * ARMCPRegInfo type field bits:
29  */
30 enum {
31     /*
32      * Register must be handled specially during translation.
33      * The method is one of the values below:
34      */
35     ARM_CP_SPECIAL_MASK          = 0x000f,
36     /* Special: no change to PE state: writes ignored, reads ignored. */
37     ARM_CP_NOP                   = 0x0001,
38     /* Special: sysreg is WFI, for v5 and v6. */
39     ARM_CP_WFI                   = 0x0002,
40     /* Special: sysreg is NZCV. */
41     ARM_CP_NZCV                  = 0x0003,
42     /* Special: sysreg is CURRENTEL. */
43     ARM_CP_CURRENTEL             = 0x0004,
44     /* Special: sysreg is DC ZVA or similar. */
45     ARM_CP_DC_ZVA                = 0x0005,
46     ARM_CP_DC_GVA                = 0x0006,
47     ARM_CP_DC_GZVA               = 0x0007,
48 
49     /* Flag: reads produce resetvalue; writes ignored. */
50     ARM_CP_CONST                 = 1 << 4,
51     /* Flag: For ARM_CP_STATE_AA32, sysreg is 64-bit. */
52     ARM_CP_64BIT                 = 1 << 5,
53     /*
54      * Flag: TB should not be ended after a write to this register
55      * (the default is that the TB ends after cp writes).
56      */
57     ARM_CP_SUPPRESS_TB_END       = 1 << 6,
58     /*
59      * Flag: Permit a register definition to override a previous definition
60      * for the same (cp, is64, crn, crm, opc1, opc2) tuple: either the new
61      * or the old must have the ARM_CP_OVERRIDE bit set.
62      */
63     ARM_CP_OVERRIDE              = 1 << 7,
64     /*
65      * Flag: Register is an alias view of some underlying state which is also
66      * visible via another register, and that the other register is handling
67      * migration and reset; registers marked ARM_CP_ALIAS will not be migrated
68      * but may have their state set by syncing of register state from KVM.
69      */
70     ARM_CP_ALIAS                 = 1 << 8,
71     /*
72      * Flag: Register does I/O and therefore its accesses need to be marked
73      * with translator_io_start() and also end the TB. In particular,
74      * registers which implement clocks or timers require this.
75      */
76     ARM_CP_IO                    = 1 << 9,
77     /*
78      * Flag: Register has no underlying state and does not support raw access
79      * for state saving/loading; it will not be used for either migration or
80      * KVM state synchronization. Typically this is for "registers" which are
81      * actually used as instructions for cache maintenance and so on.
82      */
83     ARM_CP_NO_RAW                = 1 << 10,
84     /*
85      * Flag: The read or write hook might raise an exception; the generated
86      * code will synchronize the CPU state before calling the hook so that it
87      * is safe for the hook to call raise_exception().
88      */
89     ARM_CP_RAISES_EXC            = 1 << 11,
90     /*
91      * Flag: Writes to the sysreg might change the exception level - typically
92      * on older ARM chips. For those cases we need to re-read the new el when
93      * recomputing the translation flags.
94      */
95     ARM_CP_NEWEL                 = 1 << 12,
96     /*
97      * Flag: Access check for this sysreg is identical to accessing FPU state
98      * from an instruction: use translation fp_access_check().
99      */
100     ARM_CP_FPU                   = 1 << 13,
101     /*
102      * Flag: Access check for this sysreg is identical to accessing SVE state
103      * from an instruction: use translation sve_access_check().
104      */
105     ARM_CP_SVE                   = 1 << 14,
106     /* Flag: Do not expose in gdb sysreg xml. */
107     ARM_CP_NO_GDB                = 1 << 15,
108     /*
109      * Flags: If EL3 but not EL2...
110      *   - UNDEF: discard the cpreg,
111      *   -  KEEP: retain the cpreg as is,
112      *   -  C_NZ: set const on the cpreg, but retain resetvalue,
113      *   -  else: set const on the cpreg, zero resetvalue, aka RES0.
114      * See rule RJFFP in section D1.1.3 of DDI0487H.a.
115      */
116     ARM_CP_EL3_NO_EL2_UNDEF      = 1 << 16,
117     ARM_CP_EL3_NO_EL2_KEEP       = 1 << 17,
118     ARM_CP_EL3_NO_EL2_C_NZ       = 1 << 18,
119     /*
120      * Flag: Access check for this sysreg is constrained by the
121      * ARM pseudocode function CheckSMEAccess().
122      */
123     ARM_CP_SME                   = 1 << 19,
124     /*
125      * Flag: one of the four EL2 registers which redirect to the
126      * equivalent EL1 register when FEAT_NV2 is enabled.
127      */
128     ARM_CP_NV2_REDIRECT          = 1 << 20,
129     /*
130      * Flag: this is a TLBI insn which (when FEAT_XS is present) also has
131      * an NXS variant at the same encoding except that crn is 1 greater,
132      * so when registering this cpreg automatically also register one
133      * for the TLBI NXS variant. (For QEMU the NXS variant behaves
134      * identically to the normal one, other than FGT trapping handling.)
135      */
136     ARM_CP_ADD_TLBI_NXS          = 1 << 21,
137 };
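
/*
 * Illustrative sketch (not part of this header's definitions): how the type
 * bits above are typically combined in an ARMCPRegInfo initialiser (the
 * struct and the PL*_R access constants are defined further down; the
 * register name and encoding here are invented for the example).
 *
 *     { .name = "EXAMPLE_CONST_EL1", .state = ARM_CP_STATE_AA64,
 *       .opc0 = 3, .opc1 = 0, .crn = 11, .crm = 0, .opc2 = 0,
 *       .access = PL1_R,
 *       .type = ARM_CP_CONST | ARM_CP_NO_RAW,  // constant, nothing to migrate
 *       .resetvalue = 0 },
 */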
138 
139 /*
140  * Interface for defining coprocessor registers.
141  * Registers are defined in tables of arm_cp_reginfo structs
142  * which are passed to define_arm_cp_regs().
143  */
144 
145 /*
146  * When looking up a coprocessor register we look for it
147  * via an integer which encodes all of:
148  *  coprocessor number
149  *  Crn, Crm, opc1, opc2 fields
150  *  32 or 64 bit register (ie is it accessed via MRC/MCR
151  *    or via MRRC/MCRR?)
152  *  non-secure/secure bank (AArch32 only)
153  * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
154  * (In this case crn and opc2 should be zero.)
155  * For AArch64, there is no 32/64 bit size distinction;
156  * instead all registers have a 2 bit op0, 3 bit op1 and op2,
157  * and 4 bit CRn and CRm. The encoding patterns are chosen
158  * to be easy to convert to and from the KVM encodings, and also
159  * so that the hashtable can contain both AArch32 and AArch64
160  * registers (to allow for interprocessing where we might run
161  * 32 bit code on a 64 bit core).
162  */
163 /*
164  * This bit is private to our hashtable cpreg; in KVM register
165  * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
166  * in the upper bits of the 64 bit ID.
167  */
168 #define CP_REG_AA64_SHIFT 28
169 #define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)
170 
171 /*
172  * To enable banking of coprocessor registers depending on ns-bit we
173  * add a bit to distinguish between secure and non-secure cpregs in the
174  * hashtable.
175  */
176 #define CP_REG_NS_SHIFT 29
177 #define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)
178 
179 #define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2)   \
180     ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) |   \
181      ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))
182 
183 #define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
184     (CP_REG_AA64_MASK |                                 \
185      ((cp) << CP_REG_ARM_COPROC_SHIFT) |                \
186      ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) |         \
187      ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) |         \
188      ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) |         \
189      ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) |         \
190      ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
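
/*
 * Example (illustrative) uses of the two macros above.  CONTEXTIDR is the
 * AArch32 register at p15, opc1=0, crn=13, crm=0, opc2=1; CTR_EL0 is the
 * AArch64 register at op0=3, op1=3, crn=0, crm=0, op2=1.  This assumes the
 * CP_REG_ARM64_SYSREG_CP constant from kvm-consts.h for the cp argument.
 *
 *     // 32-bit AArch32 register, non-secure bank:
 *     uint32_t key32 = ENCODE_CP_REG(15, 0, 1, 13, 0, 0, 1);
 *     // AArch64 system register:
 *     uint32_t key64 = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
 *                                         0, 0, 3, 3, 1);
 */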
191 
192 /*
193  * Convert a full 64 bit KVM register ID to the truncated 32 bit
194  * version used as a key for the coprocessor register hashtable
195  */
196 static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
197 {
198     uint32_t cpregid = kvmid;
199     if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
200         cpregid |= CP_REG_AA64_MASK;
201     } else {
202         if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
203             cpregid |= (1 << 15);
204         }
205 
206         /*
207          * KVM is always non-secure so add the NS flag on AArch32 register
208          * entries.
209          */
210          cpregid |= 1 << CP_REG_NS_SHIFT;
211     }
212     return cpregid;
213 }
214 
215 /*
216  * Convert a truncated 32 bit hashtable key into the full
217  * 64 bit KVM register ID.
218  */
219 static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
220 {
221     uint64_t kvmid;
222 
223     if (cpregid & CP_REG_AA64_MASK) {
224         kvmid = cpregid & ~CP_REG_AA64_MASK;
225         kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
226     } else {
227         kvmid = cpregid & ~(1 << 15);
228         if (cpregid & (1 << 15)) {
229             kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
230         } else {
231             kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
232         }
233     }
234     return kvmid;
235 }
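
/*
 * Illustrative note: for a 64-bit AArch64 sysreg ID the two conversions
 * above are inverses of one another, because the truncation only discards
 * the CP_REG_ARM64 and CP_REG_SIZE_U64 bits that cpreg_to_kvm_id() adds
 * back.  So, for such an ID, something like
 *
 *     assert(cpreg_to_kvm_id(kvm_to_cpreg_id(kvmid)) == kvmid);
 *
 * would hold.  (The AArch32 direction is not exactly symmetrical, since
 * kvm_to_cpreg_id() unconditionally sets the NS bit.)
 */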
236 
237 /*
238  * Valid values for ARMCPRegInfo state field, indicating which of
239  * the AArch32 and AArch64 execution states this register is visible in.
240  * If the reginfo doesn't explicitly specify then it is AArch32 only.
241  * If the reginfo is declared to be visible in both states then a second
242  * reginfo is synthesised for the AArch32 view of the AArch64 register,
243  * such that the AArch32 view is the lower 32 bits of the AArch64 one.
244  * Note that we rely on the values of these enums as we iterate through
245  * the various states in some places.
246  */
247 typedef enum {
248     ARM_CP_STATE_AA32 = 0,
249     ARM_CP_STATE_AA64 = 1,
250     ARM_CP_STATE_BOTH = 2,
251 } CPState;
252 
253 /*
254  * ARM CP register secure state flags.  These flags identify security state
255  * attributes for a given CP register entry.
256  * The existence of both or neither secure and non-secure flags indicates that
257  * the register has both a secure and non-secure hash entry.  A single one of
258  * these flags causes the register to only be hashed for the specified
259  * security state.
260  * Although definitions may have any combination of the S/NS bits, each
261  * registered entry will only have one to identify whether the entry is secure
262  * or non-secure.
263  */
264 typedef enum {
265     ARM_CP_SECSTATE_BOTH = 0,       /* define one cpreg for each secstate */
266     ARM_CP_SECSTATE_S =   (1 << 0), /* bit[0]: Secure state register */
267     ARM_CP_SECSTATE_NS =  (1 << 1), /* bit[1]: Non-secure state register */
268 } CPSecureState;
269 
270 /*
271  * Access rights:
272  * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
273  * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
274  * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
275  * (ie any of the privileged modes in Secure state, or Monitor mode).
276  * If a register is accessible in one privilege level it's always accessible
277  * in higher privilege levels too. Since "Secure PL1" also follows this rule
278  * (ie anything visible in PL2 is visible in S-PL1, some things are only
279  * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
280  * terminology a little and call this PL3.
281  * In AArch64 things are somewhat simpler as the PLx bits line up exactly
282  * with the ELx exception levels.
283  *
284  * If access permissions for a register are more complex than can be
285  * described with these bits, then use a laxer set of restrictions, and
286  * do the more restrictive/complex check inside a helper function.
287  */
288 typedef enum {
289     PL3_R = 0x80,
290     PL3_W = 0x40,
291     PL2_R = 0x20 | PL3_R,
292     PL2_W = 0x10 | PL3_W,
293     PL1_R = 0x08 | PL2_R,
294     PL1_W = 0x04 | PL2_W,
295     PL0_R = 0x02 | PL1_R,
296     PL0_W = 0x01 | PL1_W,
297 
298     /*
299      * For user-mode some registers are accessible to EL0 via a kernel
300      * trap-and-emulate ABI. In this case we define the read permissions
301      * as actually being PL0_R. However some bits of any given register
302      * may still be masked.
303      */
304 #ifdef CONFIG_USER_ONLY
305     PL0U_R = PL0_R,
306 #else
307     PL0U_R = PL1_R,
308 #endif
309 
310     PL3_RW = PL3_R | PL3_W,
311     PL2_RW = PL2_R | PL2_W,
312     PL1_RW = PL1_R | PL1_W,
313     PL0_RW = PL0_R | PL0_W,
314 } CPAccessRights;
315 
316 typedef enum CPAccessResult {
317     /* Access is permitted */
318     CP_ACCESS_OK = 0,
319 
320     /*
321      * Combined with one of the following, the low 2 bits indicate the
322      * target exception level.  If 0, the exception is taken to the usual
323      * target EL (EL1 or PL1 if in EL0, otherwise to the current EL).
324      */
325     CP_ACCESS_EL_MASK = 3,
326 
327     /*
328      * Access fails due to a configurable trap or enable which would
329      * result in a categorized exception syndrome giving information about
330      * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
331      * 0xc or 0x18).
332      */
333     CP_ACCESS_TRAP = (1 << 2),
334     CP_ACCESS_TRAP_EL2 = CP_ACCESS_TRAP | 2,
335     CP_ACCESS_TRAP_EL3 = CP_ACCESS_TRAP | 3,
336 
337     /*
338      * Access fails and results in an exception syndrome 0x0 ("uncategorized").
339      * Note that this is not a catch-all case -- the set of cases which may
340      * result in this failure is specifically defined by the architecture.
341      * This trap is always to the usual target EL, never directly to a
342      * specified target EL.
343      */
344     CP_ACCESS_TRAP_UNCATEGORIZED = (2 << 2),
345 } CPAccessResult;
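
/*
 * Illustrative sketch of an access-check hook returning the values above.
 * The trap condition (example_trap_enabled) is hypothetical; a real accessfn
 * tests the relevant HCR_EL2/SCR_EL3 control bits instead.  The CPAccessFn
 * typedef appears later in this header; arm_current_el() comes from cpu.h.
 *
 *     static CPAccessResult access_example(CPUARMState *env,
 *                                          const ARMCPRegInfo *ri,
 *                                          bool isread)
 *     {
 *         if (arm_current_el(env) == 1 && example_trap_enabled(env)) {
 *             return CP_ACCESS_TRAP_EL2;    // route the trap to EL2
 *         }
 *         return CP_ACCESS_OK;
 *     }
 */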
346 
347 /* Indexes into fgt_read[] */
348 #define FGTREG_HFGRTR 0
349 #define FGTREG_HDFGRTR 1
350 /* Indexes into fgt_write[] */
351 #define FGTREG_HFGWTR 0
352 #define FGTREG_HDFGWTR 1
353 /* Indexes into fgt_exec[] */
354 #define FGTREG_HFGITR 0
355 
356 FIELD(HFGRTR_EL2, AFSR0_EL1, 0, 1)
357 FIELD(HFGRTR_EL2, AFSR1_EL1, 1, 1)
358 FIELD(HFGRTR_EL2, AIDR_EL1, 2, 1)
359 FIELD(HFGRTR_EL2, AMAIR_EL1, 3, 1)
360 FIELD(HFGRTR_EL2, APDAKEY, 4, 1)
361 FIELD(HFGRTR_EL2, APDBKEY, 5, 1)
362 FIELD(HFGRTR_EL2, APGAKEY, 6, 1)
363 FIELD(HFGRTR_EL2, APIAKEY, 7, 1)
364 FIELD(HFGRTR_EL2, APIBKEY, 8, 1)
365 FIELD(HFGRTR_EL2, CCSIDR_EL1, 9, 1)
366 FIELD(HFGRTR_EL2, CLIDR_EL1, 10, 1)
367 FIELD(HFGRTR_EL2, CONTEXTIDR_EL1, 11, 1)
368 FIELD(HFGRTR_EL2, CPACR_EL1, 12, 1)
369 FIELD(HFGRTR_EL2, CSSELR_EL1, 13, 1)
370 FIELD(HFGRTR_EL2, CTR_EL0, 14, 1)
371 FIELD(HFGRTR_EL2, DCZID_EL0, 15, 1)
372 FIELD(HFGRTR_EL2, ESR_EL1, 16, 1)
373 FIELD(HFGRTR_EL2, FAR_EL1, 17, 1)
374 FIELD(HFGRTR_EL2, ISR_EL1, 18, 1)
375 FIELD(HFGRTR_EL2, LORC_EL1, 19, 1)
376 FIELD(HFGRTR_EL2, LOREA_EL1, 20, 1)
377 FIELD(HFGRTR_EL2, LORID_EL1, 21, 1)
378 FIELD(HFGRTR_EL2, LORN_EL1, 22, 1)
379 FIELD(HFGRTR_EL2, LORSA_EL1, 23, 1)
380 FIELD(HFGRTR_EL2, MAIR_EL1, 24, 1)
381 FIELD(HFGRTR_EL2, MIDR_EL1, 25, 1)
382 FIELD(HFGRTR_EL2, MPIDR_EL1, 26, 1)
383 FIELD(HFGRTR_EL2, PAR_EL1, 27, 1)
384 FIELD(HFGRTR_EL2, REVIDR_EL1, 28, 1)
385 FIELD(HFGRTR_EL2, SCTLR_EL1, 29, 1)
386 FIELD(HFGRTR_EL2, SCXTNUM_EL1, 30, 1)
387 FIELD(HFGRTR_EL2, SCXTNUM_EL0, 31, 1)
388 FIELD(HFGRTR_EL2, TCR_EL1, 32, 1)
389 FIELD(HFGRTR_EL2, TPIDR_EL1, 33, 1)
390 FIELD(HFGRTR_EL2, TPIDRRO_EL0, 34, 1)
391 FIELD(HFGRTR_EL2, TPIDR_EL0, 35, 1)
392 FIELD(HFGRTR_EL2, TTBR0_EL1, 36, 1)
393 FIELD(HFGRTR_EL2, TTBR1_EL1, 37, 1)
394 FIELD(HFGRTR_EL2, VBAR_EL1, 38, 1)
395 FIELD(HFGRTR_EL2, ICC_IGRPENN_EL1, 39, 1)
396 FIELD(HFGRTR_EL2, ERRIDR_EL1, 40, 1)
397 FIELD(HFGRTR_EL2, ERRSELR_EL1, 41, 1)
398 FIELD(HFGRTR_EL2, ERXFR_EL1, 42, 1)
399 FIELD(HFGRTR_EL2, ERXCTLR_EL1, 43, 1)
400 FIELD(HFGRTR_EL2, ERXSTATUS_EL1, 44, 1)
401 FIELD(HFGRTR_EL2, ERXMISCN_EL1, 45, 1)
402 FIELD(HFGRTR_EL2, ERXPFGF_EL1, 46, 1)
403 FIELD(HFGRTR_EL2, ERXPFGCTL_EL1, 47, 1)
404 FIELD(HFGRTR_EL2, ERXPFGCDN_EL1, 48, 1)
405 FIELD(HFGRTR_EL2, ERXADDR_EL1, 49, 1)
406 FIELD(HFGRTR_EL2, NACCDATA_EL1, 50, 1)
407 /* 51-53: RES0 */
408 FIELD(HFGRTR_EL2, NSMPRI_EL1, 54, 1)
409 FIELD(HFGRTR_EL2, NTPIDR2_EL0, 55, 1)
410 /* 56-63: RES0 */
411 
412 /* These match HFGRTR but bits for RO registers are RES0 */
413 FIELD(HFGWTR_EL2, AFSR0_EL1, 0, 1)
414 FIELD(HFGWTR_EL2, AFSR1_EL1, 1, 1)
415 FIELD(HFGWTR_EL2, AMAIR_EL1, 3, 1)
416 FIELD(HFGWTR_EL2, APDAKEY, 4, 1)
417 FIELD(HFGWTR_EL2, APDBKEY, 5, 1)
418 FIELD(HFGWTR_EL2, APGAKEY, 6, 1)
419 FIELD(HFGWTR_EL2, APIAKEY, 7, 1)
420 FIELD(HFGWTR_EL2, APIBKEY, 8, 1)
421 FIELD(HFGWTR_EL2, CONTEXTIDR_EL1, 11, 1)
422 FIELD(HFGWTR_EL2, CPACR_EL1, 12, 1)
423 FIELD(HFGWTR_EL2, CSSELR_EL1, 13, 1)
424 FIELD(HFGWTR_EL2, ESR_EL1, 16, 1)
425 FIELD(HFGWTR_EL2, FAR_EL1, 17, 1)
426 FIELD(HFGWTR_EL2, LORC_EL1, 19, 1)
427 FIELD(HFGWTR_EL2, LOREA_EL1, 20, 1)
428 FIELD(HFGWTR_EL2, LORN_EL1, 22, 1)
429 FIELD(HFGWTR_EL2, LORSA_EL1, 23, 1)
430 FIELD(HFGWTR_EL2, MAIR_EL1, 24, 1)
431 FIELD(HFGWTR_EL2, PAR_EL1, 27, 1)
432 FIELD(HFGWTR_EL2, SCTLR_EL1, 29, 1)
433 FIELD(HFGWTR_EL2, SCXTNUM_EL1, 30, 1)
434 FIELD(HFGWTR_EL2, SCXTNUM_EL0, 31, 1)
435 FIELD(HFGWTR_EL2, TCR_EL1, 32, 1)
436 FIELD(HFGWTR_EL2, TPIDR_EL1, 33, 1)
437 FIELD(HFGWTR_EL2, TPIDRRO_EL0, 34, 1)
438 FIELD(HFGWTR_EL2, TPIDR_EL0, 35, 1)
439 FIELD(HFGWTR_EL2, TTBR0_EL1, 36, 1)
440 FIELD(HFGWTR_EL2, TTBR1_EL1, 37, 1)
441 FIELD(HFGWTR_EL2, VBAR_EL1, 38, 1)
442 FIELD(HFGWTR_EL2, ICC_IGRPENN_EL1, 39, 1)
443 FIELD(HFGWTR_EL2, ERRSELR_EL1, 41, 1)
444 FIELD(HFGWTR_EL2, ERXCTLR_EL1, 43, 1)
445 FIELD(HFGWTR_EL2, ERXSTATUS_EL1, 44, 1)
446 FIELD(HFGWTR_EL2, ERXMISCN_EL1, 45, 1)
447 FIELD(HFGWTR_EL2, ERXPFGCTL_EL1, 47, 1)
448 FIELD(HFGWTR_EL2, ERXPFGCDN_EL1, 48, 1)
449 FIELD(HFGWTR_EL2, ERXADDR_EL1, 49, 1)
450 FIELD(HFGWTR_EL2, NACCDATA_EL1, 50, 1)
451 FIELD(HFGWTR_EL2, NSMPRI_EL1, 54, 1)
452 FIELD(HFGWTR_EL2, NTPIDR2_EL0, 55, 1)
453 
454 FIELD(HFGITR_EL2, ICIALLUIS, 0, 1)
455 FIELD(HFGITR_EL2, ICIALLU, 1, 1)
456 FIELD(HFGITR_EL2, ICIVAU, 2, 1)
457 FIELD(HFGITR_EL2, DCIVAC, 3, 1)
458 FIELD(HFGITR_EL2, DCISW, 4, 1)
459 FIELD(HFGITR_EL2, DCCSW, 5, 1)
460 FIELD(HFGITR_EL2, DCCISW, 6, 1)
461 FIELD(HFGITR_EL2, DCCVAU, 7, 1)
462 FIELD(HFGITR_EL2, DCCVAP, 8, 1)
463 FIELD(HFGITR_EL2, DCCVADP, 9, 1)
464 FIELD(HFGITR_EL2, DCCIVAC, 10, 1)
465 FIELD(HFGITR_EL2, DCZVA, 11, 1)
466 FIELD(HFGITR_EL2, ATS1E1R, 12, 1)
467 FIELD(HFGITR_EL2, ATS1E1W, 13, 1)
468 FIELD(HFGITR_EL2, ATS1E0R, 14, 1)
469 FIELD(HFGITR_EL2, ATS1E0W, 15, 1)
470 FIELD(HFGITR_EL2, ATS1E1RP, 16, 1)
471 FIELD(HFGITR_EL2, ATS1E1WP, 17, 1)
472 FIELD(HFGITR_EL2, TLBIVMALLE1OS, 18, 1)
473 FIELD(HFGITR_EL2, TLBIVAE1OS, 19, 1)
474 FIELD(HFGITR_EL2, TLBIASIDE1OS, 20, 1)
475 FIELD(HFGITR_EL2, TLBIVAAE1OS, 21, 1)
476 FIELD(HFGITR_EL2, TLBIVALE1OS, 22, 1)
477 FIELD(HFGITR_EL2, TLBIVAALE1OS, 23, 1)
478 FIELD(HFGITR_EL2, TLBIRVAE1OS, 24, 1)
479 FIELD(HFGITR_EL2, TLBIRVAAE1OS, 25, 1)
480 FIELD(HFGITR_EL2, TLBIRVALE1OS, 26, 1)
481 FIELD(HFGITR_EL2, TLBIRVAALE1OS, 27, 1)
482 FIELD(HFGITR_EL2, TLBIVMALLE1IS, 28, 1)
483 FIELD(HFGITR_EL2, TLBIVAE1IS, 29, 1)
484 FIELD(HFGITR_EL2, TLBIASIDE1IS, 30, 1)
485 FIELD(HFGITR_EL2, TLBIVAAE1IS, 31, 1)
486 FIELD(HFGITR_EL2, TLBIVALE1IS, 32, 1)
487 FIELD(HFGITR_EL2, TLBIVAALE1IS, 33, 1)
488 FIELD(HFGITR_EL2, TLBIRVAE1IS, 34, 1)
489 FIELD(HFGITR_EL2, TLBIRVAAE1IS, 35, 1)
490 FIELD(HFGITR_EL2, TLBIRVALE1IS, 36, 1)
491 FIELD(HFGITR_EL2, TLBIRVAALE1IS, 37, 1)
492 FIELD(HFGITR_EL2, TLBIRVAE1, 38, 1)
493 FIELD(HFGITR_EL2, TLBIRVAAE1, 39, 1)
494 FIELD(HFGITR_EL2, TLBIRVALE1, 40, 1)
495 FIELD(HFGITR_EL2, TLBIRVAALE1, 41, 1)
496 FIELD(HFGITR_EL2, TLBIVMALLE1, 42, 1)
497 FIELD(HFGITR_EL2, TLBIVAE1, 43, 1)
498 FIELD(HFGITR_EL2, TLBIASIDE1, 44, 1)
499 FIELD(HFGITR_EL2, TLBIVAAE1, 45, 1)
500 FIELD(HFGITR_EL2, TLBIVALE1, 46, 1)
501 FIELD(HFGITR_EL2, TLBIVAALE1, 47, 1)
502 FIELD(HFGITR_EL2, CFPRCTX, 48, 1)
503 FIELD(HFGITR_EL2, DVPRCTX, 49, 1)
504 FIELD(HFGITR_EL2, CPPRCTX, 50, 1)
505 FIELD(HFGITR_EL2, ERET, 51, 1)
506 FIELD(HFGITR_EL2, SVC_EL0, 52, 1)
507 FIELD(HFGITR_EL2, SVC_EL1, 53, 1)
508 FIELD(HFGITR_EL2, DCCVAC, 54, 1)
509 FIELD(HFGITR_EL2, NBRBINJ, 55, 1)
510 FIELD(HFGITR_EL2, NBRBIALL, 56, 1)
511 
512 FIELD(HDFGRTR_EL2, DBGBCRN_EL1, 0, 1)
513 FIELD(HDFGRTR_EL2, DBGBVRN_EL1, 1, 1)
514 FIELD(HDFGRTR_EL2, DBGWCRN_EL1, 2, 1)
515 FIELD(HDFGRTR_EL2, DBGWVRN_EL1, 3, 1)
516 FIELD(HDFGRTR_EL2, MDSCR_EL1, 4, 1)
517 FIELD(HDFGRTR_EL2, DBGCLAIM, 5, 1)
518 FIELD(HDFGRTR_EL2, DBGAUTHSTATUS_EL1, 6, 1)
519 FIELD(HDFGRTR_EL2, DBGPRCR_EL1, 7, 1)
520 /* 8: RES0: OSLAR_EL1 is WO */
521 FIELD(HDFGRTR_EL2, OSLSR_EL1, 9, 1)
522 FIELD(HDFGRTR_EL2, OSECCR_EL1, 10, 1)
523 FIELD(HDFGRTR_EL2, OSDLR_EL1, 11, 1)
524 FIELD(HDFGRTR_EL2, PMEVCNTRN_EL0, 12, 1)
525 FIELD(HDFGRTR_EL2, PMEVTYPERN_EL0, 13, 1)
526 FIELD(HDFGRTR_EL2, PMCCFILTR_EL0, 14, 1)
527 FIELD(HDFGRTR_EL2, PMCCNTR_EL0, 15, 1)
528 FIELD(HDFGRTR_EL2, PMCNTEN, 16, 1)
529 FIELD(HDFGRTR_EL2, PMINTEN, 17, 1)
530 FIELD(HDFGRTR_EL2, PMOVS, 18, 1)
531 FIELD(HDFGRTR_EL2, PMSELR_EL0, 19, 1)
532 /* 20: RES0: PMSWINC_EL0 is WO */
533 /* 21: RES0: PMCR_EL0 is WO */
534 FIELD(HDFGRTR_EL2, PMMIR_EL1, 22, 1)
535 FIELD(HDFGRTR_EL2, PMBLIMITR_EL1, 23, 1)
536 FIELD(HDFGRTR_EL2, PMBPTR_EL1, 24, 1)
537 FIELD(HDFGRTR_EL2, PMBSR_EL1, 25, 1)
538 FIELD(HDFGRTR_EL2, PMSCR_EL1, 26, 1)
539 FIELD(HDFGRTR_EL2, PMSEVFR_EL1, 27, 1)
540 FIELD(HDFGRTR_EL2, PMSFCR_EL1, 28, 1)
541 FIELD(HDFGRTR_EL2, PMSICR_EL1, 29, 1)
542 FIELD(HDFGRTR_EL2, PMSIDR_EL1, 30, 1)
543 FIELD(HDFGRTR_EL2, PMSIRR_EL1, 31, 1)
544 FIELD(HDFGRTR_EL2, PMSLATFR_EL1, 32, 1)
545 FIELD(HDFGRTR_EL2, TRC, 33, 1)
546 FIELD(HDFGRTR_EL2, TRCAUTHSTATUS, 34, 1)
547 FIELD(HDFGRTR_EL2, TRCAUXCTLR, 35, 1)
548 FIELD(HDFGRTR_EL2, TRCCLAIM, 36, 1)
549 FIELD(HDFGRTR_EL2, TRCCNTVRn, 37, 1)
550 /* 38, 39: RES0 */
551 FIELD(HDFGRTR_EL2, TRCID, 40, 1)
552 FIELD(HDFGRTR_EL2, TRCIMSPECN, 41, 1)
553 /* 42: RES0: TRCOSLAR is WO */
554 FIELD(HDFGRTR_EL2, TRCOSLSR, 43, 1)
555 FIELD(HDFGRTR_EL2, TRCPRGCTLR, 44, 1)
556 FIELD(HDFGRTR_EL2, TRCSEQSTR, 45, 1)
557 FIELD(HDFGRTR_EL2, TRCSSCSRN, 46, 1)
558 FIELD(HDFGRTR_EL2, TRCSTATR, 47, 1)
559 FIELD(HDFGRTR_EL2, TRCVICTLR, 48, 1)
560 /* 49: RES0: TRFCR_EL1 is WO */
561 FIELD(HDFGRTR_EL2, TRBBASER_EL1, 50, 1)
562 FIELD(HDFGRTR_EL2, TRBIDR_EL1, 51, 1)
563 FIELD(HDFGRTR_EL2, TRBLIMITR_EL1, 52, 1)
564 FIELD(HDFGRTR_EL2, TRBMAR_EL1, 53, 1)
565 FIELD(HDFGRTR_EL2, TRBPTR_EL1, 54, 1)
566 FIELD(HDFGRTR_EL2, TRBSR_EL1, 55, 1)
567 FIELD(HDFGRTR_EL2, TRBTRG_EL1, 56, 1)
568 FIELD(HDFGRTR_EL2, PMUSERENR_EL0, 57, 1)
569 FIELD(HDFGRTR_EL2, PMCEIDN_EL0, 58, 1)
570 FIELD(HDFGRTR_EL2, NBRBIDR, 59, 1)
571 FIELD(HDFGRTR_EL2, NBRBCTL, 60, 1)
572 FIELD(HDFGRTR_EL2, NBRBDATA, 61, 1)
573 FIELD(HDFGRTR_EL2, NPMSNEVFR_EL1, 62, 1)
574 FIELD(HDFGRTR_EL2, PMBIDR_EL1, 63, 1)
575 
576 /*
577  * These match HDFGRTR_EL2, but bits for RO registers are RES0.
578  * A few bits are for WO registers, where the HDFGRTR_EL2 bit is RES0.
579  */
580 FIELD(HDFGWTR_EL2, DBGBCRN_EL1, 0, 1)
581 FIELD(HDFGWTR_EL2, DBGBVRN_EL1, 1, 1)
582 FIELD(HDFGWTR_EL2, DBGWCRN_EL1, 2, 1)
583 FIELD(HDFGWTR_EL2, DBGWVRN_EL1, 3, 1)
584 FIELD(HDFGWTR_EL2, MDSCR_EL1, 4, 1)
585 FIELD(HDFGWTR_EL2, DBGCLAIM, 5, 1)
586 FIELD(HDFGWTR_EL2, DBGPRCR_EL1, 7, 1)
587 FIELD(HDFGWTR_EL2, OSLAR_EL1, 8, 1)
588 FIELD(HDFGWTR_EL2, OSLSR_EL1, 9, 1)
589 FIELD(HDFGWTR_EL2, OSECCR_EL1, 10, 1)
590 FIELD(HDFGWTR_EL2, OSDLR_EL1, 11, 1)
591 FIELD(HDFGWTR_EL2, PMEVCNTRN_EL0, 12, 1)
592 FIELD(HDFGWTR_EL2, PMEVTYPERN_EL0, 13, 1)
593 FIELD(HDFGWTR_EL2, PMCCFILTR_EL0, 14, 1)
594 FIELD(HDFGWTR_EL2, PMCCNTR_EL0, 15, 1)
595 FIELD(HDFGWTR_EL2, PMCNTEN, 16, 1)
596 FIELD(HDFGWTR_EL2, PMINTEN, 17, 1)
597 FIELD(HDFGWTR_EL2, PMOVS, 18, 1)
598 FIELD(HDFGWTR_EL2, PMSELR_EL0, 19, 1)
599 FIELD(HDFGWTR_EL2, PMSWINC_EL0, 20, 1)
600 FIELD(HDFGWTR_EL2, PMCR_EL0, 21, 1)
601 FIELD(HDFGWTR_EL2, PMBLIMITR_EL1, 23, 1)
602 FIELD(HDFGWTR_EL2, PMBPTR_EL1, 24, 1)
603 FIELD(HDFGWTR_EL2, PMBSR_EL1, 25, 1)
604 FIELD(HDFGWTR_EL2, PMSCR_EL1, 26, 1)
605 FIELD(HDFGWTR_EL2, PMSEVFR_EL1, 27, 1)
606 FIELD(HDFGWTR_EL2, PMSFCR_EL1, 28, 1)
607 FIELD(HDFGWTR_EL2, PMSICR_EL1, 29, 1)
608 FIELD(HDFGWTR_EL2, PMSIRR_EL1, 31, 1)
609 FIELD(HDFGWTR_EL2, PMSLATFR_EL1, 32, 1)
610 FIELD(HDFGWTR_EL2, TRC, 33, 1)
611 FIELD(HDFGWTR_EL2, TRCAUXCTLR, 35, 1)
612 FIELD(HDFGWTR_EL2, TRCCLAIM, 36, 1)
613 FIELD(HDFGWTR_EL2, TRCCNTVRn, 37, 1)
614 FIELD(HDFGWTR_EL2, TRCIMSPECN, 41, 1)
615 FIELD(HDFGWTR_EL2, TRCOSLAR, 42, 1)
616 FIELD(HDFGWTR_EL2, TRCPRGCTLR, 44, 1)
617 FIELD(HDFGWTR_EL2, TRCSEQSTR, 45, 1)
618 FIELD(HDFGWTR_EL2, TRCSSCSRN, 46, 1)
619 FIELD(HDFGWTR_EL2, TRCVICTLR, 48, 1)
620 FIELD(HDFGWTR_EL2, TRFCR_EL1, 49, 1)
621 FIELD(HDFGWTR_EL2, TRBBASER_EL1, 50, 1)
622 FIELD(HDFGWTR_EL2, TRBLIMITR_EL1, 52, 1)
623 FIELD(HDFGWTR_EL2, TRBMAR_EL1, 53, 1)
624 FIELD(HDFGWTR_EL2, TRBPTR_EL1, 54, 1)
625 FIELD(HDFGWTR_EL2, TRBSR_EL1, 55, 1)
626 FIELD(HDFGWTR_EL2, TRBTRG_EL1, 56, 1)
627 FIELD(HDFGWTR_EL2, PMUSERENR_EL0, 57, 1)
628 FIELD(HDFGWTR_EL2, NBRBCTL, 60, 1)
629 FIELD(HDFGWTR_EL2, NBRBDATA, 61, 1)
630 FIELD(HDFGWTR_EL2, NPMSNEVFR_EL1, 62, 1)
631 
632 FIELD(FGT, NXS, 13, 1) /* Honour HCR_EL2.FGTnXS to suppress FGT */
633 /* Which fine-grained trap bit register to check, if any */
634 FIELD(FGT, TYPE, 10, 3)
635 FIELD(FGT, REV, 9, 1) /* Is bit sense reversed? */
636 FIELD(FGT, IDX, 6, 3) /* Index within a uint64_t[] array */
637 FIELD(FGT, BITPOS, 0, 6) /* Bit position within the uint64_t */
638 
639 /*
640  * Macros to define FGT_##bitname enum constants to use in ARMCPRegInfo::fgt
641  * fields. We assume for brevity's sake that there are no duplicated
642  * bit names across the various FGT registers.
643  */
644 #define DO_BIT(REG, BITNAME)                                    \
645     FGT_##BITNAME = FGT_##REG | R_##REG##_EL2_##BITNAME##_SHIFT
646 
647 /* Some bits have reversed sense, so 0 means trap and 1 means not */
648 #define DO_REV_BIT(REG, BITNAME)                                        \
649     FGT_##BITNAME = FGT_##REG | FGT_REV | R_##REG##_EL2_##BITNAME##_SHIFT
650 
651 /*
652  * The FGT bits for TLBI maintenance instructions accessible at EL1 always
653  * affect the "normal" TLBI insns; they affect the corresponding TLBI insns
654  * with the nXS qualifier only if HCRX_EL2.FGTnXS is 0. We define e.g.
655  * FGT_TLBIVAE1 to use for the normal insn, and FGT_TLBIVAE1NXS to use
656  * for the nXS qualified insn.
657  */
658 #define DO_TLBINXS_BIT(REG, BITNAME)                             \
659     FGT_##BITNAME = FGT_##REG | R_##REG##_EL2_##BITNAME##_SHIFT, \
660     FGT_##BITNAME##NXS = FGT_##BITNAME | R_FGT_NXS_MASK
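
/*
 * For example (a purely mechanical expansion of the macro above),
 * DO_TLBINXS_BIT(HFGITR, TLBIVAE1OS) produces the pair of enumerators
 *
 *     FGT_TLBIVAE1OS = FGT_HFGITR | R_HFGITR_EL2_TLBIVAE1OS_SHIFT,
 *     FGT_TLBIVAE1OSNXS = FGT_TLBIVAE1OS | R_FGT_NXS_MASK
 *
 * where R_HFGITR_EL2_TLBIVAE1OS_SHIFT is generated by the FIELD() macro
 * (hw/registerfields.h) from the HFGITR_EL2 bit definition earlier in
 * this file.
 */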
661 
662 typedef enum FGTBit {
663     /*
664      * These bits tell us which register arrays to use:
665      * if FGT_R is set then reads are checked against fgt_read[];
666      * if FGT_W is set then writes are checked against fgt_write[];
667      * if FGT_EXEC is set then all accesses are checked against fgt_exec[].
668      *
669      * For almost all bits in the R/W register pairs, the bit exists in
670      * both registers for a RW register, in HFGRTR/HDFGRTR for a RO register
671      * with the corresponding HFGWTR/HDFGWTR bit being RES0, and vice-versa
672      * for a WO register. There are unfortunately a couple of exceptions
673      * (PMCR_EL0, TRFCR_EL1) where the register being trapped is RW but
674      * the FGT system only allows trapping of writes, not reads.
675      *
676      * Note that we arrange these bits so that a 0 FGTBit means "no trap".
677      */
678     FGT_R = 1 << R_FGT_TYPE_SHIFT,
679     FGT_W = 2 << R_FGT_TYPE_SHIFT,
680     FGT_EXEC = 4 << R_FGT_TYPE_SHIFT,
681     FGT_RW = FGT_R | FGT_W,
682     /* Bit to identify whether trap bit is reversed sense */
683     FGT_REV = R_FGT_REV_MASK,
684 
685     /*
686      * If a bit exists in HFGRTR/HDFGRTR then either the register being
687      * trapped is RO or the bit also exists in HFGWTR/HDFGWTR, so we either
688      * want to trap for both reads and writes or else it's harmless to mark
689      * it as trap-on-writes.
690      * If a bit exists only in HFGWTR/HDFGWTR then either the register being
691      * trapped is WO, or else it is one of the two oddball special cases
692      * which are RW but have only a write trap. We mark these as only
693      * FGT_W so we get the right behaviour for those special cases.
694      * (If a bit was added in future that provided only a read trap for an
695      * RW register we'd need to do something special to get the FGT_R bit
696      * only. But this seems unlikely to happen.)
697      *
698      * So for the DO_BIT/DO_REV_BIT macros: use FGT_HFGRTR/FGT_HDFGRTR if
699      * the bit exists in that register. Otherwise use FGT_HFGWTR/FGT_HDFGWTR.
700      */
701     FGT_HFGRTR = FGT_RW | (FGTREG_HFGRTR << R_FGT_IDX_SHIFT),
702     FGT_HFGWTR = FGT_W | (FGTREG_HFGWTR << R_FGT_IDX_SHIFT),
703     FGT_HDFGRTR = FGT_RW | (FGTREG_HDFGRTR << R_FGT_IDX_SHIFT),
704     FGT_HDFGWTR = FGT_W | (FGTREG_HDFGWTR << R_FGT_IDX_SHIFT),
705     FGT_HFGITR = FGT_EXEC | (FGTREG_HFGITR << R_FGT_IDX_SHIFT),
706 
707     /* Trap bits in HFGRTR_EL2 / HFGWTR_EL2, starting from bit 0. */
708     DO_BIT(HFGRTR, AFSR0_EL1),
709     DO_BIT(HFGRTR, AFSR1_EL1),
710     DO_BIT(HFGRTR, AIDR_EL1),
711     DO_BIT(HFGRTR, AMAIR_EL1),
712     DO_BIT(HFGRTR, APDAKEY),
713     DO_BIT(HFGRTR, APDBKEY),
714     DO_BIT(HFGRTR, APGAKEY),
715     DO_BIT(HFGRTR, APIAKEY),
716     DO_BIT(HFGRTR, APIBKEY),
717     DO_BIT(HFGRTR, CCSIDR_EL1),
718     DO_BIT(HFGRTR, CLIDR_EL1),
719     DO_BIT(HFGRTR, CONTEXTIDR_EL1),
720     DO_BIT(HFGRTR, CPACR_EL1),
721     DO_BIT(HFGRTR, CSSELR_EL1),
722     DO_BIT(HFGRTR, CTR_EL0),
723     DO_BIT(HFGRTR, DCZID_EL0),
724     DO_BIT(HFGRTR, ESR_EL1),
725     DO_BIT(HFGRTR, FAR_EL1),
726     DO_BIT(HFGRTR, ISR_EL1),
727     DO_BIT(HFGRTR, LORC_EL1),
728     DO_BIT(HFGRTR, LOREA_EL1),
729     DO_BIT(HFGRTR, LORID_EL1),
730     DO_BIT(HFGRTR, LORN_EL1),
731     DO_BIT(HFGRTR, LORSA_EL1),
732     DO_BIT(HFGRTR, MAIR_EL1),
733     DO_BIT(HFGRTR, MIDR_EL1),
734     DO_BIT(HFGRTR, MPIDR_EL1),
735     DO_BIT(HFGRTR, PAR_EL1),
736     DO_BIT(HFGRTR, REVIDR_EL1),
737     DO_BIT(HFGRTR, SCTLR_EL1),
738     DO_BIT(HFGRTR, SCXTNUM_EL1),
739     DO_BIT(HFGRTR, SCXTNUM_EL0),
740     DO_BIT(HFGRTR, TCR_EL1),
741     DO_BIT(HFGRTR, TPIDR_EL1),
742     DO_BIT(HFGRTR, TPIDRRO_EL0),
743     DO_BIT(HFGRTR, TPIDR_EL0),
744     DO_BIT(HFGRTR, TTBR0_EL1),
745     DO_BIT(HFGRTR, TTBR1_EL1),
746     DO_BIT(HFGRTR, VBAR_EL1),
747     DO_BIT(HFGRTR, ICC_IGRPENN_EL1),
748     DO_BIT(HFGRTR, ERRIDR_EL1),
749     DO_REV_BIT(HFGRTR, NSMPRI_EL1),
750     DO_REV_BIT(HFGRTR, NTPIDR2_EL0),
751 
752     /* Trap bits in HDFGRTR_EL2 / HDFGWTR_EL2, starting from bit 0. */
753     DO_BIT(HDFGRTR, DBGBCRN_EL1),
754     DO_BIT(HDFGRTR, DBGBVRN_EL1),
755     DO_BIT(HDFGRTR, DBGWCRN_EL1),
756     DO_BIT(HDFGRTR, DBGWVRN_EL1),
757     DO_BIT(HDFGRTR, MDSCR_EL1),
758     DO_BIT(HDFGRTR, DBGCLAIM),
759     DO_BIT(HDFGWTR, OSLAR_EL1),
760     DO_BIT(HDFGRTR, OSLSR_EL1),
761     DO_BIT(HDFGRTR, OSECCR_EL1),
762     DO_BIT(HDFGRTR, OSDLR_EL1),
763     DO_BIT(HDFGRTR, PMEVCNTRN_EL0),
764     DO_BIT(HDFGRTR, PMEVTYPERN_EL0),
765     DO_BIT(HDFGRTR, PMCCFILTR_EL0),
766     DO_BIT(HDFGRTR, PMCCNTR_EL0),
767     DO_BIT(HDFGRTR, PMCNTEN),
768     DO_BIT(HDFGRTR, PMINTEN),
769     DO_BIT(HDFGRTR, PMOVS),
770     DO_BIT(HDFGRTR, PMSELR_EL0),
771     DO_BIT(HDFGWTR, PMSWINC_EL0),
772     DO_BIT(HDFGWTR, PMCR_EL0),
773     DO_BIT(HDFGRTR, PMMIR_EL1),
774     DO_BIT(HDFGRTR, PMCEIDN_EL0),
775 
776     /* Trap bits in HFGITR_EL2, starting from bit 0 */
777     DO_BIT(HFGITR, ICIALLUIS),
778     DO_BIT(HFGITR, ICIALLU),
779     DO_BIT(HFGITR, ICIVAU),
780     DO_BIT(HFGITR, DCIVAC),
781     DO_BIT(HFGITR, DCISW),
782     DO_BIT(HFGITR, DCCSW),
783     DO_BIT(HFGITR, DCCISW),
784     DO_BIT(HFGITR, DCCVAU),
785     DO_BIT(HFGITR, DCCVAP),
786     DO_BIT(HFGITR, DCCVADP),
787     DO_BIT(HFGITR, DCCIVAC),
788     DO_BIT(HFGITR, DCZVA),
789     DO_BIT(HFGITR, ATS1E1R),
790     DO_BIT(HFGITR, ATS1E1W),
791     DO_BIT(HFGITR, ATS1E0R),
792     DO_BIT(HFGITR, ATS1E0W),
793     DO_BIT(HFGITR, ATS1E1RP),
794     DO_BIT(HFGITR, ATS1E1WP),
795     DO_TLBINXS_BIT(HFGITR, TLBIVMALLE1OS),
796     DO_TLBINXS_BIT(HFGITR, TLBIVAE1OS),
797     DO_TLBINXS_BIT(HFGITR, TLBIASIDE1OS),
798     DO_TLBINXS_BIT(HFGITR, TLBIVAAE1OS),
799     DO_TLBINXS_BIT(HFGITR, TLBIVALE1OS),
800     DO_TLBINXS_BIT(HFGITR, TLBIVAALE1OS),
801     DO_TLBINXS_BIT(HFGITR, TLBIRVAE1OS),
802     DO_TLBINXS_BIT(HFGITR, TLBIRVAAE1OS),
803     DO_TLBINXS_BIT(HFGITR, TLBIRVALE1OS),
804     DO_TLBINXS_BIT(HFGITR, TLBIRVAALE1OS),
805     DO_TLBINXS_BIT(HFGITR, TLBIVMALLE1IS),
806     DO_TLBINXS_BIT(HFGITR, TLBIVAE1IS),
807     DO_TLBINXS_BIT(HFGITR, TLBIASIDE1IS),
808     DO_TLBINXS_BIT(HFGITR, TLBIVAAE1IS),
809     DO_TLBINXS_BIT(HFGITR, TLBIVALE1IS),
810     DO_TLBINXS_BIT(HFGITR, TLBIVAALE1IS),
811     DO_TLBINXS_BIT(HFGITR, TLBIRVAE1IS),
812     DO_TLBINXS_BIT(HFGITR, TLBIRVAAE1IS),
813     DO_TLBINXS_BIT(HFGITR, TLBIRVALE1IS),
814     DO_TLBINXS_BIT(HFGITR, TLBIRVAALE1IS),
815     DO_TLBINXS_BIT(HFGITR, TLBIRVAE1),
816     DO_TLBINXS_BIT(HFGITR, TLBIRVAAE1),
817     DO_TLBINXS_BIT(HFGITR, TLBIRVALE1),
818     DO_TLBINXS_BIT(HFGITR, TLBIRVAALE1),
819     DO_TLBINXS_BIT(HFGITR, TLBIVMALLE1),
820     DO_TLBINXS_BIT(HFGITR, TLBIVAE1),
821     DO_TLBINXS_BIT(HFGITR, TLBIASIDE1),
822     DO_TLBINXS_BIT(HFGITR, TLBIVAAE1),
823     DO_TLBINXS_BIT(HFGITR, TLBIVALE1),
824     DO_TLBINXS_BIT(HFGITR, TLBIVAALE1),
825     DO_BIT(HFGITR, CFPRCTX),
826     DO_BIT(HFGITR, DVPRCTX),
827     DO_BIT(HFGITR, CPPRCTX),
828     DO_BIT(HFGITR, DCCVAC),
829 } FGTBit;
830 
831 #undef DO_BIT
832 #undef DO_REV_BIT
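
/*
 * Illustrative sketch (hypothetical definition): the fgt field of an
 * ARMCPRegInfo (declared below) is set to one of the FGT_* enumerators
 * above, e.g.
 *
 *     { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
 *       .opc0 = 3, .opc1 = 3, .crn = 0, .crm = 0, .opc2 = 1,
 *       .access = PL0_R, .type = ARM_CP_CONST,
 *       .fgt = FGT_CTR_EL0,
 *       .resetvalue = 0 },               // placeholder value
 *
 * so that the access-check code can find the HFGRTR_EL2.CTR_EL0 trap bit:
 * the register array, array index and bit position are all encoded in
 * FGT_CTR_EL0.
 */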
833 
834 typedef struct ARMCPRegInfo ARMCPRegInfo;
835 
836 /*
837  * Access functions for coprocessor registers. These cannot fail, and may
838  * not raise exceptions unless the register is marked ARM_CP_RAISES_EXC.
839  */
840 typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
841 typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
842                        uint64_t value);
843 /* Access permission check functions for coprocessor registers. */
844 typedef CPAccessResult CPAccessFn(CPUARMState *env,
845                                   const ARMCPRegInfo *opaque,
846                                   bool isread);
847 /* Hook function for register reset */
848 typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);
849 
850 #define CP_ANY 0xff
851 
852 /* Flags in the high bits of nv2_redirect_offset */
853 #define NV2_REDIR_NV1 0x4000 /* Only redirect when HCR_EL2.NV1 == 1 */
854 #define NV2_REDIR_NO_NV1 0x8000 /* Only redirect when HCR_EL2.NV1 == 0 */
855 #define NV2_REDIR_FLAG_MASK 0xc000
856 
857 /* Definition of an ARM coprocessor register */
858 struct ARMCPRegInfo {
859     /* Name of register (useful mainly for debugging, need not be unique) */
860     const char *name;
861     /*
862      * Location of register: coprocessor number and (crn,crm,opc1,opc2)
863      * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
864      * 'wildcard' field -- any value of that field in the MRC/MCR insn
865      * will be decoded to this register. The register read and write
866      * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
867      * used by the program, so it is possible to register a wildcard and
868      * then behave differently on read/write if necessary.
869      * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
870      * must both be zero.
871      * For AArch64-visible registers, opc0 is also used.
872      * Since there are no "coprocessors" in AArch64, cp is purely used as a
873      * way to distinguish (for KVM's benefit) guest-visible system registers
874      * from demuxed ones provided to preserve the "no side effects on
875      * KVM register read/write from QEMU" semantics. cp==0x13 is guest
876      * visible (to match KVM's encoding); cp==0 will be converted to
877      * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
878      */
879     uint8_t cp;
880     uint8_t crn;
881     uint8_t crm;
882     uint8_t opc0;
883     uint8_t opc1;
884     uint8_t opc2;
885     /* Execution state in which this register is visible: ARM_CP_STATE_* */
886     CPState state;
887     /* Register type: ARM_CP_* bits/values */
888     int type;
889     /* Access rights: PL*_[RW] */
890     CPAccessRights access;
891     /* Security state: ARM_CP_SECSTATE_* bits/values */
892     CPSecureState secure;
893     /*
894      * Which fine-grained trap register bit to check, if any. This
895      * value encodes both the trap register and bit within it.
896      */
897     FGTBit fgt;
898 
899     /*
900      * Offset from VNCR_EL2 when FEAT_NV2 redirects access to memory;
901      * may include an NV2_REDIR_* flag.
902      */
903     uint32_t nv2_redirect_offset;
904 
905     /*
906      * The opaque pointer passed to define_arm_cp_regs_with_opaque() when
907      * this register was defined: can be used to hand data through to the
908      * register read/write functions, since they are passed the ARMCPRegInfo*.
909      */
910     void *opaque;
911     /*
912      * Value of this register, if it is ARM_CP_CONST. Otherwise, if
913      * fieldoffset is non-zero, the reset value of the register.
914      */
915     uint64_t resetvalue;
916     /*
917      * Offset of the field in CPUARMState for this register.
918      * This is not needed if either:
919      *  1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
920      *  2. both readfn and writefn are specified
921      */
922     ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */
923 
924     /*
925      * Offsets of the secure and non-secure fields in CPUARMState for the
926      * register if it is banked.  These fields are only used during the static
927      * registration of a register.  During hashing the bank associated
928      * with a given security state is copied to fieldoffset which is used from
929      * there on out.
930      *
931      * It is expected that register definitions use either fieldoffset or
932      * bank_fieldoffsets in the definition but not both.  It is also expected
933      * that both bank offsets are set when defining a banked register.  This
934      * use indicates that a register is banked.
935      */
936     ptrdiff_t bank_fieldoffsets[2];
937 
938     /*
939      * Function for making any access checks for this register in addition to
940      * those specified by the 'access' permissions bits. If NULL, no extra
941      * checks required. The access check is performed at runtime, not at
942      * translate time.
943      */
944     CPAccessFn *accessfn;
945     /*
946      * Function for handling reads of this register. If NULL, then reads
947      * will be done by loading from the offset into CPUARMState specified
948      * by fieldoffset.
949      */
950     CPReadFn *readfn;
951     /*
952      * Function for handling writes of this register. If NULL, then writes
953      * will be done by writing to the offset into CPUARMState specified
954      * by fieldoffset.
955      */
956     CPWriteFn *writefn;
957     /*
958      * Function for doing a "raw" read; used when we need to copy
959      * coprocessor state to the kernel for KVM or out for
960      * migration. This only needs to be provided if there is also a
961      * readfn and it has side effects (for instance clear-on-read bits).
962      */
963     CPReadFn *raw_readfn;
964     /*
965      * Function for doing a "raw" write; used when we need to copy KVM
966      * kernel coprocessor state into userspace, or for inbound
967      * migration. This only needs to be provided if there is also a
968      * writefn and it masks out "unwritable" bits or has write-one-to-clear
969      * or similar behaviour.
970      */
971     CPWriteFn *raw_writefn;
972     /*
973      * Function for resetting the register. If NULL, then reset will be done
974      * by writing resetvalue to the field specified in fieldoffset. If
975      * fieldoffset is 0 then no reset will be done.
976      */
977     CPResetFn *resetfn;
978 
979     /*
980      * "Original" readfn, writefn, accessfn.
981      * For ARMv8.1-VHE register aliases, we overwrite the read/write
982      * accessor functions of various EL1/EL0 to perform the runtime
983      * check for which sysreg should actually be modified, and then
984      * forwards the operation.  Before overwriting the accessors,
985      * the original function is copied here, so that accesses that
986      * really do go to the EL1/EL0 version proceed normally.
987      * (The corresponding EL2 register is linked via opaque.)
988      */
989     CPReadFn *orig_readfn;
990     CPWriteFn *orig_writefn;
991     CPAccessFn *orig_accessfn;
992 };
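
/*
 * Illustrative sketch of a complete (hypothetical) reginfo using the fields
 * above.  The register name, encoding and the cp15.example_reg state field
 * are invented for the example; raw_write() is the helper declared later in
 * this header.
 *
 *     static void example_write(CPUARMState *env, const ARMCPRegInfo *ri,
 *                               uint64_t value)
 *     {
 *         env->cp15.example_reg = value & 0xffff;   // hypothetical field
 *         // ...plus whatever side effects the register has...
 *     }
 *
 *     static const ARMCPRegInfo example_reginfo = {
 *         .name = "EXAMPLE_EL1", .state = ARM_CP_STATE_AA64,
 *         .opc0 = 3, .opc1 = 0, .crn = 11, .crm = 1, .opc2 = 0,
 *         .access = PL1_RW,
 *         .fieldoffset = offsetof(CPUARMState, cp15.example_reg),
 *         .writefn = example_write,
 *         .raw_writefn = raw_write,   // plain store for migration/KVM sync
 *     };
 */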
993 
994 /*
995  * Macros which are lvalues for the field in CPUARMState for the
996  * ARMCPRegInfo *ri.
997  */
998 #define CPREG_FIELD32(env, ri) \
999     (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
1000 #define CPREG_FIELD64(env, ri) \
1001     (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
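
/*
 * Example (illustrative): a write hook that updates the backing field via
 * the lvalue macro above; the mask is arbitrary and the register is
 * hypothetical.
 *
 *     static void example_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
 *                                 uint64_t value)
 *     {
 *         CPREG_FIELD64(env, ri) = value & 0x00ff00ff;
 *     }
 */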
1002 
1003 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, const ARMCPRegInfo *reg,
1004                                        void *opaque);
1005 
1006 static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
1007 {
1008     define_one_arm_cp_reg_with_opaque(cpu, regs, NULL);
1009 }
1010 
1011 void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
1012                                         void *opaque, size_t len);
1013 
1014 #define define_arm_cp_regs_with_opaque(CPU, REGS, OPAQUE)               \
1015     do {                                                                \
1016         QEMU_BUILD_BUG_ON(ARRAY_SIZE(REGS) == 0);                       \
1017         define_arm_cp_regs_with_opaque_len(CPU, REGS, OPAQUE,           \
1018                                            ARRAY_SIZE(REGS));           \
1019     } while (0)
1020 
1021 #define define_arm_cp_regs(CPU, REGS) \
1022     define_arm_cp_regs_with_opaque(CPU, REGS, NULL)
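
/*
 * Typical (illustrative) usage: registers are collected into a const table
 * and registered with a single call, usually from a CPU init or
 * feature-specific registration function.  Both entries below are invented
 * examples.
 *
 *     static const ARMCPRegInfo example_cp_reginfo[] = {
 *         { .name = "EXAMPLE0_EL1", .state = ARM_CP_STATE_AA64,
 *           .opc0 = 3, .opc1 = 0, .crn = 11, .crm = 2, .opc2 = 0,
 *           .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
 *         { .name = "EXAMPLE1_EL1", .state = ARM_CP_STATE_AA64,
 *           .opc0 = 3, .opc1 = 0, .crn = 11, .crm = 2, .opc2 = 1,
 *           .access = PL1_RW, .type = ARM_CP_NOP },
 *     };
 *
 *     define_arm_cp_regs(cpu, example_cp_reginfo);
 */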
1023 
1024 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
1025 
1026 /*
1027  * Definition of an ARM co-processor register as viewed from
1028  * userspace. This is used for presenting sanitised versions of
1029  * registers to userspace when emulating the Linux AArch64 CPU
1030  * ID/feature ABI (advertised as HWCAP_CPUID).
1031  */
1032 typedef struct ARMCPRegUserSpaceInfo {
1033     /* Name of register */
1034     const char *name;
1035 
1036     /* Is the name actually a glob pattern */
1037     bool is_glob;
1038 
1039     /* Only some bits are exported to user space */
1040     uint64_t exported_bits;
1041 
1042     /* Fixed bits are applied after the mask */
1043     uint64_t fixed_bits;
1044 } ARMCPRegUserSpaceInfo;
1045 
1046 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
1047                                  const ARMCPRegUserSpaceInfo *mods,
1048                                  size_t mods_len);
1049 
1050 #define modify_arm_cp_regs(REGS, MODS)                                  \
1051     do {                                                                \
1052         QEMU_BUILD_BUG_ON(ARRAY_SIZE(REGS) == 0);                       \
1053         QEMU_BUILD_BUG_ON(ARRAY_SIZE(MODS) == 0);                       \
1054         modify_arm_cp_regs_with_len(REGS, ARRAY_SIZE(REGS),             \
1055                                     MODS, ARRAY_SIZE(MODS));            \
1056     } while (0)
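
/*
 * Illustrative sketch of the user-mode ID register sanitisation.  A mods
 * table is applied to a reginfo table so that only selected bits of the
 * matched registers are visible to userspace; the table, glob pattern and
 * mask below are invented examples (not the values QEMU actually uses).
 *
 *     static const ARMCPRegUserSpaceInfo example_user_mods[] = {
 *         { .name = "ID_AA64PFR0_EL1",
 *           .exported_bits = 0x00000000000000f0ull },  // hypothetical mask
 *         { .name = "ID_AA64*_EL1", .is_glob = true }, // others read as zero
 *     };
 *
 *     modify_arm_cp_regs(example_regs, example_user_mods);
 */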
1057 
1058 /* CPWriteFn that can be used to implement writes-ignored behaviour */
1059 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
1060                          uint64_t value);
1061 /* CPReadFn that can be used for read-as-zero behaviour */
1062 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);
1063 
1064 /* CPWriteFn that just writes the value to ri->fieldoffset */
1065 void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value);
1066 
1067 /*
1068  * CPResetFn that does nothing, for use if no reset is required even
1069  * if fieldoffset is non zero.
1070  */
1071 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);
1072 
1073 /*
1074  * Return true if this reginfo struct's field in the cpu state struct
1075  * is 64 bits wide.
1076  */
1077 static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
1078 {
1079     return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
1080 }
1081 
1082 static inline bool cp_access_ok(int current_el,
1083                                 const ARMCPRegInfo *ri, int isread)
1084 {
1085     return (ri->access >> ((current_el * 2) + isread)) & 1;
1086 }
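
/*
 * Example (illustrative): for a register defined with .access = PL1_RW,
 * the translate-time permission check above gives
 *
 *     cp_access_ok(0, ri, true)  == false    // EL0 read: denied
 *     cp_access_ok(1, ri, true)  == true     // EL1 read: allowed
 *     cp_access_ok(1, ri, false) == true     // EL1 write: allowed
 */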
1087 
1088 /* Raw read of a coprocessor register (as needed for migration, etc) */
1089 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);
1090 
1091 /*
1092  * Return true if the cp register encoding is in the "feature ID space" as
1093  * defined by FEAT_IDST (and thus should be reported with ESR_ELx.EC
1094  * as EC_SYSTEMREGISTERTRAP rather than EC_UNCATEGORIZED).
1095  */
1096 static inline bool arm_cpreg_encoding_in_idspace(uint8_t opc0, uint8_t opc1,
1097                                                  uint8_t opc2,
1098                                                  uint8_t crn, uint8_t crm)
1099 {
1100     return opc0 == 3 && (opc1 == 0 || opc1 == 1 || opc1 == 3) &&
1101         crn == 0 && crm < 8;
1102 }
1103 
1104 /*
1105  * As arm_cpreg_encoding_in_idspace(), but take the encoding from an
1106  * ARMCPRegInfo.
1107  */
1108 static inline bool arm_cpreg_in_idspace(const ARMCPRegInfo *ri)
1109 {
1110     return ri->state == ARM_CP_STATE_AA64 &&
1111         arm_cpreg_encoding_in_idspace(ri->opc0, ri->opc1, ri->opc2,
1112                                       ri->crn, ri->crm);
1113 }
1114 
1115 #ifdef CONFIG_USER_ONLY
1116 static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
1117 #else
1118 void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
1119 #endif
1120 
1121 CPAccessResult access_tvm_trvm(CPUARMState *, const ARMCPRegInfo *, bool);
1122 
1123 /**
1124  * arm_cpreg_traps_in_nv: Return true if cpreg traps in nested virtualization
1125  *
1126  * Return true if this cpreg is one which should be trapped to EL2 if
1127  * it is executed at EL1 when nested virtualization is enabled via HCR_EL2.NV.
1128  */
1129 static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri)
1130 {
1131     /*
1132      * The Arm ARM defines the registers to be trapped in terms of
1133      * their names (I_TZTZL). However the underlying principle is "if
1134      * it would UNDEF at EL1 but work at EL2 then it should trap", and
1135      * the way the encoding of sysregs and system instructions is done
1136      * means that the right set of registers is exactly those where
1137      * the opc1 field is 4 or 5. (You can see this also in the assert
1138      * we do that the opc1 field and the permissions mask line up in
1139      * define_one_arm_cp_reg_with_opaque().)
1140      * Checking the opc1 field is easier for us and avoids the problem
1141      * that we do not consistently use the right architectural names
1142      * for all sysregs, since we treat the name field as largely for debug.
1143      *
1144      * However we do this check, it is going to be at least potentially
1145      * fragile to future new sysregs, but this seems the least likely
1146      * to break.
1147      *
1148      * In particular, note that the released sysreg XML defines that
1149      * the FEAT_MEC sysregs and instructions do not follow this FEAT_NV
1150      * trapping rule, so we will need to add an ARM_CP_* flag to indicate
1151      * "register does not trap on NV" to handle those if/when we implement
1152      * FEAT_MEC.
1153      */
1154     return ri->opc1 == 4 || ri->opc1 == 5;
1155 }
1156 
1157 #endif /* TARGET_ARM_CPREGS_H */
1158