// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (C) 2020, Red Hat, Inc.
 *
 * The blessed list should be created from the oldest possible kernel,
 * but we can't go older than v5.2 because that's the first release
 * which includes df205b5c6328 ("KVM: arm64: Filter out invalid core
 * register IDs in KVM_GET_REG_LIST"). Without that commit the core
 * registers won't match expectations.
 */
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

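/*
 * Build a KVM sysreg id from the Op0/Op1/CRn/CRm/Op2 encoding of a
 * SYS_<reg> definition.
 */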
#define SYS_REG(r) ARM64_SYS_REG(sys_reg_Op0(SYS_ ## r),	\
				 sys_reg_Op1(SYS_ ## r),	\
				 sys_reg_CRn(SYS_ ## r),	\
				 sys_reg_CRm(SYS_ ## r),	\
				 sys_reg_Op2(SYS_ ## r))

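/*
 * A register that is only expected to be present when the field at
 * feat_shift in id_reg reports at least feat_min.
 */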
struct feature_id_reg {
	__u64 reg;
	__u64 id_reg;
	__u64 feat_shift;
	__u64 feat_min;
};

#define FEAT(id, f, v)				\
	.id_reg = SYS_REG(id),			\
	.feat_shift = id ## _ ## f ## _SHIFT,	\
	.feat_min = id ## _ ## f ## _ ## v

#define REG_FEAT(r, id, f, v)		\
	{				\
		.reg = SYS_REG(r),	\
		FEAT(id, f, v)		\
	}

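/* Registers whose presence is gated by a feature ID register field. */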
static struct feature_id_reg feat_id_regs[] = {
	REG_FEAT(TCR2_EL1, ID_AA64MMFR3_EL1, TCRX, IMP),
	REG_FEAT(TCR2_EL2, ID_AA64MMFR3_EL1, TCRX, IMP),
	REG_FEAT(PIRE0_EL1, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(PIRE0_EL2, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(PIR_EL1, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(PIR_EL2, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(POR_EL1, ID_AA64MMFR3_EL1, S1POE, IMP),
	REG_FEAT(POR_EL0, ID_AA64MMFR3_EL1, S1POE, IMP),
	REG_FEAT(POR_EL2, ID_AA64MMFR3_EL1, S1POE, IMP),
	REG_FEAT(HCRX_EL2, ID_AA64MMFR1_EL1, HCX, IMP),
	REG_FEAT(HFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HFGWTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HFGITR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HDFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HDFGWTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HAFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HFGRTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HFGITR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HDFGRTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HDFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(ZCR_EL2, ID_AA64PFR0_EL1, SVE, IMP),
	REG_FEAT(SCTLR2_EL1, ID_AA64MMFR3_EL1, SCTLRX, IMP),
	REG_FEAT(VDISR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
	REG_FEAT(VSESR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
};

bool filter_reg(__u64 reg)
{
	/*
	 * DEMUX register presence depends on the host's CLIDR_EL1.
	 * This means there's no set of them that we can bless.
	 */
	if ((reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return true;

	return false;
}

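/*
 * A feature-gated register is only expected when the corresponding ID
 * register field reports at least the required minimum value.
 */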
static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
	int i, ret;
	__u64 data, feat_val;

	for (i = 0; i < ARRAY_SIZE(feat_id_regs); i++) {
		if (feat_id_regs[i].reg == reg) {
			ret = __vcpu_get_reg(vcpu, feat_id_regs[i].id_reg, &data);
			if (ret < 0)
				return false;

			feat_val = ((data >> feat_id_regs[i].feat_shift) & 0xf);
			return feat_val >= feat_id_regs[i].feat_min;
		}
	}

	return true;
}

bool check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
	return check_supported_feat_reg(vcpu, reg);
}

bool check_reject_set(int err)
{
	return err == EPERM;
}

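/* Run KVM_ARM_VCPU_FINALIZE for each sublist that requires it (e.g. SVE). */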
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
	struct vcpu_reg_sublist *s;
	int feature;

	for_each_sublist(c, s) {
		if (s->finalize) {
			feature = s->feature;
			vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
		}
	}
}

#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)

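/* Width, in 32-bit words, of each core register type in struct kvm_regs. */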
#define CORE_REGS_XX_NR_WORDS	2
#define CORE_SPSR_XX_NR_WORDS	2
#define CORE_FPREGS_XX_NR_WORDS	4

static const char *core_id_to_str(const char *prefix, __u64 id)
{
	__u64 core_off = id & ~REG_MASK, idx;

	/*
	 * core_off is the offset into struct kvm_regs
	 */
	switch (core_off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", prefix, idx);
		return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx);
	case KVM_REG_ARM_CORE_REG(regs.sp):
		return "KVM_REG_ARM_CORE_REG(regs.sp)";
	case KVM_REG_ARM_CORE_REG(regs.pc):
		return "KVM_REG_ARM_CORE_REG(regs.pc)";
	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return "KVM_REG_ARM_CORE_REG(regs.pstate)";
	case KVM_REG_ARM_CORE_REG(sp_el1):
		return "KVM_REG_ARM_CORE_REG(sp_el1)";
	case KVM_REG_ARM_CORE_REG(elr_el1):
		return "KVM_REG_ARM_CORE_REG(elr_el1)";
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
		TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", prefix, idx);
		return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", prefix, idx);
		return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
	}

	TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
	return NULL;
}

static const char *sve_id_to_str(const char *prefix, __u64 id)
{
	__u64 sve_off, n, i;

	if (id == KVM_REG_ARM64_SVE_VLS)
		return "KVM_REG_ARM64_SVE_VLS";

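	/*
	 * Bits [4:0] of an SVE ZREG/PREG id select the slice; mask them
	 * (and the common REG_MASK bits) off to get the register base.
	 */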
	sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
	i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);

	TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", prefix, id);

	switch (sve_off) {
	case KVM_REG_ARM64_SVE_ZREG_BASE ...
	     KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
			    "%s: Unexpected bits set in SVE ZREG id: 0x%llx", prefix, id);
		return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n);
	case KVM_REG_ARM64_SVE_PREG_BASE ...
	     KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
			    "%s: Unexpected bits set in SVE PREG id: 0x%llx", prefix, id);
		return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n);
	case KVM_REG_ARM64_SVE_FFR_BASE:
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
			    "%s: Unexpected bits set in SVE FFR id: 0x%llx", prefix, id);
		return "KVM_REG_ARM64_SVE_FFR(0)";
	}

	return NULL;
}

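/* Print a reg id in the same source form used by the blessed lists in this file. */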
void print_reg(const char *prefix, __u64 id)
{
	unsigned op0, op1, crn, crm, op2;
	const char *reg_size = NULL;

	TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
		    "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", prefix, id);

	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U8:
		reg_size = "KVM_REG_SIZE_U8";
		break;
	case KVM_REG_SIZE_U16:
		reg_size = "KVM_REG_SIZE_U16";
		break;
	case KVM_REG_SIZE_U32:
		reg_size = "KVM_REG_SIZE_U32";
		break;
	case KVM_REG_SIZE_U64:
		reg_size = "KVM_REG_SIZE_U64";
		break;
	case KVM_REG_SIZE_U128:
		reg_size = "KVM_REG_SIZE_U128";
		break;
	case KVM_REG_SIZE_U256:
		reg_size = "KVM_REG_SIZE_U256";
		break;
	case KVM_REG_SIZE_U512:
		reg_size = "KVM_REG_SIZE_U512";
		break;
	case KVM_REG_SIZE_U1024:
		reg_size = "KVM_REG_SIZE_U1024";
		break;
	case KVM_REG_SIZE_U2048:
		reg_size = "KVM_REG_SIZE_U2048";
		break;
	default:
		TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
			  prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
	}

	switch (id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(prefix, id));
		break;
	case KVM_REG_ARM_DEMUX:
		TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
			    "%s: Unexpected bits set in DEMUX reg id: 0x%llx", prefix, id);
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
		       reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
		break;
	case KVM_REG_ARM64_SYSREG:
		op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
		op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
		crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
		crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
		op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
		TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
			    "%s: Unexpected bits set in SYSREG reg id: 0x%llx", prefix, id);
		printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
		break;
	case KVM_REG_ARM_FW:
		TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
			    "%s: Unexpected bits set in FW reg id: 0x%llx", prefix, id);
		printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM_FW_FEAT_BMAP:
		TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
			    "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", prefix, id);
		printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM64_SVE:
		printf("\t%s,\n", sve_id_to_str(prefix, id));
		break;
	default:
		TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
			  prefix, (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
	}
}

/*
 * The original blessed list was primed with the output of kernel version
 * v4.15 with --core-reg-fixup and then later updated with new registers.
 * (The --core-reg-fixup option and its fixup function have been removed
 * from the test, as this test is unlikely to be run on a kernel older
 * than v5.2.)
 *
 * The blessed list is up to date with kernel version v6.4 (or so we hope).
 */
static __u64 base_regs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
	KVM_REG_ARM_FW_REG(0), /* KVM_REG_ARM_PSCI_VERSION */
	KVM_REG_ARM_FW_REG(1), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
	KVM_REG_ARM_FW_REG(2), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
	KVM_REG_ARM_FW_REG(3), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(0), /* KVM_REG_ARM_STD_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(1), /* KVM_REG_ARM_STD_HYP_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(2), /* KVM_REG_ARM_VENDOR_HYP_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(3), /* KVM_REG_ARM_VENDOR_HYP_BMAP_2 */
	ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 0, 2),
	ARM64_SYS_REG(3, 0, 0, 0, 0), /* MIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 0, 6), /* REVIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 1), /* CLIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 7), /* AIDR_EL1 */
	ARM64_SYS_REG(3, 3, 0, 0, 1), /* CTR_EL0 */
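	/* DBGBVR<n>_EL1, DBGBCR<n>_EL1, DBGWVR<n>_EL1, DBGWCR<n>_EL1, n = 0..15 */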
	ARM64_SYS_REG(2, 0, 0, 0, 4),
	ARM64_SYS_REG(2, 0, 0, 0, 5),
	ARM64_SYS_REG(2, 0, 0, 0, 6),
	ARM64_SYS_REG(2, 0, 0, 0, 7),
	ARM64_SYS_REG(2, 0, 0, 1, 4),
	ARM64_SYS_REG(2, 0, 0, 1, 5),
	ARM64_SYS_REG(2, 0, 0, 1, 6),
	ARM64_SYS_REG(2, 0, 0, 1, 7),
	ARM64_SYS_REG(2, 0, 0, 2, 0), /* MDCCINT_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 2), /* MDSCR_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 4),
	ARM64_SYS_REG(2, 0, 0, 2, 5),
	ARM64_SYS_REG(2, 0, 0, 2, 6),
	ARM64_SYS_REG(2, 0, 0, 2, 7),
	ARM64_SYS_REG(2, 0, 0, 3, 4),
	ARM64_SYS_REG(2, 0, 0, 3, 5),
	ARM64_SYS_REG(2, 0, 0, 3, 6),
	ARM64_SYS_REG(2, 0, 0, 3, 7),
	ARM64_SYS_REG(2, 0, 0, 4, 4),
	ARM64_SYS_REG(2, 0, 0, 4, 5),
	ARM64_SYS_REG(2, 0, 0, 4, 6),
	ARM64_SYS_REG(2, 0, 0, 4, 7),
	ARM64_SYS_REG(2, 0, 0, 5, 4),
	ARM64_SYS_REG(2, 0, 0, 5, 5),
	ARM64_SYS_REG(2, 0, 0, 5, 6),
	ARM64_SYS_REG(2, 0, 0, 5, 7),
	ARM64_SYS_REG(2, 0, 0, 6, 4),
	ARM64_SYS_REG(2, 0, 0, 6, 5),
	ARM64_SYS_REG(2, 0, 0, 6, 6),
	ARM64_SYS_REG(2, 0, 0, 6, 7),
	ARM64_SYS_REG(2, 0, 0, 7, 4),
	ARM64_SYS_REG(2, 0, 0, 7, 5),
	ARM64_SYS_REG(2, 0, 0, 7, 6),
	ARM64_SYS_REG(2, 0, 0, 7, 7),
	ARM64_SYS_REG(2, 0, 0, 8, 4),
	ARM64_SYS_REG(2, 0, 0, 8, 5),
	ARM64_SYS_REG(2, 0, 0, 8, 6),
	ARM64_SYS_REG(2, 0, 0, 8, 7),
	ARM64_SYS_REG(2, 0, 0, 9, 4),
	ARM64_SYS_REG(2, 0, 0, 9, 5),
	ARM64_SYS_REG(2, 0, 0, 9, 6),
	ARM64_SYS_REG(2, 0, 0, 9, 7),
	ARM64_SYS_REG(2, 0, 0, 10, 4),
	ARM64_SYS_REG(2, 0, 0, 10, 5),
	ARM64_SYS_REG(2, 0, 0, 10, 6),
	ARM64_SYS_REG(2, 0, 0, 10, 7),
	ARM64_SYS_REG(2, 0, 0, 11, 4),
	ARM64_SYS_REG(2, 0, 0, 11, 5),
	ARM64_SYS_REG(2, 0, 0, 11, 6),
	ARM64_SYS_REG(2, 0, 0, 11, 7),
	ARM64_SYS_REG(2, 0, 0, 12, 4),
	ARM64_SYS_REG(2, 0, 0, 12, 5),
	ARM64_SYS_REG(2, 0, 0, 12, 6),
	ARM64_SYS_REG(2, 0, 0, 12, 7),
	ARM64_SYS_REG(2, 0, 0, 13, 4),
	ARM64_SYS_REG(2, 0, 0, 13, 5),
	ARM64_SYS_REG(2, 0, 0, 13, 6),
	ARM64_SYS_REG(2, 0, 0, 13, 7),
	ARM64_SYS_REG(2, 0, 0, 14, 4),
	ARM64_SYS_REG(2, 0, 0, 14, 5),
	ARM64_SYS_REG(2, 0, 0, 14, 6),
	ARM64_SYS_REG(2, 0, 0, 14, 7),
	ARM64_SYS_REG(2, 0, 0, 15, 4),
	ARM64_SYS_REG(2, 0, 0, 15, 5),
	ARM64_SYS_REG(2, 0, 0, 15, 6),
	ARM64_SYS_REG(2, 0, 0, 15, 7),
	ARM64_SYS_REG(2, 0, 1, 1, 4), /* OSLSR_EL1 */
	ARM64_SYS_REG(2, 4, 0, 7, 0), /* DBGVCR32_EL2 */
	ARM64_SYS_REG(3, 0, 0, 0, 5), /* MPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 0), /* ID_PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 1), /* ID_PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 2), /* ID_DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 3), /* ID_AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 4), /* ID_MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 5), /* ID_MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 6), /* ID_MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 7), /* ID_MMFR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 0), /* ID_ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 1), /* ID_ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 2), /* ID_ISAR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 3), /* ID_ISAR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 4), /* ID_ISAR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 5), /* ID_ISAR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 6), /* ID_MMFR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 7), /* ID_ISAR6_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 0), /* MVFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 1), /* MVFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 2), /* MVFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 3),
	ARM64_SYS_REG(3, 0, 0, 3, 4), /* ID_PFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 5), /* ID_DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 6), /* ID_MMFR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 7),
	ARM64_SYS_REG(3, 0, 0, 4, 0), /* ID_AA64PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 1), /* ID_AA64PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 2), /* ID_AA64PFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 3),
	ARM64_SYS_REG(3, 0, 0, 4, 4), /* ID_AA64ZFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 5), /* ID_AA64SMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 6),
	ARM64_SYS_REG(3, 0, 0, 4, 7),
	ARM64_SYS_REG(3, 0, 0, 5, 0), /* ID_AA64DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 1), /* ID_AA64DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 2),
	ARM64_SYS_REG(3, 0, 0, 5, 3),
	ARM64_SYS_REG(3, 0, 0, 5, 4), /* ID_AA64AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 5), /* ID_AA64AFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 6),
	ARM64_SYS_REG(3, 0, 0, 5, 7),
	ARM64_SYS_REG(3, 0, 0, 6, 0), /* ID_AA64ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 1), /* ID_AA64ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 2), /* ID_AA64ISAR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 3),
	ARM64_SYS_REG(3, 0, 0, 6, 4),
	ARM64_SYS_REG(3, 0, 0, 6, 5),
	ARM64_SYS_REG(3, 0, 0, 6, 6),
	ARM64_SYS_REG(3, 0, 0, 6, 7),
	ARM64_SYS_REG(3, 0, 0, 7, 0), /* ID_AA64MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 1), /* ID_AA64MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 2), /* ID_AA64MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 4), /* ID_AA64MMFR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 5),
	ARM64_SYS_REG(3, 0, 0, 7, 6),
	ARM64_SYS_REG(3, 0, 0, 7, 7),
	ARM64_SYS_REG(3, 0, 1, 0, 0), /* SCTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 1), /* ACTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 2), /* CPACR_EL1 */
	KVM_ARM64_SYS_REG(SYS_SCTLR2_EL1),
	ARM64_SYS_REG(3, 0, 2, 0, 0), /* TTBR0_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 1), /* TTBR1_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 2), /* TCR_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 3), /* TCR2_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 0), /* AFSR0_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 1), /* AFSR1_EL1 */
	ARM64_SYS_REG(3, 0, 5, 2, 0), /* ESR_EL1 */
	ARM64_SYS_REG(3, 0, 6, 0, 0), /* FAR_EL1 */
	ARM64_SYS_REG(3, 0, 7, 4, 0), /* PAR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 0), /* MAIR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 2), /* PIRE0_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 3), /* PIR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 4), /* POR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 3, 0), /* AMAIR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 0, 0), /* VBAR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 1, 1), /* DISR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 1), /* CONTEXTIDR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 4), /* TPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 14, 1, 0), /* CNTKCTL_EL1 */
	ARM64_SYS_REG(3, 2, 0, 0, 0), /* CSSELR_EL1 */
	ARM64_SYS_REG(3, 3, 10, 2, 4), /* POR_EL0 */
	ARM64_SYS_REG(3, 3, 13, 0, 2), /* TPIDR_EL0 */
	ARM64_SYS_REG(3, 3, 13, 0, 3), /* TPIDRRO_EL0 */
	ARM64_SYS_REG(3, 3, 14, 0, 1), /* CNTPCT_EL0 */
	ARM64_SYS_REG(3, 3, 14, 2, 1), /* CNTP_CTL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 2, 2), /* CNTP_CVAL_EL0 */
	ARM64_SYS_REG(3, 4, 3, 0, 0), /* DACR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 0, 1), /* IFSR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 3, 0), /* FPEXC32_EL2 */
};

static __u64 pmu_regs[] = {
	ARM64_SYS_REG(3, 0, 9, 14, 1), /* PMINTENSET_EL1 */
	ARM64_SYS_REG(3, 0, 9, 14, 2), /* PMINTENCLR_EL1 */
	ARM64_SYS_REG(3, 3, 9, 12, 0), /* PMCR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 1), /* PMCNTENSET_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 2), /* PMCNTENCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 3), /* PMOVSCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 4), /* PMSWINC_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 5), /* PMSELR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 13, 0), /* PMCCNTR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 0), /* PMUSERENR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 3), /* PMOVSSET_EL0 */
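	/* PMEVCNTR<n>_EL0, n = 0..30 */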
	ARM64_SYS_REG(3, 3, 14, 8, 0),
	ARM64_SYS_REG(3, 3, 14, 8, 1),
	ARM64_SYS_REG(3, 3, 14, 8, 2),
	ARM64_SYS_REG(3, 3, 14, 8, 3),
	ARM64_SYS_REG(3, 3, 14, 8, 4),
	ARM64_SYS_REG(3, 3, 14, 8, 5),
	ARM64_SYS_REG(3, 3, 14, 8, 6),
	ARM64_SYS_REG(3, 3, 14, 8, 7),
	ARM64_SYS_REG(3, 3, 14, 9, 0),
	ARM64_SYS_REG(3, 3, 14, 9, 1),
	ARM64_SYS_REG(3, 3, 14, 9, 2),
	ARM64_SYS_REG(3, 3, 14, 9, 3),
	ARM64_SYS_REG(3, 3, 14, 9, 4),
	ARM64_SYS_REG(3, 3, 14, 9, 5),
	ARM64_SYS_REG(3, 3, 14, 9, 6),
	ARM64_SYS_REG(3, 3, 14, 9, 7),
	ARM64_SYS_REG(3, 3, 14, 10, 0),
	ARM64_SYS_REG(3, 3, 14, 10, 1),
	ARM64_SYS_REG(3, 3, 14, 10, 2),
	ARM64_SYS_REG(3, 3, 14, 10, 3),
	ARM64_SYS_REG(3, 3, 14, 10, 4),
	ARM64_SYS_REG(3, 3, 14, 10, 5),
	ARM64_SYS_REG(3, 3, 14, 10, 6),
	ARM64_SYS_REG(3, 3, 14, 10, 7),
	ARM64_SYS_REG(3, 3, 14, 11, 0),
	ARM64_SYS_REG(3, 3, 14, 11, 1),
	ARM64_SYS_REG(3, 3, 14, 11, 2),
	ARM64_SYS_REG(3, 3, 14, 11, 3),
	ARM64_SYS_REG(3, 3, 14, 11, 4),
	ARM64_SYS_REG(3, 3, 14, 11, 5),
	ARM64_SYS_REG(3, 3, 14, 11, 6),
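	/* PMEVTYPER<n>_EL0, n = 0..30 */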
	ARM64_SYS_REG(3, 3, 14, 12, 0),
	ARM64_SYS_REG(3, 3, 14, 12, 1),
	ARM64_SYS_REG(3, 3, 14, 12, 2),
	ARM64_SYS_REG(3, 3, 14, 12, 3),
	ARM64_SYS_REG(3, 3, 14, 12, 4),
	ARM64_SYS_REG(3, 3, 14, 12, 5),
	ARM64_SYS_REG(3, 3, 14, 12, 6),
	ARM64_SYS_REG(3, 3, 14, 12, 7),
	ARM64_SYS_REG(3, 3, 14, 13, 0),
	ARM64_SYS_REG(3, 3, 14, 13, 1),
	ARM64_SYS_REG(3, 3, 14, 13, 2),
	ARM64_SYS_REG(3, 3, 14, 13, 3),
	ARM64_SYS_REG(3, 3, 14, 13, 4),
	ARM64_SYS_REG(3, 3, 14, 13, 5),
	ARM64_SYS_REG(3, 3, 14, 13, 6),
	ARM64_SYS_REG(3, 3, 14, 13, 7),
	ARM64_SYS_REG(3, 3, 14, 14, 0),
	ARM64_SYS_REG(3, 3, 14, 14, 1),
	ARM64_SYS_REG(3, 3, 14, 14, 2),
	ARM64_SYS_REG(3, 3, 14, 14, 3),
	ARM64_SYS_REG(3, 3, 14, 14, 4),
	ARM64_SYS_REG(3, 3, 14, 14, 5),
	ARM64_SYS_REG(3, 3, 14, 14, 6),
	ARM64_SYS_REG(3, 3, 14, 14, 7),
	ARM64_SYS_REG(3, 3, 14, 15, 0),
	ARM64_SYS_REG(3, 3, 14, 15, 1),
	ARM64_SYS_REG(3, 3, 14, 15, 2),
	ARM64_SYS_REG(3, 3, 14, 15, 3),
	ARM64_SYS_REG(3, 3, 14, 15, 4),
	ARM64_SYS_REG(3, 3, 14, 15, 5),
	ARM64_SYS_REG(3, 3, 14, 15, 6),
	ARM64_SYS_REG(3, 3, 14, 15, 7), /* PMCCFILTR_EL0 */
};

static __u64 vregs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
};

static __u64 sve_regs[] = {
	KVM_REG_ARM64_SVE_VLS,
	KVM_REG_ARM64_SVE_ZREG(0, 0),
	KVM_REG_ARM64_SVE_ZREG(1, 0),
	KVM_REG_ARM64_SVE_ZREG(2, 0),
	KVM_REG_ARM64_SVE_ZREG(3, 0),
	KVM_REG_ARM64_SVE_ZREG(4, 0),
	KVM_REG_ARM64_SVE_ZREG(5, 0),
	KVM_REG_ARM64_SVE_ZREG(6, 0),
	KVM_REG_ARM64_SVE_ZREG(7, 0),
	KVM_REG_ARM64_SVE_ZREG(8, 0),
	KVM_REG_ARM64_SVE_ZREG(9, 0),
	KVM_REG_ARM64_SVE_ZREG(10, 0),
	KVM_REG_ARM64_SVE_ZREG(11, 0),
	KVM_REG_ARM64_SVE_ZREG(12, 0),
	KVM_REG_ARM64_SVE_ZREG(13, 0),
	KVM_REG_ARM64_SVE_ZREG(14, 0),
	KVM_REG_ARM64_SVE_ZREG(15, 0),
	KVM_REG_ARM64_SVE_ZREG(16, 0),
	KVM_REG_ARM64_SVE_ZREG(17, 0),
	KVM_REG_ARM64_SVE_ZREG(18, 0),
	KVM_REG_ARM64_SVE_ZREG(19, 0),
	KVM_REG_ARM64_SVE_ZREG(20, 0),
	KVM_REG_ARM64_SVE_ZREG(21, 0),
	KVM_REG_ARM64_SVE_ZREG(22, 0),
	KVM_REG_ARM64_SVE_ZREG(23, 0),
	KVM_REG_ARM64_SVE_ZREG(24, 0),
	KVM_REG_ARM64_SVE_ZREG(25, 0),
	KVM_REG_ARM64_SVE_ZREG(26, 0),
	KVM_REG_ARM64_SVE_ZREG(27, 0),
	KVM_REG_ARM64_SVE_ZREG(28, 0),
	KVM_REG_ARM64_SVE_ZREG(29, 0),
	KVM_REG_ARM64_SVE_ZREG(30, 0),
	KVM_REG_ARM64_SVE_ZREG(31, 0),
	KVM_REG_ARM64_SVE_PREG(0, 0),
	KVM_REG_ARM64_SVE_PREG(1, 0),
	KVM_REG_ARM64_SVE_PREG(2, 0),
	KVM_REG_ARM64_SVE_PREG(3, 0),
	KVM_REG_ARM64_SVE_PREG(4, 0),
	KVM_REG_ARM64_SVE_PREG(5, 0),
	KVM_REG_ARM64_SVE_PREG(6, 0),
	KVM_REG_ARM64_SVE_PREG(7, 0),
	KVM_REG_ARM64_SVE_PREG(8, 0),
	KVM_REG_ARM64_SVE_PREG(9, 0),
	KVM_REG_ARM64_SVE_PREG(10, 0),
	KVM_REG_ARM64_SVE_PREG(11, 0),
	KVM_REG_ARM64_SVE_PREG(12, 0),
	KVM_REG_ARM64_SVE_PREG(13, 0),
	KVM_REG_ARM64_SVE_PREG(14, 0),
	KVM_REG_ARM64_SVE_PREG(15, 0),
	KVM_REG_ARM64_SVE_FFR(0),
	ARM64_SYS_REG(3, 0, 1, 2, 0), /* ZCR_EL1 */
};

static __u64 sve_rejects_set[] = {
	KVM_REG_ARM64_SVE_VLS,
};

static __u64 pauth_addr_regs[] = {
	ARM64_SYS_REG(3, 0, 2, 1, 0), /* APIAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 1, 1), /* APIAKEYHI_EL1 */
	ARM64_SYS_REG(3, 0, 2, 1, 2), /* APIBKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 1, 3), /* APIBKEYHI_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 0), /* APDAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 1), /* APDAKEYHI_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 2), /* APDBKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 3)  /* APDBKEYHI_EL1 */
};

static __u64 pauth_generic_regs[] = {
	ARM64_SYS_REG(3, 0, 2, 3, 0), /* APGAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 3, 1), /* APGAKEYHI_EL1 */
};

static __u64 el2_regs[] = {
	SYS_REG(VPIDR_EL2),
	SYS_REG(VMPIDR_EL2),
	SYS_REG(SCTLR_EL2),
	SYS_REG(ACTLR_EL2),
	SYS_REG(HCR_EL2),
	SYS_REG(MDCR_EL2),
	SYS_REG(CPTR_EL2),
	SYS_REG(HSTR_EL2),
	SYS_REG(HFGRTR_EL2),
	SYS_REG(HFGWTR_EL2),
	SYS_REG(HFGITR_EL2),
	SYS_REG(HACR_EL2),
	SYS_REG(ZCR_EL2),
	SYS_REG(HCRX_EL2),
	SYS_REG(TTBR0_EL2),
	SYS_REG(TTBR1_EL2),
	SYS_REG(TCR_EL2),
	SYS_REG(TCR2_EL2),
	SYS_REG(VTTBR_EL2),
	SYS_REG(VTCR_EL2),
	SYS_REG(VNCR_EL2),
	SYS_REG(HDFGRTR2_EL2),
	SYS_REG(HDFGWTR2_EL2),
	SYS_REG(HFGRTR2_EL2),
	SYS_REG(HFGWTR2_EL2),
	SYS_REG(HDFGRTR_EL2),
	SYS_REG(HDFGWTR_EL2),
	SYS_REG(HAFGRTR_EL2),
	SYS_REG(HFGITR2_EL2),
	SYS_REG(SPSR_EL2),
	SYS_REG(ELR_EL2),
	SYS_REG(AFSR0_EL2),
	SYS_REG(AFSR1_EL2),
	SYS_REG(ESR_EL2),
	SYS_REG(FAR_EL2),
	SYS_REG(HPFAR_EL2),
	SYS_REG(MAIR_EL2),
	SYS_REG(PIRE0_EL2),
	SYS_REG(PIR_EL2),
	SYS_REG(POR_EL2),
	SYS_REG(AMAIR_EL2),
	SYS_REG(VBAR_EL2),
	SYS_REG(CONTEXTIDR_EL2),
	SYS_REG(TPIDR_EL2),
	SYS_REG(CNTVOFF_EL2),
	SYS_REG(CNTHCTL_EL2),
	SYS_REG(CNTHP_CTL_EL2),
	SYS_REG(CNTHP_CVAL_EL2),
	SYS_REG(CNTHV_CTL_EL2),
	SYS_REG(CNTHV_CVAL_EL2),
	SYS_REG(SP_EL2),
	SYS_REG(VDISR_EL2),
	SYS_REG(VSESR_EL2),
};

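/*
 * The sublists below group registers by the vcpu feature that exposes
 * them; the configs combine sublists into the vcpu configurations the
 * test runs against.
 */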
#define BASE_SUBLIST \
	{ "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \
	{ "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
#define PMU_SUBLIST \
	{ "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
	  .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
#define SVE_SUBLIST \
	{ "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
	  .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
	  .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
#define PAUTH_SUBLIST \
	{ \
		.name = "pauth_address", \
		.capability = KVM_CAP_ARM_PTRAUTH_ADDRESS, \
		.feature = KVM_ARM_VCPU_PTRAUTH_ADDRESS, \
		.regs = pauth_addr_regs, \
		.regs_n = ARRAY_SIZE(pauth_addr_regs), \
	}, \
	{ \
		.name = "pauth_generic", \
		.capability = KVM_CAP_ARM_PTRAUTH_GENERIC, \
		.feature = KVM_ARM_VCPU_PTRAUTH_GENERIC, \
		.regs = pauth_generic_regs, \
		.regs_n = ARRAY_SIZE(pauth_generic_regs), \
	}
#define EL2_SUBLIST \
	{ \
		.name = "EL2", \
		.capability = KVM_CAP_ARM_EL2, \
		.feature = KVM_ARM_VCPU_HAS_EL2, \
		.regs = el2_regs, \
		.regs_n = ARRAY_SIZE(el2_regs), \
	}

static struct vcpu_reg_list vregs_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	{0},
	},
};
static struct vcpu_reg_list vregs_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
static struct vcpu_reg_list sve_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	{0},
	},
};
static struct vcpu_reg_list sve_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
static struct vcpu_reg_list pauth_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	{0},
	},
};
static struct vcpu_reg_list pauth_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_vregs_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	VREGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_vregs_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	VREGS_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_sve_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	SVE_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_sve_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	SVE_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_pauth_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_pauth_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

struct vcpu_reg_list *vcpu_configs[] = {
	&vregs_config,
	&vregs_pmu_config,
	&sve_config,
	&sve_pmu_config,
	&pauth_config,
	&pauth_pmu_config,

	&el2_vregs_config,
	&el2_vregs_pmu_config,
	&el2_sve_config,
	&el2_sve_pmu_config,
	&el2_pauth_config,
	&el2_pauth_pmu_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);