xref: /qemu/target/arm/tcg/cpu64.c (revision 7698afc42b5af9e55f12ab2236618e38e5a1c23f)
1 /*
2  * QEMU AArch64 TCG CPUs
3  *
4  * Copyright (c) 2013 Linaro Ltd
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  */
20 
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
23 #include "cpu.h"
24 #include "qemu/module.h"
25 #include "qapi/visitor.h"
26 #include "hw/qdev-properties.h"
27 #include "qemu/units.h"
28 #include "internals.h"
29 #include "cpu-features.h"
30 #include "cpregs.h"
31 
32 static void aarch64_a35_initfn(Object *obj)
33 {
34     ARMCPU *cpu = ARM_CPU(obj);
35     ARMISARegisters *isar = &cpu->isar;
36 
37     cpu->dtb_compatible = "arm,cortex-a35";
38     set_feature(&cpu->env, ARM_FEATURE_V8);
39     set_feature(&cpu->env, ARM_FEATURE_NEON);
40     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
41     set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
42     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
43     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
44     set_feature(&cpu->env, ARM_FEATURE_EL2);
45     set_feature(&cpu->env, ARM_FEATURE_EL3);
46     set_feature(&cpu->env, ARM_FEATURE_PMU);
47 
48     /* From B2.2 AArch64 identification registers. */
49     cpu->midr = 0x411fd040;
50     cpu->revidr = 0;
51     cpu->ctr = 0x84448004;
52     SET_IDREG(isar, ID_PFR0, 0x00000131);
53     SET_IDREG(isar, ID_PFR1, 0x00011011);
54     SET_IDREG(isar, ID_DFR0, 0x03010066);
55     cpu->id_afr0 = 0;
56     SET_IDREG(isar, ID_MMFR0, 0x10201105);
57     SET_IDREG(isar, ID_MMFR1, 0x40000000);
58     SET_IDREG(isar, ID_MMFR2, 0x01260000);
59     SET_IDREG(isar, ID_MMFR3, 0x02102211);
60     SET_IDREG(isar, ID_ISAR0, 0x02101110);
61     SET_IDREG(isar, ID_ISAR1, 0x13112111);
62     SET_IDREG(isar, ID_ISAR2, 0x21232042);
63     SET_IDREG(isar, ID_ISAR3, 0x01112131);
64     SET_IDREG(isar, ID_ISAR4, 0x00011142);
65     SET_IDREG(isar, ID_ISAR5, 0x00011121);
66     SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
67     SET_IDREG(isar, ID_AA64PFR1, 0);
68     SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
69     SET_IDREG(isar, ID_AA64DFR1, 0);
70     SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
71     SET_IDREG(isar, ID_AA64ISAR1, 0);
72     SET_IDREG(isar, ID_AA64MMFR0, 0x00101122);
73     SET_IDREG(isar, ID_AA64MMFR1, 0);
74     cpu->clidr = 0x0a200023;
75     cpu->dcz_blocksize = 4;
76 
77     /* From B2.4 AArch64 Virtual Memory control registers */
78     cpu->reset_sctlr = 0x00c50838;
79 
80     /* From B2.10 AArch64 performance monitor registers */
81     cpu->isar.reset_pmcr_el0 = 0x410a3000;
82 
83     /* From B2.29 Cache ID registers */
84     /* 32KB L1 dcache */
85     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
86     /* 32KB L1 icache */
87     cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 2);
88     /* 512KB L2 cache */
89     cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 512 * KiB, 7);
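    /*
     * Editorial note, illustrative arithmetic only: make_ccsidr() derives the
     * set count from the geometry given above, e.g. for the L1 dcache
     * 32 KiB / (4 ways * 64-byte lines) = 128 sets, which the legacy 32-bit
     * CCSIDR format encodes as NumSets = 127, Associativity = 3 and
     * LineSize = 2 (log2 of 16 words, minus 2); the final argument supplies
     * the remaining attribute bits.
     */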
90 
91     /* From B3.5 VGIC Type register */
92     cpu->gic_num_lrs = 4;
93     cpu->gic_vpribits = 5;
94     cpu->gic_vprebits = 5;
95     cpu->gic_pribits = 5;
96 
97     /* From C6.4 Debug ID Register */
98     cpu->isar.dbgdidr = 0x3516d000;
99     /* From C6.5 Debug Device ID Register */
100     cpu->isar.dbgdevid = 0x00110f13;
101     /* From C6.6 Debug Device ID Register 1 */
102     cpu->isar.dbgdevid1 = 0x2;
103 
104     /* From Cortex-A35 SIMD and Floating-point Support r1p0 */
105     /* From 3.2 AArch32 register summary */
106     cpu->reset_fpsid = 0x41034043;
107 
108     /* From 2.2 AArch64 register summary */
109     cpu->isar.mvfr0 = 0x10110222;
110     cpu->isar.mvfr1 = 0x12111111;
111     cpu->isar.mvfr2 = 0x00000043;
112 
113     /* These values are the same as for the A53/A57/A72. */
114     define_cortex_a72_a57_a53_cp_reginfo(cpu);
115 }
116 
117 static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
118                                    void *opaque, Error **errp)
119 {
120     ARMCPU *cpu = ARM_CPU(obj);
121     uint32_t value;
122 
123     /* All vector lengths are disabled when SVE is off. */
124     if (!cpu_isar_feature(aa64_sve, cpu)) {
125         value = 0;
126     } else {
127         value = cpu->sve_max_vq;
128     }
129     visit_type_uint32(v, name, &value, errp);
130 }
131 
132 static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
133                                    void *opaque, Error **errp)
134 {
135     ARMCPU *cpu = ARM_CPU(obj);
136     uint32_t max_vq;
137 
138     if (!visit_type_uint32(v, name, &max_vq, errp)) {
139         return;
140     }
141 
142     if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
143         error_setg(errp, "unsupported SVE vector length");
144         error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
145                           ARM_MAX_VQ);
146         return;
147     }
148 
149     cpu->sve_max_vq = max_vq;
150 }
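/*
 * Illustrative usage, editorial note: this setter backs the "sve-max-vq"
 * property registered for '-cpu max' in aarch64_max_tcg_initfn() below.  The
 * value is in 128-bit quadword units, so e.g. "-cpu max,sve-max-vq=4" caps
 * SVE at 512-bit vectors, while 0 or anything above ARM_MAX_VQ is rejected
 * with the hint above.
 */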
151 
152 static bool cpu_arm_get_rme(Object *obj, Error **errp)
153 {
154     ARMCPU *cpu = ARM_CPU(obj);
155     return cpu_isar_feature(aa64_rme, cpu);
156 }
157 
158 static void cpu_arm_set_rme(Object *obj, bool value, Error **errp)
159 {
160     ARMCPU *cpu = ARM_CPU(obj);
161 
162     FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, RME, value);
163 }
164 
165 static void cpu_max_set_l0gptsz(Object *obj, Visitor *v, const char *name,
166                                 void *opaque, Error **errp)
167 {
168     ARMCPU *cpu = ARM_CPU(obj);
169     uint32_t value;
170 
171     if (!visit_type_uint32(v, name, &value, errp)) {
172         return;
173     }
174 
175     /* Encode the value for the GPCCR_EL3 field. */
176     switch (value) {
177     case 30:
178     case 34:
179     case 36:
180     case 39:
181         cpu->reset_l0gptsz = value - 30;
182         break;
183     default:
184         error_setg(errp, "invalid value for l0gptsz");
185         error_append_hint(errp, "valid values are 30, 34, 36, 39\n");
186         break;
187     }
188 }
189 
190 static void cpu_max_get_l0gptsz(Object *obj, Visitor *v, const char *name,
191                                 void *opaque, Error **errp)
192 {
193     ARMCPU *cpu = ARM_CPU(obj);
194     uint32_t value = cpu->reset_l0gptsz + 30;
195 
196     visit_type_uint32(v, name, &value, errp);
197 }
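/*
 * Illustrative usage, editorial note: reset_l0gptsz holds the GPCCR_EL3
 * L0GPTSZ encoding (input minus 30), so the accepted inputs map to 30->0,
 * 34->4, 36->6 and 39->9.  With the '-cpu max' properties registered below,
 * something like "-cpu max,x-rme=on,x-l0gptsz=34" selects a 2^34-byte span
 * per level 0 granule protection table entry.
 */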
198 
199 static const Property arm_cpu_lpa2_property =
200     DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true);
201 
202 static void aarch64_a55_initfn(Object *obj)
203 {
204     ARMCPU *cpu = ARM_CPU(obj);
205     ARMISARegisters *isar = &cpu->isar;
206 
207     cpu->dtb_compatible = "arm,cortex-a55";
208     set_feature(&cpu->env, ARM_FEATURE_V8);
209     set_feature(&cpu->env, ARM_FEATURE_NEON);
210     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
211     set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
212     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
213     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
214     set_feature(&cpu->env, ARM_FEATURE_EL2);
215     set_feature(&cpu->env, ARM_FEATURE_EL3);
216     set_feature(&cpu->env, ARM_FEATURE_PMU);
217 
218     /* Ordered by B2.4 AArch64 registers by functional group */
219     cpu->clidr = 0x82000023;
220     cpu->ctr = 0x84448004; /* L1Ip = VIPT */
221     cpu->dcz_blocksize = 4; /* 64 bytes */
222     SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull);
223     SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
224     SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
225     SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101122ull);
226     SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
227     SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
228     SET_IDREG(isar, ID_AA64PFR0, 0x0000000010112222ull);
229     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
230     cpu->id_afr0       = 0x00000000;
231     SET_IDREG(isar, ID_DFR0, 0x04010088);
232     SET_IDREG(isar, ID_ISAR0, 0x02101110);
233     SET_IDREG(isar, ID_ISAR1, 0x13112111);
234     SET_IDREG(isar, ID_ISAR2, 0x21232042);
235     SET_IDREG(isar, ID_ISAR3, 0x01112131);
236     SET_IDREG(isar, ID_ISAR4, 0x00011142);
237     SET_IDREG(isar, ID_ISAR5, 0x01011121);
238     SET_IDREG(isar, ID_ISAR6, 0x00000010);
239     SET_IDREG(isar, ID_MMFR0, 0x10201105);
240     SET_IDREG(isar, ID_MMFR1, 0x40000000);
241     SET_IDREG(isar, ID_MMFR2, 0x01260000);
242     SET_IDREG(isar, ID_MMFR3, 0x02122211);
243     SET_IDREG(isar, ID_MMFR4, 0x00021110);
244     SET_IDREG(isar, ID_PFR0, 0x10010131);
245     SET_IDREG(isar, ID_PFR1, 0x00011011);
246     SET_IDREG(isar, ID_PFR2, 0x00000011);
247     cpu->midr = 0x412FD050;          /* r2p0 */
248     cpu->revidr = 0;
249 
250     /* From B2.23 CCSIDR_EL1 */
251     /* 32KB L1 dcache */
252     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
253     /* 32KB L1 icache */
254     cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 2);
255     /* 512KB L2 cache */
256     cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 512 * KiB, 7);
257 
258     /* From B2.96 SCTLR_EL3 */
259     cpu->reset_sctlr = 0x30c50838;
260 
261     /* From B4.45 ICH_VTR_EL2 */
262     cpu->gic_num_lrs = 4;
263     cpu->gic_vpribits = 5;
264     cpu->gic_vprebits = 5;
265     cpu->gic_pribits = 5;
266 
267     cpu->isar.mvfr0 = 0x10110222;
268     cpu->isar.mvfr1 = 0x13211111;
269     cpu->isar.mvfr2 = 0x00000043;
270 
271     /* From D5.4 AArch64 PMU register summary */
272     cpu->isar.reset_pmcr_el0 = 0x410b3000;
273 }
274 
275 static void aarch64_a72_initfn(Object *obj)
276 {
277     ARMCPU *cpu = ARM_CPU(obj);
278     ARMISARegisters *isar = &cpu->isar;
279 
280     cpu->dtb_compatible = "arm,cortex-a72";
281     set_feature(&cpu->env, ARM_FEATURE_V8);
282     set_feature(&cpu->env, ARM_FEATURE_NEON);
283     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
284     set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
285     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
286     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
287     set_feature(&cpu->env, ARM_FEATURE_EL2);
288     set_feature(&cpu->env, ARM_FEATURE_EL3);
289     set_feature(&cpu->env, ARM_FEATURE_PMU);
290     cpu->midr = 0x410fd083;
291     cpu->revidr = 0x00000000;
292     cpu->reset_fpsid = 0x41034080;
293     cpu->isar.mvfr0 = 0x10110222;
294     cpu->isar.mvfr1 = 0x12111111;
295     cpu->isar.mvfr2 = 0x00000043;
296     cpu->ctr = 0x8444c004;
297     cpu->reset_sctlr = 0x00c50838;
298     SET_IDREG(isar, ID_PFR0, 0x00000131);
299     SET_IDREG(isar, ID_PFR1, 0x00011011);
300     SET_IDREG(isar, ID_DFR0, 0x03010066);
301     cpu->id_afr0 = 0x00000000;
302     SET_IDREG(isar, ID_MMFR0, 0x10201105);
303     SET_IDREG(isar, ID_MMFR1, 0x40000000);
304     SET_IDREG(isar, ID_MMFR2, 0x01260000);
305     SET_IDREG(isar, ID_MMFR3, 0x02102211);
306     SET_IDREG(isar, ID_ISAR0, 0x02101110);
307     SET_IDREG(isar, ID_ISAR1, 0x13112111);
308     SET_IDREG(isar, ID_ISAR2, 0x21232042);
309     SET_IDREG(isar, ID_ISAR3, 0x01112131);
310     SET_IDREG(isar, ID_ISAR4, 0x00011142);
311     SET_IDREG(isar, ID_ISAR5, 0x00011121);
312     SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
313     SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
314     SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
315     SET_IDREG(isar, ID_AA64MMFR0, 0x00001124);
316     cpu->isar.dbgdidr = 0x3516d000;
317     cpu->isar.dbgdevid = 0x01110f13;
318     cpu->isar.dbgdevid1 = 0x2;
319     cpu->isar.reset_pmcr_el0 = 0x41023000;
320     cpu->clidr = 0x0a200023;
321     /* 32KB L1 dcache */
322     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
323     /* 48KB L1 dcache */
324     cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 3, 64, 48 * KiB, 2);
325     /* 1MB L2 cache */
326     cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 1 * MiB, 7);
327     cpu->dcz_blocksize = 4; /* 64 bytes */
328     cpu->gic_num_lrs = 4;
329     cpu->gic_vpribits = 5;
330     cpu->gic_vprebits = 5;
331     cpu->gic_pribits = 5;
332     define_cortex_a72_a57_a53_cp_reginfo(cpu);
333 }
334 
335 static void aarch64_a76_initfn(Object *obj)
336 {
337     ARMCPU *cpu = ARM_CPU(obj);
338     ARMISARegisters *isar = &cpu->isar;
339 
340     cpu->dtb_compatible = "arm,cortex-a76";
341     set_feature(&cpu->env, ARM_FEATURE_V8);
342     set_feature(&cpu->env, ARM_FEATURE_NEON);
343     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
344     set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
345     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
346     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
347     set_feature(&cpu->env, ARM_FEATURE_EL2);
348     set_feature(&cpu->env, ARM_FEATURE_EL3);
349     set_feature(&cpu->env, ARM_FEATURE_PMU);
350 
351     /* Ordered by B2.4 AArch64 registers by functional group */
352     cpu->clidr = 0x82000023;
353     cpu->ctr = 0x8444C004;
354     cpu->dcz_blocksize = 4;
355     SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull);
356     SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
357     SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
358     SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101122ull);
359     SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
360     SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
361     SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
362     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
363     cpu->id_afr0       = 0x00000000;
364     SET_IDREG(isar, ID_DFR0, 0x04010088);
365     SET_IDREG(isar, ID_ISAR0, 0x02101110);
366     SET_IDREG(isar, ID_ISAR1, 0x13112111);
367     SET_IDREG(isar, ID_ISAR2, 0x21232042);
368     SET_IDREG(isar, ID_ISAR3, 0x01112131);
369     SET_IDREG(isar, ID_ISAR4, 0x00010142);
370     SET_IDREG(isar, ID_ISAR5, 0x01011121);
371     SET_IDREG(isar, ID_ISAR6, 0x00000010);
372     SET_IDREG(isar, ID_MMFR0, 0x10201105);
373     SET_IDREG(isar, ID_MMFR1, 0x40000000);
374     SET_IDREG(isar, ID_MMFR2, 0x01260000);
375     SET_IDREG(isar, ID_MMFR3, 0x02122211);
376     SET_IDREG(isar, ID_MMFR4, 0x00021110);
377     SET_IDREG(isar, ID_PFR0, 0x10010131);
378     SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
379     SET_IDREG(isar, ID_PFR2, 0x00000011);
380     cpu->midr = 0x414fd0b1;          /* r4p1 */
381     cpu->revidr = 0;
382 
383     /* From B2.18 CCSIDR_EL1 */
384     /* 64KB L1 dcache */
385     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 7);
386     /* 64KB L1 icache */
387     cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 2);
388     /* 512KB L2 cache */
389     cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 8, 64, 512 * KiB, 7);
390 
391     /* From B2.93 SCTLR_EL3 */
392     cpu->reset_sctlr = 0x30c50838;
393 
394     /* From B4.23 ICH_VTR_EL2 */
395     cpu->gic_num_lrs = 4;
396     cpu->gic_vpribits = 5;
397     cpu->gic_vprebits = 5;
398     cpu->gic_pribits = 5;
399 
400     /* From B5.1 AdvSIMD AArch64 register summary */
401     cpu->isar.mvfr0 = 0x10110222;
402     cpu->isar.mvfr1 = 0x13211111;
403     cpu->isar.mvfr2 = 0x00000043;
404 
405     /* From D5.1 AArch64 PMU register summary */
406     cpu->isar.reset_pmcr_el0 = 0x410b3000;
407 }
408 
409 static void aarch64_a64fx_initfn(Object *obj)
410 {
411     ARMCPU *cpu = ARM_CPU(obj);
412     ARMISARegisters *isar = &cpu->isar;
413 
414     cpu->dtb_compatible = "arm,a64fx";
415     set_feature(&cpu->env, ARM_FEATURE_V8);
416     set_feature(&cpu->env, ARM_FEATURE_NEON);
417     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
418     set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
419     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
420     set_feature(&cpu->env, ARM_FEATURE_EL2);
421     set_feature(&cpu->env, ARM_FEATURE_EL3);
422     set_feature(&cpu->env, ARM_FEATURE_PMU);
423     cpu->midr = 0x461f0010;
424     cpu->revidr = 0x00000000;
425     cpu->ctr = 0x86668006;
426     cpu->reset_sctlr = 0x30000180;
427     SET_IDREG(isar, ID_AA64PFR0, 0x0000000101111111); /* No RAS Extensions */
428     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000000);
429     SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408);
430     SET_IDREG(isar, ID_AA64DFR1, 0x0000000000000000);
431     cpu->id_aa64afr0 = 0x0000000000000000;
432     cpu->id_aa64afr1 = 0x0000000000000000;
433     SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000001122);
434     SET_IDREG(isar, ID_AA64MMFR1, 0x0000000011212100);
435     SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011);
436     SET_IDREG(isar, ID_AA64ISAR0, 0x0000000010211120);
437     SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000010001);
438     SET_IDREG(isar, ID_AA64ZFR0, 0x0000000000000000);
439     cpu->clidr = 0x0000000080000023;
440     /* 64KB L1 dcache */
441     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 256, 64 * KiB, 7);
442     /* 64KB L1 icache */
443     cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 256, 64 * KiB, 2);
444     /* 8MB L2 cache */
445     cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 256, 8 * MiB, 7);
446     cpu->dcz_blocksize = 6; /* 256 bytes */
447     cpu->gic_num_lrs = 4;
448     cpu->gic_vpribits = 5;
449     cpu->gic_vprebits = 5;
450     cpu->gic_pribits = 5;
451 
452     /* The A64FX supports only 128, 256 and 512 bit vector lengths */
453     aarch64_add_sve_properties(obj);
454     cpu->sve_vq.supported = (1 << 0)  /* 128bit */
455                           | (1 << 1)  /* 256bit */
456                           | (1 << 3); /* 512bit */
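    /*
     * Editorial note: sve_vq.supported is a bitmap in which bit N set means
     * a vector length of (N + 1) * 128 bits is available, hence bits 0, 1
     * and 3 here for the 128/256/512-bit lengths noted in the comments.
     */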
457 
458     cpu->isar.reset_pmcr_el0 = 0x46014040;
459 
460     /* TODO:  Add A64FX specific HPC extension registers */
461 }
462 
463 static CPAccessResult access_actlr_w(CPUARMState *env, const ARMCPRegInfo *r,
464                                      bool read)
465 {
466     if (!read) {
467         int el = arm_current_el(env);
468 
469         /* Because ACTLR_EL2 is constant 0, writes below EL2 trap to EL2. */
470         if (el < 2 && arm_is_el2_enabled(env)) {
471             return CP_ACCESS_TRAP_EL2;
472         }
473         /* Because ACTLR_EL3 is constant 0, writes below EL3 trap to EL3. */
474         if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
475             return CP_ACCESS_TRAP_EL3;
476         }
477     }
478     return CP_ACCESS_OK;
479 }
480 
481 static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
482     { .name = "ATCR_EL1", .state = ARM_CP_STATE_AA64,
483       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 7, .opc2 = 0,
484       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
485       /* Traps and enables are the same as for TCR_EL1. */
486       .accessfn = access_tvm_trvm, .fgt = FGT_TCR_EL1, },
487     { .name = "ATCR_EL2", .state = ARM_CP_STATE_AA64,
488       .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 0,
489       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
490     { .name = "ATCR_EL3", .state = ARM_CP_STATE_AA64,
491       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 7, .opc2 = 0,
492       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
493     { .name = "ATCR_EL12", .state = ARM_CP_STATE_AA64,
494       .opc0 = 3, .opc1 = 5, .crn = 15, .crm = 7, .opc2 = 0,
495       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
496     { .name = "AVTCR_EL2", .state = ARM_CP_STATE_AA64,
497       .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 1,
498       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
499     { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
500       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 0,
501       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
502       .accessfn = access_actlr_w },
503     { .name = "CPUACTLR2_EL1", .state = ARM_CP_STATE_AA64,
504       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 1,
505       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
506       .accessfn = access_actlr_w },
507     { .name = "CPUACTLR3_EL1", .state = ARM_CP_STATE_AA64,
508       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 2,
509       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
510       .accessfn = access_actlr_w },
511     /*
512      * Report CPUCFR_EL1.SCU as 1, as we do not implement the DSU
513      * (and in particular its system registers).
514      */
515     { .name = "CPUCFR_EL1", .state = ARM_CP_STATE_AA64,
516       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 0,
517       .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 4 },
518     { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
519       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 4,
520       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0x961563010,
521       .accessfn = access_actlr_w },
522     { .name = "CPUPCR_EL3", .state = ARM_CP_STATE_AA64,
523       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 1,
524       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
525     { .name = "CPUPMR_EL3", .state = ARM_CP_STATE_AA64,
526       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 3,
527       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
528     { .name = "CPUPOR_EL3", .state = ARM_CP_STATE_AA64,
529       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 2,
530       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
531     { .name = "CPUPSELR_EL3", .state = ARM_CP_STATE_AA64,
532       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 0,
533       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
534     { .name = "CPUPWRCTLR_EL1", .state = ARM_CP_STATE_AA64,
535       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7,
536       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
537       .accessfn = access_actlr_w },
538     { .name = "ERXPFGCDN_EL1", .state = ARM_CP_STATE_AA64,
539       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 2,
540       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
541       .accessfn = access_actlr_w },
542     { .name = "ERXPFGCTL_EL1", .state = ARM_CP_STATE_AA64,
543       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 1,
544       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
545       .accessfn = access_actlr_w },
546     { .name = "ERXPFGF_EL1", .state = ARM_CP_STATE_AA64,
547       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 0,
548       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
549       .accessfn = access_actlr_w },
550 };
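/*
 * Editorial note: every entry above is ARM_CP_CONST, i.e. these Neoverse N1
 * IMPDEF registers are modelled as constants (reads return resetvalue and
 * writes are ignored) so that guest accesses do not UNDEF; the entries using
 * access_actlr_w() additionally trap writes from lower ELs, as that function
 * describes.
 */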
551 
552 static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu)
553 {
554     define_arm_cp_regs(cpu, neoverse_n1_cp_reginfo);
555 }
556 
557 static const ARMCPRegInfo neoverse_v1_cp_reginfo[] = {
558     { .name = "CPUECTLR2_EL1", .state = ARM_CP_STATE_AA64,
559       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 5,
560       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
561       .accessfn = access_actlr_w },
562     { .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64,
563       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 0,
564       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
565     { .name = "CPUPPMCR2_EL3", .state = ARM_CP_STATE_AA64,
566       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 1,
567       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
568     { .name = "CPUPPMCR3_EL3", .state = ARM_CP_STATE_AA64,
569       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 6,
570       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
571 };
572 
573 static void define_neoverse_v1_cp_reginfo(ARMCPU *cpu)
574 {
575     /*
576      * The Neoverse V1 has all of the Neoverse N1's IMPDEF
577      * registers and a few more of its own.
578      */
579     define_arm_cp_regs(cpu, neoverse_n1_cp_reginfo);
580     define_arm_cp_regs(cpu, neoverse_v1_cp_reginfo);
581 }
582 
583 static void aarch64_neoverse_n1_initfn(Object *obj)
584 {
585     ARMCPU *cpu = ARM_CPU(obj);
586     ARMISARegisters *isar = &cpu->isar;
587 
588     cpu->dtb_compatible = "arm,neoverse-n1";
589     set_feature(&cpu->env, ARM_FEATURE_V8);
590     set_feature(&cpu->env, ARM_FEATURE_NEON);
591     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
592     set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
593     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
594     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
595     set_feature(&cpu->env, ARM_FEATURE_EL2);
596     set_feature(&cpu->env, ARM_FEATURE_EL3);
597     set_feature(&cpu->env, ARM_FEATURE_PMU);
598 
599     /* Ordered by B2.4 AArch64 registers by functional group */
600     cpu->clidr = 0x82000023;
601     cpu->ctr = 0x8444c004;
602     cpu->dcz_blocksize = 4;
603     SET_IDREG(isar, ID_AA64DFR0, 0x0000000110305408ull);
604     SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
605     SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
606     SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull);
607     SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
608     SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
609     SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
610     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull);
611     cpu->id_afr0       = 0x00000000;
612     SET_IDREG(isar, ID_DFR0, 0x04010088);
613     SET_IDREG(isar, ID_ISAR0, 0x02101110);
614     SET_IDREG(isar, ID_ISAR1, 0x13112111);
615     SET_IDREG(isar, ID_ISAR2, 0x21232042);
616     SET_IDREG(isar, ID_ISAR3, 0x01112131);
617     SET_IDREG(isar, ID_ISAR4, 0x00010142);
618     SET_IDREG(isar, ID_ISAR5, 0x01011121);
619     SET_IDREG(isar, ID_ISAR6, 0x00000010);
620     SET_IDREG(isar, ID_MMFR0, 0x10201105);
621     SET_IDREG(isar, ID_MMFR1, 0x40000000);
622     SET_IDREG(isar, ID_MMFR2, 0x01260000);
623     SET_IDREG(isar, ID_MMFR3, 0x02122211);
624     SET_IDREG(isar, ID_MMFR4, 0x00021110);
625     SET_IDREG(isar, ID_PFR0, 0x10010131);
626     SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
627     SET_IDREG(isar, ID_PFR2, 0x00000011);
628     cpu->midr = 0x414fd0c1;          /* r4p1 */
629     cpu->revidr = 0;
630 
631     /* From B2.23 CCSIDR_EL1 */
632     /* 64KB L1 dcache */
633     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 7);
634     /* 64KB L1 icache */
635     cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 2);
636     /* 1MB L2 dcache */
637     cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 8, 64, 1 * MiB, 7);
638 
639     /* From B2.98 SCTLR_EL3 */
640     cpu->reset_sctlr = 0x30c50838;
641 
642     /* From B4.23 ICH_VTR_EL2 */
643     cpu->gic_num_lrs = 4;
644     cpu->gic_vpribits = 5;
645     cpu->gic_vprebits = 5;
646     cpu->gic_pribits = 5;
647 
648     /* From B5.1 AdvSIMD AArch64 register summary */
649     cpu->isar.mvfr0 = 0x10110222;
650     cpu->isar.mvfr1 = 0x13211111;
651     cpu->isar.mvfr2 = 0x00000043;
652 
653     /* From D5.1 AArch64 PMU register summary */
654     cpu->isar.reset_pmcr_el0 = 0x410c3000;
655 
656     define_neoverse_n1_cp_reginfo(cpu);
657 }
658 
659 static void aarch64_neoverse_v1_initfn(Object *obj)
660 {
661     ARMCPU *cpu = ARM_CPU(obj);
662     ARMISARegisters *isar = &cpu->isar;
663 
664     cpu->dtb_compatible = "arm,neoverse-v1";
665     set_feature(&cpu->env, ARM_FEATURE_V8);
666     set_feature(&cpu->env, ARM_FEATURE_NEON);
667     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
668     set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
669     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
670     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
671     set_feature(&cpu->env, ARM_FEATURE_EL2);
672     set_feature(&cpu->env, ARM_FEATURE_EL3);
673     set_feature(&cpu->env, ARM_FEATURE_PMU);
674 
675     /* Ordered by 3.2.4 AArch64 registers by functional group */
676     cpu->clidr = 0x82000023;
677     cpu->ctr = 0xb444c004; /* With DIC and IDC set */
678     cpu->dcz_blocksize = 4;
679     cpu->id_aa64afr0 = 0x00000000;
680     cpu->id_aa64afr1 = 0x00000000;
681     SET_IDREG(isar, ID_AA64DFR0, 0x000001f210305519ull);
682     SET_IDREG(isar, ID_AA64DFR1, 0x00000000);
683     SET_IDREG(isar, ID_AA64ISAR0, 0x1011111110212120ull); /* with FEAT_RNG */
684     SET_IDREG(isar, ID_AA64ISAR1, 0x0011000001211032ull);
685     SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull);
686     SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
687     SET_IDREG(isar, ID_AA64MMFR2, 0x0220011102101011ull);
688     SET_IDREG(isar, ID_AA64PFR0, 0x1101110120111112ull); /* GIC filled in later */
689     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull);
690     cpu->id_afr0       = 0x00000000;
691     SET_IDREG(isar, ID_DFR0, 0x15011099);
692     SET_IDREG(isar, ID_ISAR0, 0x02101110);
693     SET_IDREG(isar, ID_ISAR1, 0x13112111);
694     SET_IDREG(isar, ID_ISAR2, 0x21232042);
695     SET_IDREG(isar, ID_ISAR3, 0x01112131);
696     SET_IDREG(isar, ID_ISAR4, 0x00010142);
697     SET_IDREG(isar, ID_ISAR5, 0x11011121);
698     SET_IDREG(isar, ID_ISAR6, 0x01100111);
699     SET_IDREG(isar, ID_MMFR0, 0x10201105);
700     SET_IDREG(isar, ID_MMFR1, 0x40000000);
701     SET_IDREG(isar, ID_MMFR2, 0x01260000);
702     SET_IDREG(isar, ID_MMFR3, 0x02122211);
703     SET_IDREG(isar, ID_MMFR4, 0x01021110);
704     SET_IDREG(isar, ID_PFR0, 0x21110131);
705     SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
706     SET_IDREG(isar, ID_PFR2, 0x00000011);
707     cpu->midr = 0x411FD402;          /* r1p2 */
708     cpu->revidr = 0;
709 
710     /*
711      * The Neoverse-V1 r1p2 TRM lists 32-bit format CCSIDR_EL1 values,
712      * but also says it implements CCIDX, which means they should be
713      * 64-bit format. So here we use values based on the textual
714      * information in chapter 2 of the TRM:
715      *
716      * L1: 4-way set associative 64-byte line size, total size 64K.
717      * L2: 8-way set associative, 64 byte line size, either 512K or 1MB.
718      * L3: No L3 (this matches the CLIDR_EL1 value).
719      */
720     /* 64KB L1 dcache */
721     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 4, 64, 64 * KiB, 0);
722     /* 64KB L1 icache */
723     cpu->ccsidr[1] = cpu->ccsidr[0];
724     /* 1MB L2 cache */
725     cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 8, 64, 1 * MiB, 0);
726 
727     /* From 3.2.115 SCTLR_EL3 */
728     cpu->reset_sctlr = 0x30c50838;
729 
730     /* From 3.4.8 ICC_CTLR_EL3 and 3.4.23 ICH_VTR_EL2 */
731     cpu->gic_num_lrs = 4;
732     cpu->gic_vpribits = 5;
733     cpu->gic_vprebits = 5;
734     cpu->gic_pribits = 5;
735 
736     /* From 3.5.1 AdvSIMD AArch64 register summary */
737     cpu->isar.mvfr0 = 0x10110222;
738     cpu->isar.mvfr1 = 0x13211111;
739     cpu->isar.mvfr2 = 0x00000043;
740 
741     /* From 3.7.5 ID_AA64ZFR0_EL1 */
742     SET_IDREG(isar, ID_AA64ZFR0, 0x0000100000100000);
743     cpu->sve_vq.supported = (1 << 0)  /* 128bit */
744                             | (1 << 1);  /* 256bit */
745 
746     /* From 5.5.1 AArch64 PMU register summary */
747     cpu->isar.reset_pmcr_el0 = 0x41213000;
748 
749     define_neoverse_v1_cp_reginfo(cpu);
750 
751     aarch64_add_pauth_properties(obj);
752     aarch64_add_sve_properties(obj);
753 }
754 
755 static const ARMCPRegInfo cortex_a710_cp_reginfo[] = {
756     { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
757       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 0,
758       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
759       .accessfn = access_actlr_w },
760     { .name = "CPUACTLR2_EL1", .state = ARM_CP_STATE_AA64,
761       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 1,
762       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
763       .accessfn = access_actlr_w },
764     { .name = "CPUACTLR3_EL1", .state = ARM_CP_STATE_AA64,
765       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 2,
766       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
767       .accessfn = access_actlr_w },
768     { .name = "CPUACTLR4_EL1", .state = ARM_CP_STATE_AA64,
769       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 3,
770       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
771       .accessfn = access_actlr_w },
772     { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
773       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 4,
774       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
775       .accessfn = access_actlr_w },
776     { .name = "CPUECTLR2_EL1", .state = ARM_CP_STATE_AA64,
777       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 5,
778       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
779       .accessfn = access_actlr_w },
780     { .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64,
781       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 4,
782       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
783     { .name = "CPUPWRCTLR_EL1", .state = ARM_CP_STATE_AA64,
784       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7,
785       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
786       .accessfn = access_actlr_w },
787     { .name = "ATCR_EL1", .state = ARM_CP_STATE_AA64,
788       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 7, .opc2 = 0,
789       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
790     { .name = "CPUACTLR5_EL1", .state = ARM_CP_STATE_AA64,
791       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 8, .opc2 = 0,
792       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
793       .accessfn = access_actlr_w },
794     { .name = "CPUACTLR6_EL1", .state = ARM_CP_STATE_AA64,
795       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 8, .opc2 = 1,
796       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
797       .accessfn = access_actlr_w },
798     { .name = "CPUACTLR7_EL1", .state = ARM_CP_STATE_AA64,
799       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 8, .opc2 = 2,
800       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
801       .accessfn = access_actlr_w },
802     { .name = "ATCR_EL2", .state = ARM_CP_STATE_AA64,
803       .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 0,
804       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
805     { .name = "AVTCR_EL2", .state = ARM_CP_STATE_AA64,
806       .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 1,
807       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
808     { .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64,
809       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 0,
810       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
811     { .name = "CPUPPMCR2_EL3", .state = ARM_CP_STATE_AA64,
812       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 1,
813       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
814     { .name = "CPUPPMCR4_EL3", .state = ARM_CP_STATE_AA64,
815       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 4,
816       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
817     { .name = "CPUPPMCR5_EL3", .state = ARM_CP_STATE_AA64,
818       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 5,
819       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
820     { .name = "CPUPPMCR6_EL3", .state = ARM_CP_STATE_AA64,
821       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 6,
822       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
823     { .name = "CPUACTLR_EL3", .state = ARM_CP_STATE_AA64,
824       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 4, .opc2 = 0,
825       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
826     { .name = "ATCR_EL3", .state = ARM_CP_STATE_AA64,
827       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 7, .opc2 = 0,
828       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
829     { .name = "CPUPSELR_EL3", .state = ARM_CP_STATE_AA64,
830       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 0,
831       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
832     { .name = "CPUPCR_EL3", .state = ARM_CP_STATE_AA64,
833       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 1,
834       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
835     { .name = "CPUPOR_EL3", .state = ARM_CP_STATE_AA64,
836       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 2,
837       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
838     { .name = "CPUPMR_EL3", .state = ARM_CP_STATE_AA64,
839       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 3,
840       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
841     { .name = "CPUPOR2_EL3", .state = ARM_CP_STATE_AA64,
842       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 4,
843       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
844     { .name = "CPUPMR2_EL3", .state = ARM_CP_STATE_AA64,
845       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 5,
846       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
847     { .name = "CPUPFR_EL3", .state = ARM_CP_STATE_AA64,
848       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 6,
849       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
850     /*
851      * Report CPUCFR_EL1.SCU as 1, as we do not implement the DSU
852      * (and in particular its system registers).
853      */
854     { .name = "CPUCFR_EL1", .state = ARM_CP_STATE_AA64,
855       .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 0,
856       .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 4 },
857 
858     /*
859      * Stub RAMINDEX, as we don't actually implement caches, BTB,
860      * or anything else with cpu internal memory.
861      * "Read" zeros into the IDATA* and DDATA* output registers.
862      */
863     { .name = "RAMINDEX_EL3", .state = ARM_CP_STATE_AA64,
864       .opc0 = 1, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 0,
865       .access = PL3_W, .type = ARM_CP_CONST, .resetvalue = 0 },
866     { .name = "IDATA0_EL3", .state = ARM_CP_STATE_AA64,
867       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 0,
868       .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
869     { .name = "IDATA1_EL3", .state = ARM_CP_STATE_AA64,
870       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 1,
871       .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
872     { .name = "IDATA2_EL3", .state = ARM_CP_STATE_AA64,
873       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 2,
874       .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
875     { .name = "DDATA0_EL3", .state = ARM_CP_STATE_AA64,
876       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 1, .opc2 = 0,
877       .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
878     { .name = "DDATA1_EL3", .state = ARM_CP_STATE_AA64,
879       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 1, .opc2 = 1,
880       .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
881     { .name = "DDATA2_EL3", .state = ARM_CP_STATE_AA64,
882       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 1, .opc2 = 2,
883       .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
884 };
885 
886 static void aarch64_a710_initfn(Object *obj)
887 {
888     ARMCPU *cpu = ARM_CPU(obj);
889     ARMISARegisters *isar = &cpu->isar;
890 
891     cpu->dtb_compatible = "arm,cortex-a710";
892     set_feature(&cpu->env, ARM_FEATURE_V8);
893     set_feature(&cpu->env, ARM_FEATURE_NEON);
894     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
895     set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
896     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
897     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
898     set_feature(&cpu->env, ARM_FEATURE_EL2);
899     set_feature(&cpu->env, ARM_FEATURE_EL3);
900     set_feature(&cpu->env, ARM_FEATURE_PMU);
901 
902     /* Ordered by Section B.4: AArch64 registers */
903     cpu->midr          = 0x412FD471; /* r2p1 */
904     cpu->revidr        = 0;
905     SET_IDREG(isar, ID_PFR0, 0x21110131);
906     SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
907     SET_IDREG(isar, ID_DFR0, 0x16011099);
908     cpu->id_afr0       = 0;
909     SET_IDREG(isar, ID_MMFR0, 0x10201105);
910     SET_IDREG(isar, ID_MMFR1, 0x40000000);
911     SET_IDREG(isar, ID_MMFR2, 0x01260000);
912     SET_IDREG(isar, ID_MMFR3, 0x02122211);
913     SET_IDREG(isar, ID_ISAR0, 0x02101110);
914     SET_IDREG(isar, ID_ISAR1, 0x13112111);
915     SET_IDREG(isar, ID_ISAR2, 0x21232042);
916     SET_IDREG(isar, ID_ISAR3, 0x01112131);
917     SET_IDREG(isar, ID_ISAR4, 0x00010142);
918     SET_IDREG(isar, ID_ISAR5, 0x11011121); /* with Crypto */
919     SET_IDREG(isar, ID_MMFR4, 0x21021110);
920     SET_IDREG(isar, ID_ISAR6, 0x01111111);
921     cpu->isar.mvfr0    = 0x10110222;
922     cpu->isar.mvfr1    = 0x13211111;
923     cpu->isar.mvfr2    = 0x00000043;
924     SET_IDREG(isar, ID_PFR2, 0x00000011);
925     SET_IDREG(isar, ID_AA64PFR0, 0x1201111120111112ull); /* GIC filled in later */
926     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000221ull);
927     SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */
928     SET_IDREG(isar, ID_AA64DFR0, 0x000011f010305619ull);
929     SET_IDREG(isar, ID_AA64DFR1, 0);
930     cpu->id_aa64afr0       = 0;
931     cpu->id_aa64afr1       = 0;
932     SET_IDREG(isar, ID_AA64ISAR0, 0x0221111110212120ull); /* with Crypto */
933     SET_IDREG(isar, ID_AA64ISAR1, 0x0010111101211052ull);
934     SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101122ull);
935     SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
936     SET_IDREG(isar, ID_AA64MMFR2, 0x1221011110101011ull);
937     cpu->clidr             = 0x0000001482000023ull;
938     cpu->gm_blocksize      = 4;
939     cpu->ctr               = 0x000000049444c004ull;
940     cpu->dcz_blocksize     = 4;
941     /* TODO FEAT_MPAM: mpamidr_el1 = 0x0000_0001_0006_003f */
942 
943     /* Section B.5.2: PMCR_EL0 */
944     cpu->isar.reset_pmcr_el0 = 0xa000;  /* with 20 counters */
945 
946     /* Section B.6.7: ICH_VTR_EL2 */
947     cpu->gic_num_lrs = 4;
948     cpu->gic_vpribits = 5;
949     cpu->gic_vprebits = 5;
950     cpu->gic_pribits = 5;
951 
952     /* Section 14: Scalable Vector Extensions support */
953     cpu->sve_vq.supported = 1 << 0;  /* 128bit */
954 
955     /*
956      * The Cortex-A710 TRM does not list CCSIDR values.  The layout of
957      * the caches is described in text in Table 7-1, Table 8-1, and Table 9-1.
958      *
959      * L1: 4-way set associative 64-byte line size, total either 32K or 64K.
960      * L2: 8-way set associative 64 byte line size, total either 256K or 512K.
961      */
962     /* L1 dcache */
963     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 4, 64, 64 * KiB, 0);
964     /* L1 icache */
965     cpu->ccsidr[1] = cpu->ccsidr[0];
966     /* L2 cache */
967     cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 8, 64, 512 * KiB, 0);
968 
969     /* FIXME: Not documented -- copied from neoverse-v1 */
970     cpu->reset_sctlr = 0x30c50838;
971 
972     define_arm_cp_regs(cpu, cortex_a710_cp_reginfo);
973 
974     aarch64_add_pauth_properties(obj);
975     aarch64_add_sve_properties(obj);
976 }
977 
978 /* Extra IMPDEF regs in the N2 beyond those in the A710 */
979 static const ARMCPRegInfo neoverse_n2_cp_reginfo[] = {
980     { .name = "CPURNDBR_EL3", .state = ARM_CP_STATE_AA64,
981       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 3, .opc2 = 0,
982       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
983     { .name = "CPURNDPEID_EL3", .state = ARM_CP_STATE_AA64,
984       .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 3, .opc2 = 1,
985       .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
986 };
987 
988 static void aarch64_neoverse_n2_initfn(Object *obj)
989 {
990     ARMCPU *cpu = ARM_CPU(obj);
991     ARMISARegisters *isar = &cpu->isar;
992 
993     cpu->dtb_compatible = "arm,neoverse-n2";
994     set_feature(&cpu->env, ARM_FEATURE_V8);
995     set_feature(&cpu->env, ARM_FEATURE_NEON);
996     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
997     set_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
998     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
999     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
1000     set_feature(&cpu->env, ARM_FEATURE_EL2);
1001     set_feature(&cpu->env, ARM_FEATURE_EL3);
1002     set_feature(&cpu->env, ARM_FEATURE_PMU);
1003 
1004     /* Ordered by Section B.5: AArch64 ID registers */
1005     cpu->midr          = 0x410FD493; /* r0p3 */
1006     cpu->revidr        = 0;
1007     SET_IDREG(isar, ID_PFR0, 0x21110131);
1008     SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
1009     SET_IDREG(isar, ID_DFR0, 0x16011099);
1010     cpu->id_afr0       = 0;
1011     SET_IDREG(isar, ID_MMFR0, 0x10201105);
1012     SET_IDREG(isar, ID_MMFR1, 0x40000000);
1013     SET_IDREG(isar, ID_MMFR2, 0x01260000);
1014     SET_IDREG(isar, ID_MMFR3, 0x02122211);
1015     SET_IDREG(isar, ID_ISAR0, 0x02101110);
1016     SET_IDREG(isar, ID_ISAR1, 0x13112111);
1017     SET_IDREG(isar, ID_ISAR2, 0x21232042);
1018     SET_IDREG(isar, ID_ISAR3, 0x01112131);
1019     SET_IDREG(isar, ID_ISAR4, 0x00010142);
1020     SET_IDREG(isar, ID_ISAR5, 0x11011121); /* with Crypto */
1021     SET_IDREG(isar, ID_MMFR4, 0x01021110);
1022     SET_IDREG(isar, ID_ISAR6, 0x01111111);
1023     cpu->isar.mvfr0    = 0x10110222;
1024     cpu->isar.mvfr1    = 0x13211111;
1025     cpu->isar.mvfr2    = 0x00000043;
1026     SET_IDREG(isar, ID_PFR2, 0x00000011);
1027     SET_IDREG(isar, ID_AA64PFR0, 0x1201111120111112ull); /* GIC filled in later */
1028     SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000221ull);
1029     SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */
1030     SET_IDREG(isar, ID_AA64DFR0, 0x000011f210305619ull);
1031     SET_IDREG(isar, ID_AA64DFR1, 0);
1032     cpu->id_aa64afr0       = 0;
1033     cpu->id_aa64afr1       = 0;
1034     SET_IDREG(isar, ID_AA64ISAR0, 0x1221111110212120ull); /* with Crypto and FEAT_RNG */
1035     SET_IDREG(isar, ID_AA64ISAR1, 0x0011111101211052ull);
1036     SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101125ull);
1037     SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
1038     SET_IDREG(isar, ID_AA64MMFR2, 0x1221011112101011ull);
1039     cpu->clidr             = 0x0000001482000023ull;
1040     cpu->gm_blocksize      = 4;
1041     cpu->ctr               = 0x00000004b444c004ull;
1042     cpu->dcz_blocksize     = 4;
1043     /* TODO FEAT_MPAM: mpamidr_el1 = 0x0000_0001_001e_01ff */
1044 
1045     /* Section B.7.2: PMCR_EL0 */
1046     cpu->isar.reset_pmcr_el0 = 0x3000;  /* with 6 counters */
1047 
1048     /* Section B.8.9: ICH_VTR_EL2 */
1049     cpu->gic_num_lrs = 4;
1050     cpu->gic_vpribits = 5;
1051     cpu->gic_vprebits = 5;
1052     cpu->gic_pribits = 5;
1053 
1054     /* Section 14: Scalable Vector Extensions support */
1055     cpu->sve_vq.supported = 1 << 0;  /* 128bit */
1056 
1057     /*
1058      * The Neoverse N2 TRM does not list CCSIDR values.  The layout of
1059      * the caches is described in text in Table 7-1, Table 8-1, and Table 9-1.
1060      *
1061      * L1: 4-way set associative 64-byte line size, total 64K.
1062      * L2: 8-way set associative 64 byte line size, total either 512K or 1024K.
1063      */
1064     /* L1 dcache */
1065     cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 4, 64, 64 * KiB, 0);
1066     /* L1 icache */
1067     cpu->ccsidr[1] = cpu->ccsidr[0];
1068     /* L2 cache */
1069     cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 8, 64, 512 * KiB, 0);
1070     /* FIXME: Not documented -- copied from neoverse-v1 */
1071     cpu->reset_sctlr = 0x30c50838;
1072 
1073     /*
1074      * The Neoverse N2 has all of the Cortex-A710 IMPDEF registers,
1075      * and a few more RNG related ones.
1076      */
1077     define_arm_cp_regs(cpu, cortex_a710_cp_reginfo);
1078     define_arm_cp_regs(cpu, neoverse_n2_cp_reginfo);
1079 
1080     aarch64_add_pauth_properties(obj);
1081     aarch64_add_sve_properties(obj);
1082 }
1083 
1084 /*
1085  * -cpu max: a CPU with as many features enabled as our emulation supports.
1086  * The version of '-cpu max' for qemu-system-arm is defined in cpu32.c;
1087  * this only needs to handle 64 bits.
1088  */
1089 void aarch64_max_tcg_initfn(Object *obj)
1090 {
1091     ARMCPU *cpu = ARM_CPU(obj);
1092     ARMISARegisters *isar = &cpu->isar;
1093     uint64_t t;
1094     uint32_t u;
1095 
1096     /*
1097      * Unset ARM_FEATURE_BACKCOMPAT_CNTFRQ, which we would otherwise default
1098      * to because we started with aarch64_a57_initfn(). A 'max' CPU might
1099      * be a v8.6-or-later one, in which case the cntfrq must be 1GHz; and
1100      * because it is our "may change" CPU type we are OK with it not being
1101      * backwards-compatible with how it worked in old QEMU.
1102      */
1103     unset_feature(&cpu->env, ARM_FEATURE_BACKCOMPAT_CNTFRQ);
1104 
1105     /*
1106      * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
1107      * one and try to apply errata workarounds or use impdef features we
1108      * don't provide.
1109      * An IMPLEMENTER field of 0 means "reserved for software use";
1110      * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
1111      * to see which features are present";
1112      * the VARIANT, PARTNUM and REVISION fields are all implementation
1113      * defined and we choose to define PARTNUM just in case guest
1114      * code needs to distinguish this QEMU CPU from other software
1115      * implementations, though this shouldn't be needed.
1116      */
1117     t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
1118     t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
1119     t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
1120     t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
1121     t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
1122     cpu->midr = t;
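    /*
     * Editorial note, illustrative arithmetic only: with IMPLEMENTER, VARIANT
     * and REVISION all zero, ARCHITECTURE = 0xf in bits [19:16] and
     * PARTNUM = 'Q' (0x51) in bits [15:4], the MIDR built here is 0x000f0510.
     */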
1123 
1124     /*
1125      * We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS}
1126      * are zero.
1127      */
1128     u = cpu->clidr;
1129     u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0);
1130     u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0);
1131     cpu->clidr = u;
1132 
1133     /*
1134      * Set CTR_EL0.DIC and IDC to tell the guest it doesn't need to
1135      * do any cache maintenance for data-to-instruction or
1136      * instruction-to-data coherence. (Our cache ops are nops.)
1137      */
1138     t = cpu->ctr;
1139     t = FIELD_DP64(t, CTR_EL0, IDC, 1);
1140     t = FIELD_DP64(t, CTR_EL0, DIC, 1);
1141     cpu->ctr = t;
1142 
1143     t = GET_IDREG(isar, ID_AA64ISAR0);
1144     t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2);      /* FEAT_PMULL */
1145     t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);     /* FEAT_SHA1 */
1146     t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2);     /* FEAT_SHA512 */
1147     t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);    /* FEAT_CRC32 */
1148     t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);   /* FEAT_LSE */
1149     t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);      /* FEAT_RDM */
1150     t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);     /* FEAT_SHA3 */
1151     t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);      /* FEAT_SM3 */
1152     t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);      /* FEAT_SM4 */
1153     t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);       /* FEAT_DotProd */
1154     t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);      /* FEAT_FHM */
1155     t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2);       /* FEAT_FlagM2 */
1156     t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2);      /* FEAT_TLBIRANGE */
1157     t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);     /* FEAT_RNG */
1158     SET_IDREG(isar, ID_AA64ISAR0, t);
1159 
1160     t = GET_IDREG(isar, ID_AA64ISAR1);
1161     t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);      /* FEAT_DPB2 */
1162     t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_FPACCOMBINED);
1163     t = FIELD_DP64(t, ID_AA64ISAR1, API, 1);
1164     t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);    /* FEAT_JSCVT */
1165     t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);     /* FEAT_FCMA */
1166     t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2);    /* FEAT_LRCPC2 */
1167     t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);  /* FEAT_FRINTTS */
1168     t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);       /* FEAT_SB */
1169     t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);  /* FEAT_SPECRES */
1170     t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 2);     /* FEAT_BF16, FEAT_EBF16 */
1171     t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1);      /* FEAT_DGH */
1172     t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);     /* FEAT_I8MM */
1173     t = FIELD_DP64(t, ID_AA64ISAR1, XS, 1);       /* FEAT_XS */
1174     SET_IDREG(isar, ID_AA64ISAR1, t);
1175 
1176     t = GET_IDREG(isar, ID_AA64ISAR2);
1177     t = FIELD_DP64(t, ID_AA64ISAR2, RPRES, 1);    /* FEAT_RPRES */
1178     t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1);     /* FEAT_MOPS */
1179     t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1);       /* FEAT_HBC */
1180     t = FIELD_DP64(t, ID_AA64ISAR2, WFXT, 2);     /* FEAT_WFxT */
1181     SET_IDREG(isar, ID_AA64ISAR2, t);
1182 
1183     t = GET_IDREG(isar, ID_AA64PFR0);
1184     t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);        /* FEAT_FP16 */
1185     t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);   /* FEAT_FP16 */
1186     t = FIELD_DP64(t, ID_AA64PFR0, RAS, 2);       /* FEAT_RASv1p1 + FEAT_DoubleFault */
1187     t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
1188     t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);      /* FEAT_SEL2 */
1189     t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);       /* FEAT_DIT */
1190     t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 3);      /* FEAT_CSV2_3 */
1191     t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1);      /* FEAT_CSV3 */
1192     SET_IDREG(isar, ID_AA64PFR0, t);
1193 
1194     t = GET_IDREG(isar, ID_AA64PFR1);
1195     t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);        /* FEAT_BTI */
1196     t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);      /* FEAT_SSBS2 */
1197     /*
1198      * Begin with full support for MTE. This will be downgraded to MTE=0
1199      * during realize if the board provides no tag memory, much like
1200      * we do for EL2 with the virtualization=on property.
1201      */
1202     t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);       /* FEAT_MTE3 */
1203     t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0);  /* FEAT_RASv1p1 + FEAT_DoubleFault */
1204     t = FIELD_DP64(t, ID_AA64PFR1, SME, 1);       /* FEAT_SME */
1205     t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_3 */
1206     t = FIELD_DP64(t, ID_AA64PFR1, NMI, 1);       /* FEAT_NMI */
1207     SET_IDREG(isar, ID_AA64PFR1, t);
1208 
1209     t = GET_IDREG(isar, ID_AA64MMFR0);
1210     t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */
1211     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1);   /* 16k pages supported */
1212     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */
1213     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN64_2, 2); /* 64k stage2 supported */
1214     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2);  /*  4k stage2 supported */
1215     t = FIELD_DP64(t, ID_AA64MMFR0, FGT, 1);       /* FEAT_FGT */
1216     t = FIELD_DP64(t, ID_AA64MMFR0, ECV, 2);       /* FEAT_ECV */
1217     SET_IDREG(isar, ID_AA64MMFR0, t);
1218 
1219     t = GET_IDREG(isar, ID_AA64MMFR1);
1220     t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2);   /* FEAT_HAFDBS */
1221     t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
1222     t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);       /* FEAT_VHE */
1223     t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 2);     /* FEAT_HPDS2 */
1224     t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);       /* FEAT_LOR */
1225     t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 3);      /* FEAT_PAN3 */
1226     t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1);      /* FEAT_XNX */
1227     t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 2);      /* FEAT_ETS2 */
1228     t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1);      /* FEAT_HCX */
1229     t = FIELD_DP64(t, ID_AA64MMFR1, AFP, 1);      /* FEAT_AFP */
1230     t = FIELD_DP64(t, ID_AA64MMFR1, TIDCP1, 1);   /* FEAT_TIDCP1 */
1231     t = FIELD_DP64(t, ID_AA64MMFR1, CMOW, 1);     /* FEAT_CMOW */
1232     SET_IDREG(isar, ID_AA64MMFR1, t);
1233 
1234     t = GET_IDREG(isar, ID_AA64MMFR2);
1235     t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1);      /* FEAT_TTCNP */
1236     t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);      /* FEAT_UAO */
1237     t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1);     /* FEAT_IESB */
1238     t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1);  /* FEAT_LVA */
1239     t = FIELD_DP64(t, ID_AA64MMFR2, NV, 2);       /* FEAT_NV2 */
1240     t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1);       /* FEAT_TTST */
1241     t = FIELD_DP64(t, ID_AA64MMFR2, AT, 1);       /* FEAT_LSE2 */
1242     t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1);      /* FEAT_IDST */
1243     t = FIELD_DP64(t, ID_AA64MMFR2, FWB, 1);      /* FEAT_S2FWB */
1244     t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1);      /* FEAT_TTL */
1245     t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2);      /* FEAT_BBM at level 2 */
1246     t = FIELD_DP64(t, ID_AA64MMFR2, EVT, 2);      /* FEAT_EVT */
1247     t = FIELD_DP64(t, ID_AA64MMFR2, E0PD, 1);     /* FEAT_E0PD */
1248     SET_IDREG(isar, ID_AA64MMFR2, t);
1249 
1250     FIELD_DP64_IDREG(isar, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */
1251 
1252     t = GET_IDREG(isar, ID_AA64ZFR0);
1253     t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
1254     t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2);       /* FEAT_SVE_PMULL128 */
1255     t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);   /* FEAT_SVE_BitPerm */
1256     t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 2);  /* FEAT_BF16, FEAT_EBF16 */
1257     t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);      /* FEAT_SVE_SHA3 */
1258     t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);       /* FEAT_SVE_SM4 */
1259     t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);      /* FEAT_I8MM */
1260     t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);     /* FEAT_F32MM */
1261     t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);     /* FEAT_F64MM */
1262     SET_IDREG(isar, ID_AA64ZFR0, t);
1263 
1264     t = GET_IDREG(isar, ID_AA64DFR0);
1265     t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 10); /* FEAT_Debugv8p8 */
1266     t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6);    /* FEAT_PMUv3p5 */
1267     t = FIELD_DP64(t, ID_AA64DFR0, HPMN0, 1);     /* FEAT_HPMN0 */
1268     SET_IDREG(isar, ID_AA64DFR0, t);
1269 
1270     t = GET_IDREG(isar, ID_AA64SMFR0);
1271     t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1);   /* FEAT_SME */
1272     t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1);   /* FEAT_SME */
1273     t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1);   /* FEAT_SME */
1274     t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf);  /* FEAT_SME */
1275     t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1);   /* FEAT_SME_F64F64 */
1276     t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */
1277     t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1);     /* FEAT_SME_FA64 */
1278     SET_IDREG(isar, ID_AA64SMFR0, t);
1279 
1280     /* Replicate the same data to the 32-bit id registers.  */
1281     aa32_max_features(cpu);
1282 
1283 #ifdef CONFIG_USER_ONLY
1284     /*
1285      * For usermode -cpu max we can use a larger and more efficient DCZ
1286      * blocksize since we don't have to follow what the hardware does.
1287      */
1288     cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
1289     cpu->dcz_blocksize = 7; /*  512 bytes */
1290 #endif
1291     cpu->gm_blocksize = 6;  /*  256 bytes */
1292 
1293     cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ);
1294     cpu->sme_vq.supported = SVE_VQ_POW2_MAP;
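    /*
     * Editorial note: for 'max', every SVE vector length from 1 to ARM_MAX_VQ
     * quadwords is advertised, while the SME map (as the macro name suggests)
     * is restricted to the power-of-two vector lengths.
     */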
1295 
1296     aarch64_add_pauth_properties(obj);
1297     aarch64_add_sve_properties(obj);
1298     aarch64_add_sme_properties(obj);
1299     object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
1300                         cpu_max_set_sve_max_vq, NULL, NULL);
1301     object_property_add_bool(obj, "x-rme", cpu_arm_get_rme, cpu_arm_set_rme);
1302     object_property_add(obj, "x-l0gptsz", "uint32", cpu_max_get_l0gptsz,
1303                         cpu_max_set_l0gptsz, NULL, NULL);
1304     qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
1305 }
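/*
 * Illustrative usage, editorial note: besides the SVE/SME/pauth properties,
 * '-cpu max' picks up the "lpa2" switch defined above (default on);
 * "-cpu max,lpa2=off" is presumably the knob for guests that cannot cope
 * with the FEAT_LPA2 translation variants.
 */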
1306 
1307 static const ARMCPUInfo aarch64_cpus[] = {
1308     { .name = "cortex-a35",         .initfn = aarch64_a35_initfn },
1309     { .name = "cortex-a55",         .initfn = aarch64_a55_initfn },
1310     { .name = "cortex-a72",         .initfn = aarch64_a72_initfn },
1311     { .name = "cortex-a76",         .initfn = aarch64_a76_initfn },
1312     { .name = "cortex-a710",        .initfn = aarch64_a710_initfn },
1313     { .name = "a64fx",              .initfn = aarch64_a64fx_initfn },
1314     { .name = "neoverse-n1",        .initfn = aarch64_neoverse_n1_initfn },
1315     { .name = "neoverse-v1",        .initfn = aarch64_neoverse_v1_initfn },
1316     { .name = "neoverse-n2",        .initfn = aarch64_neoverse_n2_initfn },
1317 };
1318 
1319 static void aarch64_cpu_register_types(void)
1320 {
1321     size_t i;
1322 
1323     for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
1324         arm_cpu_register(&aarch64_cpus[i]);
1325     }
1326 }
1327 
1328 type_init(aarch64_cpu_register_types)
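/*
 * Illustrative usage, editorial note: once type_init() has run, these models
 * are listed alongside the 32-bit ones by "qemu-system-aarch64 -cpu help" and
 * selected in the usual way, e.g. "-machine virt -cpu neoverse-n1".
 */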
1329