xref: /qemu/target/arm/cpu64.c (revision 6e76d35f2375c3ef58aaaccbe5cee54b20a1f74a)
1 /*
2  * QEMU AArch64 CPU
3  *
4  * Copyright (c) 2013 Linaro Ltd
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  */
20 
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
23 #include "cpu.h"
24 #ifdef CONFIG_TCG
25 #include "hw/core/tcg-cpu-ops.h"
26 #endif /* CONFIG_TCG */
27 #include "qemu/module.h"
28 #if !defined(CONFIG_USER_ONLY)
29 #include "hw/loader.h"
30 #endif
31 #include "sysemu/kvm.h"
32 #include "sysemu/hvf.h"
33 #include "kvm_arm.h"
34 #include "hvf_arm.h"
35 #include "qapi/visitor.h"
36 #include "hw/qdev-properties.h"
37 #include "internals.h"
38 
39 
/*
 * Instance init for the Cortex-A57 CPU model.
 *
 * The ID-register, cache-geometry and GIC values below are fixed
 * constants describing the modelled core; treat them as a transcription
 * of the hardware documentation (presumably the Cortex-A57 TRM -- verify
 * there before changing any individual field) rather than as tunables.
 */
static void aarch64_a57_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a57";
    /* Architectural features implemented by this core. */
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    /* This model may be used as a KVM guest CPU type. */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
    cpu->midr = 0x411fd070;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    /* GIC virtualization interface geometry (list registers, priority bits). */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;
    /* Implementation-defined system registers shared by A72/A57/A53. */
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
93 
/*
 * Instance init for the Cortex-A53 CPU model.
 *
 * As with the other Cortex models in this file, the ID-register values
 * are fixed constants describing the modelled core (presumably from the
 * Cortex-A53 TRM -- verify there before changing any field).
 */
static void aarch64_a53_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a53";
    /* Architectural features implemented by this core. */
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    /* This model may be used as a KVM guest CPU type. */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
    cpu->midr = 0x410fd034;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x84448004; /* L1Ip = VIPT */
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    /* GIC virtualization interface geometry (list registers, priority bits). */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;
    /* Implementation-defined system registers shared by A72/A57/A53. */
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
147 
/*
 * Instance init for the Cortex-A72 CPU model.
 *
 * ID-register values are fixed constants describing the modelled core
 * (presumably from the Cortex-A72 TRM -- verify there before changing
 * any field).
 *
 * NOTE(review): unlike the A57/A53 models above, no cpu->kvm_target is
 * assigned here -- presumably this model is not offered as a KVM guest
 * CPU type; confirm before relying on it under KVM.
 */
static void aarch64_a72_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a72";
    /* Architectural features implemented by this core. */
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->midr = 0x410fd083;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034080;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    /* GIC virtualization interface geometry (list registers, priority bits). */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;
    /* Implementation-defined system registers shared by A72/A57/A53. */
    define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
199 
/*
 * Instance init for the Cortex-A76 CPU model.
 *
 * The existing section references ("B2.4", "B2.18", ...) point into the
 * CPU's reference documentation; the values are fixed constants
 * describing the modelled core and should only change together with
 * that documentation.
 */
static void aarch64_a76_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a76";
    /* Architectural features implemented by this core. */
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);

    /* Ordered by B2.4 AArch64 registers by functional group */
    cpu->clidr = 0x82000023;
    cpu->ctr = 0x8444C004;
    cpu->dcz_blocksize = 4;
    cpu->isar.id_aa64dfr0  = 0x0000000010305408ull;
    cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
    cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
    cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull;
    cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
    cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
    cpu->isar.id_aa64pfr0  = 0x1100000010111112ull; /* GIC filled in later */
    cpu->isar.id_aa64pfr1  = 0x0000000000000010ull;
    cpu->id_afr0       = 0x00000000;
    cpu->isar.id_dfr0  = 0x04010088;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00010142;
    cpu->isar.id_isar5 = 0x01011121;
    cpu->isar.id_isar6 = 0x00000010;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02122211;
    cpu->isar.id_mmfr4 = 0x00021110;
    cpu->isar.id_pfr0  = 0x10010131;
    cpu->isar.id_pfr1  = 0x00010000; /* GIC filled in later */
    cpu->isar.id_pfr2  = 0x00000011;
    cpu->midr = 0x414fd0b1;          /* r4p1 */
    cpu->revidr = 0;

    /* From B2.18 CCSIDR_EL1 */
    cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */
    cpu->ccsidr[2] = 0x707fe03a; /* 512KB L2 cache */

    /* From B2.93 SCTLR_EL3 */
    cpu->reset_sctlr = 0x30c50838;

    /* From B4.23 ICH_VTR_EL2 */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;

    /* From B5.1 AdvSIMD AArch64 register summary */
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x13211111;
    cpu->isar.mvfr2 = 0x00000043;
}
265 
/*
 * Instance init for the Neoverse-N1 CPU model.
 *
 * Structurally parallel to aarch64_a76_initfn(); the section references
 * ("B2.4", "B2.23", ...) point into the CPU's reference documentation,
 * and the values are fixed constants describing the modelled core.
 */
static void aarch64_neoverse_n1_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,neoverse-n1";
    /* Architectural features implemented by this core. */
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);

    /* Ordered by B2.4 AArch64 registers by functional group */
    cpu->clidr = 0x82000023;
    cpu->ctr = 0x8444c004;
    cpu->dcz_blocksize = 4;
    cpu->isar.id_aa64dfr0  = 0x0000000110305408ull;
    cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
    cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
    cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull;
    cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
    cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
    cpu->isar.id_aa64pfr0  = 0x1100000010111112ull; /* GIC filled in later */
    cpu->isar.id_aa64pfr1  = 0x0000000000000020ull;
    cpu->id_afr0       = 0x00000000;
    cpu->isar.id_dfr0  = 0x04010088;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00010142;
    cpu->isar.id_isar5 = 0x01011121;
    cpu->isar.id_isar6 = 0x00000010;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02122211;
    cpu->isar.id_mmfr4 = 0x00021110;
    cpu->isar.id_pfr0  = 0x10010131;
    cpu->isar.id_pfr1  = 0x00010000; /* GIC filled in later */
    cpu->isar.id_pfr2  = 0x00000011;
    cpu->midr = 0x414fd0c1;          /* r4p1 */
    cpu->revidr = 0;

    /* From B2.23 CCSIDR_EL1 */
    cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe03a; /* 1MB L2 cache */

    /* From B2.98 SCTLR_EL3 */
    cpu->reset_sctlr = 0x30c50838;

    /* From B4.23 ICH_VTR_EL2 */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;

    /* From B5.1 AdvSIMD AArch64 register summary */
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x13211111;
    cpu->isar.mvfr2 = 0x00000043;
}
331 
/*
 * Reconcile the user-supplied sve<N>/sve-max-vq properties with the CPU
 * model's supported vector lengths, producing the final sve_vq_map.
 * On success cpu->sve_max_vq holds the actual maximum supported length;
 * on any conflict an error is set in *errp and the function returns early.
 */
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
{
    /*
     * If any vector lengths are explicitly enabled with sve<N> properties,
     * then all other lengths are implicitly disabled.  If sve-max-vq is
     * specified then it is the same as explicitly enabling all lengths
     * up to and including the specified maximum, which means all larger
     * lengths will be implicitly disabled.  If no sve<N> properties
     * are enabled and sve-max-vq is not specified, then all lengths not
     * explicitly disabled will be enabled.  Additionally, all power-of-two
     * vector lengths less than the maximum enabled length will be
     * automatically enabled and all vector lengths larger than the largest
     * disabled power-of-two vector length will be automatically disabled.
     * Errors are generated if the user provided input that interferes with
     * any of the above.  Finally, if SVE is not disabled, then at least one
     * vector length must be enabled.
     */
    DECLARE_BITMAP(tmp, ARM_MAX_VQ);
    /* Throughout, vq N (N quadwords = N*128 bits) maps to bit N-1. */
    uint32_t vq, max_vq = 0;

    /*
     * CPU models specify a set of supported vector lengths which are
     * enabled by default.  Attempting to enable any vector length not set
     * in the supported bitmap results in an error.  When KVM is enabled we
     * fetch the supported bitmap from the host.
     */
    if (kvm_enabled() && kvm_arm_sve_supported()) {
        kvm_arm_sve_get_vls(CPU(cpu), cpu->sve_vq_supported);
    } else if (kvm_enabled()) {
        assert(!cpu_isar_feature(aa64_sve, cpu));
    }

    /*
     * Process explicit sve<N> properties.
     * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
     * Check first for any sve<N> enabled.
     */
    if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) {
        max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1;

        /* An explicitly enabled length must not exceed sve-max-vq. */
        if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
            error_setg(errp, "cannot enable sve%d", max_vq * 128);
            error_append_hint(errp, "sve%d is larger than the maximum vector "
                              "length, sve-max-vq=%d (%d bits)\n",
                              max_vq * 128, cpu->sve_max_vq,
                              cpu->sve_max_vq * 128);
            return;
        }

        if (kvm_enabled()) {
            /*
             * For KVM we have to automatically enable all supported
             * uninitialized lengths, even when the smaller lengths are
             * not all powers-of-two.
             */
            bitmap_andnot(tmp, cpu->sve_vq_supported, cpu->sve_vq_init, max_vq);
            bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
        } else {
            /* Propagate enabled bits down through required powers-of-two. */
            for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
                if (!test_bit(vq - 1, cpu->sve_vq_init)) {
                    set_bit(vq - 1, cpu->sve_vq_map);
                }
            }
        }
    } else if (cpu->sve_max_vq == 0) {
        /*
         * No explicit bits enabled, and no implicit bits from sve-max-vq.
         */
        if (!cpu_isar_feature(aa64_sve, cpu)) {
            /* SVE is disabled and so are all vector lengths.  Good. */
            return;
        }

        /* Find vq = the smallest explicitly-disabled length (if any). */
        if (kvm_enabled()) {
            /* Disabling a supported length disables all larger lengths. */
            for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
                if (test_bit(vq - 1, cpu->sve_vq_init) &&
                    test_bit(vq - 1, cpu->sve_vq_supported)) {
                    break;
                }
            }
        } else {
            /* Disabling a power-of-two disables all larger lengths. */
            for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) {
                if (test_bit(vq - 1, cpu->sve_vq_init)) {
                    break;
                }
            }
        }

        /* Everything below the disabled length (or the full range) stays. */
        max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
        bitmap_andnot(cpu->sve_vq_map, cpu->sve_vq_supported,
                      cpu->sve_vq_init, max_vq);
        if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) {
            error_setg(errp, "cannot disable sve%d", vq * 128);
            error_append_hint(errp, "Disabling sve%d results in all "
                              "vector lengths being disabled.\n",
                              vq * 128);
            error_append_hint(errp, "With SVE enabled, at least one "
                              "vector length must be enabled.\n");
            return;
        }

        max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
    }

    /*
     * Process the sve-max-vq property.
     * Note that we know from the above that no bit above
     * sve-max-vq is currently set.
     */
    if (cpu->sve_max_vq != 0) {
        max_vq = cpu->sve_max_vq;

        /* sve-max-vq conflicts with an explicit sve<N>=off for that N. */
        if (!test_bit(max_vq - 1, cpu->sve_vq_map) &&
            test_bit(max_vq - 1, cpu->sve_vq_init)) {
            error_setg(errp, "cannot disable sve%d", max_vq * 128);
            error_append_hint(errp, "The maximum vector length must be "
                              "enabled, sve-max-vq=%d (%d bits)\n",
                              max_vq, max_vq * 128);
            return;
        }

        /* Set all bits not explicitly set within sve-max-vq. */
        bitmap_complement(tmp, cpu->sve_vq_init, max_vq);
        bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
    }

    /*
     * We should know what max-vq is now.  Also, as we're done
     * manipulating sve-vq-map, we ensure any bits above max-vq
     * are clear, just in case anybody looks.
     */
    assert(max_vq != 0);
    bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);

    /* Ensure the set of lengths matches what is supported. */
    bitmap_xor(tmp, cpu->sve_vq_map, cpu->sve_vq_supported, max_vq);
    if (!bitmap_empty(tmp, max_vq)) {
        /* vq is the largest length where map and supported disagree. */
        vq = find_last_bit(tmp, max_vq) + 1;
        if (test_bit(vq - 1, cpu->sve_vq_map)) {
            /* Enabled but not supported by this CPU model / host. */
            if (cpu->sve_max_vq) {
                error_setg(errp, "cannot set sve-max-vq=%d", cpu->sve_max_vq);
                error_append_hint(errp, "This CPU does not support "
                                  "the vector length %d-bits.\n", vq * 128);
                error_append_hint(errp, "It may not be possible to use "
                                  "sve-max-vq with this CPU. Try "
                                  "using only sve<N> properties.\n");
            } else {
                error_setg(errp, "cannot enable sve%d", vq * 128);
                error_append_hint(errp, "This CPU does not support "
                                  "the vector length %d-bits.\n", vq * 128);
            }
            return;
        } else {
            /* Supported but disabled: only legal for TCG powers-of-two. */
            if (kvm_enabled()) {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "The KVM host requires all "
                                  "supported vector lengths smaller "
                                  "than %d bits to also be enabled.\n",
                                  max_vq * 128);
                return;
            } else {
                /* Ensure all required powers-of-two are enabled. */
                for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
                    if (!test_bit(vq - 1, cpu->sve_vq_map)) {
                        error_setg(errp, "cannot disable sve%d", vq * 128);
                        error_append_hint(errp, "sve%d is required as it "
                                          "is a power-of-two length smaller "
                                          "than the maximum, sve%d\n",
                                          vq * 128, max_vq * 128);
                        return;
                    }
                }
            }
        }
    }

    /*
     * Now that we validated all our vector lengths, the only question
     * left to answer is if we even want SVE at all.
     */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        error_setg(errp, "cannot enable sve%d", max_vq * 128);
        error_append_hint(errp, "SVE must be enabled to enable vector "
                          "lengths.\n");
        error_append_hint(errp, "Add sve=on to the CPU property list.\n");
        return;
    }

    /* From now on sve_max_vq is the actual maximum supported length. */
    cpu->sve_max_vq = max_vq;
}
525 
526 static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
527                                    void *opaque, Error **errp)
528 {
529     ARMCPU *cpu = ARM_CPU(obj);
530     uint32_t value;
531 
532     /* All vector lengths are disabled when SVE is off. */
533     if (!cpu_isar_feature(aa64_sve, cpu)) {
534         value = 0;
535     } else {
536         value = cpu->sve_max_vq;
537     }
538     visit_type_uint32(v, name, &value, errp);
539 }
540 
541 static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
542                                    void *opaque, Error **errp)
543 {
544     ARMCPU *cpu = ARM_CPU(obj);
545     uint32_t max_vq;
546 
547     if (!visit_type_uint32(v, name, &max_vq, errp)) {
548         return;
549     }
550 
551     if (kvm_enabled() && !kvm_arm_sve_supported()) {
552         error_setg(errp, "cannot set sve-max-vq");
553         error_append_hint(errp, "SVE not supported by KVM on this host\n");
554         return;
555     }
556 
557     if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
558         error_setg(errp, "unsupported SVE vector length");
559         error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
560                           ARM_MAX_VQ);
561         return;
562     }
563 
564     cpu->sve_max_vq = max_vq;
565 }
566 
567 /*
568  * Note that cpu_arm_get/set_sve_vq cannot use the simpler
569  * object_property_add_bool interface because they make use
570  * of the contents of "name" to determine which bit on which
571  * to operate.
572  */
573 static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
574                                void *opaque, Error **errp)
575 {
576     ARMCPU *cpu = ARM_CPU(obj);
577     uint32_t vq = atoi(&name[3]) / 128;
578     bool value;
579 
580     /* All vector lengths are disabled when SVE is off. */
581     if (!cpu_isar_feature(aa64_sve, cpu)) {
582         value = false;
583     } else {
584         value = test_bit(vq - 1, cpu->sve_vq_map);
585     }
586     visit_type_bool(v, name, &value, errp);
587 }
588 
589 static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
590                                void *opaque, Error **errp)
591 {
592     ARMCPU *cpu = ARM_CPU(obj);
593     uint32_t vq = atoi(&name[3]) / 128;
594     bool value;
595 
596     if (!visit_type_bool(v, name, &value, errp)) {
597         return;
598     }
599 
600     if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
601         error_setg(errp, "cannot enable %s", name);
602         error_append_hint(errp, "SVE not supported by KVM on this host\n");
603         return;
604     }
605 
606     if (value) {
607         set_bit(vq - 1, cpu->sve_vq_map);
608     } else {
609         clear_bit(vq - 1, cpu->sve_vq_map);
610     }
611     set_bit(vq - 1, cpu->sve_vq_init);
612 }
613 
614 static bool cpu_arm_get_sve(Object *obj, Error **errp)
615 {
616     ARMCPU *cpu = ARM_CPU(obj);
617     return cpu_isar_feature(aa64_sve, cpu);
618 }
619 
620 static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
621 {
622     ARMCPU *cpu = ARM_CPU(obj);
623     uint64_t t;
624 
625     if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
626         error_setg(errp, "'sve' feature not supported by KVM on this host");
627         return;
628     }
629 
630     t = cpu->isar.id_aa64pfr0;
631     t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
632     cpu->isar.id_aa64pfr0 = t;
633 }
634 
635 #ifdef CONFIG_USER_ONLY
636 /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
637 static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v,
638                                             const char *name, void *opaque,
639                                             Error **errp)
640 {
641     ARMCPU *cpu = ARM_CPU(obj);
642     int32_t default_len, default_vq, remainder;
643 
644     if (!visit_type_int32(v, name, &default_len, errp)) {
645         return;
646     }
647 
648     /* Undocumented, but the kernel allows -1 to indicate "maximum". */
649     if (default_len == -1) {
650         cpu->sve_default_vq = ARM_MAX_VQ;
651         return;
652     }
653 
654     default_vq = default_len / 16;
655     remainder = default_len % 16;
656 
657     /*
658      * Note that the 512 max comes from include/uapi/asm/sve_context.h
659      * and is the maximum architectural width of ZCR_ELx.LEN.
660      */
661     if (remainder || default_vq < 1 || default_vq > 512) {
662         error_setg(errp, "cannot set sve-default-vector-length");
663         if (remainder) {
664             error_append_hint(errp, "Vector length not a multiple of 16\n");
665         } else if (default_vq < 1) {
666             error_append_hint(errp, "Vector length smaller than 16\n");
667         } else {
668             error_append_hint(errp, "Vector length larger than %d\n",
669                               512 * 16);
670         }
671         return;
672     }
673 
674     cpu->sve_default_vq = default_vq;
675 }
676 
677 static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v,
678                                             const char *name, void *opaque,
679                                             Error **errp)
680 {
681     ARMCPU *cpu = ARM_CPU(obj);
682     int32_t value = cpu->sve_default_vq * 16;
683 
684     visit_type_int32(v, name, &value, errp);
685 }
686 #endif
687 
688 void aarch64_add_sve_properties(Object *obj)
689 {
690     uint32_t vq;
691 
692     object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);
693 
694     for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
695         char name[8];
696         sprintf(name, "sve%d", vq * 128);
697         object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
698                             cpu_arm_set_sve_vq, NULL, NULL);
699     }
700 
701 #ifdef CONFIG_USER_ONLY
702     /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
703     object_property_add(obj, "sve-default-vector-length", "int32",
704                         cpu_arm_get_sve_default_vec_len,
705                         cpu_arm_set_sve_default_vec_len, NULL, NULL);
706 #endif
707 }
708 
709 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
710 {
711     int arch_val = 0, impdef_val = 0;
712     uint64_t t;
713 
714     /* Exit early if PAuth is enabled, and fall through to disable it */
715     if ((kvm_enabled() || hvf_enabled()) && cpu->prop_pauth) {
716         if (!cpu_isar_feature(aa64_pauth, cpu)) {
717             error_setg(errp, "'pauth' feature not supported by %s on this host",
718                        kvm_enabled() ? "KVM" : "hvf");
719         }
720 
721         return;
722     }
723 
724     /* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */
725     if (cpu->prop_pauth) {
726         if (cpu->prop_pauth_impdef) {
727             impdef_val = 1;
728         } else {
729             arch_val = 1;
730         }
731     } else if (cpu->prop_pauth_impdef) {
732         error_setg(errp, "cannot enable pauth-impdef without pauth");
733         error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
734     }
735 
736     t = cpu->isar.id_aa64isar1;
737     t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
738     t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
739     t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
740     t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
741     cpu->isar.id_aa64isar1 = t;
742 }
743 
/* "pauth" property: pointer authentication, enabled by default. */
static Property arm_cpu_pauth_property =
    DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
/*
 * "pauth-impdef": select the IMPDEF algorithm instead of the architected
 * one; only installed for TCG (see aarch64_add_pauth_properties()).
 */
static Property arm_cpu_pauth_impdef_property =
    DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
748 
749 void aarch64_add_pauth_properties(Object *obj)
750 {
751     ARMCPU *cpu = ARM_CPU(obj);
752 
753     /* Default to PAUTH on, with the architected algorithm on TCG. */
754     qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property);
755     if (kvm_enabled() || hvf_enabled()) {
756         /*
757          * Mirror PAuth support from the probed sysregs back into the
758          * property for KVM or hvf. Is it just a bit backward? Yes it is!
759          * Note that prop_pauth is true whether the host CPU supports the
760          * architected QARMA5 algorithm or the IMPDEF one. We don't
761          * provide the separate pauth-impdef property for KVM or hvf,
762          * only for TCG.
763          */
764         cpu->prop_pauth = cpu_isar_feature(aa64_pauth, cpu);
765     } else {
766         qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
767     }
768 }
769 
/* "lpa2" property: allow turning off FEAT_LPA2 (see arm_cpu_lpa2_finalize). */
static Property arm_cpu_lpa2_property =
    DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true);
772 
773 void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
774 {
775     uint64_t t;
776 
777     /*
778      * We only install the property for tcg -cpu max; this is the
779      * only situation in which the cpu field can be true.
780      */
781     if (!cpu->prop_lpa2) {
782         return;
783     }
784 
785     t = cpu->isar.id_aa64mmfr0;
786     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 2);   /* 16k pages w/ LPA2 */
787     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4, 1);    /*  4k pages w/ LPA2 */
788     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 3); /* 16k stage2 w/ LPA2 */
789     t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 3);  /*  4k stage2 w/ LPA2 */
790     cpu->isar.id_aa64mmfr0 = t;
791 }
792 
/*
 * Instance init for the "host" CPU model: take the feature set probed
 * from the host CPU by the accelerator (KVM or hvf).  Compiling this in
 * without either accelerator is a build configuration error.
 */
static void aarch64_host_initfn(Object *obj)
{
#if defined(CONFIG_KVM)
    ARMCPU *cpu = ARM_CPU(obj);
    kvm_arm_set_cpu_features_from_host(cpu);
    /* SVE/PAuth properties are only added when the probed CPU is AArch64. */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        aarch64_add_sve_properties(obj);
        aarch64_add_pauth_properties(obj);
    }
#elif defined(CONFIG_HVF)
    ARMCPU *cpu = ARM_CPU(obj);
    hvf_arm_set_cpu_features_from_host(cpu);
    /* NOTE(review): no SVE properties here -- presumably hvf does not
     * expose SVE; confirm against hvf_arm_set_cpu_features_from_host(). */
    aarch64_add_pauth_properties(obj);
#else
    g_assert_not_reached();
#endif
}
810 
/*
 * -cpu max: if KVM or hvf is enabled, like -cpu host (best possible with
 * this host); otherwise, a CPU with as many features enabled as our
 * emulation supports.  The version of '-cpu max' for qemu-system-arm is
 * defined in cpu.c; this only needs to handle 64 bits.
 */
static void aarch64_max_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint64_t t;
    uint32_t u;

    if (kvm_enabled() || hvf_enabled()) {
        /* With KVM or HVF, '-cpu max' is identical to '-cpu host' */
        aarch64_host_initfn(obj);
        return;
    }

    /* '-cpu max' for TCG: we currently do this as "A57 with extra things" */

    aarch64_a57_initfn(obj);

    /*
     * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
     * one and try to apply errata workarounds or use impdef features we
     * don't provide.
     * An IMPLEMENTER field of 0 means "reserved for software use";
     * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
     * to see which features are present";
     * the VARIANT, PARTNUM and REVISION fields are all implementation
     * defined and we choose to define PARTNUM just in case guest
     * code needs to distinguish this QEMU CPU from other software
     * implementations, though this shouldn't be needed.
     */
    t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
    t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
    t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
    t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
    t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
    cpu->midr = t;

    /*
     * We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS}
     * are zero.
     */
    u = cpu->clidr;
    u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0);
    u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0);
    cpu->clidr = u;

    /*
     * Advertise TCG's emulated feature set by widening the fields of the
     * A57's 64-bit ID registers, one register at a time.
     */
    t = cpu->isar.id_aa64isar0;
    t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2);      /* FEAT_PMULL */
    t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);     /* FEAT_SHA1 */
    t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2);     /* FEAT_SHA512 */
    t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
    t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);   /* FEAT_LSE */
    t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);      /* FEAT_RDM */
    t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);     /* FEAT_SHA3 */
    t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);      /* FEAT_SM3 */
    t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);      /* FEAT_SM4 */
    t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);       /* FEAT_DotProd */
    t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);      /* FEAT_FHM */
    t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2);       /* FEAT_FlagM2 */
    t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2);      /* FEAT_TLBIRANGE */
    t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);     /* FEAT_RNG */
    cpu->isar.id_aa64isar0 = t;

    t = cpu->isar.id_aa64isar1;
    t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);      /* FEAT_DPB2 */
    t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);    /* FEAT_JSCVT */
    t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);     /* FEAT_FCMA */
    t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2);    /* FEAT_LRCPC2 */
    t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);  /* FEAT_FRINTTS */
    t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);       /* FEAT_SB */
    t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);  /* FEAT_SPECRES */
    t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);     /* FEAT_BF16 */
    t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1);      /* FEAT_DGH */
    t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);     /* FEAT_I8MM */
    cpu->isar.id_aa64isar1 = t;

    t = cpu->isar.id_aa64pfr0;
    t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);        /* FEAT_FP16 */
    t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);   /* FEAT_FP16 */
    t = FIELD_DP64(t, ID_AA64PFR0, RAS, 1);       /* FEAT_RAS */
    t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
    t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);      /* FEAT_SEL2 */
    t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);       /* FEAT_DIT */
    t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 2);      /* FEAT_CSV2_2 */
    t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1);      /* FEAT_CSV3 */
    cpu->isar.id_aa64pfr0 = t;

    t = cpu->isar.id_aa64pfr1;
    t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);        /* FEAT_BTI */
    t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);      /* FEAT_SSBS2 */
    /*
     * Begin with full support for MTE. This will be downgraded to MTE=0
     * during realize if the board provides no tag memory, much like
     * we do for EL2 with the virtualization=on property.
     */
    t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);       /* FEAT_MTE3 */
    t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */
    cpu->isar.id_aa64pfr1 = t;

    t = cpu->isar.id_aa64mmfr0;
    t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1);   /* 16k pages supported */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN64_2, 2); /* 64k stage2 supported */
    t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2);  /*  4k stage2 supported */
    cpu->isar.id_aa64mmfr0 = t;

    t = cpu->isar.id_aa64mmfr1;
    t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
    t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);       /* FEAT_VHE */
    t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1);     /* FEAT_HPDS */
    t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);       /* FEAT_LOR */
    t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2);      /* FEAT_PAN2 */
    t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1);      /* FEAT_XNX */
    cpu->isar.id_aa64mmfr1 = t;

    t = cpu->isar.id_aa64mmfr2;
    t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1);      /* FEAT_TTCNP */
    t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);      /* FEAT_UAO */
    t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1);     /* FEAT_IESB */
    t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1);  /* FEAT_LVA */
    t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1);       /* FEAT_TTST */
    t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1);      /* FEAT_IDST */
    t = FIELD_DP64(t, ID_AA64MMFR2, FWB, 1);      /* FEAT_S2FWB */
    t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1);      /* FEAT_TTL */
    t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2);      /* FEAT_BBM at level 2 */
    cpu->isar.id_aa64mmfr2 = t;

    t = cpu->isar.id_aa64zfr0;
    t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
    t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2);       /* FEAT_SVE_PMULL128 */
    t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);   /* FEAT_SVE_BitPerm */
    t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);  /* FEAT_BF16 */
    t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);      /* FEAT_SVE_SHA3 */
    t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);       /* FEAT_SVE_SM4 */
    t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);      /* FEAT_I8MM */
    t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);     /* FEAT_F32MM */
    t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);     /* FEAT_F64MM */
    cpu->isar.id_aa64zfr0 = t;

    t = cpu->isar.id_aa64dfr0;
    t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9);  /* FEAT_Debugv8p4 */
    t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5);    /* FEAT_PMUv3p4 */
    cpu->isar.id_aa64dfr0 = t;

    /* Replicate the same data to the 32-bit id registers.  */
    aa32_max_features(cpu);

#ifdef CONFIG_USER_ONLY
    /*
     * For usermode -cpu max we can use a larger and more efficient DCZ
     * blocksize since we don't have to follow what the hardware does.
     */
    cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
    cpu->dcz_blocksize = 7; /*  512 bytes */
#endif

    /* Every SVE vector length up to ARM_MAX_VQ is valid for this model */
    bitmap_fill(cpu->sve_vq_supported, ARM_MAX_VQ);

    /* User-configurable properties: pauth*, sve*, sve-max-vq, lpa2 */
    aarch64_add_pauth_properties(obj);
    aarch64_add_sve_properties(obj);
    object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
                        cpu_max_set_sve_max_vq, NULL, NULL);
    qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
}
979 
/* Instance init for the Fujitsu A64FX CPU model ("arm,a64fx"). */
static void aarch64_a64fx_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,a64fx";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    /* ID register values below are taken from the A64FX hardware manual */
    cpu->midr = 0x461f0010;
    cpu->revidr = 0x00000000;
    cpu->ctr = 0x86668006;
    cpu->reset_sctlr = 0x30000180;
    cpu->isar.id_aa64pfr0 =   0x0000000101111111; /* No RAS Extensions */
    cpu->isar.id_aa64pfr1 = 0x0000000000000000;
    cpu->isar.id_aa64dfr0 = 0x0000000010305408;
    cpu->isar.id_aa64dfr1 = 0x0000000000000000;
    cpu->id_aa64afr0 = 0x0000000000000000;
    cpu->id_aa64afr1 = 0x0000000000000000;
    cpu->isar.id_aa64mmfr0 = 0x0000000000001122;
    cpu->isar.id_aa64mmfr1 = 0x0000000011212100;
    cpu->isar.id_aa64mmfr2 = 0x0000000000001011;
    cpu->isar.id_aa64isar0 = 0x0000000010211120;
    cpu->isar.id_aa64isar1 = 0x0000000000010001;
    cpu->isar.id_aa64zfr0 = 0x0000000000000000;
    cpu->clidr = 0x0000000080000023;
    cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */
    cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */
    cpu->dcz_blocksize = 6; /* 256 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    cpu->gic_pribits = 5;

    /* The A64FX supports only 128, 256 and 512 bit SVE vector lengths */
    aarch64_add_sve_properties(obj);
    bitmap_zero(cpu->sve_vq_supported, ARM_MAX_VQ);
    set_bit(0, cpu->sve_vq_supported); /* 128bit */
    set_bit(1, cpu->sve_vq_supported); /* 256bit */
    set_bit(3, cpu->sve_vq_supported); /* 512bit */

    /* TODO:  Add A64FX specific HPC extension registers */
}
1027 
/*
 * Table of AArch64 CPU models registered at startup.  "host" is only
 * available when QEMU is built with KVM or hvf support.
 */
static const ARMCPUInfo aarch64_cpus[] = {
    { .name = "cortex-a57",         .initfn = aarch64_a57_initfn },
    { .name = "cortex-a53",         .initfn = aarch64_a53_initfn },
    { .name = "cortex-a72",         .initfn = aarch64_a72_initfn },
    { .name = "cortex-a76",         .initfn = aarch64_a76_initfn },
    { .name = "a64fx",              .initfn = aarch64_a64fx_initfn },
    { .name = "neoverse-n1",        .initfn = aarch64_neoverse_n1_initfn },
    { .name = "max",                .initfn = aarch64_max_initfn },
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    { .name = "host",               .initfn = aarch64_host_initfn },
#endif
};
1040 
1041 static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
1042 {
1043     ARMCPU *cpu = ARM_CPU(obj);
1044 
1045     return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
1046 }
1047 
1048 static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
1049 {
1050     ARMCPU *cpu = ARM_CPU(obj);
1051 
1052     /* At this time, this property is only allowed if KVM is enabled.  This
1053      * restriction allows us to avoid fixing up functionality that assumes a
1054      * uniform execution state like do_interrupt.
1055      */
1056     if (value == false) {
1057         if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
1058             error_setg(errp, "'aarch64' feature cannot be disabled "
1059                              "unless KVM is enabled and 32-bit EL1 "
1060                              "is supported");
1061             return;
1062         }
1063         unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
1064     } else {
1065         set_feature(&cpu->env, ARM_FEATURE_AARCH64);
1066     }
1067 }
1068 
/* QOM instance finalizer: AArch64 CPUs have no extra state to free. */
static void aarch64_cpu_finalizefn(Object *obj)
{
}
1072 
/* Architecture name for the gdbstub; returns a heap-allocated copy. */
static gchar *aarch64_gdb_arch_name(CPUState *cs)
{
    return g_strdup("aarch64");
}
1077 
1078 static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
1079 {
1080     CPUClass *cc = CPU_CLASS(oc);
1081 
1082     cc->gdb_read_register = aarch64_cpu_gdb_read_register;
1083     cc->gdb_write_register = aarch64_cpu_gdb_write_register;
1084     cc->gdb_num_core_regs = 34;
1085     cc->gdb_core_xml_file = "aarch64-core.xml";
1086     cc->gdb_arch_name = aarch64_gdb_arch_name;
1087 
1088     object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64,
1089                                    aarch64_cpu_set_aarch64);
1090     object_class_property_set_description(oc, "aarch64",
1091                                           "Set on/off to enable/disable aarch64 "
1092                                           "execution state ");
1093 }
1094 
1095 static void aarch64_cpu_instance_init(Object *obj)
1096 {
1097     ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
1098 
1099     acc->info->initfn(obj);
1100     arm_cpu_post_init(obj);
1101 }
1102 
1103 static void cpu_register_class_init(ObjectClass *oc, void *data)
1104 {
1105     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
1106 
1107     acc->info = data;
1108 }
1109 
1110 void aarch64_cpu_register(const ARMCPUInfo *info)
1111 {
1112     TypeInfo type_info = {
1113         .parent = TYPE_AARCH64_CPU,
1114         .instance_size = sizeof(ARMCPU),
1115         .instance_init = aarch64_cpu_instance_init,
1116         .class_size = sizeof(ARMCPUClass),
1117         .class_init = info->class_init ?: cpu_register_class_init,
1118         .class_data = (void *)info,
1119     };
1120 
1121     type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
1122     type_register(&type_info);
1123     g_free((void *)type_info.name);
1124 }
1125 
/*
 * Abstract base type for all AArch64 CPUs; the concrete models in
 * aarch64_cpus[] are registered as children of this type.
 */
static const TypeInfo aarch64_cpu_type_info = {
    .name = TYPE_AARCH64_CPU,
    .parent = TYPE_ARM_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_finalize = aarch64_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(AArch64CPUClass),
    .class_init = aarch64_cpu_class_init,
};
1135 
1136 static void aarch64_cpu_register_types(void)
1137 {
1138     size_t i;
1139 
1140     type_register_static(&aarch64_cpu_type_info);
1141 
1142     for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
1143         aarch64_cpu_register(&aarch64_cpus[i]);
1144     }
1145 }
1146 
1147 type_init(aarch64_cpu_register_types)
1148