xref: /qemu/target/riscv/cpu.c (revision e087bd4de3369d678ed8ebda4ba1c11b782cf899)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41 
42 /* RISC-V CPU definitions */
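/*
 * Canonical order used when the single-letter extensions are written out
 * (e.g. when composing riscv,isa strings).
 */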
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46 
47 /*
48  * From vector_helper.c
49  * Note that vector data is stored in host-endian 64-bit chunks,
50  * so addressing bytes needs a host-endian fixup.
51  */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x)   ((x) ^ 7)
54 #else
55 #define BYTE(x)   (x)
56 #endif
57 
58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62 
63 /* Hash that stores general user-set numeric options */
64 static GHashTable *general_user_opts;
65 
66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68     g_hash_table_insert(general_user_opts, (gpointer)optname,
69                         GUINT_TO_POINTER(value));
70 }
71 
72 bool riscv_cpu_option_set(const char *optname)
73 {
74     return g_hash_table_contains(general_user_opts, optname);
75 }
76 
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
79 
80 /*
81  * Here are the ordering rules of extension naming defined by the RISC-V
82  * specification:
83  * 1. All extensions should be separated from other multi-letter extensions
84  *    by an underscore.
85  * 2. The first letter following the 'Z' conventionally indicates the most
86  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87  *    If multiple 'Z' extensions are named, they should be ordered first
88  *    by category, then alphabetically within a category.
89  * 3. Standard supervisor-level extensions (starting with 'S') should be
90  *    listed after standard unprivileged extensions.  If multiple
91  *    supervisor-level extensions are listed, they should be ordered
92  *    alphabetically.
93  * 4. Non-standard extensions (starting with 'X') must be listed after all
94  *    standard extensions. They must be separated from other multi-letter
95  *    extensions by an underscore.
96  *
97  * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
98  * instead.
99  */
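/*
 * Illustrative example (not an exhaustive string): an ISA string such as
 * "rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba" follows the
 * rules above: single-letter extensions first, then the 'Z' extensions
 * ordered by category and alphabetically within a category, then the 'S'
 * supervisor-level extensions, then the 'X' vendor extensions, with all
 * multi-letter names separated by underscores.
 */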
100 const RISCVIsaExtData isa_edata_arr[] = {
101     ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105     ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106     ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107     ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108     ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
110     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
111     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
112     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
113     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
114     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
115     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
116     ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
117     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
118     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
119     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
120     ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
121     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
122     ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
123     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
124     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
125     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
126     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
127     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
128     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
129     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
130     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
131     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
132     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
133     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
134     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
135     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
136     ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
137     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
138     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
139     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
140     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
141     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
142     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
143     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
144     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
145     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
146     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
147     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
148     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
149     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
150     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
151     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
152     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
153     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
154     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
155     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
156     ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
157     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
158     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
159     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
160     ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
161     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
162     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
163     ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
164     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
165     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
166     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
167     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
168     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
169     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
170     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
171     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
172     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
173     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
174     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
175     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
176     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
177     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
178     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
179     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
180     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
181     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
182     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
183     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
184     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
185     ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
186     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
187     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
188     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
189     ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
190     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
191     ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
192     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
193     ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
194     ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
195     ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
196     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
197     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
198     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
199     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
200     ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
201     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
202     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
203     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
204     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
205     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
206     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
207     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
208     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
209     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
210     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
211     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
212     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
213 
214     DEFINE_PROP_END_OF_LIST(),
215 };
216 
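/*
 * The multi-letter extension flags are plain booleans inside RISCVCPUConfig.
 * The helpers below read and write them through a byte offset into the
 * config struct (see CPU_CFG_OFFSET), so table-driven code can query or
 * toggle any extension without naming the struct field directly.
 */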
217 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
218 {
219     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
220 
221     return *ext_enabled;
222 }
223 
224 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
225 {
226     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
227 
228     *ext_enabled = en;
229 }
230 
231 bool riscv_cpu_is_vendor(Object *cpu_obj)
232 {
233     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
234 }
235 
236 const char * const riscv_int_regnames[] = {
237     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
238     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
239     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
240     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
241     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
242 };
243 
244 const char * const riscv_int_regnamesh[] = {
245     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
246     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
247     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
248     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
249     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
250     "x30h/t5h",  "x31h/t6h"
251 };
252 
253 const char * const riscv_fpr_regnames[] = {
254     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
255     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
256     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
257     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
258     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
259     "f30/ft10", "f31/ft11"
260 };
261 
262 const char * const riscv_rvv_regnames[] = {
263   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
264   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
265   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
266   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
267   "v28", "v29", "v30", "v31"
268 };
269 
270 static const char * const riscv_excp_names[] = {
271     "misaligned_fetch",
272     "fault_fetch",
273     "illegal_instruction",
274     "breakpoint",
275     "misaligned_load",
276     "fault_load",
277     "misaligned_store",
278     "fault_store",
279     "user_ecall",
280     "supervisor_ecall",
281     "hypervisor_ecall",
282     "machine_ecall",
283     "exec_page_fault",
284     "load_page_fault",
285     "reserved",
286     "store_page_fault",
287     "reserved",
288     "reserved",
289     "reserved",
290     "reserved",
291     "guest_exec_page_fault",
292     "guest_load_page_fault",
293     "reserved",
294     "guest_store_page_fault",
295 };
296 
297 static const char * const riscv_intr_names[] = {
298     "u_software",
299     "s_software",
300     "vs_software",
301     "m_software",
302     "u_timer",
303     "s_timer",
304     "vs_timer",
305     "m_timer",
306     "u_external",
307     "s_external",
308     "vs_external",
309     "m_external",
310     "reserved",
311     "reserved",
312     "reserved",
313     "reserved"
314 };
315 
316 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
317 {
318     if (async) {
319         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
320                riscv_intr_names[cause] : "(unknown)";
321     } else {
322         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
323                riscv_excp_names[cause] : "(unknown)";
324     }
325 }
326 
327 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
328 {
329     env->misa_ext_mask = env->misa_ext = ext;
330 }
331 
332 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
333 {
334     return 16 << mcc->misa_mxl_max;
335 }
336 
337 #ifndef CONFIG_USER_ONLY
338 static uint8_t satp_mode_from_str(const char *satp_mode_str)
339 {
340     if (!strncmp(satp_mode_str, "mbare", 5)) {
341         return VM_1_10_MBARE;
342     }
343 
344     if (!strncmp(satp_mode_str, "sv32", 4)) {
345         return VM_1_10_SV32;
346     }
347 
348     if (!strncmp(satp_mode_str, "sv39", 4)) {
349         return VM_1_10_SV39;
350     }
351 
352     if (!strncmp(satp_mode_str, "sv48", 4)) {
353         return VM_1_10_SV48;
354     }
355 
356     if (!strncmp(satp_mode_str, "sv57", 4)) {
357         return VM_1_10_SV57;
358     }
359 
360     if (!strncmp(satp_mode_str, "sv64", 4)) {
361         return VM_1_10_SV64;
362     }
363 
364     g_assert_not_reached();
365 }
366 
367 uint8_t satp_mode_max_from_map(uint32_t map)
368 {
369     /*
370      * __builtin_clz() is undefined behaviour for 'map = 0', and
371      * there is no good result to return for 'map = 0' anyway
372      * (e.g. returning 0 would be ambiguous with the result
373      * for 'map = 1').
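     *
     * For example (illustrative): if both the VM_1_10_SV39 and
     * VM_1_10_SV48 bits are set in 'map', the result is VM_1_10_SV48,
     * i.e. the highest set bit wins.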
374      *
375      * Assert out if map = 0. Callers will have to deal with
376      * it outside of this function.
377      */
378     g_assert(map > 0);
379 
380     /* map here has at least one bit set, so no problem with clz */
381     return 31 - __builtin_clz(map);
382 }
383 
384 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
385 {
386     if (is_32_bit) {
387         switch (satp_mode) {
388         case VM_1_10_SV32:
389             return "sv32";
390         case VM_1_10_MBARE:
391             return "none";
392         }
393     } else {
394         switch (satp_mode) {
395         case VM_1_10_SV64:
396             return "sv64";
397         case VM_1_10_SV57:
398             return "sv57";
399         case VM_1_10_SV48:
400             return "sv48";
401         case VM_1_10_SV39:
402             return "sv39";
403         case VM_1_10_MBARE:
404             return "none";
405         }
406     }
407 
408     g_assert_not_reached();
409 }
410 
411 static void set_satp_mode_max_supported(RISCVCPU *cpu,
412                                         uint8_t satp_mode)
413 {
414     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
415     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
416 
417     for (int i = 0; i <= satp_mode; ++i) {
418         if (valid_vm[i]) {
419             cpu->cfg.satp_mode.supported |= (1 << i);
420         }
421     }
422 }
423 
424 /* Set the satp mode to the max supported */
425 static void set_satp_mode_default_map(RISCVCPU *cpu)
426 {
427     /*
428      * Bare CPUs do not default to the max available.
429      * Users must set a valid satp_mode in the command
430      * line.
431      */
432     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
433         warn_report("No satp mode set. Defaulting to 'bare'");
434         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
435         return;
436     }
437 
438     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
439 }
440 #endif
441 
442 static void riscv_max_cpu_init(Object *obj)
443 {
444     RISCVCPU *cpu = RISCV_CPU(obj);
445     CPURISCVState *env = &cpu->env;
446 
447     cpu->cfg.mmu = true;
448     cpu->cfg.pmp = true;
449 
450     env->priv_ver = PRIV_VERSION_LATEST;
451 #ifndef CONFIG_USER_ONLY
452 #ifdef TARGET_RISCV32
453     set_satp_mode_max_supported(cpu, VM_1_10_SV32);
454 #else
455     set_satp_mode_max_supported(cpu, VM_1_10_SV57);
456 #endif
457 #endif
458 }
459 
460 #if defined(TARGET_RISCV64)
461 static void rv64_base_cpu_init(Object *obj)
462 {
463     RISCVCPU *cpu = RISCV_CPU(obj);
464     CPURISCVState *env = &cpu->env;
465 
466     cpu->cfg.mmu = true;
467     cpu->cfg.pmp = true;
468 
469     /* Set latest version of privileged specification */
470     env->priv_ver = PRIV_VERSION_LATEST;
471 #ifndef CONFIG_USER_ONLY
472     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
473 #endif
474 }
475 
476 static void rv64_sifive_u_cpu_init(Object *obj)
477 {
478     RISCVCPU *cpu = RISCV_CPU(obj);
479     CPURISCVState *env = &cpu->env;
480     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
481     env->priv_ver = PRIV_VERSION_1_10_0;
482 #ifndef CONFIG_USER_ONLY
483     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
484 #endif
485 
486     /* inherited from parent obj via riscv_cpu_init() */
487     cpu->cfg.ext_zifencei = true;
488     cpu->cfg.ext_zicsr = true;
489     cpu->cfg.mmu = true;
490     cpu->cfg.pmp = true;
491 }
492 
493 static void rv64_sifive_e_cpu_init(Object *obj)
494 {
495     CPURISCVState *env = &RISCV_CPU(obj)->env;
496     RISCVCPU *cpu = RISCV_CPU(obj);
497 
498     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
499     env->priv_ver = PRIV_VERSION_1_10_0;
500 #ifndef CONFIG_USER_ONLY
501     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
502 #endif
503 
504     /* inherited from parent obj via riscv_cpu_init() */
505     cpu->cfg.ext_zifencei = true;
506     cpu->cfg.ext_zicsr = true;
507     cpu->cfg.pmp = true;
508 }
509 
510 static void rv64_thead_c906_cpu_init(Object *obj)
511 {
512     CPURISCVState *env = &RISCV_CPU(obj)->env;
513     RISCVCPU *cpu = RISCV_CPU(obj);
514 
515     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
516     env->priv_ver = PRIV_VERSION_1_11_0;
517 
518     cpu->cfg.ext_zfa = true;
519     cpu->cfg.ext_zfh = true;
520     cpu->cfg.mmu = true;
521     cpu->cfg.ext_xtheadba = true;
522     cpu->cfg.ext_xtheadbb = true;
523     cpu->cfg.ext_xtheadbs = true;
524     cpu->cfg.ext_xtheadcmo = true;
525     cpu->cfg.ext_xtheadcondmov = true;
526     cpu->cfg.ext_xtheadfmemidx = true;
527     cpu->cfg.ext_xtheadmac = true;
528     cpu->cfg.ext_xtheadmemidx = true;
529     cpu->cfg.ext_xtheadmempair = true;
530     cpu->cfg.ext_xtheadsync = true;
531 
532     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
533 #ifndef CONFIG_USER_ONLY
534     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
535     th_register_custom_csrs(cpu);
536 #endif
537 
538     /* inherited from parent obj via riscv_cpu_init() */
539     cpu->cfg.pmp = true;
540 }
541 
542 static void rv64_veyron_v1_cpu_init(Object *obj)
543 {
544     CPURISCVState *env = &RISCV_CPU(obj)->env;
545     RISCVCPU *cpu = RISCV_CPU(obj);
546 
547     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
548     env->priv_ver = PRIV_VERSION_1_12_0;
549 
550     /* Enable ISA extensions */
551     cpu->cfg.mmu = true;
552     cpu->cfg.ext_zifencei = true;
553     cpu->cfg.ext_zicsr = true;
554     cpu->cfg.pmp = true;
555     cpu->cfg.ext_zicbom = true;
556     cpu->cfg.cbom_blocksize = 64;
557     cpu->cfg.cboz_blocksize = 64;
558     cpu->cfg.ext_zicboz = true;
559     cpu->cfg.ext_smaia = true;
560     cpu->cfg.ext_ssaia = true;
561     cpu->cfg.ext_sscofpmf = true;
562     cpu->cfg.ext_sstc = true;
563     cpu->cfg.ext_svinval = true;
564     cpu->cfg.ext_svnapot = true;
565     cpu->cfg.ext_svpbmt = true;
566     cpu->cfg.ext_smstateen = true;
567     cpu->cfg.ext_zba = true;
568     cpu->cfg.ext_zbb = true;
569     cpu->cfg.ext_zbc = true;
570     cpu->cfg.ext_zbs = true;
571     cpu->cfg.ext_XVentanaCondOps = true;
572 
573     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
574     cpu->cfg.marchid = VEYRON_V1_MARCHID;
575     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
576 
577 #ifndef CONFIG_USER_ONLY
578     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
579 #endif
580 }
581 
582 #ifdef CONFIG_TCG
583 static void rv128_base_cpu_init(Object *obj)
584 {
585     RISCVCPU *cpu = RISCV_CPU(obj);
586     CPURISCVState *env = &cpu->env;
587 
588     if (qemu_tcg_mttcg_enabled()) {
589         /* Missing 128-bit aligned atomics */
590         error_report("128-bit RISC-V currently does not work with Multi "
591                      "Threaded TCG. Please use: -accel tcg,thread=single");
592         exit(EXIT_FAILURE);
593     }
594 
595     cpu->cfg.mmu = true;
596     cpu->cfg.pmp = true;
597 
598     /* Set latest version of privileged specification */
599     env->priv_ver = PRIV_VERSION_LATEST;
600 #ifndef CONFIG_USER_ONLY
601     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
602 #endif
603 }
604 #endif /* CONFIG_TCG */
605 
606 static void rv64i_bare_cpu_init(Object *obj)
607 {
608     CPURISCVState *env = &RISCV_CPU(obj)->env;
609     riscv_cpu_set_misa_ext(env, RVI);
610 }
611 
612 static void rv64e_bare_cpu_init(Object *obj)
613 {
614     CPURISCVState *env = &RISCV_CPU(obj)->env;
615     riscv_cpu_set_misa_ext(env, RVE);
616 }
617 
618 #endif /* TARGET_RISCV64 */
619 
620 #if defined(TARGET_RISCV32) || \
621     (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
622 
623 static void rv32_base_cpu_init(Object *obj)
624 {
625     RISCVCPU *cpu = RISCV_CPU(obj);
626     CPURISCVState *env = &cpu->env;
627 
628     cpu->cfg.mmu = true;
629     cpu->cfg.pmp = true;
630 
631     /* Set latest version of privileged specification */
632     env->priv_ver = PRIV_VERSION_LATEST;
633 #ifndef CONFIG_USER_ONLY
634     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
635 #endif
636 }
637 
638 static void rv32_sifive_u_cpu_init(Object *obj)
639 {
640     RISCVCPU *cpu = RISCV_CPU(obj);
641     CPURISCVState *env = &cpu->env;
642     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
643     env->priv_ver = PRIV_VERSION_1_10_0;
644 #ifndef CONFIG_USER_ONLY
645     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
646 #endif
647 
648     /* inherited from parent obj via riscv_cpu_init() */
649     cpu->cfg.ext_zifencei = true;
650     cpu->cfg.ext_zicsr = true;
651     cpu->cfg.mmu = true;
652     cpu->cfg.pmp = true;
653 }
654 
655 static void rv32_sifive_e_cpu_init(Object *obj)
656 {
657     CPURISCVState *env = &RISCV_CPU(obj)->env;
658     RISCVCPU *cpu = RISCV_CPU(obj);
659 
660     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
661     env->priv_ver = PRIV_VERSION_1_10_0;
662 #ifndef CONFIG_USER_ONLY
663     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
664 #endif
665 
666     /* inherited from parent obj via riscv_cpu_init() */
667     cpu->cfg.ext_zifencei = true;
668     cpu->cfg.ext_zicsr = true;
669     cpu->cfg.pmp = true;
670 }
671 
672 static void rv32_ibex_cpu_init(Object *obj)
673 {
674     CPURISCVState *env = &RISCV_CPU(obj)->env;
675     RISCVCPU *cpu = RISCV_CPU(obj);
676 
677     riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
678     env->priv_ver = PRIV_VERSION_1_12_0;
679 #ifndef CONFIG_USER_ONLY
680     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
681 #endif
682     /* inherited from parent obj via riscv_cpu_init() */
683     cpu->cfg.ext_zifencei = true;
684     cpu->cfg.ext_zicsr = true;
685     cpu->cfg.pmp = true;
686     cpu->cfg.ext_smepmp = true;
687 
688     cpu->cfg.ext_zba = true;
689     cpu->cfg.ext_zbb = true;
690     cpu->cfg.ext_zbc = true;
691     cpu->cfg.ext_zbs = true;
692 }
693 
694 static void rv32_imafcu_nommu_cpu_init(Object *obj)
695 {
696     CPURISCVState *env = &RISCV_CPU(obj)->env;
697     RISCVCPU *cpu = RISCV_CPU(obj);
698 
699     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
700     env->priv_ver = PRIV_VERSION_1_10_0;
701 #ifndef CONFIG_USER_ONLY
702     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
703 #endif
704 
705     /* inherited from parent obj via riscv_cpu_init() */
706     cpu->cfg.ext_zifencei = true;
707     cpu->cfg.ext_zicsr = true;
708     cpu->cfg.pmp = true;
709 }
710 
711 static void rv32i_bare_cpu_init(Object *obj)
712 {
713     CPURISCVState *env = &RISCV_CPU(obj)->env;
714     riscv_cpu_set_misa_ext(env, RVI);
715 }
716 
717 static void rv32e_bare_cpu_init(Object *obj)
718 {
719     CPURISCVState *env = &RISCV_CPU(obj)->env;
720     riscv_cpu_set_misa_ext(env, RVE);
721 }
722 #endif
723 
724 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
725 {
726     ObjectClass *oc;
727     char *typename;
728     char **cpuname;
729 
730     cpuname = g_strsplit(cpu_model, ",", 1);
731     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
732     oc = object_class_by_name(typename);
733     g_strfreev(cpuname);
734     g_free(typename);
735 
736     return oc;
737 }
738 
739 char *riscv_cpu_get_name(RISCVCPU *cpu)
740 {
741     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
742     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
743 
744     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
745 
746     return cpu_model_from_type(typename);
747 }
748 
749 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
750 {
751     RISCVCPU *cpu = RISCV_CPU(cs);
752     CPURISCVState *env = &cpu->env;
753     int i, j;
754     uint8_t *p;
755 
756 #if !defined(CONFIG_USER_ONLY)
757     if (riscv_has_ext(env, RVH)) {
758         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
759     }
760 #endif
761     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
762 #ifndef CONFIG_USER_ONLY
763     {
764         static const int dump_csrs[] = {
765             CSR_MHARTID,
766             CSR_MSTATUS,
767             CSR_MSTATUSH,
768             /*
769              * CSR_SSTATUS is intentionally omitted here as its value
770              * can be figured out by looking at CSR_MSTATUS
771              */
772             CSR_HSTATUS,
773             CSR_VSSTATUS,
774             CSR_MIP,
775             CSR_MIE,
776             CSR_MIDELEG,
777             CSR_HIDELEG,
778             CSR_MEDELEG,
779             CSR_HEDELEG,
780             CSR_MTVEC,
781             CSR_STVEC,
782             CSR_VSTVEC,
783             CSR_MEPC,
784             CSR_SEPC,
785             CSR_VSEPC,
786             CSR_MCAUSE,
787             CSR_SCAUSE,
788             CSR_VSCAUSE,
789             CSR_MTVAL,
790             CSR_STVAL,
791             CSR_HTVAL,
792             CSR_MTVAL2,
793             CSR_MSCRATCH,
794             CSR_SSCRATCH,
795             CSR_SATP,
796             CSR_MMTE,
797             CSR_UPMBASE,
798             CSR_UPMMASK,
799             CSR_SPMBASE,
800             CSR_SPMMASK,
801             CSR_MPMBASE,
802             CSR_MPMMASK,
803         };
804 
805         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
806             int csrno = dump_csrs[i];
807             target_ulong val = 0;
808             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
809 
810             /*
811              * Rely on the smode, hmode, etc., predicates within csr.c
812              * to do the filtering of the registers that are present.
813              */
814             if (res == RISCV_EXCP_NONE) {
815                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
816                              csr_ops[csrno].name, val);
817             }
818         }
819     }
820 #endif
821 
822     for (i = 0; i < 32; i++) {
823         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
824                      riscv_int_regnames[i], env->gpr[i]);
825         if ((i & 3) == 3) {
826             qemu_fprintf(f, "\n");
827         }
828     }
829     if (flags & CPU_DUMP_FPU) {
830         target_ulong val = 0;
831         RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
832         if (res == RISCV_EXCP_NONE) {
833             qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
834                     csr_ops[CSR_FCSR].name, val);
835         }
836         for (i = 0; i < 32; i++) {
837             qemu_fprintf(f, " %-8s %016" PRIx64,
838                          riscv_fpr_regnames[i], env->fpr[i]);
839             if ((i & 3) == 3) {
840                 qemu_fprintf(f, "\n");
841             }
842         }
843     }
844     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
845         static const int dump_rvv_csrs[] = {
846                     CSR_VSTART,
847                     CSR_VXSAT,
848                     CSR_VXRM,
849                     CSR_VCSR,
850                     CSR_VL,
851                     CSR_VTYPE,
852                     CSR_VLENB,
853                 };
854         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
855             int csrno = dump_rvv_csrs[i];
856             target_ulong val = 0;
857             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
858 
859             /*
860              * Rely on the smode, hmode, etc., predicates within csr.c
861              * to do the filtering of the registers that are present.
862              */
863             if (res == RISCV_EXCP_NONE) {
864                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
865                              csr_ops[csrno].name, val);
866             }
867         }
868         uint16_t vlenb = cpu->cfg.vlenb;
869 
870         for (i = 0; i < 32; i++) {
871             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
872             p = (uint8_t *)env->vreg;
873             for (j = vlenb - 1 ; j >= 0; j--) {
874                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
875             }
876             qemu_fprintf(f, "\n");
877         }
878     }
879 }
880 
881 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
882 {
883     RISCVCPU *cpu = RISCV_CPU(cs);
884     CPURISCVState *env = &cpu->env;
885 
886     if (env->xl == MXL_RV32) {
887         env->pc = (int32_t)value;
888     } else {
889         env->pc = value;
890     }
891 }
892 
893 static vaddr riscv_cpu_get_pc(CPUState *cs)
894 {
895     RISCVCPU *cpu = RISCV_CPU(cs);
896     CPURISCVState *env = &cpu->env;
897 
898     /* Match cpu_get_tb_cpu_state. */
899     if (env->xl == MXL_RV32) {
900         return env->pc & UINT32_MAX;
901     }
902     return env->pc;
903 }
904 
905 bool riscv_cpu_has_work(CPUState *cs)
906 {
907 #ifndef CONFIG_USER_ONLY
908     RISCVCPU *cpu = RISCV_CPU(cs);
909     CPURISCVState *env = &cpu->env;
910     /*
911      * Definition of the WFI instruction requires it to ignore the privilege
912      * mode and delegation registers, but respect individual enables
913      */
914     return riscv_cpu_all_pending(env) != 0 ||
915         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
916         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
917 #else
918     return true;
919 #endif
920 }
921 
922 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
923 {
924     return riscv_env_mmu_index(cpu_env(cs), ifetch);
925 }
926 
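/*
 * Hold phase of the CPU reset: return the hart to M-mode with a valid
 * mstatus, reload the PC from the reset vector, and reinitialise the
 * local interrupt priorities, PMP locks and (if enabled) debug triggers.
 */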
927 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
928 {
929 #ifndef CONFIG_USER_ONLY
930     uint8_t iprio;
931     int i, irq, rdzero;
932 #endif
933     CPUState *cs = CPU(obj);
934     RISCVCPU *cpu = RISCV_CPU(cs);
935     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
936     CPURISCVState *env = &cpu->env;
937 
938     if (mcc->parent_phases.hold) {
939         mcc->parent_phases.hold(obj, type);
940     }
941 #ifndef CONFIG_USER_ONLY
942     env->misa_mxl = mcc->misa_mxl_max;
943     env->priv = PRV_M;
944     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
945     if (env->misa_mxl > MXL_RV32) {
946         /*
947          * The reset status of SXL/UXL is undefined, but mstatus is WARL
948          * and we must ensure that the value after init is valid for read.
949          */
950         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
951         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
952         if (riscv_has_ext(env, RVH)) {
953             env->vsstatus = set_field(env->vsstatus,
954                                       MSTATUS64_SXL, env->misa_mxl);
955             env->vsstatus = set_field(env->vsstatus,
956                                       MSTATUS64_UXL, env->misa_mxl);
957             env->mstatus_hs = set_field(env->mstatus_hs,
958                                         MSTATUS64_SXL, env->misa_mxl);
959             env->mstatus_hs = set_field(env->mstatus_hs,
960                                         MSTATUS64_UXL, env->misa_mxl);
961         }
962     }
963     env->mcause = 0;
964     env->miclaim = MIP_SGEIP;
965     env->pc = env->resetvec;
966     env->bins = 0;
967     env->two_stage_lookup = false;
968 
969     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
970                    (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
971                     MENVCFG_ADUE : 0);
972     env->henvcfg = 0;
973 
974     /* Initialize default priorities of local interrupts. */
975     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
976         iprio = riscv_cpu_default_priority(i);
977         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
978         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
979         env->hviprio[i] = 0;
980     }
981     i = 0;
982     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
983         if (!rdzero) {
984             env->hviprio[irq] = env->miprio[irq];
985         }
986         i++;
987     }
988     /* mmte is supposed to have pm.current hardwired to 1 */
989     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
990 
991     /*
992      * Bits 10, 6, 2 and 12 of mideleg are read-only 1 when the Hypervisor
993      * extension is enabled.
994      */
995     if (riscv_has_ext(env, RVH)) {
996         env->mideleg |= HS_MODE_INTERRUPTS;
997     }
998 
999     /*
1000      * Clear mseccfg and unlock all the PMP entries upon reset.
1001      * This is allowed as per the priv and smepmp specifications
1002      * and is needed to clear stale entries across reboots.
1003      */
1004     if (riscv_cpu_cfg(env)->ext_smepmp) {
1005         env->mseccfg = 0;
1006     }
1007 
1008     pmp_unlock_entries(env);
1009 #endif
1010     env->xl = riscv_cpu_mxl(env);
1011     riscv_cpu_update_mask(env);
1012     cs->exception_index = RISCV_EXCP_NONE;
1013     env->load_res = -1;
1014     set_default_nan_mode(1, &env->fp_status);
1015 
1016 #ifndef CONFIG_USER_ONLY
1017     if (cpu->cfg.debug) {
1018         riscv_trigger_reset_hold(env);
1019     }
1020 
1021     if (kvm_enabled()) {
1022         kvm_riscv_reset_vcpu(cpu);
1023     }
1024 #endif
1025 }
1026 
1027 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1028 {
1029     RISCVCPU *cpu = RISCV_CPU(s);
1030     CPURISCVState *env = &cpu->env;
1031     info->target_info = &cpu->cfg;
1032 
1033     switch (env->xl) {
1034     case MXL_RV32:
1035         info->print_insn = print_insn_riscv32;
1036         break;
1037     case MXL_RV64:
1038         info->print_insn = print_insn_riscv64;
1039         break;
1040     case MXL_RV128:
1041         info->print_insn = print_insn_riscv128;
1042         break;
1043     default:
1044         g_assert_not_reached();
1045     }
1046 }
1047 
1048 #ifndef CONFIG_USER_ONLY
1049 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1050 {
1051     bool rv32 = riscv_cpu_is_32bit(cpu);
1052     uint8_t satp_mode_map_max, satp_mode_supported_max;
1053 
1054     /* The CPU wants the OS to decide which satp mode to use */
1055     if (cpu->cfg.satp_mode.supported == 0) {
1056         return;
1057     }
1058 
1059     satp_mode_supported_max =
1060                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1061 
1062     if (cpu->cfg.satp_mode.map == 0) {
1063         if (cpu->cfg.satp_mode.init == 0) {
1064             /* If unset by the user, we fall back to the default satp mode. */
1065             set_satp_mode_default_map(cpu);
1066         } else {
1067             /*
1068              * Find the lowest level that was disabled and then enable the
1069              * first valid level below it that can be found in
1070              * valid_vm_1_10_32/64.
1071              */
1072             for (int i = 1; i < 16; ++i) {
1073                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1074                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1075                     for (int j = i - 1; j >= 0; --j) {
1076                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1077                             cpu->cfg.satp_mode.map |= (1 << j);
1078                             break;
1079                         }
1080                     }
1081                     break;
1082                 }
1083             }
1084         }
1085     }
1086 
1087     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1088 
1089     /* Make sure the user asked for a supported configuration (HW and QEMU) */
1090     if (satp_mode_map_max > satp_mode_supported_max) {
1091         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1092                    satp_mode_str(satp_mode_map_max, rv32),
1093                    satp_mode_str(satp_mode_supported_max, rv32));
1094         return;
1095     }
1096 
1097     /*
1098      * Make sure the user did not ask for an invalid configuration as per
1099      * the specification.
1100      */
1101     if (!rv32) {
1102         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1103             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1104                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1105                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1106                 error_setg(errp, "cannot disable %s satp mode if %s "
1107                            "is enabled", satp_mode_str(i, false),
1108                            satp_mode_str(satp_mode_map_max, false));
1109                 return;
1110             }
1111         }
1112     }
1113 
1114     /* Finally expand the map so that all valid modes are set */
1115     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1116         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1117             cpu->cfg.satp_mode.map |= (1 << i);
1118         }
1119     }
1120 }
1121 #endif
1122 
1123 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1124 {
1125     Error *local_err = NULL;
1126 
1127 #ifndef CONFIG_USER_ONLY
1128     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1129     if (local_err != NULL) {
1130         error_propagate(errp, local_err);
1131         return;
1132     }
1133 #endif
1134 
1135     if (tcg_enabled()) {
1136         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1137         if (local_err != NULL) {
1138             error_propagate(errp, local_err);
1139             return;
1140         }
1141         riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1142     } else if (kvm_enabled()) {
1143         riscv_kvm_cpu_finalize_features(cpu, &local_err);
1144         if (local_err != NULL) {
1145             error_propagate(errp, local_err);
1146             return;
1147         }
1148     }
1149 }
1150 
1151 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1152 {
1153     CPUState *cs = CPU(dev);
1154     RISCVCPU *cpu = RISCV_CPU(dev);
1155     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1156     Error *local_err = NULL;
1157 
1158     cpu_exec_realizefn(cs, &local_err);
1159     if (local_err != NULL) {
1160         error_propagate(errp, local_err);
1161         return;
1162     }
1163 
1164     riscv_cpu_finalize_features(cpu, &local_err);
1165     if (local_err != NULL) {
1166         error_propagate(errp, local_err);
1167         return;
1168     }
1169 
1170     riscv_cpu_register_gdb_regs_for_features(cs);
1171 
1172 #ifndef CONFIG_USER_ONLY
1173     if (cpu->cfg.debug) {
1174         riscv_trigger_realize(&cpu->env);
1175     }
1176 #endif
1177 
1178     qemu_init_vcpu(cs);
1179     cpu_reset(cs);
1180 
1181     mcc->parent_realize(dev, errp);
1182 }
1183 
1184 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1185 {
1186     if (tcg_enabled()) {
1187         return riscv_cpu_tcg_compatible(cpu);
1188     }
1189 
1190     return true;
1191 }
1192 
1193 #ifndef CONFIG_USER_ONLY
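/*
 * Getter/setter backing the per-mode "sv32"/"svNN" boolean properties that
 * riscv_add_satp_mode_properties() registers below: the getter reports
 * whether a mode bit is set in the map, while the setter updates the map
 * and records in 'init' that the user explicitly configured that mode.
 */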
1194 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1195                                void *opaque, Error **errp)
1196 {
1197     RISCVSATPMap *satp_map = opaque;
1198     uint8_t satp = satp_mode_from_str(name);
1199     bool value;
1200 
1201     value = satp_map->map & (1 << satp);
1202 
1203     visit_type_bool(v, name, &value, errp);
1204 }
1205 
1206 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1207                                void *opaque, Error **errp)
1208 {
1209     RISCVSATPMap *satp_map = opaque;
1210     uint8_t satp = satp_mode_from_str(name);
1211     bool value;
1212 
1213     if (!visit_type_bool(v, name, &value, errp)) {
1214         return;
1215     }
1216 
1217     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1218     satp_map->init |= 1 << satp;
1219 }
1220 
1221 void riscv_add_satp_mode_properties(Object *obj)
1222 {
1223     RISCVCPU *cpu = RISCV_CPU(obj);
1224 
1225     if (cpu->env.misa_mxl == MXL_RV32) {
1226         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1227                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1228     } else {
1229         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1230                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1231         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1232                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1233         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1234                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1235         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1236                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1237     }
1238 }
1239 
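/*
 * GPIO IRQ handler wired up by riscv_cpu_init(): lines below IRQ_LOCAL_MAX
 * update mip (or are forwarded to KVM when it is in use), while the
 * remaining lines model guest external interrupts and update HGEIP and
 * mip.SGEIP accordingly.
 */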
1240 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1241 {
1242     RISCVCPU *cpu = RISCV_CPU(opaque);
1243     CPURISCVState *env = &cpu->env;
1244 
1245     if (irq < IRQ_LOCAL_MAX) {
1246         switch (irq) {
1247         case IRQ_U_SOFT:
1248         case IRQ_S_SOFT:
1249         case IRQ_VS_SOFT:
1250         case IRQ_M_SOFT:
1251         case IRQ_U_TIMER:
1252         case IRQ_S_TIMER:
1253         case IRQ_VS_TIMER:
1254         case IRQ_M_TIMER:
1255         case IRQ_U_EXT:
1256         case IRQ_VS_EXT:
1257         case IRQ_M_EXT:
1258             if (kvm_enabled()) {
1259                 kvm_riscv_set_irq(cpu, irq, level);
1260             } else {
1261                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1262             }
1263             break;
1264         case IRQ_S_EXT:
1265             if (kvm_enabled()) {
1266                 kvm_riscv_set_irq(cpu, irq, level);
1267             } else {
1268                 env->external_seip = level;
1269                 riscv_cpu_update_mip(env, 1 << irq,
1270                                      BOOL_TO_MASK(level | env->software_seip));
1271             }
1272             break;
1273         default:
1274             g_assert_not_reached();
1275         }
1276     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1277         /* Require H-extension for handling guest local interrupts */
1278         if (!riscv_has_ext(env, RVH)) {
1279             g_assert_not_reached();
1280         }
1281 
1282         /* Compute bit position in HGEIP CSR */
1283         irq = irq - IRQ_LOCAL_MAX + 1;
1284         if (env->geilen < irq) {
1285             g_assert_not_reached();
1286         }
1287 
1288         /* Update HGEIP CSR */
1289         env->hgeip &= ~((target_ulong)1 << irq);
1290         if (level) {
1291             env->hgeip |= (target_ulong)1 << irq;
1292         }
1293 
1294         /* Update mip.SGEIP bit */
1295         riscv_cpu_update_mip(env, MIP_SGEIP,
1296                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1297     } else {
1298         g_assert_not_reached();
1299     }
1300 }
1301 #endif /* CONFIG_USER_ONLY */
1302 
1303 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1304 {
1305     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1306 }
1307 
1308 static void riscv_cpu_post_init(Object *obj)
1309 {
1310     accel_cpu_instance_init(CPU(obj));
1311 }
1312 
1313 static void riscv_cpu_init(Object *obj)
1314 {
1315     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1316     RISCVCPU *cpu = RISCV_CPU(obj);
1317     CPURISCVState *env = &cpu->env;
1318 
1319     env->misa_mxl = mcc->misa_mxl_max;
1320 
1321 #ifndef CONFIG_USER_ONLY
1322     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1323                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1324 #endif /* CONFIG_USER_ONLY */
1325 
1326     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1327 
1328     /*
1329      * The timer and performance counter extensions were supported
1330      * in QEMU before they were added as discrete extensions in the
1331      * ISA. To keep compatibility we'll always default them to 'true'
1332      * for all CPUs. Each accelerator will decide what to do when
1333      * users disable them.
1334      */
1335     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1336     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1337 
1338     /* Default values for non-bool cpu properties */
1339     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1340     cpu->cfg.vlenb = 128 >> 3;
1341     cpu->cfg.elen = 64;
1342     cpu->cfg.cbom_blocksize = 64;
1343     cpu->cfg.cbop_blocksize = 64;
1344     cpu->cfg.cboz_blocksize = 64;
1345     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1346 }
1347 
1348 static void riscv_bare_cpu_init(Object *obj)
1349 {
1350     RISCVCPU *cpu = RISCV_CPU(obj);
1351 
1352     /*
1353      * Bare CPUs do not inherit the timer and performance
1354      * counters from the parent class (see riscv_cpu_init()
1355      * for info on why the parent enables them).
1356      *
1357      * Users have to explicitly enable these counters for
1358      * bare CPUs.
1359      */
1360     cpu->cfg.ext_zicntr = false;
1361     cpu->cfg.ext_zihpm = false;
1362 
1363     /* Set to QEMU's first supported priv version */
1364     cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1365 
1366     /*
1367      * Support all available satp_mode settings. The default
1368      * value will be set to MBARE if the user doesn't set
1369      * satp_mode manually (see set_satp_mode_default_map()).
1370      */
1371 #ifndef CONFIG_USER_ONLY
1372     set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1373 #endif
1374 }
1375 
1376 typedef struct misa_ext_info {
1377     const char *name;
1378     const char *description;
1379 } MISAExtInfo;
1380 
1381 #define MISA_INFO_IDX(_bit) \
1382     __builtin_ctz(_bit)
1383 
1384 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1385     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1386 
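/*
 * Worked example (illustrative): RVA is bit 0 of the misa mask ('A' - 'A'),
 * so MISA_INFO_IDX(RVA) == __builtin_ctz(RVA) == 0 and the "a" entry below
 * is placed in slot 0 of misa_ext_info_arr[].
 */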
1387 static const MISAExtInfo misa_ext_info_arr[] = {
1388     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1389     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1390     MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1391     MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1392     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1393     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1394     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1395     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1396     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1397     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1398     MISA_EXT_INFO(RVJ, "x-j", "Dynamically translated languages"),
1399     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1400     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1401     MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1402 };
1403 
1404 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1405 {
1406     CPUClass *cc = CPU_CLASS(mcc);
1407 
1408     /* Validate that MISA_MXL is set properly. */
1409     switch (mcc->misa_mxl_max) {
1410 #ifdef TARGET_RISCV64
1411     case MXL_RV64:
1412     case MXL_RV128:
1413         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1414         break;
1415 #endif
1416     case MXL_RV32:
1417         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1418         break;
1419     default:
1420         g_assert_not_reached();
1421     }
1422 }
1423 
1424 static int riscv_validate_misa_info_idx(uint32_t bit)
1425 {
1426     int idx;
1427 
1428     /*
1429      * Our lowest valid input (RVA) is 1 and
1430      * __builtin_ctz() is UB with zero.
1431      */
1432     g_assert(bit != 0);
1433     idx = MISA_INFO_IDX(bit);
1434 
1435     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1436     return idx;
1437 }
1438 
1439 const char *riscv_get_misa_ext_name(uint32_t bit)
1440 {
1441     int idx = riscv_validate_misa_info_idx(bit);
1442     const char *val = misa_ext_info_arr[idx].name;
1443 
1444     g_assert(val != NULL);
1445     return val;
1446 }
1447 
1448 const char *riscv_get_misa_ext_description(uint32_t bit)
1449 {
1450     int idx = riscv_validate_misa_info_idx(bit);
1451     const char *val = misa_ext_info_arr[idx].description;
1452 
1453     g_assert(val != NULL);
1454     return val;
1455 }
1456 
1457 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1458     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1459      .enabled = _defval}
1460 
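/*
 * For example (illustrative): MULTI_EXT_CFG_BOOL("zba", ext_zba, true)
 * describes a "zba" property backed by cfg.ext_zba that defaults to enabled.
 */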
1461 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1462     /* Defaults for standard extensions */
1463     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1464     MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
1465     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1466     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1467     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1468     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1469     MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1470     MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1471     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1472     MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
1473     MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
1474     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1475     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1476     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1477     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1478     MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1479     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1480     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1481     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1482     MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1483     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1484     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1485     MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1486     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1487     MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1488     MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1489     MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1490     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1491 
1492     MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1493     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1494     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1495     MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1496     MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1497     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1498     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1499     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1500     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1501     MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),
1502 
1503     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1504     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1505 
1506     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1507     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1508     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1509     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1510     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1511     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1512     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1513     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1514     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1515     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1516     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1517     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1518     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1519     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1520     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1521     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1522     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1523     MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1524 
1525     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1526     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1527     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1528     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1529 
1530     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1531     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1532     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1533 
1534     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1535 
1536     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1537     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1538     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1539     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1540     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1541     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1542     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1543     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1544 
1545     /* Vector cryptography extensions */
1546     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1547     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1548     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1549     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1550     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1551     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1552     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1553     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1554     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1555     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1556     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1557     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1558     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1559     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1560     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1561     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1562 
1563     DEFINE_PROP_END_OF_LIST(),
1564 };
1565 
1566 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1567     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1568     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1569     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1570     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1571     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1572     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1573     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1574     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1575     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1576     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1577     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1578     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1579 
1580     DEFINE_PROP_END_OF_LIST(),
1581 };
1582 
1583 /* These are experimental so mark with 'x-' */
1584 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1585     DEFINE_PROP_END_OF_LIST(),
1586 };
1587 
1588 /*
1589  * 'Named features' is the name we give to extensions that we
1590  * don't want to expose to users. They are either immutable
1591  * (always enabled/disabled) or they vary depending on
1592  * the resulting CPU state. Like regular extensions, they have
1593  * riscv,isa strings and a priv_ver requirement.
1594  */
1595 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1596     MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1597 
1598     DEFINE_PROP_END_OF_LIST(),
1599 };
1600 
1601 /* Deprecated entries marked for future removal */
1602 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1603     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1604     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1605     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1606     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1607     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1608     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1609     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1610     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1611     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1612     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1613     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1614 
1615     DEFINE_PROP_END_OF_LIST(),
1616 };
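/*
 * The capitalized spellings above are kept only so that older command
 * lines keep working. For illustration (not a complete description of
 * the behavior), something like:
 *
 *     -cpu rv64,Zifencei=true
 *
 * is still accepted but is expected to warn, and should be written as
 * 'zifencei=true' going forward.
 */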
1617 
1618 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1619                              Error **errp)
1620 {
1621     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1622     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1623                cpuname, propname);
1624 }
1625 
1626 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1627                              void *opaque, Error **errp)
1628 {
1629     RISCVCPU *cpu = RISCV_CPU(obj);
1630     uint8_t pmu_num, curr_pmu_num;
1631     uint32_t pmu_mask;
1632 
1633     if (!visit_type_uint8(v, name, &pmu_num, errp)) {
             return;
         }
1634 
1635     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1636 
1637     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1638         cpu_set_prop_err(cpu, name, errp);
1639         error_append_hint(errp, "Current '%s' val: %u\n",
1640                           name, curr_pmu_num);
1641         return;
1642     }
1643 
1644     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1645         error_setg(errp, "Number of counters exceeds maximum available");
1646         return;
1647     }
1648 
1649     if (pmu_num == 0) {
1650         pmu_mask = 0;
1651     } else {
1652         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1653     }
1654 
1655     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1656     cpu->cfg.pmu_mask = pmu_mask;
1657     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1658 }
1659 
1660 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1661                              void *opaque, Error **errp)
1662 {
1663     RISCVCPU *cpu = RISCV_CPU(obj);
1664     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1665 
1666     visit_type_uint8(v, name, &pmu_num, errp);
1667 }
1668 
1669 static const PropertyInfo prop_pmu_num = {
1670     .name = "pmu-num",
1671     .get = prop_pmu_num_get,
1672     .set = prop_pmu_num_set,
1673 };
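/*
 * Illustrative sketch of the deprecated pmu-num -> pmu-mask translation
 * performed above: pmu-num=N enables mhpmcounter3..mhpmcounter(3+N-1),
 * so e.g. pmu-num=6 becomes MAKE_64BIT_MASK(3, 6) == 0x1f8, i.e. the
 * same effect as passing pmu-mask=0x1f8 directly.
 */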
1674 
1675 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1676                              void *opaque, Error **errp)
1677 {
1678     RISCVCPU *cpu = RISCV_CPU(obj);
1679     uint32_t value;
1680     uint8_t pmu_num;
1681 
1682     if (!visit_type_uint32(v, name, &value, errp)) {
             return;
         }
1683 
1684     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1685         cpu_set_prop_err(cpu, name, errp);
1686         error_append_hint(errp, "Current '%s' val: %x\n",
1687                           name, cpu->cfg.pmu_mask);
1688         return;
1689     }
1690 
1691     pmu_num = ctpop32(value);
1692 
1693     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1694         error_setg(errp, "Number of counters exceeds maximum available");
1695         return;
1696     }
1697 
1698     cpu_option_add_user_setting(name, value);
1699     cpu->cfg.pmu_mask = value;
1700 }
1701 
1702 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1703                              void *opaque, Error **errp)
1704 {
1705     uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1706 
1707     visit_type_uint32(v, name, &pmu_mask, errp);
1708 }
1709 
1710 static const PropertyInfo prop_pmu_mask = {
1711     .name = "pmu-mask",
1712     .get = prop_pmu_mask_get,
1713     .set = prop_pmu_mask_set,
1714 };
1715 
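/*
 * The setters below share the same shape: reject changes on vendor CPUs
 * (riscv_cpu_is_vendor()), and otherwise record the explicit user choice
 * with cpu_option_add_user_setting() before updating cpu->cfg, presumably
 * so that later validation can tell user-set options apart from defaults.
 */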
1716 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1717                          void *opaque, Error **errp)
1718 {
1719     RISCVCPU *cpu = RISCV_CPU(obj);
1720     bool value;
1721 
1722     if (!visit_type_bool(v, name, &value, errp)) {
             return;
         }
1723 
1724     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1725         cpu_set_prop_err(cpu, "mmu", errp);
1726         return;
1727     }
1728 
1729     cpu_option_add_user_setting(name, value);
1730     cpu->cfg.mmu = value;
1731 }
1732 
1733 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1734                          void *opaque, Error **errp)
1735 {
1736     bool value = RISCV_CPU(obj)->cfg.mmu;
1737 
1738     visit_type_bool(v, name, &value, errp);
1739 }
1740 
1741 static const PropertyInfo prop_mmu = {
1742     .name = "mmu",
1743     .get = prop_mmu_get,
1744     .set = prop_mmu_set,
1745 };
1746 
1747 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1748                          void *opaque, Error **errp)
1749 {
1750     RISCVCPU *cpu = RISCV_CPU(obj);
1751     bool value;
1752 
1753     if (!visit_type_bool(v, name, &value, errp)) {
             return;
         }
1754 
1755     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1756         cpu_set_prop_err(cpu, name, errp);
1757         return;
1758     }
1759 
1760     cpu_option_add_user_setting(name, value);
1761     cpu->cfg.pmp = value;
1762 }
1763 
1764 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1765                          void *opaque, Error **errp)
1766 {
1767     bool value = RISCV_CPU(obj)->cfg.pmp;
1768 
1769     visit_type_bool(v, name, &value, errp);
1770 }
1771 
1772 static const PropertyInfo prop_pmp = {
1773     .name = "pmp",
1774     .get = prop_pmp_get,
1775     .set = prop_pmp_set,
1776 };
1777 
1778 static int priv_spec_from_str(const char *priv_spec_str)
1779 {
1780     int priv_version = -1;
1781 
1782     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1783         priv_version = PRIV_VERSION_1_13_0;
1784     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1785         priv_version = PRIV_VERSION_1_12_0;
1786     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1787         priv_version = PRIV_VERSION_1_11_0;
1788     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1789         priv_version = PRIV_VERSION_1_10_0;
1790     }
1791 
1792     return priv_version;
1793 }
1794 
1795 const char *priv_spec_to_str(int priv_version)
1796 {
1797     switch (priv_version) {
1798     case PRIV_VERSION_1_10_0:
1799         return PRIV_VER_1_10_0_STR;
1800     case PRIV_VERSION_1_11_0:
1801         return PRIV_VER_1_11_0_STR;
1802     case PRIV_VERSION_1_12_0:
1803         return PRIV_VER_1_12_0_STR;
1804     case PRIV_VERSION_1_13_0:
1805         return PRIV_VER_1_13_0_STR;
1806     default:
1807         return NULL;
1808     }
1809 }
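/*
 * Illustrative use of the priv_spec property handled below, assuming the
 * PRIV_VER_*_STR constants expand to strings of the form "v1.12.0":
 *
 *     -cpu rv64,priv_spec=v1.12.0
 *
 * Unknown strings are rejected via priv_spec_from_str() returning -1.
 */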
1810 
1811 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1812                                void *opaque, Error **errp)
1813 {
1814     RISCVCPU *cpu = RISCV_CPU(obj);
1815     g_autofree char *value = NULL;
1816     int priv_version = -1;
1817 
1818     if (!visit_type_str(v, name, &value, errp)) {
             return;
         }
1819 
1820     priv_version = priv_spec_from_str(value);
1821     if (priv_version < 0) {
1822         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1823         return;
1824     }
1825 
1826     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1827         cpu_set_prop_err(cpu, name, errp);
1828         error_append_hint(errp, "Current '%s' val: %s\n", name,
1829                           object_property_get_str(obj, name, NULL));
1830         return;
1831     }
1832 
1833     cpu_option_add_user_setting(name, priv_version);
1834     cpu->env.priv_ver = priv_version;
1835 }
1836 
1837 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1838                                void *opaque, Error **errp)
1839 {
1840     RISCVCPU *cpu = RISCV_CPU(obj);
1841     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1842 
1843     visit_type_str(v, name, (char **)&value, errp);
1844 }
1845 
1846 static const PropertyInfo prop_priv_spec = {
1847     .name = "priv_spec",
1848     .get = prop_priv_spec_get,
1849     .set = prop_priv_spec_set,
1850 };
1851 
1852 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1853                                void *opaque, Error **errp)
1854 {
1855     RISCVCPU *cpu = RISCV_CPU(obj);
1856     g_autofree char *value = NULL;
1857 
1858     if (!visit_type_str(v, name, &value, errp)) {
             return;
         }
1859 
1860     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1861         error_setg(errp, "Unsupported vector spec version '%s'", value);
1862         return;
1863     }
1864 
1865     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1866     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1867 }
1868 
1869 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1870                                void *opaque, Error **errp)
1871 {
1872     const char *value = VEXT_VER_1_00_0_STR;
1873 
1874     visit_type_str(v, name, (char **)&value, errp);
1875 }
1876 
1877 static const PropertyInfo prop_vext_spec = {
1878     .name = "vext_spec",
1879     .get = prop_vext_spec_get,
1880     .set = prop_vext_spec_set,
1881 };
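/*
 * Only the ratified 1.0 vector spec is accepted here, so setting
 * vext_spec is effectively a confirmation, e.g. (assuming
 * VEXT_VER_1_00_0_STR is "v1.0"):
 *
 *     -cpu rv64,v=true,vext_spec=v1.0
 */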
1882 
1883 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1884                          void *opaque, Error **errp)
1885 {
1886     RISCVCPU *cpu = RISCV_CPU(obj);
1887     uint16_t value;
1888 
1889     if (!visit_type_uint16(v, name, &value, errp)) {
1890         return;
1891     }
1892 
1893     if (!is_power_of_2(value)) {
1894         error_setg(errp, "Vector extension VLEN must be a power of 2");
1895         return;
1896     }
1897 
1898     if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) {
1899         cpu_set_prop_err(cpu, name, errp);
1900         error_append_hint(errp, "Current '%s' val: %u\n",
1901                           name, cpu->cfg.vlenb << 3);
1902         return;
1903     }
1904 
1905     cpu_option_add_user_setting(name, value);
1906     cpu->cfg.vlenb = value >> 3;
1907 }
1908 
1909 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1910                          void *opaque, Error **errp)
1911 {
1912     uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1913 
1914     visit_type_uint16(v, name, &value, errp);
1915 }
1916 
1917 static const PropertyInfo prop_vlen = {
1918     .name = "vlen",
1919     .get = prop_vlen_get,
1920     .set = prop_vlen_set,
1921 };
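/*
 * "vlen" is exposed to the user in bits but stored internally as vlenb
 * (bytes). As an illustration, -cpu rv64,v=true,vlen=256 stores
 * cfg.vlenb = 32; only the power-of-two check is done here, with any
 * remaining range checks left to later validation.
 */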
1922 
1923 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1924                          void *opaque, Error **errp)
1925 {
1926     RISCVCPU *cpu = RISCV_CPU(obj);
1927     uint16_t value;
1928 
1929     if (!visit_type_uint16(v, name, &value, errp)) {
1930         return;
1931     }
1932 
1933     if (!is_power_of_2(value)) {
1934         error_setg(errp, "Vector extension ELEN must be a power of 2");
1935         return;
1936     }
1937 
1938     if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1939         cpu_set_prop_err(cpu, name, errp);
1940         error_append_hint(errp, "Current '%s' val: %u\n",
1941                           name, cpu->cfg.elen);
1942         return;
1943     }
1944 
1945     cpu_option_add_user_setting(name, value);
1946     cpu->cfg.elen = value;
1947 }
1948 
1949 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1950                          void *opaque, Error **errp)
1951 {
1952     uint16_t value = RISCV_CPU(obj)->cfg.elen;
1953 
1954     visit_type_uint16(v, name, &value, errp);
1955 }
1956 
1957 static const PropertyInfo prop_elen = {
1958     .name = "elen",
1959     .get = prop_elen_get,
1960     .set = prop_elen_set,
1961 };
1962 
1963 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1964                                   void *opaque, Error **errp)
1965 {
1966     RISCVCPU *cpu = RISCV_CPU(obj);
1967     uint16_t value;
1968 
1969     if (!visit_type_uint16(v, name, &value, errp)) {
1970         return;
1971     }
1972 
1973     if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1974         cpu_set_prop_err(cpu, name, errp);
1975         error_append_hint(errp, "Current '%s' val: %u\n",
1976                           name, cpu->cfg.cbom_blocksize);
1977         return;
1978     }
1979 
1980     cpu_option_add_user_setting(name, value);
1981     cpu->cfg.cbom_blocksize = value;
1982 }
1983 
1984 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1985                          void *opaque, Error **errp)
1986 {
1987     uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1988 
1989     visit_type_uint16(v, name, &value, errp);
1990 }
1991 
1992 static const PropertyInfo prop_cbom_blksize = {
1993     .name = "cbom_blocksize",
1994     .get = prop_cbom_blksize_get,
1995     .set = prop_cbom_blksize_set,
1996 };
1997 
1998 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
1999                                   void *opaque, Error **errp)
2000 {
2001     RISCVCPU *cpu = RISCV_CPU(obj);
2002     uint16_t value;
2003 
2004     if (!visit_type_uint16(v, name, &value, errp)) {
2005         return;
2006     }
2007 
2008     if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2009         cpu_set_prop_err(cpu, name, errp);
2010         error_append_hint(errp, "Current '%s' val: %u\n",
2011                           name, cpu->cfg.cbop_blocksize);
2012         return;
2013     }
2014 
2015     cpu_option_add_user_setting(name, value);
2016     cpu->cfg.cbop_blocksize = value;
2017 }
2018 
2019 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2020                          void *opaque, Error **errp)
2021 {
2022     uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2023 
2024     visit_type_uint16(v, name, &value, errp);
2025 }
2026 
2027 static const PropertyInfo prop_cbop_blksize = {
2028     .name = "cbop_blocksize",
2029     .get = prop_cbop_blksize_get,
2030     .set = prop_cbop_blksize_set,
2031 };
2032 
2033 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2034                                   void *opaque, Error **errp)
2035 {
2036     RISCVCPU *cpu = RISCV_CPU(obj);
2037     uint16_t value;
2038 
2039     if (!visit_type_uint16(v, name, &value, errp)) {
2040         return;
2041     }
2042 
2043     if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2044         cpu_set_prop_err(cpu, name, errp);
2045         error_append_hint(errp, "Current '%s' val: %u\n",
2046                           name, cpu->cfg.cboz_blocksize);
2047         return;
2048     }
2049 
2050     cpu_option_add_user_setting(name, value);
2051     cpu->cfg.cboz_blocksize = value;
2052 }
2053 
2054 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2055                          void *opaque, Error **errp)
2056 {
2057     uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2058 
2059     visit_type_uint16(v, name, &value, errp);
2060 }
2061 
2062 static const PropertyInfo prop_cboz_blksize = {
2063     .name = "cboz_blocksize",
2064     .get = prop_cboz_blksize_get,
2065     .set = prop_cboz_blksize_set,
2066 };
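/*
 * The cbop and cboz block-size properties above share the same handler
 * shape as cbom_blocksize. Illustrative usage (any value constraints
 * beyond what is shown here are enforced by later validation):
 *
 *     -cpu rv64,cbom_blocksize=64,cboz_blocksize=64
 */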
2067 
2068 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2069                                void *opaque, Error **errp)
2070 {
2071     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2072     RISCVCPU *cpu = RISCV_CPU(obj);
2073     uint32_t prev_val = cpu->cfg.mvendorid;
2074     uint32_t value;
2075 
2076     if (!visit_type_uint32(v, name, &value, errp)) {
2077         return;
2078     }
2079 
2080     if (!dynamic_cpu && prev_val != value) {
2081         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2082                    object_get_typename(obj), prev_val);
2083         return;
2084     }
2085 
2086     cpu->cfg.mvendorid = value;
2087 }
2088 
2089 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2090                                void *opaque, Error **errp)
2091 {
2092     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2093 
2094     visit_type_uint32(v, name, &value, errp);
2095 }
2096 
2097 static const PropertyInfo prop_mvendorid = {
2098     .name = "mvendorid",
2099     .get = prop_mvendorid_get,
2100     .set = prop_mvendorid_set,
2101 };
2102 
2103 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2104                             void *opaque, Error **errp)
2105 {
2106     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2107     RISCVCPU *cpu = RISCV_CPU(obj);
2108     uint64_t prev_val = cpu->cfg.mimpid;
2109     uint64_t value;
2110 
2111     if (!visit_type_uint64(v, name, &value, errp)) {
2112         return;
2113     }
2114 
2115     if (!dynamic_cpu && prev_val != value) {
2116         error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2117                    object_get_typename(obj), prev_val);
2118         return;
2119     }
2120 
2121     cpu->cfg.mimpid = value;
2122 }
2123 
2124 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2125                             void *opaque, Error **errp)
2126 {
2127     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2128 
2129     visit_type_uint64(v, name, &value, errp);
2130 }
2131 
2132 static const PropertyInfo prop_mimpid = {
2133     .name = "mimpid",
2134     .get = prop_mimpid_get,
2135     .set = prop_mimpid_set,
2136 };
2137 
2138 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2139                              void *opaque, Error **errp)
2140 {
2141     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2142     RISCVCPU *cpu = RISCV_CPU(obj);
2143     uint64_t prev_val = cpu->cfg.marchid;
2144     uint64_t value, invalid_val;
2145     uint32_t mxlen = 0;
2146 
2147     if (!visit_type_uint64(v, name, &value, errp)) {
2148         return;
2149     }
2150 
2151     if (!dynamic_cpu && prev_val != value) {
2152         error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2153                    object_get_typename(obj), prev_val);
2154         return;
2155     }
2156 
2157     switch (riscv_cpu_mxl(&cpu->env)) {
2158     case MXL_RV32:
2159         mxlen = 32;
2160         break;
2161     case MXL_RV64:
2162     case MXL_RV128:
2163         mxlen = 64;
2164         break;
2165     default:
2166         g_assert_not_reached();
2167     }
2168 
2169     invalid_val = 1ULL << (mxlen - 1);
2170 
2171     if (value == invalid_val) {
2172         error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2173                          "and the remaining bits zero", mxlen);
2174         return;
2175     }
2176 
2177     cpu->cfg.marchid = value;
2178 }
2179 
2180 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2181                              void *opaque, Error **errp)
2182 {
2183     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2184 
2185     visit_type_uint64(v, name, &value, errp);
2186 }
2187 
2188 static const PropertyInfo prop_marchid = {
2189     .name = "marchid",
2190     .get = prop_marchid_get,
2191     .set = prop_marchid_set,
2192 };
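/*
 * For marchid, the only value rejected above is "MSB set, all other bits
 * zero": e.g. on an RV64 CPU, marchid=0x8000000000000000 fails while
 * 0x8000000000000001 or any value with the MSB clear is accepted
 * (subject to the vendor-CPU restriction).
 */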
2193 
2194 /*
2195  * RVA22U64 defines some 'named features' that are cache
2196  * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
2197  * and Zicclsm. They are always implemented in TCG and
2198  * don't need to be manually enabled by the profile.
2199  */
2200 static RISCVCPUProfile RVA22U64 = {
2201     .parent = NULL,
2202     .name = "rva22u64",
2203     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2204     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2205     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2206     .ext_offsets = {
2207         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2208         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2209         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2210         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2211         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2212         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2213 
2214         /* mandatory named features for this profile */
2215         CPU_CFG_OFFSET(ext_zic64b),
2216 
2217         RISCV_PROFILE_EXT_LIST_END
2218     }
2219 };
2220 
2221 /*
2222  * As with RVA22U64, RVA22S64 also defines 'named features'.
2223  *
2224  * Cache-related features that we consider enabled since we don't
2225  * implement a cache: Ssccptr
2226  *
2227  * Other named features that we already implement: Sstvecd, Sstvala,
2228  * Sscounterenw
2229  *
2230  * The remaining features/extensions come from RVA22U64.
2231  */
2232 static RISCVCPUProfile RVA22S64 = {
2233     .parent = &RVA22U64,
2234     .name = "rva22s64",
2235     .misa_ext = RVS,
2236     .priv_spec = PRIV_VERSION_1_12_0,
2237     .satp_mode = VM_1_10_SV39,
2238     .ext_offsets = {
2239         /* rva22s64 exts */
2240         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2241         CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2242 
2243         RISCV_PROFILE_EXT_LIST_END
2244     }
2245 };
2246 
2247 RISCVCPUProfile *riscv_profiles[] = {
2248     &RVA22U64,
2249     &RVA22S64,
2250     NULL,
2251 };
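/*
 * Profiles are exposed as their own CPU types (registered at the bottom
 * of this file), so a minimal illustrative invocation is simply:
 *
 *     -cpu rva22u64
 *
 * RVA22S64 chains to RVA22U64 via .parent and therefore inherits its
 * extension list in addition to the rva22s64-specific entries above.
 */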
2252 
2253 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2254     .is_misa = true,
2255     .ext = RVA,
2256     .implied_multi_exts = {
2257         CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2258 
2259         RISCV_IMPLIED_EXTS_RULE_END
2260     },
2261 };
2262 
2263 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2264     .is_misa = true,
2265     .ext = RVD,
2266     .implied_misa_exts = RVF,
2267     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2268 };
2269 
2270 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2271     .is_misa = true,
2272     .ext = RVF,
2273     .implied_multi_exts = {
2274         CPU_CFG_OFFSET(ext_zicsr),
2275 
2276         RISCV_IMPLIED_EXTS_RULE_END
2277     },
2278 };
2279 
2280 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2281     .is_misa = true,
2282     .ext = RVM,
2283     .implied_multi_exts = {
2284         CPU_CFG_OFFSET(ext_zmmul),
2285 
2286         RISCV_IMPLIED_EXTS_RULE_END
2287     },
2288 };
2289 
2290 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2291     .is_misa = true,
2292     .ext = RVV,
2293     .implied_multi_exts = {
2294         CPU_CFG_OFFSET(ext_zve64d),
2295 
2296         RISCV_IMPLIED_EXTS_RULE_END
2297     },
2298 };
2299 
2300 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2301     .ext = CPU_CFG_OFFSET(ext_zcb),
2302     .implied_multi_exts = {
2303         CPU_CFG_OFFSET(ext_zca),
2304 
2305         RISCV_IMPLIED_EXTS_RULE_END
2306     },
2307 };
2308 
2309 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2310     .ext = CPU_CFG_OFFSET(ext_zcd),
2311     .implied_misa_exts = RVD,
2312     .implied_multi_exts = {
2313         CPU_CFG_OFFSET(ext_zca),
2314 
2315         RISCV_IMPLIED_EXTS_RULE_END
2316     },
2317 };
2318 
2319 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2320     .ext = CPU_CFG_OFFSET(ext_zce),
2321     .implied_multi_exts = {
2322         CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2323         CPU_CFG_OFFSET(ext_zcmt),
2324 
2325         RISCV_IMPLIED_EXTS_RULE_END
2326     },
2327 };
2328 
2329 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2330     .ext = CPU_CFG_OFFSET(ext_zcf),
2331     .implied_misa_exts = RVF,
2332     .implied_multi_exts = {
2333         CPU_CFG_OFFSET(ext_zca),
2334 
2335         RISCV_IMPLIED_EXTS_RULE_END
2336     },
2337 };
2338 
2339 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2340     .ext = CPU_CFG_OFFSET(ext_zcmp),
2341     .implied_multi_exts = {
2342         CPU_CFG_OFFSET(ext_zca),
2343 
2344         RISCV_IMPLIED_EXTS_RULE_END
2345     },
2346 };
2347 
2348 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2349     .ext = CPU_CFG_OFFSET(ext_zcmt),
2350     .implied_multi_exts = {
2351         CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2352 
2353         RISCV_IMPLIED_EXTS_RULE_END
2354     },
2355 };
2356 
2357 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2358     .ext = CPU_CFG_OFFSET(ext_zdinx),
2359     .implied_multi_exts = {
2360         CPU_CFG_OFFSET(ext_zfinx),
2361 
2362         RISCV_IMPLIED_EXTS_RULE_END
2363     },
2364 };
2365 
2366 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2367     .ext = CPU_CFG_OFFSET(ext_zfa),
2368     .implied_misa_exts = RVF,
2369     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2370 };
2371 
2372 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2373     .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2374     .implied_misa_exts = RVF,
2375     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2376 };
2377 
2378 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2379     .ext = CPU_CFG_OFFSET(ext_zfh),
2380     .implied_multi_exts = {
2381         CPU_CFG_OFFSET(ext_zfhmin),
2382 
2383         RISCV_IMPLIED_EXTS_RULE_END
2384     },
2385 };
2386 
2387 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2388     .ext = CPU_CFG_OFFSET(ext_zfhmin),
2389     .implied_misa_exts = RVF,
2390     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2391 };
2392 
2393 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2394     .ext = CPU_CFG_OFFSET(ext_zfinx),
2395     .implied_multi_exts = {
2396         CPU_CFG_OFFSET(ext_zicsr),
2397 
2398         RISCV_IMPLIED_EXTS_RULE_END
2399     },
2400 };
2401 
2402 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2403     .ext = CPU_CFG_OFFSET(ext_zhinx),
2404     .implied_multi_exts = {
2405         CPU_CFG_OFFSET(ext_zhinxmin),
2406 
2407         RISCV_IMPLIED_EXTS_RULE_END
2408     },
2409 };
2410 
2411 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2412     .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2413     .implied_multi_exts = {
2414         CPU_CFG_OFFSET(ext_zfinx),
2415 
2416         RISCV_IMPLIED_EXTS_RULE_END
2417     },
2418 };
2419 
2420 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2421     .ext = CPU_CFG_OFFSET(ext_zicntr),
2422     .implied_multi_exts = {
2423         CPU_CFG_OFFSET(ext_zicsr),
2424 
2425         RISCV_IMPLIED_EXTS_RULE_END
2426     },
2427 };
2428 
2429 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2430     .ext = CPU_CFG_OFFSET(ext_zihpm),
2431     .implied_multi_exts = {
2432         CPU_CFG_OFFSET(ext_zicsr),
2433 
2434         RISCV_IMPLIED_EXTS_RULE_END
2435     },
2436 };
2437 
2438 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2439     .ext = CPU_CFG_OFFSET(ext_zk),
2440     .implied_multi_exts = {
2441         CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2442         CPU_CFG_OFFSET(ext_zkt),
2443 
2444         RISCV_IMPLIED_EXTS_RULE_END
2445     },
2446 };
2447 
2448 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2449     .ext = CPU_CFG_OFFSET(ext_zkn),
2450     .implied_multi_exts = {
2451         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2452         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2453         CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2454 
2455         RISCV_IMPLIED_EXTS_RULE_END
2456     },
2457 };
2458 
2459 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2460     .ext = CPU_CFG_OFFSET(ext_zks),
2461     .implied_multi_exts = {
2462         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2463         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2464         CPU_CFG_OFFSET(ext_zksh),
2465 
2466         RISCV_IMPLIED_EXTS_RULE_END
2467     },
2468 };
2469 
2470 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2471     .ext = CPU_CFG_OFFSET(ext_zvbb),
2472     .implied_multi_exts = {
2473         CPU_CFG_OFFSET(ext_zvkb),
2474 
2475         RISCV_IMPLIED_EXTS_RULE_END
2476     },
2477 };
2478 
2479 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2480     .ext = CPU_CFG_OFFSET(ext_zve32f),
2481     .implied_misa_exts = RVF,
2482     .implied_multi_exts = {
2483         CPU_CFG_OFFSET(ext_zve32x),
2484 
2485         RISCV_IMPLIED_EXTS_RULE_END
2486     },
2487 };
2488 
2489 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2490     .ext = CPU_CFG_OFFSET(ext_zve32x),
2491     .implied_multi_exts = {
2492         CPU_CFG_OFFSET(ext_zicsr),
2493 
2494         RISCV_IMPLIED_EXTS_RULE_END
2495     },
2496 };
2497 
2498 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2499     .ext = CPU_CFG_OFFSET(ext_zve64d),
2500     .implied_misa_exts = RVD,
2501     .implied_multi_exts = {
2502         CPU_CFG_OFFSET(ext_zve64f),
2503 
2504         RISCV_IMPLIED_EXTS_RULE_END
2505     },
2506 };
2507 
2508 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2509     .ext = CPU_CFG_OFFSET(ext_zve64f),
2510     .implied_misa_exts = RVF,
2511     .implied_multi_exts = {
2512         CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2513 
2514         RISCV_IMPLIED_EXTS_RULE_END
2515     },
2516 };
2517 
2518 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2519     .ext = CPU_CFG_OFFSET(ext_zve64x),
2520     .implied_multi_exts = {
2521         CPU_CFG_OFFSET(ext_zve32x),
2522 
2523         RISCV_IMPLIED_EXTS_RULE_END
2524     },
2525 };
2526 
2527 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2528     .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2529     .implied_multi_exts = {
2530         CPU_CFG_OFFSET(ext_zve32f),
2531 
2532         RISCV_IMPLIED_EXTS_RULE_END
2533     },
2534 };
2535 
2536 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2537     .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2538     .implied_multi_exts = {
2539         CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2540 
2541         RISCV_IMPLIED_EXTS_RULE_END
2542     },
2543 };
2544 
2545 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2546     .ext = CPU_CFG_OFFSET(ext_zvfh),
2547     .implied_multi_exts = {
2548         CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2549 
2550         RISCV_IMPLIED_EXTS_RULE_END
2551     },
2552 };
2553 
2554 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2555     .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2556     .implied_multi_exts = {
2557         CPU_CFG_OFFSET(ext_zve32f),
2558 
2559         RISCV_IMPLIED_EXTS_RULE_END
2560     },
2561 };
2562 
2563 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2564     .ext = CPU_CFG_OFFSET(ext_zvkn),
2565     .implied_multi_exts = {
2566         CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2567         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2568 
2569         RISCV_IMPLIED_EXTS_RULE_END
2570     },
2571 };
2572 
2573 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2574     .ext = CPU_CFG_OFFSET(ext_zvknc),
2575     .implied_multi_exts = {
2576         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2577 
2578         RISCV_IMPLIED_EXTS_RULE_END
2579     },
2580 };
2581 
2582 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2583     .ext = CPU_CFG_OFFSET(ext_zvkng),
2584     .implied_multi_exts = {
2585         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2586 
2587         RISCV_IMPLIED_EXTS_RULE_END
2588     },
2589 };
2590 
2591 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2592     .ext = CPU_CFG_OFFSET(ext_zvknhb),
2593     .implied_multi_exts = {
2594         CPU_CFG_OFFSET(ext_zve64x),
2595 
2596         RISCV_IMPLIED_EXTS_RULE_END
2597     },
2598 };
2599 
2600 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2601     .ext = CPU_CFG_OFFSET(ext_zvks),
2602     .implied_multi_exts = {
2603         CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2604         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2605 
2606         RISCV_IMPLIED_EXTS_RULE_END
2607     },
2608 };
2609 
2610 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2611     .ext = CPU_CFG_OFFSET(ext_zvksc),
2612     .implied_multi_exts = {
2613         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2614 
2615         RISCV_IMPLIED_EXTS_RULE_END
2616     },
2617 };
2618 
2619 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2620     .ext = CPU_CFG_OFFSET(ext_zvksg),
2621     .implied_multi_exts = {
2622         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2623 
2624         RISCV_IMPLIED_EXTS_RULE_END
2625     },
2626 };
2627 
2628 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2629     &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2630     &RVM_IMPLIED, &RVV_IMPLIED, NULL
2631 };
2632 
2633 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2634     &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2635     &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2636     &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2637     &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2638     &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2639     &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2640     &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2641     &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2642     &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2643     &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2644     &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2645     &ZVKS_IMPLIED,  &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2646     NULL
2647 };
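/*
 * Worked example of how these rules chain, purely for illustration:
 * enabling zvknc implies zvkn and zvbc; zvkn in turn implies zvkned,
 * zvknhb, zvkb and zvkt; zvknhb implies zve64x, which implies zve32x,
 * which finally implies zicsr. A single '-cpu rv64,zvknc=true' is thus
 * expected to end up enabling that whole set.
 */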
2648 
2649 static Property riscv_cpu_properties[] = {
2650     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2651 
2652     {.name = "pmu-mask", .info = &prop_pmu_mask},
2653     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2654 
2655     {.name = "mmu", .info = &prop_mmu},
2656     {.name = "pmp", .info = &prop_pmp},
2657 
2658     {.name = "priv_spec", .info = &prop_priv_spec},
2659     {.name = "vext_spec", .info = &prop_vext_spec},
2660 
2661     {.name = "vlen", .info = &prop_vlen},
2662     {.name = "elen", .info = &prop_elen},
2663 
2664     {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2665     {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2666     {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2667 
2668     {.name = "mvendorid", .info = &prop_mvendorid},
2669     {.name = "mimpid", .info = &prop_mimpid},
2670     {.name = "marchid", .info = &prop_marchid},
2671 
2672 #ifndef CONFIG_USER_ONLY
2673     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2674 #endif
2675 
2676     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2677 
2678     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2679     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2680     DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),
2681 
2682     /*
2683      * write_misa() is marked as experimental for now, so mark
2684      * the property with the 'x-' prefix and default it to 'false'.
2685      */
2686     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2687     DEFINE_PROP_END_OF_LIST(),
2688 };
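/*
 * These base properties are attached to every RISC-V CPU class via
 * device_class_set_props() in riscv_cpu_common_class_init() below.
 * A hypothetical command line combining several of them:
 *
 *     -cpu rv64,pmp=true,mmu=true,pmu-mask=0x1ff8,short-isa-string=false
 */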
2689 
2690 #if defined(TARGET_RISCV64)
2691 static void rva22u64_profile_cpu_init(Object *obj)
2692 {
2693     rv64i_bare_cpu_init(obj);
2694 
2695     RVA22U64.enabled = true;
2696 }
2697 
2698 static void rva22s64_profile_cpu_init(Object *obj)
2699 {
2700     rv64i_bare_cpu_init(obj);
2701 
2702     RVA22S64.enabled = true;
2703 }
2704 #endif
2705 
2706 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2707 {
2708     RISCVCPU *cpu = RISCV_CPU(cs);
2709     CPURISCVState *env = &cpu->env;
2710 
2711     switch (riscv_cpu_mxl(env)) {
2712     case MXL_RV32:
2713         return "riscv:rv32";
2714     case MXL_RV64:
2715     case MXL_RV128:
2716         return "riscv:rv64";
2717     default:
2718         g_assert_not_reached();
2719     }
2720 }
2721 
2722 #ifndef CONFIG_USER_ONLY
2723 static int64_t riscv_get_arch_id(CPUState *cs)
2724 {
2725     RISCVCPU *cpu = RISCV_CPU(cs);
2726 
2727     return cpu->env.mhartid;
2728 }
2729 
2730 #include "hw/core/sysemu-cpu-ops.h"
2731 
2732 static const struct SysemuCPUOps riscv_sysemu_ops = {
2733     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2734     .write_elf64_note = riscv_cpu_write_elf64_note,
2735     .write_elf32_note = riscv_cpu_write_elf32_note,
2736     .legacy_vmsd = &vmstate_riscv_cpu,
2737 };
2738 #endif
2739 
2740 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2741 {
2742     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2743     CPUClass *cc = CPU_CLASS(c);
2744     DeviceClass *dc = DEVICE_CLASS(c);
2745     ResettableClass *rc = RESETTABLE_CLASS(c);
2746 
2747     device_class_set_parent_realize(dc, riscv_cpu_realize,
2748                                     &mcc->parent_realize);
2749 
2750     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2751                                        &mcc->parent_phases);
2752 
2753     cc->class_by_name = riscv_cpu_class_by_name;
2754     cc->has_work = riscv_cpu_has_work;
2755     cc->mmu_index = riscv_cpu_mmu_index;
2756     cc->dump_state = riscv_cpu_dump_state;
2757     cc->set_pc = riscv_cpu_set_pc;
2758     cc->get_pc = riscv_cpu_get_pc;
2759     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2760     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2761     cc->gdb_stop_before_watchpoint = true;
2762     cc->disas_set_info = riscv_cpu_disas_set_info;
2763 #ifndef CONFIG_USER_ONLY
2764     cc->sysemu_ops = &riscv_sysemu_ops;
2765     cc->get_arch_id = riscv_get_arch_id;
2766 #endif
2767     cc->gdb_arch_name = riscv_gdb_arch_name;
2768 
2769     device_class_set_props(dc, riscv_cpu_properties);
2770 }
2771 
2772 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2773 {
2774     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2775 
2776     mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2777     riscv_cpu_validate_misa_mxl(mcc);
2778 }
2779 
2780 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2781                                  int max_str_len)
2782 {
2783     const RISCVIsaExtData *edata;
2784     char *old = *isa_str;
2785     char *new = *isa_str;
2786 
2787     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2788         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2789             new = g_strconcat(old, "_", edata->name, NULL);
2790             g_free(old);
2791             old = new;
2792         }
2793     }
2794 
2795     *isa_str = new;
2796 }
2797 
2798 char *riscv_isa_string(RISCVCPU *cpu)
2799 {
2800     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2801     int i;
2802     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2803     char *isa_str = g_new(char, maxlen);
2804     int xlen = riscv_cpu_max_xlen(mcc);
2805     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2806 
2807     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2808         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2809             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2810         }
2811     }
2812     *p = '\0';
2813     if (!cpu->cfg.short_isa_string) {
2814         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2815     }
2816     return isa_str;
2817 }
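/*
 * Example of the strings produced above (the exact suffix order follows
 * isa_edata_arr): an RV64 CPU with IMAFDC plus Zicsr and Zifencei yields
 * something like "rv64imafdc_zicsr_zifencei", while short-isa-string=true
 * stops after the single-letter part, i.e. "rv64imafdc".
 */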
2818 
2819 #ifndef CONFIG_USER_ONLY
2820 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2821 {
2822     int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2823     char **extensions = g_new(char *, maxlen);
2824 
2825     for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2826         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2827             extensions[*count] = g_new(char, 2);
2828             snprintf(extensions[*count], 2, "%c",
2829                      qemu_tolower(riscv_single_letter_exts[i]));
2830             (*count)++;
2831         }
2832     }
2833 
2834     for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2835         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2836             extensions[*count] = g_strdup(edata->name);
2837             (*count)++;
2838         }
2839     }
2840 
2841     return extensions;
2842 }
2843 
2844 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2845 {
2846     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2847     const size_t maxlen = sizeof("rv128i");
2848     g_autofree char *isa_base = g_new(char, maxlen);
2849     g_autofree char *riscv_isa;
2850     char **isa_extensions;
2851     int count = 0;
2852     int xlen = riscv_cpu_max_xlen(mcc);
2853 
2854     riscv_isa = riscv_isa_string(cpu);
2855     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2856 
2857     snprintf(isa_base, maxlen, "rv%di", xlen);
2858     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2859 
2860     isa_extensions = riscv_isa_extensions_list(cpu, &count);
2861     qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2862                                   isa_extensions, count);
2863 
2864     for (int i = 0; i < count; i++) {
2865         g_free(isa_extensions[i]);
2866     }
2867 
2868     g_free(isa_extensions);
2869 }
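/*
 * Sketch of the resulting device-tree properties for an RV64 CPU node,
 * values shown for illustration only:
 *
 *   riscv,isa            = "rv64imafdc_zicsr_zifencei...";
 *   riscv,isa-base       = "rv64i";
 *   riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicsr", ...;
 */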
2870 #endif
2871 
2872 #define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
2873     {                                                       \
2874         .name = (type_name),                                \
2875         .parent = TYPE_RISCV_CPU,                           \
2876         .instance_init = (initfn),                          \
2877         .class_init = riscv_cpu_class_init,                 \
2878         .class_data = (void *)(misa_mxl_max)                \
2879     }
2880 
2881 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2882     {                                                       \
2883         .name = (type_name),                                \
2884         .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
2885         .instance_init = (initfn),                          \
2886         .class_init = riscv_cpu_class_init,                 \
2887         .class_data = (void *)(misa_mxl_max)                \
2888     }
2889 
2890 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
2891     {                                                       \
2892         .name = (type_name),                                \
2893         .parent = TYPE_RISCV_VENDOR_CPU,                    \
2894         .instance_init = (initfn),                          \
2895         .class_init = riscv_cpu_class_init,                 \
2896         .class_data = (void *)(misa_mxl_max)                \
2897     }
2898 
2899 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
2900     {                                                       \
2901         .name = (type_name),                                \
2902         .parent = TYPE_RISCV_BARE_CPU,                      \
2903         .instance_init = (initfn),                          \
2904         .class_init = riscv_cpu_class_init,                 \
2905         .class_data = (void *)(misa_mxl_max)                \
2906     }
2907 
2908 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
2909     {                                                       \
2910         .name = (type_name),                                \
2911         .parent = TYPE_RISCV_BARE_CPU,                      \
2912         .instance_init = (initfn),                          \
2913         .class_init = riscv_cpu_class_init,                 \
2914         .class_data = (void *)(misa_mxl_max)                \
2915     }
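/*
 * The DEFINE_*_CPU() variants above differ only in the .parent type they
 * register under (profile CPUs reuse TYPE_RISCV_BARE_CPU), which is
 * presumably what riscv_cpu_is_vendor()/riscv_cpu_is_dynamic() key off;
 * the class_data carries the maximum MXL consumed by
 * riscv_cpu_class_init().
 */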
2916 
2917 static const TypeInfo riscv_cpu_type_infos[] = {
2918     {
2919         .name = TYPE_RISCV_CPU,
2920         .parent = TYPE_CPU,
2921         .instance_size = sizeof(RISCVCPU),
2922         .instance_align = __alignof(RISCVCPU),
2923         .instance_init = riscv_cpu_init,
2924         .instance_post_init = riscv_cpu_post_init,
2925         .abstract = true,
2926         .class_size = sizeof(RISCVCPUClass),
2927         .class_init = riscv_cpu_common_class_init,
2928     },
2929     {
2930         .name = TYPE_RISCV_DYNAMIC_CPU,
2931         .parent = TYPE_RISCV_CPU,
2932         .abstract = true,
2933     },
2934     {
2935         .name = TYPE_RISCV_VENDOR_CPU,
2936         .parent = TYPE_RISCV_CPU,
2937         .abstract = true,
2938     },
2939     {
2940         .name = TYPE_RISCV_BARE_CPU,
2941         .parent = TYPE_RISCV_CPU,
2942         .instance_init = riscv_bare_cpu_init,
2943         .abstract = true,
2944     },
2945 #if defined(TARGET_RISCV32)
2946     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV32,  riscv_max_cpu_init),
2947 #elif defined(TARGET_RISCV64)
2948     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV64,  riscv_max_cpu_init),
2949 #endif
2950 
2951 #if defined(TARGET_RISCV32) || \
2952     (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
2953     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,    MXL_RV32,  rv32_base_cpu_init),
2954     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,       MXL_RV32,  rv32_ibex_cpu_init),
2955     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32,  rv32_sifive_e_cpu_init),
2956     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32,  rv32_imafcu_nommu_cpu_init),
2957     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32,  rv32_sifive_u_cpu_init),
2958     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I,        MXL_RV32,  rv32i_bare_cpu_init),
2959     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E,        MXL_RV32,  rv32e_bare_cpu_init),
2960 #endif
2961 
2962 #if defined(TARGET_RISCV64)
2963     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,    MXL_RV64,  rv64_base_cpu_init),
2964     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64,  rv64_sifive_e_cpu_init),
2965     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64,  rv64_sifive_u_cpu_init),
2966     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,   MXL_RV64,  rv64_sifive_u_cpu_init),
2967     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64,  rv64_thead_c906_cpu_init),
2968     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,  MXL_RV64,  rv64_veyron_v1_cpu_init),
2969 #ifdef CONFIG_TCG
2970     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,   MXL_RV128, rv128_base_cpu_init),
2971 #endif /* CONFIG_TCG */
2972     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I,        MXL_RV64,  rv64i_bare_cpu_init),
2973     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E,        MXL_RV64,  rv64e_bare_cpu_init),
2974     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,  MXL_RV64,  rva22u64_profile_cpu_init),
2975     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64,  MXL_RV64,  rva22s64_profile_cpu_init),
2976 #endif /* TARGET_RISCV64 */
2977 };
2978 
2979 DEFINE_TYPES(riscv_cpu_type_infos)
2980