xref: /qemu/target/riscv/cpu.c (revision e4a8e093dc74be049f4829831dce76e5edab0003)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "system/device_tree.h"
36 #include "system/kvm.h"
37 #include "system/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41 
42 /* RISC-V CPU definitions */
43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
45                               RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46 
47 /*
48  * From vector_helper.c
49  * Note that vector data is stored in host-endian 64-bit chunks,
50  * so addressing bytes needs a host-endian fixup.
51  */
52 #if HOST_BIG_ENDIAN
53 #define BYTE(x)   ((x) ^ 7)
54 #else
55 #define BYTE(x)   (x)
56 #endif
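/*
 * Illustrative note: element byte 0 of a vector register lives at host
 * offset BYTE(0) within its 64-bit chunk, i.e. offset 0 on little-endian
 * hosts and offset 7 on big-endian hosts, so the guest-visible byte
 * order is the same either way.
 */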
57 
58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60     return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62 
63 /* Hash that stores general user set numeric options */
64 static GHashTable *general_user_opts;
65 
66 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
67 {
68     g_hash_table_insert(general_user_opts, (gpointer)optname,
69                         GUINT_TO_POINTER(value));
70 }
71 
72 bool riscv_cpu_option_set(const char *optname)
73 {
74     return g_hash_table_contains(general_user_opts, optname);
75 }
76 
77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
78     {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
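/*
 * Expansion sketch: ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0,
 * ext_zicsr) yields {"zicsr", PRIV_VERSION_1_10_0,
 * CPU_CFG_OFFSET(ext_zicsr)}, i.e. the extension name, the minimum
 * privileged spec version that supports it, and the byte offset of the
 * matching bool flag inside the CPU config struct.
 */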
79 
80 /*
81  * Here are the ordering rules for extension naming defined by the RISC-V
82  * specification:
83  * 1. All extensions should be separated from other multi-letter extensions
84  *    by an underscore.
85  * 2. The first letter following the 'Z' conventionally indicates the most
86  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87  *    If multiple 'Z' extensions are named, they should be ordered first
88  *    by category, then alphabetically within a category.
89  * 3. Standard supervisor-level extensions (starting with 'S') should be
90  *    listed after standard unprivileged extensions.  If multiple
91  *    supervisor-level extensions are listed, they should be ordered
92  *    alphabetically.
93  * 4. Non-standard extensions (starting with 'X') must be listed after all
94  *    standard extensions. They must be separated from other multi-letter
95  *    extensions by an underscore.
96  *
97  * Single-letter extensions are checked in riscv_cpu_validate_misa_priv()
98  * instead.
99  */
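/*
 * As an illustrative example, an ISA string that follows the rules
 * above could look like:
 *
 *   rv64imafdc_zicsr_zifencei_zba_zbb_svinval_xtheadba
 *
 * i.e. single-letter extensions first, then 'Z' extensions grouped by
 * category, then 'S' extensions, then vendor 'X' extensions, with each
 * multi-letter name separated by an underscore.
 */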
100 const RISCVIsaExtData isa_edata_arr[] = {
101     ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102     ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103     ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104     ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105     ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106     ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107     ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108     ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109     ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
110     ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
111     ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
112     ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
113     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
114     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
115     ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
116     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
117     ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
118     ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
119     ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
120     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
121     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
122     ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
123     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
124     ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
125     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
126     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
127     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
128     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
129     ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
130     ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
131     ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
132     ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
133     ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
134     ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
135     ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
136     ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
137     ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
138     ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
139     ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
140     ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
141     ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
142     ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
143     ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
144     ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
145     ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
146     ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
147     ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
148     ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
149     ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
150     ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
151     ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
152     ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
153     ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
154     ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
155     ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
156     ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
157     ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
158     ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
159     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
160     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
161     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
162     ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
163     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
164     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
165     ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
166     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
167     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
168     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
169     ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
170     ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
171     ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
172     ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
173     ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
174     ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
175     ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
176     ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
177     ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
178     ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
179     ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
180     ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
181     ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
182     ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
183     ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
184     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
185     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
186     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
187     ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
188     ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
189     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
190     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
191     ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
192     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
193     ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
194     ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
195     ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
196     ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
197     ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
198     ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
199     ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
200     ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
201     ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
202     ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
203     ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
204     ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
205     ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
206     ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
207     ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
208     ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
209     ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
210     ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
211     ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
212     ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
213     ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
214     ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
215     ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
216     ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
217 
218     { },
219 };
220 
221 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
222 {
223     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
224 
225     return *ext_enabled;
226 }
227 
228 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
229 {
230     bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
231 
232     *ext_enabled = en;
233 }
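/*
 * Both helpers above address cfg fields by byte offset.  For example,
 * reading the bool at (void *)&cpu->cfg + CPU_CFG_OFFSET(ext_zicsr) is
 * equivalent to reading cpu->cfg.ext_zicsr directly; the offsets come
 * from the isa_edata_arr[] entries and the property tables below.
 */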
234 
235 bool riscv_cpu_is_vendor(Object *cpu_obj)
236 {
237     return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
238 }
239 
240 const char * const riscv_int_regnames[] = {
241     "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
242     "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
243     "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
244     "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
245     "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
246 };
247 
248 const char * const riscv_int_regnamesh[] = {
249     "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
250     "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
251     "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
252     "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
253     "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
254     "x30h/t5h",  "x31h/t6h"
255 };
256 
257 const char * const riscv_fpr_regnames[] = {
258     "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
259     "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
260     "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
261     "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
262     "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
263     "f30/ft10", "f31/ft11"
264 };
265 
266 const char * const riscv_rvv_regnames[] = {
267   "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
268   "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
269   "v14", "v15", "v16", "v17", "v18", "v19", "v20",
270   "v21", "v22", "v23", "v24", "v25", "v26", "v27",
271   "v28", "v29", "v30", "v31"
272 };
273 
274 static const char * const riscv_excp_names[] = {
275     "misaligned_fetch",
276     "fault_fetch",
277     "illegal_instruction",
278     "breakpoint",
279     "misaligned_load",
280     "fault_load",
281     "misaligned_store",
282     "fault_store",
283     "user_ecall",
284     "supervisor_ecall",
285     "hypervisor_ecall",
286     "machine_ecall",
287     "exec_page_fault",
288     "load_page_fault",
289     "reserved",
290     "store_page_fault",
291     "reserved",
292     "reserved",
293     "reserved",
294     "reserved",
295     "guest_exec_page_fault",
296     "guest_load_page_fault",
297     "reserved",
298     "guest_store_page_fault",
299 };
300 
301 static const char * const riscv_intr_names[] = {
302     "u_software",
303     "s_software",
304     "vs_software",
305     "m_software",
306     "u_timer",
307     "s_timer",
308     "vs_timer",
309     "m_timer",
310     "u_external",
311     "s_external",
312     "vs_external",
313     "m_external",
314     "reserved",
315     "reserved",
316     "reserved",
317     "reserved"
318 };
319 
320 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
321 {
322     if (async) {
323         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
324                riscv_intr_names[cause] : "(unknown)";
325     } else {
326         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
327                riscv_excp_names[cause] : "(unknown)";
328     }
329 }
330 
331 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
332 {
333     env->misa_ext_mask = env->misa_ext = ext;
334 }
335 
336 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
337 {
338     return 16 << mcc->misa_mxl_max;
339 }
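/* E.g. misa_mxl_max == MXL_RV64 (2) gives 16 << 2 == 64. */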
340 
341 #ifndef CONFIG_USER_ONLY
342 static uint8_t satp_mode_from_str(const char *satp_mode_str)
343 {
344     if (!strncmp(satp_mode_str, "mbare", 5)) {
345         return VM_1_10_MBARE;
346     }
347 
348     if (!strncmp(satp_mode_str, "sv32", 4)) {
349         return VM_1_10_SV32;
350     }
351 
352     if (!strncmp(satp_mode_str, "sv39", 4)) {
353         return VM_1_10_SV39;
354     }
355 
356     if (!strncmp(satp_mode_str, "sv48", 4)) {
357         return VM_1_10_SV48;
358     }
359 
360     if (!strncmp(satp_mode_str, "sv57", 4)) {
361         return VM_1_10_SV57;
362     }
363 
364     if (!strncmp(satp_mode_str, "sv64", 4)) {
365         return VM_1_10_SV64;
366     }
367 
368     g_assert_not_reached();
369 }
370 
371 uint8_t satp_mode_max_from_map(uint32_t map)
372 {
373     /*
374      * 'map = 0' would have us call __builtin_clz(0), which is
375      * undefined behaviour, and there's no good result to return
376      * for it anyway (e.g. returning 0 would be ambiguous
377      * with the result for 'map = 1').
378      *
379      * Assert out if map = 0. Callers will have to deal with
380      * it outside of this function.
381      */
382     g_assert(map > 0);
383 
384     /* map here has at least one bit set, so no problem with clz */
385     return 31 - __builtin_clz(map);
386 }
387 
388 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
389 {
390     if (is_32_bit) {
391         switch (satp_mode) {
392         case VM_1_10_SV32:
393             return "sv32";
394         case VM_1_10_MBARE:
395             return "none";
396         }
397     } else {
398         switch (satp_mode) {
399         case VM_1_10_SV64:
400             return "sv64";
401         case VM_1_10_SV57:
402             return "sv57";
403         case VM_1_10_SV48:
404             return "sv48";
405         case VM_1_10_SV39:
406             return "sv39";
407         case VM_1_10_MBARE:
408             return "none";
409         }
410     }
411 
412     g_assert_not_reached();
413 }
414 
415 static void set_satp_mode_max_supported(RISCVCPU *cpu,
416                                         uint8_t satp_mode)
417 {
418     bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
419     const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
420 
421     for (int i = 0; i <= satp_mode; ++i) {
422         if (valid_vm[i]) {
423             cpu->cfg.satp_mode.supported |= (1 << i);
424         }
425     }
426 }
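/*
 * E.g. calling the helper above with VM_1_10_SV48 on an RV64 CPU marks
 * bare, sv39 and sv48 as supported (per valid_vm_1_10_64); sv57 and
 * sv64 stay clear because the loop never goes past the requested mode.
 */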
427 
428 /* Set the satp mode map to its default */
429 static void set_satp_mode_default_map(RISCVCPU *cpu)
430 {
431     /*
432      * Bare CPUs do not default to the max available.
433      * Users must set a valid satp_mode in the command
434      * line.
435      */
436     if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
437         warn_report("No satp mode set. Defaulting to 'bare'");
438         cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
439         return;
440     }
441 
442     cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
443 }
444 #endif
445 
446 static void riscv_max_cpu_init(Object *obj)
447 {
448     RISCVCPU *cpu = RISCV_CPU(obj);
449     CPURISCVState *env = &cpu->env;
450 
451     cpu->cfg.mmu = true;
452     cpu->cfg.pmp = true;
453 
454     env->priv_ver = PRIV_VERSION_LATEST;
455 #ifndef CONFIG_USER_ONLY
456     set_satp_mode_max_supported(RISCV_CPU(obj),
457         riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
458         VM_1_10_SV32 : VM_1_10_SV57);
459 #endif
460 }
461 
462 #if defined(TARGET_RISCV64)
463 static void rv64_base_cpu_init(Object *obj)
464 {
465     RISCVCPU *cpu = RISCV_CPU(obj);
466     CPURISCVState *env = &cpu->env;
467 
468     cpu->cfg.mmu = true;
469     cpu->cfg.pmp = true;
470 
471     /* Set latest version of privileged specification */
472     env->priv_ver = PRIV_VERSION_LATEST;
473 #ifndef CONFIG_USER_ONLY
474     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
475 #endif
476 }
477 
478 static void rv64_sifive_u_cpu_init(Object *obj)
479 {
480     RISCVCPU *cpu = RISCV_CPU(obj);
481     CPURISCVState *env = &cpu->env;
482     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
483     env->priv_ver = PRIV_VERSION_1_10_0;
484 #ifndef CONFIG_USER_ONLY
485     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
486 #endif
487 
488     /* inherited from parent obj via riscv_cpu_init() */
489     cpu->cfg.ext_zifencei = true;
490     cpu->cfg.ext_zicsr = true;
491     cpu->cfg.mmu = true;
492     cpu->cfg.pmp = true;
493 }
494 
495 static void rv64_sifive_e_cpu_init(Object *obj)
496 {
497     CPURISCVState *env = &RISCV_CPU(obj)->env;
498     RISCVCPU *cpu = RISCV_CPU(obj);
499 
500     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
501     env->priv_ver = PRIV_VERSION_1_10_0;
502 #ifndef CONFIG_USER_ONLY
503     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
504 #endif
505 
506     /* inherited from parent obj via riscv_cpu_init() */
507     cpu->cfg.ext_zifencei = true;
508     cpu->cfg.ext_zicsr = true;
509     cpu->cfg.pmp = true;
510 }
511 
512 static void rv64_thead_c906_cpu_init(Object *obj)
513 {
514     CPURISCVState *env = &RISCV_CPU(obj)->env;
515     RISCVCPU *cpu = RISCV_CPU(obj);
516 
517     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
518     env->priv_ver = PRIV_VERSION_1_11_0;
519 
520     cpu->cfg.ext_zfa = true;
521     cpu->cfg.ext_zfh = true;
522     cpu->cfg.mmu = true;
523     cpu->cfg.ext_xtheadba = true;
524     cpu->cfg.ext_xtheadbb = true;
525     cpu->cfg.ext_xtheadbs = true;
526     cpu->cfg.ext_xtheadcmo = true;
527     cpu->cfg.ext_xtheadcondmov = true;
528     cpu->cfg.ext_xtheadfmemidx = true;
529     cpu->cfg.ext_xtheadmac = true;
530     cpu->cfg.ext_xtheadmemidx = true;
531     cpu->cfg.ext_xtheadmempair = true;
532     cpu->cfg.ext_xtheadsync = true;
533 
534     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
535 #ifndef CONFIG_USER_ONLY
536     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
537     th_register_custom_csrs(cpu);
538 #endif
539 
540     /* inherited from parent obj via riscv_cpu_init() */
541     cpu->cfg.pmp = true;
542 }
543 
544 static void rv64_veyron_v1_cpu_init(Object *obj)
545 {
546     CPURISCVState *env = &RISCV_CPU(obj)->env;
547     RISCVCPU *cpu = RISCV_CPU(obj);
548 
549     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
550     env->priv_ver = PRIV_VERSION_1_12_0;
551 
552     /* Enable ISA extensions */
553     cpu->cfg.mmu = true;
554     cpu->cfg.ext_zifencei = true;
555     cpu->cfg.ext_zicsr = true;
556     cpu->cfg.pmp = true;
557     cpu->cfg.ext_zicbom = true;
558     cpu->cfg.cbom_blocksize = 64;
559     cpu->cfg.cboz_blocksize = 64;
560     cpu->cfg.ext_zicboz = true;
561     cpu->cfg.ext_smaia = true;
562     cpu->cfg.ext_ssaia = true;
563     cpu->cfg.ext_sscofpmf = true;
564     cpu->cfg.ext_sstc = true;
565     cpu->cfg.ext_svinval = true;
566     cpu->cfg.ext_svnapot = true;
567     cpu->cfg.ext_svpbmt = true;
568     cpu->cfg.ext_smstateen = true;
569     cpu->cfg.ext_zba = true;
570     cpu->cfg.ext_zbb = true;
571     cpu->cfg.ext_zbc = true;
572     cpu->cfg.ext_zbs = true;
573     cpu->cfg.ext_XVentanaCondOps = true;
574 
575     cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
576     cpu->cfg.marchid = VEYRON_V1_MARCHID;
577     cpu->cfg.mimpid = VEYRON_V1_MIMPID;
578 
579 #ifndef CONFIG_USER_ONLY
580     set_satp_mode_max_supported(cpu, VM_1_10_SV48);
581 #endif
582 }
583 
584 /* Tenstorrent Ascalon */
585 static void rv64_tt_ascalon_cpu_init(Object *obj)
586 {
587     CPURISCVState *env = &RISCV_CPU(obj)->env;
588     RISCVCPU *cpu = RISCV_CPU(obj);
589 
590     riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
591     env->priv_ver = PRIV_VERSION_1_13_0;
592 
593     /* Enable ISA extensions */
594     cpu->cfg.mmu = true;
595     cpu->cfg.vlenb = 256 >> 3;
596     cpu->cfg.elen = 64;
597     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
598     cpu->cfg.rvv_ma_all_1s = true;
599     cpu->cfg.rvv_ta_all_1s = true;
600     cpu->cfg.misa_w = true;
601     cpu->cfg.pmp = true;
602     cpu->cfg.cbom_blocksize = 64;
603     cpu->cfg.cbop_blocksize = 64;
604     cpu->cfg.cboz_blocksize = 64;
605     cpu->cfg.ext_zic64b = true;
606     cpu->cfg.ext_zicbom = true;
607     cpu->cfg.ext_zicbop = true;
608     cpu->cfg.ext_zicboz = true;
609     cpu->cfg.ext_zicntr = true;
610     cpu->cfg.ext_zicond = true;
611     cpu->cfg.ext_zicsr = true;
612     cpu->cfg.ext_zifencei = true;
613     cpu->cfg.ext_zihintntl = true;
614     cpu->cfg.ext_zihintpause = true;
615     cpu->cfg.ext_zihpm = true;
616     cpu->cfg.ext_zimop = true;
617     cpu->cfg.ext_zawrs = true;
618     cpu->cfg.ext_zfa = true;
619     cpu->cfg.ext_zfbfmin = true;
620     cpu->cfg.ext_zfh = true;
621     cpu->cfg.ext_zfhmin = true;
622     cpu->cfg.ext_zcb = true;
623     cpu->cfg.ext_zcmop = true;
624     cpu->cfg.ext_zba = true;
625     cpu->cfg.ext_zbb = true;
626     cpu->cfg.ext_zbs = true;
627     cpu->cfg.ext_zkt = true;
628     cpu->cfg.ext_zvbb = true;
629     cpu->cfg.ext_zvbc = true;
630     cpu->cfg.ext_zvfbfmin = true;
631     cpu->cfg.ext_zvfbfwma = true;
632     cpu->cfg.ext_zvfh = true;
633     cpu->cfg.ext_zvfhmin = true;
634     cpu->cfg.ext_zvkng = true;
635     cpu->cfg.ext_smaia = true;
636     cpu->cfg.ext_smstateen = true;
637     cpu->cfg.ext_ssaia = true;
638     cpu->cfg.ext_sscofpmf = true;
639     cpu->cfg.ext_sstc = true;
640     cpu->cfg.ext_svade = true;
641     cpu->cfg.ext_svinval = true;
642     cpu->cfg.ext_svnapot = true;
643     cpu->cfg.ext_svpbmt = true;
644 
645 #ifndef CONFIG_USER_ONLY
646     set_satp_mode_max_supported(cpu, VM_1_10_SV57);
647 #endif
648 }
649 
650 static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
651 {
652     CPURISCVState *env = &RISCV_CPU(obj)->env;
653     RISCVCPU *cpu = RISCV_CPU(obj);
654 
655     riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
656     env->priv_ver = PRIV_VERSION_1_12_0;
657 
658     /* Enable ISA extensions */
659     cpu->cfg.ext_zbc = true;
660     cpu->cfg.ext_zbkb = true;
661     cpu->cfg.ext_zbkc = true;
662     cpu->cfg.ext_zbkx = true;
663     cpu->cfg.ext_zknd = true;
664     cpu->cfg.ext_zkne = true;
665     cpu->cfg.ext_zknh = true;
666     cpu->cfg.ext_zksed = true;
667     cpu->cfg.ext_zksh = true;
668     cpu->cfg.ext_svinval = true;
669 
670     cpu->cfg.mmu = true;
671     cpu->cfg.pmp = true;
672 
673 #ifndef CONFIG_USER_ONLY
674     set_satp_mode_max_supported(cpu, VM_1_10_SV39);
675 #endif
676 }
677 
678 #ifdef CONFIG_TCG
679 static void rv128_base_cpu_init(Object *obj)
680 {
681     RISCVCPU *cpu = RISCV_CPU(obj);
682     CPURISCVState *env = &cpu->env;
683 
684     if (qemu_tcg_mttcg_enabled()) {
685         /* Missing 128-bit aligned atomics */
686         error_report("128-bit RISC-V currently does not work with Multi "
687                      "Threaded TCG. Please use: -accel tcg,thread=single");
688         exit(EXIT_FAILURE);
689     }
690 
691     cpu->cfg.mmu = true;
692     cpu->cfg.pmp = true;
693 
694     /* Set latest version of privileged specification */
695     env->priv_ver = PRIV_VERSION_LATEST;
696 #ifndef CONFIG_USER_ONLY
697     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
698 #endif
699 }
700 #endif /* CONFIG_TCG */
701 
702 static void rv64i_bare_cpu_init(Object *obj)
703 {
704     CPURISCVState *env = &RISCV_CPU(obj)->env;
705     riscv_cpu_set_misa_ext(env, RVI);
706 }
707 
708 static void rv64e_bare_cpu_init(Object *obj)
709 {
710     CPURISCVState *env = &RISCV_CPU(obj)->env;
711     riscv_cpu_set_misa_ext(env, RVE);
712 }
713 
714 #endif /* TARGET_RISCV64 */
715 
716 #if defined(TARGET_RISCV32) || \
717     (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
718 
719 static void rv32_base_cpu_init(Object *obj)
720 {
721     RISCVCPU *cpu = RISCV_CPU(obj);
722     CPURISCVState *env = &cpu->env;
723 
724     cpu->cfg.mmu = true;
725     cpu->cfg.pmp = true;
726 
727     /* Set latest version of privileged specification */
728     env->priv_ver = PRIV_VERSION_LATEST;
729 #ifndef CONFIG_USER_ONLY
730     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
731 #endif
732 }
733 
734 static void rv32_sifive_u_cpu_init(Object *obj)
735 {
736     RISCVCPU *cpu = RISCV_CPU(obj);
737     CPURISCVState *env = &cpu->env;
738     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
739     env->priv_ver = PRIV_VERSION_1_10_0;
740 #ifndef CONFIG_USER_ONLY
741     set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
742 #endif
743 
744     /* inherited from parent obj via riscv_cpu_init() */
745     cpu->cfg.ext_zifencei = true;
746     cpu->cfg.ext_zicsr = true;
747     cpu->cfg.mmu = true;
748     cpu->cfg.pmp = true;
749 }
750 
751 static void rv32_sifive_e_cpu_init(Object *obj)
752 {
753     CPURISCVState *env = &RISCV_CPU(obj)->env;
754     RISCVCPU *cpu = RISCV_CPU(obj);
755 
756     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
757     env->priv_ver = PRIV_VERSION_1_10_0;
758 #ifndef CONFIG_USER_ONLY
759     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
760 #endif
761 
762     /* inherited from parent obj via riscv_cpu_init() */
763     cpu->cfg.ext_zifencei = true;
764     cpu->cfg.ext_zicsr = true;
765     cpu->cfg.pmp = true;
766 }
767 
768 static void rv32_ibex_cpu_init(Object *obj)
769 {
770     CPURISCVState *env = &RISCV_CPU(obj)->env;
771     RISCVCPU *cpu = RISCV_CPU(obj);
772 
773     riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
774     env->priv_ver = PRIV_VERSION_1_12_0;
775 #ifndef CONFIG_USER_ONLY
776     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
777 #endif
778     /* inherited from parent obj via riscv_cpu_init() */
779     cpu->cfg.ext_zifencei = true;
780     cpu->cfg.ext_zicsr = true;
781     cpu->cfg.pmp = true;
782     cpu->cfg.ext_smepmp = true;
783 
784     cpu->cfg.ext_zba = true;
785     cpu->cfg.ext_zbb = true;
786     cpu->cfg.ext_zbc = true;
787     cpu->cfg.ext_zbs = true;
788 }
789 
790 static void rv32_imafcu_nommu_cpu_init(Object *obj)
791 {
792     CPURISCVState *env = &RISCV_CPU(obj)->env;
793     RISCVCPU *cpu = RISCV_CPU(obj);
794 
795     riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
796     env->priv_ver = PRIV_VERSION_1_10_0;
797 #ifndef CONFIG_USER_ONLY
798     set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
799 #endif
800 
801     /* inherited from parent obj via riscv_cpu_init() */
802     cpu->cfg.ext_zifencei = true;
803     cpu->cfg.ext_zicsr = true;
804     cpu->cfg.pmp = true;
805 }
806 
807 static void rv32i_bare_cpu_init(Object *obj)
808 {
809     CPURISCVState *env = &RISCV_CPU(obj)->env;
810     riscv_cpu_set_misa_ext(env, RVI);
811 }
812 
813 static void rv32e_bare_cpu_init(Object *obj)
814 {
815     CPURISCVState *env = &RISCV_CPU(obj)->env;
816     riscv_cpu_set_misa_ext(env, RVE);
817 }
818 #endif
819 
820 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
821 {
822     ObjectClass *oc;
823     char *typename;
824     char **cpuname;
825 
826     cpuname = g_strsplit(cpu_model, ",", 1);
827     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
828     oc = object_class_by_name(typename);
829     g_strfreev(cpuname);
830     g_free(typename);
831 
832     return oc;
833 }
834 
835 char *riscv_cpu_get_name(RISCVCPU *cpu)
836 {
837     RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
838     const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
839 
840     g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
841 
842     return cpu_model_from_type(typename);
843 }
844 
845 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
846 {
847     RISCVCPU *cpu = RISCV_CPU(cs);
848     CPURISCVState *env = &cpu->env;
849     int i, j;
850     uint8_t *p;
851 
852 #if !defined(CONFIG_USER_ONLY)
853     if (riscv_has_ext(env, RVH)) {
854         qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
855     }
856 #endif
857     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
858 #ifndef CONFIG_USER_ONLY
859     {
860         static const int dump_csrs[] = {
861             CSR_MHARTID,
862             CSR_MSTATUS,
863             CSR_MSTATUSH,
864             /*
865              * CSR_SSTATUS is intentionally omitted here as its value
866              * can be figured out by looking at CSR_MSTATUS
867              */
868             CSR_HSTATUS,
869             CSR_VSSTATUS,
870             CSR_MIP,
871             CSR_MIE,
872             CSR_MIDELEG,
873             CSR_HIDELEG,
874             CSR_MEDELEG,
875             CSR_HEDELEG,
876             CSR_MTVEC,
877             CSR_STVEC,
878             CSR_VSTVEC,
879             CSR_MEPC,
880             CSR_SEPC,
881             CSR_VSEPC,
882             CSR_MCAUSE,
883             CSR_SCAUSE,
884             CSR_VSCAUSE,
885             CSR_MTVAL,
886             CSR_STVAL,
887             CSR_HTVAL,
888             CSR_MTVAL2,
889             CSR_MSCRATCH,
890             CSR_SSCRATCH,
891             CSR_SATP,
892             CSR_MMTE,
893             CSR_UPMBASE,
894             CSR_UPMMASK,
895             CSR_SPMBASE,
896             CSR_SPMMASK,
897             CSR_MPMBASE,
898             CSR_MPMMASK,
899         };
900 
901         for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
902             int csrno = dump_csrs[i];
903             target_ulong val = 0;
904             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
905 
906             /*
907              * Rely on the smode, hmode, etc, predicates within csr.c
908              * to do the filtering of the registers that are present.
909              */
910             if (res == RISCV_EXCP_NONE) {
911                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
912                              csr_ops[csrno].name, val);
913             }
914         }
915     }
916 #endif
917 
918     for (i = 0; i < 32; i++) {
919         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
920                      riscv_int_regnames[i], env->gpr[i]);
921         if ((i & 3) == 3) {
922             qemu_fprintf(f, "\n");
923         }
924     }
925     if (flags & CPU_DUMP_FPU) {
926         target_ulong val = 0;
927         RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
928         if (res == RISCV_EXCP_NONE) {
929             qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
930                     csr_ops[CSR_FCSR].name, val);
931         }
932         for (i = 0; i < 32; i++) {
933             qemu_fprintf(f, " %-8s %016" PRIx64,
934                          riscv_fpr_regnames[i], env->fpr[i]);
935             if ((i & 3) == 3) {
936                 qemu_fprintf(f, "\n");
937             }
938         }
939     }
940     if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
941         static const int dump_rvv_csrs[] = {
942                     CSR_VSTART,
943                     CSR_VXSAT,
944                     CSR_VXRM,
945                     CSR_VCSR,
946                     CSR_VL,
947                     CSR_VTYPE,
948                     CSR_VLENB,
949                 };
950         for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
951             int csrno = dump_rvv_csrs[i];
952             target_ulong val = 0;
953             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
954 
955             /*
956              * Rely on the smode, hmode, etc. predicates within csr.c
957              * to do the filtering of the registers that are present.
958              */
959             if (res == RISCV_EXCP_NONE) {
960                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
961                              csr_ops[csrno].name, val);
962             }
963         }
964         uint16_t vlenb = cpu->cfg.vlenb;
965 
966         for (i = 0; i < 32; i++) {
967             qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
968             p = (uint8_t *)env->vreg;
969             for (j = vlenb - 1 ; j >= 0; j--) {
970                 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
971             }
972             qemu_fprintf(f, "\n");
973         }
974     }
975 }
976 
977 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
978 {
979     RISCVCPU *cpu = RISCV_CPU(cs);
980     CPURISCVState *env = &cpu->env;
981 
982     if (env->xl == MXL_RV32) {
983         env->pc = (int32_t)value;
984     } else {
985         env->pc = value;
986     }
987 }
988 
989 static vaddr riscv_cpu_get_pc(CPUState *cs)
990 {
991     RISCVCPU *cpu = RISCV_CPU(cs);
992     CPURISCVState *env = &cpu->env;
993 
994     /* Match cpu_get_tb_cpu_state. */
995     if (env->xl == MXL_RV32) {
996         return env->pc & UINT32_MAX;
997     }
998     return env->pc;
999 }
1000 
1001 bool riscv_cpu_has_work(CPUState *cs)
1002 {
1003 #ifndef CONFIG_USER_ONLY
1004     RISCVCPU *cpu = RISCV_CPU(cs);
1005     CPURISCVState *env = &cpu->env;
1006     /*
1007      * The definition of the WFI instruction requires it to ignore the
1008      * privilege mode and delegation registers, but respect individual enables.
1009      */
1010     return riscv_cpu_all_pending(env) != 0 ||
1011         riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
1012         riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
1013 #else
1014     return true;
1015 #endif
1016 }
1017 
1018 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
1019 {
1020     return riscv_env_mmu_index(cpu_env(cs), ifetch);
1021 }
1022 
1023 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
1024 {
1025 #ifndef CONFIG_USER_ONLY
1026     uint8_t iprio;
1027     int i, irq, rdzero;
1028 #endif
1029     CPUState *cs = CPU(obj);
1030     RISCVCPU *cpu = RISCV_CPU(cs);
1031     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1032     CPURISCVState *env = &cpu->env;
1033 
1034     if (mcc->parent_phases.hold) {
1035         mcc->parent_phases.hold(obj, type);
1036     }
1037 #ifndef CONFIG_USER_ONLY
1038     env->misa_mxl = mcc->misa_mxl_max;
1039     env->priv = PRV_M;
1040     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
1041     if (env->misa_mxl > MXL_RV32) {
1042         /*
1043          * The reset status of SXL/UXL is undefined, but mstatus is WARL
1044          * and we must ensure that the value after init is valid for read.
1045          */
1046         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
1047         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
1048         if (riscv_has_ext(env, RVH)) {
1049             env->vsstatus = set_field(env->vsstatus,
1050                                       MSTATUS64_SXL, env->misa_mxl);
1051             env->vsstatus = set_field(env->vsstatus,
1052                                       MSTATUS64_UXL, env->misa_mxl);
1053             env->mstatus_hs = set_field(env->mstatus_hs,
1054                                         MSTATUS64_SXL, env->misa_mxl);
1055             env->mstatus_hs = set_field(env->mstatus_hs,
1056                                         MSTATUS64_UXL, env->misa_mxl);
1057         }
1058     }
1059     env->mcause = 0;
1060     env->miclaim = MIP_SGEIP;
1061     env->pc = env->resetvec;
1062     env->bins = 0;
1063     env->two_stage_lookup = false;
1064 
1065     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
1066                    (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
1067                     MENVCFG_ADUE : 0);
1068     env->henvcfg = 0;
1069 
1070     /* Initialize default priorities of local interrupts. */
1071     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
1072         iprio = riscv_cpu_default_priority(i);
1073         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
1074         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
1075         env->hviprio[i] = 0;
1076     }
1077     i = 0;
1078     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
1079         if (!rdzero) {
1080             env->hviprio[irq] = env->miprio[irq];
1081         }
1082         i++;
1083     }
1084     /* mmte is supposed to have pm.current hardwired to 1 */
1085     env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
1086 
1087     /*
1088      * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
1089      * Bits 2, 6, 10 and 12 of mideleg are read-only 1 when the Hypervisor
1090      */
1091     if (riscv_has_ext(env, RVH)) {
1092         env->mideleg |= HS_MODE_INTERRUPTS;
1093     }
1094 
1095     /*
1096      * Clear mseccfg and unlock all the PMP entries upon reset.
1097      * This is allowed as per the priv and smepmp specifications
1098      * and is needed to clear stale entries across reboots.
1099      */
1100     if (riscv_cpu_cfg(env)->ext_smepmp) {
1101         env->mseccfg = 0;
1102     }
1103 
1104     pmp_unlock_entries(env);
1105 #else
1106     env->priv = PRV_U;
1107     env->senvcfg = 0;
1108     env->menvcfg = 0;
1109 #endif
1110 
1111     /* on reset elp is clear */
1112     env->elp = false;
1113     /* on reset ssp is set to 0 */
1114     env->ssp = 0;
1115 
1116     env->xl = riscv_cpu_mxl(env);
1117     riscv_cpu_update_mask(env);
1118     cs->exception_index = RISCV_EXCP_NONE;
1119     env->load_res = -1;
1120     set_default_nan_mode(1, &env->fp_status);
1121     /* Default NaN value: sign bit clear, frac msb set */
1122     set_float_default_nan_pattern(0b01000000, &env->fp_status);
1123     env->vill = true;
1124 
1125 #ifndef CONFIG_USER_ONLY
1126     if (cpu->cfg.debug) {
1127         riscv_trigger_reset_hold(env);
1128     }
1129 
1130     if (kvm_enabled()) {
1131         kvm_riscv_reset_vcpu(cpu);
1132     }
1133 #endif
1134 }
1135 
1136 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
1137 {
1138     RISCVCPU *cpu = RISCV_CPU(s);
1139     CPURISCVState *env = &cpu->env;
1140     info->target_info = &cpu->cfg;
1141 
1142     switch (env->xl) {
1143     case MXL_RV32:
1144         info->print_insn = print_insn_riscv32;
1145         break;
1146     case MXL_RV64:
1147         info->print_insn = print_insn_riscv64;
1148         break;
1149     case MXL_RV128:
1150         info->print_insn = print_insn_riscv128;
1151         break;
1152     default:
1153         g_assert_not_reached();
1154     }
1155 }
1156 
1157 #ifndef CONFIG_USER_ONLY
1158 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1159 {
1160     bool rv32 = riscv_cpu_is_32bit(cpu);
1161     uint8_t satp_mode_map_max, satp_mode_supported_max;
1162 
1163     /* The CPU wants the OS to decide which satp mode to use */
1164     if (cpu->cfg.satp_mode.supported == 0) {
1165         return;
1166     }
1167 
1168     satp_mode_supported_max =
1169                     satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1170 
1171     if (cpu->cfg.satp_mode.map == 0) {
1172         if (cpu->cfg.satp_mode.init == 0) {
1173             /* If unset by the user, we fall back to the default satp mode. */
1174             set_satp_mode_default_map(cpu);
1175         } else {
1176             /*
1177              * Find the lowest level that was disabled and then enable the
1178              * first valid level below it that can be found in
1179              * valid_vm_1_10_32/64.
1180              */
1181             for (int i = 1; i < 16; ++i) {
1182                 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1183                     (cpu->cfg.satp_mode.supported & (1 << i))) {
1184                     for (int j = i - 1; j >= 0; --j) {
1185                         if (cpu->cfg.satp_mode.supported & (1 << j)) {
1186                             cpu->cfg.satp_mode.map |= (1 << j);
1187                             break;
1188                         }
1189                     }
1190                     break;
1191                 }
1192             }
1193         }
1194     }
1195 
1196     satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1197 
1198     /* Make sure the user asked for a supported configuration (HW and qemu) */
1199     if (satp_mode_map_max > satp_mode_supported_max) {
1200         error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1201                    satp_mode_str(satp_mode_map_max, rv32),
1202                    satp_mode_str(satp_mode_supported_max, rv32));
1203         return;
1204     }
1205 
1206     /*
1207      * Make sure the user did not ask for an invalid configuration as per
1208      * the specification.
1209      */
1210     if (!rv32) {
1211         for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1212             if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1213                 (cpu->cfg.satp_mode.init & (1 << i)) &&
1214                 (cpu->cfg.satp_mode.supported & (1 << i))) {
1215                 error_setg(errp, "cannot disable %s satp mode if %s "
1216                            "is enabled", satp_mode_str(i, false),
1217                            satp_mode_str(satp_mode_map_max, false));
1218                 return;
1219             }
1220         }
1221     }
1222 
1223     /* Finally expand the map so that all valid modes are set */
1224     for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1225         if (cpu->cfg.satp_mode.supported & (1 << i)) {
1226             cpu->cfg.satp_mode.map |= (1 << i);
1227         }
1228     }
1229 }
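/*
 * Example of the fallback above (illustrative): running with
 * "-cpu rv64,sv57=off" leaves map == 0 with the sv57 bit set in init,
 * so the loop enables the next lower supported mode (sv48), and the
 * final expansion then turns on every supported mode below it.
 */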
1230 #endif
1231 
1232 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1233 {
1234     Error *local_err = NULL;
1235 
1236 #ifndef CONFIG_USER_ONLY
1237     riscv_cpu_satp_mode_finalize(cpu, &local_err);
1238     if (local_err != NULL) {
1239         error_propagate(errp, local_err);
1240         return;
1241     }
1242 #endif
1243 
1244     if (tcg_enabled()) {
1245         riscv_tcg_cpu_finalize_features(cpu, &local_err);
1246         if (local_err != NULL) {
1247             error_propagate(errp, local_err);
1248             return;
1249         }
1250         riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1251     } else if (kvm_enabled()) {
1252         riscv_kvm_cpu_finalize_features(cpu, &local_err);
1253         if (local_err != NULL) {
1254             error_propagate(errp, local_err);
1255             return;
1256         }
1257     }
1258 }
1259 
1260 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1261 {
1262     CPUState *cs = CPU(dev);
1263     RISCVCPU *cpu = RISCV_CPU(dev);
1264     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1265     Error *local_err = NULL;
1266 
1267     cpu_exec_realizefn(cs, &local_err);
1268     if (local_err != NULL) {
1269         error_propagate(errp, local_err);
1270         return;
1271     }
1272 
1273     riscv_cpu_finalize_features(cpu, &local_err);
1274     if (local_err != NULL) {
1275         error_propagate(errp, local_err);
1276         return;
1277     }
1278 
1279     riscv_cpu_register_gdb_regs_for_features(cs);
1280 
1281 #ifndef CONFIG_USER_ONLY
1282     if (cpu->cfg.debug) {
1283         riscv_trigger_realize(&cpu->env);
1284     }
1285 #endif
1286 
1287     qemu_init_vcpu(cs);
1288     cpu_reset(cs);
1289 
1290     mcc->parent_realize(dev, errp);
1291 }
1292 
1293 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1294 {
1295     if (tcg_enabled()) {
1296         return riscv_cpu_tcg_compatible(cpu);
1297     }
1298 
1299     return true;
1300 }
1301 
1302 #ifndef CONFIG_USER_ONLY
1303 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1304                                void *opaque, Error **errp)
1305 {
1306     RISCVSATPMap *satp_map = opaque;
1307     uint8_t satp = satp_mode_from_str(name);
1308     bool value;
1309 
1310     value = satp_map->map & (1 << satp);
1311 
1312     visit_type_bool(v, name, &value, errp);
1313 }
1314 
1315 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1316                                void *opaque, Error **errp)
1317 {
1318     RISCVSATPMap *satp_map = opaque;
1319     uint8_t satp = satp_mode_from_str(name);
1320     bool value;
1321 
1322     if (!visit_type_bool(v, name, &value, errp)) {
1323         return;
1324     }
1325 
1326     satp_map->map = deposit32(satp_map->map, satp, 1, value);
1327     satp_map->init |= 1 << satp;
1328 }
1329 
1330 void riscv_add_satp_mode_properties(Object *obj)
1331 {
1332     RISCVCPU *cpu = RISCV_CPU(obj);
1333 
1334     if (cpu->env.misa_mxl == MXL_RV32) {
1335         object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1336                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1337     } else {
1338         object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1339                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1340         object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1341                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1342         object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1343                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1344         object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1345                             cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1346     }
1347 }
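/*
 * These properties are user-visible on the command line, e.g.
 * (illustrative):
 *
 *   -cpu rv64,sv39=on,sv57=off
 *
 * Each flag goes through cpu_riscv_set_satp() above, updating
 * cfg.satp_mode.map/init for riscv_cpu_satp_mode_finalize().
 */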
1348 
1349 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1350 {
1351     RISCVCPU *cpu = RISCV_CPU(opaque);
1352     CPURISCVState *env = &cpu->env;
1353 
1354     if (irq < IRQ_LOCAL_MAX) {
1355         switch (irq) {
1356         case IRQ_U_SOFT:
1357         case IRQ_S_SOFT:
1358         case IRQ_VS_SOFT:
1359         case IRQ_M_SOFT:
1360         case IRQ_U_TIMER:
1361         case IRQ_S_TIMER:
1362         case IRQ_VS_TIMER:
1363         case IRQ_M_TIMER:
1364         case IRQ_U_EXT:
1365         case IRQ_VS_EXT:
1366         case IRQ_M_EXT:
1367             if (kvm_enabled()) {
1368                 kvm_riscv_set_irq(cpu, irq, level);
1369             } else {
1370                 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1371             }
1372             break;
1373         case IRQ_S_EXT:
1374             if (kvm_enabled()) {
1375                 kvm_riscv_set_irq(cpu, irq, level);
1376             } else {
1377                 env->external_seip = level;
1378                 riscv_cpu_update_mip(env, 1 << irq,
1379                                      BOOL_TO_MASK(level | env->software_seip));
1380             }
1381             break;
1382         default:
1383             g_assert_not_reached();
1384         }
1385     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1386         /* Require H-extension for handling guest local interrupts */
1387         if (!riscv_has_ext(env, RVH)) {
1388             g_assert_not_reached();
1389         }
1390 
1391         /* Compute bit position in HGEIP CSR */
1392         irq = irq - IRQ_LOCAL_MAX + 1;
1393         if (env->geilen < irq) {
1394             g_assert_not_reached();
1395         }
1396 
1397         /* Update HGEIP CSR */
1398         env->hgeip &= ~((target_ulong)1 << irq);
1399         if (level) {
1400             env->hgeip |= (target_ulong)1 << irq;
1401         }
1402 
1403         /* Update mip.SGEIP bit */
1404         riscv_cpu_update_mip(env, MIP_SGEIP,
1405                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1406     } else {
1407         g_assert_not_reached();
1408     }
1409 }
1410 #endif /* CONFIG_USER_ONLY */
1411 
1412 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1413 {
1414     return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1415 }
1416 
1417 static void riscv_cpu_post_init(Object *obj)
1418 {
1419     accel_cpu_instance_init(CPU(obj));
1420 }
1421 
1422 static void riscv_cpu_init(Object *obj)
1423 {
1424     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1425     RISCVCPU *cpu = RISCV_CPU(obj);
1426     CPURISCVState *env = &cpu->env;
1427 
1428     env->misa_mxl = mcc->misa_mxl_max;
1429 
1430 #ifndef CONFIG_USER_ONLY
1431     qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1432                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1433 #endif /* CONFIG_USER_ONLY */
1434 
1435     general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1436 
1437     /*
1438      * The timer and performance counter extensions were supported
1439      * in QEMU before they were added as discrete extensions in the
1440      * ISA. To keep compatibility we'll always default them to 'true'
1441      * for all CPUs. Each accelerator will decide what to do when
1442      * users disable them.
1443      */
1444     RISCV_CPU(obj)->cfg.ext_zicntr = true;
1445     RISCV_CPU(obj)->cfg.ext_zihpm = true;
1446 
1447     /* Default values for non-bool cpu properties */
1448     cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1449     cpu->cfg.vlenb = 128 >> 3;
1450     cpu->cfg.elen = 64;
1451     cpu->cfg.cbom_blocksize = 64;
1452     cpu->cfg.cbop_blocksize = 64;
1453     cpu->cfg.cboz_blocksize = 64;
1454     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1455 }
1456 
1457 static void riscv_bare_cpu_init(Object *obj)
1458 {
1459     RISCVCPU *cpu = RISCV_CPU(obj);
1460 
1461     /*
1462      * Bare CPUs do not inherit the timer and performance
1463      * counters from the parent class (see riscv_cpu_init()
1464      * for info on why the parent enables them).
1465      *
1466      * Users have to explicitly enable these counters for
1467      * bare CPUs.
1468      */
1469     cpu->cfg.ext_zicntr = false;
1470     cpu->cfg.ext_zihpm = false;
1471 
1472     /* Set to QEMU's first supported priv version */
1473     cpu->env.priv_ver = PRIV_VERSION_1_10_0;
1474 
1475     /*
1476      * Support all available satp_mode settings. The default
1477      * value will be set to MBARE if the user doesn't set
1478      * satp_mode manually (see set_satp_mode_default_map()).
1479      */
1480 #ifndef CONFIG_USER_ONLY
1481     set_satp_mode_max_supported(cpu, VM_1_10_SV64);
1482 #endif
1483 }
1484 
1485 typedef struct misa_ext_info {
1486     const char *name;
1487     const char *description;
1488 } MISAExtInfo;
1489 
1490 #define MISA_INFO_IDX(_bit) \
1491     __builtin_ctz(_bit)
1492 
1493 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1494     [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
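/*
 * Expansion sketch: MISA_EXT_INFO(RVA, "a", "Atomic instructions")
 * becomes [MISA_INFO_IDX(RVA)] = {.name = "a", ...}; RVA is misa bit 0,
 * so __builtin_ctz(RVA) places the entry at index 0.
 */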
1495 
1496 static const MISAExtInfo misa_ext_info_arr[] = {
1497     MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1498     MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1499     MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1500     MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1501     MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1502     MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1503     MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1504     MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1505     MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1506     MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1507     MISA_EXT_INFO(RVJ, "x-j", "Dynamically translated languages"),
1508     MISA_EXT_INFO(RVV, "v", "Vector operations"),
1509     MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1510     MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1511 };
1512 
1513 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1514 {
1515     CPUClass *cc = CPU_CLASS(mcc);
1516 
1517     /* Validate that MISA_MXL is set properly. */
1518     switch (mcc->misa_mxl_max) {
1519 #ifdef TARGET_RISCV64
1520     case MXL_RV64:
1521     case MXL_RV128:
1522         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1523         break;
1524 #endif
1525     case MXL_RV32:
1526         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1527         break;
1528     default:
1529         g_assert_not_reached();
1530     }
1531 }
1532 
1533 static int riscv_validate_misa_info_idx(uint32_t bit)
1534 {
1535     int idx;
1536 
1537     /*
1538      * Our lowest valid input (RVA) is 1 and
1539      * __builtin_ctz() is UB with zero.
1540      */
1541     g_assert(bit != 0);
1542     idx = MISA_INFO_IDX(bit);
1543 
1544     g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1545     return idx;
1546 }
1547 
1548 const char *riscv_get_misa_ext_name(uint32_t bit)
1549 {
1550     int idx = riscv_validate_misa_info_idx(bit);
1551     const char *val = misa_ext_info_arr[idx].name;
1552 
1553     g_assert(val != NULL);
1554     return val;
1555 }
1556 
1557 const char *riscv_get_misa_ext_description(uint32_t bit)
1558 {
1559     int idx = riscv_validate_misa_info_idx(bit);
1560     const char *val = misa_ext_info_arr[idx].description;
1561 
1562     g_assert(val != NULL);
1563     return val;
1564 }
1565 
1566 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1567     {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1568      .enabled = _defval}
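/*
 * For example, MULTI_EXT_CFG_BOOL("zba", ext_zba, true) describes a
 * "zba" property backed by cfg.ext_zba (addressed via its struct
 * offset) that defaults to enabled.
 */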
1569 
1570 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1571     /* Defaults for standard extensions */
1572     MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1573     MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
1574     MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1575     MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
1576     MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
1577     MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1578     MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1579     MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1580     MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1581     MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1582     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1583     MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
1584     MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
1585     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1586     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1587     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1588     MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1589     MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1590     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1591     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1592     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1593     MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1594     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1595     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1596     MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1597     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1598     MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1599     MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1600     MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1601     MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1602 
1603     MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1604     MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1605     MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1606     MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1607     MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1608     MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1609     MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1610     MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1611     MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1612     MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),
1613 
1614     MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1615     MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1616 
1617     MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1618     MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1619     MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1620     MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1621     MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1622     MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1623     MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1624     MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1625     MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1626     MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1627     MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1628     MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1629     MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1630     MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1631     MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1632     MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1633     MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1634     MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1635 
1636     MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1637     MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1638     MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1639     MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1640 
1641     MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1642     MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1643     MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1644 
1645     MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1646 
1647     MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1648     MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1649     MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1650     MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1651     MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1652     MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1653     MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1654     MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1655 
1656     /* Vector cryptography extensions */
1657     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1658     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1659     MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1660     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1661     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1662     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1663     MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1664     MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1665     MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1666     MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1667     MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1668     MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1669     MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1670     MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1671     MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1672     MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1673 
1674     { },
1675 };
1676 
1677 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1678     MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1679     MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1680     MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1681     MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1682     MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1683     MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1684     MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1685     MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1686     MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1687     MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1688     MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1689     MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1690 
1691     { },
1692 };
1693 
1694 /* These are experimental so mark with 'x-' */
1695 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1696     MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),
1697 
1698     { },
1699 };
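/*
 * Illustrative usage (not an exhaustive reference): experimental
 * extensions keep the 'x-' prefix on the command line, e.g.
 *   -cpu rv64,x-svukte=true
 */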
1700 
1701 /*
1702  * 'Named features' is the name we give to extensions that we
1703  * don't want to expose to users. They are either immutable
1704  * (always enabled/disabled) or they vary depending on the
1705  * resulting CPU state. They have riscv,isa strings and
1706  * priv_ver like regular extensions.
1707  */
1708 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1709     MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1710     MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
1711 
1712     { },
1713 };
1714 
1715 /* Deprecated entries marked for future removal */
1716 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1717     MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1718     MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1719     MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1720     MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1721     MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1722     MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1723     MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1724     MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1725     MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1726     MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1727     MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1728 
1729     { },
1730 };
1731 
1732 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1733                              Error **errp)
1734 {
1735     g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1736     error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1737                cpuname, propname);
1738 }
1739 
1740 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1741                              void *opaque, Error **errp)
1742 {
1743     RISCVCPU *cpu = RISCV_CPU(obj);
1744     uint8_t pmu_num, curr_pmu_num;
1745     uint32_t pmu_mask;
1746 
1747     visit_type_uint8(v, name, &pmu_num, errp);
1748 
1749     curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1750 
1751     if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1752         cpu_set_prop_err(cpu, name, errp);
1753         error_append_hint(errp, "Current '%s' val: %u\n",
1754                           name, curr_pmu_num);
1755         return;
1756     }
1757 
1758     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1759         error_setg(errp, "Number of counters exceeds maximum available");
1760         return;
1761     }
1762 
1763     if (pmu_num == 0) {
1764         pmu_mask = 0;
1765     } else {
1766         pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1767     }
1768 
1769     warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1770     cpu->cfg.pmu_mask = pmu_mask;
1771     cpu_option_add_user_setting("pmu-mask", pmu_mask);
1772 }
1773 
1774 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1775                              void *opaque, Error **errp)
1776 {
1777     RISCVCPU *cpu = RISCV_CPU(obj);
1778     uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1779 
1780     visit_type_uint8(v, name, &pmu_num, errp);
1781 }
1782 
1783 static const PropertyInfo prop_pmu_num = {
1784     .name = "pmu-num",
1785     .get = prop_pmu_num_get,
1786     .set = prop_pmu_num_set,
1787 };
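/*
 * "pmu-num" is kept only for backward compatibility: a value of N is
 * converted into a mask covering hpmcounters 3..(N + 2).  For example,
 * pmu-num=4 yields the same configuration as pmu-mask=0x78
 * (MAKE_64BIT_MASK(3, 4), i.e. counters 3 to 6).
 */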
1788 
1789 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1790                              void *opaque, Error **errp)
1791 {
1792     RISCVCPU *cpu = RISCV_CPU(obj);
1793     uint32_t value;
1794     uint8_t pmu_num;
1795 
1796     visit_type_uint32(v, name, &value, errp);
1797 
1798     if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1799         cpu_set_prop_err(cpu, name, errp);
1800         error_append_hint(errp, "Current '%s' val: %x\n",
1801                           name, cpu->cfg.pmu_mask);
1802         return;
1803     }
1804 
1805     pmu_num = ctpop32(value);
1806 
1807     if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1808         error_setg(errp, "Number of counters exceeds maximum available");
1809         return;
1810     }
1811 
1812     cpu_option_add_user_setting(name, value);
1813     cpu->cfg.pmu_mask = value;
1814 }
1815 
1816 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1817                              void *opaque, Error **errp)
1818 {
1819     uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1820 
1821     visit_type_uint32(v, name, &pmu_mask, errp);
1822 }
1823 
1824 static const PropertyInfo prop_pmu_mask = {
1825     .name = "pmu-mask",
1826     .get = prop_pmu_mask_get,
1827     .set = prop_pmu_mask_set,
1828 };
1829 
1830 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1831                          void *opaque, Error **errp)
1832 {
1833     RISCVCPU *cpu = RISCV_CPU(obj);
1834     bool value;
1835 
1836     visit_type_bool(v, name, &value, errp);
1837 
1838     if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1839         cpu_set_prop_err(cpu, "mmu", errp);
1840         return;
1841     }
1842 
1843     cpu_option_add_user_setting(name, value);
1844     cpu->cfg.mmu = value;
1845 }
1846 
1847 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1848                          void *opaque, Error **errp)
1849 {
1850     bool value = RISCV_CPU(obj)->cfg.mmu;
1851 
1852     visit_type_bool(v, name, &value, errp);
1853 }
1854 
1855 static const PropertyInfo prop_mmu = {
1856     .name = "mmu",
1857     .get = prop_mmu_get,
1858     .set = prop_mmu_set,
1859 };
1860 
1861 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1862                          void *opaque, Error **errp)
1863 {
1864     RISCVCPU *cpu = RISCV_CPU(obj);
1865     bool value;
1866 
1867     visit_type_bool(v, name, &value, errp);
1868 
1869     if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1870         cpu_set_prop_err(cpu, name, errp);
1871         return;
1872     }
1873 
1874     cpu_option_add_user_setting(name, value);
1875     cpu->cfg.pmp = value;
1876 }
1877 
1878 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1879                          void *opaque, Error **errp)
1880 {
1881     bool value = RISCV_CPU(obj)->cfg.pmp;
1882 
1883     visit_type_bool(v, name, &value, errp);
1884 }
1885 
1886 static const PropertyInfo prop_pmp = {
1887     .name = "pmp",
1888     .get = prop_pmp_get,
1889     .set = prop_pmp_set,
1890 };
1891 
1892 static int priv_spec_from_str(const char *priv_spec_str)
1893 {
1894     int priv_version = -1;
1895 
1896     if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1897         priv_version = PRIV_VERSION_1_13_0;
1898     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1899         priv_version = PRIV_VERSION_1_12_0;
1900     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1901         priv_version = PRIV_VERSION_1_11_0;
1902     } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1903         priv_version = PRIV_VERSION_1_10_0;
1904     }
1905 
1906     return priv_version;
1907 }
1908 
1909 const char *priv_spec_to_str(int priv_version)
1910 {
1911     switch (priv_version) {
1912     case PRIV_VERSION_1_10_0:
1913         return PRIV_VER_1_10_0_STR;
1914     case PRIV_VERSION_1_11_0:
1915         return PRIV_VER_1_11_0_STR;
1916     case PRIV_VERSION_1_12_0:
1917         return PRIV_VER_1_12_0_STR;
1918     case PRIV_VERSION_1_13_0:
1919         return PRIV_VER_1_13_0_STR;
1920     default:
1921         return NULL;
1922     }
1923 }
1924 
1925 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1926                                void *opaque, Error **errp)
1927 {
1928     RISCVCPU *cpu = RISCV_CPU(obj);
1929     g_autofree char *value = NULL;
1930     int priv_version = -1;
1931 
1932     visit_type_str(v, name, &value, errp);
1933 
1934     priv_version = priv_spec_from_str(value);
1935     if (priv_version < 0) {
1936         error_setg(errp, "Unsupported privilege spec version '%s'", value);
1937         return;
1938     }
1939 
1940     if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1941         cpu_set_prop_err(cpu, name, errp);
1942         error_append_hint(errp, "Current '%s' val: %s\n", name,
1943                           object_property_get_str(obj, name, NULL));
1944         return;
1945     }
1946 
1947     cpu_option_add_user_setting(name, priv_version);
1948     cpu->env.priv_ver = priv_version;
1949 }
1950 
1951 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1952                                void *opaque, Error **errp)
1953 {
1954     RISCVCPU *cpu = RISCV_CPU(obj);
1955     const char *value = priv_spec_to_str(cpu->env.priv_ver);
1956 
1957     visit_type_str(v, name, (char **)&value, errp);
1958 }
1959 
1960 static const PropertyInfo prop_priv_spec = {
1961     .name = "priv_spec",
1962     .get = prop_priv_spec_get,
1963     .set = prop_priv_spec_set,
1964 };
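/*
 * Illustrative usage, assuming the version strings defined by the
 * PRIV_VER_*_STR macros (e.g. "v1.12.0"):
 *   -cpu rv64,priv_spec=v1.12.0
 * Unknown strings are rejected by priv_spec_from_str() above.
 */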
1965 
1966 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1967                                void *opaque, Error **errp)
1968 {
1969     RISCVCPU *cpu = RISCV_CPU(obj);
1970     g_autofree char *value = NULL;
1971 
1972     visit_type_str(v, name, &value, errp);
1973 
1974     if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1975         error_setg(errp, "Unsupported vector spec version '%s'", value);
1976         return;
1977     }
1978 
1979     cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1980     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1981 }
1982 
1983 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1984                                void *opaque, Error **errp)
1985 {
1986     const char *value = VEXT_VER_1_00_0_STR;
1987 
1988     visit_type_str(v, name, (char **)&value, errp);
1989 }
1990 
1991 static const PropertyInfo prop_vext_spec = {
1992     .name = "vext_spec",
1993     .get = prop_vext_spec_get,
1994     .set = prop_vext_spec_set,
1995 };
1996 
1997 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1998                          void *opaque, Error **errp)
1999 {
2000     RISCVCPU *cpu = RISCV_CPU(obj);
2001     uint16_t value;
2002 
2003     if (!visit_type_uint16(v, name, &value, errp)) {
2004         return;
2005     }
2006 
2007     if (!is_power_of_2(value)) {
2008         error_setg(errp, "Vector extension VLEN must be a power of 2");
2009         return;
2010     }
2011 
2012     if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) {
2013         cpu_set_prop_err(cpu, name, errp);
2014         error_append_hint(errp, "Current '%s' val: %u\n",
2015                           name, cpu->cfg.vlenb << 3);
2016         return;
2017     }
2018 
2019     cpu_option_add_user_setting(name, value);
2020     cpu->cfg.vlenb = value >> 3;
2021 }
2022 
2023 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
2024                          void *opaque, Error **errp)
2025 {
2026     uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
2027 
2028     visit_type_uint16(v, name, &value, errp);
2029 }
2030 
2031 static const PropertyInfo prop_vlen = {
2032     .name = "vlen",
2033     .get = prop_vlen_get,
2034     .set = prop_vlen_set,
2035 };
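/*
 * Note: "vlen" is specified in bits on the command line but stored
 * internally in bytes (cfg.vlenb).  E.g. an illustrative
 *   -cpu rv64,v=true,vlen=256
 * results in cpu->cfg.vlenb == 32.
 */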
2036 
2037 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
2038                          void *opaque, Error **errp)
2039 {
2040     RISCVCPU *cpu = RISCV_CPU(obj);
2041     uint16_t value;
2042 
2043     if (!visit_type_uint16(v, name, &value, errp)) {
2044         return;
2045     }
2046 
2047     if (!is_power_of_2(value)) {
2048         error_setg(errp, "Vector extension ELEN must be a power of 2");
2049         return;
2050     }
2051 
2052     if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
2053         cpu_set_prop_err(cpu, name, errp);
2054         error_append_hint(errp, "Current '%s' val: %u\n",
2055                           name, cpu->cfg.elen);
2056         return;
2057     }
2058 
2059     cpu_option_add_user_setting(name, value);
2060     cpu->cfg.elen = value;
2061 }
2062 
2063 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
2064                          void *opaque, Error **errp)
2065 {
2066     uint16_t value = RISCV_CPU(obj)->cfg.elen;
2067 
2068     visit_type_uint16(v, name, &value, errp);
2069 }
2070 
2071 static const PropertyInfo prop_elen = {
2072     .name = "elen",
2073     .get = prop_elen_get,
2074     .set = prop_elen_set,
2075 };
2076 
2077 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
2078                                   void *opaque, Error **errp)
2079 {
2080     RISCVCPU *cpu = RISCV_CPU(obj);
2081     uint16_t value;
2082 
2083     if (!visit_type_uint16(v, name, &value, errp)) {
2084         return;
2085     }
2086 
2087     if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
2088         cpu_set_prop_err(cpu, name, errp);
2089         error_append_hint(errp, "Current '%s' val: %u\n",
2090                           name, cpu->cfg.cbom_blocksize);
2091         return;
2092     }
2093 
2094     cpu_option_add_user_setting(name, value);
2095     cpu->cfg.cbom_blocksize = value;
2096 }
2097 
2098 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
2099                          void *opaque, Error **errp)
2100 {
2101     uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
2102 
2103     visit_type_uint16(v, name, &value, errp);
2104 }
2105 
2106 static const PropertyInfo prop_cbom_blksize = {
2107     .name = "cbom_blocksize",
2108     .get = prop_cbom_blksize_get,
2109     .set = prop_cbom_blksize_set,
2110 };
2111 
2112 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
2113                                   void *opaque, Error **errp)
2114 {
2115     RISCVCPU *cpu = RISCV_CPU(obj);
2116     uint16_t value;
2117 
2118     if (!visit_type_uint16(v, name, &value, errp)) {
2119         return;
2120     }
2121 
2122     if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
2123         cpu_set_prop_err(cpu, name, errp);
2124         error_append_hint(errp, "Current '%s' val: %u\n",
2125                           name, cpu->cfg.cbop_blocksize);
2126         return;
2127     }
2128 
2129     cpu_option_add_user_setting(name, value);
2130     cpu->cfg.cbop_blocksize = value;
2131 }
2132 
2133 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2134                          void *opaque, Error **errp)
2135 {
2136     uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2137 
2138     visit_type_uint16(v, name, &value, errp);
2139 }
2140 
2141 static const PropertyInfo prop_cbop_blksize = {
2142     .name = "cbop_blocksize",
2143     .get = prop_cbop_blksize_get,
2144     .set = prop_cbop_blksize_set,
2145 };
2146 
2147 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2148                                   void *opaque, Error **errp)
2149 {
2150     RISCVCPU *cpu = RISCV_CPU(obj);
2151     uint16_t value;
2152 
2153     if (!visit_type_uint16(v, name, &value, errp)) {
2154         return;
2155     }
2156 
2157     if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2158         cpu_set_prop_err(cpu, name, errp);
2159         error_append_hint(errp, "Current '%s' val: %u\n",
2160                           name, cpu->cfg.cboz_blocksize);
2161         return;
2162     }
2163 
2164     cpu_option_add_user_setting(name, value);
2165     cpu->cfg.cboz_blocksize = value;
2166 }
2167 
2168 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2169                          void *opaque, Error **errp)
2170 {
2171     uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2172 
2173     visit_type_uint16(v, name, &value, errp);
2174 }
2175 
2176 static const PropertyInfo prop_cboz_blksize = {
2177     .name = "cboz_blocksize",
2178     .get = prop_cboz_blksize_get,
2179     .set = prop_cboz_blksize_set,
2180 };
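/*
 * All three cache block size properties take the size in bytes, e.g.
 * (illustrative) -cpu rv64,cbom_blocksize=64,cboz_blocksize=64.
 * Vendor CPUs only accept the value they already advertise, as checked
 * in the setters above.
 */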
2181 
2182 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2183                                void *opaque, Error **errp)
2184 {
2185     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2186     RISCVCPU *cpu = RISCV_CPU(obj);
2187     uint32_t prev_val = cpu->cfg.mvendorid;
2188     uint32_t value;
2189 
2190     if (!visit_type_uint32(v, name, &value, errp)) {
2191         return;
2192     }
2193 
2194     if (!dynamic_cpu && prev_val != value) {
2195         error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2196                    object_get_typename(obj), prev_val);
2197         return;
2198     }
2199 
2200     cpu->cfg.mvendorid = value;
2201 }
2202 
2203 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2204                                void *opaque, Error **errp)
2205 {
2206     uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2207 
2208     visit_type_uint32(v, name, &value, errp);
2209 }
2210 
2211 static const PropertyInfo prop_mvendorid = {
2212     .name = "mvendorid",
2213     .get = prop_mvendorid_get,
2214     .set = prop_mvendorid_set,
2215 };
2216 
2217 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2218                             void *opaque, Error **errp)
2219 {
2220     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2221     RISCVCPU *cpu = RISCV_CPU(obj);
2222     uint64_t prev_val = cpu->cfg.mimpid;
2223     uint64_t value;
2224 
2225     if (!visit_type_uint64(v, name, &value, errp)) {
2226         return;
2227     }
2228 
2229     if (!dynamic_cpu && prev_val != value) {
2230         error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2231                    object_get_typename(obj), prev_val);
2232         return;
2233     }
2234 
2235     cpu->cfg.mimpid = value;
2236 }
2237 
2238 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2239                             void *opaque, Error **errp)
2240 {
2241     uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2242 
2243     visit_type_uint64(v, name, &value, errp);
2244 }
2245 
2246 static const PropertyInfo prop_mimpid = {
2247     .name = "mimpid",
2248     .get = prop_mimpid_get,
2249     .set = prop_mimpid_set,
2250 };
2251 
2252 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2253                              void *opaque, Error **errp)
2254 {
2255     bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2256     RISCVCPU *cpu = RISCV_CPU(obj);
2257     uint64_t prev_val = cpu->cfg.marchid;
2258     uint64_t value, invalid_val;
2259     uint32_t mxlen = 0;
2260 
2261     if (!visit_type_uint64(v, name, &value, errp)) {
2262         return;
2263     }
2264 
2265     if (!dynamic_cpu && prev_val != value) {
2266         error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2267                    object_get_typename(obj), prev_val);
2268         return;
2269     }
2270 
2271     switch (riscv_cpu_mxl(&cpu->env)) {
2272     case MXL_RV32:
2273         mxlen = 32;
2274         break;
2275     case MXL_RV64:
2276     case MXL_RV128:
2277         mxlen = 64;
2278         break;
2279     default:
2280         g_assert_not_reached();
2281     }
2282 
2283     invalid_val = 1ULL << (mxlen - 1);
2284 
2285     if (value == invalid_val) {
2286         error_setg(errp, "Unable to set marchid with only the MSB "
2287                          "(bit %u) set and all other bits zero", mxlen - 1);
2288         return;
2289     }
2290 
2291     cpu->cfg.marchid = value;
2292 }
2293 
2294 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2295                              void *opaque, Error **errp)
2296 {
2297     uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2298 
2299     visit_type_uint64(v, name, &value, errp);
2300 }
2301 
2302 static const PropertyInfo prop_marchid = {
2303     .name = "marchid",
2304     .get = prop_marchid_get,
2305     .set = prop_marchid_set,
2306 };
2307 
2308 /*
2309  * RVA22U64 defines some cache-related 'named features':
2310  * Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa and Zicclsm.
2311  * They are always implemented in TCG and don't need to be
2312  * manually enabled by the profile.
2313  */
2314 static RISCVCPUProfile RVA22U64 = {
2315     .parent = NULL,
2316     .name = "rva22u64",
2317     .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
2318     .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2319     .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2320     .ext_offsets = {
2321         CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2322         CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2323         CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2324         CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2325         CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2326         CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2327 
2328         /* mandatory named features for this profile */
2329         CPU_CFG_OFFSET(ext_zic64b),
2330 
2331         RISCV_PROFILE_EXT_LIST_END
2332     }
2333 };
2334 
2335 /*
2336  * As with RVA22U64, RVA22S64 also defines 'named features'.
2337  *
2338  * Cache-related features that we consider enabled since we don't
2339  * implement caches: Ssccptr
2340  *
2341  * Other named features that we already implement: Sstvecd, Sstvala,
2342  * Sscounterenw
2343  *
2344  * The remaining features/extensions come from RVA22U64.
2345  */
2346 static RISCVCPUProfile RVA22S64 = {
2347     .parent = &RVA22U64,
2348     .name = "rva22s64",
2349     .misa_ext = RVS,
2350     .priv_spec = PRIV_VERSION_1_12_0,
2351     .satp_mode = VM_1_10_SV39,
2352     .ext_offsets = {
2353         /* rva22s64 exts */
2354         CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2355         CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2356 
2357         RISCV_PROFILE_EXT_LIST_END
2358     }
2359 };
2360 
2361 RISCVCPUProfile *riscv_profiles[] = {
2362     &RVA22U64,
2363     &RVA22S64,
2364     NULL,
2365 };
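/*
 * Profiles are exposed both as boolean CPU options on bare CPUs (e.g.,
 * illustratively, -cpu rv64i,rva22u64=true) and as the dedicated
 * rva22u64/rva22s64 CPU types registered at the end of this file.
 */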
2366 
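/*
 * Implied extension rules: when the extension named in '.ext' (or the
 * MISA bit, for .is_misa rules) is enabled, every extension listed in
 * implied_misa_exts/implied_multi_exts is enabled as well.  For example,
 * per RVD_IMPLIED below, enabling 'd' also enables 'f'.
 */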
2367 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2368     .is_misa = true,
2369     .ext = RVA,
2370     .implied_multi_exts = {
2371         CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2372 
2373         RISCV_IMPLIED_EXTS_RULE_END
2374     },
2375 };
2376 
2377 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2378     .is_misa = true,
2379     .ext = RVD,
2380     .implied_misa_exts = RVF,
2381     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2382 };
2383 
2384 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2385     .is_misa = true,
2386     .ext = RVF,
2387     .implied_multi_exts = {
2388         CPU_CFG_OFFSET(ext_zicsr),
2389 
2390         RISCV_IMPLIED_EXTS_RULE_END
2391     },
2392 };
2393 
2394 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2395     .is_misa = true,
2396     .ext = RVM,
2397     .implied_multi_exts = {
2398         CPU_CFG_OFFSET(ext_zmmul),
2399 
2400         RISCV_IMPLIED_EXTS_RULE_END
2401     },
2402 };
2403 
2404 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2405     .is_misa = true,
2406     .ext = RVV,
2407     .implied_multi_exts = {
2408         CPU_CFG_OFFSET(ext_zve64d),
2409 
2410         RISCV_IMPLIED_EXTS_RULE_END
2411     },
2412 };
2413 
2414 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2415     .ext = CPU_CFG_OFFSET(ext_zcb),
2416     .implied_multi_exts = {
2417         CPU_CFG_OFFSET(ext_zca),
2418 
2419         RISCV_IMPLIED_EXTS_RULE_END
2420     },
2421 };
2422 
2423 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2424     .ext = CPU_CFG_OFFSET(ext_zcd),
2425     .implied_misa_exts = RVD,
2426     .implied_multi_exts = {
2427         CPU_CFG_OFFSET(ext_zca),
2428 
2429         RISCV_IMPLIED_EXTS_RULE_END
2430     },
2431 };
2432 
2433 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2434     .ext = CPU_CFG_OFFSET(ext_zce),
2435     .implied_multi_exts = {
2436         CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2437         CPU_CFG_OFFSET(ext_zcmt),
2438 
2439         RISCV_IMPLIED_EXTS_RULE_END
2440     },
2441 };
2442 
2443 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2444     .ext = CPU_CFG_OFFSET(ext_zcf),
2445     .implied_misa_exts = RVF,
2446     .implied_multi_exts = {
2447         CPU_CFG_OFFSET(ext_zca),
2448 
2449         RISCV_IMPLIED_EXTS_RULE_END
2450     },
2451 };
2452 
2453 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2454     .ext = CPU_CFG_OFFSET(ext_zcmp),
2455     .implied_multi_exts = {
2456         CPU_CFG_OFFSET(ext_zca),
2457 
2458         RISCV_IMPLIED_EXTS_RULE_END
2459     },
2460 };
2461 
2462 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2463     .ext = CPU_CFG_OFFSET(ext_zcmt),
2464     .implied_multi_exts = {
2465         CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2466 
2467         RISCV_IMPLIED_EXTS_RULE_END
2468     },
2469 };
2470 
2471 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2472     .ext = CPU_CFG_OFFSET(ext_zdinx),
2473     .implied_multi_exts = {
2474         CPU_CFG_OFFSET(ext_zfinx),
2475 
2476         RISCV_IMPLIED_EXTS_RULE_END
2477     },
2478 };
2479 
2480 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2481     .ext = CPU_CFG_OFFSET(ext_zfa),
2482     .implied_misa_exts = RVF,
2483     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2484 };
2485 
2486 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2487     .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2488     .implied_misa_exts = RVF,
2489     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2490 };
2491 
2492 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2493     .ext = CPU_CFG_OFFSET(ext_zfh),
2494     .implied_multi_exts = {
2495         CPU_CFG_OFFSET(ext_zfhmin),
2496 
2497         RISCV_IMPLIED_EXTS_RULE_END
2498     },
2499 };
2500 
2501 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2502     .ext = CPU_CFG_OFFSET(ext_zfhmin),
2503     .implied_misa_exts = RVF,
2504     .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2505 };
2506 
2507 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2508     .ext = CPU_CFG_OFFSET(ext_zfinx),
2509     .implied_multi_exts = {
2510         CPU_CFG_OFFSET(ext_zicsr),
2511 
2512         RISCV_IMPLIED_EXTS_RULE_END
2513     },
2514 };
2515 
2516 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2517     .ext = CPU_CFG_OFFSET(ext_zhinx),
2518     .implied_multi_exts = {
2519         CPU_CFG_OFFSET(ext_zhinxmin),
2520 
2521         RISCV_IMPLIED_EXTS_RULE_END
2522     },
2523 };
2524 
2525 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2526     .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2527     .implied_multi_exts = {
2528         CPU_CFG_OFFSET(ext_zfinx),
2529 
2530         RISCV_IMPLIED_EXTS_RULE_END
2531     },
2532 };
2533 
2534 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2535     .ext = CPU_CFG_OFFSET(ext_zicntr),
2536     .implied_multi_exts = {
2537         CPU_CFG_OFFSET(ext_zicsr),
2538 
2539         RISCV_IMPLIED_EXTS_RULE_END
2540     },
2541 };
2542 
2543 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2544     .ext = CPU_CFG_OFFSET(ext_zihpm),
2545     .implied_multi_exts = {
2546         CPU_CFG_OFFSET(ext_zicsr),
2547 
2548         RISCV_IMPLIED_EXTS_RULE_END
2549     },
2550 };
2551 
2552 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2553     .ext = CPU_CFG_OFFSET(ext_zk),
2554     .implied_multi_exts = {
2555         CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2556         CPU_CFG_OFFSET(ext_zkt),
2557 
2558         RISCV_IMPLIED_EXTS_RULE_END
2559     },
2560 };
2561 
2562 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2563     .ext = CPU_CFG_OFFSET(ext_zkn),
2564     .implied_multi_exts = {
2565         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2566         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2567         CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2568 
2569         RISCV_IMPLIED_EXTS_RULE_END
2570     },
2571 };
2572 
2573 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2574     .ext = CPU_CFG_OFFSET(ext_zks),
2575     .implied_multi_exts = {
2576         CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2577         CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2578         CPU_CFG_OFFSET(ext_zksh),
2579 
2580         RISCV_IMPLIED_EXTS_RULE_END
2581     },
2582 };
2583 
2584 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2585     .ext = CPU_CFG_OFFSET(ext_zvbb),
2586     .implied_multi_exts = {
2587         CPU_CFG_OFFSET(ext_zvkb),
2588 
2589         RISCV_IMPLIED_EXTS_RULE_END
2590     },
2591 };
2592 
2593 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2594     .ext = CPU_CFG_OFFSET(ext_zve32f),
2595     .implied_misa_exts = RVF,
2596     .implied_multi_exts = {
2597         CPU_CFG_OFFSET(ext_zve32x),
2598 
2599         RISCV_IMPLIED_EXTS_RULE_END
2600     },
2601 };
2602 
2603 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2604     .ext = CPU_CFG_OFFSET(ext_zve32x),
2605     .implied_multi_exts = {
2606         CPU_CFG_OFFSET(ext_zicsr),
2607 
2608         RISCV_IMPLIED_EXTS_RULE_END
2609     },
2610 };
2611 
2612 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2613     .ext = CPU_CFG_OFFSET(ext_zve64d),
2614     .implied_misa_exts = RVD,
2615     .implied_multi_exts = {
2616         CPU_CFG_OFFSET(ext_zve64f),
2617 
2618         RISCV_IMPLIED_EXTS_RULE_END
2619     },
2620 };
2621 
2622 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2623     .ext = CPU_CFG_OFFSET(ext_zve64f),
2624     .implied_misa_exts = RVF,
2625     .implied_multi_exts = {
2626         CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2627 
2628         RISCV_IMPLIED_EXTS_RULE_END
2629     },
2630 };
2631 
2632 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2633     .ext = CPU_CFG_OFFSET(ext_zve64x),
2634     .implied_multi_exts = {
2635         CPU_CFG_OFFSET(ext_zve32x),
2636 
2637         RISCV_IMPLIED_EXTS_RULE_END
2638     },
2639 };
2640 
2641 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2642     .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2643     .implied_multi_exts = {
2644         CPU_CFG_OFFSET(ext_zve32f),
2645 
2646         RISCV_IMPLIED_EXTS_RULE_END
2647     },
2648 };
2649 
2650 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2651     .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2652     .implied_multi_exts = {
2653         CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2654 
2655         RISCV_IMPLIED_EXTS_RULE_END
2656     },
2657 };
2658 
2659 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2660     .ext = CPU_CFG_OFFSET(ext_zvfh),
2661     .implied_multi_exts = {
2662         CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2663 
2664         RISCV_IMPLIED_EXTS_RULE_END
2665     },
2666 };
2667 
2668 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2669     .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2670     .implied_multi_exts = {
2671         CPU_CFG_OFFSET(ext_zve32f),
2672 
2673         RISCV_IMPLIED_EXTS_RULE_END
2674     },
2675 };
2676 
2677 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2678     .ext = CPU_CFG_OFFSET(ext_zvkn),
2679     .implied_multi_exts = {
2680         CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2681         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2682 
2683         RISCV_IMPLIED_EXTS_RULE_END
2684     },
2685 };
2686 
2687 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2688     .ext = CPU_CFG_OFFSET(ext_zvknc),
2689     .implied_multi_exts = {
2690         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2691 
2692         RISCV_IMPLIED_EXTS_RULE_END
2693     },
2694 };
2695 
2696 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2697     .ext = CPU_CFG_OFFSET(ext_zvkng),
2698     .implied_multi_exts = {
2699         CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2700 
2701         RISCV_IMPLIED_EXTS_RULE_END
2702     },
2703 };
2704 
2705 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2706     .ext = CPU_CFG_OFFSET(ext_zvknhb),
2707     .implied_multi_exts = {
2708         CPU_CFG_OFFSET(ext_zve64x),
2709 
2710         RISCV_IMPLIED_EXTS_RULE_END
2711     },
2712 };
2713 
2714 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2715     .ext = CPU_CFG_OFFSET(ext_zvks),
2716     .implied_multi_exts = {
2717         CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2718         CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2719 
2720         RISCV_IMPLIED_EXTS_RULE_END
2721     },
2722 };
2723 
2724 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2725     .ext = CPU_CFG_OFFSET(ext_zvksc),
2726     .implied_multi_exts = {
2727         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2728 
2729         RISCV_IMPLIED_EXTS_RULE_END
2730     },
2731 };
2732 
2733 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2734     .ext = CPU_CFG_OFFSET(ext_zvksg),
2735     .implied_multi_exts = {
2736         CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2737 
2738         RISCV_IMPLIED_EXTS_RULE_END
2739     },
2740 };
2741 
2742 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2743     &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2744     &RVM_IMPLIED, &RVV_IMPLIED, NULL
2745 };
2746 
2747 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2748     &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2749     &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2750     &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2751     &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2752     &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2753     &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2754     &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2755     &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2756     &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2757     &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2758     &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2759     &ZVKS_IMPLIED,  &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
2760     NULL
2761 };
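/*
 * These rules can chain: enabling zvksc implies zvks and zvbc
 * (ZVKSC_IMPLIED), and zvks in turn implies zvksed, zvksh, zvkb and
 * zvkt (ZVKS_IMPLIED).
 */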
2762 
2763 static const Property riscv_cpu_properties[] = {
2764     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2765 
2766     {.name = "pmu-mask", .info = &prop_pmu_mask},
2767     {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2768 
2769     {.name = "mmu", .info = &prop_mmu},
2770     {.name = "pmp", .info = &prop_pmp},
2771 
2772     {.name = "priv_spec", .info = &prop_priv_spec},
2773     {.name = "vext_spec", .info = &prop_vext_spec},
2774 
2775     {.name = "vlen", .info = &prop_vlen},
2776     {.name = "elen", .info = &prop_elen},
2777 
2778     {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2779     {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2780     {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2781 
2782     {.name = "mvendorid", .info = &prop_mvendorid},
2783     {.name = "mimpid", .info = &prop_mimpid},
2784     {.name = "marchid", .info = &prop_marchid},
2785 
2786 #ifndef CONFIG_USER_ONLY
2787     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2788 #endif
2789 
2790     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2791 
2792     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2793     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2794     DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),
2795 
2796     /*
2797      * write_misa() is marked as experimental for now, so mark
2798      * it with the 'x-' prefix and default to 'false'.
2799      */
2800     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2801 };
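/*
 * A few illustrative combinations of the properties above (names as
 * registered here, values only as examples):
 *   -cpu rv64,pmp=false,mmu=false
 *   -cpu rv64,pmu-mask=0x78,short-isa-string=true
 */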
2802 
2803 #if defined(TARGET_RISCV64)
2804 static void rva22u64_profile_cpu_init(Object *obj)
2805 {
2806     rv64i_bare_cpu_init(obj);
2807 
2808     RVA22U64.enabled = true;
2809 }
2810 
2811 static void rva22s64_profile_cpu_init(Object *obj)
2812 {
2813     rv64i_bare_cpu_init(obj);
2814 
2815     RVA22S64.enabled = true;
2816 }
2817 #endif
2818 
2819 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2820 {
2821     RISCVCPU *cpu = RISCV_CPU(cs);
2822     CPURISCVState *env = &cpu->env;
2823 
2824     switch (riscv_cpu_mxl(env)) {
2825     case MXL_RV32:
2826         return "riscv:rv32";
2827     case MXL_RV64:
2828     case MXL_RV128:
2829         return "riscv:rv64";
2830     default:
2831         g_assert_not_reached();
2832     }
2833 }
2834 
2835 #ifndef CONFIG_USER_ONLY
2836 static int64_t riscv_get_arch_id(CPUState *cs)
2837 {
2838     RISCVCPU *cpu = RISCV_CPU(cs);
2839 
2840     return cpu->env.mhartid;
2841 }
2842 
2843 #include "hw/core/sysemu-cpu-ops.h"
2844 
2845 static const struct SysemuCPUOps riscv_sysemu_ops = {
2846     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2847     .write_elf64_note = riscv_cpu_write_elf64_note,
2848     .write_elf32_note = riscv_cpu_write_elf32_note,
2849     .legacy_vmsd = &vmstate_riscv_cpu,
2850 };
2851 #endif
2852 
2853 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2854 {
2855     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2856     CPUClass *cc = CPU_CLASS(c);
2857     DeviceClass *dc = DEVICE_CLASS(c);
2858     ResettableClass *rc = RESETTABLE_CLASS(c);
2859 
2860     device_class_set_parent_realize(dc, riscv_cpu_realize,
2861                                     &mcc->parent_realize);
2862 
2863     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2864                                        &mcc->parent_phases);
2865 
2866     cc->class_by_name = riscv_cpu_class_by_name;
2867     cc->has_work = riscv_cpu_has_work;
2868     cc->mmu_index = riscv_cpu_mmu_index;
2869     cc->dump_state = riscv_cpu_dump_state;
2870     cc->set_pc = riscv_cpu_set_pc;
2871     cc->get_pc = riscv_cpu_get_pc;
2872     cc->gdb_read_register = riscv_cpu_gdb_read_register;
2873     cc->gdb_write_register = riscv_cpu_gdb_write_register;
2874     cc->gdb_stop_before_watchpoint = true;
2875     cc->disas_set_info = riscv_cpu_disas_set_info;
2876 #ifndef CONFIG_USER_ONLY
2877     cc->sysemu_ops = &riscv_sysemu_ops;
2878     cc->get_arch_id = riscv_get_arch_id;
2879 #endif
2880     cc->gdb_arch_name = riscv_gdb_arch_name;
2881 
2882     device_class_set_props(dc, riscv_cpu_properties);
2883 }
2884 
2885 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2886 {
2887     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2888 
2889     mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2890     riscv_cpu_validate_misa_mxl(mcc);
2891 }
2892 
2893 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2894                                  int max_str_len)
2895 {
2896     const RISCVIsaExtData *edata;
2897     char *old = *isa_str;
2898     char *new = *isa_str;
2899 
2900     for (edata = isa_edata_arr; edata && edata->name; edata++) {
2901         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2902             new = g_strconcat(old, "_", edata->name, NULL);
2903             g_free(old);
2904             old = new;
2905         }
2906     }
2907 
2908     *isa_str = new;
2909 }
2910 
2911 char *riscv_isa_string(RISCVCPU *cpu)
2912 {
2913     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2914     int i;
2915     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2916     char *isa_str = g_new(char, maxlen);
2917     int xlen = riscv_cpu_max_xlen(mcc);
2918     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2919 
2920     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2921         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2922             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2923         }
2924     }
2925     *p = '\0';
2926     if (!cpu->cfg.short_isa_string) {
2927         riscv_isa_string_ext(cpu, &isa_str, maxlen);
2928     }
2929     return isa_str;
2930 }
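/*
 * The result is the base "rvXX" prefix, the enabled single-letter
 * extensions in IEMAFDQCBPVH order, and (unless short-isa-string is set)
 * the enabled multi-letter extensions appended with '_' separators,
 * e.g. an illustrative "rv64imafdcbvh_zicsr_zifencei_zba_zbb".
 */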
2931 
2932 #ifndef CONFIG_USER_ONLY
2933 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2934 {
2935     int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2936     char **extensions = g_new(char *, maxlen);
2937 
2938     for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2939         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2940             extensions[*count] = g_new(char, 2);
2941             snprintf(extensions[*count], 2, "%c",
2942                      qemu_tolower(riscv_single_letter_exts[i]));
2943             (*count)++;
2944         }
2945     }
2946 
2947     for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2948         if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2949             extensions[*count] = g_strdup(edata->name);
2950             (*count)++;
2951         }
2952     }
2953 
2954     return extensions;
2955 }
2956 
2957 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2958 {
2959     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2960     const size_t maxlen = sizeof("rv128i");
2961     g_autofree char *isa_base = g_new(char, maxlen);
2962     g_autofree char *riscv_isa = NULL;
2963     char **isa_extensions;
2964     int count = 0;
2965     int xlen = riscv_cpu_max_xlen(mcc);
2966 
2967     riscv_isa = riscv_isa_string(cpu);
2968     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2969 
2970     snprintf(isa_base, maxlen, "rv%di", xlen);
2971     qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2972 
2973     isa_extensions = riscv_isa_extensions_list(cpu, &count);
2974     qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2975                                   isa_extensions, count);
2976 
2977     for (int i = 0; i < count; i++) {
2978         g_free(isa_extensions[i]);
2979     }
2980 
2981     g_free(isa_extensions);
2982 }
2983 #endif
2984 
2985 #define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
2986     {                                                       \
2987         .name = (type_name),                                \
2988         .parent = TYPE_RISCV_CPU,                           \
2989         .instance_init = (initfn),                          \
2990         .class_init = riscv_cpu_class_init,                 \
2991         .class_data = (void *)(misa_mxl_max)                \
2992     }
2993 
2994 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
2995     {                                                       \
2996         .name = (type_name),                                \
2997         .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
2998         .instance_init = (initfn),                          \
2999         .class_init = riscv_cpu_class_init,                 \
3000         .class_data = (void *)(misa_mxl_max)                \
3001     }
3002 
3003 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
3004     {                                                       \
3005         .name = (type_name),                                \
3006         .parent = TYPE_RISCV_VENDOR_CPU,                    \
3007         .instance_init = (initfn),                          \
3008         .class_init = riscv_cpu_class_init,                 \
3009         .class_data = (void *)(misa_mxl_max)                \
3010     }
3011 
3012 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
3013     {                                                       \
3014         .name = (type_name),                                \
3015         .parent = TYPE_RISCV_BARE_CPU,                      \
3016         .instance_init = (initfn),                          \
3017         .class_init = riscv_cpu_class_init,                 \
3018         .class_data = (void *)(misa_mxl_max)                \
3019     }
3020 
3021 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
3022     {                                                       \
3023         .name = (type_name),                                \
3024         .parent = TYPE_RISCV_BARE_CPU,                      \
3025         .instance_init = (initfn),                          \
3026         .class_init = riscv_cpu_class_init,                 \
3027         .class_data = (void *)(misa_mxl_max)                \
3028     }
3029 
3030 static const TypeInfo riscv_cpu_type_infos[] = {
3031     {
3032         .name = TYPE_RISCV_CPU,
3033         .parent = TYPE_CPU,
3034         .instance_size = sizeof(RISCVCPU),
3035         .instance_align = __alignof(RISCVCPU),
3036         .instance_init = riscv_cpu_init,
3037         .instance_post_init = riscv_cpu_post_init,
3038         .abstract = true,
3039         .class_size = sizeof(RISCVCPUClass),
3040         .class_init = riscv_cpu_common_class_init,
3041     },
3042     {
3043         .name = TYPE_RISCV_DYNAMIC_CPU,
3044         .parent = TYPE_RISCV_CPU,
3045         .abstract = true,
3046     },
3047     {
3048         .name = TYPE_RISCV_VENDOR_CPU,
3049         .parent = TYPE_RISCV_CPU,
3050         .abstract = true,
3051     },
3052     {
3053         .name = TYPE_RISCV_BARE_CPU,
3054         .parent = TYPE_RISCV_CPU,
3055         .instance_init = riscv_bare_cpu_init,
3056         .abstract = true,
3057     },
3058 #if defined(TARGET_RISCV32)
3059     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV32,  riscv_max_cpu_init),
3060 #elif defined(TARGET_RISCV64)
3061     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX,       MXL_RV64,  riscv_max_cpu_init),
3062 #endif
3063 
3064 #if defined(TARGET_RISCV32) || \
3065     (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
3066     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,    MXL_RV32,  rv32_base_cpu_init),
3067     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX,       MXL_RV32,  rv32_ibex_cpu_init),
3068     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32,  rv32_sifive_e_cpu_init),
3069     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32,  rv32_imafcu_nommu_cpu_init),
3070     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32,  rv32_sifive_u_cpu_init),
3071     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I,        MXL_RV32,  rv32i_bare_cpu_init),
3072     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E,        MXL_RV32,  rv32e_bare_cpu_init),
3073 #endif
3074 
3075 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
3076     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32,     MXL_RV32,  riscv_max_cpu_init),
3077 #endif
3078 
3079 #if defined(TARGET_RISCV64)
3080     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,    MXL_RV64,  rv64_base_cpu_init),
3081     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64,  rv64_sifive_e_cpu_init),
3082     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64,  rv64_sifive_u_cpu_init),
3083     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C,   MXL_RV64,  rv64_sifive_u_cpu_init),
3084     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64,  rv64_thead_c906_cpu_init),
3085     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64,  rv64_tt_ascalon_cpu_init),
3086     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1,  MXL_RV64,  rv64_veyron_v1_cpu_init),
3087     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU,
3088                                                  MXL_RV64, rv64_xiangshan_nanhu_cpu_init),
3089 #ifdef CONFIG_TCG
3090     DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,   MXL_RV128, rv128_base_cpu_init),
3091 #endif /* CONFIG_TCG */
3092     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I,        MXL_RV64,  rv64i_bare_cpu_init),
3093     DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E,        MXL_RV64,  rv64e_bare_cpu_init),
3094     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,  MXL_RV64,  rva22u64_profile_cpu_init),
3095     DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64,  MXL_RV64,  rva22s64_profile_cpu_init),
3096 #endif /* TARGET_RISCV64 */
3097 };
3098 
3099 DEFINE_TYPES(riscv_cpu_type_infos)
3100