1 /*
2 * QEMU RISC-V CPU
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "qapi/error.h"
28 #include "qapi/visitor.h"
29 #include "qemu/error-report.h"
30 #include "hw/qdev-properties.h"
31 #include "hw/core/qdev-prop-internal.h"
32 #include "migration/vmstate.h"
33 #include "fpu/softfloat-helpers.h"
34 #include "system/device_tree.h"
35 #include "system/kvm.h"
36 #include "system/tcg.h"
37 #include "kvm/kvm_riscv.h"
38 #include "tcg/tcg-cpu.h"
39 #include "tcg/tcg.h"
40
41 /* RISC-V CPU definitions */
42 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
43 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
44 RVC, RVS, RVU, RVH, RVG, RVB, 0};
45
46 /*
47 * From vector_helper.c
48 * Note that vector data is stored in host-endian 64-bit chunks,
49 * so addressing bytes needs a host-endian fixup.
50 */
51 #if HOST_BIG_ENDIAN
52 #define BYTE(x) ((x) ^ 7)
53 #else
54 #define BYTE(x) (x)
55 #endif
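/*
 * For example, BYTE(0) maps to index 7 on a big-endian host (0 ^ 7),
 * while on a little-endian host the index is used unchanged.
 */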
56
57 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
58 {
59 return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
60 }
61
62 /* Hash that stores general user set numeric options */
63 static GHashTable *general_user_opts;
64
65 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
66 {
67 g_hash_table_insert(general_user_opts, (gpointer)optname,
68 GUINT_TO_POINTER(value));
69 }
70
71 bool riscv_cpu_option_set(const char *optname)
72 {
73 return g_hash_table_contains(general_user_opts, optname);
74 }
75
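/*
 * Merge 'src' into 'dest': boolean fields are OR'ed together, while
 * typed fields in 'src' override 'dest' only when they differ from
 * their default value (see cpu_cfg_fields.h.inc).
 */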
76 static void riscv_cpu_cfg_merge(RISCVCPUConfig *dest, const RISCVCPUConfig *src)
77 {
78 #define BOOL_FIELD(x) dest->x |= src->x;
79 #define TYPED_FIELD(type, x, default_) if (src->x != default_) dest->x = src->x;
80 #include "cpu_cfg_fields.h.inc"
81 }
82
83 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
84 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
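/*
 * For example, ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr)
 * expands to {"zicsr", PRIV_VERSION_1_10_0, CPU_CFG_OFFSET(ext_zicsr)}.
 */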
85
86 /*
87 * Here are the ordering rules of extension naming defined by RISC-V
88 * specification:
89 * 1. All extensions should be separated from other multi-letter extensions
90 * by an underscore.
91 * 2. The first letter following the 'Z' conventionally indicates the most
92 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
93 * If multiple 'Z' extensions are named, they should be ordered first
94 * by category, then alphabetically within a category.
95 * 3. Standard supervisor-level extensions (starting with 'S') should be
96 * listed after standard unprivileged extensions. If multiple
97 * supervisor-level extensions are listed, they should be ordered
98 * alphabetically.
99 * 4. Non-standard extensions (starting with 'X') must be listed after all
100 * standard extensions. They must be separated from other multi-letter
101 * extensions by an underscore.
102 *
103 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
104 * instead.
105 */
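/*
 * As an illustration, an ISA string following these rules could look like
 *   rv64imafdc_zicsr_zifencei_zba_zbb_ssaia_sstc_xtheadba
 * ('Z' extensions ordered by category then name, 'S' extensions next,
 * 'X' extensions last).
 */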
106 const RISCVIsaExtData isa_edata_arr[] = {
107 ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
108 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
109 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
110 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
111 ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
112 ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
113 ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
114 ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
115 ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
116 ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
117 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
118 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
119 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
120 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
121 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
122 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
123 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
124 ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
125 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
126 ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
127 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
128 ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
129 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
130 ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
131 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
132 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
133 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
134 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
135 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
136 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
137 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
138 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
139 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
140 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
141 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
142 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
143 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
144 ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
145 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
146 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
147 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
148 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
149 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
150 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
151 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
152 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
153 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
154 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
155 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
156 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
157 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
158 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
159 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
160 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
161 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
162 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
163 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
164 ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
165 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
166 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
167 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
168 ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
169 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
170 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
171 ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
172 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
173 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
174 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
175 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
176 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
177 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
178 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
179 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
180 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
181 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
182 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
183 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
184 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
185 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
186 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
187 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
188 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
189 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
190 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
191 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
192 ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
193 ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
194 ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
195 ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
196 ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
197 ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
198 ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
199 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
200 ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
201 ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
202 ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
203 ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
204 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
205 ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
206 ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
207 ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
208 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
209 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
210 ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
211 ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
212 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
213 ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
214 ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
215 ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
216 ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
217 ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
218 ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
219 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
220 ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
221 ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
222 ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
223 ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
224 ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
225 ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
226 ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
227 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
228 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
229 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
230 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
231 ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
232 ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
233 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
234 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
235 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
236 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
237 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
238 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
239 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
240 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
241 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
242 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
243 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
244 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
245
246 { },
247 };
248
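/*
 * 'ext_offset' is a CPU_CFG_OFFSET() value; e.g.
 * isa_ext_is_enabled(cpu, CPU_CFG_OFFSET(ext_zba)) reads cpu->cfg.ext_zba.
 */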
249 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
250 {
251 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
252
253 return *ext_enabled;
254 }
255
256 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
257 {
258 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
259
260 *ext_enabled = en;
261 }
262
263 bool riscv_cpu_is_vendor(Object *cpu_obj)
264 {
265 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
266 }
267
268 const char * const riscv_int_regnames[] = {
269 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
270 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
271 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
272 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
273 "x28/t3", "x29/t4", "x30/t5", "x31/t6"
274 };
275
276 const char * const riscv_int_regnamesh[] = {
277 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
278 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
279 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
280 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
281 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
282 "x30h/t5h", "x31h/t6h"
283 };
284
285 const char * const riscv_fpr_regnames[] = {
286 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
287 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
288 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
289 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
290 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
291 "f30/ft10", "f31/ft11"
292 };
293
294 const char * const riscv_rvv_regnames[] = {
295 "v0", "v1", "v2", "v3", "v4", "v5", "v6",
296 "v7", "v8", "v9", "v10", "v11", "v12", "v13",
297 "v14", "v15", "v16", "v17", "v18", "v19", "v20",
298 "v21", "v22", "v23", "v24", "v25", "v26", "v27",
299 "v28", "v29", "v30", "v31"
300 };
301
302 static const char * const riscv_excp_names[] = {
303 "misaligned_fetch",
304 "fault_fetch",
305 "illegal_instruction",
306 "breakpoint",
307 "misaligned_load",
308 "fault_load",
309 "misaligned_store",
310 "fault_store",
311 "user_ecall",
312 "supervisor_ecall",
313 "hypervisor_ecall",
314 "machine_ecall",
315 "exec_page_fault",
316 "load_page_fault",
317 "reserved",
318 "store_page_fault",
319 "double_trap",
320 "reserved",
321 "reserved",
322 "reserved",
323 "guest_exec_page_fault",
324 "guest_load_page_fault",
325 "reserved",
326 "guest_store_page_fault",
327 };
328
329 static const char * const riscv_intr_names[] = {
330 "u_software",
331 "s_software",
332 "vs_software",
333 "m_software",
334 "u_timer",
335 "s_timer",
336 "vs_timer",
337 "m_timer",
338 "u_external",
339 "s_external",
340 "vs_external",
341 "m_external",
342 "reserved",
343 "reserved",
344 "reserved",
345 "reserved"
346 };
347
348 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
349 {
350 if (async) {
351 return (cause < ARRAY_SIZE(riscv_intr_names)) ?
352 riscv_intr_names[cause] : "(unknown)";
353 } else {
354 return (cause < ARRAY_SIZE(riscv_excp_names)) ?
355 riscv_excp_names[cause] : "(unknown)";
356 }
357 }
358
359 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
360 {
361 env->misa_ext_mask = env->misa_ext = ext;
362 }
363
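/* misa.MXL is 1/2/3 for RV32/RV64/RV128, so 16 << mxl yields 32/64/128. */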
364 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
365 {
366 return 16 << mcc->def->misa_mxl_max;
367 }
368
369 #ifndef CONFIG_USER_ONLY
370 static uint8_t satp_mode_from_str(const char *satp_mode_str)
371 {
372 if (!strncmp(satp_mode_str, "mbare", 5)) {
373 return VM_1_10_MBARE;
374 }
375
376 if (!strncmp(satp_mode_str, "sv32", 4)) {
377 return VM_1_10_SV32;
378 }
379
380 if (!strncmp(satp_mode_str, "sv39", 4)) {
381 return VM_1_10_SV39;
382 }
383
384 if (!strncmp(satp_mode_str, "sv48", 4)) {
385 return VM_1_10_SV48;
386 }
387
388 if (!strncmp(satp_mode_str, "sv57", 4)) {
389 return VM_1_10_SV57;
390 }
391
392 if (!strncmp(satp_mode_str, "sv64", 4)) {
393 return VM_1_10_SV64;
394 }
395
396 g_assert_not_reached();
397 }
398
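/*
 * Returns the highest set bit; e.g. a map with mbare (bit 0) and sv39
 * (bit 8) set yields 8, i.e. 31 - __builtin_clz(0x101).
 */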
399 static uint8_t satp_mode_max_from_map(uint32_t map)
400 {
401 /*
402 * 'map = 0' will make us return (31 - 32), which C will
403 * happily overflow to UINT_MAX. There's no good result to
404 * return if 'map = 0' (e.g. returning 0 will be ambiguous
405 * with the result for 'map = 1').
406 *
407 * Assert out if map = 0. Callers will have to deal with
408 * it outside of this function.
409 */
410 g_assert(map > 0);
411
412 /* map here has at least one bit set, so no problem with clz */
413 return 31 - __builtin_clz(map);
414 }
415
416 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
417 {
418 if (is_32_bit) {
419 switch (satp_mode) {
420 case VM_1_10_SV32:
421 return "sv32";
422 case VM_1_10_MBARE:
423 return "none";
424 }
425 } else {
426 switch (satp_mode) {
427 case VM_1_10_SV64:
428 return "sv64";
429 case VM_1_10_SV57:
430 return "sv57";
431 case VM_1_10_SV48:
432 return "sv48";
433 case VM_1_10_SV39:
434 return "sv39";
435 case VM_1_10_MBARE:
436 return "none";
437 }
438 }
439
440 g_assert_not_reached();
441 }
442
443 static bool get_satp_mode_supported(RISCVCPU *cpu, uint16_t *supported)
444 {
445 bool rv32 = riscv_cpu_is_32bit(cpu);
446 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
447 int satp_mode = cpu->cfg.max_satp_mode;
448
449 if (satp_mode == -1) {
450 return false;
451 }
452
453 *supported = 0;
454 for (int i = 0; i <= satp_mode; ++i) {
455 if (valid_vm[i]) {
456 *supported |= (1 << i);
457 }
458 }
459 return true;
460 }
461
462 /* Set the satp mode to the max supported */
463 static void set_satp_mode_default_map(RISCVCPU *cpu)
464 {
465 /*
466 * Bare CPUs do not default to the max available.
467 * Users must set a valid satp_mode in the command
468 * line. Otherwise, leave the existing max_satp_mode
469 * in place.
470 */
471 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
472 warn_report("No satp mode set. Defaulting to 'bare'");
473 cpu->cfg.max_satp_mode = VM_1_10_MBARE;
474 }
475 }
476 #endif
477
478 #ifndef CONFIG_USER_ONLY
479 static void riscv_register_custom_csrs(RISCVCPU *cpu, const RISCVCSR *csr_list)
480 {
481 for (size_t i = 0; csr_list[i].csr_ops.name; i++) {
482 int csrno = csr_list[i].csrno;
483 const riscv_csr_operations *csr_ops = &csr_list[i].csr_ops;
484 if (!csr_list[i].insertion_test || csr_list[i].insertion_test(cpu)) {
485 riscv_set_csr_ops(csrno, csr_ops);
486 }
487 }
488 }
489 #endif
490
491 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
492 {
493 ObjectClass *oc;
494 char *typename;
495 char **cpuname;
496
497 cpuname = g_strsplit(cpu_model, ",", 1);
498 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
499 oc = object_class_by_name(typename);
500 g_strfreev(cpuname);
501 g_free(typename);
502
503 return oc;
504 }
505
506 char *riscv_cpu_get_name(RISCVCPU *cpu)
507 {
508 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
509 const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
510
511 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
512
513 return cpu_model_from_type(typename);
514 }
515
516 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
517 {
518 RISCVCPU *cpu = RISCV_CPU(cs);
519 CPURISCVState *env = &cpu->env;
520 int i, j;
521 uint8_t *p;
522
523 #if !defined(CONFIG_USER_ONLY)
524 if (riscv_has_ext(env, RVH)) {
525 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
526 }
527 #endif
528 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
529 #ifndef CONFIG_USER_ONLY
530 {
531 static const int dump_csrs[] = {
532 CSR_MHARTID,
533 CSR_MSTATUS,
534 CSR_MSTATUSH,
535 /*
536 * CSR_SSTATUS is intentionally omitted here as its value
537 * can be figured out by looking at CSR_MSTATUS
538 */
539 CSR_HSTATUS,
540 CSR_VSSTATUS,
541 CSR_MIP,
542 CSR_MIE,
543 CSR_MIDELEG,
544 CSR_HIDELEG,
545 CSR_MEDELEG,
546 CSR_HEDELEG,
547 CSR_MTVEC,
548 CSR_STVEC,
549 CSR_VSTVEC,
550 CSR_MEPC,
551 CSR_SEPC,
552 CSR_VSEPC,
553 CSR_MCAUSE,
554 CSR_SCAUSE,
555 CSR_VSCAUSE,
556 CSR_MTVAL,
557 CSR_STVAL,
558 CSR_HTVAL,
559 CSR_MTVAL2,
560 CSR_MSCRATCH,
561 CSR_SSCRATCH,
562 CSR_SATP,
563 };
564
565 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
566 int csrno = dump_csrs[i];
567 target_ulong val = 0;
568 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
569
570 /*
571 * Rely on the smode, hmode, etc, predicates within csr.c
572 * to do the filtering of the registers that are present.
573 */
574 if (res == RISCV_EXCP_NONE) {
575 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
576 csr_ops[csrno].name, val);
577 }
578 }
579 }
580 #endif
581
582 for (i = 0; i < 32; i++) {
583 qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
584 riscv_int_regnames[i], env->gpr[i]);
585 if ((i & 3) == 3) {
586 qemu_fprintf(f, "\n");
587 }
588 }
589 if (flags & CPU_DUMP_FPU) {
590 target_ulong val = 0;
591 RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
592 if (res == RISCV_EXCP_NONE) {
593 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
594 csr_ops[CSR_FCSR].name, val);
595 }
596 for (i = 0; i < 32; i++) {
597 qemu_fprintf(f, " %-8s %016" PRIx64,
598 riscv_fpr_regnames[i], env->fpr[i]);
599 if ((i & 3) == 3) {
600 qemu_fprintf(f, "\n");
601 }
602 }
603 }
604 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
605 static const int dump_rvv_csrs[] = {
606 CSR_VSTART,
607 CSR_VXSAT,
608 CSR_VXRM,
609 CSR_VCSR,
610 CSR_VL,
611 CSR_VTYPE,
612 CSR_VLENB,
613 };
614 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
615 int csrno = dump_rvv_csrs[i];
616 target_ulong val = 0;
617 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
618
619 /*
620 * Rely on the smode, hmode, etc, predicates within csr.c
621 * to do the filtering of the registers that are present.
622 */
623 if (res == RISCV_EXCP_NONE) {
624 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
625 csr_ops[csrno].name, val);
626 }
627 }
628 uint16_t vlenb = cpu->cfg.vlenb;
629
630 for (i = 0; i < 32; i++) {
631 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
632 p = (uint8_t *)env->vreg;
633 for (j = vlenb - 1 ; j >= 0; j--) {
634 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
635 }
636 qemu_fprintf(f, "\n");
637 }
638 }
639 }
640
641 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
642 {
643 RISCVCPU *cpu = RISCV_CPU(cs);
644 CPURISCVState *env = &cpu->env;
645
646 if (env->xl == MXL_RV32) {
647 env->pc = (int32_t)value;
648 } else {
649 env->pc = value;
650 }
651 }
652
653 static vaddr riscv_cpu_get_pc(CPUState *cs)
654 {
655 RISCVCPU *cpu = RISCV_CPU(cs);
656 CPURISCVState *env = &cpu->env;
657
658 /* Match cpu_get_tb_cpu_state. */
659 if (env->xl == MXL_RV32) {
660 return env->pc & UINT32_MAX;
661 }
662 return env->pc;
663 }
664
665 #ifndef CONFIG_USER_ONLY
666 bool riscv_cpu_has_work(CPUState *cs)
667 {
668 RISCVCPU *cpu = RISCV_CPU(cs);
669 CPURISCVState *env = &cpu->env;
670 /*
671 * Definition of the WFI instruction requires it to ignore the privilege
672 * mode and delegation registers, but respect individual enables
673 */
674 return riscv_cpu_all_pending(env) != 0 ||
675 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
676 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
677 }
678 #endif /* !CONFIG_USER_ONLY */
679
680 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
681 {
682 #ifndef CONFIG_USER_ONLY
683 uint8_t iprio;
684 int i, irq, rdzero;
685 #endif
686 CPUState *cs = CPU(obj);
687 RISCVCPU *cpu = RISCV_CPU(cs);
688 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
689 CPURISCVState *env = &cpu->env;
690
691 if (mcc->parent_phases.hold) {
692 mcc->parent_phases.hold(obj, type);
693 }
694 #ifndef CONFIG_USER_ONLY
695 env->misa_mxl = mcc->def->misa_mxl_max;
696 env->priv = PRV_M;
697 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
698 if (env->misa_mxl > MXL_RV32) {
699 /*
700 * The reset status of SXL/UXL is undefined, but mstatus is WARL
701 * and we must ensure that the value after init is valid for read.
702 */
703 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
704 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
705 if (riscv_has_ext(env, RVH)) {
706 env->vsstatus = set_field(env->vsstatus,
707 MSTATUS64_SXL, env->misa_mxl);
708 env->vsstatus = set_field(env->vsstatus,
709 MSTATUS64_UXL, env->misa_mxl);
710 env->mstatus_hs = set_field(env->mstatus_hs,
711 MSTATUS64_SXL, env->misa_mxl);
712 env->mstatus_hs = set_field(env->mstatus_hs,
713 MSTATUS64_UXL, env->misa_mxl);
714 }
715 if (riscv_cpu_cfg(env)->ext_smdbltrp) {
716 env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
717 }
718 }
719 env->mcause = 0;
720 env->miclaim = MIP_SGEIP;
721 env->pc = env->resetvec;
722 env->bins = 0;
723 env->two_stage_lookup = false;
724
725 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
726 (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
727 MENVCFG_ADUE : 0);
728 env->henvcfg = 0;
729
730 /* Initialize default priorities of local interrupts. */
731 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
732 iprio = riscv_cpu_default_priority(i);
733 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
734 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
735 env->hviprio[i] = 0;
736 }
737 i = 0;
738 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
739 if (!rdzero) {
740 env->hviprio[irq] = env->miprio[irq];
741 }
742 i++;
743 }
744
745 /*
746 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
747 * extension is enabled.
748 */
749 if (riscv_has_ext(env, RVH)) {
750 env->mideleg |= HS_MODE_INTERRUPTS;
751 }
752
753 /*
754 * Clear mseccfg and unlock all the PMP entries upon reset.
755 * This is allowed as per the priv and smepmp specifications
756 * and is needed to clear stale entries across reboots.
757 */
758 if (riscv_cpu_cfg(env)->ext_smepmp) {
759 env->mseccfg = 0;
760 }
761
762 pmp_unlock_entries(env);
763 #else
764 env->priv = PRV_U;
765 env->senvcfg = 0;
766 env->menvcfg = 0;
767 #endif
768
769 /* on reset elp is clear */
770 env->elp = false;
771 /* on reset ssp is set to 0 */
772 env->ssp = 0;
773
774 env->xl = riscv_cpu_mxl(env);
775 cs->exception_index = RISCV_EXCP_NONE;
776 env->load_res = -1;
777 set_default_nan_mode(1, &env->fp_status);
778 /* Default NaN value: sign bit clear, frac msb set */
779 set_float_default_nan_pattern(0b01000000, &env->fp_status);
780 env->vill = true;
781
782 #ifndef CONFIG_USER_ONLY
783 if (cpu->cfg.debug) {
784 riscv_trigger_reset_hold(env);
785 }
786
787 if (cpu->cfg.ext_smrnmi) {
788 env->rnmip = 0;
789 env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
790 }
791
792 if (kvm_enabled()) {
793 kvm_riscv_reset_vcpu(cpu);
794 }
795 #endif
796 }
797
798 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
799 {
800 RISCVCPU *cpu = RISCV_CPU(s);
801 CPURISCVState *env = &cpu->env;
802 info->target_info = &cpu->cfg;
803
804 /*
805 * A couple of bits in MSTATUS set the endianness:
806 * - MSTATUS_UBE (User-mode),
807 * - MSTATUS_SBE (Supervisor-mode),
808 * - MSTATUS_MBE (Machine-mode)
809 * but we don't implement that yet.
810 */
811 info->endian = BFD_ENDIAN_LITTLE;
812
813 switch (env->xl) {
814 case MXL_RV32:
815 info->print_insn = print_insn_riscv32;
816 break;
817 case MXL_RV64:
818 info->print_insn = print_insn_riscv64;
819 break;
820 case MXL_RV128:
821 info->print_insn = print_insn_riscv128;
822 break;
823 default:
824 g_assert_not_reached();
825 }
826 }
827
828 #ifndef CONFIG_USER_ONLY
829 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
830 {
831 bool rv32 = riscv_cpu_is_32bit(cpu);
832 uint16_t supported;
833 uint8_t satp_mode_map_max;
834
835 if (!get_satp_mode_supported(cpu, &supported)) {
836 /* The CPU wants the hypervisor to decide which satp mode to allow */
837 return;
838 }
839
840 if (cpu->satp_modes.map == 0) {
841 if (cpu->satp_modes.init == 0) {
842 /* If unset by the user, we fall back to the default satp mode. */
843 set_satp_mode_default_map(cpu);
844 } else {
845 /*
846 * Find the lowest level that was disabled and then enable the
847 * first valid level below which can be found in
848 * valid_vm_1_10_32/64.
849 */
850 for (int i = 1; i < 16; ++i) {
851 if ((cpu->satp_modes.init & (1 << i)) &&
852 supported & (1 << i)) {
853 for (int j = i - 1; j >= 0; --j) {
854 if (supported & (1 << j)) {
855 cpu->cfg.max_satp_mode = j;
856 return;
857 }
858 }
859 }
860 }
861 }
862 return;
863 }
864
865 satp_mode_map_max = satp_mode_max_from_map(cpu->satp_modes.map);
866
867 /* Make sure the user asked for a supported configuration (HW and qemu) */
868 if (satp_mode_map_max > cpu->cfg.max_satp_mode) {
869 error_setg(errp, "satp_mode %s is higher than hw max capability %s",
870 satp_mode_str(satp_mode_map_max, rv32),
871 satp_mode_str(cpu->cfg.max_satp_mode, rv32));
872 return;
873 }
874
875 /*
876 * Make sure the user did not ask for an invalid configuration as per
877 * the specification.
878 */
879 if (!rv32) {
880 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
881 if (!(cpu->satp_modes.map & (1 << i)) &&
882 (cpu->satp_modes.init & (1 << i)) &&
883 (supported & (1 << i))) {
884 error_setg(errp, "cannot disable %s satp mode if %s "
885 "is enabled", satp_mode_str(i, false),
886 satp_mode_str(satp_mode_map_max, false));
887 return;
888 }
889 }
890 }
891
892 cpu->cfg.max_satp_mode = satp_mode_map_max;
893 }
894 #endif
895
896 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
897 {
898 Error *local_err = NULL;
899
900 #ifndef CONFIG_USER_ONLY
901 riscv_cpu_satp_mode_finalize(cpu, &local_err);
902 if (local_err != NULL) {
903 error_propagate(errp, local_err);
904 return;
905 }
906 #endif
907
908 if (tcg_enabled()) {
909 riscv_tcg_cpu_finalize_features(cpu, &local_err);
910 if (local_err != NULL) {
911 error_propagate(errp, local_err);
912 return;
913 }
914 riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
915 } else if (kvm_enabled()) {
916 riscv_kvm_cpu_finalize_features(cpu, &local_err);
917 if (local_err != NULL) {
918 error_propagate(errp, local_err);
919 return;
920 }
921 }
922 }
923
924 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
925 {
926 CPUState *cs = CPU(dev);
927 RISCVCPU *cpu = RISCV_CPU(dev);
928 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
929 Error *local_err = NULL;
930
931 cpu_exec_realizefn(cs, &local_err);
932 if (local_err != NULL) {
933 error_propagate(errp, local_err);
934 return;
935 }
936
937 riscv_cpu_finalize_features(cpu, &local_err);
938 if (local_err != NULL) {
939 error_propagate(errp, local_err);
940 return;
941 }
942
943 riscv_cpu_register_gdb_regs_for_features(cs);
944
945 #ifndef CONFIG_USER_ONLY
946 if (cpu->cfg.debug) {
947 riscv_trigger_realize(&cpu->env);
948 }
949 #endif
950
951 qemu_init_vcpu(cs);
952 cpu_reset(cs);
953
954 mcc->parent_realize(dev, errp);
955 }
956
957 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
958 {
959 if (tcg_enabled()) {
960 return riscv_cpu_tcg_compatible(cpu);
961 }
962
963 return true;
964 }
965
966 #ifndef CONFIG_USER_ONLY
967 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
968 void *opaque, Error **errp)
969 {
970 RISCVSATPModes *satp_modes = opaque;
971 uint8_t satp = satp_mode_from_str(name);
972 bool value;
973
974 value = satp_modes->map & (1 << satp);
975
976 visit_type_bool(v, name, &value, errp);
977 }
978
979 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
980 void *opaque, Error **errp)
981 {
982 RISCVSATPModes *satp_modes = opaque;
983 uint8_t satp = satp_mode_from_str(name);
984 bool value;
985
986 if (!visit_type_bool(v, name, &value, errp)) {
987 return;
988 }
989
990 satp_modes->map = deposit32(satp_modes->map, satp, 1, value);
991 satp_modes->init |= 1 << satp;
992 }
993
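/*
 * Expose each satp mode as a boolean CPU property so it can be toggled
 * by the user, e.g. "-cpu rv64,sv48=on" (illustrative command line).
 */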
994 void riscv_add_satp_mode_properties(Object *obj)
995 {
996 RISCVCPU *cpu = RISCV_CPU(obj);
997
998 if (cpu->env.misa_mxl == MXL_RV32) {
999 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1000 cpu_riscv_set_satp, NULL, &cpu->satp_modes);
1001 } else {
1002 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1003 cpu_riscv_set_satp, NULL, &cpu->satp_modes);
1004 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1005 cpu_riscv_set_satp, NULL, &cpu->satp_modes);
1006 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1007 cpu_riscv_set_satp, NULL, &cpu->satp_modes);
1008 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1009 cpu_riscv_set_satp, NULL, &cpu->satp_modes);
1010 }
1011 }
1012
1013 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1014 {
1015 RISCVCPU *cpu = RISCV_CPU(opaque);
1016 CPURISCVState *env = &cpu->env;
1017
1018 if (irq < IRQ_LOCAL_MAX) {
1019 switch (irq) {
1020 case IRQ_U_SOFT:
1021 case IRQ_S_SOFT:
1022 case IRQ_VS_SOFT:
1023 case IRQ_M_SOFT:
1024 case IRQ_U_TIMER:
1025 case IRQ_S_TIMER:
1026 case IRQ_VS_TIMER:
1027 case IRQ_M_TIMER:
1028 case IRQ_U_EXT:
1029 case IRQ_VS_EXT:
1030 case IRQ_M_EXT:
1031 if (kvm_enabled()) {
1032 kvm_riscv_set_irq(cpu, irq, level);
1033 } else {
1034 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1035 }
1036 break;
1037 case IRQ_S_EXT:
1038 if (kvm_enabled()) {
1039 kvm_riscv_set_irq(cpu, irq, level);
1040 } else {
1041 env->external_seip = level;
1042 riscv_cpu_update_mip(env, 1 << irq,
1043 BOOL_TO_MASK(level | env->software_seip));
1044 }
1045 break;
1046 default:
1047 g_assert_not_reached();
1048 }
1049 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1050 /* Require H-extension for handling guest local interrupts */
1051 if (!riscv_has_ext(env, RVH)) {
1052 g_assert_not_reached();
1053 }
1054
1055 /* Compute bit position in HGEIP CSR */
1056 irq = irq - IRQ_LOCAL_MAX + 1;
1057 if (env->geilen < irq) {
1058 g_assert_not_reached();
1059 }
1060
1061 /* Update HGEIP CSR */
1062 env->hgeip &= ~((target_ulong)1 << irq);
1063 if (level) {
1064 env->hgeip |= (target_ulong)1 << irq;
1065 }
1066
1067 /* Update mip.SGEIP bit */
1068 riscv_cpu_update_mip(env, MIP_SGEIP,
1069 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1070 } else {
1071 g_assert_not_reached();
1072 }
1073 }
1074
1075 static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
1076 {
1077 riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
1078 }
1079 #endif /* CONFIG_USER_ONLY */
1080
1081 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1082 {
1083 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1084 }
1085
1086 static void riscv_cpu_init(Object *obj)
1087 {
1088 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1089 RISCVCPU *cpu = RISCV_CPU(obj);
1090 CPURISCVState *env = &cpu->env;
1091
1092 env->misa_mxl = mcc->def->misa_mxl_max;
1093
1094 #ifndef CONFIG_USER_ONLY
1095 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1096 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1097 qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
1098 "riscv.cpu.rnmi", RNMI_MAX);
1099 #endif /* CONFIG_USER_ONLY */
1100
1101 general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1102
1103 /*
1104 * The timer and performance counters extensions were supported
1105 * in QEMU before they were added as discrete extensions in the
1106 * ISA. To keep compatibility we'll always default them to 'true'
1107 * for all CPUs. Each accelerator will decide what to do when
1108 * users disable them.
1109 */
1110 RISCV_CPU(obj)->cfg.ext_zicntr = !mcc->def->bare;
1111 RISCV_CPU(obj)->cfg.ext_zihpm = !mcc->def->bare;
1112
1113 /* Default values for non-bool cpu properties */
1114 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1115 cpu->cfg.vlenb = 128 >> 3;
1116 cpu->cfg.elen = 64;
1117 cpu->cfg.cbom_blocksize = 64;
1118 cpu->cfg.cbop_blocksize = 64;
1119 cpu->cfg.cboz_blocksize = 64;
1120 cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1121 cpu->cfg.max_satp_mode = -1;
1122
1123 if (mcc->def->profile) {
1124 mcc->def->profile->enabled = true;
1125 }
1126
1127 env->misa_ext_mask = env->misa_ext = mcc->def->misa_ext;
1128 riscv_cpu_cfg_merge(&cpu->cfg, &mcc->def->cfg);
1129
1130 if (mcc->def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
1131 cpu->env.priv_ver = mcc->def->priv_spec;
1132 }
1133 if (mcc->def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
1134 cpu->env.vext_ver = mcc->def->vext_spec;
1135 }
1136 #ifndef CONFIG_USER_ONLY
1137 if (mcc->def->custom_csrs) {
1138 riscv_register_custom_csrs(cpu, mcc->def->custom_csrs);
1139 }
1140 #endif
1141
1142 accel_cpu_instance_init(CPU(obj));
1143 }
1144
1145 typedef struct misa_ext_info {
1146 const char *name;
1147 const char *description;
1148 } MISAExtInfo;
1149
1150 #define MISA_INFO_IDX(_bit) \
1151 __builtin_ctz(_bit)
1152
1153 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1154 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
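/*
 * misa bit positions follow the extension letter, e.g. MISA_INFO_IDX(RVC)
 * is 2 ('C' - 'A') and indexes the "c" entry in the array below.
 */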
1155
1156 static const MISAExtInfo misa_ext_info_arr[] = {
1157 MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1158 MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1159 MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
1160 MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
1161 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1162 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1163 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1164 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1165 MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1166 MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1167 MISA_EXT_INFO(RVV, "v", "Vector operations"),
1168 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1169 MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1170 };
1171
1172 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1173 {
1174 CPUClass *cc = CPU_CLASS(mcc);
1175
1176 /* Validate that MISA_MXL is set properly. */
1177 switch (mcc->def->misa_mxl_max) {
1178 #ifdef TARGET_RISCV64
1179 case MXL_RV64:
1180 case MXL_RV128:
1181 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1182 break;
1183 #endif
1184 case MXL_RV32:
1185 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1186 break;
1187 default:
1188 g_assert_not_reached();
1189 }
1190 }
1191
1192 static int riscv_validate_misa_info_idx(uint32_t bit)
1193 {
1194 int idx;
1195
1196 /*
1197 * Our lowest valid input (RVA) is 1 and
1198 * __builtin_ctz() is UB with zero.
1199 */
1200 g_assert(bit != 0);
1201 idx = MISA_INFO_IDX(bit);
1202
1203 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1204 return idx;
1205 }
1206
1207 const char *riscv_get_misa_ext_name(uint32_t bit)
1208 {
1209 int idx = riscv_validate_misa_info_idx(bit);
1210 const char *val = misa_ext_info_arr[idx].name;
1211
1212 g_assert(val != NULL);
1213 return val;
1214 }
1215
1216 const char *riscv_get_misa_ext_description(uint32_t bit)
1217 {
1218 int idx = riscv_validate_misa_info_idx(bit);
1219 const char *val = misa_ext_info_arr[idx].description;
1220
1221 g_assert(val != NULL);
1222 return val;
1223 }
1224
1225 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1226 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1227 .enabled = _defval}
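/*
 * For example, MULTI_EXT_CFG_BOOL("zba", ext_zba, true) expands to
 * {.name = "zba", .offset = CPU_CFG_OFFSET(ext_zba), .enabled = true}.
 */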
1228
1229 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1230 /* Defaults for standard extensions */
1231 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1232 MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
1233 MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
1234 MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
1235 MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
1236 MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
1237 MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
1238 MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
1239 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1240 MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
1241 MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
1242 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1243 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1244 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1245 MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1246 MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1247 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1248 MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
1249 MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
1250 MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1251 MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1252 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1253 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1254 MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1255 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1256 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1257 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1258 MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1259 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1260 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1261 MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1262 MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1263 MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1264 MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1265 MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1266 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1267 MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
1268 MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
1269 MULTI_EXT_CFG_BOOL("supm", ext_supm, false),
1270
1271 MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1272 MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
1273 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1274 MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
1275 MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
1276 MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
1277 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1278 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1279 MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
1280 MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1281 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1282 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1283 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1284 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1285 MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),
1286
1287 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1288 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1289
1290 MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1291 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1292 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1293 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1294 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1295 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1296 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1297 MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1298 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1299 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1300 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1301 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1302 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1303 MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1304 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1305 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1306 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1307 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1308
1309 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1310 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1311 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1312 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1313
1314 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1315 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1316 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1317
1318 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1319
1320 MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1321 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1322 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1323 MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1324 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1325 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1326 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1327 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1328
1329 /* Vector cryptography extensions */
1330 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1331 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1332 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1333 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1334 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1335 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1336 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1337 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1338 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1339 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1340 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1341 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1342 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1343 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1344 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1345 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1346
1347 { },
1348 };
1349
1350 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1351 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1352 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1353 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1354 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1355 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1356 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1357 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1358 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1359 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1360 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1361 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1362 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1363
1364 { },
1365 };
1366
1367 /* These are experimental so mark with 'x-' */
1368 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1369 MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),
1370
1371 { },
1372 };
1373
1374 /*
1375 * 'Named features' is the name we give to extensions that we
1376 * don't want to expose to users. They are either immutable
1377 * (always enabled/disabled) or they'll vary depending on
1378 * the resulting CPU state. They have riscv,isa strings
1379 * and priv_ver like regular extensions.
1380 */
1381 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1382 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1383 MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
1384 MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
1385 MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),
1386
1387 { },
1388 };
1389
1390 /* Deprecated entries marked for future removal */
1391 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
1392 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
1393 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
1394 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
1395 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
1396 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
1397 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
1398 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
1399 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
1400 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
1401 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
1402 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
1403
1404 { },
1405 };
1406
1407 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1408 Error **errp)
1409 {
1410 g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1411 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1412 cpuname, propname);
1413 }
1414
1415 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1416 void *opaque, Error **errp)
1417 {
1418 RISCVCPU *cpu = RISCV_CPU(obj);
1419 uint8_t pmu_num, curr_pmu_num;
1420 uint32_t pmu_mask;
1421
1422 visit_type_uint8(v, name, &pmu_num, errp);
1423
1424 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1425
1426 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1427 cpu_set_prop_err(cpu, name, errp);
1428 error_append_hint(errp, "Current '%s' val: %u\n",
1429 name, curr_pmu_num);
1430 return;
1431 }
1432
1433 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1434 error_setg(errp, "Number of counters exceeds maximum available");
1435 return;
1436 }
1437
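/*
 * Programmable counters start at mhpmcounter3, so e.g. pmu-num=4
 * becomes pmu-mask 0x78 (bits 3..6).
 */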
1438 if (pmu_num == 0) {
1439 pmu_mask = 0;
1440 } else {
1441 pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1442 }
1443
1444 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1445 cpu->cfg.pmu_mask = pmu_mask;
1446 cpu_option_add_user_setting("pmu-mask", pmu_mask);
1447 }
1448
1449 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1450 void *opaque, Error **errp)
1451 {
1452 RISCVCPU *cpu = RISCV_CPU(obj);
1453 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1454
1455 visit_type_uint8(v, name, &pmu_num, errp);
1456 }
1457
1458 static const PropertyInfo prop_pmu_num = {
1459 .type = "int8",
1460 .description = "pmu-num",
1461 .get = prop_pmu_num_get,
1462 .set = prop_pmu_num_set,
1463 };
1464
1465 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1466 void *opaque, Error **errp)
1467 {
1468 RISCVCPU *cpu = RISCV_CPU(obj);
1469 uint32_t value;
1470 uint8_t pmu_num;
1471
1472 visit_type_uint32(v, name, &value, errp);
1473
1474 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1475 cpu_set_prop_err(cpu, name, errp);
1476 error_append_hint(errp, "Current '%s' val: %x\n",
1477 name, cpu->cfg.pmu_mask);
1478 return;
1479 }
1480
1481 pmu_num = ctpop32(value);
1482
1483 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1484 error_setg(errp, "Number of counters exceeds maximum available");
1485 return;
1486 }
1487
1488 cpu_option_add_user_setting(name, value);
1489 cpu->cfg.pmu_mask = value;
1490 }
1491
1492 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1493 void *opaque, Error **errp)
1494 {
1495 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1496
1497 visit_type_uint8(v, name, &pmu_mask, errp);
1498 }
1499
1500 static const PropertyInfo prop_pmu_mask = {
1501 .type = "int8",
1502 .description = "pmu-mask",
1503 .get = prop_pmu_mask_get,
1504 .set = prop_pmu_mask_set,
1505 };
1506
1507 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1508 void *opaque, Error **errp)
1509 {
1510 RISCVCPU *cpu = RISCV_CPU(obj);
1511 bool value;
1512
1513 visit_type_bool(v, name, &value, errp);
1514
1515 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1516 cpu_set_prop_err(cpu, "mmu", errp);
1517 return;
1518 }
1519
1520 cpu_option_add_user_setting(name, value);
1521 cpu->cfg.mmu = value;
1522 }
1523
1524 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1525 void *opaque, Error **errp)
1526 {
1527 bool value = RISCV_CPU(obj)->cfg.mmu;
1528
1529 visit_type_bool(v, name, &value, errp);
1530 }
1531
1532 static const PropertyInfo prop_mmu = {
1533 .type = "bool",
1534 .description = "mmu",
1535 .get = prop_mmu_get,
1536 .set = prop_mmu_set,
1537 };
1538
1539 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1540 void *opaque, Error **errp)
1541 {
1542 RISCVCPU *cpu = RISCV_CPU(obj);
1543 bool value;
1544
1545 visit_type_bool(v, name, &value, errp);
1546
1547 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1548 cpu_set_prop_err(cpu, name, errp);
1549 return;
1550 }
1551
1552 cpu_option_add_user_setting(name, value);
1553 cpu->cfg.pmp = value;
1554 }
1555
1556 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1557 void *opaque, Error **errp)
1558 {
1559 bool value = RISCV_CPU(obj)->cfg.pmp;
1560
1561 visit_type_bool(v, name, &value, errp);
1562 }
1563
1564 static const PropertyInfo prop_pmp = {
1565 .type = "bool",
1566 .description = "pmp",
1567 .get = prop_pmp_get,
1568 .set = prop_pmp_set,
1569 };
1570
1571 static int priv_spec_from_str(const char *priv_spec_str)
1572 {
1573 int priv_version = -1;
1574
1575 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1576 priv_version = PRIV_VERSION_1_13_0;
1577 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1578 priv_version = PRIV_VERSION_1_12_0;
1579 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1580 priv_version = PRIV_VERSION_1_11_0;
1581 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1582 priv_version = PRIV_VERSION_1_10_0;
1583 }
1584
1585 return priv_version;
1586 }
1587
1588 const char *priv_spec_to_str(int priv_version)
1589 {
1590 switch (priv_version) {
1591 case PRIV_VERSION_1_10_0:
1592 return PRIV_VER_1_10_0_STR;
1593 case PRIV_VERSION_1_11_0:
1594 return PRIV_VER_1_11_0_STR;
1595 case PRIV_VERSION_1_12_0:
1596 return PRIV_VER_1_12_0_STR;
1597 case PRIV_VERSION_1_13_0:
1598 return PRIV_VER_1_13_0_STR;
1599 default:
1600 return NULL;
1601 }
1602 }
1603
1604 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1605 void *opaque, Error **errp)
1606 {
1607 RISCVCPU *cpu = RISCV_CPU(obj);
1608 g_autofree char *value = NULL;
1609 int priv_version = -1;
1610
1611 visit_type_str(v, name, &value, errp);
1612
1613 priv_version = priv_spec_from_str(value);
1614 if (priv_version < 0) {
1615 error_setg(errp, "Unsupported privilege spec version '%s'", value);
1616 return;
1617 }
1618
1619 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1620 cpu_set_prop_err(cpu, name, errp);
1621 error_append_hint(errp, "Current '%s' val: %s\n", name,
1622 object_property_get_str(obj, name, NULL));
1623 return;
1624 }
1625
1626 cpu_option_add_user_setting(name, priv_version);
1627 cpu->env.priv_ver = priv_version;
1628 }
1629
1630 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1631 void *opaque, Error **errp)
1632 {
1633 RISCVCPU *cpu = RISCV_CPU(obj);
1634 const char *value = priv_spec_to_str(cpu->env.priv_ver);
1635
1636 visit_type_str(v, name, (char **)&value, errp);
1637 }
1638
1639 static const PropertyInfo prop_priv_spec = {
1640 .type = "str",
1641 .description = "priv_spec",
1642 /* FIXME enum? */
1643 .get = prop_priv_spec_get,
1644 .set = prop_priv_spec_set,
1645 };
1646
1647 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1648 void *opaque, Error **errp)
1649 {
1650 RISCVCPU *cpu = RISCV_CPU(obj);
1651 g_autofree char *value = NULL;
1652
1653 visit_type_str(v, name, &value, errp);
1654
1655 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1656 error_setg(errp, "Unsupported vector spec version '%s'", value);
1657 return;
1658 }
1659
1660 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1661 cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1662 }
1663
1664 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1665 void *opaque, Error **errp)
1666 {
1667 const char *value = VEXT_VER_1_00_0_STR;
1668
1669 visit_type_str(v, name, (char **)&value, errp);
1670 }
1671
1672 static const PropertyInfo prop_vext_spec = {
1673 .type = "str",
1674 .description = "vext_spec",
1675 /* FIXME enum? */
1676 .get = prop_vext_spec_get,
1677 .set = prop_vext_spec_set,
1678 };
1679
1680 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1681 void *opaque, Error **errp)
1682 {
1683 RISCVCPU *cpu = RISCV_CPU(obj);
1684 uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
1685 uint16_t value;
1686
1687 if (!visit_type_uint16(v, name, &value, errp)) {
1688 return;
1689 }
1690
1691 if (!is_power_of_2(value)) {
1692 error_setg(errp, "Vector extension VLEN must be power of 2");
1693 return;
1694 }
1695
1696 if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
1697 cpu_set_prop_err(cpu, name, errp);
1698 error_append_hint(errp, "Current '%s' val: %u\n",
1699 name, cpu_vlen);
1700 return;
1701 }
1702
1703 cpu_option_add_user_setting(name, value);
1704 cpu->cfg.vlenb = value >> 3;
1705 }
1706
1707 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1708 void *opaque, Error **errp)
1709 {
1710 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1711
1712 visit_type_uint16(v, name, &value, errp);
1713 }
1714
1715 static const PropertyInfo prop_vlen = {
1716 .type = "uint16",
1717 .description = "vlen",
1718 .get = prop_vlen_get,
1719 .set = prop_vlen_set,
1720 };
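
/*
 * The property is expressed in bits ("vlen") while the config stores
 * bytes (cfg.vlenb), hence the '<< 3' / '>> 3' conversions above.  An
 * illustrative command line, assuming the single-letter MISA properties
 * registered elsewhere and example values only:
 *
 *     -cpu rv64,v=true,vlen=256,elen=64
 */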
1721
1722 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1723 void *opaque, Error **errp)
1724 {
1725 RISCVCPU *cpu = RISCV_CPU(obj);
1726 uint16_t value;
1727
1728 if (!visit_type_uint16(v, name, &value, errp)) {
1729 return;
1730 }
1731
1732 if (!is_power_of_2(value)) {
1733 error_setg(errp, "Vector extension ELEN must be power of 2");
1734 return;
1735 }
1736
1737 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1738 cpu_set_prop_err(cpu, name, errp);
1739 error_append_hint(errp, "Current '%s' val: %u\n",
1740 name, cpu->cfg.elen);
1741 return;
1742 }
1743
1744 cpu_option_add_user_setting(name, value);
1745 cpu->cfg.elen = value;
1746 }
1747
1748 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1749 void *opaque, Error **errp)
1750 {
1751 uint16_t value = RISCV_CPU(obj)->cfg.elen;
1752
1753 visit_type_uint16(v, name, &value, errp);
1754 }
1755
1756 static const PropertyInfo prop_elen = {
1757 .type = "uint16",
1758 .description = "elen",
1759 .get = prop_elen_get,
1760 .set = prop_elen_set,
1761 };
1762
1763 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1764 void *opaque, Error **errp)
1765 {
1766 RISCVCPU *cpu = RISCV_CPU(obj);
1767 uint16_t value;
1768
1769 if (!visit_type_uint16(v, name, &value, errp)) {
1770 return;
1771 }
1772
1773 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1774 cpu_set_prop_err(cpu, name, errp);
1775 error_append_hint(errp, "Current '%s' val: %u\n",
1776 name, cpu->cfg.cbom_blocksize);
1777 return;
1778 }
1779
1780 cpu_option_add_user_setting(name, value);
1781 cpu->cfg.cbom_blocksize = value;
1782 }
1783
1784 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1785 void *opaque, Error **errp)
1786 {
1787 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1788
1789 visit_type_uint16(v, name, &value, errp);
1790 }
1791
1792 static const PropertyInfo prop_cbom_blksize = {
1793 .type = "uint16",
1794 .description = "cbom_blocksize",
1795 .get = prop_cbom_blksize_get,
1796 .set = prop_cbom_blksize_set,
1797 };
1798
1799 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
1800 void *opaque, Error **errp)
1801 {
1802 RISCVCPU *cpu = RISCV_CPU(obj);
1803 uint16_t value;
1804
1805 if (!visit_type_uint16(v, name, &value, errp)) {
1806 return;
1807 }
1808
1809 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
1810 cpu_set_prop_err(cpu, name, errp);
1811 error_append_hint(errp, "Current '%s' val: %u\n",
1812 name, cpu->cfg.cbop_blocksize);
1813 return;
1814 }
1815
1816 cpu_option_add_user_setting(name, value);
1817 cpu->cfg.cbop_blocksize = value;
1818 }
1819
1820 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
1821 void *opaque, Error **errp)
1822 {
1823 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
1824
1825 visit_type_uint16(v, name, &value, errp);
1826 }
1827
1828 static const PropertyInfo prop_cbop_blksize = {
1829 .type = "uint16",
1830 .description = "cbop_blocksize",
1831 .get = prop_cbop_blksize_get,
1832 .set = prop_cbop_blksize_set,
1833 };
1834
1835 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
1836 void *opaque, Error **errp)
1837 {
1838 RISCVCPU *cpu = RISCV_CPU(obj);
1839 uint16_t value;
1840
1841 if (!visit_type_uint16(v, name, &value, errp)) {
1842 return;
1843 }
1844
1845 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
1846 cpu_set_prop_err(cpu, name, errp);
1847 error_append_hint(errp, "Current '%s' val: %u\n",
1848 name, cpu->cfg.cboz_blocksize);
1849 return;
1850 }
1851
1852 cpu_option_add_user_setting(name, value);
1853 cpu->cfg.cboz_blocksize = value;
1854 }
1855
1856 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
1857 void *opaque, Error **errp)
1858 {
1859 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
1860
1861 visit_type_uint16(v, name, &value, errp);
1862 }
1863
1864 static const PropertyInfo prop_cboz_blksize = {
1865 .type = "uint16",
1866 .description = "cboz_blocksize",
1867 .get = prop_cboz_blksize_get,
1868 .set = prop_cboz_blksize_set,
1869 };
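
/*
 * cbom_blocksize, cbop_blocksize and cboz_blocksize all follow the same
 * pattern: a plain uint16 block size that vendor CPUs refuse to change.
 * They describe the cache-block size operated on by the Zicbom, Zicbop
 * and Zicboz extensions respectively.  Illustrative usage, assuming the
 * usual per-extension boolean properties and example values:
 *
 *     -cpu rv64,zicboz=true,cboz_blocksize=64
 */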
1870
1871 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
1872 void *opaque, Error **errp)
1873 {
1874 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1875 RISCVCPU *cpu = RISCV_CPU(obj);
1876 uint32_t prev_val = cpu->cfg.mvendorid;
1877 uint32_t value;
1878
1879 if (!visit_type_uint32(v, name, &value, errp)) {
1880 return;
1881 }
1882
1883 if (!dynamic_cpu && prev_val != value) {
1884 error_setg(errp, "Unable to change %s mvendorid (0x%x)",
1885 object_get_typename(obj), prev_val);
1886 return;
1887 }
1888
1889 cpu->cfg.mvendorid = value;
1890 }
1891
1892 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
1893 void *opaque, Error **errp)
1894 {
1895 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
1896
1897 visit_type_uint32(v, name, &value, errp);
1898 }
1899
1900 static const PropertyInfo prop_mvendorid = {
1901 .type = "uint32",
1902 .description = "mvendorid",
1903 .get = prop_mvendorid_get,
1904 .set = prop_mvendorid_set,
1905 };
1906
1907 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
1908 void *opaque, Error **errp)
1909 {
1910 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1911 RISCVCPU *cpu = RISCV_CPU(obj);
1912 uint64_t prev_val = cpu->cfg.mimpid;
1913 uint64_t value;
1914
1915 if (!visit_type_uint64(v, name, &value, errp)) {
1916 return;
1917 }
1918
1919 if (!dynamic_cpu && prev_val != value) {
1920 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
1921 object_get_typename(obj), prev_val);
1922 return;
1923 }
1924
1925 cpu->cfg.mimpid = value;
1926 }
1927
1928 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
1929 void *opaque, Error **errp)
1930 {
1931 uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
1932
1933 visit_type_uint64(v, name, &value, errp);
1934 }
1935
1936 static const PropertyInfo prop_mimpid = {
1937 .type = "uint64",
1938 .description = "mimpid",
1939 .get = prop_mimpid_get,
1940 .set = prop_mimpid_set,
1941 };
1942
1943 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
1944 void *opaque, Error **errp)
1945 {
1946 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1947 RISCVCPU *cpu = RISCV_CPU(obj);
1948 uint64_t prev_val = cpu->cfg.marchid;
1949 uint64_t value, invalid_val;
1950 uint32_t mxlen = 0;
1951
1952 if (!visit_type_uint64(v, name, &value, errp)) {
1953 return;
1954 }
1955
1956 if (!dynamic_cpu && prev_val != value) {
1957 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
1958 object_get_typename(obj), prev_val);
1959 return;
1960 }
1961
1962 switch (riscv_cpu_mxl(&cpu->env)) {
1963 case MXL_RV32:
1964 mxlen = 32;
1965 break;
1966 case MXL_RV64:
1967 case MXL_RV128:
1968 mxlen = 64;
1969 break;
1970 default:
1971 g_assert_not_reached();
1972 }
1973
1974 invalid_val = 1ULL << (mxlen - 1);
1975
1976 if (value == invalid_val) {
1977 error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
1978 "and the remaining bits zero", mxlen);
1979 return;
1980 }
1981
1982 cpu->cfg.marchid = value;
1983 }
1984
1985 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
1986 void *opaque, Error **errp)
1987 {
1988 uint64_t value = RISCV_CPU(obj)->cfg.marchid;
1989
1990 visit_type_uint64(v, name, &value, errp);
1991 }
1992
1993 static const PropertyInfo prop_marchid = {
1994 .type = "uint64",
1995 .description = "marchid",
1996 .get = prop_marchid_get,
1997 .set = prop_marchid_set,
1998 };
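
/*
 * mvendorid, mimpid and marchid can only diverge from their defaults on
 * dynamic CPUs; for any other CPU type the setters above reject a value
 * that differs from the one baked into the model.  A minimal sketch using
 * the generic QOM setter, with a purely illustrative value:
 *
 *     object_property_set_uint(OBJECT(cpu), "marchid", 0x15, &error_fatal);
 */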
1999
2000 /*
2001 * RVA22U64 defines some 'named features' that are cache-related:
2002 * Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa and Zicclsm.
2003 * They are always implemented in TCG and don't need to be
2004 * manually enabled by the profile.
2005 */
2006 static RISCVCPUProfile RVA22U64 = {
2007 .u_parent = NULL,
2008 .s_parent = NULL,
2009 .name = "rva22u64",
2010 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
2011 .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2012 .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2013 .ext_offsets = {
2014 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2015 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2016 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2017 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2018 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2019 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2020
2021 /* mandatory named features for this profile */
2022 CPU_CFG_OFFSET(ext_zic64b),
2023
2024 RISCV_PROFILE_EXT_LIST_END
2025 }
2026 };
2027
2028 /*
2029 * As with RVA22U64, RVA22S64 also defines 'named features'.
2030 *
2031 * Cache-related features that we consider enabled since we don't
2032 * emulate caches: Ssccptr
2033 *
2034 * Other named features that we already implement: Sstvecd, Sstvala,
2035 * Sscounterenw
2036 *
2037 * The remaining features/extensions come from RVA22U64.
2038 */
2039 static RISCVCPUProfile RVA22S64 = {
2040 .u_parent = &RVA22U64,
2041 .s_parent = NULL,
2042 .name = "rva22s64",
2043 .misa_ext = RVS,
2044 .priv_spec = PRIV_VERSION_1_12_0,
2045 .satp_mode = VM_1_10_SV39,
2046 .ext_offsets = {
2047 /* rva22s64 exts */
2048 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2049 CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2050
2051 RISCV_PROFILE_EXT_LIST_END
2052 }
2053 };
2054
2055 /*
2056 * All mandatory extensions from RVA22U64 are present
2057 * in RVA23U64, so RVA22U64 is set as its parent. We only
2058 * need to declare the newly added mandatory extensions.
2059 */
2060 static RISCVCPUProfile RVA23U64 = {
2061 .u_parent = &RVA22U64,
2062 .s_parent = NULL,
2063 .name = "rva23u64",
2064 .misa_ext = RVV,
2065 .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2066 .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2067 .ext_offsets = {
2068 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
2069 CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
2070 CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
2071 CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
2072 CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
2073 CPU_CFG_OFFSET(ext_supm),
2074
2075 RISCV_PROFILE_EXT_LIST_END
2076 }
2077 };
2078
2079 /*
2080 * As with RVA23U64, RVA23S64 also defines 'named features'.
2081 *
2082 * Cache-related features that we consider enabled since we don't
2083 * emulate caches: Ssccptr
2084 *
2085 * Other named features that we already implement: Sstvecd, Sstvala,
2086 * Sscounterenw, Ssu64xl
2087 *
2088 * The remaining features/extensions come from RVA23U64 and RVA22S64.
2089 */
2090 static RISCVCPUProfile RVA23S64 = {
2091 .u_parent = &RVA23U64,
2092 .s_parent = &RVA22S64,
2093 .name = "rva23s64",
2094 .misa_ext = RVS,
2095 .priv_spec = PRIV_VERSION_1_13_0,
2096 .satp_mode = VM_1_10_SV39,
2097 .ext_offsets = {
2098 /* New in RVA23S64 */
2099 CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
2100 CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),
2101
2102 /* Named features: Sha */
2103 CPU_CFG_OFFSET(ext_sha),
2104
2105 RISCV_PROFILE_EXT_LIST_END
2106 }
2107 };
2108
2109 RISCVCPUProfile *riscv_profiles[] = {
2110 &RVA22U64,
2111 &RVA22S64,
2112 &RVA23U64,
2113 &RVA23S64,
2114 NULL,
2115 };
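
/*
 * Profiles in this NULL-terminated list are exposed to users as boolean
 * CPU options named after the profile's .name field, so that, as a hedged
 * example, '-cpu rv64,rva22u64=true' turns on every mandatory extension
 * listed in RVA22U64 together with whatever its parent profiles mandate.
 */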
2116
2117 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2118 .is_misa = true,
2119 .ext = RVA,
2120 .implied_multi_exts = {
2121 CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2122
2123 RISCV_IMPLIED_EXTS_RULE_END
2124 },
2125 };
2126
2127 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2128 .is_misa = true,
2129 .ext = RVD,
2130 .implied_misa_exts = RVF,
2131 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2132 };
2133
2134 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2135 .is_misa = true,
2136 .ext = RVF,
2137 .implied_multi_exts = {
2138 CPU_CFG_OFFSET(ext_zicsr),
2139
2140 RISCV_IMPLIED_EXTS_RULE_END
2141 },
2142 };
2143
2144 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2145 .is_misa = true,
2146 .ext = RVM,
2147 .implied_multi_exts = {
2148 CPU_CFG_OFFSET(ext_zmmul),
2149
2150 RISCV_IMPLIED_EXTS_RULE_END
2151 },
2152 };
2153
2154 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2155 .is_misa = true,
2156 .ext = RVV,
2157 .implied_multi_exts = {
2158 CPU_CFG_OFFSET(ext_zve64d),
2159
2160 RISCV_IMPLIED_EXTS_RULE_END
2161 },
2162 };
2163
2164 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2165 .ext = CPU_CFG_OFFSET(ext_zcb),
2166 .implied_multi_exts = {
2167 CPU_CFG_OFFSET(ext_zca),
2168
2169 RISCV_IMPLIED_EXTS_RULE_END
2170 },
2171 };
2172
2173 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2174 .ext = CPU_CFG_OFFSET(ext_zcd),
2175 .implied_misa_exts = RVD,
2176 .implied_multi_exts = {
2177 CPU_CFG_OFFSET(ext_zca),
2178
2179 RISCV_IMPLIED_EXTS_RULE_END
2180 },
2181 };
2182
2183 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2184 .ext = CPU_CFG_OFFSET(ext_zce),
2185 .implied_multi_exts = {
2186 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2187 CPU_CFG_OFFSET(ext_zcmt),
2188
2189 RISCV_IMPLIED_EXTS_RULE_END
2190 },
2191 };
2192
2193 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2194 .ext = CPU_CFG_OFFSET(ext_zcf),
2195 .implied_misa_exts = RVF,
2196 .implied_multi_exts = {
2197 CPU_CFG_OFFSET(ext_zca),
2198
2199 RISCV_IMPLIED_EXTS_RULE_END
2200 },
2201 };
2202
2203 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2204 .ext = CPU_CFG_OFFSET(ext_zcmp),
2205 .implied_multi_exts = {
2206 CPU_CFG_OFFSET(ext_zca),
2207
2208 RISCV_IMPLIED_EXTS_RULE_END
2209 },
2210 };
2211
2212 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2213 .ext = CPU_CFG_OFFSET(ext_zcmt),
2214 .implied_multi_exts = {
2215 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2216
2217 RISCV_IMPLIED_EXTS_RULE_END
2218 },
2219 };
2220
2221 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2222 .ext = CPU_CFG_OFFSET(ext_zdinx),
2223 .implied_multi_exts = {
2224 CPU_CFG_OFFSET(ext_zfinx),
2225
2226 RISCV_IMPLIED_EXTS_RULE_END
2227 },
2228 };
2229
2230 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2231 .ext = CPU_CFG_OFFSET(ext_zfa),
2232 .implied_misa_exts = RVF,
2233 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2234 };
2235
2236 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2237 .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2238 .implied_misa_exts = RVF,
2239 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2240 };
2241
2242 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2243 .ext = CPU_CFG_OFFSET(ext_zfh),
2244 .implied_multi_exts = {
2245 CPU_CFG_OFFSET(ext_zfhmin),
2246
2247 RISCV_IMPLIED_EXTS_RULE_END
2248 },
2249 };
2250
2251 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2252 .ext = CPU_CFG_OFFSET(ext_zfhmin),
2253 .implied_misa_exts = RVF,
2254 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2255 };
2256
2257 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2258 .ext = CPU_CFG_OFFSET(ext_zfinx),
2259 .implied_multi_exts = {
2260 CPU_CFG_OFFSET(ext_zicsr),
2261
2262 RISCV_IMPLIED_EXTS_RULE_END
2263 },
2264 };
2265
2266 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2267 .ext = CPU_CFG_OFFSET(ext_zhinx),
2268 .implied_multi_exts = {
2269 CPU_CFG_OFFSET(ext_zhinxmin),
2270
2271 RISCV_IMPLIED_EXTS_RULE_END
2272 },
2273 };
2274
2275 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2276 .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2277 .implied_multi_exts = {
2278 CPU_CFG_OFFSET(ext_zfinx),
2279
2280 RISCV_IMPLIED_EXTS_RULE_END
2281 },
2282 };
2283
2284 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2285 .ext = CPU_CFG_OFFSET(ext_zicntr),
2286 .implied_multi_exts = {
2287 CPU_CFG_OFFSET(ext_zicsr),
2288
2289 RISCV_IMPLIED_EXTS_RULE_END
2290 },
2291 };
2292
2293 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2294 .ext = CPU_CFG_OFFSET(ext_zihpm),
2295 .implied_multi_exts = {
2296 CPU_CFG_OFFSET(ext_zicsr),
2297
2298 RISCV_IMPLIED_EXTS_RULE_END
2299 },
2300 };
2301
2302 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2303 .ext = CPU_CFG_OFFSET(ext_zk),
2304 .implied_multi_exts = {
2305 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2306 CPU_CFG_OFFSET(ext_zkt),
2307
2308 RISCV_IMPLIED_EXTS_RULE_END
2309 },
2310 };
2311
2312 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2313 .ext = CPU_CFG_OFFSET(ext_zkn),
2314 .implied_multi_exts = {
2315 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2316 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2317 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2318
2319 RISCV_IMPLIED_EXTS_RULE_END
2320 },
2321 };
2322
2323 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2324 .ext = CPU_CFG_OFFSET(ext_zks),
2325 .implied_multi_exts = {
2326 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2327 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2328 CPU_CFG_OFFSET(ext_zksh),
2329
2330 RISCV_IMPLIED_EXTS_RULE_END
2331 },
2332 };
2333
2334 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2335 .ext = CPU_CFG_OFFSET(ext_zvbb),
2336 .implied_multi_exts = {
2337 CPU_CFG_OFFSET(ext_zvkb),
2338
2339 RISCV_IMPLIED_EXTS_RULE_END
2340 },
2341 };
2342
2343 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2344 .ext = CPU_CFG_OFFSET(ext_zve32f),
2345 .implied_misa_exts = RVF,
2346 .implied_multi_exts = {
2347 CPU_CFG_OFFSET(ext_zve32x),
2348
2349 RISCV_IMPLIED_EXTS_RULE_END
2350 },
2351 };
2352
2353 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2354 .ext = CPU_CFG_OFFSET(ext_zve32x),
2355 .implied_multi_exts = {
2356 CPU_CFG_OFFSET(ext_zicsr),
2357
2358 RISCV_IMPLIED_EXTS_RULE_END
2359 },
2360 };
2361
2362 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2363 .ext = CPU_CFG_OFFSET(ext_zve64d),
2364 .implied_misa_exts = RVD,
2365 .implied_multi_exts = {
2366 CPU_CFG_OFFSET(ext_zve64f),
2367
2368 RISCV_IMPLIED_EXTS_RULE_END
2369 },
2370 };
2371
2372 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2373 .ext = CPU_CFG_OFFSET(ext_zve64f),
2374 .implied_misa_exts = RVF,
2375 .implied_multi_exts = {
2376 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2377
2378 RISCV_IMPLIED_EXTS_RULE_END
2379 },
2380 };
2381
2382 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2383 .ext = CPU_CFG_OFFSET(ext_zve64x),
2384 .implied_multi_exts = {
2385 CPU_CFG_OFFSET(ext_zve32x),
2386
2387 RISCV_IMPLIED_EXTS_RULE_END
2388 },
2389 };
2390
2391 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2392 .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2393 .implied_multi_exts = {
2394 CPU_CFG_OFFSET(ext_zve32f),
2395
2396 RISCV_IMPLIED_EXTS_RULE_END
2397 },
2398 };
2399
2400 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2401 .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2402 .implied_multi_exts = {
2403 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2404
2405 RISCV_IMPLIED_EXTS_RULE_END
2406 },
2407 };
2408
2409 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2410 .ext = CPU_CFG_OFFSET(ext_zvfh),
2411 .implied_multi_exts = {
2412 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2413
2414 RISCV_IMPLIED_EXTS_RULE_END
2415 },
2416 };
2417
2418 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2419 .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2420 .implied_multi_exts = {
2421 CPU_CFG_OFFSET(ext_zve32f),
2422
2423 RISCV_IMPLIED_EXTS_RULE_END
2424 },
2425 };
2426
2427 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2428 .ext = CPU_CFG_OFFSET(ext_zvkn),
2429 .implied_multi_exts = {
2430 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2431 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2432
2433 RISCV_IMPLIED_EXTS_RULE_END
2434 },
2435 };
2436
2437 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2438 .ext = CPU_CFG_OFFSET(ext_zvknc),
2439 .implied_multi_exts = {
2440 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2441
2442 RISCV_IMPLIED_EXTS_RULE_END
2443 },
2444 };
2445
2446 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2447 .ext = CPU_CFG_OFFSET(ext_zvkng),
2448 .implied_multi_exts = {
2449 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2450
2451 RISCV_IMPLIED_EXTS_RULE_END
2452 },
2453 };
2454
2455 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2456 .ext = CPU_CFG_OFFSET(ext_zvknhb),
2457 .implied_multi_exts = {
2458 CPU_CFG_OFFSET(ext_zve64x),
2459
2460 RISCV_IMPLIED_EXTS_RULE_END
2461 },
2462 };
2463
2464 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2465 .ext = CPU_CFG_OFFSET(ext_zvks),
2466 .implied_multi_exts = {
2467 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2468 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2469
2470 RISCV_IMPLIED_EXTS_RULE_END
2471 },
2472 };
2473
2474 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2475 .ext = CPU_CFG_OFFSET(ext_zvksc),
2476 .implied_multi_exts = {
2477 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2478
2479 RISCV_IMPLIED_EXTS_RULE_END
2480 },
2481 };
2482
2483 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2484 .ext = CPU_CFG_OFFSET(ext_zvksg),
2485 .implied_multi_exts = {
2486 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2487
2488 RISCV_IMPLIED_EXTS_RULE_END
2489 },
2490 };
2491
2492 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
2493 .ext = CPU_CFG_OFFSET(ext_ssccfg),
2494 .implied_multi_exts = {
2495 CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
2496 CPU_CFG_OFFSET(ext_smcdeleg),
2497
2498 RISCV_IMPLIED_EXTS_RULE_END
2499 },
2500 };
2501
2502 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
2503 .ext = CPU_CFG_OFFSET(ext_supm),
2504 .implied_multi_exts = {
2505 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),
2506
2507 RISCV_IMPLIED_EXTS_RULE_END
2508 },
2509 };
2510
2511 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
2512 .ext = CPU_CFG_OFFSET(ext_sspm),
2513 .implied_multi_exts = {
2514 CPU_CFG_OFFSET(ext_smnpm),
2515
2516 RISCV_IMPLIED_EXTS_RULE_END
2517 },
2518 };
2519
2520 static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = {
2521 .ext = CPU_CFG_OFFSET(ext_smctr),
2522 .implied_misa_exts = RVS,
2523 .implied_multi_exts = {
2524 CPU_CFG_OFFSET(ext_sscsrind),
2525
2526 RISCV_IMPLIED_EXTS_RULE_END
2527 },
2528 };
2529
2530 static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = {
2531 .ext = CPU_CFG_OFFSET(ext_ssctr),
2532 .implied_misa_exts = RVS,
2533 .implied_multi_exts = {
2534 CPU_CFG_OFFSET(ext_sscsrind),
2535
2536 RISCV_IMPLIED_EXTS_RULE_END
2537 },
2538 };
2539
2540 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2541 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2542 &RVM_IMPLIED, &RVV_IMPLIED, NULL
2543 };
2544
2545 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2546 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2547 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2548 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2549 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2550 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2551 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2552 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2553 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2554 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2555 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2556 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2557 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
2558 &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED,
2559 NULL
2560 };
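
/*
 * Each rule above keys one extension (a MISA bit when .is_misa is set,
 * otherwise a cpu_cfg offset) to the extensions it implies: enabling the
 * keyed extension also enables the .implied_misa_exts bits and every
 * entry in .implied_multi_exts, and the rules chain.  Worked example from
 * the tables above: enabling Zvfh pulls in Zvfhmin and Zfhmin
 * (ZVFH_IMPLIED), Zvfhmin pulls in Zve32f (ZVFHMIN_IMPLIED), and Zve32f
 * in turn implies RVF and Zve32x (ZVE32F_IMPLIED).
 */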
2561
2562 static const Property riscv_cpu_properties[] = {
2563 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2564
2565 {.name = "pmu-mask", .info = &prop_pmu_mask},
2566 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2567
2568 {.name = "mmu", .info = &prop_mmu},
2569 {.name = "pmp", .info = &prop_pmp},
2570
2571 {.name = "priv_spec", .info = &prop_priv_spec},
2572 {.name = "vext_spec", .info = &prop_vext_spec},
2573
2574 {.name = "vlen", .info = &prop_vlen},
2575 {.name = "elen", .info = &prop_elen},
2576
2577 {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2578 {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2579 {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2580
2581 {.name = "mvendorid", .info = &prop_mvendorid},
2582 {.name = "mimpid", .info = &prop_mimpid},
2583 {.name = "marchid", .info = &prop_marchid},
2584
2585 #ifndef CONFIG_USER_ONLY
2586 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2587 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec,
2588 DEFAULT_RNMI_IRQVEC),
2589 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec,
2590 DEFAULT_RNMI_EXCPVEC),
2591 #endif
2592
2593 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2594
2595 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2596 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2597 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),
2598
2599 /*
2600 * write_misa() is still considered experimental, so expose it
2601 * behind the 'x-' prefix and default it to 'false'.
2602 */
2603 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2604 };
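
/*
 * Every entry in riscv_cpu_properties becomes a '-cpu' sub-option.  A
 * purely illustrative invocation combining a few of them (values are
 * examples, not recommendations):
 *
 *     qemu-system-riscv64 -cpu rv64,pmp=true,v=true,vlen=128,elen=64
 *
 * 'pmu-num' is kept only for backward compatibility; 'pmu-mask' is the
 * replacement.
 */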
2605
2606 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2607 {
2608 RISCVCPU *cpu = RISCV_CPU(cs);
2609 CPURISCVState *env = &cpu->env;
2610
2611 switch (riscv_cpu_mxl(env)) {
2612 case MXL_RV32:
2613 return "riscv:rv32";
2614 case MXL_RV64:
2615 case MXL_RV128:
2616 return "riscv:rv64";
2617 default:
2618 g_assert_not_reached();
2619 }
2620 }
2621
2622 #ifndef CONFIG_USER_ONLY
2623 static int64_t riscv_get_arch_id(CPUState *cs)
2624 {
2625 RISCVCPU *cpu = RISCV_CPU(cs);
2626
2627 return cpu->env.mhartid;
2628 }
2629
2630 #include "hw/core/sysemu-cpu-ops.h"
2631
2632 static const struct SysemuCPUOps riscv_sysemu_ops = {
2633 .has_work = riscv_cpu_has_work,
2634 .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2635 .write_elf64_note = riscv_cpu_write_elf64_note,
2636 .write_elf32_note = riscv_cpu_write_elf32_note,
2637 .legacy_vmsd = &vmstate_riscv_cpu,
2638 };
2639 #endif
2640
2641 static void riscv_cpu_common_class_init(ObjectClass *c, const void *data)
2642 {
2643 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2644 CPUClass *cc = CPU_CLASS(c);
2645 DeviceClass *dc = DEVICE_CLASS(c);
2646 ResettableClass *rc = RESETTABLE_CLASS(c);
2647
2648 device_class_set_parent_realize(dc, riscv_cpu_realize,
2649 &mcc->parent_realize);
2650
2651 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2652 &mcc->parent_phases);
2653
2654 cc->class_by_name = riscv_cpu_class_by_name;
2655 cc->dump_state = riscv_cpu_dump_state;
2656 cc->set_pc = riscv_cpu_set_pc;
2657 cc->get_pc = riscv_cpu_get_pc;
2658 cc->gdb_read_register = riscv_cpu_gdb_read_register;
2659 cc->gdb_write_register = riscv_cpu_gdb_write_register;
2660 cc->gdb_stop_before_watchpoint = true;
2661 cc->disas_set_info = riscv_cpu_disas_set_info;
2662 #ifndef CONFIG_USER_ONLY
2663 cc->sysemu_ops = &riscv_sysemu_ops;
2664 cc->get_arch_id = riscv_get_arch_id;
2665 #endif
2666 cc->gdb_arch_name = riscv_gdb_arch_name;
2667 #ifdef CONFIG_TCG
2668 cc->tcg_ops = &riscv_tcg_ops;
2669 #endif /* CONFIG_TCG */
2670
2671 device_class_set_props(dc, riscv_cpu_properties);
2672 }
2673
2674 static bool profile_extends(RISCVCPUProfile *trial, RISCVCPUProfile *parent)
2675 {
2676 RISCVCPUProfile *curr;
2677 if (!parent) {
2678 return true;
2679 }
2680
2681 curr = trial;
2682 while (curr) {
2683 if (curr == parent) {
2684 return true;
2685 }
2686 curr = curr->u_parent;
2687 }
2688
2689 curr = trial;
2690 while (curr) {
2691 if (curr == parent) {
2692 return true;
2693 }
2694 curr = curr->s_parent;
2695 }
2696
2697 return false;
2698 }
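
/*
 * profile_extends() answers "is 'parent' reachable from 'trial'?" by
 * walking first the u_parent chain and then the s_parent chain, with a
 * NULL parent trivially satisfied.  For instance, with the profiles
 * defined earlier, profile_extends(&RVA23S64, &RVA22U64) is true because
 * RVA23S64's u_parent is RVA23U64, whose u_parent is RVA22U64.
 */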
2699
2700 static void riscv_cpu_class_base_init(ObjectClass *c, const void *data)
2701 {
2702 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2703 RISCVCPUClass *pcc = RISCV_CPU_CLASS(object_class_get_parent(c));
2704
2705 if (pcc->def) {
2706 mcc->def = g_memdup2(pcc->def, sizeof(*pcc->def));
2707 } else {
2708 mcc->def = g_new0(RISCVCPUDef, 1);
2709 }
2710
2711 if (data) {
2712 const RISCVCPUDef *def = data;
2713 mcc->def->bare |= def->bare;
2714 if (def->profile) {
2715 assert(profile_extends(def->profile, mcc->def->profile));
2716 assert(mcc->def->bare);
2717 mcc->def->profile = def->profile;
2718 }
2719 if (def->misa_mxl_max) {
2720 assert(def->misa_mxl_max <= MXL_RV128);
2721 mcc->def->misa_mxl_max = def->misa_mxl_max;
2722
2723 #ifndef CONFIG_USER_ONLY
2724 /*
2725 * Hack to simplify CPU class hierarchies that include both 32- and
2726 * 64-bit models: reduce SV39/48/57/64 to SV32 for 32-bit models.
2727 */
2728 if (mcc->def->misa_mxl_max == MXL_RV32 &&
2729 !valid_vm_1_10_32[mcc->def->cfg.max_satp_mode]) {
2730 mcc->def->cfg.max_satp_mode = VM_1_10_SV32;
2731 }
2732 #endif
2733 }
2734 if (def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
2735 assert(def->priv_spec <= PRIV_VERSION_LATEST);
2736 mcc->def->priv_spec = def->priv_spec;
2737 }
2738 if (def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
2739 assert(def->vext_spec != 0);
2740 mcc->def->vext_spec = def->vext_spec;
2741 }
2742 mcc->def->misa_ext |= def->misa_ext;
2743
2744 riscv_cpu_cfg_merge(&mcc->def->cfg, &def->cfg);
2745
2746 if (def->custom_csrs) {
2747 assert(!mcc->def->custom_csrs);
2748 mcc->def->custom_csrs = def->custom_csrs;
2749 }
2750 }
2751
2752 if (!object_class_is_abstract(c)) {
2753 riscv_cpu_validate_misa_mxl(mcc);
2754 }
2755 }
2756
2757 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2758 int max_str_len)
2759 {
2760 const RISCVIsaExtData *edata;
2761 char *old = *isa_str;
2762 char *new = *isa_str;
2763
2764 for (edata = isa_edata_arr; edata && edata->name; edata++) {
2765 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2766 new = g_strconcat(old, "_", edata->name, NULL);
2767 g_free(old);
2768 old = new;
2769 }
2770 }
2771
2772 *isa_str = new;
2773 }
2774
2775 char *riscv_isa_string(RISCVCPU *cpu)
2776 {
2777 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2778 int i;
2779 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2780 char *isa_str = g_new(char, maxlen);
2781 int xlen = riscv_cpu_max_xlen(mcc);
2782 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2783
2784 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2785 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2786 *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2787 }
2788 }
2789 *p = '\0';
2790 if (!cpu->cfg.short_isa_string) {
2791 riscv_isa_string_ext(cpu, &isa_str, maxlen);
2792 }
2793 return isa_str;
2794 }
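
/*
 * The resulting string is the "rvXX" base, then the enabled single-letter
 * extensions in IEMAFDQCBPVH order, then (unless short-isa-string is set)
 * each enabled multi-letter extension appended with '_' separators by
 * riscv_isa_string_ext().  A plausible, configuration-dependent example:
 * "rv64imafdcv_zicsr_zifencei_zba_zbb".
 */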
2795
2796 #ifndef CONFIG_USER_ONLY
2797 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2798 {
2799 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2800 char **extensions = g_new(char *, maxlen);
2801
2802 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2803 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2804 extensions[*count] = g_new(char, 2);
2805 snprintf(extensions[*count], 2, "%c",
2806 qemu_tolower(riscv_single_letter_exts[i]));
2807 (*count)++;
2808 }
2809 }
2810
2811 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2812 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2813 extensions[*count] = g_strdup(edata->name);
2814 (*count)++;
2815 }
2816 }
2817
2818 return extensions;
2819 }
2820
2821 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2822 {
2823 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2824 const size_t maxlen = sizeof("rv128i");
2825 g_autofree char *isa_base = g_new(char, maxlen);
2826 g_autofree char *riscv_isa;
2827 char **isa_extensions;
2828 int count = 0;
2829 int xlen = riscv_cpu_max_xlen(mcc);
2830
2831 riscv_isa = riscv_isa_string(cpu);
2832 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2833
2834 snprintf(isa_base, maxlen, "rv%di", xlen);
2835 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2836
2837 isa_extensions = riscv_isa_extensions_list(cpu, &count);
2838 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2839 isa_extensions, count);
2840
2841 for (int i = 0; i < count; i++) {
2842 g_free(isa_extensions[i]);
2843 }
2844
2845 g_free(isa_extensions);
2846 }
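
/*
 * Sketch of the device tree properties this writes for a CPU node, with
 * illustrative, configuration-dependent values:
 *
 *     riscv,isa = "rv64imafdc_zicsr_zifencei";
 *     riscv,isa-base = "rv64i";
 *     riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
 *                            "zicsr", "zifencei";
 */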
2847 #endif
2848
2849 #define DEFINE_ABSTRACT_RISCV_CPU(type_name, parent_type_name, ...) \
2850 { \
2851 .name = (type_name), \
2852 .parent = (parent_type_name), \
2853 .abstract = true, \
2854 .class_data = &(const RISCVCPUDef) { \
2855 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \
2856 .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \
2857 .cfg.max_satp_mode = -1, \
2858 __VA_ARGS__ \
2859 }, \
2860 }
2861
2862 #define DEFINE_RISCV_CPU(type_name, parent_type_name, ...) \
2863 { \
2864 .name = (type_name), \
2865 .parent = (parent_type_name), \
2866 .class_data = &(const RISCVCPUDef) { \
2867 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \
2868 .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \
2869 .cfg.max_satp_mode = -1, \
2870 __VA_ARGS__ \
2871 }, \
2872 }
2873
2874 #define DEFINE_PROFILE_CPU(type_name, parent_type_name, profile_) \
2875 DEFINE_RISCV_CPU(type_name, parent_type_name, \
2876 .profile = &(profile_))
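
/*
 * As an illustration, DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64,
 * TYPE_RISCV_CPU_RV64I, RVA22U64), used near the end of the list below,
 * expands to a TypeInfo whose class_data is a RISCVCPUDef with priv_spec
 * and vext_spec left as RISCV_PROFILE_ATTR_UNUSED, cfg.max_satp_mode of
 * -1 and .profile = &RVA22U64.  The profile pointer is recorded by
 * riscv_cpu_class_base_init() above and honoured later when the CPU is
 * realized.
 */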
2877
2878 static const TypeInfo riscv_cpu_type_infos[] = {
2879 {
2880 .name = TYPE_RISCV_CPU,
2881 .parent = TYPE_CPU,
2882 .instance_size = sizeof(RISCVCPU),
2883 .instance_align = __alignof(RISCVCPU),
2884 .instance_init = riscv_cpu_init,
2885 .abstract = true,
2886 .class_size = sizeof(RISCVCPUClass),
2887 .class_init = riscv_cpu_common_class_init,
2888 .class_base_init = riscv_cpu_class_base_init,
2889 },
2890
2891 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU,
2892 .cfg.mmu = true,
2893 .cfg.pmp = true,
2894 .priv_spec = PRIV_VERSION_LATEST,
2895 ),
2896
2897 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_VENDOR_CPU, TYPE_RISCV_CPU),
2898 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_BARE_CPU, TYPE_RISCV_CPU,
2899 /*
2900 * Bare CPUs do not inherit the timer and performance
2901 * counters from the parent class (see riscv_cpu_init()
2902 * for info on why the parent enables them).
2903 *
2904 * Users have to explicitly enable these counters for
2905 * bare CPUs.
2906 */
2907 .bare = true,
2908
2909 /* Set to QEMU's first supported priv version */
2910 .priv_spec = PRIV_VERSION_1_10_0,
2911
2912 /*
2913 * Support all available satp_mode settings. By default
2914 * only MBARE will be available if the user doesn't enable
2915 * a mode manually (see riscv_cpu_satp_mode_finalize()).
2916 */
2917 #ifdef TARGET_RISCV32
2918 .cfg.max_satp_mode = VM_1_10_SV32,
2919 #else
2920 .cfg.max_satp_mode = VM_1_10_SV57,
2921 #endif
2922 ),
2923
2924 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX, TYPE_RISCV_DYNAMIC_CPU,
2925 #if defined(TARGET_RISCV32)
2926 .misa_mxl_max = MXL_RV32,
2927 .cfg.max_satp_mode = VM_1_10_SV32,
2928 #elif defined(TARGET_RISCV64)
2929 .misa_mxl_max = MXL_RV64,
2930 .cfg.max_satp_mode = VM_1_10_SV57,
2931 #endif
2932 ),
2933
2934 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E, TYPE_RISCV_VENDOR_CPU,
2935 .misa_ext = RVI | RVM | RVA | RVC | RVU,
2936 .priv_spec = PRIV_VERSION_1_10_0,
2937 .cfg.max_satp_mode = VM_1_10_MBARE,
2938 .cfg.ext_zifencei = true,
2939 .cfg.ext_zicsr = true,
2940 .cfg.pmp = true
2941 ),
2942
2943 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU,
2944 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU,
2945 .priv_spec = PRIV_VERSION_1_10_0,
2946
2947 .cfg.max_satp_mode = VM_1_10_SV39,
2948 .cfg.ext_zifencei = true,
2949 .cfg.ext_zicsr = true,
2950 .cfg.mmu = true,
2951 .cfg.pmp = true
2952 ),
2953
2954 #if defined(TARGET_RISCV32) || \
2955 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
2956 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE32, TYPE_RISCV_DYNAMIC_CPU,
2957 .cfg.max_satp_mode = VM_1_10_SV32,
2958 .misa_mxl_max = MXL_RV32,
2959 ),
2960
2961 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_IBEX, TYPE_RISCV_VENDOR_CPU,
2962 .misa_mxl_max = MXL_RV32,
2963 .misa_ext = RVI | RVM | RVC | RVU,
2964 .priv_spec = PRIV_VERSION_1_12_0,
2965 .cfg.max_satp_mode = VM_1_10_MBARE,
2966 .cfg.ext_zifencei = true,
2967 .cfg.ext_zicsr = true,
2968 .cfg.pmp = true,
2969 .cfg.ext_smepmp = true,
2970
2971 .cfg.ext_zba = true,
2972 .cfg.ext_zbb = true,
2973 .cfg.ext_zbc = true,
2974 .cfg.ext_zbs = true
2975 ),
2976
2977 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E31, TYPE_RISCV_CPU_SIFIVE_E,
2978 .misa_mxl_max = MXL_RV32
2979 ),
2980 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E34, TYPE_RISCV_CPU_SIFIVE_E,
2981 .misa_mxl_max = MXL_RV32,
2982 .misa_ext = RVF, /* IMAFCU */
2983 ),
2984
2985 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U34, TYPE_RISCV_CPU_SIFIVE_U,
2986 .misa_mxl_max = MXL_RV32,
2987 ),
2988
2989 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32I, TYPE_RISCV_BARE_CPU,
2990 .misa_mxl_max = MXL_RV32,
2991 .misa_ext = RVI
2992 ),
2993 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32E, TYPE_RISCV_BARE_CPU,
2994 .misa_mxl_max = MXL_RV32,
2995 .misa_ext = RVE
2996 ),
2997 #endif
2998
2999 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
3000 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX32, TYPE_RISCV_DYNAMIC_CPU,
3001 .cfg.max_satp_mode = VM_1_10_SV32,
3002 .misa_mxl_max = MXL_RV32,
3003 ),
3004 #endif
3005
3006 #if defined(TARGET_RISCV64)
3007 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE64, TYPE_RISCV_DYNAMIC_CPU,
3008 .cfg.max_satp_mode = VM_1_10_SV57,
3009 .misa_mxl_max = MXL_RV64,
3010 ),
3011
3012 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E51, TYPE_RISCV_CPU_SIFIVE_E,
3013 .misa_mxl_max = MXL_RV64
3014 ),
3015
3016 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U54, TYPE_RISCV_CPU_SIFIVE_U,
3017 .misa_mxl_max = MXL_RV64,
3018 ),
3019
3020 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SHAKTI_C, TYPE_RISCV_CPU_SIFIVE_U,
3021 .misa_mxl_max = MXL_RV64,
3022 ),
3023
3024 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_THEAD_C906, TYPE_RISCV_VENDOR_CPU,
3025 .misa_mxl_max = MXL_RV64,
3026 .misa_ext = RVG | RVC | RVS | RVU,
3027 .priv_spec = PRIV_VERSION_1_11_0,
3028
3029 .cfg.ext_zfa = true,
3030 .cfg.ext_zfh = true,
3031 .cfg.mmu = true,
3032 .cfg.ext_xtheadba = true,
3033 .cfg.ext_xtheadbb = true,
3034 .cfg.ext_xtheadbs = true,
3035 .cfg.ext_xtheadcmo = true,
3036 .cfg.ext_xtheadcondmov = true,
3037 .cfg.ext_xtheadfmemidx = true,
3038 .cfg.ext_xtheadmac = true,
3039 .cfg.ext_xtheadmemidx = true,
3040 .cfg.ext_xtheadmempair = true,
3041 .cfg.ext_xtheadsync = true,
3042 .cfg.pmp = true,
3043
3044 .cfg.mvendorid = THEAD_VENDOR_ID,
3045
3046 .cfg.max_satp_mode = VM_1_10_SV39,
3047 #ifndef CONFIG_USER_ONLY
3048 .custom_csrs = th_csr_list,
3049 #endif
3050 ),
3051
3052 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_TT_ASCALON, TYPE_RISCV_VENDOR_CPU,
3053 .misa_mxl_max = MXL_RV64,
3054 .misa_ext = RVG | RVC | RVS | RVU | RVH | RVV,
3055 .priv_spec = PRIV_VERSION_1_13_0,
3056 .vext_spec = VEXT_VERSION_1_00_0,
3057
3058 /* ISA extensions */
3059 .cfg.mmu = true,
3060 .cfg.vlenb = 256 >> 3,
3061 .cfg.elen = 64,
3062 .cfg.rvv_ma_all_1s = true,
3063 .cfg.rvv_ta_all_1s = true,
3064 .cfg.misa_w = true,
3065 .cfg.pmp = true,
3066 .cfg.cbom_blocksize = 64,
3067 .cfg.cbop_blocksize = 64,
3068 .cfg.cboz_blocksize = 64,
3069 .cfg.ext_zic64b = true,
3070 .cfg.ext_zicbom = true,
3071 .cfg.ext_zicbop = true,
3072 .cfg.ext_zicboz = true,
3073 .cfg.ext_zicntr = true,
3074 .cfg.ext_zicond = true,
3075 .cfg.ext_zicsr = true,
3076 .cfg.ext_zifencei = true,
3077 .cfg.ext_zihintntl = true,
3078 .cfg.ext_zihintpause = true,
3079 .cfg.ext_zihpm = true,
3080 .cfg.ext_zimop = true,
3081 .cfg.ext_zawrs = true,
3082 .cfg.ext_zfa = true,
3083 .cfg.ext_zfbfmin = true,
3084 .cfg.ext_zfh = true,
3085 .cfg.ext_zfhmin = true,
3086 .cfg.ext_zcb = true,
3087 .cfg.ext_zcmop = true,
3088 .cfg.ext_zba = true,
3089 .cfg.ext_zbb = true,
3090 .cfg.ext_zbs = true,
3091 .cfg.ext_zkt = true,
3092 .cfg.ext_zvbb = true,
3093 .cfg.ext_zvbc = true,
3094 .cfg.ext_zvfbfmin = true,
3095 .cfg.ext_zvfbfwma = true,
3096 .cfg.ext_zvfh = true,
3097 .cfg.ext_zvfhmin = true,
3098 .cfg.ext_zvkng = true,
3099 .cfg.ext_smaia = true,
3100 .cfg.ext_smstateen = true,
3101 .cfg.ext_ssaia = true,
3102 .cfg.ext_sscofpmf = true,
3103 .cfg.ext_sstc = true,
3104 .cfg.ext_svade = true,
3105 .cfg.ext_svinval = true,
3106 .cfg.ext_svnapot = true,
3107 .cfg.ext_svpbmt = true,
3108
3109 .cfg.max_satp_mode = VM_1_10_SV57,
3110 ),
3111
3112 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_VEYRON_V1, TYPE_RISCV_VENDOR_CPU,
3113 .misa_mxl_max = MXL_RV64,
3114 .misa_ext = RVG | RVC | RVS | RVU | RVH,
3115 .priv_spec = PRIV_VERSION_1_12_0,
3116
3117 /* ISA extensions */
3118 .cfg.mmu = true,
3119 .cfg.ext_zifencei = true,
3120 .cfg.ext_zicsr = true,
3121 .cfg.pmp = true,
3122 .cfg.ext_zicbom = true,
3123 .cfg.cbom_blocksize = 64,
3124 .cfg.cboz_blocksize = 64,
3125 .cfg.ext_zicboz = true,
3126 .cfg.ext_smaia = true,
3127 .cfg.ext_ssaia = true,
3128 .cfg.ext_sscofpmf = true,
3129 .cfg.ext_sstc = true,
3130 .cfg.ext_svinval = true,
3131 .cfg.ext_svnapot = true,
3132 .cfg.ext_svpbmt = true,
3133 .cfg.ext_smstateen = true,
3134 .cfg.ext_zba = true,
3135 .cfg.ext_zbb = true,
3136 .cfg.ext_zbc = true,
3137 .cfg.ext_zbs = true,
3138 .cfg.ext_XVentanaCondOps = true,
3139
3140 .cfg.mvendorid = VEYRON_V1_MVENDORID,
3141 .cfg.marchid = VEYRON_V1_MARCHID,
3142 .cfg.mimpid = VEYRON_V1_MIMPID,
3143
3144 .cfg.max_satp_mode = VM_1_10_SV48,
3145 ),
3146
3147 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, TYPE_RISCV_VENDOR_CPU,
3148 .misa_mxl_max = MXL_RV64,
3149 .misa_ext = RVG | RVC | RVB | RVS | RVU,
3150 .priv_spec = PRIV_VERSION_1_12_0,
3151
3152 /* ISA extensions */
3153 .cfg.ext_zbc = true,
3154 .cfg.ext_zbkb = true,
3155 .cfg.ext_zbkc = true,
3156 .cfg.ext_zbkx = true,
3157 .cfg.ext_zknd = true,
3158 .cfg.ext_zkne = true,
3159 .cfg.ext_zknh = true,
3160 .cfg.ext_zksed = true,
3161 .cfg.ext_zksh = true,
3162 .cfg.ext_svinval = true,
3163
3164 .cfg.mmu = true,
3165 .cfg.pmp = true,
3166
3167 .cfg.max_satp_mode = VM_1_10_SV39,
3168 ),
3169
3170 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
3171 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE128, TYPE_RISCV_DYNAMIC_CPU,
3172 .cfg.max_satp_mode = VM_1_10_SV57,
3173 .misa_mxl_max = MXL_RV128,
3174 ),
3175 #endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
3176 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64I, TYPE_RISCV_BARE_CPU,
3177 .misa_mxl_max = MXL_RV64,
3178 .misa_ext = RVI
3179 ),
3180 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64E, TYPE_RISCV_BARE_CPU,
3181 .misa_mxl_max = MXL_RV64,
3182 .misa_ext = RVE
3183 ),
3184
3185 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, TYPE_RISCV_CPU_RV64I, RVA22U64),
3186 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, TYPE_RISCV_CPU_RV64I, RVA22S64),
3187 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, TYPE_RISCV_CPU_RV64I, RVA23U64),
3188 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, TYPE_RISCV_CPU_RV64I, RVA23S64),
3189 #endif /* TARGET_RISCV64 */
3190 };
3191
3192 DEFINE_TYPES(riscv_cpu_type_infos)
3193