1 /*
2 * QEMU RISC-V CPU
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "qapi/error.h"
28 #include "qapi/visitor.h"
29 #include "qemu/error-report.h"
30 #include "hw/qdev-properties.h"
31 #include "hw/core/qdev-prop-internal.h"
32 #include "migration/vmstate.h"
33 #include "fpu/softfloat-helpers.h"
34 #include "system/device_tree.h"
35 #include "system/kvm.h"
36 #include "system/tcg.h"
37 #include "kvm/kvm_riscv.h"
38 #include "tcg/tcg-cpu.h"
39 #include "tcg/tcg.h"
40
41 /* RISC-V CPU definitions */
42 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
43 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
44 RVC, RVS, RVU, RVH, RVG, RVB, 0};
45
46 /*
47 * From vector_helper.c
48 * Note that vector data is stored in host-endian 64-bit chunks,
49 * so addressing bytes needs a host-endian fixup.
50 */
51 #if HOST_BIG_ENDIAN
52 #define BYTE(x) ((x) ^ 7)
53 #else
54 #define BYTE(x) (x)
55 #endif
56
riscv_cpu_is_32bit(RISCVCPU * cpu)57 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
58 {
59 return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
60 }
61
62 /* Hash that stores general user set numeric options */
63 static GHashTable *general_user_opts;
64
cpu_option_add_user_setting(const char * optname,uint32_t value)65 static void cpu_option_add_user_setting(const char *optname, uint32_t value)
66 {
67 g_hash_table_insert(general_user_opts, (gpointer)optname,
68 GUINT_TO_POINTER(value));
69 }
70
riscv_cpu_option_set(const char * optname)71 bool riscv_cpu_option_set(const char *optname)
72 {
73 return g_hash_table_contains(general_user_opts, optname);
74 }
75
riscv_cpu_cfg_merge(RISCVCPUConfig * dest,const RISCVCPUConfig * src)76 static void riscv_cpu_cfg_merge(RISCVCPUConfig *dest, const RISCVCPUConfig *src)
77 {
78 #define BOOL_FIELD(x) dest->x |= src->x;
79 #define TYPED_FIELD(type, x, default_) if (src->x != default_) dest->x = src->x;
80 #include "cpu_cfg_fields.h.inc"
81 }
82
83 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
84 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
85
86 /*
87 * Here are the ordering rules of extension naming defined by RISC-V
88 * specification :
89 * 1. All extensions should be separated from other multi-letter extensions
90 * by an underscore.
91 * 2. The first letter following the 'Z' conventionally indicates the most
92 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
93 * If multiple 'Z' extensions are named, they should be ordered first
94 * by category, then alphabetically within a category.
95 * 3. Standard supervisor-level extensions (starts with 'S') should be
96 * listed after standard unprivileged extensions. If multiple
97 * supervisor-level extensions are listed, they should be ordered
98 * alphabetically.
99 * 4. Non-standard extensions (starts with 'X') must be listed after all
100 * standard extensions. They must be separated from other multi-letter
101 * extensions by an underscore.
102 *
103 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
104 * instead.
105 */
106 const RISCVIsaExtData isa_edata_arr[] = {
107 ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
108 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
109 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
110 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
111 ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
112 ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
113 ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
114 ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
115 ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
116 ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
117 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
118 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
119 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
120 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
121 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
122 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
123 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
124 ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
125 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
126 ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
127 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
128 ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
129 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
130 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
131 ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
132 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
133 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
134 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
135 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
136 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
137 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
138 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
139 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
140 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
141 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
142 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
143 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
144 ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
145 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
146 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
147 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
148 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
149 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
150 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
151 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
152 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
153 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
154 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
155 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
156 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
157 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
158 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
159 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
160 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
161 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
162 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
163 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
164 ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
165 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
166 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
167 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
168 ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
169 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
170 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
171 ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
172 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
173 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
174 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
175 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
176 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
177 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
178 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
179 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
180 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
181 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
182 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
183 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
184 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
185 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
186 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
187 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
188 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
189 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
190 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
191 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
192 ISA_EXT_DATA_ENTRY(sdtrig, PRIV_VERSION_1_12_0, debug),
193 ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
194 ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
195 ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
196 ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
197 ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
198 ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
199 ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
200 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
201 ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
202 ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
203 ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
204 ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
205 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
206 ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
207 ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
208 ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
209 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
210 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
211 ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
212 ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
213 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
214 ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
215 ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
216 ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
217 ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
218 ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
219 ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
220 ISA_EXT_DATA_ENTRY(ssstrict, PRIV_VERSION_1_12_0, has_priv_1_12),
221 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
222 ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
223 ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
224 ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
225 ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
226 ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
227 ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
228 ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
229 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
230 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
231 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
232 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
233 ISA_EXT_DATA_ENTRY(svrsw60t59b, PRIV_VERSION_1_13_0, ext_svrsw60t59b),
234 ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
235 ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
236 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
237 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
238 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
239 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
240 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
241 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
242 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
243 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
244 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
245 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
246 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
247 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
248
249 { },
250 };
251
isa_ext_is_enabled(RISCVCPU * cpu,uint32_t ext_offset)252 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
253 {
254 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
255
256 return *ext_enabled;
257 }
258
isa_ext_update_enabled(RISCVCPU * cpu,uint32_t ext_offset,bool en)259 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
260 {
261 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
262
263 *ext_enabled = en;
264 }
265
riscv_cpu_is_vendor(Object * cpu_obj)266 bool riscv_cpu_is_vendor(Object *cpu_obj)
267 {
268 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
269 }
270
271 const char * const riscv_int_regnames[] = {
272 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
273 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
274 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
275 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
276 "x28/t3", "x29/t4", "x30/t5", "x31/t6"
277 };
278
279 const char * const riscv_int_regnamesh[] = {
280 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
281 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
282 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
283 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
284 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
285 "x30h/t5h", "x31h/t6h"
286 };
287
288 const char * const riscv_fpr_regnames[] = {
289 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
290 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
291 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
292 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
293 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
294 "f30/ft10", "f31/ft11"
295 };
296
297 const char * const riscv_rvv_regnames[] = {
298 "v0", "v1", "v2", "v3", "v4", "v5", "v6",
299 "v7", "v8", "v9", "v10", "v11", "v12", "v13",
300 "v14", "v15", "v16", "v17", "v18", "v19", "v20",
301 "v21", "v22", "v23", "v24", "v25", "v26", "v27",
302 "v28", "v29", "v30", "v31"
303 };
304
305 static const char * const riscv_excp_names[] = {
306 "misaligned_fetch",
307 "fault_fetch",
308 "illegal_instruction",
309 "breakpoint",
310 "misaligned_load",
311 "fault_load",
312 "misaligned_store",
313 "fault_store",
314 "user_ecall",
315 "supervisor_ecall",
316 "hypervisor_ecall",
317 "machine_ecall",
318 "exec_page_fault",
319 "load_page_fault",
320 "reserved",
321 "store_page_fault",
322 "double_trap",
323 "reserved",
324 "reserved",
325 "reserved",
326 "guest_exec_page_fault",
327 "guest_load_page_fault",
328 "reserved",
329 "guest_store_page_fault",
330 };
331
332 static const char * const riscv_intr_names[] = {
333 "u_software",
334 "s_software",
335 "vs_software",
336 "m_software",
337 "u_timer",
338 "s_timer",
339 "vs_timer",
340 "m_timer",
341 "u_external",
342 "s_external",
343 "vs_external",
344 "m_external",
345 "reserved",
346 "reserved",
347 "reserved",
348 "reserved"
349 };
350
riscv_cpu_get_trap_name(target_ulong cause,bool async)351 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
352 {
353 if (async) {
354 return (cause < ARRAY_SIZE(riscv_intr_names)) ?
355 riscv_intr_names[cause] : "(unknown)";
356 } else {
357 return (cause < ARRAY_SIZE(riscv_excp_names)) ?
358 riscv_excp_names[cause] : "(unknown)";
359 }
360 }
361
riscv_cpu_set_misa_ext(CPURISCVState * env,uint32_t ext)362 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
363 {
364 env->misa_ext_mask = env->misa_ext = ext;
365 }
366
riscv_cpu_max_xlen(RISCVCPUClass * mcc)367 int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
368 {
369 return 16 << mcc->def->misa_mxl_max;
370 }
371
372 #ifndef CONFIG_USER_ONLY
satp_mode_from_str(const char * satp_mode_str)373 static uint8_t satp_mode_from_str(const char *satp_mode_str)
374 {
375 if (!strncmp(satp_mode_str, "mbare", 5)) {
376 return VM_1_10_MBARE;
377 }
378
379 if (!strncmp(satp_mode_str, "sv32", 4)) {
380 return VM_1_10_SV32;
381 }
382
383 if (!strncmp(satp_mode_str, "sv39", 4)) {
384 return VM_1_10_SV39;
385 }
386
387 if (!strncmp(satp_mode_str, "sv48", 4)) {
388 return VM_1_10_SV48;
389 }
390
391 if (!strncmp(satp_mode_str, "sv57", 4)) {
392 return VM_1_10_SV57;
393 }
394
395 if (!strncmp(satp_mode_str, "sv64", 4)) {
396 return VM_1_10_SV64;
397 }
398
399 g_assert_not_reached();
400 }
401
satp_mode_max_from_map(uint32_t map)402 static uint8_t satp_mode_max_from_map(uint32_t map)
403 {
404 /*
405 * 'map = 0' will make us return (31 - 32), which C will
406 * happily overflow to UINT_MAX. There's no good result to
407 * return if 'map = 0' (e.g. returning 0 will be ambiguous
408 * with the result for 'map = 1').
409 *
410 * Assert out if map = 0. Callers will have to deal with
411 * it outside of this function.
412 */
413 g_assert(map > 0);
414
415 /* map here has at least one bit set, so no problem with clz */
416 return 31 - __builtin_clz(map);
417 }
418
satp_mode_str(uint8_t satp_mode,bool is_32_bit)419 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
420 {
421 if (is_32_bit) {
422 switch (satp_mode) {
423 case VM_1_10_SV32:
424 return "sv32";
425 case VM_1_10_MBARE:
426 return "none";
427 }
428 } else {
429 switch (satp_mode) {
430 case VM_1_10_SV64:
431 return "sv64";
432 case VM_1_10_SV57:
433 return "sv57";
434 case VM_1_10_SV48:
435 return "sv48";
436 case VM_1_10_SV39:
437 return "sv39";
438 case VM_1_10_MBARE:
439 return "none";
440 }
441 }
442
443 g_assert_not_reached();
444 }
445
get_satp_mode_supported(RISCVCPU * cpu,uint16_t * supported)446 static bool get_satp_mode_supported(RISCVCPU *cpu, uint16_t *supported)
447 {
448 bool rv32 = riscv_cpu_is_32bit(cpu);
449 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
450 int satp_mode = cpu->cfg.max_satp_mode;
451
452 if (satp_mode == -1) {
453 return false;
454 }
455
456 *supported = 0;
457 for (int i = 0; i <= satp_mode; ++i) {
458 if (valid_vm[i]) {
459 *supported |= (1 << i);
460 }
461 }
462 return true;
463 }
464
465 /* Set the satp mode to the max supported */
set_satp_mode_default_map(RISCVCPU * cpu)466 static void set_satp_mode_default_map(RISCVCPU *cpu)
467 {
468 /*
469 * Bare CPUs do not default to the max available.
470 * Users must set a valid satp_mode in the command
471 * line. Otherwise, leave the existing max_satp_mode
472 * in place.
473 */
474 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
475 warn_report("No satp mode set. Defaulting to 'bare'");
476 cpu->cfg.max_satp_mode = VM_1_10_MBARE;
477 }
478 }
479 #endif
480
481 #ifndef CONFIG_USER_ONLY
riscv_register_custom_csrs(RISCVCPU * cpu,const RISCVCSR * csr_list)482 static void riscv_register_custom_csrs(RISCVCPU *cpu, const RISCVCSR *csr_list)
483 {
484 for (size_t i = 0; csr_list[i].csr_ops.name; i++) {
485 int csrno = csr_list[i].csrno;
486 const riscv_csr_operations *csr_ops = &csr_list[i].csr_ops;
487 if (!csr_list[i].insertion_test || csr_list[i].insertion_test(cpu)) {
488 riscv_set_csr_ops(csrno, csr_ops);
489 }
490 }
491 }
492 #endif
493
riscv_cpu_class_by_name(const char * cpu_model)494 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
495 {
496 ObjectClass *oc;
497 char *typename;
498 char **cpuname;
499
500 cpuname = g_strsplit(cpu_model, ",", 1);
501 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
502 oc = object_class_by_name(typename);
503 g_strfreev(cpuname);
504 g_free(typename);
505
506 return oc;
507 }
508
riscv_cpu_get_name(RISCVCPU * cpu)509 char *riscv_cpu_get_name(RISCVCPU *cpu)
510 {
511 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
512 const char *typename = object_class_get_name(OBJECT_CLASS(rcc));
513
514 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));
515
516 return cpu_model_from_type(typename);
517 }
518
riscv_cpu_dump_state(CPUState * cs,FILE * f,int flags)519 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
520 {
521 RISCVCPU *cpu = RISCV_CPU(cs);
522 CPURISCVState *env = &cpu->env;
523 int i, j;
524 uint8_t *p;
525
526 #if !defined(CONFIG_USER_ONLY)
527 if (riscv_has_ext(env, RVH)) {
528 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
529 }
530 #endif
531 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
532 #ifndef CONFIG_USER_ONLY
533 {
534 static const int dump_csrs[] = {
535 CSR_MHARTID,
536 CSR_MSTATUS,
537 CSR_MSTATUSH,
538 /*
539 * CSR_SSTATUS is intentionally omitted here as its value
540 * can be figured out by looking at CSR_MSTATUS
541 */
542 CSR_HSTATUS,
543 CSR_VSSTATUS,
544 CSR_MIP,
545 CSR_MIE,
546 CSR_MIDELEG,
547 CSR_HIDELEG,
548 CSR_MEDELEG,
549 CSR_HEDELEG,
550 CSR_MTVEC,
551 CSR_STVEC,
552 CSR_VSTVEC,
553 CSR_MEPC,
554 CSR_SEPC,
555 CSR_VSEPC,
556 CSR_MCAUSE,
557 CSR_SCAUSE,
558 CSR_VSCAUSE,
559 CSR_MTVAL,
560 CSR_STVAL,
561 CSR_HTVAL,
562 CSR_MTVAL2,
563 CSR_MSCRATCH,
564 CSR_SSCRATCH,
565 CSR_SATP,
566 };
567
568 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
569 int csrno = dump_csrs[i];
570 target_ulong val = 0;
571 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
572
573 /*
574 * Rely on the smode, hmode, etc, predicates within csr.c
575 * to do the filtering of the registers that are present.
576 */
577 if (res == RISCV_EXCP_NONE) {
578 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
579 csr_ops[csrno].name, val);
580 }
581 }
582 }
583 #endif
584
585 for (i = 0; i < 32; i++) {
586 qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
587 riscv_int_regnames[i], env->gpr[i]);
588 if ((i & 3) == 3) {
589 qemu_fprintf(f, "\n");
590 }
591 }
592 if (flags & CPU_DUMP_FPU) {
593 target_ulong val = 0;
594 RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
595 if (res == RISCV_EXCP_NONE) {
596 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
597 csr_ops[CSR_FCSR].name, val);
598 }
599 for (i = 0; i < 32; i++) {
600 qemu_fprintf(f, " %-8s %016" PRIx64,
601 riscv_fpr_regnames[i], env->fpr[i]);
602 if ((i & 3) == 3) {
603 qemu_fprintf(f, "\n");
604 }
605 }
606 }
607 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
608 static const int dump_rvv_csrs[] = {
609 CSR_VSTART,
610 CSR_VXSAT,
611 CSR_VXRM,
612 CSR_VCSR,
613 CSR_VL,
614 CSR_VTYPE,
615 CSR_VLENB,
616 };
617 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
618 int csrno = dump_rvv_csrs[i];
619 target_ulong val = 0;
620 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
621
622 /*
623 * Rely on the smode, hmode, etc, predicates within csr.c
624 * to do the filtering of the registers that are present.
625 */
626 if (res == RISCV_EXCP_NONE) {
627 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
628 csr_ops[csrno].name, val);
629 }
630 }
631 uint16_t vlenb = cpu->cfg.vlenb;
632
633 for (i = 0; i < 32; i++) {
634 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
635 p = (uint8_t *)env->vreg;
636 for (j = vlenb - 1 ; j >= 0; j--) {
637 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
638 }
639 qemu_fprintf(f, "\n");
640 }
641 }
642 }
643
riscv_cpu_set_pc(CPUState * cs,vaddr value)644 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
645 {
646 RISCVCPU *cpu = RISCV_CPU(cs);
647 CPURISCVState *env = &cpu->env;
648
649 if (env->xl == MXL_RV32) {
650 env->pc = (int32_t)value;
651 } else {
652 env->pc = value;
653 }
654 }
655
riscv_cpu_get_pc(CPUState * cs)656 static vaddr riscv_cpu_get_pc(CPUState *cs)
657 {
658 RISCVCPU *cpu = RISCV_CPU(cs);
659 CPURISCVState *env = &cpu->env;
660
661 /* Match cpu_get_tb_cpu_state. */
662 if (env->xl == MXL_RV32) {
663 return env->pc & UINT32_MAX;
664 }
665 return env->pc;
666 }
667
668 #ifndef CONFIG_USER_ONLY
riscv_cpu_has_work(CPUState * cs)669 bool riscv_cpu_has_work(CPUState *cs)
670 {
671 RISCVCPU *cpu = RISCV_CPU(cs);
672 CPURISCVState *env = &cpu->env;
673 /*
674 * Definition of the WFI instruction requires it to ignore the privilege
675 * mode and delegation registers, but respect individual enables
676 */
677 return riscv_cpu_all_pending(env) != 0 ||
678 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
679 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
680 }
681 #endif /* !CONFIG_USER_ONLY */
682
riscv_cpu_reset_hold(Object * obj,ResetType type)683 static void riscv_cpu_reset_hold(Object *obj, ResetType type)
684 {
685 #ifndef CONFIG_USER_ONLY
686 uint8_t iprio;
687 int i, irq, rdzero;
688 #endif
689 CPUState *cs = CPU(obj);
690 RISCVCPU *cpu = RISCV_CPU(cs);
691 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
692 CPURISCVState *env = &cpu->env;
693
694 if (mcc->parent_phases.hold) {
695 mcc->parent_phases.hold(obj, type);
696 }
697 #ifndef CONFIG_USER_ONLY
698 env->misa_mxl = mcc->def->misa_mxl_max;
699 env->priv = PRV_M;
700 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
701 if (env->misa_mxl > MXL_RV32) {
702 /*
703 * The reset status of SXL/UXL is undefined, but mstatus is WARL
704 * and we must ensure that the value after init is valid for read.
705 */
706 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
707 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
708 if (riscv_has_ext(env, RVH)) {
709 env->vsstatus = set_field(env->vsstatus,
710 MSTATUS64_SXL, env->misa_mxl);
711 env->vsstatus = set_field(env->vsstatus,
712 MSTATUS64_UXL, env->misa_mxl);
713 env->mstatus_hs = set_field(env->mstatus_hs,
714 MSTATUS64_SXL, env->misa_mxl);
715 env->mstatus_hs = set_field(env->mstatus_hs,
716 MSTATUS64_UXL, env->misa_mxl);
717 }
718 if (riscv_cpu_cfg(env)->ext_smdbltrp) {
719 env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
720 }
721 }
722 env->mcause = 0;
723 env->miclaim = MIP_SGEIP;
724 env->pc = env->resetvec;
725 env->bins = 0;
726 env->two_stage_lookup = false;
727
728 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
729 (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
730 MENVCFG_ADUE : 0);
731 env->henvcfg = 0;
732
733 /* Initialized default priorities of local interrupts. */
734 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
735 iprio = riscv_cpu_default_priority(i);
736 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
737 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
738 env->hviprio[i] = 0;
739 }
740 i = 0;
741 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
742 if (!rdzero) {
743 env->hviprio[irq] = env->miprio[irq];
744 }
745 i++;
746 }
747
748 /*
749 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
750 * extension is enabled.
751 */
752 if (riscv_has_ext(env, RVH)) {
753 env->mideleg |= HS_MODE_INTERRUPTS;
754 }
755
756 /*
757 * Clear mseccfg and unlock all the PMP entries upon reset.
758 * This is allowed as per the priv and smepmp specifications
759 * and is needed to clear stale entries across reboots.
760 */
761 if (riscv_cpu_cfg(env)->ext_smepmp) {
762 env->mseccfg = 0;
763 }
764
765 pmp_unlock_entries(env);
766 #else
767 env->priv = PRV_U;
768 env->senvcfg = 0;
769 env->menvcfg = 0;
770 #endif
771
772 /* on reset elp is clear */
773 env->elp = false;
774 /* on reset ssp is set to 0 */
775 env->ssp = 0;
776
777 env->xl = riscv_cpu_mxl(env);
778 cs->exception_index = RISCV_EXCP_NONE;
779 env->load_res = -1;
780 set_default_nan_mode(1, &env->fp_status);
781 /* Default NaN value: sign bit clear, frac msb set */
782 set_float_default_nan_pattern(0b01000000, &env->fp_status);
783 env->vill = true;
784
785 #ifndef CONFIG_USER_ONLY
786 if (cpu->cfg.debug) {
787 riscv_trigger_reset_hold(env);
788 }
789
790 if (cpu->cfg.ext_smrnmi) {
791 env->rnmip = 0;
792 env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
793 }
794
795 if (kvm_enabled()) {
796 kvm_riscv_reset_vcpu(cpu);
797 }
798 #endif
799 }
800
riscv_cpu_disas_set_info(CPUState * s,disassemble_info * info)801 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
802 {
803 RISCVCPU *cpu = RISCV_CPU(s);
804 CPURISCVState *env = &cpu->env;
805 info->target_info = &cpu->cfg;
806
807 /*
808 * A couple of bits in MSTATUS set the endianness:
809 * - MSTATUS_UBE (User-mode),
810 * - MSTATUS_SBE (Supervisor-mode),
811 * - MSTATUS_MBE (Machine-mode)
812 * but we don't implement that yet.
813 */
814 info->endian = BFD_ENDIAN_LITTLE;
815
816 switch (env->xl) {
817 case MXL_RV32:
818 info->print_insn = print_insn_riscv32;
819 break;
820 case MXL_RV64:
821 info->print_insn = print_insn_riscv64;
822 break;
823 case MXL_RV128:
824 info->print_insn = print_insn_riscv128;
825 break;
826 default:
827 g_assert_not_reached();
828 }
829 }
830
831 #ifndef CONFIG_USER_ONLY
riscv_cpu_satp_mode_finalize(RISCVCPU * cpu,Error ** errp)832 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
833 {
834 bool rv32 = riscv_cpu_is_32bit(cpu);
835 uint16_t supported;
836 uint8_t satp_mode_map_max;
837
838 if (!get_satp_mode_supported(cpu, &supported)) {
839 /* The CPU wants the hypervisor to decide which satp mode to allow */
840 return;
841 }
842
843 if (cpu->satp_modes.map == 0) {
844 if (cpu->satp_modes.init == 0) {
845 /* If unset by the user, we fallback to the default satp mode. */
846 set_satp_mode_default_map(cpu);
847 } else {
848 /*
849 * Find the lowest level that was disabled and then enable the
850 * first valid level below which can be found in
851 * valid_vm_1_10_32/64.
852 */
853 for (int i = 1; i < 16; ++i) {
854 if ((cpu->satp_modes.init & (1 << i)) &&
855 supported & (1 << i)) {
856 for (int j = i - 1; j >= 0; --j) {
857 if (supported & (1 << j)) {
858 cpu->cfg.max_satp_mode = j;
859 return;
860 }
861 }
862 }
863 }
864 }
865 return;
866 }
867
868 satp_mode_map_max = satp_mode_max_from_map(cpu->satp_modes.map);
869
870 /* Make sure the user asked for a supported configuration (HW and qemu) */
871 if (satp_mode_map_max > cpu->cfg.max_satp_mode) {
872 error_setg(errp, "satp_mode %s is higher than hw max capability %s",
873 satp_mode_str(satp_mode_map_max, rv32),
874 satp_mode_str(cpu->cfg.max_satp_mode, rv32));
875 return;
876 }
877
878 /*
879 * Make sure the user did not ask for an invalid configuration as per
880 * the specification.
881 */
882 if (!rv32) {
883 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
884 if (!(cpu->satp_modes.map & (1 << i)) &&
885 (cpu->satp_modes.init & (1 << i)) &&
886 (supported & (1 << i))) {
887 error_setg(errp, "cannot disable %s satp mode if %s "
888 "is enabled", satp_mode_str(i, false),
889 satp_mode_str(satp_mode_map_max, false));
890 return;
891 }
892 }
893 }
894
895 cpu->cfg.max_satp_mode = satp_mode_map_max;
896 }
897 #endif
898
riscv_cpu_finalize_features(RISCVCPU * cpu,Error ** errp)899 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
900 {
901 Error *local_err = NULL;
902
903 #ifndef CONFIG_USER_ONLY
904 riscv_cpu_satp_mode_finalize(cpu, &local_err);
905 if (local_err != NULL) {
906 error_propagate(errp, local_err);
907 return;
908 }
909 #endif
910
911 if (tcg_enabled()) {
912 riscv_tcg_cpu_finalize_features(cpu, &local_err);
913 if (local_err != NULL) {
914 error_propagate(errp, local_err);
915 return;
916 }
917 riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
918 } else if (kvm_enabled()) {
919 riscv_kvm_cpu_finalize_features(cpu, &local_err);
920 if (local_err != NULL) {
921 error_propagate(errp, local_err);
922 return;
923 }
924 }
925 }
926
riscv_cpu_realize(DeviceState * dev,Error ** errp)927 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
928 {
929 CPUState *cs = CPU(dev);
930 RISCVCPU *cpu = RISCV_CPU(dev);
931 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
932 Error *local_err = NULL;
933
934 cpu_exec_realizefn(cs, &local_err);
935 if (local_err != NULL) {
936 error_propagate(errp, local_err);
937 return;
938 }
939
940 riscv_cpu_finalize_features(cpu, &local_err);
941 if (local_err != NULL) {
942 error_propagate(errp, local_err);
943 return;
944 }
945
946 riscv_cpu_register_gdb_regs_for_features(cs);
947
948 #ifndef CONFIG_USER_ONLY
949 if (cpu->cfg.debug) {
950 riscv_trigger_realize(&cpu->env);
951 }
952 #endif
953
954 qemu_init_vcpu(cs);
955 cpu_reset(cs);
956
957 mcc->parent_realize(dev, errp);
958 }
959
riscv_cpu_accelerator_compatible(RISCVCPU * cpu)960 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
961 {
962 if (tcg_enabled()) {
963 return riscv_cpu_tcg_compatible(cpu);
964 }
965
966 return true;
967 }
968
969 #ifndef CONFIG_USER_ONLY
cpu_riscv_get_satp(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)970 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
971 void *opaque, Error **errp)
972 {
973 RISCVSATPModes *satp_modes = opaque;
974 uint8_t satp = satp_mode_from_str(name);
975 bool value;
976
977 value = satp_modes->map & (1 << satp);
978
979 visit_type_bool(v, name, &value, errp);
980 }
981
cpu_riscv_set_satp(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)982 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
983 void *opaque, Error **errp)
984 {
985 RISCVSATPModes *satp_modes = opaque;
986 uint8_t satp = satp_mode_from_str(name);
987 bool value;
988
989 if (!visit_type_bool(v, name, &value, errp)) {
990 return;
991 }
992
993 satp_modes->map = deposit32(satp_modes->map, satp, 1, value);
994 satp_modes->init |= 1 << satp;
995 }
996
riscv_add_satp_mode_properties(Object * obj)997 void riscv_add_satp_mode_properties(Object *obj)
998 {
999 RISCVCPU *cpu = RISCV_CPU(obj);
1000
1001 if (cpu->env.misa_mxl == MXL_RV32) {
1002 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1003 cpu_riscv_set_satp, NULL, &cpu->satp_modes);
1004 } else {
1005 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1006 cpu_riscv_set_satp, NULL, &cpu->satp_modes);
1007 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1008 cpu_riscv_set_satp, NULL, &cpu->satp_modes);
1009 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1010 cpu_riscv_set_satp, NULL, &cpu->satp_modes);
1011 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1012 cpu_riscv_set_satp, NULL, &cpu->satp_modes);
1013 }
1014 }
1015
riscv_cpu_set_irq(void * opaque,int irq,int level)1016 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1017 {
1018 RISCVCPU *cpu = RISCV_CPU(opaque);
1019 CPURISCVState *env = &cpu->env;
1020
1021 if (irq < IRQ_LOCAL_MAX) {
1022 switch (irq) {
1023 case IRQ_U_SOFT:
1024 case IRQ_S_SOFT:
1025 case IRQ_VS_SOFT:
1026 case IRQ_M_SOFT:
1027 case IRQ_U_TIMER:
1028 case IRQ_S_TIMER:
1029 case IRQ_VS_TIMER:
1030 case IRQ_M_TIMER:
1031 case IRQ_U_EXT:
1032 case IRQ_VS_EXT:
1033 case IRQ_M_EXT:
1034 if (kvm_enabled()) {
1035 kvm_riscv_set_irq(cpu, irq, level);
1036 } else {
1037 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1038 }
1039 break;
1040 case IRQ_S_EXT:
1041 if (kvm_enabled()) {
1042 kvm_riscv_set_irq(cpu, irq, level);
1043 } else {
1044 env->external_seip = level;
1045 riscv_cpu_update_mip(env, 1 << irq,
1046 BOOL_TO_MASK(level | env->software_seip));
1047 }
1048 break;
1049 default:
1050 g_assert_not_reached();
1051 }
1052 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1053 /* Require H-extension for handling guest local interrupts */
1054 if (!riscv_has_ext(env, RVH)) {
1055 g_assert_not_reached();
1056 }
1057
1058 /* Compute bit position in HGEIP CSR */
1059 irq = irq - IRQ_LOCAL_MAX + 1;
1060 if (env->geilen < irq) {
1061 g_assert_not_reached();
1062 }
1063
1064 /* Update HGEIP CSR */
1065 env->hgeip &= ~((target_ulong)1 << irq);
1066 if (level) {
1067 env->hgeip |= (target_ulong)1 << irq;
1068 }
1069
1070 /* Update mip.SGEIP bit */
1071 riscv_cpu_update_mip(env, MIP_SGEIP,
1072 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1073 } else {
1074 g_assert_not_reached();
1075 }
1076 }
1077
riscv_cpu_set_nmi(void * opaque,int irq,int level)1078 static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
1079 {
1080 riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
1081 }
1082 #endif /* CONFIG_USER_ONLY */
1083
riscv_cpu_is_dynamic(Object * cpu_obj)1084 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1085 {
1086 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1087 }
1088
riscv_cpu_init(Object * obj)1089 static void riscv_cpu_init(Object *obj)
1090 {
1091 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
1092 RISCVCPU *cpu = RISCV_CPU(obj);
1093 CPURISCVState *env = &cpu->env;
1094
1095 env->misa_mxl = mcc->def->misa_mxl_max;
1096
1097 #ifndef CONFIG_USER_ONLY
1098 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
1099 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1100 qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
1101 "riscv.cpu.rnmi", RNMI_MAX);
1102 #endif /* CONFIG_USER_ONLY */
1103
1104 general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
1105
1106 /*
1107 * The timer and performance counters extensions were supported
1108 * in QEMU before they were added as discrete extensions in the
1109 * ISA. To keep compatibility we'll always default them to 'true'
1110 * for all CPUs. Each accelerator will decide what to do when
1111 * users disable them.
1112 */
1113 RISCV_CPU(obj)->cfg.ext_zicntr = !mcc->def->bare;
1114 RISCV_CPU(obj)->cfg.ext_zihpm = !mcc->def->bare;
1115
1116 /* Default values for non-bool cpu properties */
1117 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
1118 cpu->cfg.vlenb = 128 >> 3;
1119 cpu->cfg.elen = 64;
1120 cpu->cfg.cbom_blocksize = 64;
1121 cpu->cfg.cbop_blocksize = 64;
1122 cpu->cfg.cboz_blocksize = 64;
1123 cpu->cfg.pmp_regions = 16;
1124 cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1125 cpu->cfg.max_satp_mode = -1;
1126
1127 if (mcc->def->profile) {
1128 mcc->def->profile->enabled = true;
1129 }
1130
1131 env->misa_ext_mask = env->misa_ext = mcc->def->misa_ext;
1132 riscv_cpu_cfg_merge(&cpu->cfg, &mcc->def->cfg);
1133
1134 if (mcc->def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
1135 cpu->env.priv_ver = mcc->def->priv_spec;
1136 }
1137 if (mcc->def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
1138 cpu->env.vext_ver = mcc->def->vext_spec;
1139 }
1140 #ifndef CONFIG_USER_ONLY
1141 if (mcc->def->custom_csrs) {
1142 riscv_register_custom_csrs(cpu, mcc->def->custom_csrs);
1143 }
1144 #endif
1145
1146 accel_cpu_instance_init(CPU(obj));
1147 }
1148
1149 typedef struct misa_ext_info {
1150 const char *name;
1151 const char *description;
1152 } MISAExtInfo;
1153
1154 #define MISA_INFO_IDX(_bit) \
1155 __builtin_ctz(_bit)
1156
1157 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1158 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1159
1160 static const MISAExtInfo misa_ext_info_arr[] = {
1161 MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1162 MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1163 MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
1164 MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
1165 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1166 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1167 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1168 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1169 MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1170 MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1171 MISA_EXT_INFO(RVV, "v", "Vector operations"),
1172 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1173 MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
1174 };
1175
riscv_cpu_validate_misa_mxl(RISCVCPUClass * mcc)1176 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
1177 {
1178 CPUClass *cc = CPU_CLASS(mcc);
1179
1180 /* Validate that MISA_MXL is set properly. */
1181 switch (mcc->def->misa_mxl_max) {
1182 #ifdef TARGET_RISCV64
1183 case MXL_RV64:
1184 case MXL_RV128:
1185 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1186 break;
1187 #endif
1188 case MXL_RV32:
1189 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1190 break;
1191 default:
1192 g_assert_not_reached();
1193 }
1194 }
1195
riscv_validate_misa_info_idx(uint32_t bit)1196 static int riscv_validate_misa_info_idx(uint32_t bit)
1197 {
1198 int idx;
1199
1200 /*
1201 * Our lowest valid input (RVA) is 1 and
1202 * __builtin_ctz() is UB with zero.
1203 */
1204 g_assert(bit != 0);
1205 idx = MISA_INFO_IDX(bit);
1206
1207 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1208 return idx;
1209 }
1210
riscv_get_misa_ext_name(uint32_t bit)1211 const char *riscv_get_misa_ext_name(uint32_t bit)
1212 {
1213 int idx = riscv_validate_misa_info_idx(bit);
1214 const char *val = misa_ext_info_arr[idx].name;
1215
1216 g_assert(val != NULL);
1217 return val;
1218 }
1219
riscv_get_misa_ext_description(uint32_t bit)1220 const char *riscv_get_misa_ext_description(uint32_t bit)
1221 {
1222 int idx = riscv_validate_misa_info_idx(bit);
1223 const char *val = misa_ext_info_arr[idx].description;
1224
1225 g_assert(val != NULL);
1226 return val;
1227 }
1228
1229 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
1230 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
1231 .enabled = _defval}
1232
1233 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
1234 /* Defaults for standard extensions */
1235 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
1236 MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
1237 MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
1238 MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
1239 MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
1240 MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
1241 MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
1242 MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
1243 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
1244 MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
1245 MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
1246 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
1247 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
1248 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
1249 MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
1250 MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
1251 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
1252 MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
1253 MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
1254 MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
1255 MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
1256 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
1257 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
1258 MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
1259 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
1260 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
1261 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
1262 MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
1263 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
1264 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
1265 MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
1266 MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
1267 MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
1268 MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
1269 MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
1270 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
1271 MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
1272 MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
1273 MULTI_EXT_CFG_BOOL("supm", ext_supm, false),
1274
1275 MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
1276 MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
1277 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
1278 MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
1279 MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
1280 MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
1281 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
1282 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
1283 MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
1284 MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
1285 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
1286 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
1287 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
1288 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
1289 MULTI_EXT_CFG_BOOL("svrsw60t59b", ext_svrsw60t59b, false),
1290 MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),
1291
1292 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
1293 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
1294
1295 MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
1296 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
1297 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
1298 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
1299 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
1300 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
1301 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
1302 MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
1303 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
1304 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
1305 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
1306 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
1307 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
1308 MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
1309 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
1310 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
1311 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
1312 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),
1313
1314 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
1315 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
1316 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
1317 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
1318
1319 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
1320 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
1321 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
1322
1323 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
1324
1325 MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
1326 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
1327 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
1328 MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
1329 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
1330 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
1331 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
1332 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),
1333
1334 /* Vector cryptography extensions */
1335 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
1336 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
1337 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
1338 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
1339 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
1340 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
1341 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
1342 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
1343 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
1344 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
1345 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
1346 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
1347 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
1348 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
1349 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
1350 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
1351
1352 { },
1353 };
1354
1355 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
1356 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
1357 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
1358 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
1359 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
1360 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
1361 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
1362 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
1363 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
1364 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
1365 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
1366 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
1367 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
1368
1369 { },
1370 };
1371
1372 /* These are experimental so mark with 'x-' */
1373 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
1374 MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),
1375
1376 { },
1377 };
1378
1379 /*
1380 * 'Named features' is the name we give to extensions that we
1381 * don't want to expose to users. They are either immutable
1382 * (always enabled/disable) or they'll vary depending on
1383 * the resulting CPU state.
1384 *
1385 * Some of them are always enabled depending on priv version
1386 * of the CPU and are declared directly in isa_edata_arr[].
1387 * The ones listed here have special checks during finalize()
1388 * time and require their own flags like regular extensions.
1389 * See riscv_cpu_update_named_features() for more info.
1390 */
1391 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
1392 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
1393 MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
1394 MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
1395
1396 /*
1397 * 'ziccrse' has its own flag because the KVM driver
1398 * wants to enable/disable it on its own accord.
1399 */
1400 MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),
1401
1402 { },
1403 };
1404
cpu_set_prop_err(RISCVCPU * cpu,const char * propname,Error ** errp)1405 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1406 Error **errp)
1407 {
1408 g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1409 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1410 cpuname, propname);
1411 }
1412
prop_pmu_num_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1413 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1414 void *opaque, Error **errp)
1415 {
1416 RISCVCPU *cpu = RISCV_CPU(obj);
1417 uint8_t pmu_num, curr_pmu_num;
1418 uint32_t pmu_mask;
1419
1420 visit_type_uint8(v, name, &pmu_num, errp);
1421
1422 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1423
1424 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1425 cpu_set_prop_err(cpu, name, errp);
1426 error_append_hint(errp, "Current '%s' val: %u\n",
1427 name, curr_pmu_num);
1428 return;
1429 }
1430
1431 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1432 error_setg(errp, "Number of counters exceeds maximum available");
1433 return;
1434 }
1435
1436 if (pmu_num == 0) {
1437 pmu_mask = 0;
1438 } else {
1439 pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1440 }
1441
1442 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1443 cpu->cfg.pmu_mask = pmu_mask;
1444 cpu_option_add_user_setting("pmu-mask", pmu_mask);
1445 }
1446
prop_pmu_num_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1447 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1448 void *opaque, Error **errp)
1449 {
1450 RISCVCPU *cpu = RISCV_CPU(obj);
1451 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1452
1453 visit_type_uint8(v, name, &pmu_num, errp);
1454 }
1455
1456 static const PropertyInfo prop_pmu_num = {
1457 .type = "int8",
1458 .description = "pmu-num",
1459 .get = prop_pmu_num_get,
1460 .set = prop_pmu_num_set,
1461 };
1462
prop_pmu_mask_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1463 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1464 void *opaque, Error **errp)
1465 {
1466 RISCVCPU *cpu = RISCV_CPU(obj);
1467 uint32_t value;
1468 uint8_t pmu_num;
1469
1470 visit_type_uint32(v, name, &value, errp);
1471
1472 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1473 cpu_set_prop_err(cpu, name, errp);
1474 error_append_hint(errp, "Current '%s' val: %x\n",
1475 name, cpu->cfg.pmu_mask);
1476 return;
1477 }
1478
1479 pmu_num = ctpop32(value);
1480
1481 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1482 error_setg(errp, "Number of counters exceeds maximum available");
1483 return;
1484 }
1485
1486 cpu_option_add_user_setting(name, value);
1487 cpu->cfg.pmu_mask = value;
1488 }
1489
prop_pmu_mask_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1490 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1491 void *opaque, Error **errp)
1492 {
1493 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1494
1495 visit_type_uint8(v, name, &pmu_mask, errp);
1496 }
1497
1498 static const PropertyInfo prop_pmu_mask = {
1499 .type = "int8",
1500 .description = "pmu-mask",
1501 .get = prop_pmu_mask_get,
1502 .set = prop_pmu_mask_set,
1503 };
1504
prop_mmu_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1505 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1506 void *opaque, Error **errp)
1507 {
1508 RISCVCPU *cpu = RISCV_CPU(obj);
1509 bool value;
1510
1511 visit_type_bool(v, name, &value, errp);
1512
1513 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1514 cpu_set_prop_err(cpu, "mmu", errp);
1515 return;
1516 }
1517
1518 cpu_option_add_user_setting(name, value);
1519 cpu->cfg.mmu = value;
1520 }
1521
prop_mmu_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1522 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1523 void *opaque, Error **errp)
1524 {
1525 bool value = RISCV_CPU(obj)->cfg.mmu;
1526
1527 visit_type_bool(v, name, &value, errp);
1528 }
1529
1530 static const PropertyInfo prop_mmu = {
1531 .type = "bool",
1532 .description = "mmu",
1533 .get = prop_mmu_get,
1534 .set = prop_mmu_set,
1535 };
1536
prop_pmp_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1537 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1538 void *opaque, Error **errp)
1539 {
1540 RISCVCPU *cpu = RISCV_CPU(obj);
1541 bool value;
1542
1543 visit_type_bool(v, name, &value, errp);
1544
1545 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1546 cpu_set_prop_err(cpu, name, errp);
1547 return;
1548 }
1549
1550 cpu_option_add_user_setting(name, value);
1551 cpu->cfg.pmp = value;
1552 }
1553
prop_pmp_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1554 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1555 void *opaque, Error **errp)
1556 {
1557 bool value = RISCV_CPU(obj)->cfg.pmp;
1558
1559 visit_type_bool(v, name, &value, errp);
1560 }
1561
1562 static const PropertyInfo prop_pmp = {
1563 .type = "bool",
1564 .description = "pmp",
1565 .get = prop_pmp_get,
1566 .set = prop_pmp_set,
1567 };
1568
prop_num_pmp_regions_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1569 static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name,
1570 void *opaque, Error **errp)
1571 {
1572 RISCVCPU *cpu = RISCV_CPU(obj);
1573 uint8_t value;
1574
1575 visit_type_uint8(v, name, &value, errp);
1576
1577 if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
1578 cpu_set_prop_err(cpu, name, errp);
1579 return;
1580 }
1581
1582 if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > OLD_MAX_RISCV_PMPS) {
1583 error_setg(errp, "Number of PMP regions exceeds maximum available");
1584 return;
1585 } else if (value > MAX_RISCV_PMPS) {
1586 error_setg(errp, "Number of PMP regions exceeds maximum available");
1587 return;
1588 }
1589
1590 cpu_option_add_user_setting(name, value);
1591 cpu->cfg.pmp_regions = value;
1592 }
1593
prop_num_pmp_regions_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1594 static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name,
1595 void *opaque, Error **errp)
1596 {
1597 uint8_t value = RISCV_CPU(obj)->cfg.pmp_regions;
1598
1599 visit_type_uint8(v, name, &value, errp);
1600 }
1601
1602 static const PropertyInfo prop_num_pmp_regions = {
1603 .type = "uint8",
1604 .description = "num-pmp-regions",
1605 .get = prop_num_pmp_regions_get,
1606 .set = prop_num_pmp_regions_set,
1607 };
1608
priv_spec_from_str(const char * priv_spec_str)1609 static int priv_spec_from_str(const char *priv_spec_str)
1610 {
1611 int priv_version = -1;
1612
1613 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1614 priv_version = PRIV_VERSION_1_13_0;
1615 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1616 priv_version = PRIV_VERSION_1_12_0;
1617 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1618 priv_version = PRIV_VERSION_1_11_0;
1619 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1620 priv_version = PRIV_VERSION_1_10_0;
1621 }
1622
1623 return priv_version;
1624 }
1625
1626 const char *priv_spec_to_str(int priv_version)
1627 {
1628 switch (priv_version) {
1629 case PRIV_VERSION_1_10_0:
1630 return PRIV_VER_1_10_0_STR;
1631 case PRIV_VERSION_1_11_0:
1632 return PRIV_VER_1_11_0_STR;
1633 case PRIV_VERSION_1_12_0:
1634 return PRIV_VER_1_12_0_STR;
1635 case PRIV_VERSION_1_13_0:
1636 return PRIV_VER_1_13_0_STR;
1637 default:
1638 return NULL;
1639 }
1640 }
1641
1642 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1643 void *opaque, Error **errp)
1644 {
1645 RISCVCPU *cpu = RISCV_CPU(obj);
1646 g_autofree char *value = NULL;
1647 int priv_version = -1;
1648
1649 visit_type_str(v, name, &value, errp);
1650
1651 priv_version = priv_spec_from_str(value);
1652 if (priv_version < 0) {
1653 error_setg(errp, "Unsupported privilege spec version '%s'", value);
1654 return;
1655 }
1656
1657 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1658 cpu_set_prop_err(cpu, name, errp);
1659 error_append_hint(errp, "Current '%s' val: %s\n", name,
1660 object_property_get_str(obj, name, NULL));
1661 return;
1662 }
1663
1664 cpu_option_add_user_setting(name, priv_version);
1665 cpu->env.priv_ver = priv_version;
1666 }
1667
1668 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1669 void *opaque, Error **errp)
1670 {
1671 RISCVCPU *cpu = RISCV_CPU(obj);
1672 const char *value = priv_spec_to_str(cpu->env.priv_ver);
1673
1674 visit_type_str(v, name, (char **)&value, errp);
1675 }
1676
1677 static const PropertyInfo prop_priv_spec = {
1678 .type = "str",
1679 .description = "priv_spec",
1680 /* FIXME enum? */
1681 .get = prop_priv_spec_get,
1682 .set = prop_priv_spec_set,
1683 };
1684
1685 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1686 void *opaque, Error **errp)
1687 {
1688 RISCVCPU *cpu = RISCV_CPU(obj);
1689 g_autofree char *value = NULL;
1690
1691 visit_type_str(v, name, &value, errp);
1692
1693 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1694 error_setg(errp, "Unsupported vector spec version '%s'", value);
1695 return;
1696 }
1697
1698 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1699 cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1700 }
1701
1702 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1703 void *opaque, Error **errp)
1704 {
1705 const char *value = VEXT_VER_1_00_0_STR;
1706
1707 visit_type_str(v, name, (char **)&value, errp);
1708 }
1709
1710 static const PropertyInfo prop_vext_spec = {
1711 .type = "str",
1712 .description = "vext_spec",
1713 /* FIXME enum? */
1714 .get = prop_vext_spec_get,
1715 .set = prop_vext_spec_set,
1716 };
1717
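/*
 * The "vlen" property is exposed in bits but stored internally as
 * cfg.vlenb (bytes), hence the shifts by 3 below.
 */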
1718 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1719 void *opaque, Error **errp)
1720 {
1721 RISCVCPU *cpu = RISCV_CPU(obj);
1722 uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
1723 uint16_t value;
1724
1725 if (!visit_type_uint16(v, name, &value, errp)) {
1726 return;
1727 }
1728
1729 if (!is_power_of_2(value)) {
1730 error_setg(errp, "Vector extension VLEN must be power of 2");
1731 return;
1732 }
1733
1734 if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
1735 cpu_set_prop_err(cpu, name, errp);
1736 error_append_hint(errp, "Current '%s' val: %u\n",
1737 name, cpu_vlen);
1738 return;
1739 }
1740
1741 cpu_option_add_user_setting(name, value);
1742 cpu->cfg.vlenb = value >> 3;
1743 }
1744
1745 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1746 void *opaque, Error **errp)
1747 {
1748 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1749
1750 visit_type_uint16(v, name, &value, errp);
1751 }
1752
1753 static const PropertyInfo prop_vlen = {
1754 .type = "uint16",
1755 .description = "vlen",
1756 .get = prop_vlen_get,
1757 .set = prop_vlen_set,
1758 };
1759
1760 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1761 void *opaque, Error **errp)
1762 {
1763 RISCVCPU *cpu = RISCV_CPU(obj);
1764 uint16_t value;
1765
1766 if (!visit_type_uint16(v, name, &value, errp)) {
1767 return;
1768 }
1769
1770 if (!is_power_of_2(value)) {
1771 error_setg(errp, "Vector extension ELEN must be power of 2");
1772 return;
1773 }
1774
1775 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1776 cpu_set_prop_err(cpu, name, errp);
1777 error_append_hint(errp, "Current '%s' val: %u\n",
1778 name, cpu->cfg.elen);
1779 return;
1780 }
1781
1782 cpu_option_add_user_setting(name, value);
1783 cpu->cfg.elen = value;
1784 }
1785
1786 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1787 void *opaque, Error **errp)
1788 {
1789 uint16_t value = RISCV_CPU(obj)->cfg.elen;
1790
1791 visit_type_uint16(v, name, &value, errp);
1792 }
1793
1794 static const PropertyInfo prop_elen = {
1795 .type = "uint16",
1796 .description = "elen",
1797 .get = prop_elen_get,
1798 .set = prop_elen_set,
1799 };
1800
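/*
 * Block sizes, in bytes, used by the Zicbom/Zicbop/Zicboz cache-block
 * instructions. As with the other options, vendor CPUs cannot have
 * their fixed values overridden.
 */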
1801 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1802 void *opaque, Error **errp)
1803 {
1804 RISCVCPU *cpu = RISCV_CPU(obj);
1805 uint16_t value;
1806
1807 if (!visit_type_uint16(v, name, &value, errp)) {
1808 return;
1809 }
1810
1811 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1812 cpu_set_prop_err(cpu, name, errp);
1813 error_append_hint(errp, "Current '%s' val: %u\n",
1814 name, cpu->cfg.cbom_blocksize);
1815 return;
1816 }
1817
1818 cpu_option_add_user_setting(name, value);
1819 cpu->cfg.cbom_blocksize = value;
1820 }
1821
1822 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1823 void *opaque, Error **errp)
1824 {
1825 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1826
1827 visit_type_uint16(v, name, &value, errp);
1828 }
1829
1830 static const PropertyInfo prop_cbom_blksize = {
1831 .type = "uint16",
1832 .description = "cbom_blocksize",
1833 .get = prop_cbom_blksize_get,
1834 .set = prop_cbom_blksize_set,
1835 };
1836
1837 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
1838 void *opaque, Error **errp)
1839 {
1840 RISCVCPU *cpu = RISCV_CPU(obj);
1841 uint16_t value;
1842
1843 if (!visit_type_uint16(v, name, &value, errp)) {
1844 return;
1845 }
1846
1847 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
1848 cpu_set_prop_err(cpu, name, errp);
1849 error_append_hint(errp, "Current '%s' val: %u\n",
1850 name, cpu->cfg.cbop_blocksize);
1851 return;
1852 }
1853
1854 cpu_option_add_user_setting(name, value);
1855 cpu->cfg.cbop_blocksize = value;
1856 }
1857
1858 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
1859 void *opaque, Error **errp)
1860 {
1861 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
1862
1863 visit_type_uint16(v, name, &value, errp);
1864 }
1865
1866 static const PropertyInfo prop_cbop_blksize = {
1867 .type = "uint16",
1868 .description = "cbop_blocksize",
1869 .get = prop_cbop_blksize_get,
1870 .set = prop_cbop_blksize_set,
1871 };
1872
1873 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
1874 void *opaque, Error **errp)
1875 {
1876 RISCVCPU *cpu = RISCV_CPU(obj);
1877 uint16_t value;
1878
1879 if (!visit_type_uint16(v, name, &value, errp)) {
1880 return;
1881 }
1882
1883 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
1884 cpu_set_prop_err(cpu, name, errp);
1885 error_append_hint(errp, "Current '%s' val: %u\n",
1886 name, cpu->cfg.cboz_blocksize);
1887 return;
1888 }
1889
1890 cpu_option_add_user_setting(name, value);
1891 cpu->cfg.cboz_blocksize = value;
1892 }
1893
1894 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
1895 void *opaque, Error **errp)
1896 {
1897 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
1898
1899 visit_type_uint16(v, name, &value, errp);
1900 }
1901
1902 static const PropertyInfo prop_cboz_blksize = {
1903 .type = "uint16",
1904 .description = "cboz_blocksize",
1905 .get = prop_cboz_blksize_get,
1906 .set = prop_cboz_blksize_set,
1907 };
1908
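/*
 * The machine identification registers (mvendorid, mimpid, marchid)
 * may only be overridden on dynamic CPU models; other models keep the
 * values baked into their definitions.
 */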
1909 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
1910 void *opaque, Error **errp)
1911 {
1912 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1913 RISCVCPU *cpu = RISCV_CPU(obj);
1914 uint32_t prev_val = cpu->cfg.mvendorid;
1915 uint32_t value;
1916
1917 if (!visit_type_uint32(v, name, &value, errp)) {
1918 return;
1919 }
1920
1921 if (!dynamic_cpu && prev_val != value) {
1922 error_setg(errp, "Unable to change %s mvendorid (0x%x)",
1923 object_get_typename(obj), prev_val);
1924 return;
1925 }
1926
1927 cpu->cfg.mvendorid = value;
1928 }
1929
1930 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
1931 void *opaque, Error **errp)
1932 {
1933 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
1934
1935 visit_type_uint32(v, name, &value, errp);
1936 }
1937
1938 static const PropertyInfo prop_mvendorid = {
1939 .type = "uint32",
1940 .description = "mvendorid",
1941 .get = prop_mvendorid_get,
1942 .set = prop_mvendorid_set,
1943 };
1944
1945 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
1946 void *opaque, Error **errp)
1947 {
1948 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1949 RISCVCPU *cpu = RISCV_CPU(obj);
1950 uint64_t prev_val = cpu->cfg.mimpid;
1951 uint64_t value;
1952
1953 if (!visit_type_uint64(v, name, &value, errp)) {
1954 return;
1955 }
1956
1957 if (!dynamic_cpu && prev_val != value) {
1958 error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
1959 object_get_typename(obj), prev_val);
1960 return;
1961 }
1962
1963 cpu->cfg.mimpid = value;
1964 }
1965
1966 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
1967 void *opaque, Error **errp)
1968 {
1969 uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
1970
1971 visit_type_uint64(v, name, &value, errp);
1972 }
1973
1974 static const PropertyInfo prop_mimpid = {
1975 .type = "uint64",
1976 .description = "mimpid",
1977 .get = prop_mimpid_get,
1978 .set = prop_mimpid_set,
1979 };
1980
1981 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
1982 void *opaque, Error **errp)
1983 {
1984 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
1985 RISCVCPU *cpu = RISCV_CPU(obj);
1986 uint64_t prev_val = cpu->cfg.marchid;
1987 uint64_t value, invalid_val;
1988 uint32_t mxlen = 0;
1989
1990 if (!visit_type_uint64(v, name, &value, errp)) {
1991 return;
1992 }
1993
1994 if (!dynamic_cpu && prev_val != value) {
1995 error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
1996 object_get_typename(obj), prev_val);
1997 return;
1998 }
1999
2000 switch (riscv_cpu_mxl(&cpu->env)) {
2001 case MXL_RV32:
2002 mxlen = 32;
2003 break;
2004 case MXL_RV64:
2005 case MXL_RV128:
2006 mxlen = 64;
2007 break;
2008 default:
2009 g_assert_not_reached();
2010 }
2011
2012 invalid_val = 1ULL << (mxlen - 1);
2013
2014 if (value == invalid_val) {
2015 error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2016 "and the remaining bits zero", mxlen);
2017 return;
2018 }
2019
2020 cpu->cfg.marchid = value;
2021 }
2022
2023 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2024 void *opaque, Error **errp)
2025 {
2026 uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2027
2028 visit_type_uint64(v, name, &value, errp);
2029 }
2030
2031 static const PropertyInfo prop_marchid = {
2032 .type = "uint64",
2033 .description = "marchid",
2034 .get = prop_marchid_get,
2035 .set = prop_marchid_set,
2036 };
2037
2038 /*
2039 * RVA22U64 defines some 'named features' that are cache
2040 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
2041 * and Zicclsm. They are always implemented in TCG and
2042 * don't need to be manually enabled by the profile.
2043 */
2044 static RISCVCPUProfile RVA22U64 = {
2045 .u_parent = NULL,
2046 .s_parent = NULL,
2047 .name = "rva22u64",
2048 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
2049 .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2050 .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2051 .ext_offsets = {
2052 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
2053 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
2054 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
2055 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
2056 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
2057 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
2058
2059 /* mandatory named features for this profile */
2060 CPU_CFG_OFFSET(ext_zic64b),
2061
2062 RISCV_PROFILE_EXT_LIST_END
2063 }
2064 };
2065
2066 /*
2067 * As with RVA22U64, RVA22S64 also defines 'named features'.
2068 *
2069 * Cache related features that we consider enabled since we don't
2070 * implement cache: Ssccptr
2071 *
2072 * Other named features that we already implement: Sstvecd, Sstvala,
2073 * Sscounterenw
2074 *
2075 * The remaining features/extensions come from RVA22U64.
2076 */
2077 static RISCVCPUProfile RVA22S64 = {
2078 .u_parent = &RVA22U64,
2079 .s_parent = NULL,
2080 .name = "rva22s64",
2081 .misa_ext = RVS,
2082 .priv_spec = PRIV_VERSION_1_12_0,
2083 .satp_mode = VM_1_10_SV39,
2084 .ext_offsets = {
2085 /* rva22s64 exts */
2086 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
2087 CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),
2088
2089 RISCV_PROFILE_EXT_LIST_END
2090 }
2091 };
2092
2093 /*
2094 * All mandatory extensions from RVA22U64 are present
2095 * in RVA23U64, so RVA22U64 is set as the parent. We only need
2096 * to declare the newly added mandatory extensions.
2097 */
2098 static RISCVCPUProfile RVA23U64 = {
2099 .u_parent = &RVA22U64,
2100 .s_parent = NULL,
2101 .name = "rva23u64",
2102 .misa_ext = RVV,
2103 .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2104 .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
2105 .ext_offsets = {
2106 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
2107 CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
2108 CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
2109 CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
2110 CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
2111 CPU_CFG_OFFSET(ext_supm),
2112
2113 RISCV_PROFILE_EXT_LIST_END
2114 }
2115 };
2116
2117 /*
2118 * As with RVA22S64, RVA23S64 also defines 'named features'.
2119 *
2120 * Cache related features that we consider enabled since we don't
2121 * implement cache: Ssccptr
2122 *
2123 * Other named features that we already implement: Sstvecd, Sstvala,
2124 * Sscounterenw, Ssu64xl
2125 *
2126 * The remaining features/extensions come from its parent profiles,
 * RVA23U64 and RVA22S64.
2127 */
2128 static RISCVCPUProfile RVA23S64 = {
2129 .u_parent = &RVA23U64,
2130 .s_parent = &RVA22S64,
2131 .name = "rva23s64",
2132 .misa_ext = RVS,
2133 .priv_spec = PRIV_VERSION_1_13_0,
2134 .satp_mode = VM_1_10_SV39,
2135 .ext_offsets = {
2136 /* New in RVA23S64 */
2137 CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
2138 CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),
2139
2140 /* Named features: Sha */
2141 CPU_CFG_OFFSET(ext_sha),
2142
2143 RISCV_PROFILE_EXT_LIST_END
2144 }
2145 };
2146
2147 RISCVCPUProfile *riscv_profiles[] = {
2148 &RVA22U64,
2149 &RVA22S64,
2150 &RVA23U64,
2151 &RVA23S64,
2152 NULL,
2153 };
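/*
 * This list backs the profile support: each profile is exposed as a
 * boolean CPU flag and also as one of the dedicated profile CPU types
 * declared at the bottom of this file (e.g. "rva22s64").
 */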
2154
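/*
 * Implied-extension rules: enabling the extension identified by .ext
 * also enables the MISA bits in .implied_misa_exts and every
 * multi-letter extension listed in .implied_multi_exts. The rules are
 * applied when the CPU configuration is finalized.
 */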
2155 static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
2156 .is_misa = true,
2157 .ext = RVA,
2158 .implied_multi_exts = {
2159 CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
2160
2161 RISCV_IMPLIED_EXTS_RULE_END
2162 },
2163 };
2164
2165 static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
2166 .is_misa = true,
2167 .ext = RVD,
2168 .implied_misa_exts = RVF,
2169 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2170 };
2171
2172 static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
2173 .is_misa = true,
2174 .ext = RVF,
2175 .implied_multi_exts = {
2176 CPU_CFG_OFFSET(ext_zicsr),
2177
2178 RISCV_IMPLIED_EXTS_RULE_END
2179 },
2180 };
2181
2182 static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
2183 .is_misa = true,
2184 .ext = RVM,
2185 .implied_multi_exts = {
2186 CPU_CFG_OFFSET(ext_zmmul),
2187
2188 RISCV_IMPLIED_EXTS_RULE_END
2189 },
2190 };
2191
2192 static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
2193 .is_misa = true,
2194 .ext = RVV,
2195 .implied_multi_exts = {
2196 CPU_CFG_OFFSET(ext_zve64d),
2197
2198 RISCV_IMPLIED_EXTS_RULE_END
2199 },
2200 };
2201
2202 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
2203 .ext = CPU_CFG_OFFSET(ext_zcb),
2204 .implied_multi_exts = {
2205 CPU_CFG_OFFSET(ext_zca),
2206
2207 RISCV_IMPLIED_EXTS_RULE_END
2208 },
2209 };
2210
2211 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
2212 .ext = CPU_CFG_OFFSET(ext_zcd),
2213 .implied_misa_exts = RVD,
2214 .implied_multi_exts = {
2215 CPU_CFG_OFFSET(ext_zca),
2216
2217 RISCV_IMPLIED_EXTS_RULE_END
2218 },
2219 };
2220
2221 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
2222 .ext = CPU_CFG_OFFSET(ext_zce),
2223 .implied_multi_exts = {
2224 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
2225 CPU_CFG_OFFSET(ext_zcmt),
2226
2227 RISCV_IMPLIED_EXTS_RULE_END
2228 },
2229 };
2230
2231 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
2232 .ext = CPU_CFG_OFFSET(ext_zcf),
2233 .implied_misa_exts = RVF,
2234 .implied_multi_exts = {
2235 CPU_CFG_OFFSET(ext_zca),
2236
2237 RISCV_IMPLIED_EXTS_RULE_END
2238 },
2239 };
2240
2241 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
2242 .ext = CPU_CFG_OFFSET(ext_zcmp),
2243 .implied_multi_exts = {
2244 CPU_CFG_OFFSET(ext_zca),
2245
2246 RISCV_IMPLIED_EXTS_RULE_END
2247 },
2248 };
2249
2250 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
2251 .ext = CPU_CFG_OFFSET(ext_zcmt),
2252 .implied_multi_exts = {
2253 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
2254
2255 RISCV_IMPLIED_EXTS_RULE_END
2256 },
2257 };
2258
2259 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
2260 .ext = CPU_CFG_OFFSET(ext_zdinx),
2261 .implied_multi_exts = {
2262 CPU_CFG_OFFSET(ext_zfinx),
2263
2264 RISCV_IMPLIED_EXTS_RULE_END
2265 },
2266 };
2267
2268 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
2269 .ext = CPU_CFG_OFFSET(ext_zfa),
2270 .implied_misa_exts = RVF,
2271 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2272 };
2273
2274 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
2275 .ext = CPU_CFG_OFFSET(ext_zfbfmin),
2276 .implied_misa_exts = RVF,
2277 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2278 };
2279
2280 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
2281 .ext = CPU_CFG_OFFSET(ext_zfh),
2282 .implied_multi_exts = {
2283 CPU_CFG_OFFSET(ext_zfhmin),
2284
2285 RISCV_IMPLIED_EXTS_RULE_END
2286 },
2287 };
2288
2289 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
2290 .ext = CPU_CFG_OFFSET(ext_zfhmin),
2291 .implied_misa_exts = RVF,
2292 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
2293 };
2294
2295 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
2296 .ext = CPU_CFG_OFFSET(ext_zfinx),
2297 .implied_multi_exts = {
2298 CPU_CFG_OFFSET(ext_zicsr),
2299
2300 RISCV_IMPLIED_EXTS_RULE_END
2301 },
2302 };
2303
2304 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
2305 .ext = CPU_CFG_OFFSET(ext_zhinx),
2306 .implied_multi_exts = {
2307 CPU_CFG_OFFSET(ext_zhinxmin),
2308
2309 RISCV_IMPLIED_EXTS_RULE_END
2310 },
2311 };
2312
2313 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
2314 .ext = CPU_CFG_OFFSET(ext_zhinxmin),
2315 .implied_multi_exts = {
2316 CPU_CFG_OFFSET(ext_zfinx),
2317
2318 RISCV_IMPLIED_EXTS_RULE_END
2319 },
2320 };
2321
2322 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
2323 .ext = CPU_CFG_OFFSET(ext_zicntr),
2324 .implied_multi_exts = {
2325 CPU_CFG_OFFSET(ext_zicsr),
2326
2327 RISCV_IMPLIED_EXTS_RULE_END
2328 },
2329 };
2330
2331 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
2332 .ext = CPU_CFG_OFFSET(ext_zihpm),
2333 .implied_multi_exts = {
2334 CPU_CFG_OFFSET(ext_zicsr),
2335
2336 RISCV_IMPLIED_EXTS_RULE_END
2337 },
2338 };
2339
2340 static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
2341 .ext = CPU_CFG_OFFSET(ext_zk),
2342 .implied_multi_exts = {
2343 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
2344 CPU_CFG_OFFSET(ext_zkt),
2345
2346 RISCV_IMPLIED_EXTS_RULE_END
2347 },
2348 };
2349
2350 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
2351 .ext = CPU_CFG_OFFSET(ext_zkn),
2352 .implied_multi_exts = {
2353 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2354 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
2355 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
2356
2357 RISCV_IMPLIED_EXTS_RULE_END
2358 },
2359 };
2360
2361 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
2362 .ext = CPU_CFG_OFFSET(ext_zks),
2363 .implied_multi_exts = {
2364 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
2365 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
2366 CPU_CFG_OFFSET(ext_zksh),
2367
2368 RISCV_IMPLIED_EXTS_RULE_END
2369 },
2370 };
2371
2372 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
2373 .ext = CPU_CFG_OFFSET(ext_zvbb),
2374 .implied_multi_exts = {
2375 CPU_CFG_OFFSET(ext_zvkb),
2376
2377 RISCV_IMPLIED_EXTS_RULE_END
2378 },
2379 };
2380
2381 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
2382 .ext = CPU_CFG_OFFSET(ext_zve32f),
2383 .implied_misa_exts = RVF,
2384 .implied_multi_exts = {
2385 CPU_CFG_OFFSET(ext_zve32x),
2386
2387 RISCV_IMPLIED_EXTS_RULE_END
2388 },
2389 };
2390
2391 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
2392 .ext = CPU_CFG_OFFSET(ext_zve32x),
2393 .implied_multi_exts = {
2394 CPU_CFG_OFFSET(ext_zicsr),
2395
2396 RISCV_IMPLIED_EXTS_RULE_END
2397 },
2398 };
2399
2400 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
2401 .ext = CPU_CFG_OFFSET(ext_zve64d),
2402 .implied_misa_exts = RVD,
2403 .implied_multi_exts = {
2404 CPU_CFG_OFFSET(ext_zve64f),
2405
2406 RISCV_IMPLIED_EXTS_RULE_END
2407 },
2408 };
2409
2410 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
2411 .ext = CPU_CFG_OFFSET(ext_zve64f),
2412 .implied_misa_exts = RVF,
2413 .implied_multi_exts = {
2414 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
2415
2416 RISCV_IMPLIED_EXTS_RULE_END
2417 },
2418 };
2419
2420 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
2421 .ext = CPU_CFG_OFFSET(ext_zve64x),
2422 .implied_multi_exts = {
2423 CPU_CFG_OFFSET(ext_zve32x),
2424
2425 RISCV_IMPLIED_EXTS_RULE_END
2426 },
2427 };
2428
2429 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
2430 .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
2431 .implied_multi_exts = {
2432 CPU_CFG_OFFSET(ext_zve32f),
2433
2434 RISCV_IMPLIED_EXTS_RULE_END
2435 },
2436 };
2437
2438 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
2439 .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
2440 .implied_multi_exts = {
2441 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
2442
2443 RISCV_IMPLIED_EXTS_RULE_END
2444 },
2445 };
2446
2447 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
2448 .ext = CPU_CFG_OFFSET(ext_zvfh),
2449 .implied_multi_exts = {
2450 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
2451
2452 RISCV_IMPLIED_EXTS_RULE_END
2453 },
2454 };
2455
2456 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
2457 .ext = CPU_CFG_OFFSET(ext_zvfhmin),
2458 .implied_multi_exts = {
2459 CPU_CFG_OFFSET(ext_zve32f),
2460
2461 RISCV_IMPLIED_EXTS_RULE_END
2462 },
2463 };
2464
2465 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
2466 .ext = CPU_CFG_OFFSET(ext_zvkn),
2467 .implied_multi_exts = {
2468 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
2469 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2470
2471 RISCV_IMPLIED_EXTS_RULE_END
2472 },
2473 };
2474
2475 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
2476 .ext = CPU_CFG_OFFSET(ext_zvknc),
2477 .implied_multi_exts = {
2478 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
2479
2480 RISCV_IMPLIED_EXTS_RULE_END
2481 },
2482 };
2483
2484 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
2485 .ext = CPU_CFG_OFFSET(ext_zvkng),
2486 .implied_multi_exts = {
2487 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
2488
2489 RISCV_IMPLIED_EXTS_RULE_END
2490 },
2491 };
2492
2493 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
2494 .ext = CPU_CFG_OFFSET(ext_zvknhb),
2495 .implied_multi_exts = {
2496 CPU_CFG_OFFSET(ext_zve64x),
2497
2498 RISCV_IMPLIED_EXTS_RULE_END
2499 },
2500 };
2501
2502 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
2503 .ext = CPU_CFG_OFFSET(ext_zvks),
2504 .implied_multi_exts = {
2505 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
2506 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
2507
2508 RISCV_IMPLIED_EXTS_RULE_END
2509 },
2510 };
2511
2512 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
2513 .ext = CPU_CFG_OFFSET(ext_zvksc),
2514 .implied_multi_exts = {
2515 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
2516
2517 RISCV_IMPLIED_EXTS_RULE_END
2518 },
2519 };
2520
2521 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
2522 .ext = CPU_CFG_OFFSET(ext_zvksg),
2523 .implied_multi_exts = {
2524 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
2525
2526 RISCV_IMPLIED_EXTS_RULE_END
2527 },
2528 };
2529
2530 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
2531 .ext = CPU_CFG_OFFSET(ext_ssccfg),
2532 .implied_multi_exts = {
2533 CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
2534 CPU_CFG_OFFSET(ext_smcdeleg),
2535
2536 RISCV_IMPLIED_EXTS_RULE_END
2537 },
2538 };
2539
2540 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
2541 .ext = CPU_CFG_OFFSET(ext_supm),
2542 .implied_multi_exts = {
2543 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),
2544
2545 RISCV_IMPLIED_EXTS_RULE_END
2546 },
2547 };
2548
2549 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
2550 .ext = CPU_CFG_OFFSET(ext_sspm),
2551 .implied_multi_exts = {
2552 CPU_CFG_OFFSET(ext_smnpm),
2553
2554 RISCV_IMPLIED_EXTS_RULE_END
2555 },
2556 };
2557
2558 static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = {
2559 .ext = CPU_CFG_OFFSET(ext_smctr),
2560 .implied_misa_exts = RVS,
2561 .implied_multi_exts = {
2562 CPU_CFG_OFFSET(ext_sscsrind),
2563
2564 RISCV_IMPLIED_EXTS_RULE_END
2565 },
2566 };
2567
2568 static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = {
2569 .ext = CPU_CFG_OFFSET(ext_ssctr),
2570 .implied_misa_exts = RVS,
2571 .implied_multi_exts = {
2572 CPU_CFG_OFFSET(ext_sscsrind),
2573
2574 RISCV_IMPLIED_EXTS_RULE_END
2575 },
2576 };
2577
2578 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
2579 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
2580 &RVM_IMPLIED, &RVV_IMPLIED, NULL
2581 };
2582
2583 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
2584 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
2585 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
2586 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
2587 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
2588 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
2589 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
2590 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
2591 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
2592 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
2593 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
2594 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
2595 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
2596 &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED,
2597 NULL
2598 };
2599
2600 static const Property riscv_cpu_properties[] = {
2601 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2602
2603 {.name = "pmu-mask", .info = &prop_pmu_mask},
2604 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2605
2606 {.name = "mmu", .info = &prop_mmu},
2607 {.name = "pmp", .info = &prop_pmp},
2608 {.name = "num-pmp-regions", .info = &prop_num_pmp_regions},
2609
2610 {.name = "priv_spec", .info = &prop_priv_spec},
2611 {.name = "vext_spec", .info = &prop_vext_spec},
2612
2613 {.name = "vlen", .info = &prop_vlen},
2614 {.name = "elen", .info = &prop_elen},
2615
2616 {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2617 {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2618 {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2619
2620 {.name = "mvendorid", .info = &prop_mvendorid},
2621 {.name = "mimpid", .info = &prop_mimpid},
2622 {.name = "marchid", .info = &prop_marchid},
2623
2624 #ifndef CONFIG_USER_ONLY
2625 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2626 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec,
2627 DEFAULT_RNMI_IRQVEC),
2628 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec,
2629 DEFAULT_RNMI_EXCPVEC),
2630 #endif
2631
2632 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2633
2634 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2635 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2636 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),
2637 DEFINE_PROP_BOOL("rvv_vsetvl_x0_vill", RISCVCPU, cfg.rvv_vsetvl_x0_vill, false),
2638
2639 /*
2640 * write_misa() is marked as experimental for now, so the property
2641 * carries the experimental 'x-' prefix and defaults to 'false'.
2642 */
2643 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2644 };
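/*
 * The properties above are settable from the command line; an
 * illustrative example (values chosen arbitrarily) would be:
 *
 *   -cpu max,vlen=256,elen=64,cbom_blocksize=64
 */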
2645
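/* gdbstub architecture name; RV128 reuses the rv64 description. */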
2646 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2647 {
2648 RISCVCPU *cpu = RISCV_CPU(cs);
2649 CPURISCVState *env = &cpu->env;
2650
2651 switch (riscv_cpu_mxl(env)) {
2652 case MXL_RV32:
2653 return "riscv:rv32";
2654 case MXL_RV64:
2655 case MXL_RV128:
2656 return "riscv:rv64";
2657 default:
2658 g_assert_not_reached();
2659 }
2660 }
2661
2662 #ifndef CONFIG_USER_ONLY
2663 static int64_t riscv_get_arch_id(CPUState *cs)
2664 {
2665 RISCVCPU *cpu = RISCV_CPU(cs);
2666
2667 return cpu->env.mhartid;
2668 }
2669
2670 #include "hw/core/sysemu-cpu-ops.h"
2671
2672 static const struct SysemuCPUOps riscv_sysemu_ops = {
2673 .has_work = riscv_cpu_has_work,
2674 .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2675 .write_elf64_note = riscv_cpu_write_elf64_note,
2676 .write_elf32_note = riscv_cpu_write_elf32_note,
2677 .legacy_vmsd = &vmstate_riscv_cpu,
2678 };
2679 #endif
2680
2681 static void riscv_cpu_common_class_init(ObjectClass *c, const void *data)
2682 {
2683 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2684 CPUClass *cc = CPU_CLASS(c);
2685 DeviceClass *dc = DEVICE_CLASS(c);
2686 ResettableClass *rc = RESETTABLE_CLASS(c);
2687
2688 device_class_set_parent_realize(dc, riscv_cpu_realize,
2689 &mcc->parent_realize);
2690
2691 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2692 &mcc->parent_phases);
2693
2694 cc->class_by_name = riscv_cpu_class_by_name;
2695 cc->dump_state = riscv_cpu_dump_state;
2696 cc->set_pc = riscv_cpu_set_pc;
2697 cc->get_pc = riscv_cpu_get_pc;
2698 cc->gdb_read_register = riscv_cpu_gdb_read_register;
2699 cc->gdb_write_register = riscv_cpu_gdb_write_register;
2700 cc->gdb_stop_before_watchpoint = true;
2701 cc->disas_set_info = riscv_cpu_disas_set_info;
2702 #ifndef CONFIG_USER_ONLY
2703 cc->sysemu_ops = &riscv_sysemu_ops;
2704 cc->get_arch_id = riscv_get_arch_id;
2705 #endif
2706 cc->gdb_arch_name = riscv_gdb_arch_name;
2707 #ifdef CONFIG_TCG
2708 cc->tcg_ops = &riscv_tcg_ops;
2709 #endif /* CONFIG_TCG */
2710
2711 device_class_set_props(dc, riscv_cpu_properties);
2712 }
2713
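/*
 * Check whether 'trial' extends 'parent' by walking both the
 * unprivileged and supervisor parent chains; a NULL parent is
 * trivially extended.
 */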
2714 static bool profile_extends(RISCVCPUProfile *trial, RISCVCPUProfile *parent)
2715 {
2716 RISCVCPUProfile *curr;
2717 if (!parent) {
2718 return true;
2719 }
2720
2721 curr = trial;
2722 while (curr) {
2723 if (curr == parent) {
2724 return true;
2725 }
2726 curr = curr->u_parent;
2727 }
2728
2729 curr = trial;
2730 while (curr) {
2731 if (curr == parent) {
2732 return true;
2733 }
2734 curr = curr->s_parent;
2735 }
2736
2737 return false;
2738 }
2739
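/*
 * Merge the RISCVCPUDef passed as class_data into this class's
 * definition, starting from a copy of the parent class definition.
 */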
2740 static void riscv_cpu_class_base_init(ObjectClass *c, const void *data)
2741 {
2742 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2743 RISCVCPUClass *pcc = RISCV_CPU_CLASS(object_class_get_parent(c));
2744
2745 if (pcc->def) {
2746 mcc->def = g_memdup2(pcc->def, sizeof(*pcc->def));
2747 } else {
2748 mcc->def = g_new0(RISCVCPUDef, 1);
2749 }
2750
2751 if (data) {
2752 const RISCVCPUDef *def = data;
2753 mcc->def->bare |= def->bare;
2754 if (def->profile) {
2755 assert(profile_extends(def->profile, mcc->def->profile));
2756 assert(mcc->def->bare);
2757 mcc->def->profile = def->profile;
2758 }
2759 if (def->misa_mxl_max) {
2760 assert(def->misa_mxl_max <= MXL_RV128);
2761 mcc->def->misa_mxl_max = def->misa_mxl_max;
2762
2763 #ifndef CONFIG_USER_ONLY
2764 /*
2765 * Hack to simplify CPU class hierarchies that include both 32- and
2766 * 64-bit models: reduce SV39/48/57/64 to SV32 for 32-bit models.
2767 */
2768 if (mcc->def->misa_mxl_max == MXL_RV32 &&
2769 !valid_vm_1_10_32[mcc->def->cfg.max_satp_mode]) {
2770 mcc->def->cfg.max_satp_mode = VM_1_10_SV32;
2771 }
2772 #endif
2773 }
2774 if (def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
2775 assert(def->priv_spec <= PRIV_VERSION_LATEST);
2776 mcc->def->priv_spec = def->priv_spec;
2777 }
2778 if (def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
2779 assert(def->vext_spec != 0);
2780 mcc->def->vext_spec = def->vext_spec;
2781 }
2782 mcc->def->misa_ext |= def->misa_ext;
2783
2784 riscv_cpu_cfg_merge(&mcc->def->cfg, &def->cfg);
2785
2786 if (def->custom_csrs) {
2787 assert(!mcc->def->custom_csrs);
2788 mcc->def->custom_csrs = def->custom_csrs;
2789 }
2790 }
2791
2792 if (!object_class_is_abstract(c)) {
2793 riscv_cpu_validate_misa_mxl(mcc);
2794 }
2795 }
2796
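/*
 * Append "_<name>" for every enabled multi-letter extension to
 * *isa_str, reallocating the string as it grows.
 */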
2797 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2798 int max_str_len)
2799 {
2800 const RISCVIsaExtData *edata;
2801 char *old = *isa_str;
2802 char *new = *isa_str;
2803
2804 for (edata = isa_edata_arr; edata && edata->name; edata++) {
2805 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2806 new = g_strconcat(old, "_", edata->name, NULL);
2807 g_free(old);
2808 old = new;
2809 }
2810 }
2811
2812 *isa_str = new;
2813 }
2814
2815 char *riscv_isa_string(RISCVCPU *cpu)
2816 {
2817 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2818 int i;
2819 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2820 char *isa_str = g_new(char, maxlen);
2821 int xlen = riscv_cpu_max_xlen(mcc);
2822 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2823
2824 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2825 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2826 *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2827 }
2828 }
2829 *p = '\0';
2830 if (!cpu->cfg.short_isa_string) {
2831 riscv_isa_string_ext(cpu, &isa_str, maxlen);
2832 }
2833 return isa_str;
2834 }
2835
2836 #ifndef CONFIG_USER_ONLY
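/*
 * Build the array of enabled extension names (single-letter
 * extensions first, then multi-letter ones) used for the
 * "riscv,isa-extensions" device tree property; *count is set to the
 * number of entries.
 */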
2837 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2838 {
2839 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2840 char **extensions = g_new(char *, maxlen);
2841
2842 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2843 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2844 extensions[*count] = g_new(char, 2);
2845 snprintf(extensions[*count], 2, "%c",
2846 qemu_tolower(riscv_single_letter_exts[i]));
2847 (*count)++;
2848 }
2849 }
2850
2851 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2852 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2853 extensions[*count] = g_strdup(edata->name);
2854 (*count)++;
2855 }
2856 }
2857
2858 return extensions;
2859 }
2860
2861 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2862 {
2863 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2864 const size_t maxlen = sizeof("rv128i");
2865 g_autofree char *isa_base = g_new(char, maxlen);
2866 g_autofree char *riscv_isa;
2867 char **isa_extensions;
2868 int count = 0;
2869 int xlen = riscv_cpu_max_xlen(mcc);
2870
2871 riscv_isa = riscv_isa_string(cpu);
2872 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2873
2874 snprintf(isa_base, maxlen, "rv%di", xlen);
2875 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2876
2877 isa_extensions = riscv_isa_extensions_list(cpu, &count);
2878 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2879 isa_extensions, count);
2880
2881 for (int i = 0; i < count; i++) {
2882 g_free(isa_extensions[i]);
2883 }
2884
2885 g_free(isa_extensions);
2886 }
2887 #endif
2888
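/*
 * Helpers for declaring CPU TypeInfo entries. The RISCVCPUDef passed
 * as class_data is merged into the class by riscv_cpu_class_base_init();
 * the defaults below mark the priv/vext spec and satp mode as unset.
 */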
2889 #define DEFINE_ABSTRACT_RISCV_CPU(type_name, parent_type_name, ...) \
2890 { \
2891 .name = (type_name), \
2892 .parent = (parent_type_name), \
2893 .abstract = true, \
2894 .class_data = &(const RISCVCPUDef) { \
2895 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \
2896 .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \
2897 .cfg.max_satp_mode = -1, \
2898 __VA_ARGS__ \
2899 }, \
2900 }
2901
2902 #define DEFINE_RISCV_CPU(type_name, parent_type_name, ...) \
2903 { \
2904 .name = (type_name), \
2905 .parent = (parent_type_name), \
2906 .class_data = &(const RISCVCPUDef) { \
2907 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \
2908 .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \
2909 .cfg.max_satp_mode = -1, \
2910 __VA_ARGS__ \
2911 }, \
2912 }
2913
2914 #define DEFINE_PROFILE_CPU(type_name, parent_type_name, profile_) \
2915 DEFINE_RISCV_CPU(type_name, parent_type_name, \
2916 .profile = &(profile_))
2917
2918 static const TypeInfo riscv_cpu_type_infos[] = {
2919 {
2920 .name = TYPE_RISCV_CPU,
2921 .parent = TYPE_CPU,
2922 .instance_size = sizeof(RISCVCPU),
2923 .instance_align = __alignof(RISCVCPU),
2924 .instance_init = riscv_cpu_init,
2925 .abstract = true,
2926 .class_size = sizeof(RISCVCPUClass),
2927 .class_init = riscv_cpu_common_class_init,
2928 .class_base_init = riscv_cpu_class_base_init,
2929 },
2930
2931 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU,
2932 .cfg.mmu = true,
2933 .cfg.pmp = true,
2934 .priv_spec = PRIV_VERSION_LATEST,
2935 ),
2936
2937 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_VENDOR_CPU, TYPE_RISCV_CPU),
2938 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_BARE_CPU, TYPE_RISCV_CPU,
2939 /*
2940 * Bare CPUs do not inherit the timer and performance
2941 * counters from the parent class (see riscv_cpu_init()
2942 * for info on why the parent enables them).
2943 *
2944 * Users have to explicitly enable these counters for
2945 * bare CPUs.
2946 */
2947 .bare = true,
2948
2949 /* Set to QEMU's first supported priv version */
2950 .priv_spec = PRIV_VERSION_1_10_0,
2951
2952 /*
2953 * Support all available satp_mode settings. By default
2954 * only MBARE will be available if the user doesn't enable
2955 * a mode manually (see riscv_cpu_satp_mode_finalize()).
2956 */
2957 #ifdef TARGET_RISCV32
2958 .cfg.max_satp_mode = VM_1_10_SV32,
2959 #else
2960 .cfg.max_satp_mode = VM_1_10_SV57,
2961 #endif
2962 ),
2963
2964 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX, TYPE_RISCV_DYNAMIC_CPU,
2965 #if defined(TARGET_RISCV32)
2966 .misa_mxl_max = MXL_RV32,
2967 .cfg.max_satp_mode = VM_1_10_SV32,
2968 #elif defined(TARGET_RISCV64)
2969 .misa_mxl_max = MXL_RV64,
2970 .cfg.max_satp_mode = VM_1_10_SV57,
2971 #endif
2972 ),
2973
2974 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E, TYPE_RISCV_VENDOR_CPU,
2975 .misa_ext = RVI | RVM | RVA | RVC | RVU,
2976 .priv_spec = PRIV_VERSION_1_10_0,
2977 .cfg.max_satp_mode = VM_1_10_MBARE,
2978 .cfg.ext_zifencei = true,
2979 .cfg.ext_zicsr = true,
2980 .cfg.pmp = true,
2981 .cfg.pmp_regions = 8
2982 ),
2983
2984 DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU,
2985 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU,
2986 .priv_spec = PRIV_VERSION_1_10_0,
2987
2988 .cfg.max_satp_mode = VM_1_10_SV39,
2989 .cfg.ext_zifencei = true,
2990 .cfg.ext_zicsr = true,
2991 .cfg.mmu = true,
2992 .cfg.pmp = true,
2993 .cfg.pmp_regions = 8
2994 ),
2995
2996 #if defined(TARGET_RISCV32) || \
2997 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
2998 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE32, TYPE_RISCV_DYNAMIC_CPU,
2999 .cfg.max_satp_mode = VM_1_10_SV32,
3000 .misa_mxl_max = MXL_RV32,
3001 ),
3002
3003 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_IBEX, TYPE_RISCV_VENDOR_CPU,
3004 .misa_mxl_max = MXL_RV32,
3005 .misa_ext = RVI | RVM | RVC | RVU,
3006 .priv_spec = PRIV_VERSION_1_12_0,
3007 .cfg.max_satp_mode = VM_1_10_MBARE,
3008 .cfg.ext_zifencei = true,
3009 .cfg.ext_zicsr = true,
3010 .cfg.pmp = true,
3011 .cfg.ext_smepmp = true,
3012
3013 .cfg.ext_zba = true,
3014 .cfg.ext_zbb = true,
3015 .cfg.ext_zbc = true,
3016 .cfg.ext_zbs = true
3017 ),
3018
3019 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E31, TYPE_RISCV_CPU_SIFIVE_E,
3020 .misa_mxl_max = MXL_RV32
3021 ),
3022 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E34, TYPE_RISCV_CPU_SIFIVE_E,
3023 .misa_mxl_max = MXL_RV32,
3024 .misa_ext = RVF, /* IMAFCU */
3025 ),
3026
3027 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U34, TYPE_RISCV_CPU_SIFIVE_U,
3028 .misa_mxl_max = MXL_RV32,
3029 ),
3030
3031 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32I, TYPE_RISCV_BARE_CPU,
3032 .misa_mxl_max = MXL_RV32,
3033 .misa_ext = RVI
3034 ),
3035 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32E, TYPE_RISCV_BARE_CPU,
3036 .misa_mxl_max = MXL_RV32,
3037 .misa_ext = RVE
3038 ),
3039 #endif
3040
3041 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
3042 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX32, TYPE_RISCV_DYNAMIC_CPU,
3043 .cfg.max_satp_mode = VM_1_10_SV32,
3044 .misa_mxl_max = MXL_RV32,
3045 ),
3046 #endif
3047
3048 #if defined(TARGET_RISCV64)
3049 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE64, TYPE_RISCV_DYNAMIC_CPU,
3050 .cfg.max_satp_mode = VM_1_10_SV57,
3051 .misa_mxl_max = MXL_RV64,
3052 ),
3053
3054 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E51, TYPE_RISCV_CPU_SIFIVE_E,
3055 .misa_mxl_max = MXL_RV64
3056 ),
3057
3058 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U54, TYPE_RISCV_CPU_SIFIVE_U,
3059 .misa_mxl_max = MXL_RV64,
3060 ),
3061
3062 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SHAKTI_C, TYPE_RISCV_CPU_SIFIVE_U,
3063 .misa_mxl_max = MXL_RV64,
3064 ),
3065
3066 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_THEAD_C906, TYPE_RISCV_VENDOR_CPU,
3067 .misa_mxl_max = MXL_RV64,
3068 .misa_ext = RVG | RVC | RVS | RVU,
3069 .priv_spec = PRIV_VERSION_1_11_0,
3070
3071 .cfg.ext_zfa = true,
3072 .cfg.ext_zfh = true,
3073 .cfg.mmu = true,
3074 .cfg.ext_xtheadba = true,
3075 .cfg.ext_xtheadbb = true,
3076 .cfg.ext_xtheadbs = true,
3077 .cfg.ext_xtheadcmo = true,
3078 .cfg.ext_xtheadcondmov = true,
3079 .cfg.ext_xtheadfmemidx = true,
3080 .cfg.ext_xtheadmac = true,
3081 .cfg.ext_xtheadmemidx = true,
3082 .cfg.ext_xtheadmempair = true,
3083 .cfg.ext_xtheadsync = true,
3084 .cfg.pmp = true,
3085
3086 .cfg.mvendorid = THEAD_VENDOR_ID,
3087
3088 .cfg.max_satp_mode = VM_1_10_SV39,
3089 #ifndef CONFIG_USER_ONLY
3090 .custom_csrs = th_csr_list,
3091 #endif
3092 ),
3093
3094 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_TT_ASCALON, TYPE_RISCV_VENDOR_CPU,
3095 .misa_mxl_max = MXL_RV64,
3096 .misa_ext = RVG | RVC | RVS | RVU | RVH | RVV,
3097 .priv_spec = PRIV_VERSION_1_13_0,
3098 .vext_spec = VEXT_VERSION_1_00_0,
3099
3100 /* ISA extensions */
3101 .cfg.mmu = true,
3102 .cfg.vlenb = 256 >> 3,
3103 .cfg.elen = 64,
3104 .cfg.rvv_ma_all_1s = true,
3105 .cfg.rvv_ta_all_1s = true,
3106 .cfg.misa_w = true,
3107 .cfg.pmp = true,
3108 .cfg.cbom_blocksize = 64,
3109 .cfg.cbop_blocksize = 64,
3110 .cfg.cboz_blocksize = 64,
3111 .cfg.ext_zic64b = true,
3112 .cfg.ext_zicbom = true,
3113 .cfg.ext_zicbop = true,
3114 .cfg.ext_zicboz = true,
3115 .cfg.ext_zicntr = true,
3116 .cfg.ext_zicond = true,
3117 .cfg.ext_zicsr = true,
3118 .cfg.ext_zifencei = true,
3119 .cfg.ext_zihintntl = true,
3120 .cfg.ext_zihintpause = true,
3121 .cfg.ext_zihpm = true,
3122 .cfg.ext_zimop = true,
3123 .cfg.ext_zawrs = true,
3124 .cfg.ext_zfa = true,
3125 .cfg.ext_zfbfmin = true,
3126 .cfg.ext_zfh = true,
3127 .cfg.ext_zfhmin = true,
3128 .cfg.ext_zcb = true,
3129 .cfg.ext_zcmop = true,
3130 .cfg.ext_zba = true,
3131 .cfg.ext_zbb = true,
3132 .cfg.ext_zbs = true,
3133 .cfg.ext_zkt = true,
3134 .cfg.ext_zvbb = true,
3135 .cfg.ext_zvbc = true,
3136 .cfg.ext_zvfbfmin = true,
3137 .cfg.ext_zvfbfwma = true,
3138 .cfg.ext_zvfh = true,
3139 .cfg.ext_zvfhmin = true,
3140 .cfg.ext_zvkng = true,
3141 .cfg.ext_smaia = true,
3142 .cfg.ext_smstateen = true,
3143 .cfg.ext_ssaia = true,
3144 .cfg.ext_sscofpmf = true,
3145 .cfg.ext_sstc = true,
3146 .cfg.ext_svade = true,
3147 .cfg.ext_svinval = true,
3148 .cfg.ext_svnapot = true,
3149 .cfg.ext_svpbmt = true,
3150
3151 .cfg.max_satp_mode = VM_1_10_SV57,
3152 ),
3153
3154 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_VEYRON_V1, TYPE_RISCV_VENDOR_CPU,
3155 .misa_mxl_max = MXL_RV64,
3156 .misa_ext = RVG | RVC | RVS | RVU | RVH,
3157 .priv_spec = PRIV_VERSION_1_12_0,
3158
3159 /* ISA extensions */
3160 .cfg.mmu = true,
3161 .cfg.ext_zifencei = true,
3162 .cfg.ext_zicsr = true,
3163 .cfg.pmp = true,
3164 .cfg.ext_zicbom = true,
3165 .cfg.cbom_blocksize = 64,
3166 .cfg.cboz_blocksize = 64,
3167 .cfg.ext_zicboz = true,
3168 .cfg.ext_smaia = true,
3169 .cfg.ext_ssaia = true,
3170 .cfg.ext_sscofpmf = true,
3171 .cfg.ext_sstc = true,
3172 .cfg.ext_svinval = true,
3173 .cfg.ext_svnapot = true,
3174 .cfg.ext_svpbmt = true,
3175 .cfg.ext_smstateen = true,
3176 .cfg.ext_zba = true,
3177 .cfg.ext_zbb = true,
3178 .cfg.ext_zbc = true,
3179 .cfg.ext_zbs = true,
3180 .cfg.ext_XVentanaCondOps = true,
3181
3182 .cfg.mvendorid = VEYRON_V1_MVENDORID,
3183 .cfg.marchid = VEYRON_V1_MARCHID,
3184 .cfg.mimpid = VEYRON_V1_MIMPID,
3185
3186 .cfg.max_satp_mode = VM_1_10_SV48,
3187 ),
3188
3189 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, TYPE_RISCV_VENDOR_CPU,
3190 .misa_mxl_max = MXL_RV64,
3191 .misa_ext = RVG | RVC | RVB | RVS | RVU,
3192 .priv_spec = PRIV_VERSION_1_12_0,
3193
3194 /* ISA extensions */
3195 .cfg.ext_zbc = true,
3196 .cfg.ext_zbkb = true,
3197 .cfg.ext_zbkc = true,
3198 .cfg.ext_zbkx = true,
3199 .cfg.ext_zknd = true,
3200 .cfg.ext_zkne = true,
3201 .cfg.ext_zknh = true,
3202 .cfg.ext_zksed = true,
3203 .cfg.ext_zksh = true,
3204 .cfg.ext_svinval = true,
3205
3206 .cfg.mmu = true,
3207 .cfg.pmp = true,
3208
3209 .cfg.max_satp_mode = VM_1_10_SV39,
3210 ),
3211
3212 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_KMH, TYPE_RISCV_VENDOR_CPU,
3213 .misa_mxl_max = MXL_RV64,
3214 .misa_ext = RVG | RVC | RVB | RVS | RVU | RVH | RVV,
3215 .priv_spec = PRIV_VERSION_1_13_0,
3216 /*
3217 * The RISC-V Instruction Set Manual: Volume I
3218 * Unprivileged Architecture
3219 */
3220 .cfg.ext_zicntr = true,
3221 .cfg.ext_zihpm = true,
3222 .cfg.ext_zihintntl = true,
3223 .cfg.ext_zihintpause = true,
3224 .cfg.ext_zimop = true,
3225 .cfg.ext_zcmop = true,
3226 .cfg.ext_zicond = true,
3227 .cfg.ext_zawrs = true,
3228 .cfg.ext_zacas = true,
3229 .cfg.ext_zfh = true,
3230 .cfg.ext_zfa = true,
3231 .cfg.ext_zcb = true,
3232 .cfg.ext_zbc = true,
3233 .cfg.ext_zvfh = true,
3234 .cfg.ext_zkn = true,
3235 .cfg.ext_zks = true,
3236 .cfg.ext_zkt = true,
3237 .cfg.ext_zvbb = true,
3238 .cfg.ext_zvkt = true,
3239 /*
3240 * The RISC-V Instruction Set Manual: Volume II
3241 * Privileged Architecture
3242 */
3243 .cfg.ext_smstateen = true,
3244 .cfg.ext_smcsrind = true,
3245 .cfg.ext_sscsrind = true,
3246 .cfg.ext_svnapot = true,
3247 .cfg.ext_svpbmt = true,
3248 .cfg.ext_svinval = true,
3249 .cfg.ext_sstc = true,
3250 .cfg.ext_sscofpmf = true,
3251 .cfg.ext_ssdbltrp = true,
3252 .cfg.ext_ssnpm = true,
3253 .cfg.ext_smnpm = true,
3254 .cfg.ext_smmpm = true,
3255 .cfg.ext_sspm = true,
3256 .cfg.ext_supm = true,
3257 /* The RISC-V Advanced Interrupt Architecture */
3258 .cfg.ext_smaia = true,
3259 .cfg.ext_ssaia = true,
3260 /* RVA23 Profiles */
3261 .cfg.ext_zicbom = true,
3262 .cfg.ext_zicbop = true,
3263 .cfg.ext_zicboz = true,
3264 .cfg.ext_svade = true,
3265 .cfg.mmu = true,
3266 .cfg.pmp = true,
3267 .cfg.max_satp_mode = VM_1_10_SV48,
3268 ),
3269
3270 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
3271 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE128, TYPE_RISCV_DYNAMIC_CPU,
3272 .cfg.max_satp_mode = VM_1_10_SV57,
3273 .misa_mxl_max = MXL_RV128,
3274 ),
3275 #endif /* CONFIG_TCG */
3276 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64I, TYPE_RISCV_BARE_CPU,
3277 .misa_mxl_max = MXL_RV64,
3278 .misa_ext = RVI
3279 ),
3280 DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64E, TYPE_RISCV_BARE_CPU,
3281 .misa_mxl_max = MXL_RV64,
3282 .misa_ext = RVE
3283 ),
3284
3285 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, TYPE_RISCV_CPU_RV64I, RVA22U64),
3286 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, TYPE_RISCV_CPU_RV64I, RVA22S64),
3287 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, TYPE_RISCV_CPU_RV64I, RVA23U64),
3288 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, TYPE_RISCV_CPU_RV64I, RVA23S64),
3289 #endif /* TARGET_RISCV64 */
3290 };
3291
3292 DEFINE_TYPES(riscv_cpu_type_infos)
3293