1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "internals.h" 27 #include "exec/exec-all.h" 28 #include "qapi/error.h" 29 #include "qapi/visitor.h" 30 #include "qemu/error-report.h" 31 #include "hw/qdev-properties.h" 32 #include "hw/core/qdev-prop-internal.h" 33 #include "migration/vmstate.h" 34 #include "fpu/softfloat-helpers.h" 35 #include "system/device_tree.h" 36 #include "system/kvm.h" 37 #include "system/tcg.h" 38 #include "kvm/kvm_riscv.h" 39 #include "tcg/tcg-cpu.h" 40 #include "tcg/tcg.h" 41 42 /* RISC-V CPU definitions */ 43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH"; 44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, 45 RVC, RVS, RVU, RVH, RVG, RVB, 0}; 46 47 /* 48 * From vector_helper.c 49 * Note that vector data is stored in host-endian 64-bit chunks, 50 * so addressing bytes needs a host-endian fixup. 51 */ 52 #if HOST_BIG_ENDIAN 53 #define BYTE(x) ((x) ^ 7) 54 #else 55 #define BYTE(x) (x) 56 #endif 57 58 bool riscv_cpu_is_32bit(RISCVCPU *cpu) 59 { 60 return riscv_cpu_mxl(&cpu->env) == MXL_RV32; 61 } 62 63 /* Hash that stores general user set numeric options */ 64 static GHashTable *general_user_opts; 65 66 static void cpu_option_add_user_setting(const char *optname, uint32_t value) 67 { 68 g_hash_table_insert(general_user_opts, (gpointer)optname, 69 GUINT_TO_POINTER(value)); 70 } 71 72 bool riscv_cpu_option_set(const char *optname) 73 { 74 return g_hash_table_contains(general_user_opts, optname); 75 } 76 77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 78 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} 79 80 /* 81 * Here are the ordering rules of extension naming defined by RISC-V 82 * specification : 83 * 1. All extensions should be separated from other multi-letter extensions 84 * by an underscore. 85 * 2. The first letter following the 'Z' conventionally indicates the most 86 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 87 * If multiple 'Z' extensions are named, they should be ordered first 88 * by category, then alphabetically within a category. 89 * 3. Standard supervisor-level extensions (starts with 'S') should be 90 * listed after standard unprivileged extensions. If multiple 91 * supervisor-level extensions are listed, they should be ordered 92 * alphabetically. 93 * 4. Non-standard extensions (starts with 'X') must be listed after all 94 * standard extensions. They must be separated from other multi-letter 95 * extensions by an underscore. 96 * 97 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 98 * instead. 99 */ 100 const RISCVIsaExtData isa_edata_arr[] = { 101 ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b), 102 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), 103 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop), 104 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), 105 ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11), 106 ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11), 107 ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11), 108 ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11), 109 ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp), 110 ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss), 111 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 112 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), 113 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), 114 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei), 115 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 116 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 117 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), 118 ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop), 119 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 120 ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12), 121 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), 122 ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha), 123 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), 124 ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), 125 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), 126 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 127 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 128 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 129 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 130 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 131 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 132 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 133 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 134 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 135 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 136 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 137 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 138 ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop), 139 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 140 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 141 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 142 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 143 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 144 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 145 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 146 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 147 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 148 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 149 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 150 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 151 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 152 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 153 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 154 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 155 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 156 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 157 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 158 ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso), 159 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 160 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 161 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 162 ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x), 163 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 164 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 165 ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x), 166 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 167 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 168 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 169 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 170 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb), 171 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 172 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn), 173 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc), 174 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 175 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng), 176 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 177 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 178 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks), 179 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc), 180 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 181 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg), 182 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 183 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), 184 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 185 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 186 ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), 187 ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha), 188 ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12), 189 ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12), 190 ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12), 191 ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12), 192 ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), 193 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 194 ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg), 195 ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf), 196 ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind), 197 ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp), 198 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), 199 ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi), 200 ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm), 201 ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm), 202 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 203 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 204 ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg), 205 ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11), 206 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 207 ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), 208 ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind), 209 ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp), 210 ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm), 211 ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm), 212 ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen), 213 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 214 ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12), 215 ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), 216 ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12), 217 ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm), 218 ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade), 219 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 220 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 221 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 222 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 223 ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte), 224 ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc), 225 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 226 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 227 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 228 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 229 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 230 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 231 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 232 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 233 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 234 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 235 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 236 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 237 238 { }, 239 }; 240 241 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) 242 { 243 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 244 245 return *ext_enabled; 246 } 247 248 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) 249 { 250 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 251 252 *ext_enabled = en; 253 } 254 255 bool riscv_cpu_is_vendor(Object *cpu_obj) 256 { 257 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; 258 } 259 260 const char * const riscv_int_regnames[] = { 261 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 262 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 263 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 264 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 265 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 266 }; 267 268 const char * const riscv_int_regnamesh[] = { 269 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 270 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 271 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 272 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 273 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 274 "x30h/t5h", "x31h/t6h" 275 }; 276 277 const char * const riscv_fpr_regnames[] = { 278 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 279 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 280 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 281 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 282 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 283 "f30/ft10", "f31/ft11" 284 }; 285 286 const char * const riscv_rvv_regnames[] = { 287 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 288 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 289 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 290 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 291 "v28", "v29", "v30", "v31" 292 }; 293 294 static const char * const riscv_excp_names[] = { 295 "misaligned_fetch", 296 "fault_fetch", 297 "illegal_instruction", 298 "breakpoint", 299 "misaligned_load", 300 "fault_load", 301 "misaligned_store", 302 "fault_store", 303 "user_ecall", 304 "supervisor_ecall", 305 "hypervisor_ecall", 306 "machine_ecall", 307 "exec_page_fault", 308 "load_page_fault", 309 "reserved", 310 "store_page_fault", 311 "double_trap", 312 "reserved", 313 "reserved", 314 "reserved", 315 "guest_exec_page_fault", 316 "guest_load_page_fault", 317 "reserved", 318 "guest_store_page_fault", 319 }; 320 321 static const char * const riscv_intr_names[] = { 322 "u_software", 323 "s_software", 324 "vs_software", 325 "m_software", 326 "u_timer", 327 "s_timer", 328 "vs_timer", 329 "m_timer", 330 "u_external", 331 "s_external", 332 "vs_external", 333 "m_external", 334 "reserved", 335 "reserved", 336 "reserved", 337 "reserved" 338 }; 339 340 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 341 { 342 if (async) { 343 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 344 riscv_intr_names[cause] : "(unknown)"; 345 } else { 346 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 347 riscv_excp_names[cause] : "(unknown)"; 348 } 349 } 350 351 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 352 { 353 env->misa_ext_mask = env->misa_ext = ext; 354 } 355 356 int riscv_cpu_max_xlen(RISCVCPUClass *mcc) 357 { 358 return 16 << mcc->misa_mxl_max; 359 } 360 361 #ifndef CONFIG_USER_ONLY 362 static uint8_t satp_mode_from_str(const char *satp_mode_str) 363 { 364 if (!strncmp(satp_mode_str, "mbare", 5)) { 365 return VM_1_10_MBARE; 366 } 367 368 if (!strncmp(satp_mode_str, "sv32", 4)) { 369 return VM_1_10_SV32; 370 } 371 372 if (!strncmp(satp_mode_str, "sv39", 4)) { 373 return VM_1_10_SV39; 374 } 375 376 if (!strncmp(satp_mode_str, "sv48", 4)) { 377 return VM_1_10_SV48; 378 } 379 380 if (!strncmp(satp_mode_str, "sv57", 4)) { 381 return VM_1_10_SV57; 382 } 383 384 if (!strncmp(satp_mode_str, "sv64", 4)) { 385 return VM_1_10_SV64; 386 } 387 388 g_assert_not_reached(); 389 } 390 391 uint8_t satp_mode_max_from_map(uint32_t map) 392 { 393 /* 394 * 'map = 0' will make us return (31 - 32), which C will 395 * happily overflow to UINT_MAX. There's no good result to 396 * return if 'map = 0' (e.g. returning 0 will be ambiguous 397 * with the result for 'map = 1'). 398 * 399 * Assert out if map = 0. Callers will have to deal with 400 * it outside of this function. 401 */ 402 g_assert(map > 0); 403 404 /* map here has at least one bit set, so no problem with clz */ 405 return 31 - __builtin_clz(map); 406 } 407 408 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 409 { 410 if (is_32_bit) { 411 switch (satp_mode) { 412 case VM_1_10_SV32: 413 return "sv32"; 414 case VM_1_10_MBARE: 415 return "none"; 416 } 417 } else { 418 switch (satp_mode) { 419 case VM_1_10_SV64: 420 return "sv64"; 421 case VM_1_10_SV57: 422 return "sv57"; 423 case VM_1_10_SV48: 424 return "sv48"; 425 case VM_1_10_SV39: 426 return "sv39"; 427 case VM_1_10_MBARE: 428 return "none"; 429 } 430 } 431 432 g_assert_not_reached(); 433 } 434 435 static void set_satp_mode_max_supported(RISCVCPU *cpu, 436 uint8_t satp_mode) 437 { 438 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 439 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 440 441 for (int i = 0; i <= satp_mode; ++i) { 442 if (valid_vm[i]) { 443 cpu->cfg.satp_mode.supported |= (1 << i); 444 } 445 } 446 } 447 448 /* Set the satp mode to the max supported */ 449 static void set_satp_mode_default_map(RISCVCPU *cpu) 450 { 451 /* 452 * Bare CPUs do not default to the max available. 453 * Users must set a valid satp_mode in the command 454 * line. 455 */ 456 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { 457 warn_report("No satp mode set. Defaulting to 'bare'"); 458 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); 459 return; 460 } 461 462 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 463 } 464 #endif 465 466 static void riscv_max_cpu_init(Object *obj) 467 { 468 RISCVCPU *cpu = RISCV_CPU(obj); 469 CPURISCVState *env = &cpu->env; 470 471 cpu->cfg.mmu = true; 472 cpu->cfg.pmp = true; 473 474 env->priv_ver = PRIV_VERSION_LATEST; 475 #ifndef CONFIG_USER_ONLY 476 set_satp_mode_max_supported(RISCV_CPU(obj), 477 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 478 VM_1_10_SV32 : VM_1_10_SV57); 479 #endif 480 } 481 482 #if defined(TARGET_RISCV64) 483 static void rv64_base_cpu_init(Object *obj) 484 { 485 RISCVCPU *cpu = RISCV_CPU(obj); 486 CPURISCVState *env = &cpu->env; 487 488 cpu->cfg.mmu = true; 489 cpu->cfg.pmp = true; 490 491 /* Set latest version of privileged specification */ 492 env->priv_ver = PRIV_VERSION_LATEST; 493 #ifndef CONFIG_USER_ONLY 494 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 495 #endif 496 } 497 498 static void rv64_sifive_u_cpu_init(Object *obj) 499 { 500 RISCVCPU *cpu = RISCV_CPU(obj); 501 CPURISCVState *env = &cpu->env; 502 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 503 env->priv_ver = PRIV_VERSION_1_10_0; 504 #ifndef CONFIG_USER_ONLY 505 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 506 #endif 507 508 /* inherited from parent obj via riscv_cpu_init() */ 509 cpu->cfg.ext_zifencei = true; 510 cpu->cfg.ext_zicsr = true; 511 cpu->cfg.mmu = true; 512 cpu->cfg.pmp = true; 513 } 514 515 static void rv64_sifive_e_cpu_init(Object *obj) 516 { 517 CPURISCVState *env = &RISCV_CPU(obj)->env; 518 RISCVCPU *cpu = RISCV_CPU(obj); 519 520 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 521 env->priv_ver = PRIV_VERSION_1_10_0; 522 #ifndef CONFIG_USER_ONLY 523 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 524 #endif 525 526 /* inherited from parent obj via riscv_cpu_init() */ 527 cpu->cfg.ext_zifencei = true; 528 cpu->cfg.ext_zicsr = true; 529 cpu->cfg.pmp = true; 530 } 531 532 static void rv64_thead_c906_cpu_init(Object *obj) 533 { 534 CPURISCVState *env = &RISCV_CPU(obj)->env; 535 RISCVCPU *cpu = RISCV_CPU(obj); 536 537 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); 538 env->priv_ver = PRIV_VERSION_1_11_0; 539 540 cpu->cfg.ext_zfa = true; 541 cpu->cfg.ext_zfh = true; 542 cpu->cfg.mmu = true; 543 cpu->cfg.ext_xtheadba = true; 544 cpu->cfg.ext_xtheadbb = true; 545 cpu->cfg.ext_xtheadbs = true; 546 cpu->cfg.ext_xtheadcmo = true; 547 cpu->cfg.ext_xtheadcondmov = true; 548 cpu->cfg.ext_xtheadfmemidx = true; 549 cpu->cfg.ext_xtheadmac = true; 550 cpu->cfg.ext_xtheadmemidx = true; 551 cpu->cfg.ext_xtheadmempair = true; 552 cpu->cfg.ext_xtheadsync = true; 553 554 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 555 #ifndef CONFIG_USER_ONLY 556 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 557 th_register_custom_csrs(cpu); 558 #endif 559 560 /* inherited from parent obj via riscv_cpu_init() */ 561 cpu->cfg.pmp = true; 562 } 563 564 static void rv64_veyron_v1_cpu_init(Object *obj) 565 { 566 CPURISCVState *env = &RISCV_CPU(obj)->env; 567 RISCVCPU *cpu = RISCV_CPU(obj); 568 569 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH); 570 env->priv_ver = PRIV_VERSION_1_12_0; 571 572 /* Enable ISA extensions */ 573 cpu->cfg.mmu = true; 574 cpu->cfg.ext_zifencei = true; 575 cpu->cfg.ext_zicsr = true; 576 cpu->cfg.pmp = true; 577 cpu->cfg.ext_zicbom = true; 578 cpu->cfg.cbom_blocksize = 64; 579 cpu->cfg.cboz_blocksize = 64; 580 cpu->cfg.ext_zicboz = true; 581 cpu->cfg.ext_smaia = true; 582 cpu->cfg.ext_ssaia = true; 583 cpu->cfg.ext_sscofpmf = true; 584 cpu->cfg.ext_sstc = true; 585 cpu->cfg.ext_svinval = true; 586 cpu->cfg.ext_svnapot = true; 587 cpu->cfg.ext_svpbmt = true; 588 cpu->cfg.ext_smstateen = true; 589 cpu->cfg.ext_zba = true; 590 cpu->cfg.ext_zbb = true; 591 cpu->cfg.ext_zbc = true; 592 cpu->cfg.ext_zbs = true; 593 cpu->cfg.ext_XVentanaCondOps = true; 594 595 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 596 cpu->cfg.marchid = VEYRON_V1_MARCHID; 597 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 598 599 #ifndef CONFIG_USER_ONLY 600 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 601 #endif 602 } 603 604 /* Tenstorrent Ascalon */ 605 static void rv64_tt_ascalon_cpu_init(Object *obj) 606 { 607 CPURISCVState *env = &RISCV_CPU(obj)->env; 608 RISCVCPU *cpu = RISCV_CPU(obj); 609 610 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV); 611 env->priv_ver = PRIV_VERSION_1_13_0; 612 613 /* Enable ISA extensions */ 614 cpu->cfg.mmu = true; 615 cpu->cfg.vlenb = 256 >> 3; 616 cpu->cfg.elen = 64; 617 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 618 cpu->cfg.rvv_ma_all_1s = true; 619 cpu->cfg.rvv_ta_all_1s = true; 620 cpu->cfg.misa_w = true; 621 cpu->cfg.pmp = true; 622 cpu->cfg.cbom_blocksize = 64; 623 cpu->cfg.cbop_blocksize = 64; 624 cpu->cfg.cboz_blocksize = 64; 625 cpu->cfg.ext_zic64b = true; 626 cpu->cfg.ext_zicbom = true; 627 cpu->cfg.ext_zicbop = true; 628 cpu->cfg.ext_zicboz = true; 629 cpu->cfg.ext_zicntr = true; 630 cpu->cfg.ext_zicond = true; 631 cpu->cfg.ext_zicsr = true; 632 cpu->cfg.ext_zifencei = true; 633 cpu->cfg.ext_zihintntl = true; 634 cpu->cfg.ext_zihintpause = true; 635 cpu->cfg.ext_zihpm = true; 636 cpu->cfg.ext_zimop = true; 637 cpu->cfg.ext_zawrs = true; 638 cpu->cfg.ext_zfa = true; 639 cpu->cfg.ext_zfbfmin = true; 640 cpu->cfg.ext_zfh = true; 641 cpu->cfg.ext_zfhmin = true; 642 cpu->cfg.ext_zcb = true; 643 cpu->cfg.ext_zcmop = true; 644 cpu->cfg.ext_zba = true; 645 cpu->cfg.ext_zbb = true; 646 cpu->cfg.ext_zbs = true; 647 cpu->cfg.ext_zkt = true; 648 cpu->cfg.ext_zvbb = true; 649 cpu->cfg.ext_zvbc = true; 650 cpu->cfg.ext_zvfbfmin = true; 651 cpu->cfg.ext_zvfbfwma = true; 652 cpu->cfg.ext_zvfh = true; 653 cpu->cfg.ext_zvfhmin = true; 654 cpu->cfg.ext_zvkng = true; 655 cpu->cfg.ext_smaia = true; 656 cpu->cfg.ext_smstateen = true; 657 cpu->cfg.ext_ssaia = true; 658 cpu->cfg.ext_sscofpmf = true; 659 cpu->cfg.ext_sstc = true; 660 cpu->cfg.ext_svade = true; 661 cpu->cfg.ext_svinval = true; 662 cpu->cfg.ext_svnapot = true; 663 cpu->cfg.ext_svpbmt = true; 664 665 #ifndef CONFIG_USER_ONLY 666 set_satp_mode_max_supported(cpu, VM_1_10_SV57); 667 #endif 668 } 669 670 static void rv64_xiangshan_nanhu_cpu_init(Object *obj) 671 { 672 CPURISCVState *env = &RISCV_CPU(obj)->env; 673 RISCVCPU *cpu = RISCV_CPU(obj); 674 675 riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU); 676 env->priv_ver = PRIV_VERSION_1_12_0; 677 678 /* Enable ISA extensions */ 679 cpu->cfg.ext_zbc = true; 680 cpu->cfg.ext_zbkb = true; 681 cpu->cfg.ext_zbkc = true; 682 cpu->cfg.ext_zbkx = true; 683 cpu->cfg.ext_zknd = true; 684 cpu->cfg.ext_zkne = true; 685 cpu->cfg.ext_zknh = true; 686 cpu->cfg.ext_zksed = true; 687 cpu->cfg.ext_zksh = true; 688 cpu->cfg.ext_svinval = true; 689 690 cpu->cfg.mmu = true; 691 cpu->cfg.pmp = true; 692 693 #ifndef CONFIG_USER_ONLY 694 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 695 #endif 696 } 697 698 #ifdef CONFIG_TCG 699 static void rv128_base_cpu_init(Object *obj) 700 { 701 RISCVCPU *cpu = RISCV_CPU(obj); 702 CPURISCVState *env = &cpu->env; 703 704 cpu->cfg.mmu = true; 705 cpu->cfg.pmp = true; 706 707 /* Set latest version of privileged specification */ 708 env->priv_ver = PRIV_VERSION_LATEST; 709 #ifndef CONFIG_USER_ONLY 710 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 711 #endif 712 } 713 #endif /* CONFIG_TCG */ 714 715 static void rv64i_bare_cpu_init(Object *obj) 716 { 717 CPURISCVState *env = &RISCV_CPU(obj)->env; 718 riscv_cpu_set_misa_ext(env, RVI); 719 } 720 721 static void rv64e_bare_cpu_init(Object *obj) 722 { 723 CPURISCVState *env = &RISCV_CPU(obj)->env; 724 riscv_cpu_set_misa_ext(env, RVE); 725 } 726 727 #endif /* !TARGET_RISCV64 */ 728 729 #if defined(TARGET_RISCV32) || \ 730 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 731 732 static void rv32_base_cpu_init(Object *obj) 733 { 734 RISCVCPU *cpu = RISCV_CPU(obj); 735 CPURISCVState *env = &cpu->env; 736 737 cpu->cfg.mmu = true; 738 cpu->cfg.pmp = true; 739 740 /* Set latest version of privileged specification */ 741 env->priv_ver = PRIV_VERSION_LATEST; 742 #ifndef CONFIG_USER_ONLY 743 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 744 #endif 745 } 746 747 static void rv32_sifive_u_cpu_init(Object *obj) 748 { 749 RISCVCPU *cpu = RISCV_CPU(obj); 750 CPURISCVState *env = &cpu->env; 751 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 752 env->priv_ver = PRIV_VERSION_1_10_0; 753 #ifndef CONFIG_USER_ONLY 754 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 755 #endif 756 757 /* inherited from parent obj via riscv_cpu_init() */ 758 cpu->cfg.ext_zifencei = true; 759 cpu->cfg.ext_zicsr = true; 760 cpu->cfg.mmu = true; 761 cpu->cfg.pmp = true; 762 } 763 764 static void rv32_sifive_e_cpu_init(Object *obj) 765 { 766 CPURISCVState *env = &RISCV_CPU(obj)->env; 767 RISCVCPU *cpu = RISCV_CPU(obj); 768 769 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 770 env->priv_ver = PRIV_VERSION_1_10_0; 771 #ifndef CONFIG_USER_ONLY 772 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 773 #endif 774 775 /* inherited from parent obj via riscv_cpu_init() */ 776 cpu->cfg.ext_zifencei = true; 777 cpu->cfg.ext_zicsr = true; 778 cpu->cfg.pmp = true; 779 } 780 781 static void rv32_ibex_cpu_init(Object *obj) 782 { 783 CPURISCVState *env = &RISCV_CPU(obj)->env; 784 RISCVCPU *cpu = RISCV_CPU(obj); 785 786 riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); 787 env->priv_ver = PRIV_VERSION_1_12_0; 788 #ifndef CONFIG_USER_ONLY 789 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 790 #endif 791 /* inherited from parent obj via riscv_cpu_init() */ 792 cpu->cfg.ext_zifencei = true; 793 cpu->cfg.ext_zicsr = true; 794 cpu->cfg.pmp = true; 795 cpu->cfg.ext_smepmp = true; 796 797 cpu->cfg.ext_zba = true; 798 cpu->cfg.ext_zbb = true; 799 cpu->cfg.ext_zbc = true; 800 cpu->cfg.ext_zbs = true; 801 } 802 803 static void rv32_imafcu_nommu_cpu_init(Object *obj) 804 { 805 CPURISCVState *env = &RISCV_CPU(obj)->env; 806 RISCVCPU *cpu = RISCV_CPU(obj); 807 808 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); 809 env->priv_ver = PRIV_VERSION_1_10_0; 810 #ifndef CONFIG_USER_ONLY 811 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 812 #endif 813 814 /* inherited from parent obj via riscv_cpu_init() */ 815 cpu->cfg.ext_zifencei = true; 816 cpu->cfg.ext_zicsr = true; 817 cpu->cfg.pmp = true; 818 } 819 820 static void rv32i_bare_cpu_init(Object *obj) 821 { 822 CPURISCVState *env = &RISCV_CPU(obj)->env; 823 riscv_cpu_set_misa_ext(env, RVI); 824 } 825 826 static void rv32e_bare_cpu_init(Object *obj) 827 { 828 CPURISCVState *env = &RISCV_CPU(obj)->env; 829 riscv_cpu_set_misa_ext(env, RVE); 830 } 831 #endif 832 833 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 834 { 835 ObjectClass *oc; 836 char *typename; 837 char **cpuname; 838 839 cpuname = g_strsplit(cpu_model, ",", 1); 840 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 841 oc = object_class_by_name(typename); 842 g_strfreev(cpuname); 843 g_free(typename); 844 845 return oc; 846 } 847 848 char *riscv_cpu_get_name(RISCVCPU *cpu) 849 { 850 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 851 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 852 853 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 854 855 return cpu_model_from_type(typename); 856 } 857 858 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 859 { 860 RISCVCPU *cpu = RISCV_CPU(cs); 861 CPURISCVState *env = &cpu->env; 862 int i, j; 863 uint8_t *p; 864 865 #if !defined(CONFIG_USER_ONLY) 866 if (riscv_has_ext(env, RVH)) { 867 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 868 } 869 #endif 870 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 871 #ifndef CONFIG_USER_ONLY 872 { 873 static const int dump_csrs[] = { 874 CSR_MHARTID, 875 CSR_MSTATUS, 876 CSR_MSTATUSH, 877 /* 878 * CSR_SSTATUS is intentionally omitted here as its value 879 * can be figured out by looking at CSR_MSTATUS 880 */ 881 CSR_HSTATUS, 882 CSR_VSSTATUS, 883 CSR_MIP, 884 CSR_MIE, 885 CSR_MIDELEG, 886 CSR_HIDELEG, 887 CSR_MEDELEG, 888 CSR_HEDELEG, 889 CSR_MTVEC, 890 CSR_STVEC, 891 CSR_VSTVEC, 892 CSR_MEPC, 893 CSR_SEPC, 894 CSR_VSEPC, 895 CSR_MCAUSE, 896 CSR_SCAUSE, 897 CSR_VSCAUSE, 898 CSR_MTVAL, 899 CSR_STVAL, 900 CSR_HTVAL, 901 CSR_MTVAL2, 902 CSR_MSCRATCH, 903 CSR_SSCRATCH, 904 CSR_SATP, 905 }; 906 907 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 908 int csrno = dump_csrs[i]; 909 target_ulong val = 0; 910 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 911 912 /* 913 * Rely on the smode, hmode, etc, predicates within csr.c 914 * to do the filtering of the registers that are present. 915 */ 916 if (res == RISCV_EXCP_NONE) { 917 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 918 csr_ops[csrno].name, val); 919 } 920 } 921 } 922 #endif 923 924 for (i = 0; i < 32; i++) { 925 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 926 riscv_int_regnames[i], env->gpr[i]); 927 if ((i & 3) == 3) { 928 qemu_fprintf(f, "\n"); 929 } 930 } 931 if (flags & CPU_DUMP_FPU) { 932 target_ulong val = 0; 933 RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0); 934 if (res == RISCV_EXCP_NONE) { 935 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 936 csr_ops[CSR_FCSR].name, val); 937 } 938 for (i = 0; i < 32; i++) { 939 qemu_fprintf(f, " %-8s %016" PRIx64, 940 riscv_fpr_regnames[i], env->fpr[i]); 941 if ((i & 3) == 3) { 942 qemu_fprintf(f, "\n"); 943 } 944 } 945 } 946 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 947 static const int dump_rvv_csrs[] = { 948 CSR_VSTART, 949 CSR_VXSAT, 950 CSR_VXRM, 951 CSR_VCSR, 952 CSR_VL, 953 CSR_VTYPE, 954 CSR_VLENB, 955 }; 956 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 957 int csrno = dump_rvv_csrs[i]; 958 target_ulong val = 0; 959 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 960 961 /* 962 * Rely on the smode, hmode, etc, predicates within csr.c 963 * to do the filtering of the registers that are present. 964 */ 965 if (res == RISCV_EXCP_NONE) { 966 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 967 csr_ops[csrno].name, val); 968 } 969 } 970 uint16_t vlenb = cpu->cfg.vlenb; 971 972 for (i = 0; i < 32; i++) { 973 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 974 p = (uint8_t *)env->vreg; 975 for (j = vlenb - 1 ; j >= 0; j--) { 976 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 977 } 978 qemu_fprintf(f, "\n"); 979 } 980 } 981 } 982 983 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 984 { 985 RISCVCPU *cpu = RISCV_CPU(cs); 986 CPURISCVState *env = &cpu->env; 987 988 if (env->xl == MXL_RV32) { 989 env->pc = (int32_t)value; 990 } else { 991 env->pc = value; 992 } 993 } 994 995 static vaddr riscv_cpu_get_pc(CPUState *cs) 996 { 997 RISCVCPU *cpu = RISCV_CPU(cs); 998 CPURISCVState *env = &cpu->env; 999 1000 /* Match cpu_get_tb_cpu_state. */ 1001 if (env->xl == MXL_RV32) { 1002 return env->pc & UINT32_MAX; 1003 } 1004 return env->pc; 1005 } 1006 1007 bool riscv_cpu_has_work(CPUState *cs) 1008 { 1009 #ifndef CONFIG_USER_ONLY 1010 RISCVCPU *cpu = RISCV_CPU(cs); 1011 CPURISCVState *env = &cpu->env; 1012 /* 1013 * Definition of the WFI instruction requires it to ignore the privilege 1014 * mode and delegation registers, but respect individual enables 1015 */ 1016 return riscv_cpu_all_pending(env) != 0 || 1017 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || 1018 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; 1019 #else 1020 return true; 1021 #endif 1022 } 1023 1024 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) 1025 { 1026 return riscv_env_mmu_index(cpu_env(cs), ifetch); 1027 } 1028 1029 static void riscv_cpu_reset_hold(Object *obj, ResetType type) 1030 { 1031 #ifndef CONFIG_USER_ONLY 1032 uint8_t iprio; 1033 int i, irq, rdzero; 1034 #endif 1035 CPUState *cs = CPU(obj); 1036 RISCVCPU *cpu = RISCV_CPU(cs); 1037 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 1038 CPURISCVState *env = &cpu->env; 1039 1040 if (mcc->parent_phases.hold) { 1041 mcc->parent_phases.hold(obj, type); 1042 } 1043 #ifndef CONFIG_USER_ONLY 1044 env->misa_mxl = mcc->misa_mxl_max; 1045 env->priv = PRV_M; 1046 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 1047 if (env->misa_mxl > MXL_RV32) { 1048 /* 1049 * The reset status of SXL/UXL is undefined, but mstatus is WARL 1050 * and we must ensure that the value after init is valid for read. 1051 */ 1052 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 1053 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 1054 if (riscv_has_ext(env, RVH)) { 1055 env->vsstatus = set_field(env->vsstatus, 1056 MSTATUS64_SXL, env->misa_mxl); 1057 env->vsstatus = set_field(env->vsstatus, 1058 MSTATUS64_UXL, env->misa_mxl); 1059 env->mstatus_hs = set_field(env->mstatus_hs, 1060 MSTATUS64_SXL, env->misa_mxl); 1061 env->mstatus_hs = set_field(env->mstatus_hs, 1062 MSTATUS64_UXL, env->misa_mxl); 1063 } 1064 if (riscv_cpu_cfg(env)->ext_smdbltrp) { 1065 env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1); 1066 } 1067 } 1068 env->mcause = 0; 1069 env->miclaim = MIP_SGEIP; 1070 env->pc = env->resetvec; 1071 env->bins = 0; 1072 env->two_stage_lookup = false; 1073 1074 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 1075 (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ? 1076 MENVCFG_ADUE : 0); 1077 env->henvcfg = 0; 1078 1079 /* Initialized default priorities of local interrupts. */ 1080 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 1081 iprio = riscv_cpu_default_priority(i); 1082 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 1083 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 1084 env->hviprio[i] = 0; 1085 } 1086 i = 0; 1087 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 1088 if (!rdzero) { 1089 env->hviprio[irq] = env->miprio[irq]; 1090 } 1091 i++; 1092 } 1093 1094 /* 1095 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor 1096 * extension is enabled. 1097 */ 1098 if (riscv_has_ext(env, RVH)) { 1099 env->mideleg |= HS_MODE_INTERRUPTS; 1100 } 1101 1102 /* 1103 * Clear mseccfg and unlock all the PMP entries upon reset. 1104 * This is allowed as per the priv and smepmp specifications 1105 * and is needed to clear stale entries across reboots. 1106 */ 1107 if (riscv_cpu_cfg(env)->ext_smepmp) { 1108 env->mseccfg = 0; 1109 } 1110 1111 pmp_unlock_entries(env); 1112 #else 1113 env->priv = PRV_U; 1114 env->senvcfg = 0; 1115 env->menvcfg = 0; 1116 #endif 1117 1118 /* on reset elp is clear */ 1119 env->elp = false; 1120 /* on reset ssp is set to 0 */ 1121 env->ssp = 0; 1122 1123 env->xl = riscv_cpu_mxl(env); 1124 cs->exception_index = RISCV_EXCP_NONE; 1125 env->load_res = -1; 1126 set_default_nan_mode(1, &env->fp_status); 1127 /* Default NaN value: sign bit clear, frac msb set */ 1128 set_float_default_nan_pattern(0b01000000, &env->fp_status); 1129 env->vill = true; 1130 1131 #ifndef CONFIG_USER_ONLY 1132 if (cpu->cfg.debug) { 1133 riscv_trigger_reset_hold(env); 1134 } 1135 1136 if (cpu->cfg.ext_smrnmi) { 1137 env->rnmip = 0; 1138 env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false); 1139 } 1140 1141 if (kvm_enabled()) { 1142 kvm_riscv_reset_vcpu(cpu); 1143 } 1144 #endif 1145 } 1146 1147 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 1148 { 1149 RISCVCPU *cpu = RISCV_CPU(s); 1150 CPURISCVState *env = &cpu->env; 1151 info->target_info = &cpu->cfg; 1152 1153 switch (env->xl) { 1154 case MXL_RV32: 1155 info->print_insn = print_insn_riscv32; 1156 break; 1157 case MXL_RV64: 1158 info->print_insn = print_insn_riscv64; 1159 break; 1160 case MXL_RV128: 1161 info->print_insn = print_insn_riscv128; 1162 break; 1163 default: 1164 g_assert_not_reached(); 1165 } 1166 } 1167 1168 #ifndef CONFIG_USER_ONLY 1169 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1170 { 1171 bool rv32 = riscv_cpu_is_32bit(cpu); 1172 uint8_t satp_mode_map_max, satp_mode_supported_max; 1173 1174 /* The CPU wants the OS to decide which satp mode to use */ 1175 if (cpu->cfg.satp_mode.supported == 0) { 1176 return; 1177 } 1178 1179 satp_mode_supported_max = 1180 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1181 1182 if (cpu->cfg.satp_mode.map == 0) { 1183 if (cpu->cfg.satp_mode.init == 0) { 1184 /* If unset by the user, we fallback to the default satp mode. */ 1185 set_satp_mode_default_map(cpu); 1186 } else { 1187 /* 1188 * Find the lowest level that was disabled and then enable the 1189 * first valid level below which can be found in 1190 * valid_vm_1_10_32/64. 1191 */ 1192 for (int i = 1; i < 16; ++i) { 1193 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1194 (cpu->cfg.satp_mode.supported & (1 << i))) { 1195 for (int j = i - 1; j >= 0; --j) { 1196 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1197 cpu->cfg.satp_mode.map |= (1 << j); 1198 break; 1199 } 1200 } 1201 break; 1202 } 1203 } 1204 } 1205 } 1206 1207 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1208 1209 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1210 if (satp_mode_map_max > satp_mode_supported_max) { 1211 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1212 satp_mode_str(satp_mode_map_max, rv32), 1213 satp_mode_str(satp_mode_supported_max, rv32)); 1214 return; 1215 } 1216 1217 /* 1218 * Make sure the user did not ask for an invalid configuration as per 1219 * the specification. 1220 */ 1221 if (!rv32) { 1222 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1223 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1224 (cpu->cfg.satp_mode.init & (1 << i)) && 1225 (cpu->cfg.satp_mode.supported & (1 << i))) { 1226 error_setg(errp, "cannot disable %s satp mode if %s " 1227 "is enabled", satp_mode_str(i, false), 1228 satp_mode_str(satp_mode_map_max, false)); 1229 return; 1230 } 1231 } 1232 } 1233 1234 /* Finally expand the map so that all valid modes are set */ 1235 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1236 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1237 cpu->cfg.satp_mode.map |= (1 << i); 1238 } 1239 } 1240 } 1241 #endif 1242 1243 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1244 { 1245 Error *local_err = NULL; 1246 1247 #ifndef CONFIG_USER_ONLY 1248 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1249 if (local_err != NULL) { 1250 error_propagate(errp, local_err); 1251 return; 1252 } 1253 #endif 1254 1255 if (tcg_enabled()) { 1256 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1257 if (local_err != NULL) { 1258 error_propagate(errp, local_err); 1259 return; 1260 } 1261 riscv_tcg_cpu_finalize_dynamic_decoder(cpu); 1262 } else if (kvm_enabled()) { 1263 riscv_kvm_cpu_finalize_features(cpu, &local_err); 1264 if (local_err != NULL) { 1265 error_propagate(errp, local_err); 1266 return; 1267 } 1268 } 1269 } 1270 1271 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1272 { 1273 CPUState *cs = CPU(dev); 1274 RISCVCPU *cpu = RISCV_CPU(dev); 1275 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1276 Error *local_err = NULL; 1277 1278 cpu_exec_realizefn(cs, &local_err); 1279 if (local_err != NULL) { 1280 error_propagate(errp, local_err); 1281 return; 1282 } 1283 1284 riscv_cpu_finalize_features(cpu, &local_err); 1285 if (local_err != NULL) { 1286 error_propagate(errp, local_err); 1287 return; 1288 } 1289 1290 riscv_cpu_register_gdb_regs_for_features(cs); 1291 1292 #ifndef CONFIG_USER_ONLY 1293 if (cpu->cfg.debug) { 1294 riscv_trigger_realize(&cpu->env); 1295 } 1296 #endif 1297 1298 qemu_init_vcpu(cs); 1299 cpu_reset(cs); 1300 1301 mcc->parent_realize(dev, errp); 1302 } 1303 1304 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1305 { 1306 if (tcg_enabled()) { 1307 return riscv_cpu_tcg_compatible(cpu); 1308 } 1309 1310 return true; 1311 } 1312 1313 #ifndef CONFIG_USER_ONLY 1314 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1315 void *opaque, Error **errp) 1316 { 1317 RISCVSATPMap *satp_map = opaque; 1318 uint8_t satp = satp_mode_from_str(name); 1319 bool value; 1320 1321 value = satp_map->map & (1 << satp); 1322 1323 visit_type_bool(v, name, &value, errp); 1324 } 1325 1326 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1327 void *opaque, Error **errp) 1328 { 1329 RISCVSATPMap *satp_map = opaque; 1330 uint8_t satp = satp_mode_from_str(name); 1331 bool value; 1332 1333 if (!visit_type_bool(v, name, &value, errp)) { 1334 return; 1335 } 1336 1337 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1338 satp_map->init |= 1 << satp; 1339 } 1340 1341 void riscv_add_satp_mode_properties(Object *obj) 1342 { 1343 RISCVCPU *cpu = RISCV_CPU(obj); 1344 1345 if (cpu->env.misa_mxl == MXL_RV32) { 1346 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1347 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1348 } else { 1349 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1350 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1351 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1352 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1353 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1354 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1355 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1356 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1357 } 1358 } 1359 1360 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1361 { 1362 RISCVCPU *cpu = RISCV_CPU(opaque); 1363 CPURISCVState *env = &cpu->env; 1364 1365 if (irq < IRQ_LOCAL_MAX) { 1366 switch (irq) { 1367 case IRQ_U_SOFT: 1368 case IRQ_S_SOFT: 1369 case IRQ_VS_SOFT: 1370 case IRQ_M_SOFT: 1371 case IRQ_U_TIMER: 1372 case IRQ_S_TIMER: 1373 case IRQ_VS_TIMER: 1374 case IRQ_M_TIMER: 1375 case IRQ_U_EXT: 1376 case IRQ_VS_EXT: 1377 case IRQ_M_EXT: 1378 if (kvm_enabled()) { 1379 kvm_riscv_set_irq(cpu, irq, level); 1380 } else { 1381 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1382 } 1383 break; 1384 case IRQ_S_EXT: 1385 if (kvm_enabled()) { 1386 kvm_riscv_set_irq(cpu, irq, level); 1387 } else { 1388 env->external_seip = level; 1389 riscv_cpu_update_mip(env, 1 << irq, 1390 BOOL_TO_MASK(level | env->software_seip)); 1391 } 1392 break; 1393 default: 1394 g_assert_not_reached(); 1395 } 1396 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1397 /* Require H-extension for handling guest local interrupts */ 1398 if (!riscv_has_ext(env, RVH)) { 1399 g_assert_not_reached(); 1400 } 1401 1402 /* Compute bit position in HGEIP CSR */ 1403 irq = irq - IRQ_LOCAL_MAX + 1; 1404 if (env->geilen < irq) { 1405 g_assert_not_reached(); 1406 } 1407 1408 /* Update HGEIP CSR */ 1409 env->hgeip &= ~((target_ulong)1 << irq); 1410 if (level) { 1411 env->hgeip |= (target_ulong)1 << irq; 1412 } 1413 1414 /* Update mip.SGEIP bit */ 1415 riscv_cpu_update_mip(env, MIP_SGEIP, 1416 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1417 } else { 1418 g_assert_not_reached(); 1419 } 1420 } 1421 1422 static void riscv_cpu_set_nmi(void *opaque, int irq, int level) 1423 { 1424 riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level); 1425 } 1426 #endif /* CONFIG_USER_ONLY */ 1427 1428 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1429 { 1430 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1431 } 1432 1433 static void riscv_cpu_post_init(Object *obj) 1434 { 1435 accel_cpu_instance_init(CPU(obj)); 1436 } 1437 1438 static void riscv_cpu_init(Object *obj) 1439 { 1440 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 1441 RISCVCPU *cpu = RISCV_CPU(obj); 1442 CPURISCVState *env = &cpu->env; 1443 1444 env->misa_mxl = mcc->misa_mxl_max; 1445 1446 #ifndef CONFIG_USER_ONLY 1447 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1448 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1449 qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi, 1450 "riscv.cpu.rnmi", RNMI_MAX); 1451 #endif /* CONFIG_USER_ONLY */ 1452 1453 general_user_opts = g_hash_table_new(g_str_hash, g_str_equal); 1454 1455 /* 1456 * The timer and performance counters extensions were supported 1457 * in QEMU before they were added as discrete extensions in the 1458 * ISA. To keep compatibility we'll always default them to 'true' 1459 * for all CPUs. Each accelerator will decide what to do when 1460 * users disable them. 1461 */ 1462 RISCV_CPU(obj)->cfg.ext_zicntr = true; 1463 RISCV_CPU(obj)->cfg.ext_zihpm = true; 1464 1465 /* Default values for non-bool cpu properties */ 1466 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16); 1467 cpu->cfg.vlenb = 128 >> 3; 1468 cpu->cfg.elen = 64; 1469 cpu->cfg.cbom_blocksize = 64; 1470 cpu->cfg.cbop_blocksize = 64; 1471 cpu->cfg.cboz_blocksize = 64; 1472 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 1473 } 1474 1475 static void riscv_bare_cpu_init(Object *obj) 1476 { 1477 RISCVCPU *cpu = RISCV_CPU(obj); 1478 1479 /* 1480 * Bare CPUs do not inherit the timer and performance 1481 * counters from the parent class (see riscv_cpu_init() 1482 * for info on why the parent enables them). 1483 * 1484 * Users have to explicitly enable these counters for 1485 * bare CPUs. 1486 */ 1487 cpu->cfg.ext_zicntr = false; 1488 cpu->cfg.ext_zihpm = false; 1489 1490 /* Set to QEMU's first supported priv version */ 1491 cpu->env.priv_ver = PRIV_VERSION_1_10_0; 1492 1493 /* 1494 * Support all available satp_mode settings. The default 1495 * value will be set to MBARE if the user doesn't set 1496 * satp_mode manually (see set_satp_mode_default()). 1497 */ 1498 #ifndef CONFIG_USER_ONLY 1499 set_satp_mode_max_supported(cpu, VM_1_10_SV64); 1500 #endif 1501 } 1502 1503 typedef struct misa_ext_info { 1504 const char *name; 1505 const char *description; 1506 } MISAExtInfo; 1507 1508 #define MISA_INFO_IDX(_bit) \ 1509 __builtin_ctz(_bit) 1510 1511 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1512 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1513 1514 static const MISAExtInfo misa_ext_info_arr[] = { 1515 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1516 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1517 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1518 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1519 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1520 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1521 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1522 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1523 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1524 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1525 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1526 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1527 MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)") 1528 }; 1529 1530 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc) 1531 { 1532 CPUClass *cc = CPU_CLASS(mcc); 1533 1534 /* Validate that MISA_MXL is set properly. */ 1535 switch (mcc->misa_mxl_max) { 1536 #ifdef TARGET_RISCV64 1537 case MXL_RV64: 1538 case MXL_RV128: 1539 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1540 break; 1541 #endif 1542 case MXL_RV32: 1543 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1544 break; 1545 default: 1546 g_assert_not_reached(); 1547 } 1548 } 1549 1550 static int riscv_validate_misa_info_idx(uint32_t bit) 1551 { 1552 int idx; 1553 1554 /* 1555 * Our lowest valid input (RVA) is 1 and 1556 * __builtin_ctz() is UB with zero. 1557 */ 1558 g_assert(bit != 0); 1559 idx = MISA_INFO_IDX(bit); 1560 1561 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1562 return idx; 1563 } 1564 1565 const char *riscv_get_misa_ext_name(uint32_t bit) 1566 { 1567 int idx = riscv_validate_misa_info_idx(bit); 1568 const char *val = misa_ext_info_arr[idx].name; 1569 1570 g_assert(val != NULL); 1571 return val; 1572 } 1573 1574 const char *riscv_get_misa_ext_description(uint32_t bit) 1575 { 1576 int idx = riscv_validate_misa_info_idx(bit); 1577 const char *val = misa_ext_info_arr[idx].description; 1578 1579 g_assert(val != NULL); 1580 return val; 1581 } 1582 1583 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \ 1584 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ 1585 .enabled = _defval} 1586 1587 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { 1588 /* Defaults for standard extensions */ 1589 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), 1590 MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false), 1591 MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false), 1592 MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false), 1593 MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false), 1594 MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false), 1595 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), 1596 MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false), 1597 MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false), 1598 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), 1599 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), 1600 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), 1601 MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false), 1602 MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false), 1603 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), 1604 MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), 1605 MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false), 1606 MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), 1607 MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), 1608 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), 1609 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), 1610 MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false), 1611 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), 1612 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false), 1613 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false), 1614 MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false), 1615 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false), 1616 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false), 1617 MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false), 1618 MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false), 1619 MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false), 1620 MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false), 1621 MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false), 1622 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), 1623 MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false), 1624 MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false), 1625 MULTI_EXT_CFG_BOOL("supm", ext_supm, false), 1626 1627 MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false), 1628 MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false), 1629 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), 1630 MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false), 1631 MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false), 1632 MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false), 1633 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1634 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false), 1635 MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false), 1636 MULTI_EXT_CFG_BOOL("svade", ext_svade, false), 1637 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1638 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1639 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1640 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1641 MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true), 1642 1643 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1644 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1645 1646 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1647 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1648 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1649 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1650 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1651 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1652 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1653 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1654 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1655 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1656 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1657 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1658 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1659 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1660 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1661 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1662 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1663 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1664 1665 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1666 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1667 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1668 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1669 1670 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1671 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1672 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1673 1674 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1675 1676 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1677 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1678 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1679 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1680 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1681 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1682 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1683 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1684 1685 /* Vector cryptography extensions */ 1686 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1687 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1688 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false), 1689 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1690 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1691 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1692 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1693 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1694 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1695 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1696 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1697 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1698 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1699 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1700 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1701 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1702 1703 { }, 1704 }; 1705 1706 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1707 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1708 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1709 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1710 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1711 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1712 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1713 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1714 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1715 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1716 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1717 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1718 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1719 1720 { }, 1721 }; 1722 1723 /* These are experimental so mark with 'x-' */ 1724 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1725 MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false), 1726 1727 { }, 1728 }; 1729 1730 /* 1731 * 'Named features' is the name we give to extensions that we 1732 * don't want to expose to users. They are either immutable 1733 * (always enabled/disable) or they'll vary depending on 1734 * the resulting CPU state. They have riscv,isa strings 1735 * and priv_ver like regular extensions. 1736 */ 1737 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { 1738 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), 1739 MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true), 1740 MULTI_EXT_CFG_BOOL("sha", ext_sha, true), 1741 1742 { }, 1743 }; 1744 1745 /* Deprecated entries marked for future removal */ 1746 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1747 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1748 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1749 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1750 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1751 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1752 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1753 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1754 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1755 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1756 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1757 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1758 1759 { }, 1760 }; 1761 1762 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, 1763 Error **errp) 1764 { 1765 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1766 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", 1767 cpuname, propname); 1768 } 1769 1770 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1771 void *opaque, Error **errp) 1772 { 1773 RISCVCPU *cpu = RISCV_CPU(obj); 1774 uint8_t pmu_num, curr_pmu_num; 1775 uint32_t pmu_mask; 1776 1777 visit_type_uint8(v, name, &pmu_num, errp); 1778 1779 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); 1780 1781 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { 1782 cpu_set_prop_err(cpu, name, errp); 1783 error_append_hint(errp, "Current '%s' val: %u\n", 1784 name, curr_pmu_num); 1785 return; 1786 } 1787 1788 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1789 error_setg(errp, "Number of counters exceeds maximum available"); 1790 return; 1791 } 1792 1793 if (pmu_num == 0) { 1794 pmu_mask = 0; 1795 } else { 1796 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1797 } 1798 1799 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1800 cpu->cfg.pmu_mask = pmu_mask; 1801 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1802 } 1803 1804 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1805 void *opaque, Error **errp) 1806 { 1807 RISCVCPU *cpu = RISCV_CPU(obj); 1808 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1809 1810 visit_type_uint8(v, name, &pmu_num, errp); 1811 } 1812 1813 static const PropertyInfo prop_pmu_num = { 1814 .name = "pmu-num", 1815 .get = prop_pmu_num_get, 1816 .set = prop_pmu_num_set, 1817 }; 1818 1819 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1820 void *opaque, Error **errp) 1821 { 1822 RISCVCPU *cpu = RISCV_CPU(obj); 1823 uint32_t value; 1824 uint8_t pmu_num; 1825 1826 visit_type_uint32(v, name, &value, errp); 1827 1828 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1829 cpu_set_prop_err(cpu, name, errp); 1830 error_append_hint(errp, "Current '%s' val: %x\n", 1831 name, cpu->cfg.pmu_mask); 1832 return; 1833 } 1834 1835 pmu_num = ctpop32(value); 1836 1837 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1838 error_setg(errp, "Number of counters exceeds maximum available"); 1839 return; 1840 } 1841 1842 cpu_option_add_user_setting(name, value); 1843 cpu->cfg.pmu_mask = value; 1844 } 1845 1846 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1847 void *opaque, Error **errp) 1848 { 1849 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1850 1851 visit_type_uint8(v, name, &pmu_mask, errp); 1852 } 1853 1854 static const PropertyInfo prop_pmu_mask = { 1855 .name = "pmu-mask", 1856 .get = prop_pmu_mask_get, 1857 .set = prop_pmu_mask_set, 1858 }; 1859 1860 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1861 void *opaque, Error **errp) 1862 { 1863 RISCVCPU *cpu = RISCV_CPU(obj); 1864 bool value; 1865 1866 visit_type_bool(v, name, &value, errp); 1867 1868 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1869 cpu_set_prop_err(cpu, "mmu", errp); 1870 return; 1871 } 1872 1873 cpu_option_add_user_setting(name, value); 1874 cpu->cfg.mmu = value; 1875 } 1876 1877 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1878 void *opaque, Error **errp) 1879 { 1880 bool value = RISCV_CPU(obj)->cfg.mmu; 1881 1882 visit_type_bool(v, name, &value, errp); 1883 } 1884 1885 static const PropertyInfo prop_mmu = { 1886 .name = "mmu", 1887 .get = prop_mmu_get, 1888 .set = prop_mmu_set, 1889 }; 1890 1891 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1892 void *opaque, Error **errp) 1893 { 1894 RISCVCPU *cpu = RISCV_CPU(obj); 1895 bool value; 1896 1897 visit_type_bool(v, name, &value, errp); 1898 1899 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1900 cpu_set_prop_err(cpu, name, errp); 1901 return; 1902 } 1903 1904 cpu_option_add_user_setting(name, value); 1905 cpu->cfg.pmp = value; 1906 } 1907 1908 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1909 void *opaque, Error **errp) 1910 { 1911 bool value = RISCV_CPU(obj)->cfg.pmp; 1912 1913 visit_type_bool(v, name, &value, errp); 1914 } 1915 1916 static const PropertyInfo prop_pmp = { 1917 .name = "pmp", 1918 .get = prop_pmp_get, 1919 .set = prop_pmp_set, 1920 }; 1921 1922 static int priv_spec_from_str(const char *priv_spec_str) 1923 { 1924 int priv_version = -1; 1925 1926 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1927 priv_version = PRIV_VERSION_1_13_0; 1928 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1929 priv_version = PRIV_VERSION_1_12_0; 1930 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1931 priv_version = PRIV_VERSION_1_11_0; 1932 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1933 priv_version = PRIV_VERSION_1_10_0; 1934 } 1935 1936 return priv_version; 1937 } 1938 1939 const char *priv_spec_to_str(int priv_version) 1940 { 1941 switch (priv_version) { 1942 case PRIV_VERSION_1_10_0: 1943 return PRIV_VER_1_10_0_STR; 1944 case PRIV_VERSION_1_11_0: 1945 return PRIV_VER_1_11_0_STR; 1946 case PRIV_VERSION_1_12_0: 1947 return PRIV_VER_1_12_0_STR; 1948 case PRIV_VERSION_1_13_0: 1949 return PRIV_VER_1_13_0_STR; 1950 default: 1951 return NULL; 1952 } 1953 } 1954 1955 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1956 void *opaque, Error **errp) 1957 { 1958 RISCVCPU *cpu = RISCV_CPU(obj); 1959 g_autofree char *value = NULL; 1960 int priv_version = -1; 1961 1962 visit_type_str(v, name, &value, errp); 1963 1964 priv_version = priv_spec_from_str(value); 1965 if (priv_version < 0) { 1966 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1967 return; 1968 } 1969 1970 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1971 cpu_set_prop_err(cpu, name, errp); 1972 error_append_hint(errp, "Current '%s' val: %s\n", name, 1973 object_property_get_str(obj, name, NULL)); 1974 return; 1975 } 1976 1977 cpu_option_add_user_setting(name, priv_version); 1978 cpu->env.priv_ver = priv_version; 1979 } 1980 1981 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1982 void *opaque, Error **errp) 1983 { 1984 RISCVCPU *cpu = RISCV_CPU(obj); 1985 const char *value = priv_spec_to_str(cpu->env.priv_ver); 1986 1987 visit_type_str(v, name, (char **)&value, errp); 1988 } 1989 1990 static const PropertyInfo prop_priv_spec = { 1991 .name = "priv_spec", 1992 .get = prop_priv_spec_get, 1993 .set = prop_priv_spec_set, 1994 }; 1995 1996 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 1997 void *opaque, Error **errp) 1998 { 1999 RISCVCPU *cpu = RISCV_CPU(obj); 2000 g_autofree char *value = NULL; 2001 2002 visit_type_str(v, name, &value, errp); 2003 2004 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 2005 error_setg(errp, "Unsupported vector spec version '%s'", value); 2006 return; 2007 } 2008 2009 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 2010 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 2011 } 2012 2013 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 2014 void *opaque, Error **errp) 2015 { 2016 const char *value = VEXT_VER_1_00_0_STR; 2017 2018 visit_type_str(v, name, (char **)&value, errp); 2019 } 2020 2021 static const PropertyInfo prop_vext_spec = { 2022 .name = "vext_spec", 2023 .get = prop_vext_spec_get, 2024 .set = prop_vext_spec_set, 2025 }; 2026 2027 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 2028 void *opaque, Error **errp) 2029 { 2030 RISCVCPU *cpu = RISCV_CPU(obj); 2031 uint16_t cpu_vlen = cpu->cfg.vlenb << 3; 2032 uint16_t value; 2033 2034 if (!visit_type_uint16(v, name, &value, errp)) { 2035 return; 2036 } 2037 2038 if (!is_power_of_2(value)) { 2039 error_setg(errp, "Vector extension VLEN must be power of 2"); 2040 return; 2041 } 2042 2043 if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) { 2044 cpu_set_prop_err(cpu, name, errp); 2045 error_append_hint(errp, "Current '%s' val: %u\n", 2046 name, cpu_vlen); 2047 return; 2048 } 2049 2050 cpu_option_add_user_setting(name, value); 2051 cpu->cfg.vlenb = value >> 3; 2052 } 2053 2054 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 2055 void *opaque, Error **errp) 2056 { 2057 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 2058 2059 visit_type_uint16(v, name, &value, errp); 2060 } 2061 2062 static const PropertyInfo prop_vlen = { 2063 .name = "vlen", 2064 .get = prop_vlen_get, 2065 .set = prop_vlen_set, 2066 }; 2067 2068 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 2069 void *opaque, Error **errp) 2070 { 2071 RISCVCPU *cpu = RISCV_CPU(obj); 2072 uint16_t value; 2073 2074 if (!visit_type_uint16(v, name, &value, errp)) { 2075 return; 2076 } 2077 2078 if (!is_power_of_2(value)) { 2079 error_setg(errp, "Vector extension ELEN must be power of 2"); 2080 return; 2081 } 2082 2083 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 2084 cpu_set_prop_err(cpu, name, errp); 2085 error_append_hint(errp, "Current '%s' val: %u\n", 2086 name, cpu->cfg.elen); 2087 return; 2088 } 2089 2090 cpu_option_add_user_setting(name, value); 2091 cpu->cfg.elen = value; 2092 } 2093 2094 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 2095 void *opaque, Error **errp) 2096 { 2097 uint16_t value = RISCV_CPU(obj)->cfg.elen; 2098 2099 visit_type_uint16(v, name, &value, errp); 2100 } 2101 2102 static const PropertyInfo prop_elen = { 2103 .name = "elen", 2104 .get = prop_elen_get, 2105 .set = prop_elen_set, 2106 }; 2107 2108 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 2109 void *opaque, Error **errp) 2110 { 2111 RISCVCPU *cpu = RISCV_CPU(obj); 2112 uint16_t value; 2113 2114 if (!visit_type_uint16(v, name, &value, errp)) { 2115 return; 2116 } 2117 2118 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2119 cpu_set_prop_err(cpu, name, errp); 2120 error_append_hint(errp, "Current '%s' val: %u\n", 2121 name, cpu->cfg.cbom_blocksize); 2122 return; 2123 } 2124 2125 cpu_option_add_user_setting(name, value); 2126 cpu->cfg.cbom_blocksize = value; 2127 } 2128 2129 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2130 void *opaque, Error **errp) 2131 { 2132 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2133 2134 visit_type_uint16(v, name, &value, errp); 2135 } 2136 2137 static const PropertyInfo prop_cbom_blksize = { 2138 .name = "cbom_blocksize", 2139 .get = prop_cbom_blksize_get, 2140 .set = prop_cbom_blksize_set, 2141 }; 2142 2143 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2144 void *opaque, Error **errp) 2145 { 2146 RISCVCPU *cpu = RISCV_CPU(obj); 2147 uint16_t value; 2148 2149 if (!visit_type_uint16(v, name, &value, errp)) { 2150 return; 2151 } 2152 2153 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2154 cpu_set_prop_err(cpu, name, errp); 2155 error_append_hint(errp, "Current '%s' val: %u\n", 2156 name, cpu->cfg.cbop_blocksize); 2157 return; 2158 } 2159 2160 cpu_option_add_user_setting(name, value); 2161 cpu->cfg.cbop_blocksize = value; 2162 } 2163 2164 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2165 void *opaque, Error **errp) 2166 { 2167 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2168 2169 visit_type_uint16(v, name, &value, errp); 2170 } 2171 2172 static const PropertyInfo prop_cbop_blksize = { 2173 .name = "cbop_blocksize", 2174 .get = prop_cbop_blksize_get, 2175 .set = prop_cbop_blksize_set, 2176 }; 2177 2178 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2179 void *opaque, Error **errp) 2180 { 2181 RISCVCPU *cpu = RISCV_CPU(obj); 2182 uint16_t value; 2183 2184 if (!visit_type_uint16(v, name, &value, errp)) { 2185 return; 2186 } 2187 2188 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2189 cpu_set_prop_err(cpu, name, errp); 2190 error_append_hint(errp, "Current '%s' val: %u\n", 2191 name, cpu->cfg.cboz_blocksize); 2192 return; 2193 } 2194 2195 cpu_option_add_user_setting(name, value); 2196 cpu->cfg.cboz_blocksize = value; 2197 } 2198 2199 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2200 void *opaque, Error **errp) 2201 { 2202 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2203 2204 visit_type_uint16(v, name, &value, errp); 2205 } 2206 2207 static const PropertyInfo prop_cboz_blksize = { 2208 .name = "cboz_blocksize", 2209 .get = prop_cboz_blksize_get, 2210 .set = prop_cboz_blksize_set, 2211 }; 2212 2213 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2214 void *opaque, Error **errp) 2215 { 2216 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2217 RISCVCPU *cpu = RISCV_CPU(obj); 2218 uint32_t prev_val = cpu->cfg.mvendorid; 2219 uint32_t value; 2220 2221 if (!visit_type_uint32(v, name, &value, errp)) { 2222 return; 2223 } 2224 2225 if (!dynamic_cpu && prev_val != value) { 2226 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2227 object_get_typename(obj), prev_val); 2228 return; 2229 } 2230 2231 cpu->cfg.mvendorid = value; 2232 } 2233 2234 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2235 void *opaque, Error **errp) 2236 { 2237 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2238 2239 visit_type_uint32(v, name, &value, errp); 2240 } 2241 2242 static const PropertyInfo prop_mvendorid = { 2243 .name = "mvendorid", 2244 .get = prop_mvendorid_get, 2245 .set = prop_mvendorid_set, 2246 }; 2247 2248 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2249 void *opaque, Error **errp) 2250 { 2251 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2252 RISCVCPU *cpu = RISCV_CPU(obj); 2253 uint64_t prev_val = cpu->cfg.mimpid; 2254 uint64_t value; 2255 2256 if (!visit_type_uint64(v, name, &value, errp)) { 2257 return; 2258 } 2259 2260 if (!dynamic_cpu && prev_val != value) { 2261 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2262 object_get_typename(obj), prev_val); 2263 return; 2264 } 2265 2266 cpu->cfg.mimpid = value; 2267 } 2268 2269 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2270 void *opaque, Error **errp) 2271 { 2272 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2273 2274 visit_type_uint64(v, name, &value, errp); 2275 } 2276 2277 static const PropertyInfo prop_mimpid = { 2278 .name = "mimpid", 2279 .get = prop_mimpid_get, 2280 .set = prop_mimpid_set, 2281 }; 2282 2283 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2284 void *opaque, Error **errp) 2285 { 2286 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2287 RISCVCPU *cpu = RISCV_CPU(obj); 2288 uint64_t prev_val = cpu->cfg.marchid; 2289 uint64_t value, invalid_val; 2290 uint32_t mxlen = 0; 2291 2292 if (!visit_type_uint64(v, name, &value, errp)) { 2293 return; 2294 } 2295 2296 if (!dynamic_cpu && prev_val != value) { 2297 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2298 object_get_typename(obj), prev_val); 2299 return; 2300 } 2301 2302 switch (riscv_cpu_mxl(&cpu->env)) { 2303 case MXL_RV32: 2304 mxlen = 32; 2305 break; 2306 case MXL_RV64: 2307 case MXL_RV128: 2308 mxlen = 64; 2309 break; 2310 default: 2311 g_assert_not_reached(); 2312 } 2313 2314 invalid_val = 1LL << (mxlen - 1); 2315 2316 if (value == invalid_val) { 2317 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2318 "and the remaining bits zero", mxlen); 2319 return; 2320 } 2321 2322 cpu->cfg.marchid = value; 2323 } 2324 2325 static void prop_marchid_get(Object *obj, Visitor *v, const char *name, 2326 void *opaque, Error **errp) 2327 { 2328 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 2329 2330 visit_type_uint64(v, name, &value, errp); 2331 } 2332 2333 static const PropertyInfo prop_marchid = { 2334 .name = "marchid", 2335 .get = prop_marchid_get, 2336 .set = prop_marchid_set, 2337 }; 2338 2339 /* 2340 * RVA22U64 defines some 'named features' that are cache 2341 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa 2342 * and Zicclsm. They are always implemented in TCG and 2343 * doesn't need to be manually enabled by the profile. 2344 */ 2345 static RISCVCPUProfile RVA22U64 = { 2346 .u_parent = NULL, 2347 .s_parent = NULL, 2348 .name = "rva22u64", 2349 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU, 2350 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, 2351 .satp_mode = RISCV_PROFILE_ATTR_UNUSED, 2352 .ext_offsets = { 2353 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause), 2354 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb), 2355 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin), 2356 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr), 2357 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom), 2358 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz), 2359 2360 /* mandatory named features for this profile */ 2361 CPU_CFG_OFFSET(ext_zic64b), 2362 2363 RISCV_PROFILE_EXT_LIST_END 2364 } 2365 }; 2366 2367 /* 2368 * As with RVA22U64, RVA22S64 also defines 'named features'. 2369 * 2370 * Cache related features that we consider enabled since we don't 2371 * implement cache: Ssccptr 2372 * 2373 * Other named features that we already implement: Sstvecd, Sstvala, 2374 * Sscounterenw 2375 * 2376 * The remaining features/extensions comes from RVA22U64. 2377 */ 2378 static RISCVCPUProfile RVA22S64 = { 2379 .u_parent = &RVA22U64, 2380 .s_parent = NULL, 2381 .name = "rva22s64", 2382 .misa_ext = RVS, 2383 .priv_spec = PRIV_VERSION_1_12_0, 2384 .satp_mode = VM_1_10_SV39, 2385 .ext_offsets = { 2386 /* rva22s64 exts */ 2387 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt), 2388 CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade), 2389 2390 RISCV_PROFILE_EXT_LIST_END 2391 } 2392 }; 2393 2394 /* 2395 * All mandatory extensions from RVA22U64 are present 2396 * in RVA23U64 so set RVA22 as a parent. We need to 2397 * declare just the newly added mandatory extensions. 2398 */ 2399 static RISCVCPUProfile RVA23U64 = { 2400 .u_parent = &RVA22U64, 2401 .s_parent = NULL, 2402 .name = "rva23u64", 2403 .misa_ext = RVV, 2404 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, 2405 .satp_mode = RISCV_PROFILE_ATTR_UNUSED, 2406 .ext_offsets = { 2407 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb), 2408 CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl), 2409 CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop), 2410 CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb), 2411 CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs), 2412 CPU_CFG_OFFSET(ext_supm), 2413 2414 RISCV_PROFILE_EXT_LIST_END 2415 } 2416 }; 2417 2418 /* 2419 * As with RVA23U64, RVA23S64 also defines 'named features'. 2420 * 2421 * Cache related features that we consider enabled since we don't 2422 * implement cache: Ssccptr 2423 * 2424 * Other named features that we already implement: Sstvecd, Sstvala, 2425 * Sscounterenw, Ssu64xl 2426 * 2427 * The remaining features/extensions comes from RVA23S64. 2428 */ 2429 static RISCVCPUProfile RVA23S64 = { 2430 .u_parent = &RVA23U64, 2431 .s_parent = &RVA22S64, 2432 .name = "rva23s64", 2433 .misa_ext = RVS, 2434 .priv_spec = PRIV_VERSION_1_13_0, 2435 .satp_mode = VM_1_10_SV39, 2436 .ext_offsets = { 2437 /* New in RVA23S64 */ 2438 CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc), 2439 CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm), 2440 2441 /* Named features: Sha */ 2442 CPU_CFG_OFFSET(ext_sha), 2443 2444 RISCV_PROFILE_EXT_LIST_END 2445 } 2446 }; 2447 2448 RISCVCPUProfile *riscv_profiles[] = { 2449 &RVA22U64, 2450 &RVA22S64, 2451 &RVA23U64, 2452 &RVA23S64, 2453 NULL, 2454 }; 2455 2456 static RISCVCPUImpliedExtsRule RVA_IMPLIED = { 2457 .is_misa = true, 2458 .ext = RVA, 2459 .implied_multi_exts = { 2460 CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo), 2461 2462 RISCV_IMPLIED_EXTS_RULE_END 2463 }, 2464 }; 2465 2466 static RISCVCPUImpliedExtsRule RVD_IMPLIED = { 2467 .is_misa = true, 2468 .ext = RVD, 2469 .implied_misa_exts = RVF, 2470 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2471 }; 2472 2473 static RISCVCPUImpliedExtsRule RVF_IMPLIED = { 2474 .is_misa = true, 2475 .ext = RVF, 2476 .implied_multi_exts = { 2477 CPU_CFG_OFFSET(ext_zicsr), 2478 2479 RISCV_IMPLIED_EXTS_RULE_END 2480 }, 2481 }; 2482 2483 static RISCVCPUImpliedExtsRule RVM_IMPLIED = { 2484 .is_misa = true, 2485 .ext = RVM, 2486 .implied_multi_exts = { 2487 CPU_CFG_OFFSET(ext_zmmul), 2488 2489 RISCV_IMPLIED_EXTS_RULE_END 2490 }, 2491 }; 2492 2493 static RISCVCPUImpliedExtsRule RVV_IMPLIED = { 2494 .is_misa = true, 2495 .ext = RVV, 2496 .implied_multi_exts = { 2497 CPU_CFG_OFFSET(ext_zve64d), 2498 2499 RISCV_IMPLIED_EXTS_RULE_END 2500 }, 2501 }; 2502 2503 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = { 2504 .ext = CPU_CFG_OFFSET(ext_zcb), 2505 .implied_multi_exts = { 2506 CPU_CFG_OFFSET(ext_zca), 2507 2508 RISCV_IMPLIED_EXTS_RULE_END 2509 }, 2510 }; 2511 2512 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = { 2513 .ext = CPU_CFG_OFFSET(ext_zcd), 2514 .implied_misa_exts = RVD, 2515 .implied_multi_exts = { 2516 CPU_CFG_OFFSET(ext_zca), 2517 2518 RISCV_IMPLIED_EXTS_RULE_END 2519 }, 2520 }; 2521 2522 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = { 2523 .ext = CPU_CFG_OFFSET(ext_zce), 2524 .implied_multi_exts = { 2525 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp), 2526 CPU_CFG_OFFSET(ext_zcmt), 2527 2528 RISCV_IMPLIED_EXTS_RULE_END 2529 }, 2530 }; 2531 2532 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = { 2533 .ext = CPU_CFG_OFFSET(ext_zcf), 2534 .implied_misa_exts = RVF, 2535 .implied_multi_exts = { 2536 CPU_CFG_OFFSET(ext_zca), 2537 2538 RISCV_IMPLIED_EXTS_RULE_END 2539 }, 2540 }; 2541 2542 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2543 .ext = CPU_CFG_OFFSET(ext_zcmp), 2544 .implied_multi_exts = { 2545 CPU_CFG_OFFSET(ext_zca), 2546 2547 RISCV_IMPLIED_EXTS_RULE_END 2548 }, 2549 }; 2550 2551 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2552 .ext = CPU_CFG_OFFSET(ext_zcmt), 2553 .implied_multi_exts = { 2554 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2555 2556 RISCV_IMPLIED_EXTS_RULE_END 2557 }, 2558 }; 2559 2560 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2561 .ext = CPU_CFG_OFFSET(ext_zdinx), 2562 .implied_multi_exts = { 2563 CPU_CFG_OFFSET(ext_zfinx), 2564 2565 RISCV_IMPLIED_EXTS_RULE_END 2566 }, 2567 }; 2568 2569 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2570 .ext = CPU_CFG_OFFSET(ext_zfa), 2571 .implied_misa_exts = RVF, 2572 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2573 }; 2574 2575 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2576 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2577 .implied_misa_exts = RVF, 2578 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2579 }; 2580 2581 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2582 .ext = CPU_CFG_OFFSET(ext_zfh), 2583 .implied_multi_exts = { 2584 CPU_CFG_OFFSET(ext_zfhmin), 2585 2586 RISCV_IMPLIED_EXTS_RULE_END 2587 }, 2588 }; 2589 2590 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2591 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2592 .implied_misa_exts = RVF, 2593 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2594 }; 2595 2596 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2597 .ext = CPU_CFG_OFFSET(ext_zfinx), 2598 .implied_multi_exts = { 2599 CPU_CFG_OFFSET(ext_zicsr), 2600 2601 RISCV_IMPLIED_EXTS_RULE_END 2602 }, 2603 }; 2604 2605 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2606 .ext = CPU_CFG_OFFSET(ext_zhinx), 2607 .implied_multi_exts = { 2608 CPU_CFG_OFFSET(ext_zhinxmin), 2609 2610 RISCV_IMPLIED_EXTS_RULE_END 2611 }, 2612 }; 2613 2614 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2615 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2616 .implied_multi_exts = { 2617 CPU_CFG_OFFSET(ext_zfinx), 2618 2619 RISCV_IMPLIED_EXTS_RULE_END 2620 }, 2621 }; 2622 2623 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2624 .ext = CPU_CFG_OFFSET(ext_zicntr), 2625 .implied_multi_exts = { 2626 CPU_CFG_OFFSET(ext_zicsr), 2627 2628 RISCV_IMPLIED_EXTS_RULE_END 2629 }, 2630 }; 2631 2632 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2633 .ext = CPU_CFG_OFFSET(ext_zihpm), 2634 .implied_multi_exts = { 2635 CPU_CFG_OFFSET(ext_zicsr), 2636 2637 RISCV_IMPLIED_EXTS_RULE_END 2638 }, 2639 }; 2640 2641 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2642 .ext = CPU_CFG_OFFSET(ext_zk), 2643 .implied_multi_exts = { 2644 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2645 CPU_CFG_OFFSET(ext_zkt), 2646 2647 RISCV_IMPLIED_EXTS_RULE_END 2648 }, 2649 }; 2650 2651 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2652 .ext = CPU_CFG_OFFSET(ext_zkn), 2653 .implied_multi_exts = { 2654 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2655 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2656 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2657 2658 RISCV_IMPLIED_EXTS_RULE_END 2659 }, 2660 }; 2661 2662 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2663 .ext = CPU_CFG_OFFSET(ext_zks), 2664 .implied_multi_exts = { 2665 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2666 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2667 CPU_CFG_OFFSET(ext_zksh), 2668 2669 RISCV_IMPLIED_EXTS_RULE_END 2670 }, 2671 }; 2672 2673 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2674 .ext = CPU_CFG_OFFSET(ext_zvbb), 2675 .implied_multi_exts = { 2676 CPU_CFG_OFFSET(ext_zvkb), 2677 2678 RISCV_IMPLIED_EXTS_RULE_END 2679 }, 2680 }; 2681 2682 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2683 .ext = CPU_CFG_OFFSET(ext_zve32f), 2684 .implied_misa_exts = RVF, 2685 .implied_multi_exts = { 2686 CPU_CFG_OFFSET(ext_zve32x), 2687 2688 RISCV_IMPLIED_EXTS_RULE_END 2689 }, 2690 }; 2691 2692 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2693 .ext = CPU_CFG_OFFSET(ext_zve32x), 2694 .implied_multi_exts = { 2695 CPU_CFG_OFFSET(ext_zicsr), 2696 2697 RISCV_IMPLIED_EXTS_RULE_END 2698 }, 2699 }; 2700 2701 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2702 .ext = CPU_CFG_OFFSET(ext_zve64d), 2703 .implied_misa_exts = RVD, 2704 .implied_multi_exts = { 2705 CPU_CFG_OFFSET(ext_zve64f), 2706 2707 RISCV_IMPLIED_EXTS_RULE_END 2708 }, 2709 }; 2710 2711 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2712 .ext = CPU_CFG_OFFSET(ext_zve64f), 2713 .implied_misa_exts = RVF, 2714 .implied_multi_exts = { 2715 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2716 2717 RISCV_IMPLIED_EXTS_RULE_END 2718 }, 2719 }; 2720 2721 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2722 .ext = CPU_CFG_OFFSET(ext_zve64x), 2723 .implied_multi_exts = { 2724 CPU_CFG_OFFSET(ext_zve32x), 2725 2726 RISCV_IMPLIED_EXTS_RULE_END 2727 }, 2728 }; 2729 2730 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2731 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2732 .implied_multi_exts = { 2733 CPU_CFG_OFFSET(ext_zve32f), 2734 2735 RISCV_IMPLIED_EXTS_RULE_END 2736 }, 2737 }; 2738 2739 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2740 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2741 .implied_multi_exts = { 2742 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2743 2744 RISCV_IMPLIED_EXTS_RULE_END 2745 }, 2746 }; 2747 2748 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2749 .ext = CPU_CFG_OFFSET(ext_zvfh), 2750 .implied_multi_exts = { 2751 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2752 2753 RISCV_IMPLIED_EXTS_RULE_END 2754 }, 2755 }; 2756 2757 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2758 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2759 .implied_multi_exts = { 2760 CPU_CFG_OFFSET(ext_zve32f), 2761 2762 RISCV_IMPLIED_EXTS_RULE_END 2763 }, 2764 }; 2765 2766 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2767 .ext = CPU_CFG_OFFSET(ext_zvkn), 2768 .implied_multi_exts = { 2769 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2770 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2771 2772 RISCV_IMPLIED_EXTS_RULE_END 2773 }, 2774 }; 2775 2776 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2777 .ext = CPU_CFG_OFFSET(ext_zvknc), 2778 .implied_multi_exts = { 2779 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2780 2781 RISCV_IMPLIED_EXTS_RULE_END 2782 }, 2783 }; 2784 2785 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2786 .ext = CPU_CFG_OFFSET(ext_zvkng), 2787 .implied_multi_exts = { 2788 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2789 2790 RISCV_IMPLIED_EXTS_RULE_END 2791 }, 2792 }; 2793 2794 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2795 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2796 .implied_multi_exts = { 2797 CPU_CFG_OFFSET(ext_zve64x), 2798 2799 RISCV_IMPLIED_EXTS_RULE_END 2800 }, 2801 }; 2802 2803 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2804 .ext = CPU_CFG_OFFSET(ext_zvks), 2805 .implied_multi_exts = { 2806 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh), 2807 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2808 2809 RISCV_IMPLIED_EXTS_RULE_END 2810 }, 2811 }; 2812 2813 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2814 .ext = CPU_CFG_OFFSET(ext_zvksc), 2815 .implied_multi_exts = { 2816 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2817 2818 RISCV_IMPLIED_EXTS_RULE_END 2819 }, 2820 }; 2821 2822 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2823 .ext = CPU_CFG_OFFSET(ext_zvksg), 2824 .implied_multi_exts = { 2825 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2826 2827 RISCV_IMPLIED_EXTS_RULE_END 2828 }, 2829 }; 2830 2831 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = { 2832 .ext = CPU_CFG_OFFSET(ext_ssccfg), 2833 .implied_multi_exts = { 2834 CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind), 2835 CPU_CFG_OFFSET(ext_smcdeleg), 2836 2837 RISCV_IMPLIED_EXTS_RULE_END 2838 }, 2839 }; 2840 2841 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = { 2842 .ext = CPU_CFG_OFFSET(ext_supm), 2843 .implied_multi_exts = { 2844 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm), 2845 2846 RISCV_IMPLIED_EXTS_RULE_END 2847 }, 2848 }; 2849 2850 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = { 2851 .ext = CPU_CFG_OFFSET(ext_sspm), 2852 .implied_multi_exts = { 2853 CPU_CFG_OFFSET(ext_smnpm), 2854 2855 RISCV_IMPLIED_EXTS_RULE_END 2856 }, 2857 }; 2858 2859 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2860 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2861 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2862 }; 2863 2864 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2865 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2866 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2867 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2868 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2869 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2870 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2871 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2872 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2873 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2874 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2875 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2876 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED, 2877 &SUPM_IMPLIED, &SSPM_IMPLIED, 2878 NULL 2879 }; 2880 2881 static const Property riscv_cpu_properties[] = { 2882 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2883 2884 {.name = "pmu-mask", .info = &prop_pmu_mask}, 2885 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2886 2887 {.name = "mmu", .info = &prop_mmu}, 2888 {.name = "pmp", .info = &prop_pmp}, 2889 2890 {.name = "priv_spec", .info = &prop_priv_spec}, 2891 {.name = "vext_spec", .info = &prop_vext_spec}, 2892 2893 {.name = "vlen", .info = &prop_vlen}, 2894 {.name = "elen", .info = &prop_elen}, 2895 2896 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2897 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2898 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2899 2900 {.name = "mvendorid", .info = &prop_mvendorid}, 2901 {.name = "mimpid", .info = &prop_mimpid}, 2902 {.name = "marchid", .info = &prop_marchid}, 2903 2904 #ifndef CONFIG_USER_ONLY 2905 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2906 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2907 DEFAULT_RNMI_IRQVEC), 2908 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2909 DEFAULT_RNMI_EXCPVEC), 2910 #endif 2911 2912 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2913 2914 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2915 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2916 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2917 2918 /* 2919 * write_misa() is marked as experimental for now so mark 2920 * it with -x and default to 'false'. 2921 */ 2922 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2923 }; 2924 2925 #if defined(TARGET_RISCV64) 2926 static void rva22u64_profile_cpu_init(Object *obj) 2927 { 2928 rv64i_bare_cpu_init(obj); 2929 2930 RVA22U64.enabled = true; 2931 } 2932 2933 static void rva22s64_profile_cpu_init(Object *obj) 2934 { 2935 rv64i_bare_cpu_init(obj); 2936 2937 RVA22S64.enabled = true; 2938 } 2939 2940 static void rva23u64_profile_cpu_init(Object *obj) 2941 { 2942 rv64i_bare_cpu_init(obj); 2943 2944 RVA23U64.enabled = true; 2945 } 2946 2947 static void rva23s64_profile_cpu_init(Object *obj) 2948 { 2949 rv64i_bare_cpu_init(obj); 2950 2951 RVA23S64.enabled = true; 2952 } 2953 #endif 2954 2955 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2956 { 2957 RISCVCPU *cpu = RISCV_CPU(cs); 2958 CPURISCVState *env = &cpu->env; 2959 2960 switch (riscv_cpu_mxl(env)) { 2961 case MXL_RV32: 2962 return "riscv:rv32"; 2963 case MXL_RV64: 2964 case MXL_RV128: 2965 return "riscv:rv64"; 2966 default: 2967 g_assert_not_reached(); 2968 } 2969 } 2970 2971 #ifndef CONFIG_USER_ONLY 2972 static int64_t riscv_get_arch_id(CPUState *cs) 2973 { 2974 RISCVCPU *cpu = RISCV_CPU(cs); 2975 2976 return cpu->env.mhartid; 2977 } 2978 2979 #include "hw/core/sysemu-cpu-ops.h" 2980 2981 static const struct SysemuCPUOps riscv_sysemu_ops = { 2982 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2983 .write_elf64_note = riscv_cpu_write_elf64_note, 2984 .write_elf32_note = riscv_cpu_write_elf32_note, 2985 .legacy_vmsd = &vmstate_riscv_cpu, 2986 }; 2987 #endif 2988 2989 static void riscv_cpu_common_class_init(ObjectClass *c, void *data) 2990 { 2991 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2992 CPUClass *cc = CPU_CLASS(c); 2993 DeviceClass *dc = DEVICE_CLASS(c); 2994 ResettableClass *rc = RESETTABLE_CLASS(c); 2995 2996 device_class_set_parent_realize(dc, riscv_cpu_realize, 2997 &mcc->parent_realize); 2998 2999 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 3000 &mcc->parent_phases); 3001 3002 cc->class_by_name = riscv_cpu_class_by_name; 3003 cc->has_work = riscv_cpu_has_work; 3004 cc->mmu_index = riscv_cpu_mmu_index; 3005 cc->dump_state = riscv_cpu_dump_state; 3006 cc->set_pc = riscv_cpu_set_pc; 3007 cc->get_pc = riscv_cpu_get_pc; 3008 cc->gdb_read_register = riscv_cpu_gdb_read_register; 3009 cc->gdb_write_register = riscv_cpu_gdb_write_register; 3010 cc->gdb_stop_before_watchpoint = true; 3011 cc->disas_set_info = riscv_cpu_disas_set_info; 3012 #ifndef CONFIG_USER_ONLY 3013 cc->sysemu_ops = &riscv_sysemu_ops; 3014 cc->get_arch_id = riscv_get_arch_id; 3015 #endif 3016 cc->gdb_arch_name = riscv_gdb_arch_name; 3017 3018 device_class_set_props(dc, riscv_cpu_properties); 3019 } 3020 3021 static void riscv_cpu_class_init(ObjectClass *c, void *data) 3022 { 3023 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3024 3025 mcc->misa_mxl_max = (uint32_t)(uintptr_t)data; 3026 riscv_cpu_validate_misa_mxl(mcc); 3027 } 3028 3029 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 3030 int max_str_len) 3031 { 3032 const RISCVIsaExtData *edata; 3033 char *old = *isa_str; 3034 char *new = *isa_str; 3035 3036 for (edata = isa_edata_arr; edata && edata->name; edata++) { 3037 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3038 new = g_strconcat(old, "_", edata->name, NULL); 3039 g_free(old); 3040 old = new; 3041 } 3042 } 3043 3044 *isa_str = new; 3045 } 3046 3047 char *riscv_isa_string(RISCVCPU *cpu) 3048 { 3049 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3050 int i; 3051 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 3052 char *isa_str = g_new(char, maxlen); 3053 int xlen = riscv_cpu_max_xlen(mcc); 3054 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 3055 3056 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3057 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3058 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 3059 } 3060 } 3061 *p = '\0'; 3062 if (!cpu->cfg.short_isa_string) { 3063 riscv_isa_string_ext(cpu, &isa_str, maxlen); 3064 } 3065 return isa_str; 3066 } 3067 3068 #ifndef CONFIG_USER_ONLY 3069 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 3070 { 3071 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 3072 char **extensions = g_new(char *, maxlen); 3073 3074 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3075 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3076 extensions[*count] = g_new(char, 2); 3077 snprintf(extensions[*count], 2, "%c", 3078 qemu_tolower(riscv_single_letter_exts[i])); 3079 (*count)++; 3080 } 3081 } 3082 3083 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 3084 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3085 extensions[*count] = g_strdup(edata->name); 3086 (*count)++; 3087 } 3088 } 3089 3090 return extensions; 3091 } 3092 3093 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 3094 { 3095 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3096 const size_t maxlen = sizeof("rv128i"); 3097 g_autofree char *isa_base = g_new(char, maxlen); 3098 g_autofree char *riscv_isa; 3099 char **isa_extensions; 3100 int count = 0; 3101 int xlen = riscv_cpu_max_xlen(mcc); 3102 3103 riscv_isa = riscv_isa_string(cpu); 3104 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 3105 3106 snprintf(isa_base, maxlen, "rv%di", xlen); 3107 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 3108 3109 isa_extensions = riscv_isa_extensions_list(cpu, &count); 3110 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 3111 isa_extensions, count); 3112 3113 for (int i = 0; i < count; i++) { 3114 g_free(isa_extensions[i]); 3115 } 3116 3117 g_free(isa_extensions); 3118 } 3119 #endif 3120 3121 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 3122 { \ 3123 .name = (type_name), \ 3124 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 3125 .instance_init = (initfn), \ 3126 .class_init = riscv_cpu_class_init, \ 3127 .class_data = (void *)(misa_mxl_max) \ 3128 } 3129 3130 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 3131 { \ 3132 .name = (type_name), \ 3133 .parent = TYPE_RISCV_VENDOR_CPU, \ 3134 .instance_init = (initfn), \ 3135 .class_init = riscv_cpu_class_init, \ 3136 .class_data = (void *)(misa_mxl_max) \ 3137 } 3138 3139 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 3140 { \ 3141 .name = (type_name), \ 3142 .parent = TYPE_RISCV_BARE_CPU, \ 3143 .instance_init = (initfn), \ 3144 .class_init = riscv_cpu_class_init, \ 3145 .class_data = (void *)(misa_mxl_max) \ 3146 } 3147 3148 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 3149 { \ 3150 .name = (type_name), \ 3151 .parent = TYPE_RISCV_BARE_CPU, \ 3152 .instance_init = (initfn), \ 3153 .class_init = riscv_cpu_class_init, \ 3154 .class_data = (void *)(misa_mxl_max) \ 3155 } 3156 3157 static const TypeInfo riscv_cpu_type_infos[] = { 3158 { 3159 .name = TYPE_RISCV_CPU, 3160 .parent = TYPE_CPU, 3161 .instance_size = sizeof(RISCVCPU), 3162 .instance_align = __alignof(RISCVCPU), 3163 .instance_init = riscv_cpu_init, 3164 .instance_post_init = riscv_cpu_post_init, 3165 .abstract = true, 3166 .class_size = sizeof(RISCVCPUClass), 3167 .class_init = riscv_cpu_common_class_init, 3168 }, 3169 { 3170 .name = TYPE_RISCV_DYNAMIC_CPU, 3171 .parent = TYPE_RISCV_CPU, 3172 .abstract = true, 3173 }, 3174 { 3175 .name = TYPE_RISCV_VENDOR_CPU, 3176 .parent = TYPE_RISCV_CPU, 3177 .abstract = true, 3178 }, 3179 { 3180 .name = TYPE_RISCV_BARE_CPU, 3181 .parent = TYPE_RISCV_CPU, 3182 .instance_init = riscv_bare_cpu_init, 3183 .abstract = true, 3184 }, 3185 #if defined(TARGET_RISCV32) 3186 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3187 #elif defined(TARGET_RISCV64) 3188 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3189 #endif 3190 3191 #if defined(TARGET_RISCV32) || \ 3192 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3193 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3194 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3195 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3196 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3197 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3198 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 3199 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 3200 #endif 3201 3202 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3203 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 3204 #endif 3205 3206 #if defined(TARGET_RISCV64) 3207 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 3208 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 3209 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 3210 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 3211 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 3212 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), 3213 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 3214 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, 3215 MXL_RV64, rv64_xiangshan_nanhu_cpu_init), 3216 #ifdef CONFIG_TCG 3217 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 3218 #endif /* CONFIG_TCG */ 3219 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 3220 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 3221 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 3222 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 3223 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init), 3224 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init), 3225 #endif /* TARGET_RISCV64 */ 3226 }; 3227 3228 DEFINE_TYPES(riscv_cpu_type_infos) 3229