/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x) ((x) ^ 7)
#else
#define BYTE(x) (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
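/*
 * For example, following the rules above, a riscv,isa string would order
 * its extensions as:
 *   rv64imafdc_zicsr_zifencei_zba_zbb_sscofpmf_sstc_xtheadba
 * (single-letter extensions first, then 'Z' extensions by category and
 * alphabetically within a category, then 'S' extensions, then 'X'
 * extensions, each multi-letter name separated by an underscore).
 */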
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
    ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
    ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
    ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
    ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
    ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
    ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
    ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
    ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    { },
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "double_trap",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}
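/*
 * For example, satp_mode_max_from_map() returns the index of the highest
 * bit set, so a map with both the SV39 and SV48 bits set,
 * (1 << VM_1_10_SV39) | (1 << VM_1_10_SV48), yields VM_1_10_SV48.
 */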

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Tenstorrent Ascalon */
static void rv64_tt_ascalon_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
    env->priv_ver = PRIV_VERSION_1_13_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.vlenb = 256 >> 3;
    cpu->cfg.elen = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.rvv_ma_all_1s = true;
    cpu->cfg.rvv_ta_all_1s = true;
    cpu->cfg.misa_w = true;
    cpu->cfg.pmp = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zic64b = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.ext_zicbop = true;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zicond = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zihintntl = true;
    cpu->cfg.ext_zihintpause = true;
    cpu->cfg.ext_zihpm = true;
    cpu->cfg.ext_zimop = true;
    cpu->cfg.ext_zawrs = true;
    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfbfmin = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.ext_zfhmin = true;
    cpu->cfg.ext_zcb = true;
    cpu->cfg.ext_zcmop = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_zkt = true;
    cpu->cfg.ext_zvbb = true;
    cpu->cfg.ext_zvbc = true;
    cpu->cfg.ext_zvfbfmin = true;
    cpu->cfg.ext_zvfbfwma = true;
    cpu->cfg.ext_zvfh = true;
    cpu->cfg.ext_zvfhmin = true;
    cpu->cfg.ext_zvkng = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svade = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}

static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbkb = true;
    cpu->cfg.ext_zbkc = true;
    cpu->cfg.ext_zbkx = true;
    cpu->cfg.ext_zknd = true;
    cpu->cfg.ext_zkne = true;
    cpu->cfg.ext_zknh = true;
    cpu->cfg.ext_zksed = true;
    cpu->cfg.ext_zksh = true;
    cpu->cfg.ext_svinval = true;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif
}

#ifdef CONFIG_TCG
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#endif /* CONFIG_TCG */

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#endif /* !TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
           riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
           riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
        if (riscv_cpu_cfg(env)->ext_smdbltrp) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (cpu->cfg.ext_smrnmi) {
        env->rnmip = 0;
        env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
                    satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

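/*
 * Setter for the per-mode boolean properties (e.g. "-cpu rv64,sv48=on,sv57=off")
 * registered by riscv_add_satp_mode_properties() below. Besides updating the
 * map, it records which modes the user explicitly touched in satp_map->init,
 * so riscv_cpu_satp_mode_finalize() can tell explicit choices apart from
 * defaults.
 */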
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}

static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
    riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
    MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
    MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
    MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
    MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
    MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
    MULTI_EXT_CFG_BOOL("supm", ext_supm, false),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
    MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
    MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
    MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    { },
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    { },
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),

    { },
};

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disable) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
    MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
    MULTI_EXT_CFG_BOOL("sha", ext_sha, true),

    { },
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    { },
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

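/*
 * For example, prop_pmu_num_set() above turns the deprecated "pmu-num=4"
 * setting into "pmu-mask" = MAKE_64BIT_MASK(3, 4) = 0x78, i.e. counters
 * 3-6 of the hpmcounter range.
 */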

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};

static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t value;
    uint8_t pmu_num;

    visit_type_uint32(v, name, &value, errp);

    if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %x\n",
                          name, cpu->cfg.pmu_mask);
        return;
    }

    pmu_num = ctpop32(value);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmu_mask = value;
}

static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    /* cfg.pmu_mask is a 32-bit mask; read it back with the matching width */
    uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;

    visit_type_uint32(v, name, &pmu_mask, errp);
}

static const PropertyInfo prop_pmu_mask = {
    .name = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};

static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, "mmu", errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
        priv_version = PRIV_VERSION_1_13_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}
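
/*
 * Illustration only: assuming the usual "v1.xx.0" spelling of the
 * PRIV_VER_*_STR macros, a command line such as
 * "-cpu rv64,priv_spec=v1.12.0" resolves to PRIV_VERSION_1_12_0 via
 * priv_spec_from_str() above, while any other string is rejected by
 * prop_priv_spec_set() below.
 */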

const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    case PRIV_VERSION_1_13_0:
        return PRIV_VER_1_13_0_STR;
    default:
        return NULL;
    }
}

static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    visit_type_str(v, name, &value, errp);

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    visit_type_str(v, name, &value, errp);

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .name = "vext_spec",
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};
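
/*
 * "vlen" is exposed to users in bits but stored as cfg.vlenb (bytes).
 * Worked example: "-cpu rv64,vlen=256" stores vlenb = 256 >> 3 = 32,
 * and the getter reports 32 << 3 = 256 back to the user.
 */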

static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    /* 'value' is VLEN in bits; cfg.vlenb is stored in bytes */
    if (value != cpu->cfg.vlenb << 3 && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.vlenb << 3);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_vlen = {
    .name = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};

static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};

static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .name = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};
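
/*
 * The cbom/cbop/cboz block size setters all follow the same pattern;
 * e.g. "-cpu rv64,cboz_blocksize=64" requests a 64-byte Zicboz block.
 * Beyond the vendor-CPU check, the value is not validated here; any
 * further consistency checking is left to later stages of CPU
 * realization.
 */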
2197 return; 2198 } 2199 2200 cpu_option_add_user_setting(name, value); 2201 cpu->cfg.cboz_blocksize = value; 2202 } 2203 2204 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2205 void *opaque, Error **errp) 2206 { 2207 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2208 2209 visit_type_uint16(v, name, &value, errp); 2210 } 2211 2212 static const PropertyInfo prop_cboz_blksize = { 2213 .name = "cboz_blocksize", 2214 .get = prop_cboz_blksize_get, 2215 .set = prop_cboz_blksize_set, 2216 }; 2217 2218 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2219 void *opaque, Error **errp) 2220 { 2221 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2222 RISCVCPU *cpu = RISCV_CPU(obj); 2223 uint32_t prev_val = cpu->cfg.mvendorid; 2224 uint32_t value; 2225 2226 if (!visit_type_uint32(v, name, &value, errp)) { 2227 return; 2228 } 2229 2230 if (!dynamic_cpu && prev_val != value) { 2231 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2232 object_get_typename(obj), prev_val); 2233 return; 2234 } 2235 2236 cpu->cfg.mvendorid = value; 2237 } 2238 2239 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2240 void *opaque, Error **errp) 2241 { 2242 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2243 2244 visit_type_uint32(v, name, &value, errp); 2245 } 2246 2247 static const PropertyInfo prop_mvendorid = { 2248 .name = "mvendorid", 2249 .get = prop_mvendorid_get, 2250 .set = prop_mvendorid_set, 2251 }; 2252 2253 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2254 void *opaque, Error **errp) 2255 { 2256 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2257 RISCVCPU *cpu = RISCV_CPU(obj); 2258 uint64_t prev_val = cpu->cfg.mimpid; 2259 uint64_t value; 2260 2261 if (!visit_type_uint64(v, name, &value, errp)) { 2262 return; 2263 } 2264 2265 if (!dynamic_cpu && prev_val != value) { 2266 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2267 object_get_typename(obj), prev_val); 2268 return; 2269 } 2270 2271 cpu->cfg.mimpid = value; 2272 } 2273 2274 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2275 void *opaque, Error **errp) 2276 { 2277 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2278 2279 visit_type_uint64(v, name, &value, errp); 2280 } 2281 2282 static const PropertyInfo prop_mimpid = { 2283 .name = "mimpid", 2284 .get = prop_mimpid_get, 2285 .set = prop_mimpid_set, 2286 }; 2287 2288 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2289 void *opaque, Error **errp) 2290 { 2291 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2292 RISCVCPU *cpu = RISCV_CPU(obj); 2293 uint64_t prev_val = cpu->cfg.marchid; 2294 uint64_t value, invalid_val; 2295 uint32_t mxlen = 0; 2296 2297 if (!visit_type_uint64(v, name, &value, errp)) { 2298 return; 2299 } 2300 2301 if (!dynamic_cpu && prev_val != value) { 2302 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2303 object_get_typename(obj), prev_val); 2304 return; 2305 } 2306 2307 switch (riscv_cpu_mxl(&cpu->env)) { 2308 case MXL_RV32: 2309 mxlen = 32; 2310 break; 2311 case MXL_RV64: 2312 case MXL_RV128: 2313 mxlen = 64; 2314 break; 2315 default: 2316 g_assert_not_reached(); 2317 } 2318 2319 invalid_val = 1LL << (mxlen - 1); 2320 2321 if (value == invalid_val) { 2322 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2323 "and the remaining bits zero", mxlen); 2324 return; 2325 } 2326 2327 cpu->cfg.marchid = value; 2328 } 2329 2330 static void prop_marchid_get(Object *obj, Visitor *v, const 

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1LL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};

/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};
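
/*
 * Implied-extension rules: each rule below ties one MISA bit or one
 * multi-letter extension to the extensions it implies.  Following the
 * tables, for example, enabling "zce" pulls in zcb, zcmp and zcmt, and
 * each of those in turn implies zca; enabling RVV implies zve64d, which
 * in turn implies zve64f, zve32f, zve64x and zve32x.
 */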

static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcd),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zce),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
        CPU_CFG_OFFSET(ext_zcmt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcf),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmp),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmt),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zdinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfa),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfbfmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfhmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zhinxmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinxmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zicntr),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zihpm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zk),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
        CPU_CFG_OFFSET(ext_zkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
        CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
        CPU_CFG_OFFSET(ext_zksh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvbb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkb),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64d),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfhmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkng),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknhb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksg),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_ssccfg),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
        CPU_CFG_OFFSET(ext_smcdeleg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_supm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_sspm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_smnpm),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
    &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
    &RVM_IMPLIED, &RVV_IMPLIED, NULL
};

RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
    &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
    &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
    &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
    &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
    &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
    &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
    &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
    &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
    &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
    &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
    &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
    &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
    &SUPM_IMPLIED, &SSPM_IMPLIED,
    NULL
};
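
/*
 * The qdev properties below are what option strings such as
 * "-cpu rv64,pmp=false,pmu-mask=0x18,marchid=5" are parsed into;
 * the named PropertyInfo handlers above provide the corresponding
 * setters and getters.
 */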

static const Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
    DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec,
                       DEFAULT_RNMI_IRQVEC),
    DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec,
                       DEFAULT_RNMI_EXCPVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
    DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with 'x-' and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
};

#if defined(TARGET_RISCV64)
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;

    device_class_set_props(dc, riscv_cpu_properties);
}
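
/*
 * riscv_cpu_class_init() below receives the maximum MXL value passed
 * through TypeInfo::class_data by the DEFINE_*_CPU() macros at the end
 * of this file, e.g. MXL_RV64 for the 64-bit CPU models.
 */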

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

#ifndef CONFIG_USER_ONLY
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}

void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
#endif
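
/*
 * Example output of the ISA-string machinery above (the exact contents
 * depend on the enabled extensions): a 64-bit CPU with IMAFDC plus
 * Zicsr and Zifencei reports "rv64imafdc_zicsr_zifencei" in riscv,isa,
 * "rv64i" in riscv,isa-base, and the individual extension names in
 * riscv,isa-extensions.
 */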

#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_DYNAMIC_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_VENDOR_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }
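
/*
 * For illustration: DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64,
 * rva22u64_profile_cpu_init) expands to a TypeInfo entry whose parent is
 * TYPE_RISCV_BARE_CPU, so the profile CPUs are bare CPUs whose instance
 * init additionally flips the profile's 'enabled' flag (see
 * rva22u64_profile_cpu_init() above).
 */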

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .instance_init = riscv_bare_cpu_init,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
#endif

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
#endif

#if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init),
#endif

#if defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU,
                      MXL_RV64, rv64_xiangshan_nanhu_cpu_init),
#ifdef CONFIG_TCG
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
#endif /* CONFIG_TCG */
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
#endif /* TARGET_RISCV64 */
};

DEFINE_TYPES(riscv_cpu_type_infos)