/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)  ((x) ^ 7)
#else
#define BYTE(x)  (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * The RISC-V specification defines the following ordering rules for
 * extension naming:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
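 *
 * As an illustration of rules 1-4 (editorial example, not a string QEMU
 * emits verbatim), an ISA string ordered this way could look like:
 *
 *   rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba
 *
 * i.e. multi-letter 'Z' extensions ordered by category ('i' before 'b'),
 * then 'S' extensions alphabetically, then 'X' vendor extensions, each
 * separated by an underscore.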
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
    ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
    ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
    ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
    ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
    ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
    ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
    ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
    ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
    ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    { },
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
"f17/fa7", 282 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 283 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 284 "f30/ft10", "f31/ft11" 285 }; 286 287 const char * const riscv_rvv_regnames[] = { 288 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 289 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 290 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 291 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 292 "v28", "v29", "v30", "v31" 293 }; 294 295 static const char * const riscv_excp_names[] = { 296 "misaligned_fetch", 297 "fault_fetch", 298 "illegal_instruction", 299 "breakpoint", 300 "misaligned_load", 301 "fault_load", 302 "misaligned_store", 303 "fault_store", 304 "user_ecall", 305 "supervisor_ecall", 306 "hypervisor_ecall", 307 "machine_ecall", 308 "exec_page_fault", 309 "load_page_fault", 310 "reserved", 311 "store_page_fault", 312 "double_trap", 313 "reserved", 314 "reserved", 315 "reserved", 316 "guest_exec_page_fault", 317 "guest_load_page_fault", 318 "reserved", 319 "guest_store_page_fault", 320 }; 321 322 static const char * const riscv_intr_names[] = { 323 "u_software", 324 "s_software", 325 "vs_software", 326 "m_software", 327 "u_timer", 328 "s_timer", 329 "vs_timer", 330 "m_timer", 331 "u_external", 332 "s_external", 333 "vs_external", 334 "m_external", 335 "reserved", 336 "reserved", 337 "reserved", 338 "reserved" 339 }; 340 341 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 342 { 343 if (async) { 344 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 345 riscv_intr_names[cause] : "(unknown)"; 346 } else { 347 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 348 riscv_excp_names[cause] : "(unknown)"; 349 } 350 } 351 352 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 353 { 354 env->misa_ext_mask = env->misa_ext = ext; 355 } 356 357 int riscv_cpu_max_xlen(RISCVCPUClass *mcc) 358 { 359 return 16 << mcc->def->misa_mxl_max; 360 } 361 362 #ifndef CONFIG_USER_ONLY 363 static uint8_t satp_mode_from_str(const char *satp_mode_str) 364 { 365 if (!strncmp(satp_mode_str, "mbare", 5)) { 366 return VM_1_10_MBARE; 367 } 368 369 if (!strncmp(satp_mode_str, "sv32", 4)) { 370 return VM_1_10_SV32; 371 } 372 373 if (!strncmp(satp_mode_str, "sv39", 4)) { 374 return VM_1_10_SV39; 375 } 376 377 if (!strncmp(satp_mode_str, "sv48", 4)) { 378 return VM_1_10_SV48; 379 } 380 381 if (!strncmp(satp_mode_str, "sv57", 4)) { 382 return VM_1_10_SV57; 383 } 384 385 if (!strncmp(satp_mode_str, "sv64", 4)) { 386 return VM_1_10_SV64; 387 } 388 389 g_assert_not_reached(); 390 } 391 392 static uint8_t satp_mode_max_from_map(uint32_t map) 393 { 394 /* 395 * 'map = 0' will make us return (31 - 32), which C will 396 * happily overflow to UINT_MAX. There's no good result to 397 * return if 'map = 0' (e.g. returning 0 will be ambiguous 398 * with the result for 'map = 1'). 399 * 400 * Assert out if map = 0. Callers will have to deal with 401 * it outside of this function. 
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    assert(valid_vm[satp_mode]);
    cpu->cfg.max_satp_mode = satp_mode;
}

static bool get_satp_mode_supported(RISCVCPU *cpu, uint16_t *supported)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
    int satp_mode = cpu->cfg.max_satp_mode;

    if (satp_mode == -1) {
        return false;
    }

    *supported = 0;
    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            *supported |= (1 << i);
        }
    }
    return true;
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line. Otherwise, leave the existing max_satp_mode
     * in place.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.max_satp_mode = VM_1_10_MBARE;
    }
}
#endif

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}
#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Tenstorrent Ascalon */
static void rv64_tt_ascalon_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
    env->priv_ver = PRIV_VERSION_1_13_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.vlenb = 256 >> 3;
    cpu->cfg.elen = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.rvv_ma_all_1s = true;
    cpu->cfg.rvv_ta_all_1s = true;
    cpu->cfg.misa_w = true;
    cpu->cfg.pmp = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zic64b = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.ext_zicbop = true;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zicond = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zihintntl = true;
    cpu->cfg.ext_zihintpause = true;
    cpu->cfg.ext_zihpm = true;
    cpu->cfg.ext_zimop = true;
    cpu->cfg.ext_zawrs = true;
    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfbfmin = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.ext_zfhmin = true;
    cpu->cfg.ext_zcb = true;
    cpu->cfg.ext_zcmop = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_zkt = true;
    cpu->cfg.ext_zvbb = true;
    cpu->cfg.ext_zvbc = true;
    cpu->cfg.ext_zvfbfmin = true;
    cpu->cfg.ext_zvfbfwma = true;
    cpu->cfg.ext_zvfh = true;
    cpu->cfg.ext_zvfhmin = true;
    cpu->cfg.ext_zvkng = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svade = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}

static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbkb = true;
    cpu->cfg.ext_zbkc = true;
    cpu->cfg.ext_zbkx = true;
    cpu->cfg.ext_zknd = true;
    cpu->cfg.ext_zkne = true;
    cpu->cfg.ext_zknh = true;
    cpu->cfg.ext_zksed = true;
    cpu->cfg.ext_zksh = true;
    cpu->cfg.ext_svinval = true;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif
}

#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
}
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#endif /* !TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

#ifndef CONFIG_USER_ONLY
bool riscv_cpu_has_work(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
           riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
           riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
}
#endif /* !CONFIG_USER_ONLY */

static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->def->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
        if (riscv_cpu_cfg(env)->ext_smdbltrp) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (cpu->cfg.ext_smrnmi) {
        env->rnmip = 0;
        env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    /*
     * A couple of bits in MSTATUS set the endianness:
     * - MSTATUS_UBE (User-mode),
     * - MSTATUS_SBE (Supervisor-mode),
     * - MSTATUS_MBE (Machine-mode)
     * but we don't implement that yet.
     */
    info->endian = BFD_ENDIAN_LITTLE;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint16_t supported;
    uint8_t satp_mode_map_max;

    if (!get_satp_mode_supported(cpu, &supported)) {
        /* The CPU wants the hypervisor to decide which satp mode to allow */
        return;
    }

    if (cpu->satp_modes.map == 0) {
        if (cpu->satp_modes.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
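             *
             * For instance (editorial illustration): on RV64, if the user
             * only passed sv57=off, the loop below finds SV57 (bit 10) in
             * 'init' and falls back to the next valid mode beneath it,
             * SV48 (bit 9), which becomes the new max_satp_mode.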
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->satp_modes.init & (1 << i)) &&
                    supported & (1 << i)) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (supported & (1 << j)) {
                            cpu->cfg.max_satp_mode = j;
                            return;
                        }
                    }
                }
            }
        }
        return;
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->satp_modes.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > cpu->cfg.max_satp_mode) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(cpu->cfg.max_satp_mode, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->satp_modes.map & (1 << i)) &&
                (cpu->satp_modes.init & (1 << i)) &&
                (supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    cpu->cfg.max_satp_mode = satp_mode_map_max;
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPModes *satp_modes = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_modes->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPModes *satp_modes = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_modes->map = deposit32(satp_modes->map, satp, 1, value);
    satp_modes->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->satp_modes);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->satp_modes);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->satp_modes);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->satp_modes);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->satp_modes);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}

static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
    riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->def->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

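    /*
     * Editorial note: property setters (e.g. the "pmu-num" handler later in
     * this file) record user-provided numeric options in this table via
     * cpu_option_add_user_setting(), so code elsewhere can use
     * riscv_cpu_option_set() to tell explicit settings apart from defaults.
     */
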
    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.max_satp_mode = -1;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->def->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
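     *
     * For example (editorial illustration): RVC is bit 2 ('C' - 'A'), so
     * MISA_INFO_IDX(RVC) == __builtin_ctz(1 << 2) == 2.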
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
    MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
    MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
    MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
    MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
    MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
    MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
    MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
    MULTI_EXT_CFG_BOOL("supm", ext_supm, false),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
    MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
    MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1651 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1652 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1653 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1654 MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true), 1655 1656 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1657 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1658 1659 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1660 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1661 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1662 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1663 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1664 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1665 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1666 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1667 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1668 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1669 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1670 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1671 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1672 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1673 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1674 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1675 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1676 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1677 1678 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1679 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1680 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1681 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1682 1683 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1684 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1685 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1686 1687 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1688 1689 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1690 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1691 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1692 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1693 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1694 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1695 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1696 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1697 1698 /* Vector cryptography extensions */ 1699 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1700 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1701 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false), 1702 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1703 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1704 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1705 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1706 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1707 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1708 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1709 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1710 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1711 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1712 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1713 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1714 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1715 1716 { }, 1717 }; 1718 1719 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1720 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1721 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1722 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1723 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1724 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1725 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1726 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1727 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1728 
MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1729 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1730 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1731 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1732 1733 { }, 1734 }; 1735 1736 /* These are experimental so mark with 'x-' */ 1737 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1738 MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false), 1739 1740 { }, 1741 }; 1742 1743 /* 1744 * 'Named features' is the name we give to extensions that we 1745 * don't want to expose to users. They are either immutable 1746 * (always enabled/disable) or they'll vary depending on 1747 * the resulting CPU state. They have riscv,isa strings 1748 * and priv_ver like regular extensions. 1749 */ 1750 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { 1751 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), 1752 MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true), 1753 MULTI_EXT_CFG_BOOL("sha", ext_sha, true), 1754 MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true), 1755 1756 { }, 1757 }; 1758 1759 /* Deprecated entries marked for future removal */ 1760 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1761 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1762 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1763 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1764 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1765 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1766 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1767 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1768 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1769 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1770 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1771 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1772 1773 { }, 1774 }; 1775 1776 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, 1777 Error **errp) 1778 { 1779 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1780 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", 1781 cpuname, propname); 1782 } 1783 1784 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1785 void *opaque, Error **errp) 1786 { 1787 RISCVCPU *cpu = RISCV_CPU(obj); 1788 uint8_t pmu_num, curr_pmu_num; 1789 uint32_t pmu_mask; 1790 1791 visit_type_uint8(v, name, &pmu_num, errp); 1792 1793 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); 1794 1795 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { 1796 cpu_set_prop_err(cpu, name, errp); 1797 error_append_hint(errp, "Current '%s' val: %u\n", 1798 name, curr_pmu_num); 1799 return; 1800 } 1801 1802 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1803 error_setg(errp, "Number of counters exceeds maximum available"); 1804 return; 1805 } 1806 1807 if (pmu_num == 0) { 1808 pmu_mask = 0; 1809 } else { 1810 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1811 } 1812 1813 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1814 cpu->cfg.pmu_mask = pmu_mask; 1815 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1816 } 1817 1818 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1819 void *opaque, Error **errp) 1820 { 1821 RISCVCPU *cpu = RISCV_CPU(obj); 1822 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1823 1824 visit_type_uint8(v, name, &pmu_num, errp); 1825 } 1826 1827 static const PropertyInfo prop_pmu_num = { 1828 .type = "int8", 1829 .description = "pmu-num", 1830 .get = prop_pmu_num_get, 1831 .set = prop_pmu_num_set, 1832 }; 1833 1834 static void 
prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1835 void *opaque, Error **errp) 1836 { 1837 RISCVCPU *cpu = RISCV_CPU(obj); 1838 uint32_t value; 1839 uint8_t pmu_num; 1840 1841 visit_type_uint32(v, name, &value, errp); 1842 1843 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1844 cpu_set_prop_err(cpu, name, errp); 1845 error_append_hint(errp, "Current '%s' val: %x\n", 1846 name, cpu->cfg.pmu_mask); 1847 return; 1848 } 1849 1850 pmu_num = ctpop32(value); 1851 1852 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1853 error_setg(errp, "Number of counters exceeds maximum available"); 1854 return; 1855 } 1856 1857 cpu_option_add_user_setting(name, value); 1858 cpu->cfg.pmu_mask = value; 1859 } 1860 1861 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1862 void *opaque, Error **errp) 1863 { 1864 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1865 1866 visit_type_uint8(v, name, &pmu_mask, errp); 1867 } 1868 1869 static const PropertyInfo prop_pmu_mask = { 1870 .type = "int8", 1871 .description = "pmu-mask", 1872 .get = prop_pmu_mask_get, 1873 .set = prop_pmu_mask_set, 1874 }; 1875 1876 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1877 void *opaque, Error **errp) 1878 { 1879 RISCVCPU *cpu = RISCV_CPU(obj); 1880 bool value; 1881 1882 visit_type_bool(v, name, &value, errp); 1883 1884 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1885 cpu_set_prop_err(cpu, "mmu", errp); 1886 return; 1887 } 1888 1889 cpu_option_add_user_setting(name, value); 1890 cpu->cfg.mmu = value; 1891 } 1892 1893 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1894 void *opaque, Error **errp) 1895 { 1896 bool value = RISCV_CPU(obj)->cfg.mmu; 1897 1898 visit_type_bool(v, name, &value, errp); 1899 } 1900 1901 static const PropertyInfo prop_mmu = { 1902 .type = "bool", 1903 .description = "mmu", 1904 .get = prop_mmu_get, 1905 .set = prop_mmu_set, 1906 }; 1907 1908 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1909 void *opaque, Error **errp) 1910 { 1911 RISCVCPU *cpu = RISCV_CPU(obj); 1912 bool value; 1913 1914 visit_type_bool(v, name, &value, errp); 1915 1916 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1917 cpu_set_prop_err(cpu, name, errp); 1918 return; 1919 } 1920 1921 cpu_option_add_user_setting(name, value); 1922 cpu->cfg.pmp = value; 1923 } 1924 1925 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1926 void *opaque, Error **errp) 1927 { 1928 bool value = RISCV_CPU(obj)->cfg.pmp; 1929 1930 visit_type_bool(v, name, &value, errp); 1931 } 1932 1933 static const PropertyInfo prop_pmp = { 1934 .type = "bool", 1935 .description = "pmp", 1936 .get = prop_pmp_get, 1937 .set = prop_pmp_set, 1938 }; 1939 1940 static int priv_spec_from_str(const char *priv_spec_str) 1941 { 1942 int priv_version = -1; 1943 1944 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1945 priv_version = PRIV_VERSION_1_13_0; 1946 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1947 priv_version = PRIV_VERSION_1_12_0; 1948 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1949 priv_version = PRIV_VERSION_1_11_0; 1950 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1951 priv_version = PRIV_VERSION_1_10_0; 1952 } 1953 1954 return priv_version; 1955 } 1956 1957 const char *priv_spec_to_str(int priv_version) 1958 { 1959 switch (priv_version) { 1960 case PRIV_VERSION_1_10_0: 1961 return PRIV_VER_1_10_0_STR; 1962 case PRIV_VERSION_1_11_0: 1963 return PRIV_VER_1_11_0_STR; 
1964 case PRIV_VERSION_1_12_0: 1965 return PRIV_VER_1_12_0_STR; 1966 case PRIV_VERSION_1_13_0: 1967 return PRIV_VER_1_13_0_STR; 1968 default: 1969 return NULL; 1970 } 1971 } 1972 1973 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1974 void *opaque, Error **errp) 1975 { 1976 RISCVCPU *cpu = RISCV_CPU(obj); 1977 g_autofree char *value = NULL; 1978 int priv_version = -1; 1979 1980 visit_type_str(v, name, &value, errp); 1981 1982 priv_version = priv_spec_from_str(value); 1983 if (priv_version < 0) { 1984 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1985 return; 1986 } 1987 1988 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1989 cpu_set_prop_err(cpu, name, errp); 1990 error_append_hint(errp, "Current '%s' val: %s\n", name, 1991 object_property_get_str(obj, name, NULL)); 1992 return; 1993 } 1994 1995 cpu_option_add_user_setting(name, priv_version); 1996 cpu->env.priv_ver = priv_version; 1997 } 1998 1999 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 2000 void *opaque, Error **errp) 2001 { 2002 RISCVCPU *cpu = RISCV_CPU(obj); 2003 const char *value = priv_spec_to_str(cpu->env.priv_ver); 2004 2005 visit_type_str(v, name, (char **)&value, errp); 2006 } 2007 2008 static const PropertyInfo prop_priv_spec = { 2009 .type = "str", 2010 .description = "priv_spec", 2011 /* FIXME enum? */ 2012 .get = prop_priv_spec_get, 2013 .set = prop_priv_spec_set, 2014 }; 2015 2016 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 2017 void *opaque, Error **errp) 2018 { 2019 RISCVCPU *cpu = RISCV_CPU(obj); 2020 g_autofree char *value = NULL; 2021 2022 visit_type_str(v, name, &value, errp); 2023 2024 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 2025 error_setg(errp, "Unsupported vector spec version '%s'", value); 2026 return; 2027 } 2028 2029 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 2030 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 2031 } 2032 2033 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 2034 void *opaque, Error **errp) 2035 { 2036 const char *value = VEXT_VER_1_00_0_STR; 2037 2038 visit_type_str(v, name, (char **)&value, errp); 2039 } 2040 2041 static const PropertyInfo prop_vext_spec = { 2042 .type = "str", 2043 .description = "vext_spec", 2044 /* FIXME enum? 
*/ 2045 .get = prop_vext_spec_get, 2046 .set = prop_vext_spec_set, 2047 }; 2048 2049 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 2050 void *opaque, Error **errp) 2051 { 2052 RISCVCPU *cpu = RISCV_CPU(obj); 2053 uint16_t cpu_vlen = cpu->cfg.vlenb << 3; 2054 uint16_t value; 2055 2056 if (!visit_type_uint16(v, name, &value, errp)) { 2057 return; 2058 } 2059 2060 if (!is_power_of_2(value)) { 2061 error_setg(errp, "Vector extension VLEN must be power of 2"); 2062 return; 2063 } 2064 2065 if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) { 2066 cpu_set_prop_err(cpu, name, errp); 2067 error_append_hint(errp, "Current '%s' val: %u\n", 2068 name, cpu_vlen); 2069 return; 2070 } 2071 2072 cpu_option_add_user_setting(name, value); 2073 cpu->cfg.vlenb = value >> 3; 2074 } 2075 2076 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 2077 void *opaque, Error **errp) 2078 { 2079 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 2080 2081 visit_type_uint16(v, name, &value, errp); 2082 } 2083 2084 static const PropertyInfo prop_vlen = { 2085 .type = "uint16", 2086 .description = "vlen", 2087 .get = prop_vlen_get, 2088 .set = prop_vlen_set, 2089 }; 2090 2091 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 2092 void *opaque, Error **errp) 2093 { 2094 RISCVCPU *cpu = RISCV_CPU(obj); 2095 uint16_t value; 2096 2097 if (!visit_type_uint16(v, name, &value, errp)) { 2098 return; 2099 } 2100 2101 if (!is_power_of_2(value)) { 2102 error_setg(errp, "Vector extension ELEN must be power of 2"); 2103 return; 2104 } 2105 2106 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 2107 cpu_set_prop_err(cpu, name, errp); 2108 error_append_hint(errp, "Current '%s' val: %u\n", 2109 name, cpu->cfg.elen); 2110 return; 2111 } 2112 2113 cpu_option_add_user_setting(name, value); 2114 cpu->cfg.elen = value; 2115 } 2116 2117 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 2118 void *opaque, Error **errp) 2119 { 2120 uint16_t value = RISCV_CPU(obj)->cfg.elen; 2121 2122 visit_type_uint16(v, name, &value, errp); 2123 } 2124 2125 static const PropertyInfo prop_elen = { 2126 .type = "uint16", 2127 .description = "elen", 2128 .get = prop_elen_get, 2129 .set = prop_elen_set, 2130 }; 2131 2132 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 2133 void *opaque, Error **errp) 2134 { 2135 RISCVCPU *cpu = RISCV_CPU(obj); 2136 uint16_t value; 2137 2138 if (!visit_type_uint16(v, name, &value, errp)) { 2139 return; 2140 } 2141 2142 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2143 cpu_set_prop_err(cpu, name, errp); 2144 error_append_hint(errp, "Current '%s' val: %u\n", 2145 name, cpu->cfg.cbom_blocksize); 2146 return; 2147 } 2148 2149 cpu_option_add_user_setting(name, value); 2150 cpu->cfg.cbom_blocksize = value; 2151 } 2152 2153 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2154 void *opaque, Error **errp) 2155 { 2156 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2157 2158 visit_type_uint16(v, name, &value, errp); 2159 } 2160 2161 static const PropertyInfo prop_cbom_blksize = { 2162 .type = "uint16", 2163 .description = "cbom_blocksize", 2164 .get = prop_cbom_blksize_get, 2165 .set = prop_cbom_blksize_set, 2166 }; 2167 2168 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2169 void *opaque, Error **errp) 2170 { 2171 RISCVCPU *cpu = RISCV_CPU(obj); 2172 uint16_t value; 2173 2174 if (!visit_type_uint16(v, name, &value, errp)) { 2175 return; 2176 } 
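    /*
     * As with the cbom/cboz block sizes handled nearby, vendor CPUs only
     * accept the block size they already advertise; the check below
     * enforces that.
     */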
2177 2178 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2179 cpu_set_prop_err(cpu, name, errp); 2180 error_append_hint(errp, "Current '%s' val: %u\n", 2181 name, cpu->cfg.cbop_blocksize); 2182 return; 2183 } 2184 2185 cpu_option_add_user_setting(name, value); 2186 cpu->cfg.cbop_blocksize = value; 2187 } 2188 2189 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2190 void *opaque, Error **errp) 2191 { 2192 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2193 2194 visit_type_uint16(v, name, &value, errp); 2195 } 2196 2197 static const PropertyInfo prop_cbop_blksize = { 2198 .type = "uint16", 2199 .description = "cbop_blocksize", 2200 .get = prop_cbop_blksize_get, 2201 .set = prop_cbop_blksize_set, 2202 }; 2203 2204 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2205 void *opaque, Error **errp) 2206 { 2207 RISCVCPU *cpu = RISCV_CPU(obj); 2208 uint16_t value; 2209 2210 if (!visit_type_uint16(v, name, &value, errp)) { 2211 return; 2212 } 2213 2214 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2215 cpu_set_prop_err(cpu, name, errp); 2216 error_append_hint(errp, "Current '%s' val: %u\n", 2217 name, cpu->cfg.cboz_blocksize); 2218 return; 2219 } 2220 2221 cpu_option_add_user_setting(name, value); 2222 cpu->cfg.cboz_blocksize = value; 2223 } 2224 2225 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2226 void *opaque, Error **errp) 2227 { 2228 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2229 2230 visit_type_uint16(v, name, &value, errp); 2231 } 2232 2233 static const PropertyInfo prop_cboz_blksize = { 2234 .type = "uint16", 2235 .description = "cboz_blocksize", 2236 .get = prop_cboz_blksize_get, 2237 .set = prop_cboz_blksize_set, 2238 }; 2239 2240 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2241 void *opaque, Error **errp) 2242 { 2243 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2244 RISCVCPU *cpu = RISCV_CPU(obj); 2245 uint32_t prev_val = cpu->cfg.mvendorid; 2246 uint32_t value; 2247 2248 if (!visit_type_uint32(v, name, &value, errp)) { 2249 return; 2250 } 2251 2252 if (!dynamic_cpu && prev_val != value) { 2253 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2254 object_get_typename(obj), prev_val); 2255 return; 2256 } 2257 2258 cpu->cfg.mvendorid = value; 2259 } 2260 2261 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2262 void *opaque, Error **errp) 2263 { 2264 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2265 2266 visit_type_uint32(v, name, &value, errp); 2267 } 2268 2269 static const PropertyInfo prop_mvendorid = { 2270 .type = "uint32", 2271 .description = "mvendorid", 2272 .get = prop_mvendorid_get, 2273 .set = prop_mvendorid_set, 2274 }; 2275 2276 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2277 void *opaque, Error **errp) 2278 { 2279 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2280 RISCVCPU *cpu = RISCV_CPU(obj); 2281 uint64_t prev_val = cpu->cfg.mimpid; 2282 uint64_t value; 2283 2284 if (!visit_type_uint64(v, name, &value, errp)) { 2285 return; 2286 } 2287 2288 if (!dynamic_cpu && prev_val != value) { 2289 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2290 object_get_typename(obj), prev_val); 2291 return; 2292 } 2293 2294 cpu->cfg.mimpid = value; 2295 } 2296 2297 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2298 void *opaque, Error **errp) 2299 { 2300 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2301 2302 
    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .type = "uint64",
    .description = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1LL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .type = "uint64",
    .description = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};

/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .u_parent = NULL,
    .s_parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};
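/*
 * Informational sketch (not a statement of the QEMU CLI contract): the
 * profile structs defined here are consumed by the CPU option/realize
 * code elsewhere to turn on the listed misa bits and multi-letter
 * extensions.  Assuming the usual command-line plumbing for profiles,
 * they can be exercised either via the dedicated profile CPU types
 * registered at the end of this file, e.g.:
 *
 *     qemu-system-riscv64 -cpu rva22u64 ...
 *
 * or by flipping the matching profile flag on a bare CPU, e.g.:
 *
 *     qemu-system-riscv64 -cpu rv64i,rva22u64=true ...
 */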
/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * All mandatory extensions from RVA22U64 are present
 * in RVA23U64 so set RVA22 as a parent. We need to
 * declare just the newly added mandatory extensions.
 */
static RISCVCPUProfile RVA23U64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva23u64",
    .misa_ext = RVV,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
        CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
        CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
        CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
        CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
        CPU_CFG_OFFSET(ext_supm),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA23U64, RVA23S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw, Ssu64xl
 *
 * The remaining features/extensions come from RVA23U64.
 */
static RISCVCPUProfile RVA23S64 = {
    .u_parent = &RVA23U64,
    .s_parent = &RVA22S64,
    .name = "rva23s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_13_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* New in RVA23S64 */
        CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
        CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),

        /* Named features: Sha */
        CPU_CFG_OFFSET(ext_sha),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    &RVA23U64,
    &RVA23S64,
    NULL,
};

static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};
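/*
 * Worked example, derived purely from the rules in this file: enabling
 * Zcd triggers ZCD_IMPLIED below, which turns on the D misa bit and Zca;
 * RVD_IMPLIED then pulls in F, and RVF_IMPLIED in turn pulls in Zicsr.
 * The transitive expansion itself is performed by the extension
 * validation code that walks riscv_misa_ext_implied_rules[] and
 * riscv_multi_ext_implied_rules[] outside of this file.
 */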
2542 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = { 2543 .ext = CPU_CFG_OFFSET(ext_zcd), 2544 .implied_misa_exts = RVD, 2545 .implied_multi_exts = { 2546 CPU_CFG_OFFSET(ext_zca), 2547 2548 RISCV_IMPLIED_EXTS_RULE_END 2549 }, 2550 }; 2551 2552 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = { 2553 .ext = CPU_CFG_OFFSET(ext_zce), 2554 .implied_multi_exts = { 2555 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp), 2556 CPU_CFG_OFFSET(ext_zcmt), 2557 2558 RISCV_IMPLIED_EXTS_RULE_END 2559 }, 2560 }; 2561 2562 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = { 2563 .ext = CPU_CFG_OFFSET(ext_zcf), 2564 .implied_misa_exts = RVF, 2565 .implied_multi_exts = { 2566 CPU_CFG_OFFSET(ext_zca), 2567 2568 RISCV_IMPLIED_EXTS_RULE_END 2569 }, 2570 }; 2571 2572 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2573 .ext = CPU_CFG_OFFSET(ext_zcmp), 2574 .implied_multi_exts = { 2575 CPU_CFG_OFFSET(ext_zca), 2576 2577 RISCV_IMPLIED_EXTS_RULE_END 2578 }, 2579 }; 2580 2581 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2582 .ext = CPU_CFG_OFFSET(ext_zcmt), 2583 .implied_multi_exts = { 2584 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2585 2586 RISCV_IMPLIED_EXTS_RULE_END 2587 }, 2588 }; 2589 2590 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2591 .ext = CPU_CFG_OFFSET(ext_zdinx), 2592 .implied_multi_exts = { 2593 CPU_CFG_OFFSET(ext_zfinx), 2594 2595 RISCV_IMPLIED_EXTS_RULE_END 2596 }, 2597 }; 2598 2599 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2600 .ext = CPU_CFG_OFFSET(ext_zfa), 2601 .implied_misa_exts = RVF, 2602 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2603 }; 2604 2605 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2606 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2607 .implied_misa_exts = RVF, 2608 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2609 }; 2610 2611 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2612 .ext = CPU_CFG_OFFSET(ext_zfh), 2613 .implied_multi_exts = { 2614 CPU_CFG_OFFSET(ext_zfhmin), 2615 2616 RISCV_IMPLIED_EXTS_RULE_END 2617 }, 2618 }; 2619 2620 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2621 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2622 .implied_misa_exts = RVF, 2623 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2624 }; 2625 2626 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2627 .ext = CPU_CFG_OFFSET(ext_zfinx), 2628 .implied_multi_exts = { 2629 CPU_CFG_OFFSET(ext_zicsr), 2630 2631 RISCV_IMPLIED_EXTS_RULE_END 2632 }, 2633 }; 2634 2635 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2636 .ext = CPU_CFG_OFFSET(ext_zhinx), 2637 .implied_multi_exts = { 2638 CPU_CFG_OFFSET(ext_zhinxmin), 2639 2640 RISCV_IMPLIED_EXTS_RULE_END 2641 }, 2642 }; 2643 2644 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2645 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2646 .implied_multi_exts = { 2647 CPU_CFG_OFFSET(ext_zfinx), 2648 2649 RISCV_IMPLIED_EXTS_RULE_END 2650 }, 2651 }; 2652 2653 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2654 .ext = CPU_CFG_OFFSET(ext_zicntr), 2655 .implied_multi_exts = { 2656 CPU_CFG_OFFSET(ext_zicsr), 2657 2658 RISCV_IMPLIED_EXTS_RULE_END 2659 }, 2660 }; 2661 2662 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2663 .ext = CPU_CFG_OFFSET(ext_zihpm), 2664 .implied_multi_exts = { 2665 CPU_CFG_OFFSET(ext_zicsr), 2666 2667 RISCV_IMPLIED_EXTS_RULE_END 2668 }, 2669 }; 2670 2671 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2672 .ext = CPU_CFG_OFFSET(ext_zk), 2673 .implied_multi_exts = { 2674 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2675 CPU_CFG_OFFSET(ext_zkt), 2676 2677 RISCV_IMPLIED_EXTS_RULE_END 2678 }, 2679 }; 2680 2681 static 
RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2682 .ext = CPU_CFG_OFFSET(ext_zkn), 2683 .implied_multi_exts = { 2684 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2685 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2686 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2687 2688 RISCV_IMPLIED_EXTS_RULE_END 2689 }, 2690 }; 2691 2692 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2693 .ext = CPU_CFG_OFFSET(ext_zks), 2694 .implied_multi_exts = { 2695 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2696 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2697 CPU_CFG_OFFSET(ext_zksh), 2698 2699 RISCV_IMPLIED_EXTS_RULE_END 2700 }, 2701 }; 2702 2703 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2704 .ext = CPU_CFG_OFFSET(ext_zvbb), 2705 .implied_multi_exts = { 2706 CPU_CFG_OFFSET(ext_zvkb), 2707 2708 RISCV_IMPLIED_EXTS_RULE_END 2709 }, 2710 }; 2711 2712 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2713 .ext = CPU_CFG_OFFSET(ext_zve32f), 2714 .implied_misa_exts = RVF, 2715 .implied_multi_exts = { 2716 CPU_CFG_OFFSET(ext_zve32x), 2717 2718 RISCV_IMPLIED_EXTS_RULE_END 2719 }, 2720 }; 2721 2722 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2723 .ext = CPU_CFG_OFFSET(ext_zve32x), 2724 .implied_multi_exts = { 2725 CPU_CFG_OFFSET(ext_zicsr), 2726 2727 RISCV_IMPLIED_EXTS_RULE_END 2728 }, 2729 }; 2730 2731 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2732 .ext = CPU_CFG_OFFSET(ext_zve64d), 2733 .implied_misa_exts = RVD, 2734 .implied_multi_exts = { 2735 CPU_CFG_OFFSET(ext_zve64f), 2736 2737 RISCV_IMPLIED_EXTS_RULE_END 2738 }, 2739 }; 2740 2741 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2742 .ext = CPU_CFG_OFFSET(ext_zve64f), 2743 .implied_misa_exts = RVF, 2744 .implied_multi_exts = { 2745 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2746 2747 RISCV_IMPLIED_EXTS_RULE_END 2748 }, 2749 }; 2750 2751 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2752 .ext = CPU_CFG_OFFSET(ext_zve64x), 2753 .implied_multi_exts = { 2754 CPU_CFG_OFFSET(ext_zve32x), 2755 2756 RISCV_IMPLIED_EXTS_RULE_END 2757 }, 2758 }; 2759 2760 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2761 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2762 .implied_multi_exts = { 2763 CPU_CFG_OFFSET(ext_zve32f), 2764 2765 RISCV_IMPLIED_EXTS_RULE_END 2766 }, 2767 }; 2768 2769 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2770 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2771 .implied_multi_exts = { 2772 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2773 2774 RISCV_IMPLIED_EXTS_RULE_END 2775 }, 2776 }; 2777 2778 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2779 .ext = CPU_CFG_OFFSET(ext_zvfh), 2780 .implied_multi_exts = { 2781 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2782 2783 RISCV_IMPLIED_EXTS_RULE_END 2784 }, 2785 }; 2786 2787 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2788 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2789 .implied_multi_exts = { 2790 CPU_CFG_OFFSET(ext_zve32f), 2791 2792 RISCV_IMPLIED_EXTS_RULE_END 2793 }, 2794 }; 2795 2796 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2797 .ext = CPU_CFG_OFFSET(ext_zvkn), 2798 .implied_multi_exts = { 2799 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2800 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2801 2802 RISCV_IMPLIED_EXTS_RULE_END 2803 }, 2804 }; 2805 2806 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2807 .ext = CPU_CFG_OFFSET(ext_zvknc), 2808 .implied_multi_exts = { 2809 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2810 2811 RISCV_IMPLIED_EXTS_RULE_END 2812 }, 2813 }; 2814 2815 static 
RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2816 .ext = CPU_CFG_OFFSET(ext_zvkng), 2817 .implied_multi_exts = { 2818 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2819 2820 RISCV_IMPLIED_EXTS_RULE_END 2821 }, 2822 }; 2823 2824 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2825 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2826 .implied_multi_exts = { 2827 CPU_CFG_OFFSET(ext_zve64x), 2828 2829 RISCV_IMPLIED_EXTS_RULE_END 2830 }, 2831 }; 2832 2833 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2834 .ext = CPU_CFG_OFFSET(ext_zvks), 2835 .implied_multi_exts = { 2836 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh), 2837 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2838 2839 RISCV_IMPLIED_EXTS_RULE_END 2840 }, 2841 }; 2842 2843 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2844 .ext = CPU_CFG_OFFSET(ext_zvksc), 2845 .implied_multi_exts = { 2846 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2847 2848 RISCV_IMPLIED_EXTS_RULE_END 2849 }, 2850 }; 2851 2852 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2853 .ext = CPU_CFG_OFFSET(ext_zvksg), 2854 .implied_multi_exts = { 2855 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2856 2857 RISCV_IMPLIED_EXTS_RULE_END 2858 }, 2859 }; 2860 2861 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = { 2862 .ext = CPU_CFG_OFFSET(ext_ssccfg), 2863 .implied_multi_exts = { 2864 CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind), 2865 CPU_CFG_OFFSET(ext_smcdeleg), 2866 2867 RISCV_IMPLIED_EXTS_RULE_END 2868 }, 2869 }; 2870 2871 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = { 2872 .ext = CPU_CFG_OFFSET(ext_supm), 2873 .implied_multi_exts = { 2874 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm), 2875 2876 RISCV_IMPLIED_EXTS_RULE_END 2877 }, 2878 }; 2879 2880 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = { 2881 .ext = CPU_CFG_OFFSET(ext_sspm), 2882 .implied_multi_exts = { 2883 CPU_CFG_OFFSET(ext_smnpm), 2884 2885 RISCV_IMPLIED_EXTS_RULE_END 2886 }, 2887 }; 2888 2889 static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = { 2890 .ext = CPU_CFG_OFFSET(ext_smctr), 2891 .implied_misa_exts = RVS, 2892 .implied_multi_exts = { 2893 CPU_CFG_OFFSET(ext_sscsrind), 2894 2895 RISCV_IMPLIED_EXTS_RULE_END 2896 }, 2897 }; 2898 2899 static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = { 2900 .ext = CPU_CFG_OFFSET(ext_ssctr), 2901 .implied_misa_exts = RVS, 2902 .implied_multi_exts = { 2903 CPU_CFG_OFFSET(ext_sscsrind), 2904 2905 RISCV_IMPLIED_EXTS_RULE_END 2906 }, 2907 }; 2908 2909 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2910 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2911 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2912 }; 2913 2914 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2915 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2916 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2917 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2918 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2919 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2920 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2921 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2922 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2923 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2924 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2925 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2926 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED, 2927 &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED, 2928 NULL 2929 }; 2930 2931 static const Property riscv_cpu_properties[] = { 2932 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2933 2934 {.name = "pmu-mask", 
.info = &prop_pmu_mask}, 2935 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2936 2937 {.name = "mmu", .info = &prop_mmu}, 2938 {.name = "pmp", .info = &prop_pmp}, 2939 2940 {.name = "priv_spec", .info = &prop_priv_spec}, 2941 {.name = "vext_spec", .info = &prop_vext_spec}, 2942 2943 {.name = "vlen", .info = &prop_vlen}, 2944 {.name = "elen", .info = &prop_elen}, 2945 2946 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2947 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2948 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2949 2950 {.name = "mvendorid", .info = &prop_mvendorid}, 2951 {.name = "mimpid", .info = &prop_mimpid}, 2952 {.name = "marchid", .info = &prop_marchid}, 2953 2954 #ifndef CONFIG_USER_ONLY 2955 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2956 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2957 DEFAULT_RNMI_IRQVEC), 2958 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2959 DEFAULT_RNMI_EXCPVEC), 2960 #endif 2961 2962 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2963 2964 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2965 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2966 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2967 2968 /* 2969 * write_misa() is marked as experimental for now so mark 2970 * it with -x and default to 'false'. 2971 */ 2972 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2973 }; 2974 2975 #if defined(TARGET_RISCV64) 2976 static void rva22u64_profile_cpu_init(Object *obj) 2977 { 2978 rv64i_bare_cpu_init(obj); 2979 2980 RVA22U64.enabled = true; 2981 } 2982 2983 static void rva22s64_profile_cpu_init(Object *obj) 2984 { 2985 rv64i_bare_cpu_init(obj); 2986 2987 RVA22S64.enabled = true; 2988 } 2989 2990 static void rva23u64_profile_cpu_init(Object *obj) 2991 { 2992 rv64i_bare_cpu_init(obj); 2993 2994 RVA23U64.enabled = true; 2995 } 2996 2997 static void rva23s64_profile_cpu_init(Object *obj) 2998 { 2999 rv64i_bare_cpu_init(obj); 3000 3001 RVA23S64.enabled = true; 3002 } 3003 #endif 3004 3005 static const gchar *riscv_gdb_arch_name(CPUState *cs) 3006 { 3007 RISCVCPU *cpu = RISCV_CPU(cs); 3008 CPURISCVState *env = &cpu->env; 3009 3010 switch (riscv_cpu_mxl(env)) { 3011 case MXL_RV32: 3012 return "riscv:rv32"; 3013 case MXL_RV64: 3014 case MXL_RV128: 3015 return "riscv:rv64"; 3016 default: 3017 g_assert_not_reached(); 3018 } 3019 } 3020 3021 #ifndef CONFIG_USER_ONLY 3022 static int64_t riscv_get_arch_id(CPUState *cs) 3023 { 3024 RISCVCPU *cpu = RISCV_CPU(cs); 3025 3026 return cpu->env.mhartid; 3027 } 3028 3029 #include "hw/core/sysemu-cpu-ops.h" 3030 3031 static const struct SysemuCPUOps riscv_sysemu_ops = { 3032 .has_work = riscv_cpu_has_work, 3033 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 3034 .write_elf64_note = riscv_cpu_write_elf64_note, 3035 .write_elf32_note = riscv_cpu_write_elf32_note, 3036 .legacy_vmsd = &vmstate_riscv_cpu, 3037 }; 3038 #endif 3039 3040 static void riscv_cpu_common_class_init(ObjectClass *c, const void *data) 3041 { 3042 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3043 CPUClass *cc = CPU_CLASS(c); 3044 DeviceClass *dc = DEVICE_CLASS(c); 3045 ResettableClass *rc = RESETTABLE_CLASS(c); 3046 3047 device_class_set_parent_realize(dc, riscv_cpu_realize, 3048 &mcc->parent_realize); 3049 3050 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 3051 &mcc->parent_phases); 3052 3053 
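    /*
     * Generic CPUClass hooks shared by every RISC-V CPU model are wired
     * below; sysemu-only callbacks and the TCG accelerator ops are only
     * attached under their respective config guards.
     */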
cc->class_by_name = riscv_cpu_class_by_name; 3054 cc->dump_state = riscv_cpu_dump_state; 3055 cc->set_pc = riscv_cpu_set_pc; 3056 cc->get_pc = riscv_cpu_get_pc; 3057 cc->gdb_read_register = riscv_cpu_gdb_read_register; 3058 cc->gdb_write_register = riscv_cpu_gdb_write_register; 3059 cc->gdb_stop_before_watchpoint = true; 3060 cc->disas_set_info = riscv_cpu_disas_set_info; 3061 #ifndef CONFIG_USER_ONLY 3062 cc->sysemu_ops = &riscv_sysemu_ops; 3063 cc->get_arch_id = riscv_get_arch_id; 3064 #endif 3065 cc->gdb_arch_name = riscv_gdb_arch_name; 3066 #ifdef CONFIG_TCG 3067 cc->tcg_ops = &riscv_tcg_ops; 3068 #endif /* CONFIG_TCG */ 3069 3070 device_class_set_props(dc, riscv_cpu_properties); 3071 } 3072 3073 static void riscv_cpu_class_base_init(ObjectClass *c, const void *data) 3074 { 3075 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3076 RISCVCPUClass *pcc = RISCV_CPU_CLASS(object_class_get_parent(c)); 3077 3078 if (pcc->def) { 3079 mcc->def = g_memdup2(pcc->def, sizeof(*pcc->def)); 3080 } else { 3081 mcc->def = g_new0(RISCVCPUDef, 1); 3082 } 3083 } 3084 3085 static void riscv_cpu_class_init(ObjectClass *c, const void *data) 3086 { 3087 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3088 const RISCVCPUDef *def = data; 3089 3090 mcc->def->misa_mxl_max = def->misa_mxl_max; 3091 riscv_cpu_validate_misa_mxl(mcc); 3092 } 3093 3094 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 3095 int max_str_len) 3096 { 3097 const RISCVIsaExtData *edata; 3098 char *old = *isa_str; 3099 char *new = *isa_str; 3100 3101 for (edata = isa_edata_arr; edata && edata->name; edata++) { 3102 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3103 new = g_strconcat(old, "_", edata->name, NULL); 3104 g_free(old); 3105 old = new; 3106 } 3107 } 3108 3109 *isa_str = new; 3110 } 3111 3112 char *riscv_isa_string(RISCVCPU *cpu) 3113 { 3114 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3115 int i; 3116 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 3117 char *isa_str = g_new(char, maxlen); 3118 int xlen = riscv_cpu_max_xlen(mcc); 3119 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 3120 3121 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3122 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3123 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 3124 } 3125 } 3126 *p = '\0'; 3127 if (!cpu->cfg.short_isa_string) { 3128 riscv_isa_string_ext(cpu, &isa_str, maxlen); 3129 } 3130 return isa_str; 3131 } 3132 3133 #ifndef CONFIG_USER_ONLY 3134 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 3135 { 3136 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 3137 char **extensions = g_new(char *, maxlen); 3138 3139 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3140 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3141 extensions[*count] = g_new(char, 2); 3142 snprintf(extensions[*count], 2, "%c", 3143 qemu_tolower(riscv_single_letter_exts[i])); 3144 (*count)++; 3145 } 3146 } 3147 3148 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 3149 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3150 extensions[*count] = g_strdup(edata->name); 3151 (*count)++; 3152 } 3153 } 3154 3155 return extensions; 3156 } 3157 3158 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 3159 { 3160 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3161 const size_t maxlen = sizeof("rv128i"); 3162 g_autofree char *isa_base = g_new(char, maxlen); 3163 g_autofree char *riscv_isa; 
3164 char **isa_extensions; 3165 int count = 0; 3166 int xlen = riscv_cpu_max_xlen(mcc); 3167 3168 riscv_isa = riscv_isa_string(cpu); 3169 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 3170 3171 snprintf(isa_base, maxlen, "rv%di", xlen); 3172 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 3173 3174 isa_extensions = riscv_isa_extensions_list(cpu, &count); 3175 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 3176 isa_extensions, count); 3177 3178 for (int i = 0; i < count; i++) { 3179 g_free(isa_extensions[i]); 3180 } 3181 3182 g_free(isa_extensions); 3183 } 3184 #endif 3185 3186 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max_, initfn) \ 3187 { \ 3188 .name = (type_name), \ 3189 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 3190 .instance_init = (initfn), \ 3191 .class_init = riscv_cpu_class_init, \ 3192 .class_data = &(const RISCVCPUDef) { \ 3193 .misa_mxl_max = (misa_mxl_max_), \ 3194 }, \ 3195 } 3196 3197 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max_, initfn) \ 3198 { \ 3199 .name = (type_name), \ 3200 .parent = TYPE_RISCV_VENDOR_CPU, \ 3201 .instance_init = (initfn), \ 3202 .class_init = riscv_cpu_class_init, \ 3203 .class_data = &(const RISCVCPUDef) { \ 3204 .misa_mxl_max = (misa_mxl_max_), \ 3205 }, \ 3206 } 3207 3208 #define DEFINE_BARE_CPU(type_name, misa_mxl_max_, initfn) \ 3209 { \ 3210 .name = (type_name), \ 3211 .parent = TYPE_RISCV_BARE_CPU, \ 3212 .instance_init = (initfn), \ 3213 .class_init = riscv_cpu_class_init, \ 3214 .class_data = &(const RISCVCPUDef) { \ 3215 .misa_mxl_max = (misa_mxl_max_), \ 3216 }, \ 3217 } 3218 3219 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max_, initfn) \ 3220 { \ 3221 .name = (type_name), \ 3222 .parent = TYPE_RISCV_BARE_CPU, \ 3223 .instance_init = (initfn), \ 3224 .class_init = riscv_cpu_class_init, \ 3225 .class_data = &(const RISCVCPUDef) { \ 3226 .misa_mxl_max = (misa_mxl_max_), \ 3227 }, \ 3228 } 3229 3230 static const TypeInfo riscv_cpu_type_infos[] = { 3231 { 3232 .name = TYPE_RISCV_CPU, 3233 .parent = TYPE_CPU, 3234 .instance_size = sizeof(RISCVCPU), 3235 .instance_align = __alignof(RISCVCPU), 3236 .instance_init = riscv_cpu_init, 3237 .instance_post_init = riscv_cpu_post_init, 3238 .abstract = true, 3239 .class_size = sizeof(RISCVCPUClass), 3240 .class_init = riscv_cpu_common_class_init, 3241 .class_base_init = riscv_cpu_class_base_init, 3242 }, 3243 { 3244 .name = TYPE_RISCV_DYNAMIC_CPU, 3245 .parent = TYPE_RISCV_CPU, 3246 .abstract = true, 3247 }, 3248 { 3249 .name = TYPE_RISCV_VENDOR_CPU, 3250 .parent = TYPE_RISCV_CPU, 3251 .abstract = true, 3252 }, 3253 { 3254 .name = TYPE_RISCV_BARE_CPU, 3255 .parent = TYPE_RISCV_CPU, 3256 .instance_init = riscv_bare_cpu_init, 3257 .abstract = true, 3258 }, 3259 #if defined(TARGET_RISCV32) 3260 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3261 #elif defined(TARGET_RISCV64) 3262 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3263 #endif 3264 3265 #if defined(TARGET_RISCV32) || \ 3266 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3267 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3268 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3269 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3270 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3271 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3272 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, 
                    MXL_RV32, rv32i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
#endif

#if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init),
#endif

#if defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU,
                      MXL_RV64, rv64_xiangshan_nanhu_cpu_init),
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init),
#endif /* TARGET_RISCV64 */
};

DEFINE_TYPES(riscv_cpu_type_infos)
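/*
 * Usage sketch (informational; the 'rv64'/'max' dynamic and 'rv64i' bare
 * CPU names assumed below are the ones registered by the type table
 * above):
 *
 *     qemu-system-riscv64 -cpu rv64,v=true,vlen=256,elen=64,zba=true,zbb=true
 *     qemu-system-riscv64 -cpu max,pmu-mask=0x18,priv_spec=v1.12.0
 *
 * Vendor CPUs reject changes to most of these options through
 * cpu_set_prop_err(); dynamic and bare CPUs accept them, subject to the
 * validation performed in the property setters above.
 */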