/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)  ((x) ^ 7)
#else
#define BYTE(x)  (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
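/*
 * Multi-letter extensions together with the minimum privileged spec
 * version each one requires. The array is terminated by an empty entry.
 */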
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
    ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
    ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
    ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
    ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
    ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
    ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
    ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
    ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
    ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    { },
};

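/*
 * Accessors for the boolean extension flags in RISCVCPUConfig, addressed
 * by the byte offsets generated with CPU_CFG_OFFSET().
 */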
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "double_trap",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

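/*
 * The 'max' CPU type: MMU and PMP enabled, latest privileged spec, and
 * the widest satp mode for the current XLEN (Sv32 on RV32, Sv57 otherwise).
 */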
static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Tenstorrent Ascalon */
static void rv64_tt_ascalon_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
    env->priv_ver = PRIV_VERSION_1_13_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.vlenb = 256 >> 3;
    cpu->cfg.elen = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.rvv_ma_all_1s = true;
    cpu->cfg.rvv_ta_all_1s = true;
    cpu->cfg.misa_w = true;
    cpu->cfg.pmp = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zic64b = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.ext_zicbop = true;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zicond = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zihintntl = true;
    cpu->cfg.ext_zihintpause = true;
    cpu->cfg.ext_zihpm = true;
    cpu->cfg.ext_zimop = true;
    cpu->cfg.ext_zawrs = true;
    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfbfmin = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.ext_zfhmin = true;
    cpu->cfg.ext_zcb = true;
    cpu->cfg.ext_zcmop = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_zkt = true;
    cpu->cfg.ext_zvbb = true;
    cpu->cfg.ext_zvbc = true;
    cpu->cfg.ext_zvfbfmin = true;
    cpu->cfg.ext_zvfbfwma = true;
    cpu->cfg.ext_zvfh = true;
    cpu->cfg.ext_zvfhmin = true;
    cpu->cfg.ext_zvkng = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svade = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}

static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbkb = true;
    cpu->cfg.ext_zbkc = true;
    cpu->cfg.ext_zbkx = true;
    cpu->cfg.ext_zknd = true;
    cpu->cfg.ext_zkne = true;
    cpu->cfg.ext_zknh = true;
    cpu->cfg.ext_zksed = true;
    cpu->cfg.ext_zksh = true;
    cpu->cfg.ext_svinval = true;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif
}

#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
}
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */

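/*
 * Bare CPU models: only the base ISA is set here; any other extension
 * has to be enabled explicitly by the user.
 */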
static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#endif /* !TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

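/* Return the user-visible CPU model name derived from the QOM typename. */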
char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

#ifndef CONFIG_USER_ONLY
bool riscv_cpu_has_work(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
}
#endif /* !CONFIG_USER_ONLY */

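/*
 * Hold-phase reset: enter M-mode (U-mode for user-only builds) at the
 * reset vector with MIE/MPRV cleared; local interrupt priorities, the
 * *envcfg registers and the PMP lock state are re-initialised below.
 */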
static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
        if (riscv_cpu_cfg(env)->ext_smdbltrp) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (cpu->cfg.ext_smrnmi) {
        env->rnmip = 0;
        env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    /*
     * A couple of bits in MSTATUS set the endianness:
     *  - MSTATUS_UBE (User-mode),
     *  - MSTATUS_SBE (Supervisor-mode),
     *  - MSTATUS_MBE (Machine-mode)
     * but we don't implement that yet.
     */
    info->endian = BFD_ENDIAN_LITTLE;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
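/*
 * QOM getter/setter backing the per-mode satp properties ("sv32", "sv39",
 * ...); @opaque points to the CPU's RISCVSATPMap.
 */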
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}

static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
    riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

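/*
 * Common instance init: set the class' maximum MISA MXL, wire the IRQ and
 * RNMI input lines, and establish the default configuration values.
 */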
static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

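/* Pick the gdb core XML file matching the class' maximum MISA MXL. */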
static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
    MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
    MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
    MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
    MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
    MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
    MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
    MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
    MULTI_EXT_CFG_BOOL("supm", ext_supm, false),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
    MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
    MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
    MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    { },
};

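/* Vendor-specific (X*) extensions, all disabled by default. */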
const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    { },
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),

    { },
};

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disabled) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
    MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
    MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
    MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),

    { },
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    { },
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

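/*
 * Legacy "pmu-num" property, kept for backward compatibility: it is
 * translated into the equivalent "pmu-mask" value.
 */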
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .type = "int8",
    .description = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};

static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t value;
    uint8_t pmu_num;

    visit_type_uint32(v, name, &value, errp);

    if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %x\n",
                          name, cpu->cfg.pmu_mask);
        return;
    }

    pmu_num = ctpop32(value);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmu_mask = value;
}

static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    /* cfg.pmu_mask is 32 bits wide; read it back without truncating it */
    uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;

    visit_type_uint32(v, name, &pmu_mask, errp);
}

static const PropertyInfo prop_pmu_mask = {
    .type = "uint32",
    .description = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};

static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, "mmu", errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .type = "bool",
    .description = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .type = "bool",
    .description = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
        priv_version = PRIV_VERSION_1_13_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}

const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    case PRIV_VERSION_1_13_0:
        return PRIV_VER_1_13_0_STR;
    default:
        return NULL;
    }
}

static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    visit_type_str(v, name, &value, errp);

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .type = "str",
    .description = "priv_spec",
    /* FIXME enum? */
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    visit_type_str(v, name, &value, errp);

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .type = "str",
    .description = "vext_spec",
    /* FIXME enum? */
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};

static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu_vlen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_vlen = {
    .type = "uint16",
    .description = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};

static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .type = "uint16",
    .description = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};

static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .type = "uint16",
    .description = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .type = "uint16",
    .description = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};

static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cboz_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cboz_blocksize = value;
}

static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cboz_blksize = {
    .type = "uint16",
    .description = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};

static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static const PropertyInfo prop_mvendorid = {
    .type = "uint32",
    .description = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};

static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        /* print the previous value in hex to match the "0x" prefix */
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .type = "uint64",
    .description = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        /* print the previous value in hex to match the "0x" prefix */
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    /* use an unsigned shift so mxlen == 64 doesn't overflow a signed type */
    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                   "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .type = "uint64",
    .description = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};

/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .u_parent = NULL,
    .s_parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache-related features that we consider enabled since we don't
 * implement caches: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * All mandatory extensions from RVA22U64 are present
 * in RVA23U64, so we set RVA22U64 as its parent and only
 * declare the newly added mandatory extensions here.
 */
static RISCVCPUProfile RVA23U64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva23u64",
    .misa_ext = RVV,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
        CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
        CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
        CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
        CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
        CPU_CFG_OFFSET(ext_supm),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA23U64, RVA23S64 also defines 'named features'.
 *
 * Cache-related features that we consider enabled since we don't
 * implement caches: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw, Ssu64xl
 *
 * The remaining features/extensions come from RVA23U64.
 */
static RISCVCPUProfile RVA23S64 = {
    .u_parent = &RVA23U64,
    .s_parent = &RVA22S64,
    .name = "rva23s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_13_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* New in RVA23S64 */
        CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
        CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),

        /* Named features: Sha */
        CPU_CFG_OFFSET(ext_sha),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    &RVA23U64,
    &RVA23S64,
    NULL,
};

static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

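/*
 * Illustrative note (not from the original source): a rule can imply
 * both MISA bits and multi-letter extensions. ZCD_IMPLIED below, for
 * example, sets implied_misa_exts = RVD in addition to listing Zca in
 * implied_multi_exts.
 */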
static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcd),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zce),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
        CPU_CFG_OFFSET(ext_zcmt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcf),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmp),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmt),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zdinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfa),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfbfmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfhmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zhinxmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinxmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zicntr),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zihpm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zk),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
        CPU_CFG_OFFSET(ext_zkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

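/*
 * Illustrative note (not from the original source): per ZK_IMPLIED above,
 * zk pulls in zkn, zkr and zkt; ZKN_IMPLIED below then expands zkn to
 * zbkb/zbkc/zbkx/zkne/zknd/zknh, assuming the implication rules are
 * applied transitively when the CPU is realized.
 */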
static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
        CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
        CPU_CFG_OFFSET(ext_zksh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvbb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkb),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64d),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfhmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

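/*
 * Illustrative note (not from the original source): the composite vector
 * crypto extensions are expressed as implication rules here, e.g. zvknc
 * above expands to zvkn + zvbc, and zvkng below expands to zvkn + zvkg;
 * the zvks/zvksc/zvksg rules further down follow the same pattern.
 */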
static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkng),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknhb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksg),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_ssccfg),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
        CPU_CFG_OFFSET(ext_smcdeleg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_supm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_sspm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_smnpm),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_smctr),
    .implied_misa_exts = RVS,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_sscsrind),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_ssctr),
    .implied_misa_exts = RVS,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_sscsrind),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
    &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
    &RVM_IMPLIED, &RVV_IMPLIED, NULL
};

RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
    &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
    &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
    &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
    &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
    &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
    &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
    &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
    &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
    &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
    &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
    &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
    &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
    &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED,
    NULL
};

static const Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
    DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec,
                       DEFAULT_RNMI_IRQVEC),
    DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec,
                       DEFAULT_RNMI_EXCPVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
    DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),

    /*
     * write_misa() is marked as experimental for now, so mark
     * it with the 'x-' prefix and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
};

#if defined(TARGET_RISCV64)
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}

static void rva23u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA23U64.enabled = true;
}

static void rva23s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA23S64.enabled = true;
}
#endif

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .has_work = riscv_cpu_has_work,
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

static void riscv_cpu_common_class_init(ObjectClass *c, const void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
#ifdef CONFIG_TCG
    cc->tcg_ops = &riscv_tcg_ops;
#endif /* CONFIG_TCG */

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_cpu_class_init(ObjectClass *c, const void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (RISCVMXL)GPOINTER_TO_UINT(data);
    riscv_cpu_validate_misa_mxl(mcc);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

#ifndef CONFIG_USER_ONLY
static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
{
    int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
    char **extensions = g_new(char *, maxlen);

    for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            extensions[*count] = g_new(char, 2);
            snprintf(extensions[*count], 2, "%c",
                     qemu_tolower(riscv_single_letter_exts[i]));
            (*count)++;
        }
    }

    for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            extensions[*count] = g_strdup(edata->name);
            (*count)++;
        }
    }

    return extensions;
}

void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    const size_t maxlen = sizeof("rv128i");
    g_autofree char *isa_base = g_new(char, maxlen);
    g_autofree char *riscv_isa;
    char **isa_extensions;
    int count = 0;
    int xlen = riscv_cpu_max_xlen(mcc);

    riscv_isa = riscv_isa_string(cpu);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);

    snprintf(isa_base, maxlen, "rv%di", xlen);
    qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);

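    /*
     * Illustrative example (values assumed, not taken from a real run):
     * on a 64-bit CPU with misa "IMAFDC" plus Zicsr and Zifencei enabled,
     * the node gets "riscv,isa-base" = "rv64i" and "riscv,isa-extensions" =
     * "i", "m", "a", "f", "d", "c", "zicsr", "zifencei".
     */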
    isa_extensions = riscv_isa_extensions_list(cpu, &count);
    qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
                                  isa_extensions, count);

    for (int i = 0; i < count; i++) {
        g_free(isa_extensions[i]);
    }

    g_free(isa_extensions);
}
#endif

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_DYNAMIC_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = GUINT_TO_POINTER(misa_mxl_max) \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_VENDOR_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = GUINT_TO_POINTER(misa_mxl_max) \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = GUINT_TO_POINTER(misa_mxl_max) \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = GUINT_TO_POINTER(misa_mxl_max) \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .instance_init = riscv_bare_cpu_init,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
#endif

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
#endif

#if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init),
#endif

#if defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU,
                      MXL_RV64, rv64_xiangshan_nanhu_cpu_init),
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init),
#endif /* TARGET_RISCV64 */
};

DEFINE_TYPES(riscv_cpu_type_infos)
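
/*
 * Usage sketch (illustrative, assuming the usual "<name>-riscv-cpu" type
 * naming): the dynamic and profile CPUs registered above are selected on
 * the command line with e.g. "-cpu max", "-cpu rv64" or "-cpu rva22s64",
 * optionally combined with the properties defined in this file, such as
 * "-cpu rv64,vlen=256,zicond=true".
 */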