/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)  ((x) ^ 7)
#else
#define BYTE(x)  (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user-set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * These are the ordering rules for extension naming defined by the
 * RISC-V specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
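 *
 * As a purely illustrative example (not a string produced by this file),
 * a riscv,isa string that follows the rules above could look like:
 *
 *   rv64imafdc_zicsr_zifencei_zba_zbb_zbs_sstc_svinval_xtheadba
 *
 * single-letter extensions first, 'Z' extensions grouped by category
 * ('Zi' before 'Zb') and sorted alphabetically within a category,
 * 'S' extensions after the unprivileged ones, and the vendor 'X'
 * extension last.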
98 */ 99 const RISCVIsaExtData isa_edata_arr[] = { 100 ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b), 101 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), 102 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop), 103 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), 104 ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11), 105 ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11), 106 ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11), 107 ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse), 108 ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp), 109 ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss), 110 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 111 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), 112 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), 113 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei), 114 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 115 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 116 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), 117 ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop), 118 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 119 ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12), 120 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), 121 ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha), 122 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), 123 ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), 124 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), 125 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 126 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 127 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 128 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 129 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 130 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 131 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 132 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 133 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 134 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 135 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 136 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 137 ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop), 138 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 139 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 140 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 141 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 142 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 143 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 144 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 145 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 146 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 147 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 148 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 149 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 150 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 151 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 152 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 153 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 154 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 155 ISA_EXT_DATA_ENTRY(zksh, 
PRIV_VERSION_1_12_0, ext_zksh), 156 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 157 ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso), 158 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 159 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 160 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 161 ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x), 162 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 163 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 164 ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x), 165 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 166 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 167 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 168 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 169 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb), 170 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 171 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn), 172 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc), 173 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 174 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng), 175 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 176 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 177 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks), 178 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc), 179 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 180 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg), 181 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 182 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), 183 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 184 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 185 ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), 186 ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha), 187 ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12), 188 ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12), 189 ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12), 190 ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12), 191 ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), 192 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 193 ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg), 194 ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf), 195 ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind), 196 ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp), 197 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), 198 ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi), 199 ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm), 200 ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm), 201 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 202 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 203 ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg), 204 ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11), 205 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 206 ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), 207 ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind), 208 ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp), 209 ISA_EXT_DATA_ENTRY(ssnpm, 
PRIV_VERSION_1_13_0, ext_ssnpm), 210 ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm), 211 ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen), 212 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 213 ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12), 214 ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), 215 ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12), 216 ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm), 217 ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade), 218 ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr), 219 ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr), 220 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 221 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 222 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 223 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 224 ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte), 225 ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc), 226 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 227 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 228 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 229 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 230 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 231 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 232 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 233 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 234 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 235 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 236 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 237 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 238 239 { }, 240 }; 241 242 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) 243 { 244 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 245 246 return *ext_enabled; 247 } 248 249 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) 250 { 251 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 252 253 *ext_enabled = en; 254 } 255 256 bool riscv_cpu_is_vendor(Object *cpu_obj) 257 { 258 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; 259 } 260 261 const char * const riscv_int_regnames[] = { 262 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 263 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 264 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 265 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 266 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 267 }; 268 269 const char * const riscv_int_regnamesh[] = { 270 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 271 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 272 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 273 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 274 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 275 "x30h/t5h", "x31h/t6h" 276 }; 277 278 const char * const riscv_fpr_regnames[] = { 279 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 280 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 281 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", 
"f17/fa7", 282 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 283 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 284 "f30/ft10", "f31/ft11" 285 }; 286 287 const char * const riscv_rvv_regnames[] = { 288 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 289 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 290 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 291 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 292 "v28", "v29", "v30", "v31" 293 }; 294 295 static const char * const riscv_excp_names[] = { 296 "misaligned_fetch", 297 "fault_fetch", 298 "illegal_instruction", 299 "breakpoint", 300 "misaligned_load", 301 "fault_load", 302 "misaligned_store", 303 "fault_store", 304 "user_ecall", 305 "supervisor_ecall", 306 "hypervisor_ecall", 307 "machine_ecall", 308 "exec_page_fault", 309 "load_page_fault", 310 "reserved", 311 "store_page_fault", 312 "double_trap", 313 "reserved", 314 "reserved", 315 "reserved", 316 "guest_exec_page_fault", 317 "guest_load_page_fault", 318 "reserved", 319 "guest_store_page_fault", 320 }; 321 322 static const char * const riscv_intr_names[] = { 323 "u_software", 324 "s_software", 325 "vs_software", 326 "m_software", 327 "u_timer", 328 "s_timer", 329 "vs_timer", 330 "m_timer", 331 "u_external", 332 "s_external", 333 "vs_external", 334 "m_external", 335 "reserved", 336 "reserved", 337 "reserved", 338 "reserved" 339 }; 340 341 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 342 { 343 if (async) { 344 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 345 riscv_intr_names[cause] : "(unknown)"; 346 } else { 347 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 348 riscv_excp_names[cause] : "(unknown)"; 349 } 350 } 351 352 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 353 { 354 env->misa_ext_mask = env->misa_ext = ext; 355 } 356 357 int riscv_cpu_max_xlen(RISCVCPUClass *mcc) 358 { 359 return 16 << mcc->misa_mxl_max; 360 } 361 362 #ifndef CONFIG_USER_ONLY 363 static uint8_t satp_mode_from_str(const char *satp_mode_str) 364 { 365 if (!strncmp(satp_mode_str, "mbare", 5)) { 366 return VM_1_10_MBARE; 367 } 368 369 if (!strncmp(satp_mode_str, "sv32", 4)) { 370 return VM_1_10_SV32; 371 } 372 373 if (!strncmp(satp_mode_str, "sv39", 4)) { 374 return VM_1_10_SV39; 375 } 376 377 if (!strncmp(satp_mode_str, "sv48", 4)) { 378 return VM_1_10_SV48; 379 } 380 381 if (!strncmp(satp_mode_str, "sv57", 4)) { 382 return VM_1_10_SV57; 383 } 384 385 if (!strncmp(satp_mode_str, "sv64", 4)) { 386 return VM_1_10_SV64; 387 } 388 389 g_assert_not_reached(); 390 } 391 392 uint8_t satp_mode_max_from_map(uint32_t map) 393 { 394 /* 395 * 'map = 0' will make us return (31 - 32), which C will 396 * happily overflow to UINT_MAX. There's no good result to 397 * return if 'map = 0' (e.g. returning 0 will be ambiguous 398 * with the result for 'map = 1'). 399 * 400 * Assert out if map = 0. Callers will have to deal with 401 * it outside of this function. 
402 */ 403 g_assert(map > 0); 404 405 /* map here has at least one bit set, so no problem with clz */ 406 return 31 - __builtin_clz(map); 407 } 408 409 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 410 { 411 if (is_32_bit) { 412 switch (satp_mode) { 413 case VM_1_10_SV32: 414 return "sv32"; 415 case VM_1_10_MBARE: 416 return "none"; 417 } 418 } else { 419 switch (satp_mode) { 420 case VM_1_10_SV64: 421 return "sv64"; 422 case VM_1_10_SV57: 423 return "sv57"; 424 case VM_1_10_SV48: 425 return "sv48"; 426 case VM_1_10_SV39: 427 return "sv39"; 428 case VM_1_10_MBARE: 429 return "none"; 430 } 431 } 432 433 g_assert_not_reached(); 434 } 435 436 static void set_satp_mode_max_supported(RISCVCPU *cpu, 437 uint8_t satp_mode) 438 { 439 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 440 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 441 442 for (int i = 0; i <= satp_mode; ++i) { 443 if (valid_vm[i]) { 444 cpu->cfg.satp_mode.supported |= (1 << i); 445 } 446 } 447 } 448 449 /* Set the satp mode to the max supported */ 450 static void set_satp_mode_default_map(RISCVCPU *cpu) 451 { 452 /* 453 * Bare CPUs do not default to the max available. 454 * Users must set a valid satp_mode in the command 455 * line. 456 */ 457 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { 458 warn_report("No satp mode set. Defaulting to 'bare'"); 459 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); 460 return; 461 } 462 463 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 464 } 465 #endif 466 467 static void riscv_max_cpu_init(Object *obj) 468 { 469 RISCVCPU *cpu = RISCV_CPU(obj); 470 CPURISCVState *env = &cpu->env; 471 472 cpu->cfg.mmu = true; 473 cpu->cfg.pmp = true; 474 475 env->priv_ver = PRIV_VERSION_LATEST; 476 #ifndef CONFIG_USER_ONLY 477 set_satp_mode_max_supported(RISCV_CPU(obj), 478 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 
479 VM_1_10_SV32 : VM_1_10_SV57); 480 #endif 481 } 482 483 #if defined(TARGET_RISCV64) 484 static void rv64_base_cpu_init(Object *obj) 485 { 486 RISCVCPU *cpu = RISCV_CPU(obj); 487 CPURISCVState *env = &cpu->env; 488 489 cpu->cfg.mmu = true; 490 cpu->cfg.pmp = true; 491 492 /* Set latest version of privileged specification */ 493 env->priv_ver = PRIV_VERSION_LATEST; 494 #ifndef CONFIG_USER_ONLY 495 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 496 #endif 497 } 498 499 static void rv64_sifive_u_cpu_init(Object *obj) 500 { 501 RISCVCPU *cpu = RISCV_CPU(obj); 502 CPURISCVState *env = &cpu->env; 503 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 504 env->priv_ver = PRIV_VERSION_1_10_0; 505 #ifndef CONFIG_USER_ONLY 506 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 507 #endif 508 509 /* inherited from parent obj via riscv_cpu_init() */ 510 cpu->cfg.ext_zifencei = true; 511 cpu->cfg.ext_zicsr = true; 512 cpu->cfg.mmu = true; 513 cpu->cfg.pmp = true; 514 } 515 516 static void rv64_sifive_e_cpu_init(Object *obj) 517 { 518 CPURISCVState *env = &RISCV_CPU(obj)->env; 519 RISCVCPU *cpu = RISCV_CPU(obj); 520 521 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 522 env->priv_ver = PRIV_VERSION_1_10_0; 523 #ifndef CONFIG_USER_ONLY 524 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 525 #endif 526 527 /* inherited from parent obj via riscv_cpu_init() */ 528 cpu->cfg.ext_zifencei = true; 529 cpu->cfg.ext_zicsr = true; 530 cpu->cfg.pmp = true; 531 } 532 533 static void rv64_thead_c906_cpu_init(Object *obj) 534 { 535 CPURISCVState *env = &RISCV_CPU(obj)->env; 536 RISCVCPU *cpu = RISCV_CPU(obj); 537 538 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); 539 env->priv_ver = PRIV_VERSION_1_11_0; 540 541 cpu->cfg.ext_zfa = true; 542 cpu->cfg.ext_zfh = true; 543 cpu->cfg.mmu = true; 544 cpu->cfg.ext_xtheadba = true; 545 cpu->cfg.ext_xtheadbb = true; 546 cpu->cfg.ext_xtheadbs = true; 547 cpu->cfg.ext_xtheadcmo = true; 548 cpu->cfg.ext_xtheadcondmov = true; 549 cpu->cfg.ext_xtheadfmemidx = true; 550 cpu->cfg.ext_xtheadmac = true; 551 cpu->cfg.ext_xtheadmemidx = true; 552 cpu->cfg.ext_xtheadmempair = true; 553 cpu->cfg.ext_xtheadsync = true; 554 555 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 556 #ifndef CONFIG_USER_ONLY 557 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 558 th_register_custom_csrs(cpu); 559 #endif 560 561 /* inherited from parent obj via riscv_cpu_init() */ 562 cpu->cfg.pmp = true; 563 } 564 565 static void rv64_veyron_v1_cpu_init(Object *obj) 566 { 567 CPURISCVState *env = &RISCV_CPU(obj)->env; 568 RISCVCPU *cpu = RISCV_CPU(obj); 569 570 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH); 571 env->priv_ver = PRIV_VERSION_1_12_0; 572 573 /* Enable ISA extensions */ 574 cpu->cfg.mmu = true; 575 cpu->cfg.ext_zifencei = true; 576 cpu->cfg.ext_zicsr = true; 577 cpu->cfg.pmp = true; 578 cpu->cfg.ext_zicbom = true; 579 cpu->cfg.cbom_blocksize = 64; 580 cpu->cfg.cboz_blocksize = 64; 581 cpu->cfg.ext_zicboz = true; 582 cpu->cfg.ext_smaia = true; 583 cpu->cfg.ext_ssaia = true; 584 cpu->cfg.ext_sscofpmf = true; 585 cpu->cfg.ext_sstc = true; 586 cpu->cfg.ext_svinval = true; 587 cpu->cfg.ext_svnapot = true; 588 cpu->cfg.ext_svpbmt = true; 589 cpu->cfg.ext_smstateen = true; 590 cpu->cfg.ext_zba = true; 591 cpu->cfg.ext_zbb = true; 592 cpu->cfg.ext_zbc = true; 593 cpu->cfg.ext_zbs = true; 594 cpu->cfg.ext_XVentanaCondOps = true; 595 596 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 597 cpu->cfg.marchid = VEYRON_V1_MARCHID; 598 cpu->cfg.mimpid = 
VEYRON_V1_MIMPID; 599 600 #ifndef CONFIG_USER_ONLY 601 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 602 #endif 603 } 604 605 /* Tenstorrent Ascalon */ 606 static void rv64_tt_ascalon_cpu_init(Object *obj) 607 { 608 CPURISCVState *env = &RISCV_CPU(obj)->env; 609 RISCVCPU *cpu = RISCV_CPU(obj); 610 611 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV); 612 env->priv_ver = PRIV_VERSION_1_13_0; 613 614 /* Enable ISA extensions */ 615 cpu->cfg.mmu = true; 616 cpu->cfg.vlenb = 256 >> 3; 617 cpu->cfg.elen = 64; 618 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 619 cpu->cfg.rvv_ma_all_1s = true; 620 cpu->cfg.rvv_ta_all_1s = true; 621 cpu->cfg.misa_w = true; 622 cpu->cfg.pmp = true; 623 cpu->cfg.cbom_blocksize = 64; 624 cpu->cfg.cbop_blocksize = 64; 625 cpu->cfg.cboz_blocksize = 64; 626 cpu->cfg.ext_zic64b = true; 627 cpu->cfg.ext_zicbom = true; 628 cpu->cfg.ext_zicbop = true; 629 cpu->cfg.ext_zicboz = true; 630 cpu->cfg.ext_zicntr = true; 631 cpu->cfg.ext_zicond = true; 632 cpu->cfg.ext_zicsr = true; 633 cpu->cfg.ext_zifencei = true; 634 cpu->cfg.ext_zihintntl = true; 635 cpu->cfg.ext_zihintpause = true; 636 cpu->cfg.ext_zihpm = true; 637 cpu->cfg.ext_zimop = true; 638 cpu->cfg.ext_zawrs = true; 639 cpu->cfg.ext_zfa = true; 640 cpu->cfg.ext_zfbfmin = true; 641 cpu->cfg.ext_zfh = true; 642 cpu->cfg.ext_zfhmin = true; 643 cpu->cfg.ext_zcb = true; 644 cpu->cfg.ext_zcmop = true; 645 cpu->cfg.ext_zba = true; 646 cpu->cfg.ext_zbb = true; 647 cpu->cfg.ext_zbs = true; 648 cpu->cfg.ext_zkt = true; 649 cpu->cfg.ext_zvbb = true; 650 cpu->cfg.ext_zvbc = true; 651 cpu->cfg.ext_zvfbfmin = true; 652 cpu->cfg.ext_zvfbfwma = true; 653 cpu->cfg.ext_zvfh = true; 654 cpu->cfg.ext_zvfhmin = true; 655 cpu->cfg.ext_zvkng = true; 656 cpu->cfg.ext_smaia = true; 657 cpu->cfg.ext_smstateen = true; 658 cpu->cfg.ext_ssaia = true; 659 cpu->cfg.ext_sscofpmf = true; 660 cpu->cfg.ext_sstc = true; 661 cpu->cfg.ext_svade = true; 662 cpu->cfg.ext_svinval = true; 663 cpu->cfg.ext_svnapot = true; 664 cpu->cfg.ext_svpbmt = true; 665 666 #ifndef CONFIG_USER_ONLY 667 set_satp_mode_max_supported(cpu, VM_1_10_SV57); 668 #endif 669 } 670 671 static void rv64_xiangshan_nanhu_cpu_init(Object *obj) 672 { 673 CPURISCVState *env = &RISCV_CPU(obj)->env; 674 RISCVCPU *cpu = RISCV_CPU(obj); 675 676 riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU); 677 env->priv_ver = PRIV_VERSION_1_12_0; 678 679 /* Enable ISA extensions */ 680 cpu->cfg.ext_zbc = true; 681 cpu->cfg.ext_zbkb = true; 682 cpu->cfg.ext_zbkc = true; 683 cpu->cfg.ext_zbkx = true; 684 cpu->cfg.ext_zknd = true; 685 cpu->cfg.ext_zkne = true; 686 cpu->cfg.ext_zknh = true; 687 cpu->cfg.ext_zksed = true; 688 cpu->cfg.ext_zksh = true; 689 cpu->cfg.ext_svinval = true; 690 691 cpu->cfg.mmu = true; 692 cpu->cfg.pmp = true; 693 694 #ifndef CONFIG_USER_ONLY 695 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 696 #endif 697 } 698 699 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 700 static void rv128_base_cpu_init(Object *obj) 701 { 702 RISCVCPU *cpu = RISCV_CPU(obj); 703 CPURISCVState *env = &cpu->env; 704 705 cpu->cfg.mmu = true; 706 cpu->cfg.pmp = true; 707 708 /* Set latest version of privileged specification */ 709 env->priv_ver = PRIV_VERSION_LATEST; 710 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 711 } 712 #endif /* CONFIG_TCG && !CONFIG_USER_ONLY */ 713 714 static void rv64i_bare_cpu_init(Object *obj) 715 { 716 CPURISCVState *env = &RISCV_CPU(obj)->env; 717 riscv_cpu_set_misa_ext(env, RVI); 718 } 719 720 static void rv64e_bare_cpu_init(Object *obj) 721 
{ 722 CPURISCVState *env = &RISCV_CPU(obj)->env; 723 riscv_cpu_set_misa_ext(env, RVE); 724 } 725 726 #endif /* !TARGET_RISCV64 */ 727 728 #if defined(TARGET_RISCV32) || \ 729 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 730 731 static void rv32_base_cpu_init(Object *obj) 732 { 733 RISCVCPU *cpu = RISCV_CPU(obj); 734 CPURISCVState *env = &cpu->env; 735 736 cpu->cfg.mmu = true; 737 cpu->cfg.pmp = true; 738 739 /* Set latest version of privileged specification */ 740 env->priv_ver = PRIV_VERSION_LATEST; 741 #ifndef CONFIG_USER_ONLY 742 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 743 #endif 744 } 745 746 static void rv32_sifive_u_cpu_init(Object *obj) 747 { 748 RISCVCPU *cpu = RISCV_CPU(obj); 749 CPURISCVState *env = &cpu->env; 750 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 751 env->priv_ver = PRIV_VERSION_1_10_0; 752 #ifndef CONFIG_USER_ONLY 753 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 754 #endif 755 756 /* inherited from parent obj via riscv_cpu_init() */ 757 cpu->cfg.ext_zifencei = true; 758 cpu->cfg.ext_zicsr = true; 759 cpu->cfg.mmu = true; 760 cpu->cfg.pmp = true; 761 } 762 763 static void rv32_sifive_e_cpu_init(Object *obj) 764 { 765 CPURISCVState *env = &RISCV_CPU(obj)->env; 766 RISCVCPU *cpu = RISCV_CPU(obj); 767 768 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 769 env->priv_ver = PRIV_VERSION_1_10_0; 770 #ifndef CONFIG_USER_ONLY 771 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 772 #endif 773 774 /* inherited from parent obj via riscv_cpu_init() */ 775 cpu->cfg.ext_zifencei = true; 776 cpu->cfg.ext_zicsr = true; 777 cpu->cfg.pmp = true; 778 } 779 780 static void rv32_ibex_cpu_init(Object *obj) 781 { 782 CPURISCVState *env = &RISCV_CPU(obj)->env; 783 RISCVCPU *cpu = RISCV_CPU(obj); 784 785 riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); 786 env->priv_ver = PRIV_VERSION_1_12_0; 787 #ifndef CONFIG_USER_ONLY 788 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 789 #endif 790 /* inherited from parent obj via riscv_cpu_init() */ 791 cpu->cfg.ext_zifencei = true; 792 cpu->cfg.ext_zicsr = true; 793 cpu->cfg.pmp = true; 794 cpu->cfg.ext_smepmp = true; 795 796 cpu->cfg.ext_zba = true; 797 cpu->cfg.ext_zbb = true; 798 cpu->cfg.ext_zbc = true; 799 cpu->cfg.ext_zbs = true; 800 } 801 802 static void rv32_imafcu_nommu_cpu_init(Object *obj) 803 { 804 CPURISCVState *env = &RISCV_CPU(obj)->env; 805 RISCVCPU *cpu = RISCV_CPU(obj); 806 807 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); 808 env->priv_ver = PRIV_VERSION_1_10_0; 809 #ifndef CONFIG_USER_ONLY 810 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 811 #endif 812 813 /* inherited from parent obj via riscv_cpu_init() */ 814 cpu->cfg.ext_zifencei = true; 815 cpu->cfg.ext_zicsr = true; 816 cpu->cfg.pmp = true; 817 } 818 819 static void rv32i_bare_cpu_init(Object *obj) 820 { 821 CPURISCVState *env = &RISCV_CPU(obj)->env; 822 riscv_cpu_set_misa_ext(env, RVI); 823 } 824 825 static void rv32e_bare_cpu_init(Object *obj) 826 { 827 CPURISCVState *env = &RISCV_CPU(obj)->env; 828 riscv_cpu_set_misa_ext(env, RVE); 829 } 830 #endif 831 832 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 833 { 834 ObjectClass *oc; 835 char *typename; 836 char **cpuname; 837 838 cpuname = g_strsplit(cpu_model, ",", 1); 839 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 840 oc = object_class_by_name(typename); 841 g_strfreev(cpuname); 842 g_free(typename); 843 844 return oc; 845 } 846 847 char *riscv_cpu_get_name(RISCVCPU *cpu) 
848 { 849 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 850 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 851 852 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 853 854 return cpu_model_from_type(typename); 855 } 856 857 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 858 { 859 RISCVCPU *cpu = RISCV_CPU(cs); 860 CPURISCVState *env = &cpu->env; 861 int i, j; 862 uint8_t *p; 863 864 #if !defined(CONFIG_USER_ONLY) 865 if (riscv_has_ext(env, RVH)) { 866 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 867 } 868 #endif 869 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 870 #ifndef CONFIG_USER_ONLY 871 { 872 static const int dump_csrs[] = { 873 CSR_MHARTID, 874 CSR_MSTATUS, 875 CSR_MSTATUSH, 876 /* 877 * CSR_SSTATUS is intentionally omitted here as its value 878 * can be figured out by looking at CSR_MSTATUS 879 */ 880 CSR_HSTATUS, 881 CSR_VSSTATUS, 882 CSR_MIP, 883 CSR_MIE, 884 CSR_MIDELEG, 885 CSR_HIDELEG, 886 CSR_MEDELEG, 887 CSR_HEDELEG, 888 CSR_MTVEC, 889 CSR_STVEC, 890 CSR_VSTVEC, 891 CSR_MEPC, 892 CSR_SEPC, 893 CSR_VSEPC, 894 CSR_MCAUSE, 895 CSR_SCAUSE, 896 CSR_VSCAUSE, 897 CSR_MTVAL, 898 CSR_STVAL, 899 CSR_HTVAL, 900 CSR_MTVAL2, 901 CSR_MSCRATCH, 902 CSR_SSCRATCH, 903 CSR_SATP, 904 }; 905 906 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 907 int csrno = dump_csrs[i]; 908 target_ulong val = 0; 909 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 910 911 /* 912 * Rely on the smode, hmode, etc, predicates within csr.c 913 * to do the filtering of the registers that are present. 914 */ 915 if (res == RISCV_EXCP_NONE) { 916 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 917 csr_ops[csrno].name, val); 918 } 919 } 920 } 921 #endif 922 923 for (i = 0; i < 32; i++) { 924 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 925 riscv_int_regnames[i], env->gpr[i]); 926 if ((i & 3) == 3) { 927 qemu_fprintf(f, "\n"); 928 } 929 } 930 if (flags & CPU_DUMP_FPU) { 931 target_ulong val = 0; 932 RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0); 933 if (res == RISCV_EXCP_NONE) { 934 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 935 csr_ops[CSR_FCSR].name, val); 936 } 937 for (i = 0; i < 32; i++) { 938 qemu_fprintf(f, " %-8s %016" PRIx64, 939 riscv_fpr_regnames[i], env->fpr[i]); 940 if ((i & 3) == 3) { 941 qemu_fprintf(f, "\n"); 942 } 943 } 944 } 945 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 946 static const int dump_rvv_csrs[] = { 947 CSR_VSTART, 948 CSR_VXSAT, 949 CSR_VXRM, 950 CSR_VCSR, 951 CSR_VL, 952 CSR_VTYPE, 953 CSR_VLENB, 954 }; 955 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 956 int csrno = dump_rvv_csrs[i]; 957 target_ulong val = 0; 958 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 959 960 /* 961 * Rely on the smode, hmode, etc, predicates within csr.c 962 * to do the filtering of the registers that are present. 
963 */ 964 if (res == RISCV_EXCP_NONE) { 965 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 966 csr_ops[csrno].name, val); 967 } 968 } 969 uint16_t vlenb = cpu->cfg.vlenb; 970 971 for (i = 0; i < 32; i++) { 972 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 973 p = (uint8_t *)env->vreg; 974 for (j = vlenb - 1 ; j >= 0; j--) { 975 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 976 } 977 qemu_fprintf(f, "\n"); 978 } 979 } 980 } 981 982 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 983 { 984 RISCVCPU *cpu = RISCV_CPU(cs); 985 CPURISCVState *env = &cpu->env; 986 987 if (env->xl == MXL_RV32) { 988 env->pc = (int32_t)value; 989 } else { 990 env->pc = value; 991 } 992 } 993 994 static vaddr riscv_cpu_get_pc(CPUState *cs) 995 { 996 RISCVCPU *cpu = RISCV_CPU(cs); 997 CPURISCVState *env = &cpu->env; 998 999 /* Match cpu_get_tb_cpu_state. */ 1000 if (env->xl == MXL_RV32) { 1001 return env->pc & UINT32_MAX; 1002 } 1003 return env->pc; 1004 } 1005 1006 #ifndef CONFIG_USER_ONLY 1007 bool riscv_cpu_has_work(CPUState *cs) 1008 { 1009 RISCVCPU *cpu = RISCV_CPU(cs); 1010 CPURISCVState *env = &cpu->env; 1011 /* 1012 * Definition of the WFI instruction requires it to ignore the privilege 1013 * mode and delegation registers, but respect individual enables 1014 */ 1015 return riscv_cpu_all_pending(env) != 0 || 1016 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || 1017 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; 1018 } 1019 #endif /* !CONFIG_USER_ONLY */ 1020 1021 static void riscv_cpu_reset_hold(Object *obj, ResetType type) 1022 { 1023 #ifndef CONFIG_USER_ONLY 1024 uint8_t iprio; 1025 int i, irq, rdzero; 1026 #endif 1027 CPUState *cs = CPU(obj); 1028 RISCVCPU *cpu = RISCV_CPU(cs); 1029 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 1030 CPURISCVState *env = &cpu->env; 1031 1032 if (mcc->parent_phases.hold) { 1033 mcc->parent_phases.hold(obj, type); 1034 } 1035 #ifndef CONFIG_USER_ONLY 1036 env->misa_mxl = mcc->misa_mxl_max; 1037 env->priv = PRV_M; 1038 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 1039 if (env->misa_mxl > MXL_RV32) { 1040 /* 1041 * The reset status of SXL/UXL is undefined, but mstatus is WARL 1042 * and we must ensure that the value after init is valid for read. 1043 */ 1044 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 1045 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 1046 if (riscv_has_ext(env, RVH)) { 1047 env->vsstatus = set_field(env->vsstatus, 1048 MSTATUS64_SXL, env->misa_mxl); 1049 env->vsstatus = set_field(env->vsstatus, 1050 MSTATUS64_UXL, env->misa_mxl); 1051 env->mstatus_hs = set_field(env->mstatus_hs, 1052 MSTATUS64_SXL, env->misa_mxl); 1053 env->mstatus_hs = set_field(env->mstatus_hs, 1054 MSTATUS64_UXL, env->misa_mxl); 1055 } 1056 if (riscv_cpu_cfg(env)->ext_smdbltrp) { 1057 env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1); 1058 } 1059 } 1060 env->mcause = 0; 1061 env->miclaim = MIP_SGEIP; 1062 env->pc = env->resetvec; 1063 env->bins = 0; 1064 env->two_stage_lookup = false; 1065 1066 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 1067 (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ? 1068 MENVCFG_ADUE : 0); 1069 env->henvcfg = 0; 1070 1071 /* Initialized default priorities of local interrupts. */ 1072 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 1073 iprio = riscv_cpu_default_priority(i); 1074 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 1075 env->siprio[i] = (i == IRQ_S_EXT) ? 
0 : iprio; 1076 env->hviprio[i] = 0; 1077 } 1078 i = 0; 1079 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 1080 if (!rdzero) { 1081 env->hviprio[irq] = env->miprio[irq]; 1082 } 1083 i++; 1084 } 1085 1086 /* 1087 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor 1088 * extension is enabled. 1089 */ 1090 if (riscv_has_ext(env, RVH)) { 1091 env->mideleg |= HS_MODE_INTERRUPTS; 1092 } 1093 1094 /* 1095 * Clear mseccfg and unlock all the PMP entries upon reset. 1096 * This is allowed as per the priv and smepmp specifications 1097 * and is needed to clear stale entries across reboots. 1098 */ 1099 if (riscv_cpu_cfg(env)->ext_smepmp) { 1100 env->mseccfg = 0; 1101 } 1102 1103 pmp_unlock_entries(env); 1104 #else 1105 env->priv = PRV_U; 1106 env->senvcfg = 0; 1107 env->menvcfg = 0; 1108 #endif 1109 1110 /* on reset elp is clear */ 1111 env->elp = false; 1112 /* on reset ssp is set to 0 */ 1113 env->ssp = 0; 1114 1115 env->xl = riscv_cpu_mxl(env); 1116 cs->exception_index = RISCV_EXCP_NONE; 1117 env->load_res = -1; 1118 set_default_nan_mode(1, &env->fp_status); 1119 /* Default NaN value: sign bit clear, frac msb set */ 1120 set_float_default_nan_pattern(0b01000000, &env->fp_status); 1121 env->vill = true; 1122 1123 #ifndef CONFIG_USER_ONLY 1124 if (cpu->cfg.debug) { 1125 riscv_trigger_reset_hold(env); 1126 } 1127 1128 if (cpu->cfg.ext_smrnmi) { 1129 env->rnmip = 0; 1130 env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false); 1131 } 1132 1133 if (kvm_enabled()) { 1134 kvm_riscv_reset_vcpu(cpu); 1135 } 1136 #endif 1137 } 1138 1139 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 1140 { 1141 RISCVCPU *cpu = RISCV_CPU(s); 1142 CPURISCVState *env = &cpu->env; 1143 info->target_info = &cpu->cfg; 1144 1145 /* 1146 * A couple of bits in MSTATUS set the endianness: 1147 * - MSTATUS_UBE (User-mode), 1148 * - MSTATUS_SBE (Supervisor-mode), 1149 * - MSTATUS_MBE (Machine-mode) 1150 * but we don't implement that yet. 1151 */ 1152 info->endian = BFD_ENDIAN_LITTLE; 1153 1154 switch (env->xl) { 1155 case MXL_RV32: 1156 info->print_insn = print_insn_riscv32; 1157 break; 1158 case MXL_RV64: 1159 info->print_insn = print_insn_riscv64; 1160 break; 1161 case MXL_RV128: 1162 info->print_insn = print_insn_riscv128; 1163 break; 1164 default: 1165 g_assert_not_reached(); 1166 } 1167 } 1168 1169 #ifndef CONFIG_USER_ONLY 1170 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1171 { 1172 bool rv32 = riscv_cpu_is_32bit(cpu); 1173 uint8_t satp_mode_map_max, satp_mode_supported_max; 1174 1175 /* The CPU wants the OS to decide which satp mode to use */ 1176 if (cpu->cfg.satp_mode.supported == 0) { 1177 return; 1178 } 1179 1180 satp_mode_supported_max = 1181 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1182 1183 if (cpu->cfg.satp_mode.map == 0) { 1184 if (cpu->cfg.satp_mode.init == 0) { 1185 /* If unset by the user, we fallback to the default satp mode. */ 1186 set_satp_mode_default_map(cpu); 1187 } else { 1188 /* 1189 * Find the lowest level that was disabled and then enable the 1190 * first valid level below which can be found in 1191 * valid_vm_1_10_32/64. 
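 *
 * For instance (assuming a CPU whose supported maximum is sv57), a user
 * passing only 'sv48=off' leaves the map empty, so the loop below
 * re-enables sv39 as the effective maximum mode.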
1192 */ 1193 for (int i = 1; i < 16; ++i) { 1194 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1195 (cpu->cfg.satp_mode.supported & (1 << i))) { 1196 for (int j = i - 1; j >= 0; --j) { 1197 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1198 cpu->cfg.satp_mode.map |= (1 << j); 1199 break; 1200 } 1201 } 1202 break; 1203 } 1204 } 1205 } 1206 } 1207 1208 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1209 1210 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1211 if (satp_mode_map_max > satp_mode_supported_max) { 1212 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1213 satp_mode_str(satp_mode_map_max, rv32), 1214 satp_mode_str(satp_mode_supported_max, rv32)); 1215 return; 1216 } 1217 1218 /* 1219 * Make sure the user did not ask for an invalid configuration as per 1220 * the specification. 1221 */ 1222 if (!rv32) { 1223 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1224 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1225 (cpu->cfg.satp_mode.init & (1 << i)) && 1226 (cpu->cfg.satp_mode.supported & (1 << i))) { 1227 error_setg(errp, "cannot disable %s satp mode if %s " 1228 "is enabled", satp_mode_str(i, false), 1229 satp_mode_str(satp_mode_map_max, false)); 1230 return; 1231 } 1232 } 1233 } 1234 1235 /* Finally expand the map so that all valid modes are set */ 1236 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1237 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1238 cpu->cfg.satp_mode.map |= (1 << i); 1239 } 1240 } 1241 } 1242 #endif 1243 1244 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1245 { 1246 Error *local_err = NULL; 1247 1248 #ifndef CONFIG_USER_ONLY 1249 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1250 if (local_err != NULL) { 1251 error_propagate(errp, local_err); 1252 return; 1253 } 1254 #endif 1255 1256 if (tcg_enabled()) { 1257 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1258 if (local_err != NULL) { 1259 error_propagate(errp, local_err); 1260 return; 1261 } 1262 riscv_tcg_cpu_finalize_dynamic_decoder(cpu); 1263 } else if (kvm_enabled()) { 1264 riscv_kvm_cpu_finalize_features(cpu, &local_err); 1265 if (local_err != NULL) { 1266 error_propagate(errp, local_err); 1267 return; 1268 } 1269 } 1270 } 1271 1272 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1273 { 1274 CPUState *cs = CPU(dev); 1275 RISCVCPU *cpu = RISCV_CPU(dev); 1276 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1277 Error *local_err = NULL; 1278 1279 cpu_exec_realizefn(cs, &local_err); 1280 if (local_err != NULL) { 1281 error_propagate(errp, local_err); 1282 return; 1283 } 1284 1285 riscv_cpu_finalize_features(cpu, &local_err); 1286 if (local_err != NULL) { 1287 error_propagate(errp, local_err); 1288 return; 1289 } 1290 1291 riscv_cpu_register_gdb_regs_for_features(cs); 1292 1293 #ifndef CONFIG_USER_ONLY 1294 if (cpu->cfg.debug) { 1295 riscv_trigger_realize(&cpu->env); 1296 } 1297 #endif 1298 1299 qemu_init_vcpu(cs); 1300 cpu_reset(cs); 1301 1302 mcc->parent_realize(dev, errp); 1303 } 1304 1305 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1306 { 1307 if (tcg_enabled()) { 1308 return riscv_cpu_tcg_compatible(cpu); 1309 } 1310 1311 return true; 1312 } 1313 1314 #ifndef CONFIG_USER_ONLY 1315 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1316 void *opaque, Error **errp) 1317 { 1318 RISCVSATPMap *satp_map = opaque; 1319 uint8_t satp = satp_mode_from_str(name); 1320 bool value; 1321 1322 value = satp_map->map & (1 << satp); 1323 1324 visit_type_bool(v, name, &value, errp); 
1325 } 1326 1327 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1328 void *opaque, Error **errp) 1329 { 1330 RISCVSATPMap *satp_map = opaque; 1331 uint8_t satp = satp_mode_from_str(name); 1332 bool value; 1333 1334 if (!visit_type_bool(v, name, &value, errp)) { 1335 return; 1336 } 1337 1338 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1339 satp_map->init |= 1 << satp; 1340 } 1341 1342 void riscv_add_satp_mode_properties(Object *obj) 1343 { 1344 RISCVCPU *cpu = RISCV_CPU(obj); 1345 1346 if (cpu->env.misa_mxl == MXL_RV32) { 1347 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1348 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1349 } else { 1350 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1351 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1352 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1353 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1354 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1355 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1356 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1357 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1358 } 1359 } 1360 1361 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1362 { 1363 RISCVCPU *cpu = RISCV_CPU(opaque); 1364 CPURISCVState *env = &cpu->env; 1365 1366 if (irq < IRQ_LOCAL_MAX) { 1367 switch (irq) { 1368 case IRQ_U_SOFT: 1369 case IRQ_S_SOFT: 1370 case IRQ_VS_SOFT: 1371 case IRQ_M_SOFT: 1372 case IRQ_U_TIMER: 1373 case IRQ_S_TIMER: 1374 case IRQ_VS_TIMER: 1375 case IRQ_M_TIMER: 1376 case IRQ_U_EXT: 1377 case IRQ_VS_EXT: 1378 case IRQ_M_EXT: 1379 if (kvm_enabled()) { 1380 kvm_riscv_set_irq(cpu, irq, level); 1381 } else { 1382 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1383 } 1384 break; 1385 case IRQ_S_EXT: 1386 if (kvm_enabled()) { 1387 kvm_riscv_set_irq(cpu, irq, level); 1388 } else { 1389 env->external_seip = level; 1390 riscv_cpu_update_mip(env, 1 << irq, 1391 BOOL_TO_MASK(level | env->software_seip)); 1392 } 1393 break; 1394 default: 1395 g_assert_not_reached(); 1396 } 1397 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1398 /* Require H-extension for handling guest local interrupts */ 1399 if (!riscv_has_ext(env, RVH)) { 1400 g_assert_not_reached(); 1401 } 1402 1403 /* Compute bit position in HGEIP CSR */ 1404 irq = irq - IRQ_LOCAL_MAX + 1; 1405 if (env->geilen < irq) { 1406 g_assert_not_reached(); 1407 } 1408 1409 /* Update HGEIP CSR */ 1410 env->hgeip &= ~((target_ulong)1 << irq); 1411 if (level) { 1412 env->hgeip |= (target_ulong)1 << irq; 1413 } 1414 1415 /* Update mip.SGEIP bit */ 1416 riscv_cpu_update_mip(env, MIP_SGEIP, 1417 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1418 } else { 1419 g_assert_not_reached(); 1420 } 1421 } 1422 1423 static void riscv_cpu_set_nmi(void *opaque, int irq, int level) 1424 { 1425 riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level); 1426 } 1427 #endif /* CONFIG_USER_ONLY */ 1428 1429 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1430 { 1431 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1432 } 1433 1434 static void riscv_cpu_post_init(Object *obj) 1435 { 1436 accel_cpu_instance_init(CPU(obj)); 1437 } 1438 1439 static void riscv_cpu_init(Object *obj) 1440 { 1441 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 1442 RISCVCPU *cpu = RISCV_CPU(obj); 1443 CPURISCVState *env = &cpu->env; 1444 1445 env->misa_mxl = mcc->misa_mxl_max; 1446 1447 #ifndef CONFIG_USER_ONLY 1448 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1449 
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counter extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly.
*/ 1536 switch (mcc->misa_mxl_max) { 1537 #ifdef TARGET_RISCV64 1538 case MXL_RV64: 1539 case MXL_RV128: 1540 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1541 break; 1542 #endif 1543 case MXL_RV32: 1544 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1545 break; 1546 default: 1547 g_assert_not_reached(); 1548 } 1549 } 1550 1551 static int riscv_validate_misa_info_idx(uint32_t bit) 1552 { 1553 int idx; 1554 1555 /* 1556 * Our lowest valid input (RVA) is 1 and 1557 * __builtin_ctz() is UB with zero. 1558 */ 1559 g_assert(bit != 0); 1560 idx = MISA_INFO_IDX(bit); 1561 1562 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1563 return idx; 1564 } 1565 1566 const char *riscv_get_misa_ext_name(uint32_t bit) 1567 { 1568 int idx = riscv_validate_misa_info_idx(bit); 1569 const char *val = misa_ext_info_arr[idx].name; 1570 1571 g_assert(val != NULL); 1572 return val; 1573 } 1574 1575 const char *riscv_get_misa_ext_description(uint32_t bit) 1576 { 1577 int idx = riscv_validate_misa_info_idx(bit); 1578 const char *val = misa_ext_info_arr[idx].description; 1579 1580 g_assert(val != NULL); 1581 return val; 1582 } 1583 1584 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \ 1585 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ 1586 .enabled = _defval} 1587 1588 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { 1589 /* Defaults for standard extensions */ 1590 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), 1591 MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false), 1592 MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false), 1593 MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false), 1594 MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false), 1595 MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false), 1596 MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false), 1597 MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false), 1598 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), 1599 MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false), 1600 MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false), 1601 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), 1602 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), 1603 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), 1604 MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false), 1605 MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false), 1606 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), 1607 MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), 1608 MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false), 1609 MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), 1610 MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), 1611 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), 1612 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), 1613 MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false), 1614 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), 1615 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false), 1616 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false), 1617 MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false), 1618 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false), 1619 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false), 1620 MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false), 1621 MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false), 1622 MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false), 1623 MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false), 1624 MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false), 1625 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), 1626 MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false), 1627 MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false), 1628 MULTI_EXT_CFG_BOOL("supm", ext_supm, false), 1629 1630 MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false), 1631 
MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false), 1632 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), 1633 MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false), 1634 MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false), 1635 MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false), 1636 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1637 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false), 1638 MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false), 1639 MULTI_EXT_CFG_BOOL("svade", ext_svade, false), 1640 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1641 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1642 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1643 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1644 MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true), 1645 1646 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1647 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1648 1649 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1650 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1651 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1652 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1653 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1654 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1655 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1656 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1657 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1658 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1659 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1660 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1661 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1662 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1663 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1664 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1665 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1666 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1667 1668 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1669 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1670 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1671 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1672 1673 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1674 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1675 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1676 1677 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1678 1679 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1680 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1681 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1682 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1683 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1684 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1685 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1686 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1687 1688 /* Vector cryptography extensions */ 1689 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1690 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1691 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false), 1692 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1693 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1694 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1695 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1696 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1697 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1698 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1699 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1700 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1701 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1702 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1703 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1704 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1705 1706 { }, 1707 }; 1708 1709 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 
1710 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1711 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1712 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1713 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1714 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1715 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1716 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1717 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1718 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1719 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1720 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1721 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1722 1723 { }, 1724 }; 1725 1726 /* These are experimental so mark with 'x-' */ 1727 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1728 MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false), 1729 1730 { }, 1731 }; 1732 1733 /* 1734 * 'Named features' is the name we give to extensions that we 1735 * don't want to expose to users. They are either immutable 1736 * (always enabled/disable) or they'll vary depending on 1737 * the resulting CPU state. They have riscv,isa strings 1738 * and priv_ver like regular extensions. 1739 */ 1740 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { 1741 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), 1742 MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true), 1743 MULTI_EXT_CFG_BOOL("sha", ext_sha, true), 1744 MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true), 1745 1746 { }, 1747 }; 1748 1749 /* Deprecated entries marked for future removal */ 1750 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1751 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1752 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1753 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1754 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1755 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1756 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1757 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1758 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1759 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1760 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1761 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1762 1763 { }, 1764 }; 1765 1766 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, 1767 Error **errp) 1768 { 1769 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1770 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", 1771 cpuname, propname); 1772 } 1773 1774 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1775 void *opaque, Error **errp) 1776 { 1777 RISCVCPU *cpu = RISCV_CPU(obj); 1778 uint8_t pmu_num, curr_pmu_num; 1779 uint32_t pmu_mask; 1780 1781 visit_type_uint8(v, name, &pmu_num, errp); 1782 1783 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); 1784 1785 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { 1786 cpu_set_prop_err(cpu, name, errp); 1787 error_append_hint(errp, "Current '%s' val: %u\n", 1788 name, curr_pmu_num); 1789 return; 1790 } 1791 1792 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1793 error_setg(errp, "Number of counters exceeds maximum available"); 1794 return; 1795 } 1796 1797 if (pmu_num == 0) { 1798 pmu_mask = 0; 1799 } else { 1800 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1801 } 1802 1803 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1804 cpu->cfg.pmu_mask = pmu_mask; 1805 
cpu_option_add_user_setting("pmu-mask", pmu_mask); 1806 } 1807 1808 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1809 void *opaque, Error **errp) 1810 { 1811 RISCVCPU *cpu = RISCV_CPU(obj); 1812 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1813 1814 visit_type_uint8(v, name, &pmu_num, errp); 1815 } 1816 1817 static const PropertyInfo prop_pmu_num = { 1818 .type = "int8", 1819 .description = "pmu-num", 1820 .get = prop_pmu_num_get, 1821 .set = prop_pmu_num_set, 1822 }; 1823 1824 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1825 void *opaque, Error **errp) 1826 { 1827 RISCVCPU *cpu = RISCV_CPU(obj); 1828 uint32_t value; 1829 uint8_t pmu_num; 1830 1831 visit_type_uint32(v, name, &value, errp); 1832 1833 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1834 cpu_set_prop_err(cpu, name, errp); 1835 error_append_hint(errp, "Current '%s' val: %x\n", 1836 name, cpu->cfg.pmu_mask); 1837 return; 1838 } 1839 1840 pmu_num = ctpop32(value); 1841 1842 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1843 error_setg(errp, "Number of counters exceeds maximum available"); 1844 return; 1845 } 1846 1847 cpu_option_add_user_setting(name, value); 1848 cpu->cfg.pmu_mask = value; 1849 } 1850 1851 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1852 void *opaque, Error **errp) 1853 { 1854 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1855 1856 visit_type_uint8(v, name, &pmu_mask, errp); 1857 } 1858 1859 static const PropertyInfo prop_pmu_mask = { 1860 .type = "int8", 1861 .description = "pmu-mask", 1862 .get = prop_pmu_mask_get, 1863 .set = prop_pmu_mask_set, 1864 }; 1865 1866 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1867 void *opaque, Error **errp) 1868 { 1869 RISCVCPU *cpu = RISCV_CPU(obj); 1870 bool value; 1871 1872 visit_type_bool(v, name, &value, errp); 1873 1874 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1875 cpu_set_prop_err(cpu, "mmu", errp); 1876 return; 1877 } 1878 1879 cpu_option_add_user_setting(name, value); 1880 cpu->cfg.mmu = value; 1881 } 1882 1883 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1884 void *opaque, Error **errp) 1885 { 1886 bool value = RISCV_CPU(obj)->cfg.mmu; 1887 1888 visit_type_bool(v, name, &value, errp); 1889 } 1890 1891 static const PropertyInfo prop_mmu = { 1892 .type = "bool", 1893 .description = "mmu", 1894 .get = prop_mmu_get, 1895 .set = prop_mmu_set, 1896 }; 1897 1898 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1899 void *opaque, Error **errp) 1900 { 1901 RISCVCPU *cpu = RISCV_CPU(obj); 1902 bool value; 1903 1904 visit_type_bool(v, name, &value, errp); 1905 1906 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1907 cpu_set_prop_err(cpu, name, errp); 1908 return; 1909 } 1910 1911 cpu_option_add_user_setting(name, value); 1912 cpu->cfg.pmp = value; 1913 } 1914 1915 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1916 void *opaque, Error **errp) 1917 { 1918 bool value = RISCV_CPU(obj)->cfg.pmp; 1919 1920 visit_type_bool(v, name, &value, errp); 1921 } 1922 1923 static const PropertyInfo prop_pmp = { 1924 .type = "bool", 1925 .description = "pmp", 1926 .get = prop_pmp_get, 1927 .set = prop_pmp_set, 1928 }; 1929 1930 static int priv_spec_from_str(const char *priv_spec_str) 1931 { 1932 int priv_version = -1; 1933 1934 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1935 priv_version = PRIV_VERSION_1_13_0; 1936 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 
1937 priv_version = PRIV_VERSION_1_12_0; 1938 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1939 priv_version = PRIV_VERSION_1_11_0; 1940 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1941 priv_version = PRIV_VERSION_1_10_0; 1942 } 1943 1944 return priv_version; 1945 } 1946 1947 const char *priv_spec_to_str(int priv_version) 1948 { 1949 switch (priv_version) { 1950 case PRIV_VERSION_1_10_0: 1951 return PRIV_VER_1_10_0_STR; 1952 case PRIV_VERSION_1_11_0: 1953 return PRIV_VER_1_11_0_STR; 1954 case PRIV_VERSION_1_12_0: 1955 return PRIV_VER_1_12_0_STR; 1956 case PRIV_VERSION_1_13_0: 1957 return PRIV_VER_1_13_0_STR; 1958 default: 1959 return NULL; 1960 } 1961 } 1962 1963 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1964 void *opaque, Error **errp) 1965 { 1966 RISCVCPU *cpu = RISCV_CPU(obj); 1967 g_autofree char *value = NULL; 1968 int priv_version = -1; 1969 1970 visit_type_str(v, name, &value, errp); 1971 1972 priv_version = priv_spec_from_str(value); 1973 if (priv_version < 0) { 1974 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1975 return; 1976 } 1977 1978 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1979 cpu_set_prop_err(cpu, name, errp); 1980 error_append_hint(errp, "Current '%s' val: %s\n", name, 1981 object_property_get_str(obj, name, NULL)); 1982 return; 1983 } 1984 1985 cpu_option_add_user_setting(name, priv_version); 1986 cpu->env.priv_ver = priv_version; 1987 } 1988 1989 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1990 void *opaque, Error **errp) 1991 { 1992 RISCVCPU *cpu = RISCV_CPU(obj); 1993 const char *value = priv_spec_to_str(cpu->env.priv_ver); 1994 1995 visit_type_str(v, name, (char **)&value, errp); 1996 } 1997 1998 static const PropertyInfo prop_priv_spec = { 1999 .type = "str", 2000 .description = "priv_spec", 2001 /* FIXME enum? */ 2002 .get = prop_priv_spec_get, 2003 .set = prop_priv_spec_set, 2004 }; 2005 2006 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 2007 void *opaque, Error **errp) 2008 { 2009 RISCVCPU *cpu = RISCV_CPU(obj); 2010 g_autofree char *value = NULL; 2011 2012 visit_type_str(v, name, &value, errp); 2013 2014 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 2015 error_setg(errp, "Unsupported vector spec version '%s'", value); 2016 return; 2017 } 2018 2019 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 2020 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 2021 } 2022 2023 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 2024 void *opaque, Error **errp) 2025 { 2026 const char *value = VEXT_VER_1_00_0_STR; 2027 2028 visit_type_str(v, name, (char **)&value, errp); 2029 } 2030 2031 static const PropertyInfo prop_vext_spec = { 2032 .type = "str", 2033 .description = "vext_spec", 2034 /* FIXME enum? 
*/ 2035 .get = prop_vext_spec_get, 2036 .set = prop_vext_spec_set, 2037 }; 2038 2039 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 2040 void *opaque, Error **errp) 2041 { 2042 RISCVCPU *cpu = RISCV_CPU(obj); 2043 uint16_t cpu_vlen = cpu->cfg.vlenb << 3; 2044 uint16_t value; 2045 2046 if (!visit_type_uint16(v, name, &value, errp)) { 2047 return; 2048 } 2049 2050 if (!is_power_of_2(value)) { 2051 error_setg(errp, "Vector extension VLEN must be power of 2"); 2052 return; 2053 } 2054 2055 if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) { 2056 cpu_set_prop_err(cpu, name, errp); 2057 error_append_hint(errp, "Current '%s' val: %u\n", 2058 name, cpu_vlen); 2059 return; 2060 } 2061 2062 cpu_option_add_user_setting(name, value); 2063 cpu->cfg.vlenb = value >> 3; 2064 } 2065 2066 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 2067 void *opaque, Error **errp) 2068 { 2069 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 2070 2071 visit_type_uint16(v, name, &value, errp); 2072 } 2073 2074 static const PropertyInfo prop_vlen = { 2075 .type = "uint16", 2076 .description = "vlen", 2077 .get = prop_vlen_get, 2078 .set = prop_vlen_set, 2079 }; 2080 2081 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 2082 void *opaque, Error **errp) 2083 { 2084 RISCVCPU *cpu = RISCV_CPU(obj); 2085 uint16_t value; 2086 2087 if (!visit_type_uint16(v, name, &value, errp)) { 2088 return; 2089 } 2090 2091 if (!is_power_of_2(value)) { 2092 error_setg(errp, "Vector extension ELEN must be power of 2"); 2093 return; 2094 } 2095 2096 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 2097 cpu_set_prop_err(cpu, name, errp); 2098 error_append_hint(errp, "Current '%s' val: %u\n", 2099 name, cpu->cfg.elen); 2100 return; 2101 } 2102 2103 cpu_option_add_user_setting(name, value); 2104 cpu->cfg.elen = value; 2105 } 2106 2107 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 2108 void *opaque, Error **errp) 2109 { 2110 uint16_t value = RISCV_CPU(obj)->cfg.elen; 2111 2112 visit_type_uint16(v, name, &value, errp); 2113 } 2114 2115 static const PropertyInfo prop_elen = { 2116 .type = "uint16", 2117 .description = "elen", 2118 .get = prop_elen_get, 2119 .set = prop_elen_set, 2120 }; 2121 2122 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 2123 void *opaque, Error **errp) 2124 { 2125 RISCVCPU *cpu = RISCV_CPU(obj); 2126 uint16_t value; 2127 2128 if (!visit_type_uint16(v, name, &value, errp)) { 2129 return; 2130 } 2131 2132 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2133 cpu_set_prop_err(cpu, name, errp); 2134 error_append_hint(errp, "Current '%s' val: %u\n", 2135 name, cpu->cfg.cbom_blocksize); 2136 return; 2137 } 2138 2139 cpu_option_add_user_setting(name, value); 2140 cpu->cfg.cbom_blocksize = value; 2141 } 2142 2143 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2144 void *opaque, Error **errp) 2145 { 2146 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2147 2148 visit_type_uint16(v, name, &value, errp); 2149 } 2150 2151 static const PropertyInfo prop_cbom_blksize = { 2152 .type = "uint16", 2153 .description = "cbom_blocksize", 2154 .get = prop_cbom_blksize_get, 2155 .set = prop_cbom_blksize_set, 2156 }; 2157 2158 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2159 void *opaque, Error **errp) 2160 { 2161 RISCVCPU *cpu = RISCV_CPU(obj); 2162 uint16_t value; 2163 2164 if (!visit_type_uint16(v, name, &value, errp)) { 2165 return; 2166 } 
2167 2168 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2169 cpu_set_prop_err(cpu, name, errp); 2170 error_append_hint(errp, "Current '%s' val: %u\n", 2171 name, cpu->cfg.cbop_blocksize); 2172 return; 2173 } 2174 2175 cpu_option_add_user_setting(name, value); 2176 cpu->cfg.cbop_blocksize = value; 2177 } 2178 2179 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2180 void *opaque, Error **errp) 2181 { 2182 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2183 2184 visit_type_uint16(v, name, &value, errp); 2185 } 2186 2187 static const PropertyInfo prop_cbop_blksize = { 2188 .type = "uint16", 2189 .description = "cbop_blocksize", 2190 .get = prop_cbop_blksize_get, 2191 .set = prop_cbop_blksize_set, 2192 }; 2193 2194 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2195 void *opaque, Error **errp) 2196 { 2197 RISCVCPU *cpu = RISCV_CPU(obj); 2198 uint16_t value; 2199 2200 if (!visit_type_uint16(v, name, &value, errp)) { 2201 return; 2202 } 2203 2204 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2205 cpu_set_prop_err(cpu, name, errp); 2206 error_append_hint(errp, "Current '%s' val: %u\n", 2207 name, cpu->cfg.cboz_blocksize); 2208 return; 2209 } 2210 2211 cpu_option_add_user_setting(name, value); 2212 cpu->cfg.cboz_blocksize = value; 2213 } 2214 2215 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2216 void *opaque, Error **errp) 2217 { 2218 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2219 2220 visit_type_uint16(v, name, &value, errp); 2221 } 2222 2223 static const PropertyInfo prop_cboz_blksize = { 2224 .type = "uint16", 2225 .description = "cboz_blocksize", 2226 .get = prop_cboz_blksize_get, 2227 .set = prop_cboz_blksize_set, 2228 }; 2229 2230 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2231 void *opaque, Error **errp) 2232 { 2233 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2234 RISCVCPU *cpu = RISCV_CPU(obj); 2235 uint32_t prev_val = cpu->cfg.mvendorid; 2236 uint32_t value; 2237 2238 if (!visit_type_uint32(v, name, &value, errp)) { 2239 return; 2240 } 2241 2242 if (!dynamic_cpu && prev_val != value) { 2243 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2244 object_get_typename(obj), prev_val); 2245 return; 2246 } 2247 2248 cpu->cfg.mvendorid = value; 2249 } 2250 2251 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2252 void *opaque, Error **errp) 2253 { 2254 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2255 2256 visit_type_uint32(v, name, &value, errp); 2257 } 2258 2259 static const PropertyInfo prop_mvendorid = { 2260 .type = "uint32", 2261 .description = "mvendorid", 2262 .get = prop_mvendorid_get, 2263 .set = prop_mvendorid_set, 2264 }; 2265 2266 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2267 void *opaque, Error **errp) 2268 { 2269 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2270 RISCVCPU *cpu = RISCV_CPU(obj); 2271 uint64_t prev_val = cpu->cfg.mimpid; 2272 uint64_t value; 2273 2274 if (!visit_type_uint64(v, name, &value, errp)) { 2275 return; 2276 } 2277 2278 if (!dynamic_cpu && prev_val != value) { 2279 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2280 object_get_typename(obj), prev_val); 2281 return; 2282 } 2283 2284 cpu->cfg.mimpid = value; 2285 } 2286 2287 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2288 void *opaque, Error **errp) 2289 { 2290 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2291 2292 
    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .type = "uint64",
    .description = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .type = "uint64",
    .description = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};

/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .u_parent = NULL,
    .s_parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};
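/*
 * How these profile structs are consumed (a rough sketch, not the actual
 * realize-time code): enabling a profile first enables its u_parent/s_parent
 * chain, then ORs misa_ext into the CPU and turns on every extension listed
 * in ext_offsets up to RISCV_PROFILE_EXT_LIST_END. From the command line
 * this is simply "-cpu rva22u64", "-cpu rva23s64" and so on, with individual
 * extensions still overridable after the profile is applied.
 */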
/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * All mandatory extensions from RVA22U64 are present
 * in RVA23U64 so set RVA22 as a parent. We need to
 * declare just the newly added mandatory extensions.
 */
static RISCVCPUProfile RVA23U64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva23u64",
    .misa_ext = RVV,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
        CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
        CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
        CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
        CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
        CPU_CFG_OFFSET(ext_supm),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA23U64, RVA23S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw, Ssu64xl
 *
 * The remaining features/extensions come from RVA23U64.
 */
static RISCVCPUProfile RVA23S64 = {
    .u_parent = &RVA23U64,
    .s_parent = &RVA22S64,
    .name = "rva23s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_13_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* New in RVA23S64 */
        CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
        CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),

        /* Named features: Sha */
        CPU_CFG_OFFSET(ext_sha),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    &RVA23U64,
    &RVA23S64,
    NULL,
};

static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};
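/*
 * Sketch of how one of these implied-extension rules is applied (the helper
 * below is illustrative only, not the real function; the actual expansion
 * happens in the realize path and honours explicit user overrides):
 *
 *     static void apply_rule(RISCVCPU *cpu, const RISCVCPUImpliedExtsRule *r)
 *     {
 *         cpu->env.misa_ext |= r->implied_misa_exts;
 *
 *         for (int i = 0;
 *              r->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
 *             /* a setter along the lines of isa_ext_update_enabled() */
 *             isa_ext_update_enabled(cpu, r->implied_multi_exts[i], true);
 *         }
 *     }
 *
 * Enabling Zcb, for instance, transitively pulls in Zca via ZCB_IMPLIED.
 */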
2532 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = { 2533 .ext = CPU_CFG_OFFSET(ext_zcd), 2534 .implied_misa_exts = RVD, 2535 .implied_multi_exts = { 2536 CPU_CFG_OFFSET(ext_zca), 2537 2538 RISCV_IMPLIED_EXTS_RULE_END 2539 }, 2540 }; 2541 2542 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = { 2543 .ext = CPU_CFG_OFFSET(ext_zce), 2544 .implied_multi_exts = { 2545 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp), 2546 CPU_CFG_OFFSET(ext_zcmt), 2547 2548 RISCV_IMPLIED_EXTS_RULE_END 2549 }, 2550 }; 2551 2552 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = { 2553 .ext = CPU_CFG_OFFSET(ext_zcf), 2554 .implied_misa_exts = RVF, 2555 .implied_multi_exts = { 2556 CPU_CFG_OFFSET(ext_zca), 2557 2558 RISCV_IMPLIED_EXTS_RULE_END 2559 }, 2560 }; 2561 2562 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2563 .ext = CPU_CFG_OFFSET(ext_zcmp), 2564 .implied_multi_exts = { 2565 CPU_CFG_OFFSET(ext_zca), 2566 2567 RISCV_IMPLIED_EXTS_RULE_END 2568 }, 2569 }; 2570 2571 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2572 .ext = CPU_CFG_OFFSET(ext_zcmt), 2573 .implied_multi_exts = { 2574 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2575 2576 RISCV_IMPLIED_EXTS_RULE_END 2577 }, 2578 }; 2579 2580 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2581 .ext = CPU_CFG_OFFSET(ext_zdinx), 2582 .implied_multi_exts = { 2583 CPU_CFG_OFFSET(ext_zfinx), 2584 2585 RISCV_IMPLIED_EXTS_RULE_END 2586 }, 2587 }; 2588 2589 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2590 .ext = CPU_CFG_OFFSET(ext_zfa), 2591 .implied_misa_exts = RVF, 2592 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2593 }; 2594 2595 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2596 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2597 .implied_misa_exts = RVF, 2598 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2599 }; 2600 2601 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2602 .ext = CPU_CFG_OFFSET(ext_zfh), 2603 .implied_multi_exts = { 2604 CPU_CFG_OFFSET(ext_zfhmin), 2605 2606 RISCV_IMPLIED_EXTS_RULE_END 2607 }, 2608 }; 2609 2610 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2611 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2612 .implied_misa_exts = RVF, 2613 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2614 }; 2615 2616 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2617 .ext = CPU_CFG_OFFSET(ext_zfinx), 2618 .implied_multi_exts = { 2619 CPU_CFG_OFFSET(ext_zicsr), 2620 2621 RISCV_IMPLIED_EXTS_RULE_END 2622 }, 2623 }; 2624 2625 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2626 .ext = CPU_CFG_OFFSET(ext_zhinx), 2627 .implied_multi_exts = { 2628 CPU_CFG_OFFSET(ext_zhinxmin), 2629 2630 RISCV_IMPLIED_EXTS_RULE_END 2631 }, 2632 }; 2633 2634 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2635 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2636 .implied_multi_exts = { 2637 CPU_CFG_OFFSET(ext_zfinx), 2638 2639 RISCV_IMPLIED_EXTS_RULE_END 2640 }, 2641 }; 2642 2643 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2644 .ext = CPU_CFG_OFFSET(ext_zicntr), 2645 .implied_multi_exts = { 2646 CPU_CFG_OFFSET(ext_zicsr), 2647 2648 RISCV_IMPLIED_EXTS_RULE_END 2649 }, 2650 }; 2651 2652 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2653 .ext = CPU_CFG_OFFSET(ext_zihpm), 2654 .implied_multi_exts = { 2655 CPU_CFG_OFFSET(ext_zicsr), 2656 2657 RISCV_IMPLIED_EXTS_RULE_END 2658 }, 2659 }; 2660 2661 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2662 .ext = CPU_CFG_OFFSET(ext_zk), 2663 .implied_multi_exts = { 2664 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2665 CPU_CFG_OFFSET(ext_zkt), 2666 2667 RISCV_IMPLIED_EXTS_RULE_END 2668 }, 2669 }; 2670 2671 static 
RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2672 .ext = CPU_CFG_OFFSET(ext_zkn), 2673 .implied_multi_exts = { 2674 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2675 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2676 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2677 2678 RISCV_IMPLIED_EXTS_RULE_END 2679 }, 2680 }; 2681 2682 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2683 .ext = CPU_CFG_OFFSET(ext_zks), 2684 .implied_multi_exts = { 2685 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2686 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2687 CPU_CFG_OFFSET(ext_zksh), 2688 2689 RISCV_IMPLIED_EXTS_RULE_END 2690 }, 2691 }; 2692 2693 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2694 .ext = CPU_CFG_OFFSET(ext_zvbb), 2695 .implied_multi_exts = { 2696 CPU_CFG_OFFSET(ext_zvkb), 2697 2698 RISCV_IMPLIED_EXTS_RULE_END 2699 }, 2700 }; 2701 2702 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2703 .ext = CPU_CFG_OFFSET(ext_zve32f), 2704 .implied_misa_exts = RVF, 2705 .implied_multi_exts = { 2706 CPU_CFG_OFFSET(ext_zve32x), 2707 2708 RISCV_IMPLIED_EXTS_RULE_END 2709 }, 2710 }; 2711 2712 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2713 .ext = CPU_CFG_OFFSET(ext_zve32x), 2714 .implied_multi_exts = { 2715 CPU_CFG_OFFSET(ext_zicsr), 2716 2717 RISCV_IMPLIED_EXTS_RULE_END 2718 }, 2719 }; 2720 2721 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2722 .ext = CPU_CFG_OFFSET(ext_zve64d), 2723 .implied_misa_exts = RVD, 2724 .implied_multi_exts = { 2725 CPU_CFG_OFFSET(ext_zve64f), 2726 2727 RISCV_IMPLIED_EXTS_RULE_END 2728 }, 2729 }; 2730 2731 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2732 .ext = CPU_CFG_OFFSET(ext_zve64f), 2733 .implied_misa_exts = RVF, 2734 .implied_multi_exts = { 2735 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2736 2737 RISCV_IMPLIED_EXTS_RULE_END 2738 }, 2739 }; 2740 2741 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2742 .ext = CPU_CFG_OFFSET(ext_zve64x), 2743 .implied_multi_exts = { 2744 CPU_CFG_OFFSET(ext_zve32x), 2745 2746 RISCV_IMPLIED_EXTS_RULE_END 2747 }, 2748 }; 2749 2750 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2751 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2752 .implied_multi_exts = { 2753 CPU_CFG_OFFSET(ext_zve32f), 2754 2755 RISCV_IMPLIED_EXTS_RULE_END 2756 }, 2757 }; 2758 2759 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2760 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2761 .implied_multi_exts = { 2762 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2763 2764 RISCV_IMPLIED_EXTS_RULE_END 2765 }, 2766 }; 2767 2768 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2769 .ext = CPU_CFG_OFFSET(ext_zvfh), 2770 .implied_multi_exts = { 2771 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2772 2773 RISCV_IMPLIED_EXTS_RULE_END 2774 }, 2775 }; 2776 2777 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2778 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2779 .implied_multi_exts = { 2780 CPU_CFG_OFFSET(ext_zve32f), 2781 2782 RISCV_IMPLIED_EXTS_RULE_END 2783 }, 2784 }; 2785 2786 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2787 .ext = CPU_CFG_OFFSET(ext_zvkn), 2788 .implied_multi_exts = { 2789 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2790 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2791 2792 RISCV_IMPLIED_EXTS_RULE_END 2793 }, 2794 }; 2795 2796 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2797 .ext = CPU_CFG_OFFSET(ext_zvknc), 2798 .implied_multi_exts = { 2799 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2800 2801 RISCV_IMPLIED_EXTS_RULE_END 2802 }, 2803 }; 2804 2805 static 
RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2806 .ext = CPU_CFG_OFFSET(ext_zvkng), 2807 .implied_multi_exts = { 2808 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2809 2810 RISCV_IMPLIED_EXTS_RULE_END 2811 }, 2812 }; 2813 2814 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2815 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2816 .implied_multi_exts = { 2817 CPU_CFG_OFFSET(ext_zve64x), 2818 2819 RISCV_IMPLIED_EXTS_RULE_END 2820 }, 2821 }; 2822 2823 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2824 .ext = CPU_CFG_OFFSET(ext_zvks), 2825 .implied_multi_exts = { 2826 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh), 2827 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2828 2829 RISCV_IMPLIED_EXTS_RULE_END 2830 }, 2831 }; 2832 2833 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2834 .ext = CPU_CFG_OFFSET(ext_zvksc), 2835 .implied_multi_exts = { 2836 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2837 2838 RISCV_IMPLIED_EXTS_RULE_END 2839 }, 2840 }; 2841 2842 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2843 .ext = CPU_CFG_OFFSET(ext_zvksg), 2844 .implied_multi_exts = { 2845 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2846 2847 RISCV_IMPLIED_EXTS_RULE_END 2848 }, 2849 }; 2850 2851 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = { 2852 .ext = CPU_CFG_OFFSET(ext_ssccfg), 2853 .implied_multi_exts = { 2854 CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind), 2855 CPU_CFG_OFFSET(ext_smcdeleg), 2856 2857 RISCV_IMPLIED_EXTS_RULE_END 2858 }, 2859 }; 2860 2861 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = { 2862 .ext = CPU_CFG_OFFSET(ext_supm), 2863 .implied_multi_exts = { 2864 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm), 2865 2866 RISCV_IMPLIED_EXTS_RULE_END 2867 }, 2868 }; 2869 2870 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = { 2871 .ext = CPU_CFG_OFFSET(ext_sspm), 2872 .implied_multi_exts = { 2873 CPU_CFG_OFFSET(ext_smnpm), 2874 2875 RISCV_IMPLIED_EXTS_RULE_END 2876 }, 2877 }; 2878 2879 static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = { 2880 .ext = CPU_CFG_OFFSET(ext_smctr), 2881 .implied_misa_exts = RVS, 2882 .implied_multi_exts = { 2883 CPU_CFG_OFFSET(ext_sscsrind), 2884 2885 RISCV_IMPLIED_EXTS_RULE_END 2886 }, 2887 }; 2888 2889 static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = { 2890 .ext = CPU_CFG_OFFSET(ext_ssctr), 2891 .implied_misa_exts = RVS, 2892 .implied_multi_exts = { 2893 CPU_CFG_OFFSET(ext_sscsrind), 2894 2895 RISCV_IMPLIED_EXTS_RULE_END 2896 }, 2897 }; 2898 2899 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2900 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2901 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2902 }; 2903 2904 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2905 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2906 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2907 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2908 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2909 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2910 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2911 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2912 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2913 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2914 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2915 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2916 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED, 2917 &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED, 2918 NULL 2919 }; 2920 2921 static const Property riscv_cpu_properties[] = { 2922 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2923 2924 {.name = "pmu-mask", 
.info = &prop_pmu_mask}, 2925 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2926 2927 {.name = "mmu", .info = &prop_mmu}, 2928 {.name = "pmp", .info = &prop_pmp}, 2929 2930 {.name = "priv_spec", .info = &prop_priv_spec}, 2931 {.name = "vext_spec", .info = &prop_vext_spec}, 2932 2933 {.name = "vlen", .info = &prop_vlen}, 2934 {.name = "elen", .info = &prop_elen}, 2935 2936 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2937 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2938 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2939 2940 {.name = "mvendorid", .info = &prop_mvendorid}, 2941 {.name = "mimpid", .info = &prop_mimpid}, 2942 {.name = "marchid", .info = &prop_marchid}, 2943 2944 #ifndef CONFIG_USER_ONLY 2945 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2946 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2947 DEFAULT_RNMI_IRQVEC), 2948 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2949 DEFAULT_RNMI_EXCPVEC), 2950 #endif 2951 2952 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2953 2954 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2955 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2956 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2957 2958 /* 2959 * write_misa() is marked as experimental for now so mark 2960 * it with -x and default to 'false'. 2961 */ 2962 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2963 }; 2964 2965 #if defined(TARGET_RISCV64) 2966 static void rva22u64_profile_cpu_init(Object *obj) 2967 { 2968 rv64i_bare_cpu_init(obj); 2969 2970 RVA22U64.enabled = true; 2971 } 2972 2973 static void rva22s64_profile_cpu_init(Object *obj) 2974 { 2975 rv64i_bare_cpu_init(obj); 2976 2977 RVA22S64.enabled = true; 2978 } 2979 2980 static void rva23u64_profile_cpu_init(Object *obj) 2981 { 2982 rv64i_bare_cpu_init(obj); 2983 2984 RVA23U64.enabled = true; 2985 } 2986 2987 static void rva23s64_profile_cpu_init(Object *obj) 2988 { 2989 rv64i_bare_cpu_init(obj); 2990 2991 RVA23S64.enabled = true; 2992 } 2993 #endif 2994 2995 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2996 { 2997 RISCVCPU *cpu = RISCV_CPU(cs); 2998 CPURISCVState *env = &cpu->env; 2999 3000 switch (riscv_cpu_mxl(env)) { 3001 case MXL_RV32: 3002 return "riscv:rv32"; 3003 case MXL_RV64: 3004 case MXL_RV128: 3005 return "riscv:rv64"; 3006 default: 3007 g_assert_not_reached(); 3008 } 3009 } 3010 3011 #ifndef CONFIG_USER_ONLY 3012 static int64_t riscv_get_arch_id(CPUState *cs) 3013 { 3014 RISCVCPU *cpu = RISCV_CPU(cs); 3015 3016 return cpu->env.mhartid; 3017 } 3018 3019 #include "hw/core/sysemu-cpu-ops.h" 3020 3021 static const struct SysemuCPUOps riscv_sysemu_ops = { 3022 .has_work = riscv_cpu_has_work, 3023 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 3024 .write_elf64_note = riscv_cpu_write_elf64_note, 3025 .write_elf32_note = riscv_cpu_write_elf32_note, 3026 .legacy_vmsd = &vmstate_riscv_cpu, 3027 }; 3028 #endif 3029 3030 static void riscv_cpu_common_class_init(ObjectClass *c, const void *data) 3031 { 3032 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3033 CPUClass *cc = CPU_CLASS(c); 3034 DeviceClass *dc = DEVICE_CLASS(c); 3035 ResettableClass *rc = RESETTABLE_CLASS(c); 3036 3037 device_class_set_parent_realize(dc, riscv_cpu_realize, 3038 &mcc->parent_realize); 3039 3040 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 3041 &mcc->parent_phases); 3042 3043 
cc->class_by_name = riscv_cpu_class_by_name; 3044 cc->dump_state = riscv_cpu_dump_state; 3045 cc->set_pc = riscv_cpu_set_pc; 3046 cc->get_pc = riscv_cpu_get_pc; 3047 cc->gdb_read_register = riscv_cpu_gdb_read_register; 3048 cc->gdb_write_register = riscv_cpu_gdb_write_register; 3049 cc->gdb_stop_before_watchpoint = true; 3050 cc->disas_set_info = riscv_cpu_disas_set_info; 3051 #ifndef CONFIG_USER_ONLY 3052 cc->sysemu_ops = &riscv_sysemu_ops; 3053 cc->get_arch_id = riscv_get_arch_id; 3054 #endif 3055 cc->gdb_arch_name = riscv_gdb_arch_name; 3056 #ifdef CONFIG_TCG 3057 cc->tcg_ops = &riscv_tcg_ops; 3058 #endif /* CONFIG_TCG */ 3059 3060 device_class_set_props(dc, riscv_cpu_properties); 3061 } 3062 3063 static void riscv_cpu_class_init(ObjectClass *c, const void *data) 3064 { 3065 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3066 3067 mcc->misa_mxl_max = (RISCVMXL)GPOINTER_TO_UINT(data); 3068 riscv_cpu_validate_misa_mxl(mcc); 3069 } 3070 3071 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 3072 int max_str_len) 3073 { 3074 const RISCVIsaExtData *edata; 3075 char *old = *isa_str; 3076 char *new = *isa_str; 3077 3078 for (edata = isa_edata_arr; edata && edata->name; edata++) { 3079 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3080 new = g_strconcat(old, "_", edata->name, NULL); 3081 g_free(old); 3082 old = new; 3083 } 3084 } 3085 3086 *isa_str = new; 3087 } 3088 3089 char *riscv_isa_string(RISCVCPU *cpu) 3090 { 3091 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3092 int i; 3093 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 3094 char *isa_str = g_new(char, maxlen); 3095 int xlen = riscv_cpu_max_xlen(mcc); 3096 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 3097 3098 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3099 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3100 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 3101 } 3102 } 3103 *p = '\0'; 3104 if (!cpu->cfg.short_isa_string) { 3105 riscv_isa_string_ext(cpu, &isa_str, maxlen); 3106 } 3107 return isa_str; 3108 } 3109 3110 #ifndef CONFIG_USER_ONLY 3111 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 3112 { 3113 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 3114 char **extensions = g_new(char *, maxlen); 3115 3116 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3117 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3118 extensions[*count] = g_new(char, 2); 3119 snprintf(extensions[*count], 2, "%c", 3120 qemu_tolower(riscv_single_letter_exts[i])); 3121 (*count)++; 3122 } 3123 } 3124 3125 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 3126 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3127 extensions[*count] = g_strdup(edata->name); 3128 (*count)++; 3129 } 3130 } 3131 3132 return extensions; 3133 } 3134 3135 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 3136 { 3137 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3138 const size_t maxlen = sizeof("rv128i"); 3139 g_autofree char *isa_base = g_new(char, maxlen); 3140 g_autofree char *riscv_isa; 3141 char **isa_extensions; 3142 int count = 0; 3143 int xlen = riscv_cpu_max_xlen(mcc); 3144 3145 riscv_isa = riscv_isa_string(cpu); 3146 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 3147 3148 snprintf(isa_base, maxlen, "rv%di", xlen); 3149 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 3150 3151 isa_extensions = 
riscv_isa_extensions_list(cpu, &count); 3152 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 3153 isa_extensions, count); 3154 3155 for (int i = 0; i < count; i++) { 3156 g_free(isa_extensions[i]); 3157 } 3158 3159 g_free(isa_extensions); 3160 } 3161 #endif 3162 3163 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 3164 { \ 3165 .name = (type_name), \ 3166 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 3167 .instance_init = (initfn), \ 3168 .class_init = riscv_cpu_class_init, \ 3169 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3170 } 3171 3172 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 3173 { \ 3174 .name = (type_name), \ 3175 .parent = TYPE_RISCV_VENDOR_CPU, \ 3176 .instance_init = (initfn), \ 3177 .class_init = riscv_cpu_class_init, \ 3178 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3179 } 3180 3181 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 3182 { \ 3183 .name = (type_name), \ 3184 .parent = TYPE_RISCV_BARE_CPU, \ 3185 .instance_init = (initfn), \ 3186 .class_init = riscv_cpu_class_init, \ 3187 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3188 } 3189 3190 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 3191 { \ 3192 .name = (type_name), \ 3193 .parent = TYPE_RISCV_BARE_CPU, \ 3194 .instance_init = (initfn), \ 3195 .class_init = riscv_cpu_class_init, \ 3196 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3197 } 3198 3199 static const TypeInfo riscv_cpu_type_infos[] = { 3200 { 3201 .name = TYPE_RISCV_CPU, 3202 .parent = TYPE_CPU, 3203 .instance_size = sizeof(RISCVCPU), 3204 .instance_align = __alignof(RISCVCPU), 3205 .instance_init = riscv_cpu_init, 3206 .instance_post_init = riscv_cpu_post_init, 3207 .abstract = true, 3208 .class_size = sizeof(RISCVCPUClass), 3209 .class_init = riscv_cpu_common_class_init, 3210 }, 3211 { 3212 .name = TYPE_RISCV_DYNAMIC_CPU, 3213 .parent = TYPE_RISCV_CPU, 3214 .abstract = true, 3215 }, 3216 { 3217 .name = TYPE_RISCV_VENDOR_CPU, 3218 .parent = TYPE_RISCV_CPU, 3219 .abstract = true, 3220 }, 3221 { 3222 .name = TYPE_RISCV_BARE_CPU, 3223 .parent = TYPE_RISCV_CPU, 3224 .instance_init = riscv_bare_cpu_init, 3225 .abstract = true, 3226 }, 3227 #if defined(TARGET_RISCV32) 3228 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3229 #elif defined(TARGET_RISCV64) 3230 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3231 #endif 3232 3233 #if defined(TARGET_RISCV32) || \ 3234 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3235 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3236 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3237 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3238 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3239 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3240 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 3241 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 3242 #endif 3243 3244 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3245 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 3246 #endif 3247 3248 #if defined(TARGET_RISCV64) 3249 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 3250 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 3251 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 3252 
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 3253 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 3254 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), 3255 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 3256 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, 3257 MXL_RV64, rv64_xiangshan_nanhu_cpu_init), 3258 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 3259 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 3260 #endif /* CONFIG_TCG && !CONFIG_USER_ONLY */ 3261 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 3262 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 3263 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 3264 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 3265 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init), 3266 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init), 3267 #endif /* TARGET_RISCV64 */ 3268 }; 3269 3270 DEFINE_TYPES(riscv_cpu_type_infos) 3271
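/*
 * Usage sketch (documentation only, assuming the listed option names):
 * the CPU types and properties registered above combine on the command
 * line roughly as follows:
 *
 *     qemu-system-riscv64 -cpu rv64,v=true,vlen=256,elen=64,zvbb=true
 *     qemu-system-riscv64 -cpu rva23s64
 *     qemu-system-riscv32 -cpu rv32,pmp=false,mmu=false
 *
 * Boolean extension names match the MULTI_EXT_CFG_BOOL() entries, while
 * vlen, elen, pmp, mmu, etc. map to the PropertyInfo definitions above.
 */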