1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "internals.h" 27 #include "exec/exec-all.h" 28 #include "qapi/error.h" 29 #include "qapi/visitor.h" 30 #include "qemu/error-report.h" 31 #include "hw/qdev-properties.h" 32 #include "hw/core/qdev-prop-internal.h" 33 #include "migration/vmstate.h" 34 #include "fpu/softfloat-helpers.h" 35 #include "system/device_tree.h" 36 #include "system/kvm.h" 37 #include "system/tcg.h" 38 #include "kvm/kvm_riscv.h" 39 #include "tcg/tcg-cpu.h" 40 #include "tcg/tcg.h" 41 42 /* RISC-V CPU definitions */ 43 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH"; 44 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, 45 RVC, RVS, RVU, RVH, RVG, RVB, 0}; 46 47 /* 48 * From vector_helper.c 49 * Note that vector data is stored in host-endian 64-bit chunks, 50 * so addressing bytes needs a host-endian fixup. 51 */ 52 #if HOST_BIG_ENDIAN 53 #define BYTE(x) ((x) ^ 7) 54 #else 55 #define BYTE(x) (x) 56 #endif 57 58 bool riscv_cpu_is_32bit(RISCVCPU *cpu) 59 { 60 return riscv_cpu_mxl(&cpu->env) == MXL_RV32; 61 } 62 63 /* Hash that stores general user set numeric options */ 64 static GHashTable *general_user_opts; 65 66 static void cpu_option_add_user_setting(const char *optname, uint32_t value) 67 { 68 g_hash_table_insert(general_user_opts, (gpointer)optname, 69 GUINT_TO_POINTER(value)); 70 } 71 72 bool riscv_cpu_option_set(const char *optname) 73 { 74 return g_hash_table_contains(general_user_opts, optname); 75 } 76 77 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 78 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} 79 80 /* 81 * Here are the ordering rules of extension naming defined by RISC-V 82 * specification : 83 * 1. All extensions should be separated from other multi-letter extensions 84 * by an underscore. 85 * 2. The first letter following the 'Z' conventionally indicates the most 86 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 87 * If multiple 'Z' extensions are named, they should be ordered first 88 * by category, then alphabetically within a category. 89 * 3. Standard supervisor-level extensions (starts with 'S') should be 90 * listed after standard unprivileged extensions. If multiple 91 * supervisor-level extensions are listed, they should be ordered 92 * alphabetically. 93 * 4. Non-standard extensions (starts with 'X') must be listed after all 94 * standard extensions. They must be separated from other multi-letter 95 * extensions by an underscore. 96 * 97 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 98 * instead. 99 */ 100 const RISCVIsaExtData isa_edata_arr[] = { 101 ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b), 102 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), 103 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop), 104 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), 105 ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11), 106 ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11), 107 ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11), 108 ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse), 109 ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp), 110 ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss), 111 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 112 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), 113 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), 114 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei), 115 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 116 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 117 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), 118 ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop), 119 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 120 ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12), 121 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), 122 ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha), 123 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), 124 ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), 125 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), 126 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 127 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 128 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 129 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 130 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 131 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 132 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 133 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 134 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 135 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 136 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 137 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 138 ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop), 139 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 140 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 141 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 142 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 143 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 144 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 145 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 146 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 147 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 148 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 149 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 150 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 151 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 152 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 153 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 154 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 155 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 156 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 157 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 158 ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso), 159 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 160 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 161 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 162 ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x), 163 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 164 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 165 ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x), 166 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 167 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 168 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 169 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 170 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb), 171 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 172 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn), 173 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc), 174 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 175 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng), 176 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 177 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 178 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks), 179 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc), 180 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 181 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg), 182 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 183 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), 184 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 185 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 186 ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), 187 ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha), 188 ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12), 189 ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12), 190 ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12), 191 ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12), 192 ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), 193 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 194 ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg), 195 ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf), 196 ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind), 197 ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp), 198 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), 199 ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi), 200 ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm), 201 ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm), 202 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 203 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 204 ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg), 205 ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11), 206 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 207 ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), 208 ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind), 209 ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp), 210 ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm), 211 ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm), 212 ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen), 213 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 214 ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12), 215 ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), 216 ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12), 217 ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm), 218 ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade), 219 ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr), 220 ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr), 221 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 222 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 223 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 224 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 225 ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte), 226 ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc), 227 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 228 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 229 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 230 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 231 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 232 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 233 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 234 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 235 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 236 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 237 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 238 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 239 240 { }, 241 }; 242 243 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) 244 { 245 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 246 247 return *ext_enabled; 248 } 249 250 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) 251 { 252 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 253 254 *ext_enabled = en; 255 } 256 257 bool riscv_cpu_is_vendor(Object *cpu_obj) 258 { 259 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; 260 } 261 262 const char * const riscv_int_regnames[] = { 263 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 264 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 265 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 266 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 267 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 268 }; 269 270 const char * const riscv_int_regnamesh[] = { 271 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 272 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 273 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 274 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 275 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 276 "x30h/t5h", "x31h/t6h" 277 }; 278 279 const char * const riscv_fpr_regnames[] = { 280 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 281 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 282 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 283 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 284 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 285 "f30/ft10", "f31/ft11" 286 }; 287 288 const char * const riscv_rvv_regnames[] = { 289 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 290 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 291 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 292 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 293 "v28", "v29", "v30", "v31" 294 }; 295 296 static const char * const riscv_excp_names[] = { 297 "misaligned_fetch", 298 "fault_fetch", 299 "illegal_instruction", 300 "breakpoint", 301 "misaligned_load", 302 "fault_load", 303 "misaligned_store", 304 "fault_store", 305 "user_ecall", 306 "supervisor_ecall", 307 "hypervisor_ecall", 308 "machine_ecall", 309 "exec_page_fault", 310 "load_page_fault", 311 "reserved", 312 "store_page_fault", 313 "double_trap", 314 "reserved", 315 "reserved", 316 "reserved", 317 "guest_exec_page_fault", 318 "guest_load_page_fault", 319 "reserved", 320 "guest_store_page_fault", 321 }; 322 323 static const char * const riscv_intr_names[] = { 324 "u_software", 325 "s_software", 326 "vs_software", 327 "m_software", 328 "u_timer", 329 "s_timer", 330 "vs_timer", 331 "m_timer", 332 "u_external", 333 "s_external", 334 "vs_external", 335 "m_external", 336 "reserved", 337 "reserved", 338 "reserved", 339 "reserved" 340 }; 341 342 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 343 { 344 if (async) { 345 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 346 riscv_intr_names[cause] : "(unknown)"; 347 } else { 348 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 349 riscv_excp_names[cause] : "(unknown)"; 350 } 351 } 352 353 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 354 { 355 env->misa_ext_mask = env->misa_ext = ext; 356 } 357 358 int riscv_cpu_max_xlen(RISCVCPUClass *mcc) 359 { 360 return 16 << mcc->misa_mxl_max; 361 } 362 363 #ifndef CONFIG_USER_ONLY 364 static uint8_t satp_mode_from_str(const char *satp_mode_str) 365 { 366 if (!strncmp(satp_mode_str, "mbare", 5)) { 367 return VM_1_10_MBARE; 368 } 369 370 if (!strncmp(satp_mode_str, "sv32", 4)) { 371 return VM_1_10_SV32; 372 } 373 374 if (!strncmp(satp_mode_str, "sv39", 4)) { 375 return VM_1_10_SV39; 376 } 377 378 if (!strncmp(satp_mode_str, "sv48", 4)) { 379 return VM_1_10_SV48; 380 } 381 382 if (!strncmp(satp_mode_str, "sv57", 4)) { 383 return VM_1_10_SV57; 384 } 385 386 if (!strncmp(satp_mode_str, "sv64", 4)) { 387 return VM_1_10_SV64; 388 } 389 390 g_assert_not_reached(); 391 } 392 393 uint8_t satp_mode_max_from_map(uint32_t map) 394 { 395 /* 396 * 'map = 0' will make us return (31 - 32), which C will 397 * happily overflow to UINT_MAX. There's no good result to 398 * return if 'map = 0' (e.g. returning 0 will be ambiguous 399 * with the result for 'map = 1'). 400 * 401 * Assert out if map = 0. Callers will have to deal with 402 * it outside of this function. 403 */ 404 g_assert(map > 0); 405 406 /* map here has at least one bit set, so no problem with clz */ 407 return 31 - __builtin_clz(map); 408 } 409 410 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 411 { 412 if (is_32_bit) { 413 switch (satp_mode) { 414 case VM_1_10_SV32: 415 return "sv32"; 416 case VM_1_10_MBARE: 417 return "none"; 418 } 419 } else { 420 switch (satp_mode) { 421 case VM_1_10_SV64: 422 return "sv64"; 423 case VM_1_10_SV57: 424 return "sv57"; 425 case VM_1_10_SV48: 426 return "sv48"; 427 case VM_1_10_SV39: 428 return "sv39"; 429 case VM_1_10_MBARE: 430 return "none"; 431 } 432 } 433 434 g_assert_not_reached(); 435 } 436 437 static void set_satp_mode_max_supported(RISCVCPU *cpu, 438 uint8_t satp_mode) 439 { 440 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 441 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 442 443 for (int i = 0; i <= satp_mode; ++i) { 444 if (valid_vm[i]) { 445 cpu->cfg.satp_mode.supported |= (1 << i); 446 } 447 } 448 } 449 450 /* Set the satp mode to the max supported */ 451 static void set_satp_mode_default_map(RISCVCPU *cpu) 452 { 453 /* 454 * Bare CPUs do not default to the max available. 455 * Users must set a valid satp_mode in the command 456 * line. 457 */ 458 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { 459 warn_report("No satp mode set. Defaulting to 'bare'"); 460 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); 461 return; 462 } 463 464 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 465 } 466 #endif 467 468 static void riscv_max_cpu_init(Object *obj) 469 { 470 RISCVCPU *cpu = RISCV_CPU(obj); 471 CPURISCVState *env = &cpu->env; 472 473 cpu->cfg.mmu = true; 474 cpu->cfg.pmp = true; 475 476 env->priv_ver = PRIV_VERSION_LATEST; 477 #ifndef CONFIG_USER_ONLY 478 set_satp_mode_max_supported(RISCV_CPU(obj), 479 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 480 VM_1_10_SV32 : VM_1_10_SV57); 481 #endif 482 } 483 484 #if defined(TARGET_RISCV64) 485 static void rv64_base_cpu_init(Object *obj) 486 { 487 RISCVCPU *cpu = RISCV_CPU(obj); 488 CPURISCVState *env = &cpu->env; 489 490 cpu->cfg.mmu = true; 491 cpu->cfg.pmp = true; 492 493 /* Set latest version of privileged specification */ 494 env->priv_ver = PRIV_VERSION_LATEST; 495 #ifndef CONFIG_USER_ONLY 496 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 497 #endif 498 } 499 500 static void rv64_sifive_u_cpu_init(Object *obj) 501 { 502 RISCVCPU *cpu = RISCV_CPU(obj); 503 CPURISCVState *env = &cpu->env; 504 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 505 env->priv_ver = PRIV_VERSION_1_10_0; 506 #ifndef CONFIG_USER_ONLY 507 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 508 #endif 509 510 /* inherited from parent obj via riscv_cpu_init() */ 511 cpu->cfg.ext_zifencei = true; 512 cpu->cfg.ext_zicsr = true; 513 cpu->cfg.mmu = true; 514 cpu->cfg.pmp = true; 515 } 516 517 static void rv64_sifive_e_cpu_init(Object *obj) 518 { 519 CPURISCVState *env = &RISCV_CPU(obj)->env; 520 RISCVCPU *cpu = RISCV_CPU(obj); 521 522 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 523 env->priv_ver = PRIV_VERSION_1_10_0; 524 #ifndef CONFIG_USER_ONLY 525 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 526 #endif 527 528 /* inherited from parent obj via riscv_cpu_init() */ 529 cpu->cfg.ext_zifencei = true; 530 cpu->cfg.ext_zicsr = true; 531 cpu->cfg.pmp = true; 532 } 533 534 static void rv64_thead_c906_cpu_init(Object *obj) 535 { 536 CPURISCVState *env = &RISCV_CPU(obj)->env; 537 RISCVCPU *cpu = RISCV_CPU(obj); 538 539 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); 540 env->priv_ver = PRIV_VERSION_1_11_0; 541 542 cpu->cfg.ext_zfa = true; 543 cpu->cfg.ext_zfh = true; 544 cpu->cfg.mmu = true; 545 cpu->cfg.ext_xtheadba = true; 546 cpu->cfg.ext_xtheadbb = true; 547 cpu->cfg.ext_xtheadbs = true; 548 cpu->cfg.ext_xtheadcmo = true; 549 cpu->cfg.ext_xtheadcondmov = true; 550 cpu->cfg.ext_xtheadfmemidx = true; 551 cpu->cfg.ext_xtheadmac = true; 552 cpu->cfg.ext_xtheadmemidx = true; 553 cpu->cfg.ext_xtheadmempair = true; 554 cpu->cfg.ext_xtheadsync = true; 555 556 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 557 #ifndef CONFIG_USER_ONLY 558 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 559 th_register_custom_csrs(cpu); 560 #endif 561 562 /* inherited from parent obj via riscv_cpu_init() */ 563 cpu->cfg.pmp = true; 564 } 565 566 static void rv64_veyron_v1_cpu_init(Object *obj) 567 { 568 CPURISCVState *env = &RISCV_CPU(obj)->env; 569 RISCVCPU *cpu = RISCV_CPU(obj); 570 571 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH); 572 env->priv_ver = PRIV_VERSION_1_12_0; 573 574 /* Enable ISA extensions */ 575 cpu->cfg.mmu = true; 576 cpu->cfg.ext_zifencei = true; 577 cpu->cfg.ext_zicsr = true; 578 cpu->cfg.pmp = true; 579 cpu->cfg.ext_zicbom = true; 580 cpu->cfg.cbom_blocksize = 64; 581 cpu->cfg.cboz_blocksize = 64; 582 cpu->cfg.ext_zicboz = true; 583 cpu->cfg.ext_smaia = true; 584 cpu->cfg.ext_ssaia = true; 585 cpu->cfg.ext_sscofpmf = true; 586 cpu->cfg.ext_sstc = true; 587 cpu->cfg.ext_svinval = true; 588 cpu->cfg.ext_svnapot = true; 589 cpu->cfg.ext_svpbmt = true; 590 cpu->cfg.ext_smstateen = true; 591 cpu->cfg.ext_zba = true; 592 cpu->cfg.ext_zbb = true; 593 cpu->cfg.ext_zbc = true; 594 cpu->cfg.ext_zbs = true; 595 cpu->cfg.ext_XVentanaCondOps = true; 596 597 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 598 cpu->cfg.marchid = VEYRON_V1_MARCHID; 599 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 600 601 #ifndef CONFIG_USER_ONLY 602 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 603 #endif 604 } 605 606 /* Tenstorrent Ascalon */ 607 static void rv64_tt_ascalon_cpu_init(Object *obj) 608 { 609 CPURISCVState *env = &RISCV_CPU(obj)->env; 610 RISCVCPU *cpu = RISCV_CPU(obj); 611 612 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV); 613 env->priv_ver = PRIV_VERSION_1_13_0; 614 615 /* Enable ISA extensions */ 616 cpu->cfg.mmu = true; 617 cpu->cfg.vlenb = 256 >> 3; 618 cpu->cfg.elen = 64; 619 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 620 cpu->cfg.rvv_ma_all_1s = true; 621 cpu->cfg.rvv_ta_all_1s = true; 622 cpu->cfg.misa_w = true; 623 cpu->cfg.pmp = true; 624 cpu->cfg.cbom_blocksize = 64; 625 cpu->cfg.cbop_blocksize = 64; 626 cpu->cfg.cboz_blocksize = 64; 627 cpu->cfg.ext_zic64b = true; 628 cpu->cfg.ext_zicbom = true; 629 cpu->cfg.ext_zicbop = true; 630 cpu->cfg.ext_zicboz = true; 631 cpu->cfg.ext_zicntr = true; 632 cpu->cfg.ext_zicond = true; 633 cpu->cfg.ext_zicsr = true; 634 cpu->cfg.ext_zifencei = true; 635 cpu->cfg.ext_zihintntl = true; 636 cpu->cfg.ext_zihintpause = true; 637 cpu->cfg.ext_zihpm = true; 638 cpu->cfg.ext_zimop = true; 639 cpu->cfg.ext_zawrs = true; 640 cpu->cfg.ext_zfa = true; 641 cpu->cfg.ext_zfbfmin = true; 642 cpu->cfg.ext_zfh = true; 643 cpu->cfg.ext_zfhmin = true; 644 cpu->cfg.ext_zcb = true; 645 cpu->cfg.ext_zcmop = true; 646 cpu->cfg.ext_zba = true; 647 cpu->cfg.ext_zbb = true; 648 cpu->cfg.ext_zbs = true; 649 cpu->cfg.ext_zkt = true; 650 cpu->cfg.ext_zvbb = true; 651 cpu->cfg.ext_zvbc = true; 652 cpu->cfg.ext_zvfbfmin = true; 653 cpu->cfg.ext_zvfbfwma = true; 654 cpu->cfg.ext_zvfh = true; 655 cpu->cfg.ext_zvfhmin = true; 656 cpu->cfg.ext_zvkng = true; 657 cpu->cfg.ext_smaia = true; 658 cpu->cfg.ext_smstateen = true; 659 cpu->cfg.ext_ssaia = true; 660 cpu->cfg.ext_sscofpmf = true; 661 cpu->cfg.ext_sstc = true; 662 cpu->cfg.ext_svade = true; 663 cpu->cfg.ext_svinval = true; 664 cpu->cfg.ext_svnapot = true; 665 cpu->cfg.ext_svpbmt = true; 666 667 #ifndef CONFIG_USER_ONLY 668 set_satp_mode_max_supported(cpu, VM_1_10_SV57); 669 #endif 670 } 671 672 static void rv64_xiangshan_nanhu_cpu_init(Object *obj) 673 { 674 CPURISCVState *env = &RISCV_CPU(obj)->env; 675 RISCVCPU *cpu = RISCV_CPU(obj); 676 677 riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU); 678 env->priv_ver = PRIV_VERSION_1_12_0; 679 680 /* Enable ISA extensions */ 681 cpu->cfg.ext_zbc = true; 682 cpu->cfg.ext_zbkb = true; 683 cpu->cfg.ext_zbkc = true; 684 cpu->cfg.ext_zbkx = true; 685 cpu->cfg.ext_zknd = true; 686 cpu->cfg.ext_zkne = true; 687 cpu->cfg.ext_zknh = true; 688 cpu->cfg.ext_zksed = true; 689 cpu->cfg.ext_zksh = true; 690 cpu->cfg.ext_svinval = true; 691 692 cpu->cfg.mmu = true; 693 cpu->cfg.pmp = true; 694 695 #ifndef CONFIG_USER_ONLY 696 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 697 #endif 698 } 699 700 #ifdef CONFIG_TCG 701 static void rv128_base_cpu_init(Object *obj) 702 { 703 RISCVCPU *cpu = RISCV_CPU(obj); 704 CPURISCVState *env = &cpu->env; 705 706 cpu->cfg.mmu = true; 707 cpu->cfg.pmp = true; 708 709 /* Set latest version of privileged specification */ 710 env->priv_ver = PRIV_VERSION_LATEST; 711 #ifndef CONFIG_USER_ONLY 712 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 713 #endif 714 } 715 #endif /* CONFIG_TCG */ 716 717 static void rv64i_bare_cpu_init(Object *obj) 718 { 719 CPURISCVState *env = &RISCV_CPU(obj)->env; 720 riscv_cpu_set_misa_ext(env, RVI); 721 } 722 723 static void rv64e_bare_cpu_init(Object *obj) 724 { 725 CPURISCVState *env = &RISCV_CPU(obj)->env; 726 riscv_cpu_set_misa_ext(env, RVE); 727 } 728 729 #endif /* !TARGET_RISCV64 */ 730 731 #if defined(TARGET_RISCV32) || \ 732 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 733 734 static void rv32_base_cpu_init(Object *obj) 735 { 736 RISCVCPU *cpu = RISCV_CPU(obj); 737 CPURISCVState *env = &cpu->env; 738 739 cpu->cfg.mmu = true; 740 cpu->cfg.pmp = true; 741 742 /* Set latest version of privileged specification */ 743 env->priv_ver = PRIV_VERSION_LATEST; 744 #ifndef CONFIG_USER_ONLY 745 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 746 #endif 747 } 748 749 static void rv32_sifive_u_cpu_init(Object *obj) 750 { 751 RISCVCPU *cpu = RISCV_CPU(obj); 752 CPURISCVState *env = &cpu->env; 753 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 754 env->priv_ver = PRIV_VERSION_1_10_0; 755 #ifndef CONFIG_USER_ONLY 756 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 757 #endif 758 759 /* inherited from parent obj via riscv_cpu_init() */ 760 cpu->cfg.ext_zifencei = true; 761 cpu->cfg.ext_zicsr = true; 762 cpu->cfg.mmu = true; 763 cpu->cfg.pmp = true; 764 } 765 766 static void rv32_sifive_e_cpu_init(Object *obj) 767 { 768 CPURISCVState *env = &RISCV_CPU(obj)->env; 769 RISCVCPU *cpu = RISCV_CPU(obj); 770 771 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 772 env->priv_ver = PRIV_VERSION_1_10_0; 773 #ifndef CONFIG_USER_ONLY 774 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 775 #endif 776 777 /* inherited from parent obj via riscv_cpu_init() */ 778 cpu->cfg.ext_zifencei = true; 779 cpu->cfg.ext_zicsr = true; 780 cpu->cfg.pmp = true; 781 } 782 783 static void rv32_ibex_cpu_init(Object *obj) 784 { 785 CPURISCVState *env = &RISCV_CPU(obj)->env; 786 RISCVCPU *cpu = RISCV_CPU(obj); 787 788 riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); 789 env->priv_ver = PRIV_VERSION_1_12_0; 790 #ifndef CONFIG_USER_ONLY 791 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 792 #endif 793 /* inherited from parent obj via riscv_cpu_init() */ 794 cpu->cfg.ext_zifencei = true; 795 cpu->cfg.ext_zicsr = true; 796 cpu->cfg.pmp = true; 797 cpu->cfg.ext_smepmp = true; 798 799 cpu->cfg.ext_zba = true; 800 cpu->cfg.ext_zbb = true; 801 cpu->cfg.ext_zbc = true; 802 cpu->cfg.ext_zbs = true; 803 } 804 805 static void rv32_imafcu_nommu_cpu_init(Object *obj) 806 { 807 CPURISCVState *env = &RISCV_CPU(obj)->env; 808 RISCVCPU *cpu = RISCV_CPU(obj); 809 810 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); 811 env->priv_ver = PRIV_VERSION_1_10_0; 812 #ifndef CONFIG_USER_ONLY 813 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 814 #endif 815 816 /* inherited from parent obj via riscv_cpu_init() */ 817 cpu->cfg.ext_zifencei = true; 818 cpu->cfg.ext_zicsr = true; 819 cpu->cfg.pmp = true; 820 } 821 822 static void rv32i_bare_cpu_init(Object *obj) 823 { 824 CPURISCVState *env = &RISCV_CPU(obj)->env; 825 riscv_cpu_set_misa_ext(env, RVI); 826 } 827 828 static void rv32e_bare_cpu_init(Object *obj) 829 { 830 CPURISCVState *env = &RISCV_CPU(obj)->env; 831 riscv_cpu_set_misa_ext(env, RVE); 832 } 833 #endif 834 835 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 836 { 837 ObjectClass *oc; 838 char *typename; 839 char **cpuname; 840 841 cpuname = g_strsplit(cpu_model, ",", 1); 842 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 843 oc = object_class_by_name(typename); 844 g_strfreev(cpuname); 845 g_free(typename); 846 847 return oc; 848 } 849 850 char *riscv_cpu_get_name(RISCVCPU *cpu) 851 { 852 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 853 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 854 855 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 856 857 return cpu_model_from_type(typename); 858 } 859 860 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 861 { 862 RISCVCPU *cpu = RISCV_CPU(cs); 863 CPURISCVState *env = &cpu->env; 864 int i, j; 865 uint8_t *p; 866 867 #if !defined(CONFIG_USER_ONLY) 868 if (riscv_has_ext(env, RVH)) { 869 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 870 } 871 #endif 872 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 873 #ifndef CONFIG_USER_ONLY 874 { 875 static const int dump_csrs[] = { 876 CSR_MHARTID, 877 CSR_MSTATUS, 878 CSR_MSTATUSH, 879 /* 880 * CSR_SSTATUS is intentionally omitted here as its value 881 * can be figured out by looking at CSR_MSTATUS 882 */ 883 CSR_HSTATUS, 884 CSR_VSSTATUS, 885 CSR_MIP, 886 CSR_MIE, 887 CSR_MIDELEG, 888 CSR_HIDELEG, 889 CSR_MEDELEG, 890 CSR_HEDELEG, 891 CSR_MTVEC, 892 CSR_STVEC, 893 CSR_VSTVEC, 894 CSR_MEPC, 895 CSR_SEPC, 896 CSR_VSEPC, 897 CSR_MCAUSE, 898 CSR_SCAUSE, 899 CSR_VSCAUSE, 900 CSR_MTVAL, 901 CSR_STVAL, 902 CSR_HTVAL, 903 CSR_MTVAL2, 904 CSR_MSCRATCH, 905 CSR_SSCRATCH, 906 CSR_SATP, 907 }; 908 909 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 910 int csrno = dump_csrs[i]; 911 target_ulong val = 0; 912 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 913 914 /* 915 * Rely on the smode, hmode, etc, predicates within csr.c 916 * to do the filtering of the registers that are present. 917 */ 918 if (res == RISCV_EXCP_NONE) { 919 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 920 csr_ops[csrno].name, val); 921 } 922 } 923 } 924 #endif 925 926 for (i = 0; i < 32; i++) { 927 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 928 riscv_int_regnames[i], env->gpr[i]); 929 if ((i & 3) == 3) { 930 qemu_fprintf(f, "\n"); 931 } 932 } 933 if (flags & CPU_DUMP_FPU) { 934 target_ulong val = 0; 935 RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0); 936 if (res == RISCV_EXCP_NONE) { 937 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 938 csr_ops[CSR_FCSR].name, val); 939 } 940 for (i = 0; i < 32; i++) { 941 qemu_fprintf(f, " %-8s %016" PRIx64, 942 riscv_fpr_regnames[i], env->fpr[i]); 943 if ((i & 3) == 3) { 944 qemu_fprintf(f, "\n"); 945 } 946 } 947 } 948 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 949 static const int dump_rvv_csrs[] = { 950 CSR_VSTART, 951 CSR_VXSAT, 952 CSR_VXRM, 953 CSR_VCSR, 954 CSR_VL, 955 CSR_VTYPE, 956 CSR_VLENB, 957 }; 958 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 959 int csrno = dump_rvv_csrs[i]; 960 target_ulong val = 0; 961 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 962 963 /* 964 * Rely on the smode, hmode, etc, predicates within csr.c 965 * to do the filtering of the registers that are present. 966 */ 967 if (res == RISCV_EXCP_NONE) { 968 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 969 csr_ops[csrno].name, val); 970 } 971 } 972 uint16_t vlenb = cpu->cfg.vlenb; 973 974 for (i = 0; i < 32; i++) { 975 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 976 p = (uint8_t *)env->vreg; 977 for (j = vlenb - 1 ; j >= 0; j--) { 978 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 979 } 980 qemu_fprintf(f, "\n"); 981 } 982 } 983 } 984 985 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 986 { 987 RISCVCPU *cpu = RISCV_CPU(cs); 988 CPURISCVState *env = &cpu->env; 989 990 if (env->xl == MXL_RV32) { 991 env->pc = (int32_t)value; 992 } else { 993 env->pc = value; 994 } 995 } 996 997 static vaddr riscv_cpu_get_pc(CPUState *cs) 998 { 999 RISCVCPU *cpu = RISCV_CPU(cs); 1000 CPURISCVState *env = &cpu->env; 1001 1002 /* Match cpu_get_tb_cpu_state. */ 1003 if (env->xl == MXL_RV32) { 1004 return env->pc & UINT32_MAX; 1005 } 1006 return env->pc; 1007 } 1008 1009 #ifndef CONFIG_USER_ONLY 1010 bool riscv_cpu_has_work(CPUState *cs) 1011 { 1012 RISCVCPU *cpu = RISCV_CPU(cs); 1013 CPURISCVState *env = &cpu->env; 1014 /* 1015 * Definition of the WFI instruction requires it to ignore the privilege 1016 * mode and delegation registers, but respect individual enables 1017 */ 1018 return riscv_cpu_all_pending(env) != 0 || 1019 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || 1020 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; 1021 } 1022 #endif /* !CONFIG_USER_ONLY */ 1023 1024 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) 1025 { 1026 return riscv_env_mmu_index(cpu_env(cs), ifetch); 1027 } 1028 1029 static void riscv_cpu_reset_hold(Object *obj, ResetType type) 1030 { 1031 #ifndef CONFIG_USER_ONLY 1032 uint8_t iprio; 1033 int i, irq, rdzero; 1034 #endif 1035 CPUState *cs = CPU(obj); 1036 RISCVCPU *cpu = RISCV_CPU(cs); 1037 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 1038 CPURISCVState *env = &cpu->env; 1039 1040 if (mcc->parent_phases.hold) { 1041 mcc->parent_phases.hold(obj, type); 1042 } 1043 #ifndef CONFIG_USER_ONLY 1044 env->misa_mxl = mcc->misa_mxl_max; 1045 env->priv = PRV_M; 1046 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 1047 if (env->misa_mxl > MXL_RV32) { 1048 /* 1049 * The reset status of SXL/UXL is undefined, but mstatus is WARL 1050 * and we must ensure that the value after init is valid for read. 1051 */ 1052 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 1053 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 1054 if (riscv_has_ext(env, RVH)) { 1055 env->vsstatus = set_field(env->vsstatus, 1056 MSTATUS64_SXL, env->misa_mxl); 1057 env->vsstatus = set_field(env->vsstatus, 1058 MSTATUS64_UXL, env->misa_mxl); 1059 env->mstatus_hs = set_field(env->mstatus_hs, 1060 MSTATUS64_SXL, env->misa_mxl); 1061 env->mstatus_hs = set_field(env->mstatus_hs, 1062 MSTATUS64_UXL, env->misa_mxl); 1063 } 1064 if (riscv_cpu_cfg(env)->ext_smdbltrp) { 1065 env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1); 1066 } 1067 } 1068 env->mcause = 0; 1069 env->miclaim = MIP_SGEIP; 1070 env->pc = env->resetvec; 1071 env->bins = 0; 1072 env->two_stage_lookup = false; 1073 1074 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 1075 (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ? 1076 MENVCFG_ADUE : 0); 1077 env->henvcfg = 0; 1078 1079 /* Initialized default priorities of local interrupts. */ 1080 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 1081 iprio = riscv_cpu_default_priority(i); 1082 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 1083 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 1084 env->hviprio[i] = 0; 1085 } 1086 i = 0; 1087 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 1088 if (!rdzero) { 1089 env->hviprio[irq] = env->miprio[irq]; 1090 } 1091 i++; 1092 } 1093 1094 /* 1095 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor 1096 * extension is enabled. 1097 */ 1098 if (riscv_has_ext(env, RVH)) { 1099 env->mideleg |= HS_MODE_INTERRUPTS; 1100 } 1101 1102 /* 1103 * Clear mseccfg and unlock all the PMP entries upon reset. 1104 * This is allowed as per the priv and smepmp specifications 1105 * and is needed to clear stale entries across reboots. 1106 */ 1107 if (riscv_cpu_cfg(env)->ext_smepmp) { 1108 env->mseccfg = 0; 1109 } 1110 1111 pmp_unlock_entries(env); 1112 #else 1113 env->priv = PRV_U; 1114 env->senvcfg = 0; 1115 env->menvcfg = 0; 1116 #endif 1117 1118 /* on reset elp is clear */ 1119 env->elp = false; 1120 /* on reset ssp is set to 0 */ 1121 env->ssp = 0; 1122 1123 env->xl = riscv_cpu_mxl(env); 1124 cs->exception_index = RISCV_EXCP_NONE; 1125 env->load_res = -1; 1126 set_default_nan_mode(1, &env->fp_status); 1127 /* Default NaN value: sign bit clear, frac msb set */ 1128 set_float_default_nan_pattern(0b01000000, &env->fp_status); 1129 env->vill = true; 1130 1131 #ifndef CONFIG_USER_ONLY 1132 if (cpu->cfg.debug) { 1133 riscv_trigger_reset_hold(env); 1134 } 1135 1136 if (cpu->cfg.ext_smrnmi) { 1137 env->rnmip = 0; 1138 env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false); 1139 } 1140 1141 if (kvm_enabled()) { 1142 kvm_riscv_reset_vcpu(cpu); 1143 } 1144 #endif 1145 } 1146 1147 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 1148 { 1149 RISCVCPU *cpu = RISCV_CPU(s); 1150 CPURISCVState *env = &cpu->env; 1151 info->target_info = &cpu->cfg; 1152 1153 /* 1154 * A couple of bits in MSTATUS set the endianness: 1155 * - MSTATUS_UBE (User-mode), 1156 * - MSTATUS_SBE (Supervisor-mode), 1157 * - MSTATUS_MBE (Machine-mode) 1158 * but we don't implement that yet. 1159 */ 1160 info->endian = BFD_ENDIAN_LITTLE; 1161 1162 switch (env->xl) { 1163 case MXL_RV32: 1164 info->print_insn = print_insn_riscv32; 1165 break; 1166 case MXL_RV64: 1167 info->print_insn = print_insn_riscv64; 1168 break; 1169 case MXL_RV128: 1170 info->print_insn = print_insn_riscv128; 1171 break; 1172 default: 1173 g_assert_not_reached(); 1174 } 1175 } 1176 1177 #ifndef CONFIG_USER_ONLY 1178 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1179 { 1180 bool rv32 = riscv_cpu_is_32bit(cpu); 1181 uint8_t satp_mode_map_max, satp_mode_supported_max; 1182 1183 /* The CPU wants the OS to decide which satp mode to use */ 1184 if (cpu->cfg.satp_mode.supported == 0) { 1185 return; 1186 } 1187 1188 satp_mode_supported_max = 1189 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1190 1191 if (cpu->cfg.satp_mode.map == 0) { 1192 if (cpu->cfg.satp_mode.init == 0) { 1193 /* If unset by the user, we fallback to the default satp mode. */ 1194 set_satp_mode_default_map(cpu); 1195 } else { 1196 /* 1197 * Find the lowest level that was disabled and then enable the 1198 * first valid level below which can be found in 1199 * valid_vm_1_10_32/64. 1200 */ 1201 for (int i = 1; i < 16; ++i) { 1202 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1203 (cpu->cfg.satp_mode.supported & (1 << i))) { 1204 for (int j = i - 1; j >= 0; --j) { 1205 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1206 cpu->cfg.satp_mode.map |= (1 << j); 1207 break; 1208 } 1209 } 1210 break; 1211 } 1212 } 1213 } 1214 } 1215 1216 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1217 1218 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1219 if (satp_mode_map_max > satp_mode_supported_max) { 1220 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1221 satp_mode_str(satp_mode_map_max, rv32), 1222 satp_mode_str(satp_mode_supported_max, rv32)); 1223 return; 1224 } 1225 1226 /* 1227 * Make sure the user did not ask for an invalid configuration as per 1228 * the specification. 1229 */ 1230 if (!rv32) { 1231 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1232 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1233 (cpu->cfg.satp_mode.init & (1 << i)) && 1234 (cpu->cfg.satp_mode.supported & (1 << i))) { 1235 error_setg(errp, "cannot disable %s satp mode if %s " 1236 "is enabled", satp_mode_str(i, false), 1237 satp_mode_str(satp_mode_map_max, false)); 1238 return; 1239 } 1240 } 1241 } 1242 1243 /* Finally expand the map so that all valid modes are set */ 1244 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1245 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1246 cpu->cfg.satp_mode.map |= (1 << i); 1247 } 1248 } 1249 } 1250 #endif 1251 1252 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1253 { 1254 Error *local_err = NULL; 1255 1256 #ifndef CONFIG_USER_ONLY 1257 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1258 if (local_err != NULL) { 1259 error_propagate(errp, local_err); 1260 return; 1261 } 1262 #endif 1263 1264 if (tcg_enabled()) { 1265 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1266 if (local_err != NULL) { 1267 error_propagate(errp, local_err); 1268 return; 1269 } 1270 riscv_tcg_cpu_finalize_dynamic_decoder(cpu); 1271 } else if (kvm_enabled()) { 1272 riscv_kvm_cpu_finalize_features(cpu, &local_err); 1273 if (local_err != NULL) { 1274 error_propagate(errp, local_err); 1275 return; 1276 } 1277 } 1278 } 1279 1280 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1281 { 1282 CPUState *cs = CPU(dev); 1283 RISCVCPU *cpu = RISCV_CPU(dev); 1284 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1285 Error *local_err = NULL; 1286 1287 cpu_exec_realizefn(cs, &local_err); 1288 if (local_err != NULL) { 1289 error_propagate(errp, local_err); 1290 return; 1291 } 1292 1293 riscv_cpu_finalize_features(cpu, &local_err); 1294 if (local_err != NULL) { 1295 error_propagate(errp, local_err); 1296 return; 1297 } 1298 1299 riscv_cpu_register_gdb_regs_for_features(cs); 1300 1301 #ifndef CONFIG_USER_ONLY 1302 if (cpu->cfg.debug) { 1303 riscv_trigger_realize(&cpu->env); 1304 } 1305 #endif 1306 1307 qemu_init_vcpu(cs); 1308 cpu_reset(cs); 1309 1310 mcc->parent_realize(dev, errp); 1311 } 1312 1313 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1314 { 1315 if (tcg_enabled()) { 1316 return riscv_cpu_tcg_compatible(cpu); 1317 } 1318 1319 return true; 1320 } 1321 1322 #ifndef CONFIG_USER_ONLY 1323 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1324 void *opaque, Error **errp) 1325 { 1326 RISCVSATPMap *satp_map = opaque; 1327 uint8_t satp = satp_mode_from_str(name); 1328 bool value; 1329 1330 value = satp_map->map & (1 << satp); 1331 1332 visit_type_bool(v, name, &value, errp); 1333 } 1334 1335 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1336 void *opaque, Error **errp) 1337 { 1338 RISCVSATPMap *satp_map = opaque; 1339 uint8_t satp = satp_mode_from_str(name); 1340 bool value; 1341 1342 if (!visit_type_bool(v, name, &value, errp)) { 1343 return; 1344 } 1345 1346 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1347 satp_map->init |= 1 << satp; 1348 } 1349 1350 void riscv_add_satp_mode_properties(Object *obj) 1351 { 1352 RISCVCPU *cpu = RISCV_CPU(obj); 1353 1354 if (cpu->env.misa_mxl == MXL_RV32) { 1355 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1356 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1357 } else { 1358 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1359 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1360 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1361 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1362 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1363 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1364 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1365 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1366 } 1367 } 1368 1369 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1370 { 1371 RISCVCPU *cpu = RISCV_CPU(opaque); 1372 CPURISCVState *env = &cpu->env; 1373 1374 if (irq < IRQ_LOCAL_MAX) { 1375 switch (irq) { 1376 case IRQ_U_SOFT: 1377 case IRQ_S_SOFT: 1378 case IRQ_VS_SOFT: 1379 case IRQ_M_SOFT: 1380 case IRQ_U_TIMER: 1381 case IRQ_S_TIMER: 1382 case IRQ_VS_TIMER: 1383 case IRQ_M_TIMER: 1384 case IRQ_U_EXT: 1385 case IRQ_VS_EXT: 1386 case IRQ_M_EXT: 1387 if (kvm_enabled()) { 1388 kvm_riscv_set_irq(cpu, irq, level); 1389 } else { 1390 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1391 } 1392 break; 1393 case IRQ_S_EXT: 1394 if (kvm_enabled()) { 1395 kvm_riscv_set_irq(cpu, irq, level); 1396 } else { 1397 env->external_seip = level; 1398 riscv_cpu_update_mip(env, 1 << irq, 1399 BOOL_TO_MASK(level | env->software_seip)); 1400 } 1401 break; 1402 default: 1403 g_assert_not_reached(); 1404 } 1405 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1406 /* Require H-extension for handling guest local interrupts */ 1407 if (!riscv_has_ext(env, RVH)) { 1408 g_assert_not_reached(); 1409 } 1410 1411 /* Compute bit position in HGEIP CSR */ 1412 irq = irq - IRQ_LOCAL_MAX + 1; 1413 if (env->geilen < irq) { 1414 g_assert_not_reached(); 1415 } 1416 1417 /* Update HGEIP CSR */ 1418 env->hgeip &= ~((target_ulong)1 << irq); 1419 if (level) { 1420 env->hgeip |= (target_ulong)1 << irq; 1421 } 1422 1423 /* Update mip.SGEIP bit */ 1424 riscv_cpu_update_mip(env, MIP_SGEIP, 1425 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1426 } else { 1427 g_assert_not_reached(); 1428 } 1429 } 1430 1431 static void riscv_cpu_set_nmi(void *opaque, int irq, int level) 1432 { 1433 riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level); 1434 } 1435 #endif /* CONFIG_USER_ONLY */ 1436 1437 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1438 { 1439 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1440 } 1441 1442 static void riscv_cpu_post_init(Object *obj) 1443 { 1444 accel_cpu_instance_init(CPU(obj)); 1445 } 1446 1447 static void riscv_cpu_init(Object *obj) 1448 { 1449 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 1450 RISCVCPU *cpu = RISCV_CPU(obj); 1451 CPURISCVState *env = &cpu->env; 1452 1453 env->misa_mxl = mcc->misa_mxl_max; 1454 1455 #ifndef CONFIG_USER_ONLY 1456 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1457 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1458 qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi, 1459 "riscv.cpu.rnmi", RNMI_MAX); 1460 #endif /* CONFIG_USER_ONLY */ 1461 1462 general_user_opts = g_hash_table_new(g_str_hash, g_str_equal); 1463 1464 /* 1465 * The timer and performance counters extensions were supported 1466 * in QEMU before they were added as discrete extensions in the 1467 * ISA. To keep compatibility we'll always default them to 'true' 1468 * for all CPUs. Each accelerator will decide what to do when 1469 * users disable them. 1470 */ 1471 RISCV_CPU(obj)->cfg.ext_zicntr = true; 1472 RISCV_CPU(obj)->cfg.ext_zihpm = true; 1473 1474 /* Default values for non-bool cpu properties */ 1475 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16); 1476 cpu->cfg.vlenb = 128 >> 3; 1477 cpu->cfg.elen = 64; 1478 cpu->cfg.cbom_blocksize = 64; 1479 cpu->cfg.cbop_blocksize = 64; 1480 cpu->cfg.cboz_blocksize = 64; 1481 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 1482 } 1483 1484 static void riscv_bare_cpu_init(Object *obj) 1485 { 1486 RISCVCPU *cpu = RISCV_CPU(obj); 1487 1488 /* 1489 * Bare CPUs do not inherit the timer and performance 1490 * counters from the parent class (see riscv_cpu_init() 1491 * for info on why the parent enables them). 1492 * 1493 * Users have to explicitly enable these counters for 1494 * bare CPUs. 1495 */ 1496 cpu->cfg.ext_zicntr = false; 1497 cpu->cfg.ext_zihpm = false; 1498 1499 /* Set to QEMU's first supported priv version */ 1500 cpu->env.priv_ver = PRIV_VERSION_1_10_0; 1501 1502 /* 1503 * Support all available satp_mode settings. The default 1504 * value will be set to MBARE if the user doesn't set 1505 * satp_mode manually (see set_satp_mode_default()). 1506 */ 1507 #ifndef CONFIG_USER_ONLY 1508 set_satp_mode_max_supported(cpu, VM_1_10_SV64); 1509 #endif 1510 } 1511 1512 typedef struct misa_ext_info { 1513 const char *name; 1514 const char *description; 1515 } MISAExtInfo; 1516 1517 #define MISA_INFO_IDX(_bit) \ 1518 __builtin_ctz(_bit) 1519 1520 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1521 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1522 1523 static const MISAExtInfo misa_ext_info_arr[] = { 1524 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1525 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1526 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1527 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1528 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1529 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1530 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1531 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1532 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1533 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1534 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1535 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1536 MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)") 1537 }; 1538 1539 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc) 1540 { 1541 CPUClass *cc = CPU_CLASS(mcc); 1542 1543 /* Validate that MISA_MXL is set properly. */ 1544 switch (mcc->misa_mxl_max) { 1545 #ifdef TARGET_RISCV64 1546 case MXL_RV64: 1547 case MXL_RV128: 1548 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1549 break; 1550 #endif 1551 case MXL_RV32: 1552 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1553 break; 1554 default: 1555 g_assert_not_reached(); 1556 } 1557 } 1558 1559 static int riscv_validate_misa_info_idx(uint32_t bit) 1560 { 1561 int idx; 1562 1563 /* 1564 * Our lowest valid input (RVA) is 1 and 1565 * __builtin_ctz() is UB with zero. 1566 */ 1567 g_assert(bit != 0); 1568 idx = MISA_INFO_IDX(bit); 1569 1570 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1571 return idx; 1572 } 1573 1574 const char *riscv_get_misa_ext_name(uint32_t bit) 1575 { 1576 int idx = riscv_validate_misa_info_idx(bit); 1577 const char *val = misa_ext_info_arr[idx].name; 1578 1579 g_assert(val != NULL); 1580 return val; 1581 } 1582 1583 const char *riscv_get_misa_ext_description(uint32_t bit) 1584 { 1585 int idx = riscv_validate_misa_info_idx(bit); 1586 const char *val = misa_ext_info_arr[idx].description; 1587 1588 g_assert(val != NULL); 1589 return val; 1590 } 1591 1592 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \ 1593 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ 1594 .enabled = _defval} 1595 1596 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { 1597 /* Defaults for standard extensions */ 1598 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), 1599 MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false), 1600 MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false), 1601 MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false), 1602 MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false), 1603 MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false), 1604 MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false), 1605 MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false), 1606 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), 1607 MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false), 1608 MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false), 1609 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), 1610 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), 1611 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), 1612 MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false), 1613 MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false), 1614 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), 1615 MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), 1616 MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false), 1617 MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), 1618 MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), 1619 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), 1620 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), 1621 MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false), 1622 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), 1623 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false), 1624 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false), 1625 MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false), 1626 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false), 1627 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false), 1628 MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false), 1629 MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false), 1630 MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false), 1631 MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false), 1632 MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false), 1633 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), 1634 MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false), 1635 MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false), 1636 MULTI_EXT_CFG_BOOL("supm", ext_supm, false), 1637 1638 MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false), 1639 MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false), 1640 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), 1641 MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false), 1642 MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false), 1643 MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false), 1644 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1645 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false), 1646 MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false), 1647 MULTI_EXT_CFG_BOOL("svade", ext_svade, false), 1648 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1649 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1650 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1651 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1652 MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true), 1653 1654 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1655 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1656 1657 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1658 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1659 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1660 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1661 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1662 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1663 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1664 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1665 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1666 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1667 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1668 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1669 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1670 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1671 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1672 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1673 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1674 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1675 1676 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1677 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1678 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1679 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1680 1681 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1682 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1683 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1684 1685 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1686 1687 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1688 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1689 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1690 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1691 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1692 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1693 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1694 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1695 1696 /* Vector cryptography extensions */ 1697 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1698 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1699 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false), 1700 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1701 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1702 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1703 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1704 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1705 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1706 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1707 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1708 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1709 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1710 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1711 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1712 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1713 1714 { }, 1715 }; 1716 1717 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1718 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1719 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1720 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1721 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1722 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1723 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1724 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1725 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1726 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1727 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1728 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1729 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1730 1731 { }, 1732 }; 1733 1734 /* These are experimental so mark with 'x-' */ 1735 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1736 MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false), 1737 1738 { }, 1739 }; 1740 1741 /* 1742 * 'Named features' is the name we give to extensions that we 1743 * don't want to expose to users. They are either immutable 1744 * (always enabled/disable) or they'll vary depending on 1745 * the resulting CPU state. They have riscv,isa strings 1746 * and priv_ver like regular extensions. 1747 */ 1748 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { 1749 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), 1750 MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true), 1751 MULTI_EXT_CFG_BOOL("sha", ext_sha, true), 1752 MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true), 1753 1754 { }, 1755 }; 1756 1757 /* Deprecated entries marked for future removal */ 1758 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1759 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1760 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1761 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1762 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1763 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1764 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1765 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1766 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1767 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1768 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1769 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1770 1771 { }, 1772 }; 1773 1774 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, 1775 Error **errp) 1776 { 1777 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1778 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", 1779 cpuname, propname); 1780 } 1781 1782 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1783 void *opaque, Error **errp) 1784 { 1785 RISCVCPU *cpu = RISCV_CPU(obj); 1786 uint8_t pmu_num, curr_pmu_num; 1787 uint32_t pmu_mask; 1788 1789 visit_type_uint8(v, name, &pmu_num, errp); 1790 1791 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); 1792 1793 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { 1794 cpu_set_prop_err(cpu, name, errp); 1795 error_append_hint(errp, "Current '%s' val: %u\n", 1796 name, curr_pmu_num); 1797 return; 1798 } 1799 1800 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1801 error_setg(errp, "Number of counters exceeds maximum available"); 1802 return; 1803 } 1804 1805 if (pmu_num == 0) { 1806 pmu_mask = 0; 1807 } else { 1808 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1809 } 1810 1811 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1812 cpu->cfg.pmu_mask = pmu_mask; 1813 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1814 } 1815 1816 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1817 void *opaque, Error **errp) 1818 { 1819 RISCVCPU *cpu = RISCV_CPU(obj); 1820 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1821 1822 visit_type_uint8(v, name, &pmu_num, errp); 1823 } 1824 1825 static const PropertyInfo prop_pmu_num = { 1826 .type = "int8", 1827 .description = "pmu-num", 1828 .get = prop_pmu_num_get, 1829 .set = prop_pmu_num_set, 1830 }; 1831 1832 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1833 void *opaque, Error **errp) 1834 { 1835 RISCVCPU *cpu = RISCV_CPU(obj); 1836 uint32_t value; 1837 uint8_t pmu_num; 1838 1839 visit_type_uint32(v, name, &value, errp); 1840 1841 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1842 cpu_set_prop_err(cpu, name, errp); 1843 error_append_hint(errp, "Current '%s' val: %x\n", 1844 name, cpu->cfg.pmu_mask); 1845 return; 1846 } 1847 1848 pmu_num = ctpop32(value); 1849 1850 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1851 error_setg(errp, "Number of counters exceeds maximum available"); 1852 return; 1853 } 1854 1855 cpu_option_add_user_setting(name, value); 1856 cpu->cfg.pmu_mask = value; 1857 } 1858 1859 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1860 void *opaque, Error **errp) 1861 { 1862 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1863 1864 visit_type_uint8(v, name, &pmu_mask, errp); 1865 } 1866 1867 static const PropertyInfo prop_pmu_mask = { 1868 .type = "int8", 1869 .description = "pmu-mask", 1870 .get = prop_pmu_mask_get, 1871 .set = prop_pmu_mask_set, 1872 }; 1873 1874 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1875 void *opaque, Error **errp) 1876 { 1877 RISCVCPU *cpu = RISCV_CPU(obj); 1878 bool value; 1879 1880 visit_type_bool(v, name, &value, errp); 1881 1882 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1883 cpu_set_prop_err(cpu, "mmu", errp); 1884 return; 1885 } 1886 1887 cpu_option_add_user_setting(name, value); 1888 cpu->cfg.mmu = value; 1889 } 1890 1891 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1892 void *opaque, Error **errp) 1893 { 1894 bool value = RISCV_CPU(obj)->cfg.mmu; 1895 1896 visit_type_bool(v, name, &value, errp); 1897 } 1898 1899 static const PropertyInfo prop_mmu = { 1900 .type = "bool", 1901 .description = "mmu", 1902 .get = prop_mmu_get, 1903 .set = prop_mmu_set, 1904 }; 1905 1906 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1907 void *opaque, Error **errp) 1908 { 1909 RISCVCPU *cpu = RISCV_CPU(obj); 1910 bool value; 1911 1912 visit_type_bool(v, name, &value, errp); 1913 1914 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1915 cpu_set_prop_err(cpu, name, errp); 1916 return; 1917 } 1918 1919 cpu_option_add_user_setting(name, value); 1920 cpu->cfg.pmp = value; 1921 } 1922 1923 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1924 void *opaque, Error **errp) 1925 { 1926 bool value = RISCV_CPU(obj)->cfg.pmp; 1927 1928 visit_type_bool(v, name, &value, errp); 1929 } 1930 1931 static const PropertyInfo prop_pmp = { 1932 .type = "bool", 1933 .description = "pmp", 1934 .get = prop_pmp_get, 1935 .set = prop_pmp_set, 1936 }; 1937 1938 static int priv_spec_from_str(const char *priv_spec_str) 1939 { 1940 int priv_version = -1; 1941 1942 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1943 priv_version = PRIV_VERSION_1_13_0; 1944 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1945 priv_version = PRIV_VERSION_1_12_0; 1946 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1947 priv_version = PRIV_VERSION_1_11_0; 1948 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1949 priv_version = PRIV_VERSION_1_10_0; 1950 } 1951 1952 return priv_version; 1953 } 1954 1955 const char *priv_spec_to_str(int priv_version) 1956 { 1957 switch (priv_version) { 1958 case PRIV_VERSION_1_10_0: 1959 return PRIV_VER_1_10_0_STR; 1960 case PRIV_VERSION_1_11_0: 1961 return PRIV_VER_1_11_0_STR; 1962 case PRIV_VERSION_1_12_0: 1963 return PRIV_VER_1_12_0_STR; 1964 case PRIV_VERSION_1_13_0: 1965 return PRIV_VER_1_13_0_STR; 1966 default: 1967 return NULL; 1968 } 1969 } 1970 1971 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1972 void *opaque, Error **errp) 1973 { 1974 RISCVCPU *cpu = RISCV_CPU(obj); 1975 g_autofree char *value = NULL; 1976 int priv_version = -1; 1977 1978 visit_type_str(v, name, &value, errp); 1979 1980 priv_version = priv_spec_from_str(value); 1981 if (priv_version < 0) { 1982 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1983 return; 1984 } 1985 1986 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1987 cpu_set_prop_err(cpu, name, errp); 1988 error_append_hint(errp, "Current '%s' val: %s\n", name, 1989 object_property_get_str(obj, name, NULL)); 1990 return; 1991 } 1992 1993 cpu_option_add_user_setting(name, priv_version); 1994 cpu->env.priv_ver = priv_version; 1995 } 1996 1997 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1998 void *opaque, Error **errp) 1999 { 2000 RISCVCPU *cpu = RISCV_CPU(obj); 2001 const char *value = priv_spec_to_str(cpu->env.priv_ver); 2002 2003 visit_type_str(v, name, (char **)&value, errp); 2004 } 2005 2006 static const PropertyInfo prop_priv_spec = { 2007 .type = "str", 2008 .description = "priv_spec", 2009 /* FIXME enum? */ 2010 .get = prop_priv_spec_get, 2011 .set = prop_priv_spec_set, 2012 }; 2013 2014 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 2015 void *opaque, Error **errp) 2016 { 2017 RISCVCPU *cpu = RISCV_CPU(obj); 2018 g_autofree char *value = NULL; 2019 2020 visit_type_str(v, name, &value, errp); 2021 2022 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 2023 error_setg(errp, "Unsupported vector spec version '%s'", value); 2024 return; 2025 } 2026 2027 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 2028 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 2029 } 2030 2031 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 2032 void *opaque, Error **errp) 2033 { 2034 const char *value = VEXT_VER_1_00_0_STR; 2035 2036 visit_type_str(v, name, (char **)&value, errp); 2037 } 2038 2039 static const PropertyInfo prop_vext_spec = { 2040 .type = "str", 2041 .description = "vext_spec", 2042 /* FIXME enum? */ 2043 .get = prop_vext_spec_get, 2044 .set = prop_vext_spec_set, 2045 }; 2046 2047 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 2048 void *opaque, Error **errp) 2049 { 2050 RISCVCPU *cpu = RISCV_CPU(obj); 2051 uint16_t cpu_vlen = cpu->cfg.vlenb << 3; 2052 uint16_t value; 2053 2054 if (!visit_type_uint16(v, name, &value, errp)) { 2055 return; 2056 } 2057 2058 if (!is_power_of_2(value)) { 2059 error_setg(errp, "Vector extension VLEN must be power of 2"); 2060 return; 2061 } 2062 2063 if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) { 2064 cpu_set_prop_err(cpu, name, errp); 2065 error_append_hint(errp, "Current '%s' val: %u\n", 2066 name, cpu_vlen); 2067 return; 2068 } 2069 2070 cpu_option_add_user_setting(name, value); 2071 cpu->cfg.vlenb = value >> 3; 2072 } 2073 2074 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 2075 void *opaque, Error **errp) 2076 { 2077 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 2078 2079 visit_type_uint16(v, name, &value, errp); 2080 } 2081 2082 static const PropertyInfo prop_vlen = { 2083 .type = "uint16", 2084 .description = "vlen", 2085 .get = prop_vlen_get, 2086 .set = prop_vlen_set, 2087 }; 2088 2089 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 2090 void *opaque, Error **errp) 2091 { 2092 RISCVCPU *cpu = RISCV_CPU(obj); 2093 uint16_t value; 2094 2095 if (!visit_type_uint16(v, name, &value, errp)) { 2096 return; 2097 } 2098 2099 if (!is_power_of_2(value)) { 2100 error_setg(errp, "Vector extension ELEN must be power of 2"); 2101 return; 2102 } 2103 2104 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 2105 cpu_set_prop_err(cpu, name, errp); 2106 error_append_hint(errp, "Current '%s' val: %u\n", 2107 name, cpu->cfg.elen); 2108 return; 2109 } 2110 2111 cpu_option_add_user_setting(name, value); 2112 cpu->cfg.elen = value; 2113 } 2114 2115 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 2116 void *opaque, Error **errp) 2117 { 2118 uint16_t value = RISCV_CPU(obj)->cfg.elen; 2119 2120 visit_type_uint16(v, name, &value, errp); 2121 } 2122 2123 static const PropertyInfo prop_elen = { 2124 .type = "uint16", 2125 .description = "elen", 2126 .get = prop_elen_get, 2127 .set = prop_elen_set, 2128 }; 2129 2130 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 2131 void *opaque, Error **errp) 2132 { 2133 RISCVCPU *cpu = RISCV_CPU(obj); 2134 uint16_t value; 2135 2136 if (!visit_type_uint16(v, name, &value, errp)) { 2137 return; 2138 } 2139 2140 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2141 cpu_set_prop_err(cpu, name, errp); 2142 error_append_hint(errp, "Current '%s' val: %u\n", 2143 name, cpu->cfg.cbom_blocksize); 2144 return; 2145 } 2146 2147 cpu_option_add_user_setting(name, value); 2148 cpu->cfg.cbom_blocksize = value; 2149 } 2150 2151 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2152 void *opaque, Error **errp) 2153 { 2154 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2155 2156 visit_type_uint16(v, name, &value, errp); 2157 } 2158 2159 static const PropertyInfo prop_cbom_blksize = { 2160 .type = "uint16", 2161 .description = "cbom_blocksize", 2162 .get = prop_cbom_blksize_get, 2163 .set = prop_cbom_blksize_set, 2164 }; 2165 2166 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2167 void *opaque, Error **errp) 2168 { 2169 RISCVCPU *cpu = RISCV_CPU(obj); 2170 uint16_t value; 2171 2172 if (!visit_type_uint16(v, name, &value, errp)) { 2173 return; 2174 } 2175 2176 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2177 cpu_set_prop_err(cpu, name, errp); 2178 error_append_hint(errp, "Current '%s' val: %u\n", 2179 name, cpu->cfg.cbop_blocksize); 2180 return; 2181 } 2182 2183 cpu_option_add_user_setting(name, value); 2184 cpu->cfg.cbop_blocksize = value; 2185 } 2186 2187 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2188 void *opaque, Error **errp) 2189 { 2190 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2191 2192 visit_type_uint16(v, name, &value, errp); 2193 } 2194 2195 static const PropertyInfo prop_cbop_blksize = { 2196 .type = "uint16", 2197 .description = "cbop_blocksize", 2198 .get = prop_cbop_blksize_get, 2199 .set = prop_cbop_blksize_set, 2200 }; 2201 2202 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2203 void *opaque, Error **errp) 2204 { 2205 RISCVCPU *cpu = RISCV_CPU(obj); 2206 uint16_t value; 2207 2208 if (!visit_type_uint16(v, name, &value, errp)) { 2209 return; 2210 } 2211 2212 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2213 cpu_set_prop_err(cpu, name, errp); 2214 error_append_hint(errp, "Current '%s' val: %u\n", 2215 name, cpu->cfg.cboz_blocksize); 2216 return; 2217 } 2218 2219 cpu_option_add_user_setting(name, value); 2220 cpu->cfg.cboz_blocksize = value; 2221 } 2222 2223 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2224 void *opaque, Error **errp) 2225 { 2226 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2227 2228 visit_type_uint16(v, name, &value, errp); 2229 } 2230 2231 static const PropertyInfo prop_cboz_blksize = { 2232 .type = "uint16", 2233 .description = "cboz_blocksize", 2234 .get = prop_cboz_blksize_get, 2235 .set = prop_cboz_blksize_set, 2236 }; 2237 2238 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2239 void *opaque, Error **errp) 2240 { 2241 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2242 RISCVCPU *cpu = RISCV_CPU(obj); 2243 uint32_t prev_val = cpu->cfg.mvendorid; 2244 uint32_t value; 2245 2246 if (!visit_type_uint32(v, name, &value, errp)) { 2247 return; 2248 } 2249 2250 if (!dynamic_cpu && prev_val != value) { 2251 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2252 object_get_typename(obj), prev_val); 2253 return; 2254 } 2255 2256 cpu->cfg.mvendorid = value; 2257 } 2258 2259 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2260 void *opaque, Error **errp) 2261 { 2262 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2263 2264 visit_type_uint32(v, name, &value, errp); 2265 } 2266 2267 static const PropertyInfo prop_mvendorid = { 2268 .type = "uint32", 2269 .description = "mvendorid", 2270 .get = prop_mvendorid_get, 2271 .set = prop_mvendorid_set, 2272 }; 2273 2274 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2275 void *opaque, Error **errp) 2276 { 2277 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2278 RISCVCPU *cpu = RISCV_CPU(obj); 2279 uint64_t prev_val = cpu->cfg.mimpid; 2280 uint64_t value; 2281 2282 if (!visit_type_uint64(v, name, &value, errp)) { 2283 return; 2284 } 2285 2286 if (!dynamic_cpu && prev_val != value) { 2287 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2288 object_get_typename(obj), prev_val); 2289 return; 2290 } 2291 2292 cpu->cfg.mimpid = value; 2293 } 2294 2295 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2296 void *opaque, Error **errp) 2297 { 2298 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2299 2300 visit_type_uint64(v, name, &value, errp); 2301 } 2302 2303 static const PropertyInfo prop_mimpid = { 2304 .type = "uint64", 2305 .description = "mimpid", 2306 .get = prop_mimpid_get, 2307 .set = prop_mimpid_set, 2308 }; 2309 2310 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2311 void *opaque, Error **errp) 2312 { 2313 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2314 RISCVCPU *cpu = RISCV_CPU(obj); 2315 uint64_t prev_val = cpu->cfg.marchid; 2316 uint64_t value, invalid_val; 2317 uint32_t mxlen = 0; 2318 2319 if (!visit_type_uint64(v, name, &value, errp)) { 2320 return; 2321 } 2322 2323 if (!dynamic_cpu && prev_val != value) { 2324 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2325 object_get_typename(obj), prev_val); 2326 return; 2327 } 2328 2329 switch (riscv_cpu_mxl(&cpu->env)) { 2330 case MXL_RV32: 2331 mxlen = 32; 2332 break; 2333 case MXL_RV64: 2334 case MXL_RV128: 2335 mxlen = 64; 2336 break; 2337 default: 2338 g_assert_not_reached(); 2339 } 2340 2341 invalid_val = 1LL << (mxlen - 1); 2342 2343 if (value == invalid_val) { 2344 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2345 "and the remaining bits zero", mxlen); 2346 return; 2347 } 2348 2349 cpu->cfg.marchid = value; 2350 } 2351 2352 static void prop_marchid_get(Object *obj, Visitor *v, const char *name, 2353 void *opaque, Error **errp) 2354 { 2355 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 2356 2357 visit_type_uint64(v, name, &value, errp); 2358 } 2359 2360 static const PropertyInfo prop_marchid = { 2361 .type = "uint64", 2362 .description = "marchid", 2363 .get = prop_marchid_get, 2364 .set = prop_marchid_set, 2365 }; 2366 2367 /* 2368 * RVA22U64 defines some 'named features' that are cache 2369 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa 2370 * and Zicclsm. They are always implemented in TCG and 2371 * doesn't need to be manually enabled by the profile. 2372 */ 2373 static RISCVCPUProfile RVA22U64 = { 2374 .u_parent = NULL, 2375 .s_parent = NULL, 2376 .name = "rva22u64", 2377 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU, 2378 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, 2379 .satp_mode = RISCV_PROFILE_ATTR_UNUSED, 2380 .ext_offsets = { 2381 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause), 2382 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb), 2383 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin), 2384 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr), 2385 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom), 2386 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz), 2387 2388 /* mandatory named features for this profile */ 2389 CPU_CFG_OFFSET(ext_zic64b), 2390 2391 RISCV_PROFILE_EXT_LIST_END 2392 } 2393 }; 2394 2395 /* 2396 * As with RVA22U64, RVA22S64 also defines 'named features'. 2397 * 2398 * Cache related features that we consider enabled since we don't 2399 * implement cache: Ssccptr 2400 * 2401 * Other named features that we already implement: Sstvecd, Sstvala, 2402 * Sscounterenw 2403 * 2404 * The remaining features/extensions comes from RVA22U64. 2405 */ 2406 static RISCVCPUProfile RVA22S64 = { 2407 .u_parent = &RVA22U64, 2408 .s_parent = NULL, 2409 .name = "rva22s64", 2410 .misa_ext = RVS, 2411 .priv_spec = PRIV_VERSION_1_12_0, 2412 .satp_mode = VM_1_10_SV39, 2413 .ext_offsets = { 2414 /* rva22s64 exts */ 2415 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt), 2416 CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade), 2417 2418 RISCV_PROFILE_EXT_LIST_END 2419 } 2420 }; 2421 2422 /* 2423 * All mandatory extensions from RVA22U64 are present 2424 * in RVA23U64 so set RVA22 as a parent. We need to 2425 * declare just the newly added mandatory extensions. 2426 */ 2427 static RISCVCPUProfile RVA23U64 = { 2428 .u_parent = &RVA22U64, 2429 .s_parent = NULL, 2430 .name = "rva23u64", 2431 .misa_ext = RVV, 2432 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, 2433 .satp_mode = RISCV_PROFILE_ATTR_UNUSED, 2434 .ext_offsets = { 2435 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb), 2436 CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl), 2437 CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop), 2438 CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb), 2439 CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs), 2440 CPU_CFG_OFFSET(ext_supm), 2441 2442 RISCV_PROFILE_EXT_LIST_END 2443 } 2444 }; 2445 2446 /* 2447 * As with RVA23U64, RVA23S64 also defines 'named features'. 2448 * 2449 * Cache related features that we consider enabled since we don't 2450 * implement cache: Ssccptr 2451 * 2452 * Other named features that we already implement: Sstvecd, Sstvala, 2453 * Sscounterenw, Ssu64xl 2454 * 2455 * The remaining features/extensions comes from RVA23S64. 2456 */ 2457 static RISCVCPUProfile RVA23S64 = { 2458 .u_parent = &RVA23U64, 2459 .s_parent = &RVA22S64, 2460 .name = "rva23s64", 2461 .misa_ext = RVS, 2462 .priv_spec = PRIV_VERSION_1_13_0, 2463 .satp_mode = VM_1_10_SV39, 2464 .ext_offsets = { 2465 /* New in RVA23S64 */ 2466 CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc), 2467 CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm), 2468 2469 /* Named features: Sha */ 2470 CPU_CFG_OFFSET(ext_sha), 2471 2472 RISCV_PROFILE_EXT_LIST_END 2473 } 2474 }; 2475 2476 RISCVCPUProfile *riscv_profiles[] = { 2477 &RVA22U64, 2478 &RVA22S64, 2479 &RVA23U64, 2480 &RVA23S64, 2481 NULL, 2482 }; 2483 2484 static RISCVCPUImpliedExtsRule RVA_IMPLIED = { 2485 .is_misa = true, 2486 .ext = RVA, 2487 .implied_multi_exts = { 2488 CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo), 2489 2490 RISCV_IMPLIED_EXTS_RULE_END 2491 }, 2492 }; 2493 2494 static RISCVCPUImpliedExtsRule RVD_IMPLIED = { 2495 .is_misa = true, 2496 .ext = RVD, 2497 .implied_misa_exts = RVF, 2498 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2499 }; 2500 2501 static RISCVCPUImpliedExtsRule RVF_IMPLIED = { 2502 .is_misa = true, 2503 .ext = RVF, 2504 .implied_multi_exts = { 2505 CPU_CFG_OFFSET(ext_zicsr), 2506 2507 RISCV_IMPLIED_EXTS_RULE_END 2508 }, 2509 }; 2510 2511 static RISCVCPUImpliedExtsRule RVM_IMPLIED = { 2512 .is_misa = true, 2513 .ext = RVM, 2514 .implied_multi_exts = { 2515 CPU_CFG_OFFSET(ext_zmmul), 2516 2517 RISCV_IMPLIED_EXTS_RULE_END 2518 }, 2519 }; 2520 2521 static RISCVCPUImpliedExtsRule RVV_IMPLIED = { 2522 .is_misa = true, 2523 .ext = RVV, 2524 .implied_multi_exts = { 2525 CPU_CFG_OFFSET(ext_zve64d), 2526 2527 RISCV_IMPLIED_EXTS_RULE_END 2528 }, 2529 }; 2530 2531 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = { 2532 .ext = CPU_CFG_OFFSET(ext_zcb), 2533 .implied_multi_exts = { 2534 CPU_CFG_OFFSET(ext_zca), 2535 2536 RISCV_IMPLIED_EXTS_RULE_END 2537 }, 2538 }; 2539 2540 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = { 2541 .ext = CPU_CFG_OFFSET(ext_zcd), 2542 .implied_misa_exts = RVD, 2543 .implied_multi_exts = { 2544 CPU_CFG_OFFSET(ext_zca), 2545 2546 RISCV_IMPLIED_EXTS_RULE_END 2547 }, 2548 }; 2549 2550 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = { 2551 .ext = CPU_CFG_OFFSET(ext_zce), 2552 .implied_multi_exts = { 2553 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp), 2554 CPU_CFG_OFFSET(ext_zcmt), 2555 2556 RISCV_IMPLIED_EXTS_RULE_END 2557 }, 2558 }; 2559 2560 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = { 2561 .ext = CPU_CFG_OFFSET(ext_zcf), 2562 .implied_misa_exts = RVF, 2563 .implied_multi_exts = { 2564 CPU_CFG_OFFSET(ext_zca), 2565 2566 RISCV_IMPLIED_EXTS_RULE_END 2567 }, 2568 }; 2569 2570 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2571 .ext = CPU_CFG_OFFSET(ext_zcmp), 2572 .implied_multi_exts = { 2573 CPU_CFG_OFFSET(ext_zca), 2574 2575 RISCV_IMPLIED_EXTS_RULE_END 2576 }, 2577 }; 2578 2579 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2580 .ext = CPU_CFG_OFFSET(ext_zcmt), 2581 .implied_multi_exts = { 2582 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2583 2584 RISCV_IMPLIED_EXTS_RULE_END 2585 }, 2586 }; 2587 2588 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2589 .ext = CPU_CFG_OFFSET(ext_zdinx), 2590 .implied_multi_exts = { 2591 CPU_CFG_OFFSET(ext_zfinx), 2592 2593 RISCV_IMPLIED_EXTS_RULE_END 2594 }, 2595 }; 2596 2597 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2598 .ext = CPU_CFG_OFFSET(ext_zfa), 2599 .implied_misa_exts = RVF, 2600 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2601 }; 2602 2603 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2604 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2605 .implied_misa_exts = RVF, 2606 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2607 }; 2608 2609 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2610 .ext = CPU_CFG_OFFSET(ext_zfh), 2611 .implied_multi_exts = { 2612 CPU_CFG_OFFSET(ext_zfhmin), 2613 2614 RISCV_IMPLIED_EXTS_RULE_END 2615 }, 2616 }; 2617 2618 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2619 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2620 .implied_misa_exts = RVF, 2621 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2622 }; 2623 2624 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2625 .ext = CPU_CFG_OFFSET(ext_zfinx), 2626 .implied_multi_exts = { 2627 CPU_CFG_OFFSET(ext_zicsr), 2628 2629 RISCV_IMPLIED_EXTS_RULE_END 2630 }, 2631 }; 2632 2633 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2634 .ext = CPU_CFG_OFFSET(ext_zhinx), 2635 .implied_multi_exts = { 2636 CPU_CFG_OFFSET(ext_zhinxmin), 2637 2638 RISCV_IMPLIED_EXTS_RULE_END 2639 }, 2640 }; 2641 2642 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2643 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2644 .implied_multi_exts = { 2645 CPU_CFG_OFFSET(ext_zfinx), 2646 2647 RISCV_IMPLIED_EXTS_RULE_END 2648 }, 2649 }; 2650 2651 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2652 .ext = CPU_CFG_OFFSET(ext_zicntr), 2653 .implied_multi_exts = { 2654 CPU_CFG_OFFSET(ext_zicsr), 2655 2656 RISCV_IMPLIED_EXTS_RULE_END 2657 }, 2658 }; 2659 2660 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2661 .ext = CPU_CFG_OFFSET(ext_zihpm), 2662 .implied_multi_exts = { 2663 CPU_CFG_OFFSET(ext_zicsr), 2664 2665 RISCV_IMPLIED_EXTS_RULE_END 2666 }, 2667 }; 2668 2669 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2670 .ext = CPU_CFG_OFFSET(ext_zk), 2671 .implied_multi_exts = { 2672 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2673 CPU_CFG_OFFSET(ext_zkt), 2674 2675 RISCV_IMPLIED_EXTS_RULE_END 2676 }, 2677 }; 2678 2679 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2680 .ext = CPU_CFG_OFFSET(ext_zkn), 2681 .implied_multi_exts = { 2682 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2683 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2684 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2685 2686 RISCV_IMPLIED_EXTS_RULE_END 2687 }, 2688 }; 2689 2690 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2691 .ext = CPU_CFG_OFFSET(ext_zks), 2692 .implied_multi_exts = { 2693 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2694 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2695 CPU_CFG_OFFSET(ext_zksh), 2696 2697 RISCV_IMPLIED_EXTS_RULE_END 2698 }, 2699 }; 2700 2701 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2702 .ext = CPU_CFG_OFFSET(ext_zvbb), 2703 .implied_multi_exts = { 2704 CPU_CFG_OFFSET(ext_zvkb), 2705 2706 RISCV_IMPLIED_EXTS_RULE_END 2707 }, 2708 }; 2709 2710 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2711 .ext = CPU_CFG_OFFSET(ext_zve32f), 2712 .implied_misa_exts = RVF, 2713 .implied_multi_exts = { 2714 CPU_CFG_OFFSET(ext_zve32x), 2715 2716 RISCV_IMPLIED_EXTS_RULE_END 2717 }, 2718 }; 2719 2720 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2721 .ext = CPU_CFG_OFFSET(ext_zve32x), 2722 .implied_multi_exts = { 2723 CPU_CFG_OFFSET(ext_zicsr), 2724 2725 RISCV_IMPLIED_EXTS_RULE_END 2726 }, 2727 }; 2728 2729 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2730 .ext = CPU_CFG_OFFSET(ext_zve64d), 2731 .implied_misa_exts = RVD, 2732 .implied_multi_exts = { 2733 CPU_CFG_OFFSET(ext_zve64f), 2734 2735 RISCV_IMPLIED_EXTS_RULE_END 2736 }, 2737 }; 2738 2739 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2740 .ext = CPU_CFG_OFFSET(ext_zve64f), 2741 .implied_misa_exts = RVF, 2742 .implied_multi_exts = { 2743 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2744 2745 RISCV_IMPLIED_EXTS_RULE_END 2746 }, 2747 }; 2748 2749 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2750 .ext = CPU_CFG_OFFSET(ext_zve64x), 2751 .implied_multi_exts = { 2752 CPU_CFG_OFFSET(ext_zve32x), 2753 2754 RISCV_IMPLIED_EXTS_RULE_END 2755 }, 2756 }; 2757 2758 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2759 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2760 .implied_multi_exts = { 2761 CPU_CFG_OFFSET(ext_zve32f), 2762 2763 RISCV_IMPLIED_EXTS_RULE_END 2764 }, 2765 }; 2766 2767 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2768 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2769 .implied_multi_exts = { 2770 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2771 2772 RISCV_IMPLIED_EXTS_RULE_END 2773 }, 2774 }; 2775 2776 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2777 .ext = CPU_CFG_OFFSET(ext_zvfh), 2778 .implied_multi_exts = { 2779 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2780 2781 RISCV_IMPLIED_EXTS_RULE_END 2782 }, 2783 }; 2784 2785 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2786 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2787 .implied_multi_exts = { 2788 CPU_CFG_OFFSET(ext_zve32f), 2789 2790 RISCV_IMPLIED_EXTS_RULE_END 2791 }, 2792 }; 2793 2794 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2795 .ext = CPU_CFG_OFFSET(ext_zvkn), 2796 .implied_multi_exts = { 2797 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2798 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2799 2800 RISCV_IMPLIED_EXTS_RULE_END 2801 }, 2802 }; 2803 2804 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2805 .ext = CPU_CFG_OFFSET(ext_zvknc), 2806 .implied_multi_exts = { 2807 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2808 2809 RISCV_IMPLIED_EXTS_RULE_END 2810 }, 2811 }; 2812 2813 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2814 .ext = CPU_CFG_OFFSET(ext_zvkng), 2815 .implied_multi_exts = { 2816 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2817 2818 RISCV_IMPLIED_EXTS_RULE_END 2819 }, 2820 }; 2821 2822 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2823 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2824 .implied_multi_exts = { 2825 CPU_CFG_OFFSET(ext_zve64x), 2826 2827 RISCV_IMPLIED_EXTS_RULE_END 2828 }, 2829 }; 2830 2831 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2832 .ext = CPU_CFG_OFFSET(ext_zvks), 2833 .implied_multi_exts = { 2834 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh), 2835 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2836 2837 RISCV_IMPLIED_EXTS_RULE_END 2838 }, 2839 }; 2840 2841 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2842 .ext = CPU_CFG_OFFSET(ext_zvksc), 2843 .implied_multi_exts = { 2844 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2845 2846 RISCV_IMPLIED_EXTS_RULE_END 2847 }, 2848 }; 2849 2850 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2851 .ext = CPU_CFG_OFFSET(ext_zvksg), 2852 .implied_multi_exts = { 2853 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2854 2855 RISCV_IMPLIED_EXTS_RULE_END 2856 }, 2857 }; 2858 2859 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = { 2860 .ext = CPU_CFG_OFFSET(ext_ssccfg), 2861 .implied_multi_exts = { 2862 CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind), 2863 CPU_CFG_OFFSET(ext_smcdeleg), 2864 2865 RISCV_IMPLIED_EXTS_RULE_END 2866 }, 2867 }; 2868 2869 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = { 2870 .ext = CPU_CFG_OFFSET(ext_supm), 2871 .implied_multi_exts = { 2872 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm), 2873 2874 RISCV_IMPLIED_EXTS_RULE_END 2875 }, 2876 }; 2877 2878 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = { 2879 .ext = CPU_CFG_OFFSET(ext_sspm), 2880 .implied_multi_exts = { 2881 CPU_CFG_OFFSET(ext_smnpm), 2882 2883 RISCV_IMPLIED_EXTS_RULE_END 2884 }, 2885 }; 2886 2887 static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = { 2888 .ext = CPU_CFG_OFFSET(ext_smctr), 2889 .implied_misa_exts = RVS, 2890 .implied_multi_exts = { 2891 CPU_CFG_OFFSET(ext_sscsrind), 2892 2893 RISCV_IMPLIED_EXTS_RULE_END 2894 }, 2895 }; 2896 2897 static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = { 2898 .ext = CPU_CFG_OFFSET(ext_ssctr), 2899 .implied_misa_exts = RVS, 2900 .implied_multi_exts = { 2901 CPU_CFG_OFFSET(ext_sscsrind), 2902 2903 RISCV_IMPLIED_EXTS_RULE_END 2904 }, 2905 }; 2906 2907 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2908 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2909 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2910 }; 2911 2912 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2913 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2914 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2915 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2916 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2917 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2918 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2919 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2920 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2921 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2922 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2923 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2924 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED, 2925 &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED, 2926 NULL 2927 }; 2928 2929 static const Property riscv_cpu_properties[] = { 2930 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2931 2932 {.name = "pmu-mask", .info = &prop_pmu_mask}, 2933 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2934 2935 {.name = "mmu", .info = &prop_mmu}, 2936 {.name = "pmp", .info = &prop_pmp}, 2937 2938 {.name = "priv_spec", .info = &prop_priv_spec}, 2939 {.name = "vext_spec", .info = &prop_vext_spec}, 2940 2941 {.name = "vlen", .info = &prop_vlen}, 2942 {.name = "elen", .info = &prop_elen}, 2943 2944 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2945 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2946 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2947 2948 {.name = "mvendorid", .info = &prop_mvendorid}, 2949 {.name = "mimpid", .info = &prop_mimpid}, 2950 {.name = "marchid", .info = &prop_marchid}, 2951 2952 #ifndef CONFIG_USER_ONLY 2953 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2954 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2955 DEFAULT_RNMI_IRQVEC), 2956 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2957 DEFAULT_RNMI_EXCPVEC), 2958 #endif 2959 2960 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2961 2962 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2963 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2964 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2965 2966 /* 2967 * write_misa() is marked as experimental for now so mark 2968 * it with -x and default to 'false'. 2969 */ 2970 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2971 }; 2972 2973 #if defined(TARGET_RISCV64) 2974 static void rva22u64_profile_cpu_init(Object *obj) 2975 { 2976 rv64i_bare_cpu_init(obj); 2977 2978 RVA22U64.enabled = true; 2979 } 2980 2981 static void rva22s64_profile_cpu_init(Object *obj) 2982 { 2983 rv64i_bare_cpu_init(obj); 2984 2985 RVA22S64.enabled = true; 2986 } 2987 2988 static void rva23u64_profile_cpu_init(Object *obj) 2989 { 2990 rv64i_bare_cpu_init(obj); 2991 2992 RVA23U64.enabled = true; 2993 } 2994 2995 static void rva23s64_profile_cpu_init(Object *obj) 2996 { 2997 rv64i_bare_cpu_init(obj); 2998 2999 RVA23S64.enabled = true; 3000 } 3001 #endif 3002 3003 static const gchar *riscv_gdb_arch_name(CPUState *cs) 3004 { 3005 RISCVCPU *cpu = RISCV_CPU(cs); 3006 CPURISCVState *env = &cpu->env; 3007 3008 switch (riscv_cpu_mxl(env)) { 3009 case MXL_RV32: 3010 return "riscv:rv32"; 3011 case MXL_RV64: 3012 case MXL_RV128: 3013 return "riscv:rv64"; 3014 default: 3015 g_assert_not_reached(); 3016 } 3017 } 3018 3019 #ifndef CONFIG_USER_ONLY 3020 static int64_t riscv_get_arch_id(CPUState *cs) 3021 { 3022 RISCVCPU *cpu = RISCV_CPU(cs); 3023 3024 return cpu->env.mhartid; 3025 } 3026 3027 #include "hw/core/sysemu-cpu-ops.h" 3028 3029 static const struct SysemuCPUOps riscv_sysemu_ops = { 3030 .has_work = riscv_cpu_has_work, 3031 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 3032 .write_elf64_note = riscv_cpu_write_elf64_note, 3033 .write_elf32_note = riscv_cpu_write_elf32_note, 3034 .legacy_vmsd = &vmstate_riscv_cpu, 3035 }; 3036 #endif 3037 3038 static void riscv_cpu_common_class_init(ObjectClass *c, void *data) 3039 { 3040 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3041 CPUClass *cc = CPU_CLASS(c); 3042 DeviceClass *dc = DEVICE_CLASS(c); 3043 ResettableClass *rc = RESETTABLE_CLASS(c); 3044 3045 device_class_set_parent_realize(dc, riscv_cpu_realize, 3046 &mcc->parent_realize); 3047 3048 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 3049 &mcc->parent_phases); 3050 3051 cc->class_by_name = riscv_cpu_class_by_name; 3052 cc->mmu_index = riscv_cpu_mmu_index; 3053 cc->dump_state = riscv_cpu_dump_state; 3054 cc->set_pc = riscv_cpu_set_pc; 3055 cc->get_pc = riscv_cpu_get_pc; 3056 cc->gdb_read_register = riscv_cpu_gdb_read_register; 3057 cc->gdb_write_register = riscv_cpu_gdb_write_register; 3058 cc->gdb_stop_before_watchpoint = true; 3059 cc->disas_set_info = riscv_cpu_disas_set_info; 3060 #ifndef CONFIG_USER_ONLY 3061 cc->sysemu_ops = &riscv_sysemu_ops; 3062 cc->get_arch_id = riscv_get_arch_id; 3063 #endif 3064 cc->gdb_arch_name = riscv_gdb_arch_name; 3065 3066 device_class_set_props(dc, riscv_cpu_properties); 3067 } 3068 3069 static void riscv_cpu_class_init(ObjectClass *c, void *data) 3070 { 3071 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3072 3073 mcc->misa_mxl_max = (RISCVMXL)GPOINTER_TO_UINT(data); 3074 riscv_cpu_validate_misa_mxl(mcc); 3075 } 3076 3077 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 3078 int max_str_len) 3079 { 3080 const RISCVIsaExtData *edata; 3081 char *old = *isa_str; 3082 char *new = *isa_str; 3083 3084 for (edata = isa_edata_arr; edata && edata->name; edata++) { 3085 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3086 new = g_strconcat(old, "_", edata->name, NULL); 3087 g_free(old); 3088 old = new; 3089 } 3090 } 3091 3092 *isa_str = new; 3093 } 3094 3095 char *riscv_isa_string(RISCVCPU *cpu) 3096 { 3097 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3098 int i; 3099 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 3100 char *isa_str = g_new(char, maxlen); 3101 int xlen = riscv_cpu_max_xlen(mcc); 3102 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 3103 3104 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3105 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3106 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 3107 } 3108 } 3109 *p = '\0'; 3110 if (!cpu->cfg.short_isa_string) { 3111 riscv_isa_string_ext(cpu, &isa_str, maxlen); 3112 } 3113 return isa_str; 3114 } 3115 3116 #ifndef CONFIG_USER_ONLY 3117 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 3118 { 3119 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 3120 char **extensions = g_new(char *, maxlen); 3121 3122 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3123 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3124 extensions[*count] = g_new(char, 2); 3125 snprintf(extensions[*count], 2, "%c", 3126 qemu_tolower(riscv_single_letter_exts[i])); 3127 (*count)++; 3128 } 3129 } 3130 3131 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 3132 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3133 extensions[*count] = g_strdup(edata->name); 3134 (*count)++; 3135 } 3136 } 3137 3138 return extensions; 3139 } 3140 3141 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 3142 { 3143 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3144 const size_t maxlen = sizeof("rv128i"); 3145 g_autofree char *isa_base = g_new(char, maxlen); 3146 g_autofree char *riscv_isa; 3147 char **isa_extensions; 3148 int count = 0; 3149 int xlen = riscv_cpu_max_xlen(mcc); 3150 3151 riscv_isa = riscv_isa_string(cpu); 3152 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 3153 3154 snprintf(isa_base, maxlen, "rv%di", xlen); 3155 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 3156 3157 isa_extensions = riscv_isa_extensions_list(cpu, &count); 3158 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 3159 isa_extensions, count); 3160 3161 for (int i = 0; i < count; i++) { 3162 g_free(isa_extensions[i]); 3163 } 3164 3165 g_free(isa_extensions); 3166 } 3167 #endif 3168 3169 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 3170 { \ 3171 .name = (type_name), \ 3172 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 3173 .instance_init = (initfn), \ 3174 .class_init = riscv_cpu_class_init, \ 3175 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3176 } 3177 3178 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 3179 { \ 3180 .name = (type_name), \ 3181 .parent = TYPE_RISCV_VENDOR_CPU, \ 3182 .instance_init = (initfn), \ 3183 .class_init = riscv_cpu_class_init, \ 3184 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3185 } 3186 3187 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 3188 { \ 3189 .name = (type_name), \ 3190 .parent = TYPE_RISCV_BARE_CPU, \ 3191 .instance_init = (initfn), \ 3192 .class_init = riscv_cpu_class_init, \ 3193 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3194 } 3195 3196 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 3197 { \ 3198 .name = (type_name), \ 3199 .parent = TYPE_RISCV_BARE_CPU, \ 3200 .instance_init = (initfn), \ 3201 .class_init = riscv_cpu_class_init, \ 3202 .class_data = GUINT_TO_POINTER(misa_mxl_max) \ 3203 } 3204 3205 static const TypeInfo riscv_cpu_type_infos[] = { 3206 { 3207 .name = TYPE_RISCV_CPU, 3208 .parent = TYPE_CPU, 3209 .instance_size = sizeof(RISCVCPU), 3210 .instance_align = __alignof(RISCVCPU), 3211 .instance_init = riscv_cpu_init, 3212 .instance_post_init = riscv_cpu_post_init, 3213 .abstract = true, 3214 .class_size = sizeof(RISCVCPUClass), 3215 .class_init = riscv_cpu_common_class_init, 3216 }, 3217 { 3218 .name = TYPE_RISCV_DYNAMIC_CPU, 3219 .parent = TYPE_RISCV_CPU, 3220 .abstract = true, 3221 }, 3222 { 3223 .name = TYPE_RISCV_VENDOR_CPU, 3224 .parent = TYPE_RISCV_CPU, 3225 .abstract = true, 3226 }, 3227 { 3228 .name = TYPE_RISCV_BARE_CPU, 3229 .parent = TYPE_RISCV_CPU, 3230 .instance_init = riscv_bare_cpu_init, 3231 .abstract = true, 3232 }, 3233 #if defined(TARGET_RISCV32) 3234 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3235 #elif defined(TARGET_RISCV64) 3236 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3237 #endif 3238 3239 #if defined(TARGET_RISCV32) || \ 3240 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3241 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3242 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3243 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3244 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3245 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3246 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 3247 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 3248 #endif 3249 3250 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3251 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 3252 #endif 3253 3254 #if defined(TARGET_RISCV64) 3255 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 3256 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 3257 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 3258 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 3259 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 3260 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), 3261 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 3262 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, 3263 MXL_RV64, rv64_xiangshan_nanhu_cpu_init), 3264 #ifdef CONFIG_TCG 3265 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 3266 #endif /* CONFIG_TCG */ 3267 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 3268 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 3269 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 3270 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 3271 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init), 3272 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init), 3273 #endif /* TARGET_RISCV64 */ 3274 }; 3275 3276 DEFINE_TYPES(riscv_cpu_type_infos) 3277