/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
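 *
 * As a purely illustrative example (not a string taken from this file),
 * an ISA string following these rules could look like:
 *
 *   rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba
 *
 * where the 'Z' extensions are grouped by category and ordered within it,
 * the 'S' extensions follow the unprivileged ones, and the vendor 'X'
 * extension comes last.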
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
    ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
    ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
    ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
    ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
    ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
    ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
    ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
    ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
    ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    { },
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "double_trap",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
                                riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
                                VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Tenstorrent Ascalon */
static void rv64_tt_ascalon_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
    env->priv_ver = PRIV_VERSION_1_13_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.vlenb = 256 >> 3;
    cpu->cfg.elen = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.rvv_ma_all_1s = true;
    cpu->cfg.rvv_ta_all_1s = true;
    cpu->cfg.misa_w = true;
    cpu->cfg.pmp = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zic64b = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.ext_zicbop = true;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zicond = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zihintntl = true;
    cpu->cfg.ext_zihintpause = true;
    cpu->cfg.ext_zihpm = true;
    cpu->cfg.ext_zimop = true;
    cpu->cfg.ext_zawrs = true;
    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfbfmin = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.ext_zfhmin = true;
    cpu->cfg.ext_zcb = true;
    cpu->cfg.ext_zcmop = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_zkt = true;
    cpu->cfg.ext_zvbb = true;
    cpu->cfg.ext_zvbc = true;
    cpu->cfg.ext_zvfbfmin = true;
    cpu->cfg.ext_zvfbfwma = true;
    cpu->cfg.ext_zvfh = true;
    cpu->cfg.ext_zvfhmin = true;
    cpu->cfg.ext_zvkng = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svade = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}

static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbkb = true;
    cpu->cfg.ext_zbkc = true;
    cpu->cfg.ext_zbkx = true;
    cpu->cfg.ext_zknd = true;
    cpu->cfg.ext_zkne = true;
    cpu->cfg.ext_zknh = true;
    cpu->cfg.ext_zksed = true;
    cpu->cfg.ext_zksh = true;
    cpu->cfg.ext_svinval = true;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif
}

#ifdef CONFIG_TCG
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#endif /* CONFIG_TCG */

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#endif /* !TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
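    /* Derive the user-visible CPU model name from the QOM typename. */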
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
        if (riscv_cpu_cfg(env)->ext_smdbltrp) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (cpu->cfg.ext_smrnmi) {
        env->rnmip = 0;
        env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
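             *
             * As an illustration: disabling only sv48 on a CPU that also
             * supports sv39 results in sv39 being added to the map here.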
1193 */ 1194 for (int i = 1; i < 16; ++i) { 1195 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1196 (cpu->cfg.satp_mode.supported & (1 << i))) { 1197 for (int j = i - 1; j >= 0; --j) { 1198 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1199 cpu->cfg.satp_mode.map |= (1 << j); 1200 break; 1201 } 1202 } 1203 break; 1204 } 1205 } 1206 } 1207 } 1208 1209 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1210 1211 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1212 if (satp_mode_map_max > satp_mode_supported_max) { 1213 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1214 satp_mode_str(satp_mode_map_max, rv32), 1215 satp_mode_str(satp_mode_supported_max, rv32)); 1216 return; 1217 } 1218 1219 /* 1220 * Make sure the user did not ask for an invalid configuration as per 1221 * the specification. 1222 */ 1223 if (!rv32) { 1224 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1225 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1226 (cpu->cfg.satp_mode.init & (1 << i)) && 1227 (cpu->cfg.satp_mode.supported & (1 << i))) { 1228 error_setg(errp, "cannot disable %s satp mode if %s " 1229 "is enabled", satp_mode_str(i, false), 1230 satp_mode_str(satp_mode_map_max, false)); 1231 return; 1232 } 1233 } 1234 } 1235 1236 /* Finally expand the map so that all valid modes are set */ 1237 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1238 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1239 cpu->cfg.satp_mode.map |= (1 << i); 1240 } 1241 } 1242 } 1243 #endif 1244 1245 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1246 { 1247 Error *local_err = NULL; 1248 1249 #ifndef CONFIG_USER_ONLY 1250 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1251 if (local_err != NULL) { 1252 error_propagate(errp, local_err); 1253 return; 1254 } 1255 #endif 1256 1257 if (tcg_enabled()) { 1258 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1259 if (local_err != NULL) { 1260 error_propagate(errp, local_err); 1261 return; 1262 } 1263 riscv_tcg_cpu_finalize_dynamic_decoder(cpu); 1264 } else if (kvm_enabled()) { 1265 riscv_kvm_cpu_finalize_features(cpu, &local_err); 1266 if (local_err != NULL) { 1267 error_propagate(errp, local_err); 1268 return; 1269 } 1270 } 1271 } 1272 1273 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1274 { 1275 CPUState *cs = CPU(dev); 1276 RISCVCPU *cpu = RISCV_CPU(dev); 1277 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1278 Error *local_err = NULL; 1279 1280 cpu_exec_realizefn(cs, &local_err); 1281 if (local_err != NULL) { 1282 error_propagate(errp, local_err); 1283 return; 1284 } 1285 1286 riscv_cpu_finalize_features(cpu, &local_err); 1287 if (local_err != NULL) { 1288 error_propagate(errp, local_err); 1289 return; 1290 } 1291 1292 riscv_cpu_register_gdb_regs_for_features(cs); 1293 1294 #ifndef CONFIG_USER_ONLY 1295 if (cpu->cfg.debug) { 1296 riscv_trigger_realize(&cpu->env); 1297 } 1298 #endif 1299 1300 qemu_init_vcpu(cs); 1301 cpu_reset(cs); 1302 1303 mcc->parent_realize(dev, errp); 1304 } 1305 1306 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1307 { 1308 if (tcg_enabled()) { 1309 return riscv_cpu_tcg_compatible(cpu); 1310 } 1311 1312 return true; 1313 } 1314 1315 #ifndef CONFIG_USER_ONLY 1316 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1317 void *opaque, Error **errp) 1318 { 1319 RISCVSATPMap *satp_map = opaque; 1320 uint8_t satp = satp_mode_from_str(name); 1321 bool value; 1322 1323 value = satp_map->map & (1 << satp); 1324 1325 visit_type_bool(v, name, &value, errp); 
1326 } 1327 1328 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1329 void *opaque, Error **errp) 1330 { 1331 RISCVSATPMap *satp_map = opaque; 1332 uint8_t satp = satp_mode_from_str(name); 1333 bool value; 1334 1335 if (!visit_type_bool(v, name, &value, errp)) { 1336 return; 1337 } 1338 1339 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1340 satp_map->init |= 1 << satp; 1341 } 1342 1343 void riscv_add_satp_mode_properties(Object *obj) 1344 { 1345 RISCVCPU *cpu = RISCV_CPU(obj); 1346 1347 if (cpu->env.misa_mxl == MXL_RV32) { 1348 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1349 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1350 } else { 1351 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1352 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1353 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1354 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1355 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1356 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1357 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1358 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1359 } 1360 } 1361 1362 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1363 { 1364 RISCVCPU *cpu = RISCV_CPU(opaque); 1365 CPURISCVState *env = &cpu->env; 1366 1367 if (irq < IRQ_LOCAL_MAX) { 1368 switch (irq) { 1369 case IRQ_U_SOFT: 1370 case IRQ_S_SOFT: 1371 case IRQ_VS_SOFT: 1372 case IRQ_M_SOFT: 1373 case IRQ_U_TIMER: 1374 case IRQ_S_TIMER: 1375 case IRQ_VS_TIMER: 1376 case IRQ_M_TIMER: 1377 case IRQ_U_EXT: 1378 case IRQ_VS_EXT: 1379 case IRQ_M_EXT: 1380 if (kvm_enabled()) { 1381 kvm_riscv_set_irq(cpu, irq, level); 1382 } else { 1383 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1384 } 1385 break; 1386 case IRQ_S_EXT: 1387 if (kvm_enabled()) { 1388 kvm_riscv_set_irq(cpu, irq, level); 1389 } else { 1390 env->external_seip = level; 1391 riscv_cpu_update_mip(env, 1 << irq, 1392 BOOL_TO_MASK(level | env->software_seip)); 1393 } 1394 break; 1395 default: 1396 g_assert_not_reached(); 1397 } 1398 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1399 /* Require H-extension for handling guest local interrupts */ 1400 if (!riscv_has_ext(env, RVH)) { 1401 g_assert_not_reached(); 1402 } 1403 1404 /* Compute bit position in HGEIP CSR */ 1405 irq = irq - IRQ_LOCAL_MAX + 1; 1406 if (env->geilen < irq) { 1407 g_assert_not_reached(); 1408 } 1409 1410 /* Update HGEIP CSR */ 1411 env->hgeip &= ~((target_ulong)1 << irq); 1412 if (level) { 1413 env->hgeip |= (target_ulong)1 << irq; 1414 } 1415 1416 /* Update mip.SGEIP bit */ 1417 riscv_cpu_update_mip(env, MIP_SGEIP, 1418 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1419 } else { 1420 g_assert_not_reached(); 1421 } 1422 } 1423 1424 static void riscv_cpu_set_nmi(void *opaque, int irq, int level) 1425 { 1426 riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level); 1427 } 1428 #endif /* CONFIG_USER_ONLY */ 1429 1430 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1431 { 1432 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1433 } 1434 1435 static void riscv_cpu_post_init(Object *obj) 1436 { 1437 accel_cpu_instance_init(CPU(obj)); 1438 } 1439 1440 static void riscv_cpu_init(Object *obj) 1441 { 1442 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 1443 RISCVCPU *cpu = RISCV_CPU(obj); 1444 CPURISCVState *env = &cpu->env; 1445 1446 env->misa_mxl = mcc->misa_mxl_max; 1447 1448 #ifndef CONFIG_USER_ONLY 1449 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1450 
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /*
     * Validate that MISA_MXL is set properly.
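     * The maximum MXL also selects which gdb core XML file is advertised.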
     */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
    MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
    MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
    MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
    MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
    MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
    MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
    MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
    MULTI_EXT_CFG_BOOL("supm", ext_supm, false),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
    MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
    MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
    MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    { },
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    { },
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),

    { },
};

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disabled) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
    MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
    MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
    MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),

    { },
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    { },
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
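    /* Record the converted value as a "pmu-mask" user setting below. */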
cpu_option_add_user_setting("pmu-mask", pmu_mask); 1807 } 1808 1809 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1810 void *opaque, Error **errp) 1811 { 1812 RISCVCPU *cpu = RISCV_CPU(obj); 1813 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1814 1815 visit_type_uint8(v, name, &pmu_num, errp); 1816 } 1817 1818 static const PropertyInfo prop_pmu_num = { 1819 .name = "pmu-num", 1820 .get = prop_pmu_num_get, 1821 .set = prop_pmu_num_set, 1822 }; 1823 1824 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1825 void *opaque, Error **errp) 1826 { 1827 RISCVCPU *cpu = RISCV_CPU(obj); 1828 uint32_t value; 1829 uint8_t pmu_num; 1830 1831 visit_type_uint32(v, name, &value, errp); 1832 1833 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1834 cpu_set_prop_err(cpu, name, errp); 1835 error_append_hint(errp, "Current '%s' val: %x\n", 1836 name, cpu->cfg.pmu_mask); 1837 return; 1838 } 1839 1840 pmu_num = ctpop32(value); 1841 1842 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1843 error_setg(errp, "Number of counters exceeds maximum available"); 1844 return; 1845 } 1846 1847 cpu_option_add_user_setting(name, value); 1848 cpu->cfg.pmu_mask = value; 1849 } 1850 1851 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1852 void *opaque, Error **errp) 1853 { 1854 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1855 1856 visit_type_uint8(v, name, &pmu_mask, errp); 1857 } 1858 1859 static const PropertyInfo prop_pmu_mask = { 1860 .name = "pmu-mask", 1861 .get = prop_pmu_mask_get, 1862 .set = prop_pmu_mask_set, 1863 }; 1864 1865 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1866 void *opaque, Error **errp) 1867 { 1868 RISCVCPU *cpu = RISCV_CPU(obj); 1869 bool value; 1870 1871 visit_type_bool(v, name, &value, errp); 1872 1873 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1874 cpu_set_prop_err(cpu, "mmu", errp); 1875 return; 1876 } 1877 1878 cpu_option_add_user_setting(name, value); 1879 cpu->cfg.mmu = value; 1880 } 1881 1882 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1883 void *opaque, Error **errp) 1884 { 1885 bool value = RISCV_CPU(obj)->cfg.mmu; 1886 1887 visit_type_bool(v, name, &value, errp); 1888 } 1889 1890 static const PropertyInfo prop_mmu = { 1891 .name = "mmu", 1892 .get = prop_mmu_get, 1893 .set = prop_mmu_set, 1894 }; 1895 1896 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1897 void *opaque, Error **errp) 1898 { 1899 RISCVCPU *cpu = RISCV_CPU(obj); 1900 bool value; 1901 1902 visit_type_bool(v, name, &value, errp); 1903 1904 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1905 cpu_set_prop_err(cpu, name, errp); 1906 return; 1907 } 1908 1909 cpu_option_add_user_setting(name, value); 1910 cpu->cfg.pmp = value; 1911 } 1912 1913 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1914 void *opaque, Error **errp) 1915 { 1916 bool value = RISCV_CPU(obj)->cfg.pmp; 1917 1918 visit_type_bool(v, name, &value, errp); 1919 } 1920 1921 static const PropertyInfo prop_pmp = { 1922 .name = "pmp", 1923 .get = prop_pmp_get, 1924 .set = prop_pmp_set, 1925 }; 1926 1927 static int priv_spec_from_str(const char *priv_spec_str) 1928 { 1929 int priv_version = -1; 1930 1931 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1932 priv_version = PRIV_VERSION_1_13_0; 1933 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1934 priv_version = PRIV_VERSION_1_12_0; 1935 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1936 
priv_version = PRIV_VERSION_1_11_0; 1937 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1938 priv_version = PRIV_VERSION_1_10_0; 1939 } 1940 1941 return priv_version; 1942 } 1943 1944 const char *priv_spec_to_str(int priv_version) 1945 { 1946 switch (priv_version) { 1947 case PRIV_VERSION_1_10_0: 1948 return PRIV_VER_1_10_0_STR; 1949 case PRIV_VERSION_1_11_0: 1950 return PRIV_VER_1_11_0_STR; 1951 case PRIV_VERSION_1_12_0: 1952 return PRIV_VER_1_12_0_STR; 1953 case PRIV_VERSION_1_13_0: 1954 return PRIV_VER_1_13_0_STR; 1955 default: 1956 return NULL; 1957 } 1958 } 1959 1960 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1961 void *opaque, Error **errp) 1962 { 1963 RISCVCPU *cpu = RISCV_CPU(obj); 1964 g_autofree char *value = NULL; 1965 int priv_version = -1; 1966 1967 visit_type_str(v, name, &value, errp); 1968 1969 priv_version = priv_spec_from_str(value); 1970 if (priv_version < 0) { 1971 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1972 return; 1973 } 1974 1975 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1976 cpu_set_prop_err(cpu, name, errp); 1977 error_append_hint(errp, "Current '%s' val: %s\n", name, 1978 object_property_get_str(obj, name, NULL)); 1979 return; 1980 } 1981 1982 cpu_option_add_user_setting(name, priv_version); 1983 cpu->env.priv_ver = priv_version; 1984 } 1985 1986 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1987 void *opaque, Error **errp) 1988 { 1989 RISCVCPU *cpu = RISCV_CPU(obj); 1990 const char *value = priv_spec_to_str(cpu->env.priv_ver); 1991 1992 visit_type_str(v, name, (char **)&value, errp); 1993 } 1994 1995 static const PropertyInfo prop_priv_spec = { 1996 .name = "priv_spec", 1997 .get = prop_priv_spec_get, 1998 .set = prop_priv_spec_set, 1999 }; 2000 2001 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 2002 void *opaque, Error **errp) 2003 { 2004 RISCVCPU *cpu = RISCV_CPU(obj); 2005 g_autofree char *value = NULL; 2006 2007 visit_type_str(v, name, &value, errp); 2008 2009 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 2010 error_setg(errp, "Unsupported vector spec version '%s'", value); 2011 return; 2012 } 2013 2014 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 2015 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 2016 } 2017 2018 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 2019 void *opaque, Error **errp) 2020 { 2021 const char *value = VEXT_VER_1_00_0_STR; 2022 2023 visit_type_str(v, name, (char **)&value, errp); 2024 } 2025 2026 static const PropertyInfo prop_vext_spec = { 2027 .name = "vext_spec", 2028 .get = prop_vext_spec_get, 2029 .set = prop_vext_spec_set, 2030 }; 2031 2032 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 2033 void *opaque, Error **errp) 2034 { 2035 RISCVCPU *cpu = RISCV_CPU(obj); 2036 uint16_t cpu_vlen = cpu->cfg.vlenb << 3; 2037 uint16_t value; 2038 2039 if (!visit_type_uint16(v, name, &value, errp)) { 2040 return; 2041 } 2042 2043 if (!is_power_of_2(value)) { 2044 error_setg(errp, "Vector extension VLEN must be power of 2"); 2045 return; 2046 } 2047 2048 if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) { 2049 cpu_set_prop_err(cpu, name, errp); 2050 error_append_hint(errp, "Current '%s' val: %u\n", 2051 name, cpu_vlen); 2052 return; 2053 } 2054 2055 cpu_option_add_user_setting(name, value); 2056 cpu->cfg.vlenb = value >> 3; 2057 } 2058 2059 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 2060 void *opaque, Error 
**errp) 2061 { 2062 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 2063 2064 visit_type_uint16(v, name, &value, errp); 2065 } 2066 2067 static const PropertyInfo prop_vlen = { 2068 .name = "vlen", 2069 .get = prop_vlen_get, 2070 .set = prop_vlen_set, 2071 }; 2072 2073 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 2074 void *opaque, Error **errp) 2075 { 2076 RISCVCPU *cpu = RISCV_CPU(obj); 2077 uint16_t value; 2078 2079 if (!visit_type_uint16(v, name, &value, errp)) { 2080 return; 2081 } 2082 2083 if (!is_power_of_2(value)) { 2084 error_setg(errp, "Vector extension ELEN must be power of 2"); 2085 return; 2086 } 2087 2088 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 2089 cpu_set_prop_err(cpu, name, errp); 2090 error_append_hint(errp, "Current '%s' val: %u\n", 2091 name, cpu->cfg.elen); 2092 return; 2093 } 2094 2095 cpu_option_add_user_setting(name, value); 2096 cpu->cfg.elen = value; 2097 } 2098 2099 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 2100 void *opaque, Error **errp) 2101 { 2102 uint16_t value = RISCV_CPU(obj)->cfg.elen; 2103 2104 visit_type_uint16(v, name, &value, errp); 2105 } 2106 2107 static const PropertyInfo prop_elen = { 2108 .name = "elen", 2109 .get = prop_elen_get, 2110 .set = prop_elen_set, 2111 }; 2112 2113 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 2114 void *opaque, Error **errp) 2115 { 2116 RISCVCPU *cpu = RISCV_CPU(obj); 2117 uint16_t value; 2118 2119 if (!visit_type_uint16(v, name, &value, errp)) { 2120 return; 2121 } 2122 2123 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2124 cpu_set_prop_err(cpu, name, errp); 2125 error_append_hint(errp, "Current '%s' val: %u\n", 2126 name, cpu->cfg.cbom_blocksize); 2127 return; 2128 } 2129 2130 cpu_option_add_user_setting(name, value); 2131 cpu->cfg.cbom_blocksize = value; 2132 } 2133 2134 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2135 void *opaque, Error **errp) 2136 { 2137 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2138 2139 visit_type_uint16(v, name, &value, errp); 2140 } 2141 2142 static const PropertyInfo prop_cbom_blksize = { 2143 .name = "cbom_blocksize", 2144 .get = prop_cbom_blksize_get, 2145 .set = prop_cbom_blksize_set, 2146 }; 2147 2148 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2149 void *opaque, Error **errp) 2150 { 2151 RISCVCPU *cpu = RISCV_CPU(obj); 2152 uint16_t value; 2153 2154 if (!visit_type_uint16(v, name, &value, errp)) { 2155 return; 2156 } 2157 2158 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2159 cpu_set_prop_err(cpu, name, errp); 2160 error_append_hint(errp, "Current '%s' val: %u\n", 2161 name, cpu->cfg.cbop_blocksize); 2162 return; 2163 } 2164 2165 cpu_option_add_user_setting(name, value); 2166 cpu->cfg.cbop_blocksize = value; 2167 } 2168 2169 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2170 void *opaque, Error **errp) 2171 { 2172 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2173 2174 visit_type_uint16(v, name, &value, errp); 2175 } 2176 2177 static const PropertyInfo prop_cbop_blksize = { 2178 .name = "cbop_blocksize", 2179 .get = prop_cbop_blksize_get, 2180 .set = prop_cbop_blksize_set, 2181 }; 2182 2183 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2184 void *opaque, Error **errp) 2185 { 2186 RISCVCPU *cpu = RISCV_CPU(obj); 2187 uint16_t value; 2188 2189 if (!visit_type_uint16(v, name, &value, errp)) { 2190 
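        /*
         * visit_type_uint16() has already set *errp (e.g. the value does
         * not fit in a uint16), so just propagate the failure.
         */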
return; 2191 } 2192 2193 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2194 cpu_set_prop_err(cpu, name, errp); 2195 error_append_hint(errp, "Current '%s' val: %u\n", 2196 name, cpu->cfg.cboz_blocksize); 2197 return; 2198 } 2199 2200 cpu_option_add_user_setting(name, value); 2201 cpu->cfg.cboz_blocksize = value; 2202 } 2203 2204 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2205 void *opaque, Error **errp) 2206 { 2207 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2208 2209 visit_type_uint16(v, name, &value, errp); 2210 } 2211 2212 static const PropertyInfo prop_cboz_blksize = { 2213 .name = "cboz_blocksize", 2214 .get = prop_cboz_blksize_get, 2215 .set = prop_cboz_blksize_set, 2216 }; 2217 2218 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2219 void *opaque, Error **errp) 2220 { 2221 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2222 RISCVCPU *cpu = RISCV_CPU(obj); 2223 uint32_t prev_val = cpu->cfg.mvendorid; 2224 uint32_t value; 2225 2226 if (!visit_type_uint32(v, name, &value, errp)) { 2227 return; 2228 } 2229 2230 if (!dynamic_cpu && prev_val != value) { 2231 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2232 object_get_typename(obj), prev_val); 2233 return; 2234 } 2235 2236 cpu->cfg.mvendorid = value; 2237 } 2238 2239 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2240 void *opaque, Error **errp) 2241 { 2242 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2243 2244 visit_type_uint32(v, name, &value, errp); 2245 } 2246 2247 static const PropertyInfo prop_mvendorid = { 2248 .name = "mvendorid", 2249 .get = prop_mvendorid_get, 2250 .set = prop_mvendorid_set, 2251 }; 2252 2253 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2254 void *opaque, Error **errp) 2255 { 2256 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2257 RISCVCPU *cpu = RISCV_CPU(obj); 2258 uint64_t prev_val = cpu->cfg.mimpid; 2259 uint64_t value; 2260 2261 if (!visit_type_uint64(v, name, &value, errp)) { 2262 return; 2263 } 2264 2265 if (!dynamic_cpu && prev_val != value) { 2266 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2267 object_get_typename(obj), prev_val); 2268 return; 2269 } 2270 2271 cpu->cfg.mimpid = value; 2272 } 2273 2274 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2275 void *opaque, Error **errp) 2276 { 2277 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2278 2279 visit_type_uint64(v, name, &value, errp); 2280 } 2281 2282 static const PropertyInfo prop_mimpid = { 2283 .name = "mimpid", 2284 .get = prop_mimpid_get, 2285 .set = prop_mimpid_set, 2286 }; 2287 2288 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2289 void *opaque, Error **errp) 2290 { 2291 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2292 RISCVCPU *cpu = RISCV_CPU(obj); 2293 uint64_t prev_val = cpu->cfg.marchid; 2294 uint64_t value, invalid_val; 2295 uint32_t mxlen = 0; 2296 2297 if (!visit_type_uint64(v, name, &value, errp)) { 2298 return; 2299 } 2300 2301 if (!dynamic_cpu && prev_val != value) { 2302 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2303 object_get_typename(obj), prev_val); 2304 return; 2305 } 2306 2307 switch (riscv_cpu_mxl(&cpu->env)) { 2308 case MXL_RV32: 2309 mxlen = 32; 2310 break; 2311 case MXL_RV64: 2312 case MXL_RV128: 2313 mxlen = 64; 2314 break; 2315 default: 2316 g_assert_not_reached(); 2317 } 2318 2319 invalid_val = 1LL << (mxlen - 1); 2320 2321 if (value == invalid_val) { 2322 
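        /*
         * A marchid with only the MSB set is rejected; on RV64 this is
         * the value 0x8000000000000000 (illustrative, mxlen-dependent).
         */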
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};

/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .u_parent = NULL,
    .s_parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * All mandatory extensions from RVA22U64 are present
 * in RVA23U64, so RVA22U64 is set as its parent and only
 * the newly added mandatory extensions are declared here.
 */
static RISCVCPUProfile RVA23U64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva23u64",
    .misa_ext = RVV,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
        CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
        CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
        CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
        CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
        CPU_CFG_OFFSET(ext_supm),

        RISCV_PROFILE_EXT_LIST_END
    }
};
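
/*
 * Illustrative profile usage (assuming the usual bare CPU setup):
 * enabling a profile flag on the rv64i CPU, e.g.
 *
 *   -cpu rv64i,rva22u64=true
 *
 * sets the profile's misa_ext bits and every extension listed in its
 * ext_offsets, plus whatever is inherited from parent profiles.
 */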
/*
 * As with RVA23U64, RVA23S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw, Ssu64xl
 *
 * The remaining features/extensions come from RVA23U64.
 */
static RISCVCPUProfile RVA23S64 = {
    .u_parent = &RVA23U64,
    .s_parent = &RVA22S64,
    .name = "rva23s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_13_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* New in RVA23S64 */
        CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
        CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),

        /* Named features: Sha */
        CPU_CFG_OFFSET(ext_sha),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    &RVA23U64,
    &RVA23S64,
    NULL,
};

static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcd),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zce),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
        CPU_CFG_OFFSET(ext_zcmt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcf),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmp),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmt),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zdinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),
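        /*
         * Enabling Zdinx pulls in Zfinx, which in turn implies Zicsr
         * (see ZFINX_IMPLIED below).
         */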
2570 RISCV_IMPLIED_EXTS_RULE_END 2571 }, 2572 }; 2573 2574 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2575 .ext = CPU_CFG_OFFSET(ext_zfa), 2576 .implied_misa_exts = RVF, 2577 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2578 }; 2579 2580 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2581 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2582 .implied_misa_exts = RVF, 2583 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2584 }; 2585 2586 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2587 .ext = CPU_CFG_OFFSET(ext_zfh), 2588 .implied_multi_exts = { 2589 CPU_CFG_OFFSET(ext_zfhmin), 2590 2591 RISCV_IMPLIED_EXTS_RULE_END 2592 }, 2593 }; 2594 2595 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2596 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2597 .implied_misa_exts = RVF, 2598 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2599 }; 2600 2601 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2602 .ext = CPU_CFG_OFFSET(ext_zfinx), 2603 .implied_multi_exts = { 2604 CPU_CFG_OFFSET(ext_zicsr), 2605 2606 RISCV_IMPLIED_EXTS_RULE_END 2607 }, 2608 }; 2609 2610 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2611 .ext = CPU_CFG_OFFSET(ext_zhinx), 2612 .implied_multi_exts = { 2613 CPU_CFG_OFFSET(ext_zhinxmin), 2614 2615 RISCV_IMPLIED_EXTS_RULE_END 2616 }, 2617 }; 2618 2619 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2620 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2621 .implied_multi_exts = { 2622 CPU_CFG_OFFSET(ext_zfinx), 2623 2624 RISCV_IMPLIED_EXTS_RULE_END 2625 }, 2626 }; 2627 2628 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2629 .ext = CPU_CFG_OFFSET(ext_zicntr), 2630 .implied_multi_exts = { 2631 CPU_CFG_OFFSET(ext_zicsr), 2632 2633 RISCV_IMPLIED_EXTS_RULE_END 2634 }, 2635 }; 2636 2637 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2638 .ext = CPU_CFG_OFFSET(ext_zihpm), 2639 .implied_multi_exts = { 2640 CPU_CFG_OFFSET(ext_zicsr), 2641 2642 RISCV_IMPLIED_EXTS_RULE_END 2643 }, 2644 }; 2645 2646 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2647 .ext = CPU_CFG_OFFSET(ext_zk), 2648 .implied_multi_exts = { 2649 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2650 CPU_CFG_OFFSET(ext_zkt), 2651 2652 RISCV_IMPLIED_EXTS_RULE_END 2653 }, 2654 }; 2655 2656 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2657 .ext = CPU_CFG_OFFSET(ext_zkn), 2658 .implied_multi_exts = { 2659 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2660 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2661 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2662 2663 RISCV_IMPLIED_EXTS_RULE_END 2664 }, 2665 }; 2666 2667 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2668 .ext = CPU_CFG_OFFSET(ext_zks), 2669 .implied_multi_exts = { 2670 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2671 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2672 CPU_CFG_OFFSET(ext_zksh), 2673 2674 RISCV_IMPLIED_EXTS_RULE_END 2675 }, 2676 }; 2677 2678 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2679 .ext = CPU_CFG_OFFSET(ext_zvbb), 2680 .implied_multi_exts = { 2681 CPU_CFG_OFFSET(ext_zvkb), 2682 2683 RISCV_IMPLIED_EXTS_RULE_END 2684 }, 2685 }; 2686 2687 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2688 .ext = CPU_CFG_OFFSET(ext_zve32f), 2689 .implied_misa_exts = RVF, 2690 .implied_multi_exts = { 2691 CPU_CFG_OFFSET(ext_zve32x), 2692 2693 RISCV_IMPLIED_EXTS_RULE_END 2694 }, 2695 }; 2696 2697 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2698 .ext = CPU_CFG_OFFSET(ext_zve32x), 2699 .implied_multi_exts = { 2700 CPU_CFG_OFFSET(ext_zicsr), 2701 2702 RISCV_IMPLIED_EXTS_RULE_END 2703 }, 2704 }; 2705 2706 static 
RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2707 .ext = CPU_CFG_OFFSET(ext_zve64d), 2708 .implied_misa_exts = RVD, 2709 .implied_multi_exts = { 2710 CPU_CFG_OFFSET(ext_zve64f), 2711 2712 RISCV_IMPLIED_EXTS_RULE_END 2713 }, 2714 }; 2715 2716 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2717 .ext = CPU_CFG_OFFSET(ext_zve64f), 2718 .implied_misa_exts = RVF, 2719 .implied_multi_exts = { 2720 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2721 2722 RISCV_IMPLIED_EXTS_RULE_END 2723 }, 2724 }; 2725 2726 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2727 .ext = CPU_CFG_OFFSET(ext_zve64x), 2728 .implied_multi_exts = { 2729 CPU_CFG_OFFSET(ext_zve32x), 2730 2731 RISCV_IMPLIED_EXTS_RULE_END 2732 }, 2733 }; 2734 2735 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2736 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2737 .implied_multi_exts = { 2738 CPU_CFG_OFFSET(ext_zve32f), 2739 2740 RISCV_IMPLIED_EXTS_RULE_END 2741 }, 2742 }; 2743 2744 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2745 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2746 .implied_multi_exts = { 2747 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2748 2749 RISCV_IMPLIED_EXTS_RULE_END 2750 }, 2751 }; 2752 2753 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2754 .ext = CPU_CFG_OFFSET(ext_zvfh), 2755 .implied_multi_exts = { 2756 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2757 2758 RISCV_IMPLIED_EXTS_RULE_END 2759 }, 2760 }; 2761 2762 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2763 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2764 .implied_multi_exts = { 2765 CPU_CFG_OFFSET(ext_zve32f), 2766 2767 RISCV_IMPLIED_EXTS_RULE_END 2768 }, 2769 }; 2770 2771 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2772 .ext = CPU_CFG_OFFSET(ext_zvkn), 2773 .implied_multi_exts = { 2774 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2775 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2776 2777 RISCV_IMPLIED_EXTS_RULE_END 2778 }, 2779 }; 2780 2781 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2782 .ext = CPU_CFG_OFFSET(ext_zvknc), 2783 .implied_multi_exts = { 2784 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2785 2786 RISCV_IMPLIED_EXTS_RULE_END 2787 }, 2788 }; 2789 2790 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2791 .ext = CPU_CFG_OFFSET(ext_zvkng), 2792 .implied_multi_exts = { 2793 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2794 2795 RISCV_IMPLIED_EXTS_RULE_END 2796 }, 2797 }; 2798 2799 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2800 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2801 .implied_multi_exts = { 2802 CPU_CFG_OFFSET(ext_zve64x), 2803 2804 RISCV_IMPLIED_EXTS_RULE_END 2805 }, 2806 }; 2807 2808 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2809 .ext = CPU_CFG_OFFSET(ext_zvks), 2810 .implied_multi_exts = { 2811 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh), 2812 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2813 2814 RISCV_IMPLIED_EXTS_RULE_END 2815 }, 2816 }; 2817 2818 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2819 .ext = CPU_CFG_OFFSET(ext_zvksc), 2820 .implied_multi_exts = { 2821 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2822 2823 RISCV_IMPLIED_EXTS_RULE_END 2824 }, 2825 }; 2826 2827 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2828 .ext = CPU_CFG_OFFSET(ext_zvksg), 2829 .implied_multi_exts = { 2830 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2831 2832 RISCV_IMPLIED_EXTS_RULE_END 2833 }, 2834 }; 2835 2836 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = { 2837 .ext = CPU_CFG_OFFSET(ext_ssccfg), 2838 .implied_multi_exts = { 2839 
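        /*
         * Counter delegation (Ssccfg) relies on the indirect CSR access
         * extensions plus Smcdeleg, all pulled in below.
         */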
CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind), 2840 CPU_CFG_OFFSET(ext_smcdeleg), 2841 2842 RISCV_IMPLIED_EXTS_RULE_END 2843 }, 2844 }; 2845 2846 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = { 2847 .ext = CPU_CFG_OFFSET(ext_supm), 2848 .implied_multi_exts = { 2849 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm), 2850 2851 RISCV_IMPLIED_EXTS_RULE_END 2852 }, 2853 }; 2854 2855 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = { 2856 .ext = CPU_CFG_OFFSET(ext_sspm), 2857 .implied_multi_exts = { 2858 CPU_CFG_OFFSET(ext_smnpm), 2859 2860 RISCV_IMPLIED_EXTS_RULE_END 2861 }, 2862 }; 2863 2864 static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = { 2865 .ext = CPU_CFG_OFFSET(ext_smctr), 2866 .implied_misa_exts = RVS, 2867 .implied_multi_exts = { 2868 CPU_CFG_OFFSET(ext_sscsrind), 2869 2870 RISCV_IMPLIED_EXTS_RULE_END 2871 }, 2872 }; 2873 2874 static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = { 2875 .ext = CPU_CFG_OFFSET(ext_ssctr), 2876 .implied_misa_exts = RVS, 2877 .implied_multi_exts = { 2878 CPU_CFG_OFFSET(ext_sscsrind), 2879 2880 RISCV_IMPLIED_EXTS_RULE_END 2881 }, 2882 }; 2883 2884 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2885 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2886 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2887 }; 2888 2889 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2890 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2891 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2892 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2893 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2894 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2895 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2896 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2897 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2898 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2899 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2900 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2901 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED, 2902 &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED, 2903 NULL 2904 }; 2905 2906 static const Property riscv_cpu_properties[] = { 2907 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2908 2909 {.name = "pmu-mask", .info = &prop_pmu_mask}, 2910 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2911 2912 {.name = "mmu", .info = &prop_mmu}, 2913 {.name = "pmp", .info = &prop_pmp}, 2914 2915 {.name = "priv_spec", .info = &prop_priv_spec}, 2916 {.name = "vext_spec", .info = &prop_vext_spec}, 2917 2918 {.name = "vlen", .info = &prop_vlen}, 2919 {.name = "elen", .info = &prop_elen}, 2920 2921 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2922 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2923 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2924 2925 {.name = "mvendorid", .info = &prop_mvendorid}, 2926 {.name = "mimpid", .info = &prop_mimpid}, 2927 {.name = "marchid", .info = &prop_marchid}, 2928 2929 #ifndef CONFIG_USER_ONLY 2930 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2931 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2932 DEFAULT_RNMI_IRQVEC), 2933 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2934 DEFAULT_RNMI_EXCPVEC), 2935 #endif 2936 2937 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2938 2939 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2940 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2941 DEFINE_PROP_BOOL("rvv_vl_half_avl", 
RISCVCPU, cfg.rvv_vl_half_avl, false), 2942 2943 /* 2944 * write_misa() is marked as experimental for now so mark 2945 * it with -x and default to 'false'. 2946 */ 2947 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2948 }; 2949 2950 #if defined(TARGET_RISCV64) 2951 static void rva22u64_profile_cpu_init(Object *obj) 2952 { 2953 rv64i_bare_cpu_init(obj); 2954 2955 RVA22U64.enabled = true; 2956 } 2957 2958 static void rva22s64_profile_cpu_init(Object *obj) 2959 { 2960 rv64i_bare_cpu_init(obj); 2961 2962 RVA22S64.enabled = true; 2963 } 2964 2965 static void rva23u64_profile_cpu_init(Object *obj) 2966 { 2967 rv64i_bare_cpu_init(obj); 2968 2969 RVA23U64.enabled = true; 2970 } 2971 2972 static void rva23s64_profile_cpu_init(Object *obj) 2973 { 2974 rv64i_bare_cpu_init(obj); 2975 2976 RVA23S64.enabled = true; 2977 } 2978 #endif 2979 2980 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2981 { 2982 RISCVCPU *cpu = RISCV_CPU(cs); 2983 CPURISCVState *env = &cpu->env; 2984 2985 switch (riscv_cpu_mxl(env)) { 2986 case MXL_RV32: 2987 return "riscv:rv32"; 2988 case MXL_RV64: 2989 case MXL_RV128: 2990 return "riscv:rv64"; 2991 default: 2992 g_assert_not_reached(); 2993 } 2994 } 2995 2996 #ifndef CONFIG_USER_ONLY 2997 static int64_t riscv_get_arch_id(CPUState *cs) 2998 { 2999 RISCVCPU *cpu = RISCV_CPU(cs); 3000 3001 return cpu->env.mhartid; 3002 } 3003 3004 #include "hw/core/sysemu-cpu-ops.h" 3005 3006 static const struct SysemuCPUOps riscv_sysemu_ops = { 3007 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 3008 .write_elf64_note = riscv_cpu_write_elf64_note, 3009 .write_elf32_note = riscv_cpu_write_elf32_note, 3010 .legacy_vmsd = &vmstate_riscv_cpu, 3011 }; 3012 #endif 3013 3014 static void riscv_cpu_common_class_init(ObjectClass *c, void *data) 3015 { 3016 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3017 CPUClass *cc = CPU_CLASS(c); 3018 DeviceClass *dc = DEVICE_CLASS(c); 3019 ResettableClass *rc = RESETTABLE_CLASS(c); 3020 3021 device_class_set_parent_realize(dc, riscv_cpu_realize, 3022 &mcc->parent_realize); 3023 3024 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 3025 &mcc->parent_phases); 3026 3027 cc->class_by_name = riscv_cpu_class_by_name; 3028 cc->has_work = riscv_cpu_has_work; 3029 cc->mmu_index = riscv_cpu_mmu_index; 3030 cc->dump_state = riscv_cpu_dump_state; 3031 cc->set_pc = riscv_cpu_set_pc; 3032 cc->get_pc = riscv_cpu_get_pc; 3033 cc->gdb_read_register = riscv_cpu_gdb_read_register; 3034 cc->gdb_write_register = riscv_cpu_gdb_write_register; 3035 cc->gdb_stop_before_watchpoint = true; 3036 cc->disas_set_info = riscv_cpu_disas_set_info; 3037 #ifndef CONFIG_USER_ONLY 3038 cc->sysemu_ops = &riscv_sysemu_ops; 3039 cc->get_arch_id = riscv_get_arch_id; 3040 #endif 3041 cc->gdb_arch_name = riscv_gdb_arch_name; 3042 3043 device_class_set_props(dc, riscv_cpu_properties); 3044 } 3045 3046 static void riscv_cpu_class_init(ObjectClass *c, void *data) 3047 { 3048 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3049 3050 mcc->misa_mxl_max = (uint32_t)(uintptr_t)data; 3051 riscv_cpu_validate_misa_mxl(mcc); 3052 } 3053 3054 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 3055 int max_str_len) 3056 { 3057 const RISCVIsaExtData *edata; 3058 char *old = *isa_str; 3059 char *new = *isa_str; 3060 3061 for (edata = isa_edata_arr; edata && edata->name; edata++) { 3062 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3063 new = g_strconcat(old, "_", edata->name, NULL); 3064 g_free(old); 3065 old = new; 3066 } 3067 } 3068 3069 *isa_str = new; 3070 } 
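
/*
 * Illustrative result: for a CPU with I/M/A/F/D/C set in misa and
 * Zicsr/Zifencei enabled, riscv_isa_string() below produces
 * "rv64imafdc", which riscv_isa_string_ext() above extends to
 * "rv64imafdc_zicsr_zifencei" (unless short-isa-string is set).
 */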
3071 3072 char *riscv_isa_string(RISCVCPU *cpu) 3073 { 3074 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3075 int i; 3076 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 3077 char *isa_str = g_new(char, maxlen); 3078 int xlen = riscv_cpu_max_xlen(mcc); 3079 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 3080 3081 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3082 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3083 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 3084 } 3085 } 3086 *p = '\0'; 3087 if (!cpu->cfg.short_isa_string) { 3088 riscv_isa_string_ext(cpu, &isa_str, maxlen); 3089 } 3090 return isa_str; 3091 } 3092 3093 #ifndef CONFIG_USER_ONLY 3094 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 3095 { 3096 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 3097 char **extensions = g_new(char *, maxlen); 3098 3099 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3100 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3101 extensions[*count] = g_new(char, 2); 3102 snprintf(extensions[*count], 2, "%c", 3103 qemu_tolower(riscv_single_letter_exts[i])); 3104 (*count)++; 3105 } 3106 } 3107 3108 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 3109 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3110 extensions[*count] = g_strdup(edata->name); 3111 (*count)++; 3112 } 3113 } 3114 3115 return extensions; 3116 } 3117 3118 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 3119 { 3120 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3121 const size_t maxlen = sizeof("rv128i"); 3122 g_autofree char *isa_base = g_new(char, maxlen); 3123 g_autofree char *riscv_isa; 3124 char **isa_extensions; 3125 int count = 0; 3126 int xlen = riscv_cpu_max_xlen(mcc); 3127 3128 riscv_isa = riscv_isa_string(cpu); 3129 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 3130 3131 snprintf(isa_base, maxlen, "rv%di", xlen); 3132 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 3133 3134 isa_extensions = riscv_isa_extensions_list(cpu, &count); 3135 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 3136 isa_extensions, count); 3137 3138 for (int i = 0; i < count; i++) { 3139 g_free(isa_extensions[i]); 3140 } 3141 3142 g_free(isa_extensions); 3143 } 3144 #endif 3145 3146 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 3147 { \ 3148 .name = (type_name), \ 3149 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 3150 .instance_init = (initfn), \ 3151 .class_init = riscv_cpu_class_init, \ 3152 .class_data = (void *)(misa_mxl_max) \ 3153 } 3154 3155 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 3156 { \ 3157 .name = (type_name), \ 3158 .parent = TYPE_RISCV_VENDOR_CPU, \ 3159 .instance_init = (initfn), \ 3160 .class_init = riscv_cpu_class_init, \ 3161 .class_data = (void *)(misa_mxl_max) \ 3162 } 3163 3164 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 3165 { \ 3166 .name = (type_name), \ 3167 .parent = TYPE_RISCV_BARE_CPU, \ 3168 .instance_init = (initfn), \ 3169 .class_init = riscv_cpu_class_init, \ 3170 .class_data = (void *)(misa_mxl_max) \ 3171 } 3172 3173 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 3174 { \ 3175 .name = (type_name), \ 3176 .parent = TYPE_RISCV_BARE_CPU, \ 3177 .instance_init = (initfn), \ 3178 .class_init = riscv_cpu_class_init, \ 3179 .class_data = (void *)(misa_mxl_max) \ 3180 } 3181 3182 static const TypeInfo 
riscv_cpu_type_infos[] = { 3183 { 3184 .name = TYPE_RISCV_CPU, 3185 .parent = TYPE_CPU, 3186 .instance_size = sizeof(RISCVCPU), 3187 .instance_align = __alignof(RISCVCPU), 3188 .instance_init = riscv_cpu_init, 3189 .instance_post_init = riscv_cpu_post_init, 3190 .abstract = true, 3191 .class_size = sizeof(RISCVCPUClass), 3192 .class_init = riscv_cpu_common_class_init, 3193 }, 3194 { 3195 .name = TYPE_RISCV_DYNAMIC_CPU, 3196 .parent = TYPE_RISCV_CPU, 3197 .abstract = true, 3198 }, 3199 { 3200 .name = TYPE_RISCV_VENDOR_CPU, 3201 .parent = TYPE_RISCV_CPU, 3202 .abstract = true, 3203 }, 3204 { 3205 .name = TYPE_RISCV_BARE_CPU, 3206 .parent = TYPE_RISCV_CPU, 3207 .instance_init = riscv_bare_cpu_init, 3208 .abstract = true, 3209 }, 3210 #if defined(TARGET_RISCV32) 3211 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3212 #elif defined(TARGET_RISCV64) 3213 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3214 #endif 3215 3216 #if defined(TARGET_RISCV32) || \ 3217 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3218 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3219 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3220 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3221 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3222 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3223 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 3224 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 3225 #endif 3226 3227 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3228 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 3229 #endif 3230 3231 #if defined(TARGET_RISCV64) 3232 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 3233 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 3234 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 3235 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 3236 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 3237 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), 3238 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 3239 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, 3240 MXL_RV64, rv64_xiangshan_nanhu_cpu_init), 3241 #ifdef CONFIG_TCG 3242 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 3243 #endif /* CONFIG_TCG */ 3244 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 3245 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 3246 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 3247 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 3248 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init), 3249 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init), 3250 #endif /* TARGET_RISCV64 */ 3251 }; 3252 3253 DEFINE_TYPES(riscv_cpu_type_infos) 3254
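
/*
 * Illustrative mapping from the type definitions above to command line
 * usage (CPU names assumed to match the TYPE_RISCV_CPU_* strings):
 *   -cpu max          (TYPE_RISCV_CPU_MAX, dynamic)
 *   -cpu rv64         (TYPE_RISCV_CPU_BASE64, dynamic)
 *   -cpu sifive-u54   (TYPE_RISCV_CPU_SIFIVE_U54, vendor, fixed config)
 *   -cpu rva22s64     (TYPE_RISCV_CPU_RVA22S64, profile)
 */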