/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
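 *
 * For example (illustrative only), an ISA string that follows these rules
 * could look like:
 *     rv64imafdc_zicsr_zifencei_zba_zbb_svadu_xtheadba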
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
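
    /* Privileged-level (Smxxx/Ssxxx/Svxxx) and vendor (Xxxx) extensions */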
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
{
    env->misa_mxl_max = env->misa_mxl = mxl;
    env->misa_ext_mask = env->misa_ext = ext;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ?
        valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    RISCVMXL mlx = MXL_RV64;

#ifdef TARGET_RISCV32
    mlx = MXL_RV32;
#endif
    riscv_cpu_set_misa(env, mlx, 0);
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ?
                                VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    riscv_cpu_set_misa(env, MXL_RV64, 0);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa(env, MXL_RV64,
                       RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    riscv_cpu_set_misa(env, MXL_RV128, 0);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    riscv_cpu_set_misa(env, MXL_RV32, 0);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa(env, MXL_RV32,
                       RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    cpu->cfg.epmp = true;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return g_strndup(typename,
                     strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX));
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
#ifndef CONFIG_USER_ONLY
    Error *local_err = NULL;

    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}
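
/*
 * The satp mode properties registered above are ordinary QOM bool
 * properties, so the reachable translation modes can also be selected
 * from the command line, e.g. "-cpu rv64,sv39=on,sv48=off"
 * (illustrative invocation).
 */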

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
};

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    DEFINE_PROP_END_OF_LIST(),
};
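
/*
 * The standard extension flags above, like the vendor extension flags
 * below, are exposed as boolean CPU properties and can be toggled from
 * the command line, e.g. "-cpu rv64,zbc=off,xtheadba=on" (illustrative).
 */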

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    /* ePMP 0.9.3 */
    MULTI_EXT_CFG_BOOL("x-epmp", epmp, false),
    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),

    MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),

    MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("x-zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("x-zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("x-zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("x-zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("x-zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("x-zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("x-zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("x-zvksh", ext_zvksh, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};

Property riscv_cpu_options[] = {
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),

    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),

    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),

    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_END_OF_LIST(),
};
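
/*
 * The options above are likewise user-visible CPU properties; for
 * instance, a different vector register length can be requested with
 * something like "-cpu rv64,v=on,vlen=256" (illustrative command line).
 */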

static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_bool(v, name, &value, errp);
}

static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_bool(v, name, &value, errp);
}

static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1LL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                   "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;

    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

#define DEFINE_CPU(type_name, initfn)         \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_CPU,             \
        .instance_init = initfn               \
    }

#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init),
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)