/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */

/* Canonical ordering of the single-letter extensions in a riscv,isa string */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";

/* Every valid MISA bit, zero-terminated so callers can iterate blindly */
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

/* Return true if the CPU's machine-mode XLEN is 32 bits */
bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

/*
 * Record that the user explicitly set option 'optname' (with numeric
 * 'value') on the command line.  The option name string is used as the
 * hash key, so it must outlive the table (callers pass static names).
 */
static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

/* Return true if the user explicitly set 'optname' on the command line */
bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions.  If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    /* Zero entry terminates the list */
    { },
};

/*
 * Return whether the extension whose cfg flag lives at 'ext_offset'
 * (a CPU_CFG_OFFSET() byte offset into RISCVCPUConfig) is enabled.
 */
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

/*
 * Set/clear the extension flag at byte offset 'ext_offset' inside
 * cpu->cfg (offsets come from CPU_CFG_OFFSET(), e.g. via isa_edata_arr).
 */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

/* True if the object is a vendor (named, fixed-config) CPU model */
bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

/* GPR names: ABI mnemonic alongside the architectural xN index */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",   "x3/gp",   "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",   "x10/a0",  "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6",  "x17/a7",  "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7",  "x24/s8",  "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5",  "x31/t6"
};

/* Names for the upper halves of the GPRs (used by RV128) */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};

/* FPR names: ABI mnemonic alongside the architectural fN index */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
    "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
    "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

/* Vector register names */
const char * const riscv_rvv_regnames[] = {
  "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
  "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
  "v14", "v15", "v16", "v17", "v18", "v19", "v20",
  "v21", "v22", "v23", "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31"
};

/* Synchronous exception names, indexed by mcause exception code */
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

/* Interrupt names, indexed by mcause interrupt code */
static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

/*
 * Human-readable name for a trap cause.  'async' selects the interrupt
 * table over the exception table; out-of-range causes yield "(unknown)".
 */
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

/* Set both the current MISA extensions and the writable-mask to 'ext' */
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

/* Maximum XLEN in bits for this CPU class (MXL 1/2/3 -> 32/64/128) */
int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
/*
 * Translate a satp mode name ("mbare", "sv32", ... "sv64") into its
 * VM_1_10_* encoding.  Asserts on any unrecognized string: callers
 * are expected to pass only validated property values.
 */
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

/* Highest satp mode number present in the bitmap 'map' */
uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0.
     * Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

/*
 * Name of a VM_1_10_* satp mode for the given XLEN; MBARE is reported
 * as "none".  Asserts on modes invalid for the XLEN.
 */
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

/*
 * Mark every valid satp mode up to and including 'satp_mode' as
 * supported by this CPU model.
 */
static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

/* 'max' CPU: everything on, latest priv spec, widest satp for the XLEN */
static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
/* Generic rv64 CPU: MMU + PMP, latest priv spec, up to SV57 */
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

/* SiFive U54-class core: RV64GC with S/U modes, priv 1.10, SV39 */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E51-class embedded core: no MMU, no FP, M-mode only + U */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

/* T-Head C906: RV64GCSU, priv 1.11, with the XThead custom extensions */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

/* Ventana Veyron V1: RV64GCHSU, priv 1.12, AIA + vendor CondOps */
static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

#ifdef CONFIG_TCG
/* Experimental rv128 CPU; single-threaded TCG only (no 128-bit atomics) */
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#endif /* CONFIG_TCG */

/* Bare rv64i: only RVI; users enable everything else explicitly */
static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

/* Bare rv64e: only RVE */
static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#endif /* !TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

/* Generic rv32 CPU: MMU + PMP, latest priv spec, SV32 */
static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

/* 32-bit SiFive U-series core: RV32GC with S/U modes, SV32 */
static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E31-class embedded core: no MMU, no FP */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

/* lowRISC Ibex: RV32IMCU + bitmanip, priv 1.12, ePMP */
static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

/* RV32IMAFC, no MMU (e.g. SiFive E34-style parts) */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

/* Bare rv32i: only RVI */
static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

/* Bare rv32e: only RVE */
static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

/*
 * Resolve a "-cpu model[,opts]" string to its ObjectClass by mapping
 * the model name through RISCV_CPU_TYPE_NAME().  Only the part before
 * the first ',' is used.
 */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc =
object_class_by_name(typename); 733 g_strfreev(cpuname); 734 g_free(typename); 735 736 return oc; 737 } 738 739 char *riscv_cpu_get_name(RISCVCPU *cpu) 740 { 741 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 742 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 743 744 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 745 746 return cpu_model_from_type(typename); 747 } 748 749 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 750 { 751 RISCVCPU *cpu = RISCV_CPU(cs); 752 CPURISCVState *env = &cpu->env; 753 int i, j; 754 uint8_t *p; 755 756 #if !defined(CONFIG_USER_ONLY) 757 if (riscv_has_ext(env, RVH)) { 758 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 759 } 760 #endif 761 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 762 #ifndef CONFIG_USER_ONLY 763 { 764 static const int dump_csrs[] = { 765 CSR_MHARTID, 766 CSR_MSTATUS, 767 CSR_MSTATUSH, 768 /* 769 * CSR_SSTATUS is intentionally omitted here as its value 770 * can be figured out by looking at CSR_MSTATUS 771 */ 772 CSR_HSTATUS, 773 CSR_VSSTATUS, 774 CSR_MIP, 775 CSR_MIE, 776 CSR_MIDELEG, 777 CSR_HIDELEG, 778 CSR_MEDELEG, 779 CSR_HEDELEG, 780 CSR_MTVEC, 781 CSR_STVEC, 782 CSR_VSTVEC, 783 CSR_MEPC, 784 CSR_SEPC, 785 CSR_VSEPC, 786 CSR_MCAUSE, 787 CSR_SCAUSE, 788 CSR_VSCAUSE, 789 CSR_MTVAL, 790 CSR_STVAL, 791 CSR_HTVAL, 792 CSR_MTVAL2, 793 CSR_MSCRATCH, 794 CSR_SSCRATCH, 795 CSR_SATP, 796 CSR_MMTE, 797 CSR_UPMBASE, 798 CSR_UPMMASK, 799 CSR_SPMBASE, 800 CSR_SPMMASK, 801 CSR_MPMBASE, 802 CSR_MPMMASK, 803 }; 804 805 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 806 int csrno = dump_csrs[i]; 807 target_ulong val = 0; 808 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 809 810 /* 811 * Rely on the smode, hmode, etc, predicates within csr.c 812 * to do the filtering of the registers that are present. 
813 */ 814 if (res == RISCV_EXCP_NONE) { 815 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 816 csr_ops[csrno].name, val); 817 } 818 } 819 } 820 #endif 821 822 for (i = 0; i < 32; i++) { 823 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 824 riscv_int_regnames[i], env->gpr[i]); 825 if ((i & 3) == 3) { 826 qemu_fprintf(f, "\n"); 827 } 828 } 829 if (flags & CPU_DUMP_FPU) { 830 target_ulong val = 0; 831 RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0); 832 if (res == RISCV_EXCP_NONE) { 833 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 834 csr_ops[CSR_FCSR].name, val); 835 } 836 for (i = 0; i < 32; i++) { 837 qemu_fprintf(f, " %-8s %016" PRIx64, 838 riscv_fpr_regnames[i], env->fpr[i]); 839 if ((i & 3) == 3) { 840 qemu_fprintf(f, "\n"); 841 } 842 } 843 } 844 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 845 static const int dump_rvv_csrs[] = { 846 CSR_VSTART, 847 CSR_VXSAT, 848 CSR_VXRM, 849 CSR_VCSR, 850 CSR_VL, 851 CSR_VTYPE, 852 CSR_VLENB, 853 }; 854 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 855 int csrno = dump_rvv_csrs[i]; 856 target_ulong val = 0; 857 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 858 859 /* 860 * Rely on the smode, hmode, etc, predicates within csr.c 861 * to do the filtering of the registers that are present. 
862 */ 863 if (res == RISCV_EXCP_NONE) { 864 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 865 csr_ops[csrno].name, val); 866 } 867 } 868 uint16_t vlenb = cpu->cfg.vlenb; 869 870 for (i = 0; i < 32; i++) { 871 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 872 p = (uint8_t *)env->vreg; 873 for (j = vlenb - 1 ; j >= 0; j--) { 874 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 875 } 876 qemu_fprintf(f, "\n"); 877 } 878 } 879 } 880 881 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 882 { 883 RISCVCPU *cpu = RISCV_CPU(cs); 884 CPURISCVState *env = &cpu->env; 885 886 if (env->xl == MXL_RV32) { 887 env->pc = (int32_t)value; 888 } else { 889 env->pc = value; 890 } 891 } 892 893 static vaddr riscv_cpu_get_pc(CPUState *cs) 894 { 895 RISCVCPU *cpu = RISCV_CPU(cs); 896 CPURISCVState *env = &cpu->env; 897 898 /* Match cpu_get_tb_cpu_state. */ 899 if (env->xl == MXL_RV32) { 900 return env->pc & UINT32_MAX; 901 } 902 return env->pc; 903 } 904 905 bool riscv_cpu_has_work(CPUState *cs) 906 { 907 #ifndef CONFIG_USER_ONLY 908 RISCVCPU *cpu = RISCV_CPU(cs); 909 CPURISCVState *env = &cpu->env; 910 /* 911 * Definition of the WFI instruction requires it to ignore the privilege 912 * mode and delegation registers, but respect individual enables 913 */ 914 return riscv_cpu_all_pending(env) != 0 || 915 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || 916 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; 917 #else 918 return true; 919 #endif 920 } 921 922 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) 923 { 924 return riscv_env_mmu_index(cpu_env(cs), ifetch); 925 } 926 927 static void riscv_cpu_reset_hold(Object *obj, ResetType type) 928 { 929 #ifndef CONFIG_USER_ONLY 930 uint8_t iprio; 931 int i, irq, rdzero; 932 #endif 933 CPUState *cs = CPU(obj); 934 RISCVCPU *cpu = RISCV_CPU(cs); 935 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 936 CPURISCVState *env = &cpu->env; 937 938 if (mcc->parent_phases.hold) { 939 mcc->parent_phases.hold(obj, type); 940 } 941 
#ifndef CONFIG_USER_ONLY
    /* System emulation: reset into M-mode with interrupts disabled. */
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    /* menvcfg reset value depends on which page-table extensions exist. */
    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    /* User-mode emulation runs in U-mode. */
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    /* vtype starts out invalid until the first vsetvl. */
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

/* Pick the disassembler matching the current effective XLEN. */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * Validate and normalize the user-configurable satp mode bitmaps
 * (cfg.satp_mode.{supported,init,map}) into a consistent final map.
 * Reports configuration errors through errp.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if
 (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

/*
 * Finalize all configurable CPU features, delegating the
 * accelerator-specific part to TCG or KVM. Stops at the first error
 * and propagates it through errp.
 */
void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/* DeviceClass::realize handler for RISC-V CPUs. */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug)
 {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

/* True when the selected accelerator can run this CPU model. */
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    /* Non-TCG accelerators perform their own checks elsewhere. */
    return true;
}

#ifndef CONFIG_USER_ONLY
/* QOM getter for one "svNN" satp-mode bool property (opaque = RISCVSATPMap). */
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

/* QOM setter for one "svNN" satp-mode bool property; records user intent. */
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    /* Remember which modes were explicitly touched by the user. */
    satp_map->init |= 1 << satp;
}

/* Register the satp-mode properties matching the CPU's MXL. */
void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

/* GPIO input handler: route an incoming IRQ line change to mip/hgeip. */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /* SEIP is the OR of the external line and software writes. */
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

/* True for CPU models whose extension set is user-configurable. */
static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

/* Instance-init: common defaults shared by all RISC-V CPU models. */
static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl =
 mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;      /* vlenb is stored in bytes: VLEN=128 */
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

/* Instance-init for "bare" CPU models: minimal defaults, no counters. */
static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

/* Name/description pair for one single-letter MISA extension. */
typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

/* Index into misa_ext_info_arr: position of the MISA bit. */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly.
 */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

/* Map a MISA bit to its misa_ext_info_arr index, asserting validity. */
static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

/* Property name ("a", "c", ...) of a single-letter MISA extension bit. */
const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

/* Human-readable description of a single-letter MISA extension bit. */
const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

/* One boolean multi-letter extension property with its default. */
#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
    MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs,
 true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    { },
};

/* Vendor (X*) extensions; all disabled by default. */
const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    { },
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    { },
};

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disable) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),

    { },
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    { },
};

/* Set a "property not changeable on vendor CPU" error on errp. */
static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

/* Setter for the deprecated "pmu-num" property; converted to a pmu_mask. */
static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    /*
     * NOTE(review): the visitor's return value is not checked here; if
     * the visit fails, pmu_num is read uninitialized below. Consider
     * 'if (!visit_type_uint8(...)) { return; }' as done elsewhere.
     */
    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        /* Counters start at mhpmcounter3; bits 0-2 are cycle/time/instret. */
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is "
deprecated; use \"pmu-mask\""); 1671 cpu->cfg.pmu_mask = pmu_mask; 1672 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1673 } 1674 1675 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1676 void *opaque, Error **errp) 1677 { 1678 RISCVCPU *cpu = RISCV_CPU(obj); 1679 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1680 1681 visit_type_uint8(v, name, &pmu_num, errp); 1682 } 1683 1684 static const PropertyInfo prop_pmu_num = { 1685 .name = "pmu-num", 1686 .get = prop_pmu_num_get, 1687 .set = prop_pmu_num_set, 1688 }; 1689 1690 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1691 void *opaque, Error **errp) 1692 { 1693 RISCVCPU *cpu = RISCV_CPU(obj); 1694 uint32_t value; 1695 uint8_t pmu_num; 1696 1697 visit_type_uint32(v, name, &value, errp); 1698 1699 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1700 cpu_set_prop_err(cpu, name, errp); 1701 error_append_hint(errp, "Current '%s' val: %x\n", 1702 name, cpu->cfg.pmu_mask); 1703 return; 1704 } 1705 1706 pmu_num = ctpop32(value); 1707 1708 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1709 error_setg(errp, "Number of counters exceeds maximum available"); 1710 return; 1711 } 1712 1713 cpu_option_add_user_setting(name, value); 1714 cpu->cfg.pmu_mask = value; 1715 } 1716 1717 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1718 void *opaque, Error **errp) 1719 { 1720 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1721 1722 visit_type_uint8(v, name, &pmu_mask, errp); 1723 } 1724 1725 static const PropertyInfo prop_pmu_mask = { 1726 .name = "pmu-mask", 1727 .get = prop_pmu_mask_get, 1728 .set = prop_pmu_mask_set, 1729 }; 1730 1731 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1732 void *opaque, Error **errp) 1733 { 1734 RISCVCPU *cpu = RISCV_CPU(obj); 1735 bool value; 1736 1737 visit_type_bool(v, name, &value, errp); 1738 1739 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1740 cpu_set_prop_err(cpu, "mmu", 
 errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

/* Getter for the "mmu" bool property. */
static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

/* Setter for the "pmp" bool property; immutable on vendor CPUs. */
static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    /*
     * NOTE(review): visitor return unchecked; 'value' is read
     * uninitialized if the visit fails — consider checking as in
     * cpu_riscv_set_satp().
     */
    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

/* Getter for the "pmp" bool property. */
static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

/* Parse a priv spec version string; returns -1 when unrecognized. */
static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
        priv_version = PRIV_VERSION_1_13_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}

/* String form of a priv spec version; NULL when unrecognized. */
const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    case PRIV_VERSION_1_13_0:
        return PRIV_VER_1_13_0_STR;
    default:
        return NULL;
    }
}

/* Setter for "priv_spec" (string); immutable on vendor CPUs. */
static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    visit_type_str(v, name, &value, errp);

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

/* Getter for "priv_spec" (string). */
static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

/* Setter for "vext_spec"; only vector spec v1.0 is supported. */
static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    visit_type_str(v, name, &value, errp);

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 1885 void *opaque, Error **errp) 1886 { 1887 const char *value = VEXT_VER_1_00_0_STR; 1888 1889 visit_type_str(v, name, (char **)&value, errp); 1890 } 1891 1892 static const PropertyInfo prop_vext_spec = { 1893 .name = "vext_spec", 1894 .get = prop_vext_spec_get, 1895 .set = prop_vext_spec_set, 1896 }; 1897 1898 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 1899 void *opaque, Error **errp) 1900 { 1901 RISCVCPU *cpu = RISCV_CPU(obj); 1902 uint16_t value; 1903 1904 if (!visit_type_uint16(v, name, &value, errp)) { 1905 return; 1906 } 1907 1908 if (!is_power_of_2(value)) { 1909 error_setg(errp, "Vector extension VLEN must be power of 2"); 1910 return; 1911 } 1912 1913 if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) { 1914 cpu_set_prop_err(cpu, name, errp); 1915 error_append_hint(errp, "Current '%s' val: %u\n", 1916 name, cpu->cfg.vlenb << 3); 1917 return; 1918 } 1919 1920 cpu_option_add_user_setting(name, value); 1921 cpu->cfg.vlenb = value >> 3; 1922 } 1923 1924 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 1925 void *opaque, Error **errp) 1926 { 1927 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 1928 1929 visit_type_uint16(v, name, &value, errp); 1930 } 1931 1932 static const PropertyInfo prop_vlen = { 1933 .name = "vlen", 1934 .get = prop_vlen_get, 1935 .set = prop_vlen_set, 1936 }; 1937 1938 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 1939 void *opaque, Error **errp) 1940 { 1941 RISCVCPU *cpu = RISCV_CPU(obj); 1942 uint16_t value; 1943 1944 if (!visit_type_uint16(v, name, &value, errp)) { 1945 return; 1946 } 1947 1948 if (!is_power_of_2(value)) { 1949 error_setg(errp, "Vector extension ELEN must be power of 2"); 1950 return; 1951 } 1952 1953 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 1954 cpu_set_prop_err(cpu, name, errp); 1955 error_append_hint(errp, "Current '%s' val: %u\n", 1956 name, 
cpu->cfg.elen); 1957 return; 1958 } 1959 1960 cpu_option_add_user_setting(name, value); 1961 cpu->cfg.elen = value; 1962 } 1963 1964 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 1965 void *opaque, Error **errp) 1966 { 1967 uint16_t value = RISCV_CPU(obj)->cfg.elen; 1968 1969 visit_type_uint16(v, name, &value, errp); 1970 } 1971 1972 static const PropertyInfo prop_elen = { 1973 .name = "elen", 1974 .get = prop_elen_get, 1975 .set = prop_elen_set, 1976 }; 1977 1978 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 1979 void *opaque, Error **errp) 1980 { 1981 RISCVCPU *cpu = RISCV_CPU(obj); 1982 uint16_t value; 1983 1984 if (!visit_type_uint16(v, name, &value, errp)) { 1985 return; 1986 } 1987 1988 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 1989 cpu_set_prop_err(cpu, name, errp); 1990 error_append_hint(errp, "Current '%s' val: %u\n", 1991 name, cpu->cfg.cbom_blocksize); 1992 return; 1993 } 1994 1995 cpu_option_add_user_setting(name, value); 1996 cpu->cfg.cbom_blocksize = value; 1997 } 1998 1999 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2000 void *opaque, Error **errp) 2001 { 2002 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2003 2004 visit_type_uint16(v, name, &value, errp); 2005 } 2006 2007 static const PropertyInfo prop_cbom_blksize = { 2008 .name = "cbom_blocksize", 2009 .get = prop_cbom_blksize_get, 2010 .set = prop_cbom_blksize_set, 2011 }; 2012 2013 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2014 void *opaque, Error **errp) 2015 { 2016 RISCVCPU *cpu = RISCV_CPU(obj); 2017 uint16_t value; 2018 2019 if (!visit_type_uint16(v, name, &value, errp)) { 2020 return; 2021 } 2022 2023 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2024 cpu_set_prop_err(cpu, name, errp); 2025 error_append_hint(errp, "Current '%s' val: %u\n", 2026 name, cpu->cfg.cbop_blocksize); 2027 return; 2028 } 2029 2030 
cpu_option_add_user_setting(name, value); 2031 cpu->cfg.cbop_blocksize = value; 2032 } 2033 2034 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2035 void *opaque, Error **errp) 2036 { 2037 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2038 2039 visit_type_uint16(v, name, &value, errp); 2040 } 2041 2042 static const PropertyInfo prop_cbop_blksize = { 2043 .name = "cbop_blocksize", 2044 .get = prop_cbop_blksize_get, 2045 .set = prop_cbop_blksize_set, 2046 }; 2047 2048 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2049 void *opaque, Error **errp) 2050 { 2051 RISCVCPU *cpu = RISCV_CPU(obj); 2052 uint16_t value; 2053 2054 if (!visit_type_uint16(v, name, &value, errp)) { 2055 return; 2056 } 2057 2058 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2059 cpu_set_prop_err(cpu, name, errp); 2060 error_append_hint(errp, "Current '%s' val: %u\n", 2061 name, cpu->cfg.cboz_blocksize); 2062 return; 2063 } 2064 2065 cpu_option_add_user_setting(name, value); 2066 cpu->cfg.cboz_blocksize = value; 2067 } 2068 2069 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2070 void *opaque, Error **errp) 2071 { 2072 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2073 2074 visit_type_uint16(v, name, &value, errp); 2075 } 2076 2077 static const PropertyInfo prop_cboz_blksize = { 2078 .name = "cboz_blocksize", 2079 .get = prop_cboz_blksize_get, 2080 .set = prop_cboz_blksize_set, 2081 }; 2082 2083 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2084 void *opaque, Error **errp) 2085 { 2086 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2087 RISCVCPU *cpu = RISCV_CPU(obj); 2088 uint32_t prev_val = cpu->cfg.mvendorid; 2089 uint32_t value; 2090 2091 if (!visit_type_uint32(v, name, &value, errp)) { 2092 return; 2093 } 2094 2095 if (!dynamic_cpu && prev_val != value) { 2096 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2097 
object_get_typename(obj), prev_val); 2098 return; 2099 } 2100 2101 cpu->cfg.mvendorid = value; 2102 } 2103 2104 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2105 void *opaque, Error **errp) 2106 { 2107 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2108 2109 visit_type_uint32(v, name, &value, errp); 2110 } 2111 2112 static const PropertyInfo prop_mvendorid = { 2113 .name = "mvendorid", 2114 .get = prop_mvendorid_get, 2115 .set = prop_mvendorid_set, 2116 }; 2117 2118 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2119 void *opaque, Error **errp) 2120 { 2121 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2122 RISCVCPU *cpu = RISCV_CPU(obj); 2123 uint64_t prev_val = cpu->cfg.mimpid; 2124 uint64_t value; 2125 2126 if (!visit_type_uint64(v, name, &value, errp)) { 2127 return; 2128 } 2129 2130 if (!dynamic_cpu && prev_val != value) { 2131 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2132 object_get_typename(obj), prev_val); 2133 return; 2134 } 2135 2136 cpu->cfg.mimpid = value; 2137 } 2138 2139 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2140 void *opaque, Error **errp) 2141 { 2142 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2143 2144 visit_type_uint64(v, name, &value, errp); 2145 } 2146 2147 static const PropertyInfo prop_mimpid = { 2148 .name = "mimpid", 2149 .get = prop_mimpid_get, 2150 .set = prop_mimpid_set, 2151 }; 2152 2153 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2154 void *opaque, Error **errp) 2155 { 2156 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2157 RISCVCPU *cpu = RISCV_CPU(obj); 2158 uint64_t prev_val = cpu->cfg.marchid; 2159 uint64_t value, invalid_val; 2160 uint32_t mxlen = 0; 2161 2162 if (!visit_type_uint64(v, name, &value, errp)) { 2163 return; 2164 } 2165 2166 if (!dynamic_cpu && prev_val != value) { 2167 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2168 object_get_typename(obj), prev_val); 2169 
return; 2170 } 2171 2172 switch (riscv_cpu_mxl(&cpu->env)) { 2173 case MXL_RV32: 2174 mxlen = 32; 2175 break; 2176 case MXL_RV64: 2177 case MXL_RV128: 2178 mxlen = 64; 2179 break; 2180 default: 2181 g_assert_not_reached(); 2182 } 2183 2184 invalid_val = 1LL << (mxlen - 1); 2185 2186 if (value == invalid_val) { 2187 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2188 "and the remaining bits zero", mxlen); 2189 return; 2190 } 2191 2192 cpu->cfg.marchid = value; 2193 } 2194 2195 static void prop_marchid_get(Object *obj, Visitor *v, const char *name, 2196 void *opaque, Error **errp) 2197 { 2198 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 2199 2200 visit_type_uint64(v, name, &value, errp); 2201 } 2202 2203 static const PropertyInfo prop_marchid = { 2204 .name = "marchid", 2205 .get = prop_marchid_get, 2206 .set = prop_marchid_set, 2207 }; 2208 2209 /* 2210 * RVA22U64 defines some 'named features' that are cache 2211 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa 2212 * and Zicclsm. They are always implemented in TCG and 2213 * doesn't need to be manually enabled by the profile. 2214 */ 2215 static RISCVCPUProfile RVA22U64 = { 2216 .parent = NULL, 2217 .name = "rva22u64", 2218 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU, 2219 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, 2220 .satp_mode = RISCV_PROFILE_ATTR_UNUSED, 2221 .ext_offsets = { 2222 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause), 2223 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb), 2224 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin), 2225 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr), 2226 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom), 2227 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz), 2228 2229 /* mandatory named features for this profile */ 2230 CPU_CFG_OFFSET(ext_zic64b), 2231 2232 RISCV_PROFILE_EXT_LIST_END 2233 } 2234 }; 2235 2236 /* 2237 * As with RVA22U64, RVA22S64 also defines 'named features'. 
2238 * 2239 * Cache related features that we consider enabled since we don't 2240 * implement cache: Ssccptr 2241 * 2242 * Other named features that we already implement: Sstvecd, Sstvala, 2243 * Sscounterenw 2244 * 2245 * The remaining features/extensions comes from RVA22U64. 2246 */ 2247 static RISCVCPUProfile RVA22S64 = { 2248 .parent = &RVA22U64, 2249 .name = "rva22s64", 2250 .misa_ext = RVS, 2251 .priv_spec = PRIV_VERSION_1_12_0, 2252 .satp_mode = VM_1_10_SV39, 2253 .ext_offsets = { 2254 /* rva22s64 exts */ 2255 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt), 2256 CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade), 2257 2258 RISCV_PROFILE_EXT_LIST_END 2259 } 2260 }; 2261 2262 RISCVCPUProfile *riscv_profiles[] = { 2263 &RVA22U64, 2264 &RVA22S64, 2265 NULL, 2266 }; 2267 2268 static RISCVCPUImpliedExtsRule RVA_IMPLIED = { 2269 .is_misa = true, 2270 .ext = RVA, 2271 .implied_multi_exts = { 2272 CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo), 2273 2274 RISCV_IMPLIED_EXTS_RULE_END 2275 }, 2276 }; 2277 2278 static RISCVCPUImpliedExtsRule RVD_IMPLIED = { 2279 .is_misa = true, 2280 .ext = RVD, 2281 .implied_misa_exts = RVF, 2282 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2283 }; 2284 2285 static RISCVCPUImpliedExtsRule RVF_IMPLIED = { 2286 .is_misa = true, 2287 .ext = RVF, 2288 .implied_multi_exts = { 2289 CPU_CFG_OFFSET(ext_zicsr), 2290 2291 RISCV_IMPLIED_EXTS_RULE_END 2292 }, 2293 }; 2294 2295 static RISCVCPUImpliedExtsRule RVM_IMPLIED = { 2296 .is_misa = true, 2297 .ext = RVM, 2298 .implied_multi_exts = { 2299 CPU_CFG_OFFSET(ext_zmmul), 2300 2301 RISCV_IMPLIED_EXTS_RULE_END 2302 }, 2303 }; 2304 2305 static RISCVCPUImpliedExtsRule RVV_IMPLIED = { 2306 .is_misa = true, 2307 .ext = RVV, 2308 .implied_multi_exts = { 2309 CPU_CFG_OFFSET(ext_zve64d), 2310 2311 RISCV_IMPLIED_EXTS_RULE_END 2312 }, 2313 }; 2314 2315 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = { 2316 .ext = CPU_CFG_OFFSET(ext_zcb), 2317 .implied_multi_exts = { 2318 
CPU_CFG_OFFSET(ext_zca), 2319 2320 RISCV_IMPLIED_EXTS_RULE_END 2321 }, 2322 }; 2323 2324 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = { 2325 .ext = CPU_CFG_OFFSET(ext_zcd), 2326 .implied_misa_exts = RVD, 2327 .implied_multi_exts = { 2328 CPU_CFG_OFFSET(ext_zca), 2329 2330 RISCV_IMPLIED_EXTS_RULE_END 2331 }, 2332 }; 2333 2334 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = { 2335 .ext = CPU_CFG_OFFSET(ext_zce), 2336 .implied_multi_exts = { 2337 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp), 2338 CPU_CFG_OFFSET(ext_zcmt), 2339 2340 RISCV_IMPLIED_EXTS_RULE_END 2341 }, 2342 }; 2343 2344 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = { 2345 .ext = CPU_CFG_OFFSET(ext_zcf), 2346 .implied_misa_exts = RVF, 2347 .implied_multi_exts = { 2348 CPU_CFG_OFFSET(ext_zca), 2349 2350 RISCV_IMPLIED_EXTS_RULE_END 2351 }, 2352 }; 2353 2354 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2355 .ext = CPU_CFG_OFFSET(ext_zcmp), 2356 .implied_multi_exts = { 2357 CPU_CFG_OFFSET(ext_zca), 2358 2359 RISCV_IMPLIED_EXTS_RULE_END 2360 }, 2361 }; 2362 2363 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2364 .ext = CPU_CFG_OFFSET(ext_zcmt), 2365 .implied_multi_exts = { 2366 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2367 2368 RISCV_IMPLIED_EXTS_RULE_END 2369 }, 2370 }; 2371 2372 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2373 .ext = CPU_CFG_OFFSET(ext_zdinx), 2374 .implied_multi_exts = { 2375 CPU_CFG_OFFSET(ext_zfinx), 2376 2377 RISCV_IMPLIED_EXTS_RULE_END 2378 }, 2379 }; 2380 2381 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2382 .ext = CPU_CFG_OFFSET(ext_zfa), 2383 .implied_misa_exts = RVF, 2384 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2385 }; 2386 2387 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2388 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2389 .implied_misa_exts = RVF, 2390 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2391 }; 2392 2393 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2394 .ext = CPU_CFG_OFFSET(ext_zfh), 2395 .implied_multi_exts = { 2396 
CPU_CFG_OFFSET(ext_zfhmin), 2397 2398 RISCV_IMPLIED_EXTS_RULE_END 2399 }, 2400 }; 2401 2402 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2403 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2404 .implied_misa_exts = RVF, 2405 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2406 }; 2407 2408 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2409 .ext = CPU_CFG_OFFSET(ext_zfinx), 2410 .implied_multi_exts = { 2411 CPU_CFG_OFFSET(ext_zicsr), 2412 2413 RISCV_IMPLIED_EXTS_RULE_END 2414 }, 2415 }; 2416 2417 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2418 .ext = CPU_CFG_OFFSET(ext_zhinx), 2419 .implied_multi_exts = { 2420 CPU_CFG_OFFSET(ext_zhinxmin), 2421 2422 RISCV_IMPLIED_EXTS_RULE_END 2423 }, 2424 }; 2425 2426 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2427 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2428 .implied_multi_exts = { 2429 CPU_CFG_OFFSET(ext_zfinx), 2430 2431 RISCV_IMPLIED_EXTS_RULE_END 2432 }, 2433 }; 2434 2435 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2436 .ext = CPU_CFG_OFFSET(ext_zicntr), 2437 .implied_multi_exts = { 2438 CPU_CFG_OFFSET(ext_zicsr), 2439 2440 RISCV_IMPLIED_EXTS_RULE_END 2441 }, 2442 }; 2443 2444 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2445 .ext = CPU_CFG_OFFSET(ext_zihpm), 2446 .implied_multi_exts = { 2447 CPU_CFG_OFFSET(ext_zicsr), 2448 2449 RISCV_IMPLIED_EXTS_RULE_END 2450 }, 2451 }; 2452 2453 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2454 .ext = CPU_CFG_OFFSET(ext_zk), 2455 .implied_multi_exts = { 2456 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2457 CPU_CFG_OFFSET(ext_zkt), 2458 2459 RISCV_IMPLIED_EXTS_RULE_END 2460 }, 2461 }; 2462 2463 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2464 .ext = CPU_CFG_OFFSET(ext_zkn), 2465 .implied_multi_exts = { 2466 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2467 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2468 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2469 2470 RISCV_IMPLIED_EXTS_RULE_END 2471 }, 2472 }; 2473 2474 static 
RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2475 .ext = CPU_CFG_OFFSET(ext_zks), 2476 .implied_multi_exts = { 2477 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2478 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2479 CPU_CFG_OFFSET(ext_zksh), 2480 2481 RISCV_IMPLIED_EXTS_RULE_END 2482 }, 2483 }; 2484 2485 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2486 .ext = CPU_CFG_OFFSET(ext_zvbb), 2487 .implied_multi_exts = { 2488 CPU_CFG_OFFSET(ext_zvkb), 2489 2490 RISCV_IMPLIED_EXTS_RULE_END 2491 }, 2492 }; 2493 2494 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2495 .ext = CPU_CFG_OFFSET(ext_zve32f), 2496 .implied_misa_exts = RVF, 2497 .implied_multi_exts = { 2498 CPU_CFG_OFFSET(ext_zve32x), 2499 2500 RISCV_IMPLIED_EXTS_RULE_END 2501 }, 2502 }; 2503 2504 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2505 .ext = CPU_CFG_OFFSET(ext_zve32x), 2506 .implied_multi_exts = { 2507 CPU_CFG_OFFSET(ext_zicsr), 2508 2509 RISCV_IMPLIED_EXTS_RULE_END 2510 }, 2511 }; 2512 2513 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2514 .ext = CPU_CFG_OFFSET(ext_zve64d), 2515 .implied_misa_exts = RVD, 2516 .implied_multi_exts = { 2517 CPU_CFG_OFFSET(ext_zve64f), 2518 2519 RISCV_IMPLIED_EXTS_RULE_END 2520 }, 2521 }; 2522 2523 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2524 .ext = CPU_CFG_OFFSET(ext_zve64f), 2525 .implied_misa_exts = RVF, 2526 .implied_multi_exts = { 2527 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2528 2529 RISCV_IMPLIED_EXTS_RULE_END 2530 }, 2531 }; 2532 2533 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2534 .ext = CPU_CFG_OFFSET(ext_zve64x), 2535 .implied_multi_exts = { 2536 CPU_CFG_OFFSET(ext_zve32x), 2537 2538 RISCV_IMPLIED_EXTS_RULE_END 2539 }, 2540 }; 2541 2542 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2543 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2544 .implied_multi_exts = { 2545 CPU_CFG_OFFSET(ext_zve32f), 2546 2547 RISCV_IMPLIED_EXTS_RULE_END 2548 }, 2549 }; 2550 2551 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 
2552 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2553 .implied_multi_exts = { 2554 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2555 2556 RISCV_IMPLIED_EXTS_RULE_END 2557 }, 2558 }; 2559 2560 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2561 .ext = CPU_CFG_OFFSET(ext_zvfh), 2562 .implied_multi_exts = { 2563 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2564 2565 RISCV_IMPLIED_EXTS_RULE_END 2566 }, 2567 }; 2568 2569 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2570 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2571 .implied_multi_exts = { 2572 CPU_CFG_OFFSET(ext_zve32f), 2573 2574 RISCV_IMPLIED_EXTS_RULE_END 2575 }, 2576 }; 2577 2578 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2579 .ext = CPU_CFG_OFFSET(ext_zvkn), 2580 .implied_multi_exts = { 2581 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2582 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2583 2584 RISCV_IMPLIED_EXTS_RULE_END 2585 }, 2586 }; 2587 2588 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2589 .ext = CPU_CFG_OFFSET(ext_zvknc), 2590 .implied_multi_exts = { 2591 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2592 2593 RISCV_IMPLIED_EXTS_RULE_END 2594 }, 2595 }; 2596 2597 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2598 .ext = CPU_CFG_OFFSET(ext_zvkng), 2599 .implied_multi_exts = { 2600 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2601 2602 RISCV_IMPLIED_EXTS_RULE_END 2603 }, 2604 }; 2605 2606 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2607 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2608 .implied_multi_exts = { 2609 CPU_CFG_OFFSET(ext_zve64x), 2610 2611 RISCV_IMPLIED_EXTS_RULE_END 2612 }, 2613 }; 2614 2615 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2616 .ext = CPU_CFG_OFFSET(ext_zvks), 2617 .implied_multi_exts = { 2618 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh), 2619 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2620 2621 RISCV_IMPLIED_EXTS_RULE_END 2622 }, 2623 }; 2624 2625 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2626 
.ext = CPU_CFG_OFFSET(ext_zvksc), 2627 .implied_multi_exts = { 2628 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2629 2630 RISCV_IMPLIED_EXTS_RULE_END 2631 }, 2632 }; 2633 2634 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2635 .ext = CPU_CFG_OFFSET(ext_zvksg), 2636 .implied_multi_exts = { 2637 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2638 2639 RISCV_IMPLIED_EXTS_RULE_END 2640 }, 2641 }; 2642 2643 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2644 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2645 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2646 }; 2647 2648 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2649 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2650 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2651 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2652 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2653 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2654 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2655 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2656 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2657 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2658 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2659 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2660 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, 2661 NULL 2662 }; 2663 2664 static const Property riscv_cpu_properties[] = { 2665 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2666 2667 {.name = "pmu-mask", .info = &prop_pmu_mask}, 2668 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2669 2670 {.name = "mmu", .info = &prop_mmu}, 2671 {.name = "pmp", .info = &prop_pmp}, 2672 2673 {.name = "priv_spec", .info = &prop_priv_spec}, 2674 {.name = "vext_spec", .info = &prop_vext_spec}, 2675 2676 {.name = "vlen", .info = &prop_vlen}, 2677 {.name = "elen", .info = &prop_elen}, 2678 2679 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2680 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2681 {.name = "cboz_blocksize", .info = 
&prop_cboz_blksize}, 2682 2683 {.name = "mvendorid", .info = &prop_mvendorid}, 2684 {.name = "mimpid", .info = &prop_mimpid}, 2685 {.name = "marchid", .info = &prop_marchid}, 2686 2687 #ifndef CONFIG_USER_ONLY 2688 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2689 #endif 2690 2691 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2692 2693 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2694 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2695 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2696 2697 /* 2698 * write_misa() is marked as experimental for now so mark 2699 * it with -x and default to 'false'. 2700 */ 2701 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2702 }; 2703 2704 #if defined(TARGET_RISCV64) 2705 static void rva22u64_profile_cpu_init(Object *obj) 2706 { 2707 rv64i_bare_cpu_init(obj); 2708 2709 RVA22U64.enabled = true; 2710 } 2711 2712 static void rva22s64_profile_cpu_init(Object *obj) 2713 { 2714 rv64i_bare_cpu_init(obj); 2715 2716 RVA22S64.enabled = true; 2717 } 2718 #endif 2719 2720 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2721 { 2722 RISCVCPU *cpu = RISCV_CPU(cs); 2723 CPURISCVState *env = &cpu->env; 2724 2725 switch (riscv_cpu_mxl(env)) { 2726 case MXL_RV32: 2727 return "riscv:rv32"; 2728 case MXL_RV64: 2729 case MXL_RV128: 2730 return "riscv:rv64"; 2731 default: 2732 g_assert_not_reached(); 2733 } 2734 } 2735 2736 #ifndef CONFIG_USER_ONLY 2737 static int64_t riscv_get_arch_id(CPUState *cs) 2738 { 2739 RISCVCPU *cpu = RISCV_CPU(cs); 2740 2741 return cpu->env.mhartid; 2742 } 2743 2744 #include "hw/core/sysemu-cpu-ops.h" 2745 2746 static const struct SysemuCPUOps riscv_sysemu_ops = { 2747 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2748 .write_elf64_note = riscv_cpu_write_elf64_note, 2749 .write_elf32_note = riscv_cpu_write_elf32_note, 2750 .legacy_vmsd = &vmstate_riscv_cpu, 2751 }; 
2752 #endif 2753 2754 static void riscv_cpu_common_class_init(ObjectClass *c, void *data) 2755 { 2756 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2757 CPUClass *cc = CPU_CLASS(c); 2758 DeviceClass *dc = DEVICE_CLASS(c); 2759 ResettableClass *rc = RESETTABLE_CLASS(c); 2760 2761 device_class_set_parent_realize(dc, riscv_cpu_realize, 2762 &mcc->parent_realize); 2763 2764 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2765 &mcc->parent_phases); 2766 2767 cc->class_by_name = riscv_cpu_class_by_name; 2768 cc->has_work = riscv_cpu_has_work; 2769 cc->mmu_index = riscv_cpu_mmu_index; 2770 cc->dump_state = riscv_cpu_dump_state; 2771 cc->set_pc = riscv_cpu_set_pc; 2772 cc->get_pc = riscv_cpu_get_pc; 2773 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2774 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2775 cc->gdb_stop_before_watchpoint = true; 2776 cc->disas_set_info = riscv_cpu_disas_set_info; 2777 #ifndef CONFIG_USER_ONLY 2778 cc->sysemu_ops = &riscv_sysemu_ops; 2779 cc->get_arch_id = riscv_get_arch_id; 2780 #endif 2781 cc->gdb_arch_name = riscv_gdb_arch_name; 2782 2783 device_class_set_props(dc, riscv_cpu_properties); 2784 } 2785 2786 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2787 { 2788 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2789 2790 mcc->misa_mxl_max = (uint32_t)(uintptr_t)data; 2791 riscv_cpu_validate_misa_mxl(mcc); 2792 } 2793 2794 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 2795 int max_str_len) 2796 { 2797 const RISCVIsaExtData *edata; 2798 char *old = *isa_str; 2799 char *new = *isa_str; 2800 2801 for (edata = isa_edata_arr; edata && edata->name; edata++) { 2802 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2803 new = g_strconcat(old, "_", edata->name, NULL); 2804 g_free(old); 2805 old = new; 2806 } 2807 } 2808 2809 *isa_str = new; 2810 } 2811 2812 char *riscv_isa_string(RISCVCPU *cpu) 2813 { 2814 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2815 int i; 2816 const size_t 
maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 2817 char *isa_str = g_new(char, maxlen); 2818 int xlen = riscv_cpu_max_xlen(mcc); 2819 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 2820 2821 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2822 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2823 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 2824 } 2825 } 2826 *p = '\0'; 2827 if (!cpu->cfg.short_isa_string) { 2828 riscv_isa_string_ext(cpu, &isa_str, maxlen); 2829 } 2830 return isa_str; 2831 } 2832 2833 #ifndef CONFIG_USER_ONLY 2834 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 2835 { 2836 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 2837 char **extensions = g_new(char *, maxlen); 2838 2839 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2840 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2841 extensions[*count] = g_new(char, 2); 2842 snprintf(extensions[*count], 2, "%c", 2843 qemu_tolower(riscv_single_letter_exts[i])); 2844 (*count)++; 2845 } 2846 } 2847 2848 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 2849 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2850 extensions[*count] = g_strdup(edata->name); 2851 (*count)++; 2852 } 2853 } 2854 2855 return extensions; 2856 } 2857 2858 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 2859 { 2860 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2861 const size_t maxlen = sizeof("rv128i"); 2862 g_autofree char *isa_base = g_new(char, maxlen); 2863 g_autofree char *riscv_isa; 2864 char **isa_extensions; 2865 int count = 0; 2866 int xlen = riscv_cpu_max_xlen(mcc); 2867 2868 riscv_isa = riscv_isa_string(cpu); 2869 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 2870 2871 snprintf(isa_base, maxlen, "rv%di", xlen); 2872 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 2873 2874 isa_extensions = 
riscv_isa_extensions_list(cpu, &count); 2875 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 2876 isa_extensions, count); 2877 2878 for (int i = 0; i < count; i++) { 2879 g_free(isa_extensions[i]); 2880 } 2881 2882 g_free(isa_extensions); 2883 } 2884 #endif 2885 2886 #define DEFINE_CPU(type_name, misa_mxl_max, initfn) \ 2887 { \ 2888 .name = (type_name), \ 2889 .parent = TYPE_RISCV_CPU, \ 2890 .instance_init = (initfn), \ 2891 .class_init = riscv_cpu_class_init, \ 2892 .class_data = (void *)(misa_mxl_max) \ 2893 } 2894 2895 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 2896 { \ 2897 .name = (type_name), \ 2898 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 2899 .instance_init = (initfn), \ 2900 .class_init = riscv_cpu_class_init, \ 2901 .class_data = (void *)(misa_mxl_max) \ 2902 } 2903 2904 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 2905 { \ 2906 .name = (type_name), \ 2907 .parent = TYPE_RISCV_VENDOR_CPU, \ 2908 .instance_init = (initfn), \ 2909 .class_init = riscv_cpu_class_init, \ 2910 .class_data = (void *)(misa_mxl_max) \ 2911 } 2912 2913 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 2914 { \ 2915 .name = (type_name), \ 2916 .parent = TYPE_RISCV_BARE_CPU, \ 2917 .instance_init = (initfn), \ 2918 .class_init = riscv_cpu_class_init, \ 2919 .class_data = (void *)(misa_mxl_max) \ 2920 } 2921 2922 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 2923 { \ 2924 .name = (type_name), \ 2925 .parent = TYPE_RISCV_BARE_CPU, \ 2926 .instance_init = (initfn), \ 2927 .class_init = riscv_cpu_class_init, \ 2928 .class_data = (void *)(misa_mxl_max) \ 2929 } 2930 2931 static const TypeInfo riscv_cpu_type_infos[] = { 2932 { 2933 .name = TYPE_RISCV_CPU, 2934 .parent = TYPE_CPU, 2935 .instance_size = sizeof(RISCVCPU), 2936 .instance_align = __alignof(RISCVCPU), 2937 .instance_init = riscv_cpu_init, 2938 .instance_post_init = riscv_cpu_post_init, 2939 .abstract = true, 2940 .class_size = sizeof(RISCVCPUClass), 
2941 .class_init = riscv_cpu_common_class_init, 2942 }, 2943 { 2944 .name = TYPE_RISCV_DYNAMIC_CPU, 2945 .parent = TYPE_RISCV_CPU, 2946 .abstract = true, 2947 }, 2948 { 2949 .name = TYPE_RISCV_VENDOR_CPU, 2950 .parent = TYPE_RISCV_CPU, 2951 .abstract = true, 2952 }, 2953 { 2954 .name = TYPE_RISCV_BARE_CPU, 2955 .parent = TYPE_RISCV_CPU, 2956 .instance_init = riscv_bare_cpu_init, 2957 .abstract = true, 2958 }, 2959 #if defined(TARGET_RISCV32) 2960 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 2961 #elif defined(TARGET_RISCV64) 2962 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 2963 #endif 2964 2965 #if defined(TARGET_RISCV32) || \ 2966 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 2967 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 2968 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 2969 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 2970 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 2971 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 2972 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 2973 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 2974 #endif 2975 2976 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 2977 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 2978 #endif 2979 2980 #if defined(TARGET_RISCV64) 2981 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 2982 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 2983 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 2984 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 2985 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 2986 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, 
rv64_veyron_v1_cpu_init), 2987 #ifdef CONFIG_TCG 2988 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 2989 #endif /* CONFIG_TCG */ 2990 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 2991 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 2992 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 2993 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 2994 #endif /* TARGET_RISCV64 */ 2995 }; 2996 2997 DEFINE_TYPES(riscv_cpu_type_infos) 2998