/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)  ((x) ^ 7)
#else
#define BYTE(x)  (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
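 *
 * For example (illustrative only, not a string produced by this file),
 * a riscv,isa string obeying these rules would look like:
 *   rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svadu_xtheadba
 * i.e. 'Z' extensions ordered by category and then alphabetically,
 * followed by 'S' extensions, with vendor 'X' extensions last.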
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext_mask = env->misa_ext = ext;
}

int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    return 16 << mcc->misa_mxl_max;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /*
     * Bare CPUs do not default to the max available.
     * Users must set a valid satp_mode in the command
     * line.
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Tenstorrent Ascalon */
static void rv64_tt_ascalon_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
    env->priv_ver = PRIV_VERSION_1_13_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.vlenb = 256 >> 3;
    cpu->cfg.elen = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.rvv_ma_all_1s = true;
    cpu->cfg.rvv_ta_all_1s = true;
    cpu->cfg.misa_w = true;
    cpu->cfg.pmp = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zic64b = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.ext_zicbop = true;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zicond = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zihintntl = true;
    cpu->cfg.ext_zihintpause = true;
    cpu->cfg.ext_zihpm = true;
    cpu->cfg.ext_zimop = true;
    cpu->cfg.ext_zawrs = true;
    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfbfmin = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.ext_zfhmin = true;
    cpu->cfg.ext_zcb = true;
    cpu->cfg.ext_zcmop = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_zkt = true;
    cpu->cfg.ext_zvbb = true;
    cpu->cfg.ext_zvbc = true;
    cpu->cfg.ext_zvfbfmin = true;
    cpu->cfg.ext_zvfbfwma = true;
    cpu->cfg.ext_zvfh = true;
    cpu->cfg.ext_zvfhmin = true;
    cpu->cfg.ext_zvkng = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svade = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}

#ifdef CONFIG_TCG
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#endif /* CONFIG_TCG */

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#endif /* TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
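             * For example, the hypervisor CSRs listed above (such as
             * CSR_HSTATUS and CSR_VSSTATUS) are expected to be filtered
             * out here whenever the H extension is not enabled.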
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
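    /*
     * On RV32, riscv_cpu_set_pc() above may have stored a sign-extended
     * 32-bit value into env->pc, so mask it back down to 32 bits here.
     */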
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fall back to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
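     *
     * For example, explicitly setting sv39=off while sv48 stays enabled is
     * rejected by the loop below, since a larger mode implies support for
     * the smaller ones.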
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
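     *
     * Note that bare CPUs (see riscv_bare_cpu_init() below) deliberately
     * override this default and leave both extensions disabled.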
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamically translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
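     *
     * For example, assuming the usual RV('A')/RV('C') bit encoding,
     * MISA_INFO_IDX(RVA) == 0 and MISA_INFO_IDX(RVC) == 2.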
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
    MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1596 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1597 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1598 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1599 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1600 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1601 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1602 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1603 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1604 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1605 1606 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1607 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1608 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1609 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1610 1611 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1612 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1613 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1614 1615 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1616 1617 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1618 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1619 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1620 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1621 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1622 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1623 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1624 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1625 1626 /* Vector cryptography extensions */ 1627 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1628 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1629 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false), 1630 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1631 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1632 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1633 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1634 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1635 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1636 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1637 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1638 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1639 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1640 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1641 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1642 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1643 1644 DEFINE_PROP_END_OF_LIST(), 1645 }; 1646 1647 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1648 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1649 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1650 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1651 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1652 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1653 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1654 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1655 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1656 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1657 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1658 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1659 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1660 1661 DEFINE_PROP_END_OF_LIST(), 1662 }; 1663 1664 /* These are experimental so mark with 'x-' */ 1665 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1666 DEFINE_PROP_END_OF_LIST(), 1667 }; 1668 1669 /* 1670 * 'Named features' is the name we give to extensions that we 1671 * don't want to expose to users. They are either immutable 1672 * (always enabled/disable) or they'll vary depending on 1673 * the resulting CPU state. 
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),

    DEFINE_PROP_END_OF_LIST(),
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};

static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t value;
    uint8_t pmu_num;

    visit_type_uint32(v, name, &value, errp);

    if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %x\n",
                          name, cpu->cfg.pmu_mask);
        return;
    }

    pmu_num = ctpop32(value);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmu_mask = value;
}

static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    /* Use a 32-bit type: the default mask does not fit in a uint8_t. */
    uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
RISCV_CPU(obj)->cfg.pmu_mask;

    visit_type_uint8(v, name, &pmu_mask, errp);
}

static const PropertyInfo prop_pmu_mask = {
    .name = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};

static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, "mmu", errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
        priv_version = PRIV_VERSION_1_13_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}

const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    case PRIV_VERSION_1_13_0:
        return PRIV_VER_1_13_0_STR;
    default:
        return NULL;
    }
}
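/*
 * Illustration only, not used by the code in this file: the "priv_spec"
 * property is a string that must match one of the versions handled by
 * priv_spec_from_str() above (e.g. "v1.12.0" for PRIV_VERSION_1_12_0,
 * assuming the PRIV_VER_*_STR definitions in cpu.h). A hedged sketch of
 * setting it programmatically through QOM, with the error handling policy
 * chosen arbitrarily for the example:
 *
 *     Error *err = NULL;
 *
 *     object_property_set_str(OBJECT(cpu), "priv_spec", "v1.12.0", &err);
 *     if (err) {
 *         error_report_err(err);  // unsupported version string, or a
 *                                 // vendor CPU refusing the change
 *     }
 */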
static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    visit_type_str(v, name, &value, errp);

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    visit_type_str(v, name, &value, errp);

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }

    cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    const char *value = VEXT_VER_1_00_0_STR;

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_vext_spec = {
    .name = "vext_spec",
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};

/*
 * The "vlen" property is exposed in bits but stored as cfg.vlenb, i.e.
 * VLEN in bytes (vlen >> 3), so the vendor CPU check and the hint below
 * compare and report the value in bits.
 */
static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.vlenb << 3);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.vlenb = value >> 3;
}

static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_vlen = {
    .name = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};

static void prop_elen_set(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (!is_power_of_2(value)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};

static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp) 2046 { 2047 RISCVCPU *cpu = RISCV_CPU(obj); 2048 uint16_t value; 2049 2050 if (!visit_type_uint16(v, name, &value, errp)) { 2051 return; 2052 } 2053 2054 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2055 cpu_set_prop_err(cpu, name, errp); 2056 error_append_hint(errp, "Current '%s' val: %u\n", 2057 name, cpu->cfg.cbom_blocksize); 2058 return; 2059 } 2060 2061 cpu_option_add_user_setting(name, value); 2062 cpu->cfg.cbom_blocksize = value; 2063 } 2064 2065 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2066 void *opaque, Error **errp) 2067 { 2068 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2069 2070 visit_type_uint16(v, name, &value, errp); 2071 } 2072 2073 static const PropertyInfo prop_cbom_blksize = { 2074 .name = "cbom_blocksize", 2075 .get = prop_cbom_blksize_get, 2076 .set = prop_cbom_blksize_set, 2077 }; 2078 2079 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2080 void *opaque, Error **errp) 2081 { 2082 RISCVCPU *cpu = RISCV_CPU(obj); 2083 uint16_t value; 2084 2085 if (!visit_type_uint16(v, name, &value, errp)) { 2086 return; 2087 } 2088 2089 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2090 cpu_set_prop_err(cpu, name, errp); 2091 error_append_hint(errp, "Current '%s' val: %u\n", 2092 name, cpu->cfg.cbop_blocksize); 2093 return; 2094 } 2095 2096 cpu_option_add_user_setting(name, value); 2097 cpu->cfg.cbop_blocksize = value; 2098 } 2099 2100 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2101 void *opaque, Error **errp) 2102 { 2103 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2104 2105 visit_type_uint16(v, name, &value, errp); 2106 } 2107 2108 static const PropertyInfo prop_cbop_blksize = { 2109 .name = "cbop_blocksize", 2110 .get = prop_cbop_blksize_get, 2111 .set = prop_cbop_blksize_set, 2112 }; 2113 2114 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2115 void *opaque, Error **errp) 2116 { 2117 RISCVCPU *cpu = RISCV_CPU(obj); 2118 uint16_t value; 2119 2120 if (!visit_type_uint16(v, name, &value, errp)) { 2121 return; 2122 } 2123 2124 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2125 cpu_set_prop_err(cpu, name, errp); 2126 error_append_hint(errp, "Current '%s' val: %u\n", 2127 name, cpu->cfg.cboz_blocksize); 2128 return; 2129 } 2130 2131 cpu_option_add_user_setting(name, value); 2132 cpu->cfg.cboz_blocksize = value; 2133 } 2134 2135 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2136 void *opaque, Error **errp) 2137 { 2138 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2139 2140 visit_type_uint16(v, name, &value, errp); 2141 } 2142 2143 static const PropertyInfo prop_cboz_blksize = { 2144 .name = "cboz_blocksize", 2145 .get = prop_cboz_blksize_get, 2146 .set = prop_cboz_blksize_set, 2147 }; 2148 2149 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2150 void *opaque, Error **errp) 2151 { 2152 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2153 RISCVCPU *cpu = RISCV_CPU(obj); 2154 uint32_t prev_val = cpu->cfg.mvendorid; 2155 uint32_t value; 2156 2157 if (!visit_type_uint32(v, name, &value, errp)) { 2158 return; 2159 } 2160 2161 if (!dynamic_cpu && prev_val != value) { 2162 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2163 object_get_typename(obj), prev_val); 2164 return; 2165 } 2166 2167 cpu->cfg.mvendorid = value; 2168 } 2169 2170 static void prop_mvendorid_get(Object *obj, Visitor 
*v, const char *name,
                               void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static const PropertyInfo prop_mvendorid = {
    .name = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};

static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .name = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};
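/*
 * Illustrative sketch only, not code used by this file: the machine ID
 * properties above ("mvendorid", "mimpid", "marchid") can be overridden
 * on dynamic CPUs, while CPUs that are not dynamic (e.g. the vendor CPUs)
 * only accept the value they already have. The values below are
 * placeholders and the use of &error_fatal is an assumption made for the
 * example:
 *
 *     object_property_set_uint(OBJECT(cpu), "mvendorid", 0x1234,
 *                              &error_fatal);
 *     object_property_set_uint(OBJECT(cpu), "marchid", 0x42, &error_fatal);
 *
 * The same properties are typically surfaced on the command line, e.g.
 * "-cpu rv64,marchid=0x42".
 */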
/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};

/*
 * Implied extension rules: each rule states that when the extension
 * identified by .ext is enabled, the MISA bits in .implied_misa_exts and
 * the multi-letter extensions whose config offsets are listed in
 * .implied_multi_exts are enabled as well.
 */
static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcd),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zce),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
        CPU_CFG_OFFSET(ext_zcmt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcf),
    .implied_misa_exts = RVF,
.implied_multi_exts = { 2414 CPU_CFG_OFFSET(ext_zca), 2415 2416 RISCV_IMPLIED_EXTS_RULE_END 2417 }, 2418 }; 2419 2420 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2421 .ext = CPU_CFG_OFFSET(ext_zcmp), 2422 .implied_multi_exts = { 2423 CPU_CFG_OFFSET(ext_zca), 2424 2425 RISCV_IMPLIED_EXTS_RULE_END 2426 }, 2427 }; 2428 2429 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2430 .ext = CPU_CFG_OFFSET(ext_zcmt), 2431 .implied_multi_exts = { 2432 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2433 2434 RISCV_IMPLIED_EXTS_RULE_END 2435 }, 2436 }; 2437 2438 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2439 .ext = CPU_CFG_OFFSET(ext_zdinx), 2440 .implied_multi_exts = { 2441 CPU_CFG_OFFSET(ext_zfinx), 2442 2443 RISCV_IMPLIED_EXTS_RULE_END 2444 }, 2445 }; 2446 2447 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2448 .ext = CPU_CFG_OFFSET(ext_zfa), 2449 .implied_misa_exts = RVF, 2450 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2451 }; 2452 2453 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2454 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2455 .implied_misa_exts = RVF, 2456 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2457 }; 2458 2459 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2460 .ext = CPU_CFG_OFFSET(ext_zfh), 2461 .implied_multi_exts = { 2462 CPU_CFG_OFFSET(ext_zfhmin), 2463 2464 RISCV_IMPLIED_EXTS_RULE_END 2465 }, 2466 }; 2467 2468 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2469 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2470 .implied_misa_exts = RVF, 2471 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2472 }; 2473 2474 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2475 .ext = CPU_CFG_OFFSET(ext_zfinx), 2476 .implied_multi_exts = { 2477 CPU_CFG_OFFSET(ext_zicsr), 2478 2479 RISCV_IMPLIED_EXTS_RULE_END 2480 }, 2481 }; 2482 2483 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2484 .ext = CPU_CFG_OFFSET(ext_zhinx), 2485 .implied_multi_exts = { 2486 CPU_CFG_OFFSET(ext_zhinxmin), 2487 2488 RISCV_IMPLIED_EXTS_RULE_END 2489 }, 2490 }; 2491 2492 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2493 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2494 .implied_multi_exts = { 2495 CPU_CFG_OFFSET(ext_zfinx), 2496 2497 RISCV_IMPLIED_EXTS_RULE_END 2498 }, 2499 }; 2500 2501 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2502 .ext = CPU_CFG_OFFSET(ext_zicntr), 2503 .implied_multi_exts = { 2504 CPU_CFG_OFFSET(ext_zicsr), 2505 2506 RISCV_IMPLIED_EXTS_RULE_END 2507 }, 2508 }; 2509 2510 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2511 .ext = CPU_CFG_OFFSET(ext_zihpm), 2512 .implied_multi_exts = { 2513 CPU_CFG_OFFSET(ext_zicsr), 2514 2515 RISCV_IMPLIED_EXTS_RULE_END 2516 }, 2517 }; 2518 2519 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2520 .ext = CPU_CFG_OFFSET(ext_zk), 2521 .implied_multi_exts = { 2522 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2523 CPU_CFG_OFFSET(ext_zkt), 2524 2525 RISCV_IMPLIED_EXTS_RULE_END 2526 }, 2527 }; 2528 2529 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2530 .ext = CPU_CFG_OFFSET(ext_zkn), 2531 .implied_multi_exts = { 2532 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2533 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2534 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2535 2536 RISCV_IMPLIED_EXTS_RULE_END 2537 }, 2538 }; 2539 2540 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2541 .ext = CPU_CFG_OFFSET(ext_zks), 2542 .implied_multi_exts = { 2543 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2544 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2545 CPU_CFG_OFFSET(ext_zksh), 2546 2547 
RISCV_IMPLIED_EXTS_RULE_END 2548 }, 2549 }; 2550 2551 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2552 .ext = CPU_CFG_OFFSET(ext_zvbb), 2553 .implied_multi_exts = { 2554 CPU_CFG_OFFSET(ext_zvkb), 2555 2556 RISCV_IMPLIED_EXTS_RULE_END 2557 }, 2558 }; 2559 2560 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2561 .ext = CPU_CFG_OFFSET(ext_zve32f), 2562 .implied_misa_exts = RVF, 2563 .implied_multi_exts = { 2564 CPU_CFG_OFFSET(ext_zve32x), 2565 2566 RISCV_IMPLIED_EXTS_RULE_END 2567 }, 2568 }; 2569 2570 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2571 .ext = CPU_CFG_OFFSET(ext_zve32x), 2572 .implied_multi_exts = { 2573 CPU_CFG_OFFSET(ext_zicsr), 2574 2575 RISCV_IMPLIED_EXTS_RULE_END 2576 }, 2577 }; 2578 2579 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2580 .ext = CPU_CFG_OFFSET(ext_zve64d), 2581 .implied_misa_exts = RVD, 2582 .implied_multi_exts = { 2583 CPU_CFG_OFFSET(ext_zve64f), 2584 2585 RISCV_IMPLIED_EXTS_RULE_END 2586 }, 2587 }; 2588 2589 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2590 .ext = CPU_CFG_OFFSET(ext_zve64f), 2591 .implied_misa_exts = RVF, 2592 .implied_multi_exts = { 2593 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2594 2595 RISCV_IMPLIED_EXTS_RULE_END 2596 }, 2597 }; 2598 2599 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2600 .ext = CPU_CFG_OFFSET(ext_zve64x), 2601 .implied_multi_exts = { 2602 CPU_CFG_OFFSET(ext_zve32x), 2603 2604 RISCV_IMPLIED_EXTS_RULE_END 2605 }, 2606 }; 2607 2608 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2609 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2610 .implied_multi_exts = { 2611 CPU_CFG_OFFSET(ext_zve32f), 2612 2613 RISCV_IMPLIED_EXTS_RULE_END 2614 }, 2615 }; 2616 2617 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2618 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2619 .implied_multi_exts = { 2620 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2621 2622 RISCV_IMPLIED_EXTS_RULE_END 2623 }, 2624 }; 2625 2626 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2627 .ext = CPU_CFG_OFFSET(ext_zvfh), 2628 .implied_multi_exts = { 2629 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2630 2631 RISCV_IMPLIED_EXTS_RULE_END 2632 }, 2633 }; 2634 2635 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2636 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2637 .implied_multi_exts = { 2638 CPU_CFG_OFFSET(ext_zve32f), 2639 2640 RISCV_IMPLIED_EXTS_RULE_END 2641 }, 2642 }; 2643 2644 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2645 .ext = CPU_CFG_OFFSET(ext_zvkn), 2646 .implied_multi_exts = { 2647 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2648 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2649 2650 RISCV_IMPLIED_EXTS_RULE_END 2651 }, 2652 }; 2653 2654 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2655 .ext = CPU_CFG_OFFSET(ext_zvknc), 2656 .implied_multi_exts = { 2657 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2658 2659 RISCV_IMPLIED_EXTS_RULE_END 2660 }, 2661 }; 2662 2663 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2664 .ext = CPU_CFG_OFFSET(ext_zvkng), 2665 .implied_multi_exts = { 2666 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2667 2668 RISCV_IMPLIED_EXTS_RULE_END 2669 }, 2670 }; 2671 2672 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2673 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2674 .implied_multi_exts = { 2675 CPU_CFG_OFFSET(ext_zve64x), 2676 2677 RISCV_IMPLIED_EXTS_RULE_END 2678 }, 2679 }; 2680 2681 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2682 .ext = CPU_CFG_OFFSET(ext_zvks), 2683 .implied_multi_exts = { 2684 CPU_CFG_OFFSET(ext_zvksed), 
CPU_CFG_OFFSET(ext_zvksh), 2685 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2686 2687 RISCV_IMPLIED_EXTS_RULE_END 2688 }, 2689 }; 2690 2691 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2692 .ext = CPU_CFG_OFFSET(ext_zvksc), 2693 .implied_multi_exts = { 2694 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2695 2696 RISCV_IMPLIED_EXTS_RULE_END 2697 }, 2698 }; 2699 2700 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2701 .ext = CPU_CFG_OFFSET(ext_zvksg), 2702 .implied_multi_exts = { 2703 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2704 2705 RISCV_IMPLIED_EXTS_RULE_END 2706 }, 2707 }; 2708 2709 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2710 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2711 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2712 }; 2713 2714 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2715 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2716 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2717 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2718 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2719 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2720 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2721 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2722 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2723 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2724 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2725 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2726 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, 2727 NULL 2728 }; 2729 2730 static const Property riscv_cpu_properties[] = { 2731 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2732 2733 {.name = "pmu-mask", .info = &prop_pmu_mask}, 2734 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2735 2736 {.name = "mmu", .info = &prop_mmu}, 2737 {.name = "pmp", .info = &prop_pmp}, 2738 2739 {.name = "priv_spec", .info = &prop_priv_spec}, 2740 {.name = "vext_spec", .info = &prop_vext_spec}, 2741 2742 {.name = "vlen", .info = &prop_vlen}, 2743 {.name = "elen", .info = &prop_elen}, 2744 2745 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2746 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2747 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2748 2749 {.name = "mvendorid", .info = &prop_mvendorid}, 2750 {.name = "mimpid", .info = &prop_mimpid}, 2751 {.name = "marchid", .info = &prop_marchid}, 2752 2753 #ifndef CONFIG_USER_ONLY 2754 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2755 #endif 2756 2757 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2758 2759 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2760 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2761 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2762 2763 /* 2764 * write_misa() is marked as experimental for now so mark 2765 * it with -x and default to 'false'. 
2766 */ 2767 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2768 DEFINE_PROP_END_OF_LIST(), 2769 }; 2770 2771 #if defined(TARGET_RISCV64) 2772 static void rva22u64_profile_cpu_init(Object *obj) 2773 { 2774 rv64i_bare_cpu_init(obj); 2775 2776 RVA22U64.enabled = true; 2777 } 2778 2779 static void rva22s64_profile_cpu_init(Object *obj) 2780 { 2781 rv64i_bare_cpu_init(obj); 2782 2783 RVA22S64.enabled = true; 2784 } 2785 #endif 2786 2787 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2788 { 2789 RISCVCPU *cpu = RISCV_CPU(cs); 2790 CPURISCVState *env = &cpu->env; 2791 2792 switch (riscv_cpu_mxl(env)) { 2793 case MXL_RV32: 2794 return "riscv:rv32"; 2795 case MXL_RV64: 2796 case MXL_RV128: 2797 return "riscv:rv64"; 2798 default: 2799 g_assert_not_reached(); 2800 } 2801 } 2802 2803 #ifndef CONFIG_USER_ONLY 2804 static int64_t riscv_get_arch_id(CPUState *cs) 2805 { 2806 RISCVCPU *cpu = RISCV_CPU(cs); 2807 2808 return cpu->env.mhartid; 2809 } 2810 2811 #include "hw/core/sysemu-cpu-ops.h" 2812 2813 static const struct SysemuCPUOps riscv_sysemu_ops = { 2814 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2815 .write_elf64_note = riscv_cpu_write_elf64_note, 2816 .write_elf32_note = riscv_cpu_write_elf32_note, 2817 .legacy_vmsd = &vmstate_riscv_cpu, 2818 }; 2819 #endif 2820 2821 static void riscv_cpu_common_class_init(ObjectClass *c, void *data) 2822 { 2823 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2824 CPUClass *cc = CPU_CLASS(c); 2825 DeviceClass *dc = DEVICE_CLASS(c); 2826 ResettableClass *rc = RESETTABLE_CLASS(c); 2827 2828 device_class_set_parent_realize(dc, riscv_cpu_realize, 2829 &mcc->parent_realize); 2830 2831 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2832 &mcc->parent_phases); 2833 2834 cc->class_by_name = riscv_cpu_class_by_name; 2835 cc->has_work = riscv_cpu_has_work; 2836 cc->mmu_index = riscv_cpu_mmu_index; 2837 cc->dump_state = riscv_cpu_dump_state; 2838 cc->set_pc = riscv_cpu_set_pc; 2839 cc->get_pc = riscv_cpu_get_pc; 2840 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2841 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2842 cc->gdb_stop_before_watchpoint = true; 2843 cc->disas_set_info = riscv_cpu_disas_set_info; 2844 #ifndef CONFIG_USER_ONLY 2845 cc->sysemu_ops = &riscv_sysemu_ops; 2846 cc->get_arch_id = riscv_get_arch_id; 2847 #endif 2848 cc->gdb_arch_name = riscv_gdb_arch_name; 2849 2850 device_class_set_props(dc, riscv_cpu_properties); 2851 } 2852 2853 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2854 { 2855 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2856 2857 mcc->misa_mxl_max = (uint32_t)(uintptr_t)data; 2858 riscv_cpu_validate_misa_mxl(mcc); 2859 } 2860 2861 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 2862 int max_str_len) 2863 { 2864 const RISCVIsaExtData *edata; 2865 char *old = *isa_str; 2866 char *new = *isa_str; 2867 2868 for (edata = isa_edata_arr; edata && edata->name; edata++) { 2869 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2870 new = g_strconcat(old, "_", edata->name, NULL); 2871 g_free(old); 2872 old = new; 2873 } 2874 } 2875 2876 *isa_str = new; 2877 } 2878 2879 char *riscv_isa_string(RISCVCPU *cpu) 2880 { 2881 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2882 int i; 2883 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 2884 char *isa_str = g_new(char, maxlen); 2885 int xlen = riscv_cpu_max_xlen(mcc); 2886 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 2887 2888 for (i = 0; i < sizeof(riscv_single_letter_exts) - 
1; i++) { 2889 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2890 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 2891 } 2892 } 2893 *p = '\0'; 2894 if (!cpu->cfg.short_isa_string) { 2895 riscv_isa_string_ext(cpu, &isa_str, maxlen); 2896 } 2897 return isa_str; 2898 } 2899 2900 #ifndef CONFIG_USER_ONLY 2901 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 2902 { 2903 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 2904 char **extensions = g_new(char *, maxlen); 2905 2906 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2907 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2908 extensions[*count] = g_new(char, 2); 2909 snprintf(extensions[*count], 2, "%c", 2910 qemu_tolower(riscv_single_letter_exts[i])); 2911 (*count)++; 2912 } 2913 } 2914 2915 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 2916 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2917 extensions[*count] = g_strdup(edata->name); 2918 (*count)++; 2919 } 2920 } 2921 2922 return extensions; 2923 } 2924 2925 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 2926 { 2927 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2928 const size_t maxlen = sizeof("rv128i"); 2929 g_autofree char *isa_base = g_new(char, maxlen); 2930 g_autofree char *riscv_isa; 2931 char **isa_extensions; 2932 int count = 0; 2933 int xlen = riscv_cpu_max_xlen(mcc); 2934 2935 riscv_isa = riscv_isa_string(cpu); 2936 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 2937 2938 snprintf(isa_base, maxlen, "rv%di", xlen); 2939 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 2940 2941 isa_extensions = riscv_isa_extensions_list(cpu, &count); 2942 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 2943 isa_extensions, count); 2944 2945 for (int i = 0; i < count; i++) { 2946 g_free(isa_extensions[i]); 2947 } 2948 2949 g_free(isa_extensions); 2950 } 2951 #endif 2952 2953 #define DEFINE_CPU(type_name, misa_mxl_max, initfn) \ 2954 { \ 2955 .name = (type_name), \ 2956 .parent = TYPE_RISCV_CPU, \ 2957 .instance_init = (initfn), \ 2958 .class_init = riscv_cpu_class_init, \ 2959 .class_data = (void *)(misa_mxl_max) \ 2960 } 2961 2962 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 2963 { \ 2964 .name = (type_name), \ 2965 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 2966 .instance_init = (initfn), \ 2967 .class_init = riscv_cpu_class_init, \ 2968 .class_data = (void *)(misa_mxl_max) \ 2969 } 2970 2971 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 2972 { \ 2973 .name = (type_name), \ 2974 .parent = TYPE_RISCV_VENDOR_CPU, \ 2975 .instance_init = (initfn), \ 2976 .class_init = riscv_cpu_class_init, \ 2977 .class_data = (void *)(misa_mxl_max) \ 2978 } 2979 2980 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 2981 { \ 2982 .name = (type_name), \ 2983 .parent = TYPE_RISCV_BARE_CPU, \ 2984 .instance_init = (initfn), \ 2985 .class_init = riscv_cpu_class_init, \ 2986 .class_data = (void *)(misa_mxl_max) \ 2987 } 2988 2989 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 2990 { \ 2991 .name = (type_name), \ 2992 .parent = TYPE_RISCV_BARE_CPU, \ 2993 .instance_init = (initfn), \ 2994 .class_init = riscv_cpu_class_init, \ 2995 .class_data = (void *)(misa_mxl_max) \ 2996 } 2997 2998 static const TypeInfo riscv_cpu_type_infos[] = { 2999 { 3000 .name = TYPE_RISCV_CPU, 3001 .parent = TYPE_CPU, 3002 .instance_size = sizeof(RISCVCPU), 3003 .instance_align = 
__alignof(RISCVCPU), 3004 .instance_init = riscv_cpu_init, 3005 .instance_post_init = riscv_cpu_post_init, 3006 .abstract = true, 3007 .class_size = sizeof(RISCVCPUClass), 3008 .class_init = riscv_cpu_common_class_init, 3009 }, 3010 { 3011 .name = TYPE_RISCV_DYNAMIC_CPU, 3012 .parent = TYPE_RISCV_CPU, 3013 .abstract = true, 3014 }, 3015 { 3016 .name = TYPE_RISCV_VENDOR_CPU, 3017 .parent = TYPE_RISCV_CPU, 3018 .abstract = true, 3019 }, 3020 { 3021 .name = TYPE_RISCV_BARE_CPU, 3022 .parent = TYPE_RISCV_CPU, 3023 .instance_init = riscv_bare_cpu_init, 3024 .abstract = true, 3025 }, 3026 #if defined(TARGET_RISCV32) 3027 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3028 #elif defined(TARGET_RISCV64) 3029 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3030 #endif 3031 3032 #if defined(TARGET_RISCV32) || \ 3033 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3034 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3035 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3036 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3037 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3038 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3039 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 3040 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 3041 #endif 3042 3043 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3044 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 3045 #endif 3046 3047 #if defined(TARGET_RISCV64) 3048 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 3049 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 3050 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 3051 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 3052 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 3053 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), 3054 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 3055 #ifdef CONFIG_TCG 3056 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 3057 #endif /* CONFIG_TCG */ 3058 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 3059 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 3060 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 3061 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 3062 #endif /* TARGET_RISCV64 */ 3063 }; 3064 3065 DEFINE_TYPES(riscv_cpu_type_infos) 3066
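/*
 * For reference only (illustrative, not exercised by this file): the ISA
 * description helpers above produce strings of the form "rv<xlen>"
 * followed by the enabled single-letter extensions and then each enabled
 * multi-letter extension separated by '_':
 *
 *     char *isa = riscv_isa_string(cpu);
 *     // e.g. "rv64imafdcvh_zicsr_zifencei_zba_zbb_zbs", depending on
 *     // which extensions are enabled on this CPU
 *     g_free(isa);
 *
 * riscv_isa_write_fdt() exposes the same information to the guest device
 * tree through the "riscv,isa", "riscv,isa-base" (e.g. "rv64i") and
 * "riscv,isa-extensions" properties of the given CPU node.
 */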