/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "system/device_tree.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x) ((x) ^ 7)
#else
#define BYTE(x) (x)
#endif

bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
    ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
    ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
    ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
    ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
    ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
    ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
    ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    { },
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
"supervisor_ecall", 296 "hypervisor_ecall", 297 "machine_ecall", 298 "exec_page_fault", 299 "load_page_fault", 300 "reserved", 301 "store_page_fault", 302 "reserved", 303 "reserved", 304 "reserved", 305 "reserved", 306 "guest_exec_page_fault", 307 "guest_load_page_fault", 308 "reserved", 309 "guest_store_page_fault", 310 }; 311 312 static const char * const riscv_intr_names[] = { 313 "u_software", 314 "s_software", 315 "vs_software", 316 "m_software", 317 "u_timer", 318 "s_timer", 319 "vs_timer", 320 "m_timer", 321 "u_external", 322 "s_external", 323 "vs_external", 324 "m_external", 325 "reserved", 326 "reserved", 327 "reserved", 328 "reserved" 329 }; 330 331 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 332 { 333 if (async) { 334 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 335 riscv_intr_names[cause] : "(unknown)"; 336 } else { 337 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 338 riscv_excp_names[cause] : "(unknown)"; 339 } 340 } 341 342 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 343 { 344 env->misa_ext_mask = env->misa_ext = ext; 345 } 346 347 int riscv_cpu_max_xlen(RISCVCPUClass *mcc) 348 { 349 return 16 << mcc->misa_mxl_max; 350 } 351 352 #ifndef CONFIG_USER_ONLY 353 static uint8_t satp_mode_from_str(const char *satp_mode_str) 354 { 355 if (!strncmp(satp_mode_str, "mbare", 5)) { 356 return VM_1_10_MBARE; 357 } 358 359 if (!strncmp(satp_mode_str, "sv32", 4)) { 360 return VM_1_10_SV32; 361 } 362 363 if (!strncmp(satp_mode_str, "sv39", 4)) { 364 return VM_1_10_SV39; 365 } 366 367 if (!strncmp(satp_mode_str, "sv48", 4)) { 368 return VM_1_10_SV48; 369 } 370 371 if (!strncmp(satp_mode_str, "sv57", 4)) { 372 return VM_1_10_SV57; 373 } 374 375 if (!strncmp(satp_mode_str, "sv64", 4)) { 376 return VM_1_10_SV64; 377 } 378 379 g_assert_not_reached(); 380 } 381 382 uint8_t satp_mode_max_from_map(uint32_t map) 383 { 384 /* 385 * 'map = 0' will make us return (31 - 32), which C will 386 * happily overflow to UINT_MAX. There's no good result to 387 * return if 'map = 0' (e.g. returning 0 will be ambiguous 388 * with the result for 'map = 1'). 389 * 390 * Assert out if map = 0. Callers will have to deal with 391 * it outside of this function. 392 */ 393 g_assert(map > 0); 394 395 /* map here has at least one bit set, so no problem with clz */ 396 return 31 - __builtin_clz(map); 397 } 398 399 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 400 { 401 if (is_32_bit) { 402 switch (satp_mode) { 403 case VM_1_10_SV32: 404 return "sv32"; 405 case VM_1_10_MBARE: 406 return "none"; 407 } 408 } else { 409 switch (satp_mode) { 410 case VM_1_10_SV64: 411 return "sv64"; 412 case VM_1_10_SV57: 413 return "sv57"; 414 case VM_1_10_SV48: 415 return "sv48"; 416 case VM_1_10_SV39: 417 return "sv39"; 418 case VM_1_10_MBARE: 419 return "none"; 420 } 421 } 422 423 g_assert_not_reached(); 424 } 425 426 static void set_satp_mode_max_supported(RISCVCPU *cpu, 427 uint8_t satp_mode) 428 { 429 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 430 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 431 432 for (int i = 0; i <= satp_mode; ++i) { 433 if (valid_vm[i]) { 434 cpu->cfg.satp_mode.supported |= (1 << i); 435 } 436 } 437 } 438 439 /* Set the satp mode to the max supported */ 440 static void set_satp_mode_default_map(RISCVCPU *cpu) 441 { 442 /* 443 * Bare CPUs do not default to the max available. 444 * Users must set a valid satp_mode in the command 445 * line. 
     */
    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
        warn_report("No satp mode set. Defaulting to 'bare'");
        cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
        return;
    }

    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
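    /* Assumed reading of the two blocksize fields below: Zicbom/Zicboz operate on 64-byte blocks */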
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Tenstorrent Ascalon */
static void rv64_tt_ascalon_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
    env->priv_ver = PRIV_VERSION_1_13_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.vlenb = 256 >> 3;
    cpu->cfg.elen = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
    cpu->cfg.rvv_ma_all_1s = true;
    cpu->cfg.rvv_ta_all_1s = true;
    cpu->cfg.misa_w = true;
    cpu->cfg.pmp = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zic64b = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.ext_zicbop = true;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zicond = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zihintntl = true;
    cpu->cfg.ext_zihintpause = true;
    cpu->cfg.ext_zihpm = true;
    cpu->cfg.ext_zimop = true;
    cpu->cfg.ext_zawrs = true;
    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfbfmin = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.ext_zfhmin = true;
    cpu->cfg.ext_zcb = true;
    cpu->cfg.ext_zcmop = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_zkt = true;
    cpu->cfg.ext_zvbb = true;
    cpu->cfg.ext_zvbc = true;
    cpu->cfg.ext_zvfbfmin = true;
    cpu->cfg.ext_zvfbfwma = true;
    cpu->cfg.ext_zvfh = true;
    cpu->cfg.ext_zvfhmin = true;
    cpu->cfg.ext_zvkng = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svade = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}

static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbkb = true;
    cpu->cfg.ext_zbkc = true;
    cpu->cfg.ext_zbkx = true;
    cpu->cfg.ext_zknd = true;
    cpu->cfg.ext_zkne = true;
    cpu->cfg.ext_zknh = true;
    cpu->cfg.ext_zksed = true;
    cpu->cfg.ext_zksh = true;
    cpu->cfg.ext_svinval = true;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif
}

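/*
 * Note (descriptive comment, not in the upstream file): the rv128 base CPU
 * below is only compiled in when TCG support is built (CONFIG_TCG), and it
 * additionally refuses to start under multi-threaded TCG; see the
 * qemu_tcg_mttcg_enabled() check in rv128_base_cpu_init().
 */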
#ifdef CONFIG_TCG
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#endif /* CONFIG_TCG */

static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv64e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}

#endif /* !TARGET_RISCV64 */

#if defined(TARGET_RISCV32) || \
    (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))

static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;

    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);
}

static void rv32e_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVE);
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        target_ulong val = 0;
        RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
        if (res == RISCV_EXCP_NONE) {
            qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                         csr_ops[CSR_FCSR].name, val);
        }
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#else
    env->priv = PRV_U;
    env->senvcfg = 0;
    env->menvcfg = 0;
#endif

    /* on reset elp is clear */
    env->elp = false;
    /* on reset ssp is set to 0 */
    env->ssp = 0;

    env->xl = riscv_cpu_mxl(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);
    /* Default NaN value: sign bit clear, frac msb set */
    set_float_default_nan_pattern(0b01000000, &env->fp_status);
    env->vill = true;

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (cpu->cfg.ext_smrnmi) {
        env->rnmip = 0;
        env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
                    satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}

static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
    riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
    qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
                            "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Bare CPUs do not inherit the timer and performance
     * counters from the parent class (see riscv_cpu_init()
     * for info on why the parent enables them).
     *
     * Users have to explicitly enable these counters for
     * bare CPUs.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};

static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
    MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
    MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
    MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
    MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
    MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    { },
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    { },
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),

    { },
};

/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disabled) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
    MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
    MULTI_EXT_CFG_BOOL("sha", ext_sha, true),

    { },
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    { },
};

static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
    }

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        pmu_mask = 0;
    } else {
        pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
    cpu->cfg.pmu_mask = pmu_mask;
    cpu_option_add_user_setting("pmu-mask", pmu_mask);
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};

static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t value;
    uint8_t pmu_num;

    visit_type_uint32(v, name, &value, errp);

    if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %x\n",
                          name, cpu->cfg.pmu_mask);
        return;
    }

    pmu_num = ctpop32(value);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

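    /*
     * Record the explicit user setting in general_user_opts so that later
     * code can query it via riscv_cpu_option_set() (descriptive comment
     * added here, not present upstream).
     */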
cpu_option_add_user_setting(name, value); 1830 cpu->cfg.pmu_mask = value; 1831 } 1832 1833 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1834 void *opaque, Error **errp) 1835 { 1836 uint32_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1837 1838 visit_type_uint32(v, name, &pmu_mask, errp); 1839 } 1840 1841 static const PropertyInfo prop_pmu_mask = { 1842 .name = "pmu-mask", 1843 .get = prop_pmu_mask_get, 1844 .set = prop_pmu_mask_set, 1845 }; 1846 1847 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1848 void *opaque, Error **errp) 1849 { 1850 RISCVCPU *cpu = RISCV_CPU(obj); 1851 bool value; 1852 1853 if (!visit_type_bool(v, name, &value, errp)) { return; } 1854 1855 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1856 cpu_set_prop_err(cpu, "mmu", errp); 1857 return; 1858 } 1859 1860 cpu_option_add_user_setting(name, value); 1861 cpu->cfg.mmu = value; 1862 } 1863 1864 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1865 void *opaque, Error **errp) 1866 { 1867 bool value = RISCV_CPU(obj)->cfg.mmu; 1868 1869 visit_type_bool(v, name, &value, errp); 1870 } 1871 1872 static const PropertyInfo prop_mmu = { 1873 .name = "mmu", 1874 .get = prop_mmu_get, 1875 .set = prop_mmu_set, 1876 }; 1877 1878 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1879 void *opaque, Error **errp) 1880 { 1881 RISCVCPU *cpu = RISCV_CPU(obj); 1882 bool value; 1883 1884 if (!visit_type_bool(v, name, &value, errp)) { return; } 1885 1886 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1887 cpu_set_prop_err(cpu, name, errp); 1888 return; 1889 } 1890 1891 cpu_option_add_user_setting(name, value); 1892 cpu->cfg.pmp = value; 1893 } 1894 1895 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1896 void *opaque, Error **errp) 1897 { 1898 bool value = RISCV_CPU(obj)->cfg.pmp; 1899 1900 visit_type_bool(v, name, &value, errp); 1901 } 1902 1903 static const PropertyInfo prop_pmp = { 1904 .name = "pmp", 1905 .get = prop_pmp_get, 1906 .set = prop_pmp_set, 1907 }; 1908 1909 static int priv_spec_from_str(const char *priv_spec_str) 1910 { 1911 int priv_version = -1; 1912 1913 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1914 priv_version = PRIV_VERSION_1_13_0; 1915 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1916 priv_version = PRIV_VERSION_1_12_0; 1917 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1918 priv_version = PRIV_VERSION_1_11_0; 1919 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1920 priv_version = PRIV_VERSION_1_10_0; 1921 } 1922 1923 return priv_version; 1924 } 1925 1926 const char *priv_spec_to_str(int priv_version) 1927 { 1928 switch (priv_version) { 1929 case PRIV_VERSION_1_10_0: 1930 return PRIV_VER_1_10_0_STR; 1931 case PRIV_VERSION_1_11_0: 1932 return PRIV_VER_1_11_0_STR; 1933 case PRIV_VERSION_1_12_0: 1934 return PRIV_VER_1_12_0_STR; 1935 case PRIV_VERSION_1_13_0: 1936 return PRIV_VER_1_13_0_STR; 1937 default: 1938 return NULL; 1939 } 1940 } 1941 1942 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1943 void *opaque, Error **errp) 1944 { 1945 RISCVCPU *cpu = RISCV_CPU(obj); 1946 g_autofree char *value = NULL; 1947 int priv_version = -1; 1948 1949 if (!visit_type_str(v, name, &value, errp)) { return; } 1950 1951 priv_version = priv_spec_from_str(value); 1952 if (priv_version < 0) { 1953 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1954 return; 1955 } 1956 1957 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1958 cpu_set_prop_err(cpu,
name, errp); 1959 error_append_hint(errp, "Current '%s' val: %s\n", name, 1960 object_property_get_str(obj, name, NULL)); 1961 return; 1962 } 1963 1964 cpu_option_add_user_setting(name, priv_version); 1965 cpu->env.priv_ver = priv_version; 1966 } 1967 1968 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1969 void *opaque, Error **errp) 1970 { 1971 RISCVCPU *cpu = RISCV_CPU(obj); 1972 const char *value = priv_spec_to_str(cpu->env.priv_ver); 1973 1974 visit_type_str(v, name, (char **)&value, errp); 1975 } 1976 1977 static const PropertyInfo prop_priv_spec = { 1978 .name = "priv_spec", 1979 .get = prop_priv_spec_get, 1980 .set = prop_priv_spec_set, 1981 }; 1982 1983 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 1984 void *opaque, Error **errp) 1985 { 1986 RISCVCPU *cpu = RISCV_CPU(obj); 1987 g_autofree char *value = NULL; 1988 1989 if (!visit_type_str(v, name, &value, errp)) { return; } 1990 1991 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 1992 error_setg(errp, "Unsupported vector spec version '%s'", value); 1993 return; 1994 } 1995 1996 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 1997 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 1998 } 1999 2000 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 2001 void *opaque, Error **errp) 2002 { 2003 const char *value = VEXT_VER_1_00_0_STR; 2004 2005 visit_type_str(v, name, (char **)&value, errp); 2006 } 2007 2008 static const PropertyInfo prop_vext_spec = { 2009 .name = "vext_spec", 2010 .get = prop_vext_spec_get, 2011 .set = prop_vext_spec_set, 2012 }; 2013 2014 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 2015 void *opaque, Error **errp) 2016 { 2017 RISCVCPU *cpu = RISCV_CPU(obj); 2018 uint16_t value; 2019 2020 if (!visit_type_uint16(v, name, &value, errp)) { 2021 return; 2022 } 2023 2024 if (!is_power_of_2(value)) { 2025 error_setg(errp, "Vector extension VLEN must be a power of 2"); 2026 return; 2027 } 2028 2029 if (value != (cpu->cfg.vlenb << 3) && riscv_cpu_is_vendor(obj)) { 2030 cpu_set_prop_err(cpu, name, errp); 2031 error_append_hint(errp, "Current '%s' val: %u\n", 2032 name, cpu->cfg.vlenb << 3); 2033 return; 2034 } 2035 2036 cpu_option_add_user_setting(name, value); 2037 cpu->cfg.vlenb = value >> 3; 2038 } 2039 2040 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 2041 void *opaque, Error **errp) 2042 { 2043 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 2044 2045 visit_type_uint16(v, name, &value, errp); 2046 } 2047 2048 static const PropertyInfo prop_vlen = { 2049 .name = "vlen", 2050 .get = prop_vlen_get, 2051 .set = prop_vlen_set, 2052 }; 2053 2054 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 2055 void *opaque, Error **errp) 2056 { 2057 RISCVCPU *cpu = RISCV_CPU(obj); 2058 uint16_t value; 2059 2060 if (!visit_type_uint16(v, name, &value, errp)) { 2061 return; 2062 } 2063 2064 if (!is_power_of_2(value)) { 2065 error_setg(errp, "Vector extension ELEN must be a power of 2"); 2066 return; 2067 } 2068 2069 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 2070 cpu_set_prop_err(cpu, name, errp); 2071 error_append_hint(errp, "Current '%s' val: %u\n", 2072 name, cpu->cfg.elen); 2073 return; 2074 } 2075 2076 cpu_option_add_user_setting(name, value); 2077 cpu->cfg.elen = value; 2078 } 2079 2080 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 2081 void *opaque, Error **errp) 2082 { 2083 uint16_t value = RISCV_CPU(obj)->cfg.elen; 2084 2085 visit_type_uint16(v, name, &value, errp); 2086 }
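/*
 * A worked example of the vlen/elen handling above, with illustrative
 * command line values: '-cpu rv64,v=true,vlen=256,elen=64' passes the
 * power-of-2 checks, stores vlenb = 256 >> 3 = 32 and elen = 64, and the
 * getter reports vlen back as vlenb << 3 = 256.
 */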
2087 2088 static const PropertyInfo prop_elen = { 2089 .name = "elen", 2090 .get = prop_elen_get, 2091 .set = prop_elen_set, 2092 }; 2093 2094 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 2095 void *opaque, Error **errp) 2096 { 2097 RISCVCPU *cpu = RISCV_CPU(obj); 2098 uint16_t value; 2099 2100 if (!visit_type_uint16(v, name, &value, errp)) { 2101 return; 2102 } 2103 2104 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2105 cpu_set_prop_err(cpu, name, errp); 2106 error_append_hint(errp, "Current '%s' val: %u\n", 2107 name, cpu->cfg.cbom_blocksize); 2108 return; 2109 } 2110 2111 cpu_option_add_user_setting(name, value); 2112 cpu->cfg.cbom_blocksize = value; 2113 } 2114 2115 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2116 void *opaque, Error **errp) 2117 { 2118 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2119 2120 visit_type_uint16(v, name, &value, errp); 2121 } 2122 2123 static const PropertyInfo prop_cbom_blksize = { 2124 .name = "cbom_blocksize", 2125 .get = prop_cbom_blksize_get, 2126 .set = prop_cbom_blksize_set, 2127 }; 2128 2129 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2130 void *opaque, Error **errp) 2131 { 2132 RISCVCPU *cpu = RISCV_CPU(obj); 2133 uint16_t value; 2134 2135 if (!visit_type_uint16(v, name, &value, errp)) { 2136 return; 2137 } 2138 2139 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2140 cpu_set_prop_err(cpu, name, errp); 2141 error_append_hint(errp, "Current '%s' val: %u\n", 2142 name, cpu->cfg.cbop_blocksize); 2143 return; 2144 } 2145 2146 cpu_option_add_user_setting(name, value); 2147 cpu->cfg.cbop_blocksize = value; 2148 } 2149 2150 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2151 void *opaque, Error **errp) 2152 { 2153 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2154 2155 visit_type_uint16(v, name, &value, errp); 2156 } 2157 2158 static const PropertyInfo prop_cbop_blksize = { 2159 .name = "cbop_blocksize", 2160 .get = prop_cbop_blksize_get, 2161 .set = prop_cbop_blksize_set, 2162 }; 2163 2164 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2165 void *opaque, Error **errp) 2166 { 2167 RISCVCPU *cpu = RISCV_CPU(obj); 2168 uint16_t value; 2169 2170 if (!visit_type_uint16(v, name, &value, errp)) { 2171 return; 2172 } 2173 2174 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2175 cpu_set_prop_err(cpu, name, errp); 2176 error_append_hint(errp, "Current '%s' val: %u\n", 2177 name, cpu->cfg.cboz_blocksize); 2178 return; 2179 } 2180 2181 cpu_option_add_user_setting(name, value); 2182 cpu->cfg.cboz_blocksize = value; 2183 } 2184 2185 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2186 void *opaque, Error **errp) 2187 { 2188 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2189 2190 visit_type_uint16(v, name, &value, errp); 2191 } 2192 2193 static const PropertyInfo prop_cboz_blksize = { 2194 .name = "cboz_blocksize", 2195 .get = prop_cboz_blksize_get, 2196 .set = prop_cboz_blksize_set, 2197 }; 2198 2199 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2200 void *opaque, Error **errp) 2201 { 2202 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2203 RISCVCPU *cpu = RISCV_CPU(obj); 2204 uint32_t prev_val = cpu->cfg.mvendorid; 2205 uint32_t value; 2206 2207 if (!visit_type_uint32(v, name, &value, errp)) { 2208 return; 2209 } 2210 2211 if (!dynamic_cpu && prev_val != value) { 2212 
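        /*
         * Only dynamic CPUs (the 'max' and 'base' variants registered
         * with DEFINE_DYNAMIC_CPU below) may override the machine ID
         * registers; vendor and bare CPUs keep their reset mvendorid.
         */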
error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2213 object_get_typename(obj), prev_val); 2214 return; 2215 } 2216 2217 cpu->cfg.mvendorid = value; 2218 } 2219 2220 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2221 void *opaque, Error **errp) 2222 { 2223 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2224 2225 visit_type_uint32(v, name, &value, errp); 2226 } 2227 2228 static const PropertyInfo prop_mvendorid = { 2229 .name = "mvendorid", 2230 .get = prop_mvendorid_get, 2231 .set = prop_mvendorid_set, 2232 }; 2233 2234 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2235 void *opaque, Error **errp) 2236 { 2237 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2238 RISCVCPU *cpu = RISCV_CPU(obj); 2239 uint64_t prev_val = cpu->cfg.mimpid; 2240 uint64_t value; 2241 2242 if (!visit_type_uint64(v, name, &value, errp)) { 2243 return; 2244 } 2245 2246 if (!dynamic_cpu && prev_val != value) { 2247 error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")", 2248 object_get_typename(obj), prev_val); 2249 return; 2250 } 2251 2252 cpu->cfg.mimpid = value; 2253 } 2254 2255 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2256 void *opaque, Error **errp) 2257 { 2258 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2259 2260 visit_type_uint64(v, name, &value, errp); 2261 } 2262 2263 static const PropertyInfo prop_mimpid = { 2264 .name = "mimpid", 2265 .get = prop_mimpid_get, 2266 .set = prop_mimpid_set, 2267 }; 2268 2269 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2270 void *opaque, Error **errp) 2271 { 2272 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2273 RISCVCPU *cpu = RISCV_CPU(obj); 2274 uint64_t prev_val = cpu->cfg.marchid; 2275 uint64_t value, invalid_val; 2276 uint32_t mxlen = 0; 2277 2278 if (!visit_type_uint64(v, name, &value, errp)) { 2279 return; 2280 } 2281 2282 if (!dynamic_cpu && prev_val != value) { 2283 error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")", 2284 object_get_typename(obj), prev_val); 2285 return; 2286 } 2287 2288 switch (riscv_cpu_mxl(&cpu->env)) { 2289 case MXL_RV32: 2290 mxlen = 32; 2291 break; 2292 case MXL_RV64: 2293 case MXL_RV128: 2294 mxlen = 64; 2295 break; 2296 default: 2297 g_assert_not_reached(); 2298 } 2299 2300 invalid_val = 1ULL << (mxlen - 1); 2301 2302 if (value == invalid_val) { 2303 error_setg(errp, "Unable to set marchid with only the MSB (bit %u) " 2304 "set and the remaining bits zero", mxlen - 1); 2305 return; 2306 } 2307 2308 cpu->cfg.marchid = value; 2309 } 2310 2311 static void prop_marchid_get(Object *obj, Visitor *v, const char *name, 2312 void *opaque, Error **errp) 2313 { 2314 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 2315 2316 visit_type_uint64(v, name, &value, errp); 2317 } 2318 2319 static const PropertyInfo prop_marchid = { 2320 .name = "marchid", 2321 .get = prop_marchid_get, 2322 .set = prop_marchid_set, 2323 }; 2324 2325 /* 2326 * RVA22U64 defines some 'named features' that are cache 2327 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa 2328 * and Zicclsm. They are always implemented in TCG and 2329 * don't need to be manually enabled by the profile.
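 *
 * As an illustration, the profile can be selected directly on the
 * command line, e.g. 'qemu-system-riscv64 -cpu rva22u64', which starts
 * from rv64i_bare_cpu_init() and then enables the MISA bits and the
 * extension list declared below.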
2330 */ 2331 static RISCVCPUProfile RVA22U64 = { 2332 .parent = NULL, 2333 .name = "rva22u64", 2334 .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU, 2335 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, 2336 .satp_mode = RISCV_PROFILE_ATTR_UNUSED, 2337 .ext_offsets = { 2338 CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause), 2339 CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb), 2340 CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin), 2341 CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr), 2342 CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom), 2343 CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz), 2344 2345 /* mandatory named features for this profile */ 2346 CPU_CFG_OFFSET(ext_zic64b), 2347 2348 RISCV_PROFILE_EXT_LIST_END 2349 } 2350 }; 2351 2352 /* 2353 * As with RVA22U64, RVA22S64 also defines 'named features'. 2354 * 2355 * Cache-related features that we consider enabled since we don't 2356 * implement cache: Ssccptr 2357 * 2358 * Other named features that we already implement: Sstvecd, Sstvala, 2359 * Sscounterenw 2360 * 2361 * The remaining features/extensions come from RVA22U64. 2362 */ 2363 static RISCVCPUProfile RVA22S64 = { 2364 .parent = &RVA22U64, 2365 .name = "rva22s64", 2366 .misa_ext = RVS, 2367 .priv_spec = PRIV_VERSION_1_12_0, 2368 .satp_mode = VM_1_10_SV39, 2369 .ext_offsets = { 2370 /* rva22s64 exts */ 2371 CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt), 2372 CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade), 2373 2374 RISCV_PROFILE_EXT_LIST_END 2375 } 2376 }; 2377 2378 RISCVCPUProfile *riscv_profiles[] = { 2379 &RVA22U64, 2380 &RVA22S64, 2381 NULL, 2382 }; 2383 2384 static RISCVCPUImpliedExtsRule RVA_IMPLIED = { 2385 .is_misa = true, 2386 .ext = RVA, 2387 .implied_multi_exts = { 2388 CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo), 2389 2390 RISCV_IMPLIED_EXTS_RULE_END 2391 }, 2392 }; 2393 2394 static RISCVCPUImpliedExtsRule RVD_IMPLIED = { 2395 .is_misa = true, 2396 .ext = RVD, 2397 .implied_misa_exts = RVF, 2398 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2399 }; 2400 2401 static RISCVCPUImpliedExtsRule RVF_IMPLIED = { 2402 .is_misa = true, 2403 .ext = RVF, 2404 .implied_multi_exts = { 2405 CPU_CFG_OFFSET(ext_zicsr), 2406 2407 RISCV_IMPLIED_EXTS_RULE_END 2408 }, 2409 }; 2410 2411 static RISCVCPUImpliedExtsRule RVM_IMPLIED = { 2412 .is_misa = true, 2413 .ext = RVM, 2414 .implied_multi_exts = { 2415 CPU_CFG_OFFSET(ext_zmmul), 2416 2417 RISCV_IMPLIED_EXTS_RULE_END 2418 }, 2419 }; 2420 2421 static RISCVCPUImpliedExtsRule RVV_IMPLIED = { 2422 .is_misa = true, 2423 .ext = RVV, 2424 .implied_multi_exts = { 2425 CPU_CFG_OFFSET(ext_zve64d), 2426 2427 RISCV_IMPLIED_EXTS_RULE_END 2428 }, 2429 }; 2430 2431 static RISCVCPUImpliedExtsRule ZCB_IMPLIED = { 2432 .ext = CPU_CFG_OFFSET(ext_zcb), 2433 .implied_multi_exts = { 2434 CPU_CFG_OFFSET(ext_zca), 2435 2436 RISCV_IMPLIED_EXTS_RULE_END 2437 }, 2438 }; 2439 2440 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = { 2441 .ext = CPU_CFG_OFFSET(ext_zcd), 2442 .implied_misa_exts = RVD, 2443 .implied_multi_exts = { 2444 CPU_CFG_OFFSET(ext_zca), 2445 2446 RISCV_IMPLIED_EXTS_RULE_END 2447 }, 2448 }; 2449 2450 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = { 2451 .ext = CPU_CFG_OFFSET(ext_zce), 2452 .implied_multi_exts = { 2453 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp), 2454 CPU_CFG_OFFSET(ext_zcmt), 2455 2456 RISCV_IMPLIED_EXTS_RULE_END 2457 }, 2458 }; 2459 2460 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = { 2461 .ext = CPU_CFG_OFFSET(ext_zcf), 2462 .implied_misa_exts = RVF, 2463
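    /* Zcf adds compressed single-precision FP loads/stores, hence RVF above */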
.implied_multi_exts = { 2464 CPU_CFG_OFFSET(ext_zca), 2465 2466 RISCV_IMPLIED_EXTS_RULE_END 2467 }, 2468 }; 2469 2470 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2471 .ext = CPU_CFG_OFFSET(ext_zcmp), 2472 .implied_multi_exts = { 2473 CPU_CFG_OFFSET(ext_zca), 2474 2475 RISCV_IMPLIED_EXTS_RULE_END 2476 }, 2477 }; 2478 2479 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2480 .ext = CPU_CFG_OFFSET(ext_zcmt), 2481 .implied_multi_exts = { 2482 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2483 2484 RISCV_IMPLIED_EXTS_RULE_END 2485 }, 2486 }; 2487 2488 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2489 .ext = CPU_CFG_OFFSET(ext_zdinx), 2490 .implied_multi_exts = { 2491 CPU_CFG_OFFSET(ext_zfinx), 2492 2493 RISCV_IMPLIED_EXTS_RULE_END 2494 }, 2495 }; 2496 2497 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2498 .ext = CPU_CFG_OFFSET(ext_zfa), 2499 .implied_misa_exts = RVF, 2500 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2501 }; 2502 2503 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2504 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2505 .implied_misa_exts = RVF, 2506 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2507 }; 2508 2509 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2510 .ext = CPU_CFG_OFFSET(ext_zfh), 2511 .implied_multi_exts = { 2512 CPU_CFG_OFFSET(ext_zfhmin), 2513 2514 RISCV_IMPLIED_EXTS_RULE_END 2515 }, 2516 }; 2517 2518 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2519 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2520 .implied_misa_exts = RVF, 2521 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2522 }; 2523 2524 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2525 .ext = CPU_CFG_OFFSET(ext_zfinx), 2526 .implied_multi_exts = { 2527 CPU_CFG_OFFSET(ext_zicsr), 2528 2529 RISCV_IMPLIED_EXTS_RULE_END 2530 }, 2531 }; 2532 2533 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2534 .ext = CPU_CFG_OFFSET(ext_zhinx), 2535 .implied_multi_exts = { 2536 CPU_CFG_OFFSET(ext_zhinxmin), 2537 2538 RISCV_IMPLIED_EXTS_RULE_END 2539 }, 2540 }; 2541 2542 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2543 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2544 .implied_multi_exts = { 2545 CPU_CFG_OFFSET(ext_zfinx), 2546 2547 RISCV_IMPLIED_EXTS_RULE_END 2548 }, 2549 }; 2550 2551 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2552 .ext = CPU_CFG_OFFSET(ext_zicntr), 2553 .implied_multi_exts = { 2554 CPU_CFG_OFFSET(ext_zicsr), 2555 2556 RISCV_IMPLIED_EXTS_RULE_END 2557 }, 2558 }; 2559 2560 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2561 .ext = CPU_CFG_OFFSET(ext_zihpm), 2562 .implied_multi_exts = { 2563 CPU_CFG_OFFSET(ext_zicsr), 2564 2565 RISCV_IMPLIED_EXTS_RULE_END 2566 }, 2567 }; 2568 2569 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2570 .ext = CPU_CFG_OFFSET(ext_zk), 2571 .implied_multi_exts = { 2572 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2573 CPU_CFG_OFFSET(ext_zkt), 2574 2575 RISCV_IMPLIED_EXTS_RULE_END 2576 }, 2577 }; 2578 2579 static RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2580 .ext = CPU_CFG_OFFSET(ext_zkn), 2581 .implied_multi_exts = { 2582 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2583 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2584 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2585 2586 RISCV_IMPLIED_EXTS_RULE_END 2587 }, 2588 }; 2589 2590 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2591 .ext = CPU_CFG_OFFSET(ext_zks), 2592 .implied_multi_exts = { 2593 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2594 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2595 CPU_CFG_OFFSET(ext_zksh), 2596 2597 
RISCV_IMPLIED_EXTS_RULE_END 2598 }, 2599 }; 2600 2601 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2602 .ext = CPU_CFG_OFFSET(ext_zvbb), 2603 .implied_multi_exts = { 2604 CPU_CFG_OFFSET(ext_zvkb), 2605 2606 RISCV_IMPLIED_EXTS_RULE_END 2607 }, 2608 }; 2609 2610 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2611 .ext = CPU_CFG_OFFSET(ext_zve32f), 2612 .implied_misa_exts = RVF, 2613 .implied_multi_exts = { 2614 CPU_CFG_OFFSET(ext_zve32x), 2615 2616 RISCV_IMPLIED_EXTS_RULE_END 2617 }, 2618 }; 2619 2620 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2621 .ext = CPU_CFG_OFFSET(ext_zve32x), 2622 .implied_multi_exts = { 2623 CPU_CFG_OFFSET(ext_zicsr), 2624 2625 RISCV_IMPLIED_EXTS_RULE_END 2626 }, 2627 }; 2628 2629 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2630 .ext = CPU_CFG_OFFSET(ext_zve64d), 2631 .implied_misa_exts = RVD, 2632 .implied_multi_exts = { 2633 CPU_CFG_OFFSET(ext_zve64f), 2634 2635 RISCV_IMPLIED_EXTS_RULE_END 2636 }, 2637 }; 2638 2639 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2640 .ext = CPU_CFG_OFFSET(ext_zve64f), 2641 .implied_misa_exts = RVF, 2642 .implied_multi_exts = { 2643 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2644 2645 RISCV_IMPLIED_EXTS_RULE_END 2646 }, 2647 }; 2648 2649 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2650 .ext = CPU_CFG_OFFSET(ext_zve64x), 2651 .implied_multi_exts = { 2652 CPU_CFG_OFFSET(ext_zve32x), 2653 2654 RISCV_IMPLIED_EXTS_RULE_END 2655 }, 2656 }; 2657 2658 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2659 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2660 .implied_multi_exts = { 2661 CPU_CFG_OFFSET(ext_zve32f), 2662 2663 RISCV_IMPLIED_EXTS_RULE_END 2664 }, 2665 }; 2666 2667 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2668 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2669 .implied_multi_exts = { 2670 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2671 2672 RISCV_IMPLIED_EXTS_RULE_END 2673 }, 2674 }; 2675 2676 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2677 .ext = CPU_CFG_OFFSET(ext_zvfh), 2678 .implied_multi_exts = { 2679 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2680 2681 RISCV_IMPLIED_EXTS_RULE_END 2682 }, 2683 }; 2684 2685 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2686 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2687 .implied_multi_exts = { 2688 CPU_CFG_OFFSET(ext_zve32f), 2689 2690 RISCV_IMPLIED_EXTS_RULE_END 2691 }, 2692 }; 2693 2694 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2695 .ext = CPU_CFG_OFFSET(ext_zvkn), 2696 .implied_multi_exts = { 2697 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2698 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2699 2700 RISCV_IMPLIED_EXTS_RULE_END 2701 }, 2702 }; 2703 2704 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2705 .ext = CPU_CFG_OFFSET(ext_zvknc), 2706 .implied_multi_exts = { 2707 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2708 2709 RISCV_IMPLIED_EXTS_RULE_END 2710 }, 2711 }; 2712 2713 static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2714 .ext = CPU_CFG_OFFSET(ext_zvkng), 2715 .implied_multi_exts = { 2716 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2717 2718 RISCV_IMPLIED_EXTS_RULE_END 2719 }, 2720 }; 2721 2722 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2723 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2724 .implied_multi_exts = { 2725 CPU_CFG_OFFSET(ext_zve64x), 2726 2727 RISCV_IMPLIED_EXTS_RULE_END 2728 }, 2729 }; 2730 2731 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2732 .ext = CPU_CFG_OFFSET(ext_zvks), 2733 .implied_multi_exts = { 2734 CPU_CFG_OFFSET(ext_zvksed), 
CPU_CFG_OFFSET(ext_zvksh), 2735 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2736 2737 RISCV_IMPLIED_EXTS_RULE_END 2738 }, 2739 }; 2740 2741 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2742 .ext = CPU_CFG_OFFSET(ext_zvksc), 2743 .implied_multi_exts = { 2744 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2745 2746 RISCV_IMPLIED_EXTS_RULE_END 2747 }, 2748 }; 2749 2750 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2751 .ext = CPU_CFG_OFFSET(ext_zvksg), 2752 .implied_multi_exts = { 2753 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2754 2755 RISCV_IMPLIED_EXTS_RULE_END 2756 }, 2757 }; 2758 2759 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2760 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2761 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2762 }; 2763 2764 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2765 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2766 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2767 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2768 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2769 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2770 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2771 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2772 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2773 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2774 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2775 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2776 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, 2777 NULL 2778 }; 2779 2780 static const Property riscv_cpu_properties[] = { 2781 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2782 2783 {.name = "pmu-mask", .info = &prop_pmu_mask}, 2784 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2785 2786 {.name = "mmu", .info = &prop_mmu}, 2787 {.name = "pmp", .info = &prop_pmp}, 2788 2789 {.name = "priv_spec", .info = &prop_priv_spec}, 2790 {.name = "vext_spec", .info = &prop_vext_spec}, 2791 2792 {.name = "vlen", .info = &prop_vlen}, 2793 {.name = "elen", .info = &prop_elen}, 2794 2795 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2796 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2797 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2798 2799 {.name = "mvendorid", .info = &prop_mvendorid}, 2800 {.name = "mimpid", .info = &prop_mimpid}, 2801 {.name = "marchid", .info = &prop_marchid}, 2802 2803 #ifndef CONFIG_USER_ONLY 2804 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2805 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2806 DEFAULT_RNMI_IRQVEC), 2807 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2808 DEFAULT_RNMI_EXCPVEC), 2809 #endif 2810 2811 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2812 2813 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2814 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2815 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2816 2817 /* 2818 * write_misa() is marked as experimental for now so mark 2819 * it with 'x-' and default to 'false'.
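 *
 * Illustrative usage: '-cpu rv64,x-misa-w=true' exposes a writable misa,
 * allowing the guest to toggle the MISA bits that write_misa() accepts.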
2820 */ 2821 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2822 }; 2823 2824 #if defined(TARGET_RISCV64) 2825 static void rva22u64_profile_cpu_init(Object *obj) 2826 { 2827 rv64i_bare_cpu_init(obj); 2828 2829 RVA22U64.enabled = true; 2830 } 2831 2832 static void rva22s64_profile_cpu_init(Object *obj) 2833 { 2834 rv64i_bare_cpu_init(obj); 2835 2836 RVA22S64.enabled = true; 2837 } 2838 #endif 2839 2840 static const gchar *riscv_gdb_arch_name(CPUState *cs) 2841 { 2842 RISCVCPU *cpu = RISCV_CPU(cs); 2843 CPURISCVState *env = &cpu->env; 2844 2845 switch (riscv_cpu_mxl(env)) { 2846 case MXL_RV32: 2847 return "riscv:rv32"; 2848 case MXL_RV64: 2849 case MXL_RV128: 2850 return "riscv:rv64"; 2851 default: 2852 g_assert_not_reached(); 2853 } 2854 } 2855 2856 #ifndef CONFIG_USER_ONLY 2857 static int64_t riscv_get_arch_id(CPUState *cs) 2858 { 2859 RISCVCPU *cpu = RISCV_CPU(cs); 2860 2861 return cpu->env.mhartid; 2862 } 2863 2864 #include "hw/core/sysemu-cpu-ops.h" 2865 2866 static const struct SysemuCPUOps riscv_sysemu_ops = { 2867 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2868 .write_elf64_note = riscv_cpu_write_elf64_note, 2869 .write_elf32_note = riscv_cpu_write_elf32_note, 2870 .legacy_vmsd = &vmstate_riscv_cpu, 2871 }; 2872 #endif 2873 2874 static void riscv_cpu_common_class_init(ObjectClass *c, void *data) 2875 { 2876 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2877 CPUClass *cc = CPU_CLASS(c); 2878 DeviceClass *dc = DEVICE_CLASS(c); 2879 ResettableClass *rc = RESETTABLE_CLASS(c); 2880 2881 device_class_set_parent_realize(dc, riscv_cpu_realize, 2882 &mcc->parent_realize); 2883 2884 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2885 &mcc->parent_phases); 2886 2887 cc->class_by_name = riscv_cpu_class_by_name; 2888 cc->has_work = riscv_cpu_has_work; 2889 cc->mmu_index = riscv_cpu_mmu_index; 2890 cc->dump_state = riscv_cpu_dump_state; 2891 cc->set_pc = riscv_cpu_set_pc; 2892 cc->get_pc = riscv_cpu_get_pc; 2893 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2894 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2895 cc->gdb_stop_before_watchpoint = true; 2896 cc->disas_set_info = riscv_cpu_disas_set_info; 2897 #ifndef CONFIG_USER_ONLY 2898 cc->sysemu_ops = &riscv_sysemu_ops; 2899 cc->get_arch_id = riscv_get_arch_id; 2900 #endif 2901 cc->gdb_arch_name = riscv_gdb_arch_name; 2902 2903 device_class_set_props(dc, riscv_cpu_properties); 2904 } 2905 2906 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2907 { 2908 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2909 2910 mcc->misa_mxl_max = (uint32_t)(uintptr_t)data; 2911 riscv_cpu_validate_misa_mxl(mcc); 2912 } 2913 2914 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 2915 int max_str_len) 2916 { 2917 const RISCVIsaExtData *edata; 2918 char *old = *isa_str; 2919 char *new = *isa_str; 2920 2921 for (edata = isa_edata_arr; edata && edata->name; edata++) { 2922 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2923 new = g_strconcat(old, "_", edata->name, NULL); 2924 g_free(old); 2925 old = new; 2926 } 2927 } 2928 2929 *isa_str = new; 2930 } 2931 2932 char *riscv_isa_string(RISCVCPU *cpu) 2933 { 2934 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2935 int i; 2936 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 2937 char *isa_str = g_new(char, maxlen); 2938 int xlen = riscv_cpu_max_xlen(mcc); 2939 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 2940 2941 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2942 if 
(cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2943 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 2944 } 2945 } 2946 *p = '\0'; 2947 if (!cpu->cfg.short_isa_string) { 2948 riscv_isa_string_ext(cpu, &isa_str, maxlen); 2949 } 2950 return isa_str; 2951 } 2952 2953 #ifndef CONFIG_USER_ONLY 2954 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 2955 { 2956 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 2957 char **extensions = g_new(char *, maxlen); 2958 2959 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2960 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2961 extensions[*count] = g_new(char, 2); 2962 snprintf(extensions[*count], 2, "%c", 2963 qemu_tolower(riscv_single_letter_exts[i])); 2964 (*count)++; 2965 } 2966 } 2967 2968 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 2969 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 2970 extensions[*count] = g_strdup(edata->name); 2971 (*count)++; 2972 } 2973 } 2974 2975 return extensions; 2976 } 2977 2978 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 2979 { 2980 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 2981 const size_t maxlen = sizeof("rv128i"); 2982 g_autofree char *isa_base = g_new(char, maxlen); 2983 g_autofree char *riscv_isa; 2984 char **isa_extensions; 2985 int count = 0; 2986 int xlen = riscv_cpu_max_xlen(mcc); 2987 2988 riscv_isa = riscv_isa_string(cpu); 2989 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 2990 2991 snprintf(isa_base, maxlen, "rv%di", xlen); 2992 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 2993 2994 isa_extensions = riscv_isa_extensions_list(cpu, &count); 2995 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 2996 isa_extensions, count); 2997 2998 for (int i = 0; i < count; i++) { 2999 g_free(isa_extensions[i]); 3000 } 3001 3002 g_free(isa_extensions); 3003 } 3004 #endif 3005 3006 #define DEFINE_CPU(type_name, misa_mxl_max, initfn) \ 3007 { \ 3008 .name = (type_name), \ 3009 .parent = TYPE_RISCV_CPU, \ 3010 .instance_init = (initfn), \ 3011 .class_init = riscv_cpu_class_init, \ 3012 .class_data = (void *)(misa_mxl_max) \ 3013 } 3014 3015 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ 3016 { \ 3017 .name = (type_name), \ 3018 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 3019 .instance_init = (initfn), \ 3020 .class_init = riscv_cpu_class_init, \ 3021 .class_data = (void *)(misa_mxl_max) \ 3022 } 3023 3024 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ 3025 { \ 3026 .name = (type_name), \ 3027 .parent = TYPE_RISCV_VENDOR_CPU, \ 3028 .instance_init = (initfn), \ 3029 .class_init = riscv_cpu_class_init, \ 3030 .class_data = (void *)(misa_mxl_max) \ 3031 } 3032 3033 #define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ 3034 { \ 3035 .name = (type_name), \ 3036 .parent = TYPE_RISCV_BARE_CPU, \ 3037 .instance_init = (initfn), \ 3038 .class_init = riscv_cpu_class_init, \ 3039 .class_data = (void *)(misa_mxl_max) \ 3040 } 3041 3042 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ 3043 { \ 3044 .name = (type_name), \ 3045 .parent = TYPE_RISCV_BARE_CPU, \ 3046 .instance_init = (initfn), \ 3047 .class_init = riscv_cpu_class_init, \ 3048 .class_data = (void *)(misa_mxl_max) \ 3049 } 3050 3051 static const TypeInfo riscv_cpu_type_infos[] = { 3052 { 3053 .name = TYPE_RISCV_CPU, 3054 .parent = TYPE_CPU, 3055 .instance_size = sizeof(RISCVCPU), 3056 .instance_align = __alignof(RISCVCPU), 3057 
.instance_init = riscv_cpu_init, 3058 .instance_post_init = riscv_cpu_post_init, 3059 .abstract = true, 3060 .class_size = sizeof(RISCVCPUClass), 3061 .class_init = riscv_cpu_common_class_init, 3062 }, 3063 { 3064 .name = TYPE_RISCV_DYNAMIC_CPU, 3065 .parent = TYPE_RISCV_CPU, 3066 .abstract = true, 3067 }, 3068 { 3069 .name = TYPE_RISCV_VENDOR_CPU, 3070 .parent = TYPE_RISCV_CPU, 3071 .abstract = true, 3072 }, 3073 { 3074 .name = TYPE_RISCV_BARE_CPU, 3075 .parent = TYPE_RISCV_CPU, 3076 .instance_init = riscv_bare_cpu_init, 3077 .abstract = true, 3078 }, 3079 #if defined(TARGET_RISCV32) 3080 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3081 #elif defined(TARGET_RISCV64) 3082 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3083 #endif 3084 3085 #if defined(TARGET_RISCV32) || \ 3086 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3087 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3088 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3089 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3090 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3091 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3092 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 3093 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 3094 #endif 3095 3096 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3097 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 3098 #endif 3099 3100 #if defined(TARGET_RISCV64) 3101 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 3102 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 3103 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 3104 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 3105 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 3106 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), 3107 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 3108 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, 3109 MXL_RV64, rv64_xiangshan_nanhu_cpu_init), 3110 #ifdef CONFIG_TCG 3111 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 3112 #endif /* CONFIG_TCG */ 3113 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 3114 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 3115 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 3116 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 3117 #endif /* TARGET_RISCV64 */ 3118 }; 3119 3120 DEFINE_TYPES(riscv_cpu_type_infos) 3121