1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "internals.h" 27 #include "qapi/error.h" 28 #include "qapi/visitor.h" 29 #include "qemu/error-report.h" 30 #include "hw/qdev-properties.h" 31 #include "hw/core/qdev-prop-internal.h" 32 #include "migration/vmstate.h" 33 #include "fpu/softfloat-helpers.h" 34 #include "system/device_tree.h" 35 #include "system/kvm.h" 36 #include "system/tcg.h" 37 #include "kvm/kvm_riscv.h" 38 #include "tcg/tcg-cpu.h" 39 #include "tcg/tcg.h" 40 41 /* RISC-V CPU definitions */ 42 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH"; 43 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, 44 RVC, RVS, RVU, RVH, RVG, RVB, 0}; 45 46 /* 47 * From vector_helper.c 48 * Note that vector data is stored in host-endian 64-bit chunks, 49 * so addressing bytes needs a host-endian fixup. 50 */ 51 #if HOST_BIG_ENDIAN 52 #define BYTE(x) ((x) ^ 7) 53 #else 54 #define BYTE(x) (x) 55 #endif 56 57 bool riscv_cpu_is_32bit(RISCVCPU *cpu) 58 { 59 return riscv_cpu_mxl(&cpu->env) == MXL_RV32; 60 } 61 62 /* Hash that stores general user set numeric options */ 63 static GHashTable *general_user_opts; 64 65 static void cpu_option_add_user_setting(const char *optname, uint32_t value) 66 { 67 g_hash_table_insert(general_user_opts, (gpointer)optname, 68 GUINT_TO_POINTER(value)); 69 } 70 71 bool riscv_cpu_option_set(const char *optname) 72 { 73 return g_hash_table_contains(general_user_opts, optname); 74 } 75 76 static void riscv_cpu_cfg_merge(RISCVCPUConfig *dest, const RISCVCPUConfig *src) 77 { 78 #define BOOL_FIELD(x) dest->x |= src->x; 79 #define TYPED_FIELD(type, x, default_) if (src->x != default_) dest->x = src->x; 80 #include "cpu_cfg_fields.h.inc" 81 } 82 83 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 84 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} 85 86 /* 87 * Here are the ordering rules of extension naming defined by RISC-V 88 * specification : 89 * 1. All extensions should be separated from other multi-letter extensions 90 * by an underscore. 91 * 2. The first letter following the 'Z' conventionally indicates the most 92 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 93 * If multiple 'Z' extensions are named, they should be ordered first 94 * by category, then alphabetically within a category. 95 * 3. Standard supervisor-level extensions (starts with 'S') should be 96 * listed after standard unprivileged extensions. If multiple 97 * supervisor-level extensions are listed, they should be ordered 98 * alphabetically. 99 * 4. Non-standard extensions (starts with 'X') must be listed after all 100 * standard extensions. 
They must be separated from other multi-letter 101 * extensions by an underscore. 102 * 103 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 104 * instead. 105 */ 106 const RISCVIsaExtData isa_edata_arr[] = { 107 ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b), 108 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), 109 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop), 110 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), 111 ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11), 112 ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11), 113 ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11), 114 ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse), 115 ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp), 116 ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss), 117 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 118 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), 119 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), 120 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei), 121 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 122 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 123 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), 124 ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop), 125 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 126 ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12), 127 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), 128 ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha), 129 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), 130 ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), 131 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), 132 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 133 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 134 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 135 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 136 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 137 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 138 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 139 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 140 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 141 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 142 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 143 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 144 ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop), 145 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 146 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 147 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 148 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 149 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 150 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 151 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 152 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 153 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 154 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 155 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 156 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 157 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 158 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 159 ISA_EXT_DATA_ENTRY(zkr, 
PRIV_VERSION_1_12_0, ext_zkr), 160 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 161 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 162 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 163 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 164 ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso), 165 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 166 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 167 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 168 ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x), 169 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 170 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 171 ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x), 172 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 173 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 174 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 175 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 176 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb), 177 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 178 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn), 179 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc), 180 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 181 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng), 182 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 183 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 184 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks), 185 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc), 186 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 187 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg), 188 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 189 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), 190 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 191 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 192 ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), 193 ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha), 194 ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12), 195 ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12), 196 ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12), 197 ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12), 198 ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), 199 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 200 ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg), 201 ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf), 202 ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind), 203 ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp), 204 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), 205 ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi), 206 ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm), 207 ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm), 208 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 209 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 210 ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg), 211 ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11), 212 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 213 ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), 214 
ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind), 215 ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp), 216 ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm), 217 ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm), 218 ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen), 219 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 220 ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12), 221 ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), 222 ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12), 223 ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm), 224 ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade), 225 ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr), 226 ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr), 227 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 228 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 229 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 230 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 231 ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte), 232 ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc), 233 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 234 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 235 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 236 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 237 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 238 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 239 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 240 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 241 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 242 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 243 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 244 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 245 246 { }, 247 }; 248 249 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) 250 { 251 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 252 253 return *ext_enabled; 254 } 255 256 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) 257 { 258 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 259 260 *ext_enabled = en; 261 } 262 263 bool riscv_cpu_is_vendor(Object *cpu_obj) 264 { 265 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; 266 } 267 268 const char * const riscv_int_regnames[] = { 269 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 270 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 271 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 272 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 273 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 274 }; 275 276 const char * const riscv_int_regnamesh[] = { 277 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 278 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 279 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 280 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 281 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 282 "x30h/t5h", "x31h/t6h" 283 }; 284 285 const char * const riscv_fpr_regnames[] = { 286 "f0/ft0", "f1/ft1", 
"f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 287 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 288 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 289 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 290 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 291 "f30/ft10", "f31/ft11" 292 }; 293 294 const char * const riscv_rvv_regnames[] = { 295 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 296 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 297 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 298 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 299 "v28", "v29", "v30", "v31" 300 }; 301 302 static const char * const riscv_excp_names[] = { 303 "misaligned_fetch", 304 "fault_fetch", 305 "illegal_instruction", 306 "breakpoint", 307 "misaligned_load", 308 "fault_load", 309 "misaligned_store", 310 "fault_store", 311 "user_ecall", 312 "supervisor_ecall", 313 "hypervisor_ecall", 314 "machine_ecall", 315 "exec_page_fault", 316 "load_page_fault", 317 "reserved", 318 "store_page_fault", 319 "double_trap", 320 "reserved", 321 "reserved", 322 "reserved", 323 "guest_exec_page_fault", 324 "guest_load_page_fault", 325 "reserved", 326 "guest_store_page_fault", 327 }; 328 329 static const char * const riscv_intr_names[] = { 330 "u_software", 331 "s_software", 332 "vs_software", 333 "m_software", 334 "u_timer", 335 "s_timer", 336 "vs_timer", 337 "m_timer", 338 "u_external", 339 "s_external", 340 "vs_external", 341 "m_external", 342 "reserved", 343 "reserved", 344 "reserved", 345 "reserved" 346 }; 347 348 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 349 { 350 if (async) { 351 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 352 riscv_intr_names[cause] : "(unknown)"; 353 } else { 354 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 355 riscv_excp_names[cause] : "(unknown)"; 356 } 357 } 358 359 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 360 { 361 env->misa_ext_mask = env->misa_ext = ext; 362 } 363 364 int riscv_cpu_max_xlen(RISCVCPUClass *mcc) 365 { 366 return 16 << mcc->def->misa_mxl_max; 367 } 368 369 #ifndef CONFIG_USER_ONLY 370 static uint8_t satp_mode_from_str(const char *satp_mode_str) 371 { 372 if (!strncmp(satp_mode_str, "mbare", 5)) { 373 return VM_1_10_MBARE; 374 } 375 376 if (!strncmp(satp_mode_str, "sv32", 4)) { 377 return VM_1_10_SV32; 378 } 379 380 if (!strncmp(satp_mode_str, "sv39", 4)) { 381 return VM_1_10_SV39; 382 } 383 384 if (!strncmp(satp_mode_str, "sv48", 4)) { 385 return VM_1_10_SV48; 386 } 387 388 if (!strncmp(satp_mode_str, "sv57", 4)) { 389 return VM_1_10_SV57; 390 } 391 392 if (!strncmp(satp_mode_str, "sv64", 4)) { 393 return VM_1_10_SV64; 394 } 395 396 g_assert_not_reached(); 397 } 398 399 static uint8_t satp_mode_max_from_map(uint32_t map) 400 { 401 /* 402 * 'map = 0' will make us return (31 - 32), which C will 403 * happily overflow to UINT_MAX. There's no good result to 404 * return if 'map = 0' (e.g. returning 0 will be ambiguous 405 * with the result for 'map = 1'). 406 * 407 * Assert out if map = 0. Callers will have to deal with 408 * it outside of this function. 
409 */ 410 g_assert(map > 0); 411 412 /* map here has at least one bit set, so no problem with clz */ 413 return 31 - __builtin_clz(map); 414 } 415 416 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 417 { 418 if (is_32_bit) { 419 switch (satp_mode) { 420 case VM_1_10_SV32: 421 return "sv32"; 422 case VM_1_10_MBARE: 423 return "none"; 424 } 425 } else { 426 switch (satp_mode) { 427 case VM_1_10_SV64: 428 return "sv64"; 429 case VM_1_10_SV57: 430 return "sv57"; 431 case VM_1_10_SV48: 432 return "sv48"; 433 case VM_1_10_SV39: 434 return "sv39"; 435 case VM_1_10_MBARE: 436 return "none"; 437 } 438 } 439 440 g_assert_not_reached(); 441 } 442 443 static void set_satp_mode_max_supported(RISCVCPU *cpu, 444 int satp_mode) 445 { 446 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 447 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 448 449 assert(valid_vm[satp_mode]); 450 cpu->cfg.max_satp_mode = satp_mode; 451 } 452 453 static bool get_satp_mode_supported(RISCVCPU *cpu, uint16_t *supported) 454 { 455 bool rv32 = riscv_cpu_is_32bit(cpu); 456 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 457 int satp_mode = cpu->cfg.max_satp_mode; 458 459 if (satp_mode == -1) { 460 return false; 461 } 462 463 *supported = 0; 464 for (int i = 0; i <= satp_mode; ++i) { 465 if (valid_vm[i]) { 466 *supported |= (1 << i); 467 } 468 } 469 return true; 470 } 471 472 /* Set the satp mode to the max supported */ 473 static void set_satp_mode_default_map(RISCVCPU *cpu) 474 { 475 /* 476 * Bare CPUs do not default to the max available. 477 * Users must set a valid satp_mode in the command 478 * line. Otherwise, leave the existing max_satp_mode 479 * in place. 480 */ 481 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { 482 warn_report("No satp mode set. Defaulting to 'bare'"); 483 cpu->cfg.max_satp_mode = VM_1_10_MBARE; 484 } 485 } 486 #endif 487 488 static void riscv_max_cpu_init(Object *obj) 489 { 490 RISCVCPU *cpu = RISCV_CPU(obj); 491 CPURISCVState *env = &cpu->env; 492 493 cpu->cfg.mmu = true; 494 cpu->cfg.pmp = true; 495 496 env->priv_ver = PRIV_VERSION_LATEST; 497 #ifndef CONFIG_USER_ONLY 498 set_satp_mode_max_supported(RISCV_CPU(obj), 499 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 
500 VM_1_10_SV32 : VM_1_10_SV57); 501 #endif 502 } 503 504 #if defined(TARGET_RISCV64) 505 static void rv64_base_cpu_init(Object *obj) 506 { 507 RISCVCPU *cpu = RISCV_CPU(obj); 508 CPURISCVState *env = &cpu->env; 509 510 cpu->cfg.mmu = true; 511 cpu->cfg.pmp = true; 512 513 /* Set latest version of privileged specification */ 514 env->priv_ver = PRIV_VERSION_LATEST; 515 #ifndef CONFIG_USER_ONLY 516 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 517 #endif 518 } 519 520 static void rv64_sifive_u_cpu_init(Object *obj) 521 { 522 RISCVCPU *cpu = RISCV_CPU(obj); 523 CPURISCVState *env = &cpu->env; 524 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 525 env->priv_ver = PRIV_VERSION_1_10_0; 526 #ifndef CONFIG_USER_ONLY 527 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 528 #endif 529 530 /* inherited from parent obj via riscv_cpu_init() */ 531 cpu->cfg.ext_zifencei = true; 532 cpu->cfg.ext_zicsr = true; 533 cpu->cfg.mmu = true; 534 cpu->cfg.pmp = true; 535 } 536 537 static void rv64_sifive_e_cpu_init(Object *obj) 538 { 539 CPURISCVState *env = &RISCV_CPU(obj)->env; 540 RISCVCPU *cpu = RISCV_CPU(obj); 541 542 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 543 env->priv_ver = PRIV_VERSION_1_10_0; 544 #ifndef CONFIG_USER_ONLY 545 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 546 #endif 547 548 /* inherited from parent obj via riscv_cpu_init() */ 549 cpu->cfg.ext_zifencei = true; 550 cpu->cfg.ext_zicsr = true; 551 cpu->cfg.pmp = true; 552 } 553 554 static void rv64_thead_c906_cpu_init(Object *obj) 555 { 556 CPURISCVState *env = &RISCV_CPU(obj)->env; 557 RISCVCPU *cpu = RISCV_CPU(obj); 558 559 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); 560 env->priv_ver = PRIV_VERSION_1_11_0; 561 562 cpu->cfg.ext_zfa = true; 563 cpu->cfg.ext_zfh = true; 564 cpu->cfg.mmu = true; 565 cpu->cfg.ext_xtheadba = true; 566 cpu->cfg.ext_xtheadbb = true; 567 cpu->cfg.ext_xtheadbs = true; 568 cpu->cfg.ext_xtheadcmo = true; 569 cpu->cfg.ext_xtheadcondmov = true; 570 cpu->cfg.ext_xtheadfmemidx = true; 571 cpu->cfg.ext_xtheadmac = true; 572 cpu->cfg.ext_xtheadmemidx = true; 573 cpu->cfg.ext_xtheadmempair = true; 574 cpu->cfg.ext_xtheadsync = true; 575 576 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 577 #ifndef CONFIG_USER_ONLY 578 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 579 th_register_custom_csrs(cpu); 580 #endif 581 582 /* inherited from parent obj via riscv_cpu_init() */ 583 cpu->cfg.pmp = true; 584 } 585 586 static void rv64_veyron_v1_cpu_init(Object *obj) 587 { 588 CPURISCVState *env = &RISCV_CPU(obj)->env; 589 RISCVCPU *cpu = RISCV_CPU(obj); 590 591 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH); 592 env->priv_ver = PRIV_VERSION_1_12_0; 593 594 /* Enable ISA extensions */ 595 cpu->cfg.mmu = true; 596 cpu->cfg.ext_zifencei = true; 597 cpu->cfg.ext_zicsr = true; 598 cpu->cfg.pmp = true; 599 cpu->cfg.ext_zicbom = true; 600 cpu->cfg.cbom_blocksize = 64; 601 cpu->cfg.cboz_blocksize = 64; 602 cpu->cfg.ext_zicboz = true; 603 cpu->cfg.ext_smaia = true; 604 cpu->cfg.ext_ssaia = true; 605 cpu->cfg.ext_sscofpmf = true; 606 cpu->cfg.ext_sstc = true; 607 cpu->cfg.ext_svinval = true; 608 cpu->cfg.ext_svnapot = true; 609 cpu->cfg.ext_svpbmt = true; 610 cpu->cfg.ext_smstateen = true; 611 cpu->cfg.ext_zba = true; 612 cpu->cfg.ext_zbb = true; 613 cpu->cfg.ext_zbc = true; 614 cpu->cfg.ext_zbs = true; 615 cpu->cfg.ext_XVentanaCondOps = true; 616 617 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 618 cpu->cfg.marchid = VEYRON_V1_MARCHID; 619 cpu->cfg.mimpid = 
VEYRON_V1_MIMPID; 620 621 #ifndef CONFIG_USER_ONLY 622 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 623 #endif 624 } 625 626 /* Tenstorrent Ascalon */ 627 static void rv64_tt_ascalon_cpu_init(Object *obj) 628 { 629 CPURISCVState *env = &RISCV_CPU(obj)->env; 630 RISCVCPU *cpu = RISCV_CPU(obj); 631 632 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV); 633 env->priv_ver = PRIV_VERSION_1_13_0; 634 635 /* Enable ISA extensions */ 636 cpu->cfg.mmu = true; 637 cpu->cfg.vlenb = 256 >> 3; 638 cpu->cfg.elen = 64; 639 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 640 cpu->cfg.rvv_ma_all_1s = true; 641 cpu->cfg.rvv_ta_all_1s = true; 642 cpu->cfg.misa_w = true; 643 cpu->cfg.pmp = true; 644 cpu->cfg.cbom_blocksize = 64; 645 cpu->cfg.cbop_blocksize = 64; 646 cpu->cfg.cboz_blocksize = 64; 647 cpu->cfg.ext_zic64b = true; 648 cpu->cfg.ext_zicbom = true; 649 cpu->cfg.ext_zicbop = true; 650 cpu->cfg.ext_zicboz = true; 651 cpu->cfg.ext_zicntr = true; 652 cpu->cfg.ext_zicond = true; 653 cpu->cfg.ext_zicsr = true; 654 cpu->cfg.ext_zifencei = true; 655 cpu->cfg.ext_zihintntl = true; 656 cpu->cfg.ext_zihintpause = true; 657 cpu->cfg.ext_zihpm = true; 658 cpu->cfg.ext_zimop = true; 659 cpu->cfg.ext_zawrs = true; 660 cpu->cfg.ext_zfa = true; 661 cpu->cfg.ext_zfbfmin = true; 662 cpu->cfg.ext_zfh = true; 663 cpu->cfg.ext_zfhmin = true; 664 cpu->cfg.ext_zcb = true; 665 cpu->cfg.ext_zcmop = true; 666 cpu->cfg.ext_zba = true; 667 cpu->cfg.ext_zbb = true; 668 cpu->cfg.ext_zbs = true; 669 cpu->cfg.ext_zkt = true; 670 cpu->cfg.ext_zvbb = true; 671 cpu->cfg.ext_zvbc = true; 672 cpu->cfg.ext_zvfbfmin = true; 673 cpu->cfg.ext_zvfbfwma = true; 674 cpu->cfg.ext_zvfh = true; 675 cpu->cfg.ext_zvfhmin = true; 676 cpu->cfg.ext_zvkng = true; 677 cpu->cfg.ext_smaia = true; 678 cpu->cfg.ext_smstateen = true; 679 cpu->cfg.ext_ssaia = true; 680 cpu->cfg.ext_sscofpmf = true; 681 cpu->cfg.ext_sstc = true; 682 cpu->cfg.ext_svade = true; 683 cpu->cfg.ext_svinval = true; 684 cpu->cfg.ext_svnapot = true; 685 cpu->cfg.ext_svpbmt = true; 686 687 #ifndef CONFIG_USER_ONLY 688 set_satp_mode_max_supported(cpu, VM_1_10_SV57); 689 #endif 690 } 691 692 static void rv64_xiangshan_nanhu_cpu_init(Object *obj) 693 { 694 CPURISCVState *env = &RISCV_CPU(obj)->env; 695 RISCVCPU *cpu = RISCV_CPU(obj); 696 697 riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU); 698 env->priv_ver = PRIV_VERSION_1_12_0; 699 700 /* Enable ISA extensions */ 701 cpu->cfg.ext_zbc = true; 702 cpu->cfg.ext_zbkb = true; 703 cpu->cfg.ext_zbkc = true; 704 cpu->cfg.ext_zbkx = true; 705 cpu->cfg.ext_zknd = true; 706 cpu->cfg.ext_zkne = true; 707 cpu->cfg.ext_zknh = true; 708 cpu->cfg.ext_zksed = true; 709 cpu->cfg.ext_zksh = true; 710 cpu->cfg.ext_svinval = true; 711 712 cpu->cfg.mmu = true; 713 cpu->cfg.pmp = true; 714 715 #ifndef CONFIG_USER_ONLY 716 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 717 #endif 718 } 719 720 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 721 static void rv128_base_cpu_init(Object *obj) 722 { 723 RISCVCPU *cpu = RISCV_CPU(obj); 724 CPURISCVState *env = &cpu->env; 725 726 cpu->cfg.mmu = true; 727 cpu->cfg.pmp = true; 728 729 /* Set latest version of privileged specification */ 730 env->priv_ver = PRIV_VERSION_LATEST; 731 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 732 } 733 #endif /* CONFIG_TCG && !CONFIG_USER_ONLY */ 734 735 static void rv64i_bare_cpu_init(Object *obj) 736 { 737 CPURISCVState *env = &RISCV_CPU(obj)->env; 738 riscv_cpu_set_misa_ext(env, RVI); 739 } 740 741 static void rv64e_bare_cpu_init(Object *obj) 742 
{ 743 CPURISCVState *env = &RISCV_CPU(obj)->env; 744 riscv_cpu_set_misa_ext(env, RVE); 745 } 746 747 #endif /* !TARGET_RISCV64 */ 748 749 #if defined(TARGET_RISCV32) || \ 750 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 751 752 static void rv32_base_cpu_init(Object *obj) 753 { 754 RISCVCPU *cpu = RISCV_CPU(obj); 755 CPURISCVState *env = &cpu->env; 756 757 cpu->cfg.mmu = true; 758 cpu->cfg.pmp = true; 759 760 /* Set latest version of privileged specification */ 761 env->priv_ver = PRIV_VERSION_LATEST; 762 #ifndef CONFIG_USER_ONLY 763 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 764 #endif 765 } 766 767 static void rv32_sifive_u_cpu_init(Object *obj) 768 { 769 RISCVCPU *cpu = RISCV_CPU(obj); 770 CPURISCVState *env = &cpu->env; 771 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 772 env->priv_ver = PRIV_VERSION_1_10_0; 773 #ifndef CONFIG_USER_ONLY 774 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 775 #endif 776 777 /* inherited from parent obj via riscv_cpu_init() */ 778 cpu->cfg.ext_zifencei = true; 779 cpu->cfg.ext_zicsr = true; 780 cpu->cfg.mmu = true; 781 cpu->cfg.pmp = true; 782 } 783 784 static void rv32_sifive_e_cpu_init(Object *obj) 785 { 786 CPURISCVState *env = &RISCV_CPU(obj)->env; 787 RISCVCPU *cpu = RISCV_CPU(obj); 788 789 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 790 env->priv_ver = PRIV_VERSION_1_10_0; 791 #ifndef CONFIG_USER_ONLY 792 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 793 #endif 794 795 /* inherited from parent obj via riscv_cpu_init() */ 796 cpu->cfg.ext_zifencei = true; 797 cpu->cfg.ext_zicsr = true; 798 cpu->cfg.pmp = true; 799 } 800 801 static void rv32_ibex_cpu_init(Object *obj) 802 { 803 CPURISCVState *env = &RISCV_CPU(obj)->env; 804 RISCVCPU *cpu = RISCV_CPU(obj); 805 806 riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); 807 env->priv_ver = PRIV_VERSION_1_12_0; 808 #ifndef CONFIG_USER_ONLY 809 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 810 #endif 811 /* inherited from parent obj via riscv_cpu_init() */ 812 cpu->cfg.ext_zifencei = true; 813 cpu->cfg.ext_zicsr = true; 814 cpu->cfg.pmp = true; 815 cpu->cfg.ext_smepmp = true; 816 817 cpu->cfg.ext_zba = true; 818 cpu->cfg.ext_zbb = true; 819 cpu->cfg.ext_zbc = true; 820 cpu->cfg.ext_zbs = true; 821 } 822 823 static void rv32_imafcu_nommu_cpu_init(Object *obj) 824 { 825 CPURISCVState *env = &RISCV_CPU(obj)->env; 826 RISCVCPU *cpu = RISCV_CPU(obj); 827 828 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); 829 env->priv_ver = PRIV_VERSION_1_10_0; 830 #ifndef CONFIG_USER_ONLY 831 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 832 #endif 833 834 /* inherited from parent obj via riscv_cpu_init() */ 835 cpu->cfg.ext_zifencei = true; 836 cpu->cfg.ext_zicsr = true; 837 cpu->cfg.pmp = true; 838 } 839 840 static void rv32i_bare_cpu_init(Object *obj) 841 { 842 CPURISCVState *env = &RISCV_CPU(obj)->env; 843 riscv_cpu_set_misa_ext(env, RVI); 844 } 845 846 static void rv32e_bare_cpu_init(Object *obj) 847 { 848 CPURISCVState *env = &RISCV_CPU(obj)->env; 849 riscv_cpu_set_misa_ext(env, RVE); 850 } 851 #endif 852 853 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 854 { 855 ObjectClass *oc; 856 char *typename; 857 char **cpuname; 858 859 cpuname = g_strsplit(cpu_model, ",", 1); 860 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 861 oc = object_class_by_name(typename); 862 g_strfreev(cpuname); 863 g_free(typename); 864 865 return oc; 866 } 867 868 char *riscv_cpu_get_name(RISCVCPU *cpu) 
869 { 870 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 871 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 872 873 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 874 875 return cpu_model_from_type(typename); 876 } 877 878 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 879 { 880 RISCVCPU *cpu = RISCV_CPU(cs); 881 CPURISCVState *env = &cpu->env; 882 int i, j; 883 uint8_t *p; 884 885 #if !defined(CONFIG_USER_ONLY) 886 if (riscv_has_ext(env, RVH)) { 887 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 888 } 889 #endif 890 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 891 #ifndef CONFIG_USER_ONLY 892 { 893 static const int dump_csrs[] = { 894 CSR_MHARTID, 895 CSR_MSTATUS, 896 CSR_MSTATUSH, 897 /* 898 * CSR_SSTATUS is intentionally omitted here as its value 899 * can be figured out by looking at CSR_MSTATUS 900 */ 901 CSR_HSTATUS, 902 CSR_VSSTATUS, 903 CSR_MIP, 904 CSR_MIE, 905 CSR_MIDELEG, 906 CSR_HIDELEG, 907 CSR_MEDELEG, 908 CSR_HEDELEG, 909 CSR_MTVEC, 910 CSR_STVEC, 911 CSR_VSTVEC, 912 CSR_MEPC, 913 CSR_SEPC, 914 CSR_VSEPC, 915 CSR_MCAUSE, 916 CSR_SCAUSE, 917 CSR_VSCAUSE, 918 CSR_MTVAL, 919 CSR_STVAL, 920 CSR_HTVAL, 921 CSR_MTVAL2, 922 CSR_MSCRATCH, 923 CSR_SSCRATCH, 924 CSR_SATP, 925 }; 926 927 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 928 int csrno = dump_csrs[i]; 929 target_ulong val = 0; 930 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 931 932 /* 933 * Rely on the smode, hmode, etc, predicates within csr.c 934 * to do the filtering of the registers that are present. 935 */ 936 if (res == RISCV_EXCP_NONE) { 937 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 938 csr_ops[csrno].name, val); 939 } 940 } 941 } 942 #endif 943 944 for (i = 0; i < 32; i++) { 945 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 946 riscv_int_regnames[i], env->gpr[i]); 947 if ((i & 3) == 3) { 948 qemu_fprintf(f, "\n"); 949 } 950 } 951 if (flags & CPU_DUMP_FPU) { 952 target_ulong val = 0; 953 RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0); 954 if (res == RISCV_EXCP_NONE) { 955 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 956 csr_ops[CSR_FCSR].name, val); 957 } 958 for (i = 0; i < 32; i++) { 959 qemu_fprintf(f, " %-8s %016" PRIx64, 960 riscv_fpr_regnames[i], env->fpr[i]); 961 if ((i & 3) == 3) { 962 qemu_fprintf(f, "\n"); 963 } 964 } 965 } 966 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 967 static const int dump_rvv_csrs[] = { 968 CSR_VSTART, 969 CSR_VXSAT, 970 CSR_VXRM, 971 CSR_VCSR, 972 CSR_VL, 973 CSR_VTYPE, 974 CSR_VLENB, 975 }; 976 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 977 int csrno = dump_rvv_csrs[i]; 978 target_ulong val = 0; 979 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 980 981 /* 982 * Rely on the smode, hmode, etc, predicates within csr.c 983 * to do the filtering of the registers that are present. 
984 */ 985 if (res == RISCV_EXCP_NONE) { 986 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 987 csr_ops[csrno].name, val); 988 } 989 } 990 uint16_t vlenb = cpu->cfg.vlenb; 991 992 for (i = 0; i < 32; i++) { 993 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 994 p = (uint8_t *)env->vreg; 995 for (j = vlenb - 1 ; j >= 0; j--) { 996 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 997 } 998 qemu_fprintf(f, "\n"); 999 } 1000 } 1001 } 1002 1003 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 1004 { 1005 RISCVCPU *cpu = RISCV_CPU(cs); 1006 CPURISCVState *env = &cpu->env; 1007 1008 if (env->xl == MXL_RV32) { 1009 env->pc = (int32_t)value; 1010 } else { 1011 env->pc = value; 1012 } 1013 } 1014 1015 static vaddr riscv_cpu_get_pc(CPUState *cs) 1016 { 1017 RISCVCPU *cpu = RISCV_CPU(cs); 1018 CPURISCVState *env = &cpu->env; 1019 1020 /* Match cpu_get_tb_cpu_state. */ 1021 if (env->xl == MXL_RV32) { 1022 return env->pc & UINT32_MAX; 1023 } 1024 return env->pc; 1025 } 1026 1027 #ifndef CONFIG_USER_ONLY 1028 bool riscv_cpu_has_work(CPUState *cs) 1029 { 1030 RISCVCPU *cpu = RISCV_CPU(cs); 1031 CPURISCVState *env = &cpu->env; 1032 /* 1033 * Definition of the WFI instruction requires it to ignore the privilege 1034 * mode and delegation registers, but respect individual enables 1035 */ 1036 return riscv_cpu_all_pending(env) != 0 || 1037 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || 1038 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; 1039 } 1040 #endif /* !CONFIG_USER_ONLY */ 1041 1042 static void riscv_cpu_reset_hold(Object *obj, ResetType type) 1043 { 1044 #ifndef CONFIG_USER_ONLY 1045 uint8_t iprio; 1046 int i, irq, rdzero; 1047 #endif 1048 CPUState *cs = CPU(obj); 1049 RISCVCPU *cpu = RISCV_CPU(cs); 1050 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 1051 CPURISCVState *env = &cpu->env; 1052 1053 if (mcc->parent_phases.hold) { 1054 mcc->parent_phases.hold(obj, type); 1055 } 1056 #ifndef CONFIG_USER_ONLY 1057 env->misa_mxl = mcc->def->misa_mxl_max; 1058 env->priv = PRV_M; 1059 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 1060 if (env->misa_mxl > MXL_RV32) { 1061 /* 1062 * The reset status of SXL/UXL is undefined, but mstatus is WARL 1063 * and we must ensure that the value after init is valid for read. 1064 */ 1065 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 1066 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 1067 if (riscv_has_ext(env, RVH)) { 1068 env->vsstatus = set_field(env->vsstatus, 1069 MSTATUS64_SXL, env->misa_mxl); 1070 env->vsstatus = set_field(env->vsstatus, 1071 MSTATUS64_UXL, env->misa_mxl); 1072 env->mstatus_hs = set_field(env->mstatus_hs, 1073 MSTATUS64_SXL, env->misa_mxl); 1074 env->mstatus_hs = set_field(env->mstatus_hs, 1075 MSTATUS64_UXL, env->misa_mxl); 1076 } 1077 if (riscv_cpu_cfg(env)->ext_smdbltrp) { 1078 env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1); 1079 } 1080 } 1081 env->mcause = 0; 1082 env->miclaim = MIP_SGEIP; 1083 env->pc = env->resetvec; 1084 env->bins = 0; 1085 env->two_stage_lookup = false; 1086 1087 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 1088 (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ? 1089 MENVCFG_ADUE : 0); 1090 env->henvcfg = 0; 1091 1092 /* Initialized default priorities of local interrupts. */ 1093 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 1094 iprio = riscv_cpu_default_priority(i); 1095 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 1096 env->siprio[i] = (i == IRQ_S_EXT) ? 
0 : iprio; 1097 env->hviprio[i] = 0; 1098 } 1099 i = 0; 1100 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 1101 if (!rdzero) { 1102 env->hviprio[irq] = env->miprio[irq]; 1103 } 1104 i++; 1105 } 1106 1107 /* 1108 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor 1109 * extension is enabled. 1110 */ 1111 if (riscv_has_ext(env, RVH)) { 1112 env->mideleg |= HS_MODE_INTERRUPTS; 1113 } 1114 1115 /* 1116 * Clear mseccfg and unlock all the PMP entries upon reset. 1117 * This is allowed as per the priv and smepmp specifications 1118 * and is needed to clear stale entries across reboots. 1119 */ 1120 if (riscv_cpu_cfg(env)->ext_smepmp) { 1121 env->mseccfg = 0; 1122 } 1123 1124 pmp_unlock_entries(env); 1125 #else 1126 env->priv = PRV_U; 1127 env->senvcfg = 0; 1128 env->menvcfg = 0; 1129 #endif 1130 1131 /* on reset elp is clear */ 1132 env->elp = false; 1133 /* on reset ssp is set to 0 */ 1134 env->ssp = 0; 1135 1136 env->xl = riscv_cpu_mxl(env); 1137 cs->exception_index = RISCV_EXCP_NONE; 1138 env->load_res = -1; 1139 set_default_nan_mode(1, &env->fp_status); 1140 /* Default NaN value: sign bit clear, frac msb set */ 1141 set_float_default_nan_pattern(0b01000000, &env->fp_status); 1142 env->vill = true; 1143 1144 #ifndef CONFIG_USER_ONLY 1145 if (cpu->cfg.debug) { 1146 riscv_trigger_reset_hold(env); 1147 } 1148 1149 if (cpu->cfg.ext_smrnmi) { 1150 env->rnmip = 0; 1151 env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false); 1152 } 1153 1154 if (kvm_enabled()) { 1155 kvm_riscv_reset_vcpu(cpu); 1156 } 1157 #endif 1158 } 1159 1160 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 1161 { 1162 RISCVCPU *cpu = RISCV_CPU(s); 1163 CPURISCVState *env = &cpu->env; 1164 info->target_info = &cpu->cfg; 1165 1166 /* 1167 * A couple of bits in MSTATUS set the endianness: 1168 * - MSTATUS_UBE (User-mode), 1169 * - MSTATUS_SBE (Supervisor-mode), 1170 * - MSTATUS_MBE (Machine-mode) 1171 * but we don't implement that yet. 1172 */ 1173 info->endian = BFD_ENDIAN_LITTLE; 1174 1175 switch (env->xl) { 1176 case MXL_RV32: 1177 info->print_insn = print_insn_riscv32; 1178 break; 1179 case MXL_RV64: 1180 info->print_insn = print_insn_riscv64; 1181 break; 1182 case MXL_RV128: 1183 info->print_insn = print_insn_riscv128; 1184 break; 1185 default: 1186 g_assert_not_reached(); 1187 } 1188 } 1189 1190 #ifndef CONFIG_USER_ONLY 1191 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1192 { 1193 bool rv32 = riscv_cpu_is_32bit(cpu); 1194 uint16_t supported; 1195 uint8_t satp_mode_map_max; 1196 1197 if (!get_satp_mode_supported(cpu, &supported)) { 1198 /* The CPU wants the hypervisor to decide which satp mode to allow */ 1199 return; 1200 } 1201 1202 if (cpu->satp_modes.map == 0) { 1203 if (cpu->satp_modes.init == 0) { 1204 /* If unset by the user, we fallback to the default satp mode. */ 1205 set_satp_mode_default_map(cpu); 1206 } else { 1207 /* 1208 * Find the lowest level that was disabled and then enable the 1209 * first valid level below which can be found in 1210 * valid_vm_1_10_32/64. 
1211 */ 1212 for (int i = 1; i < 16; ++i) { 1213 if ((cpu->satp_modes.init & (1 << i)) && 1214 supported & (1 << i)) { 1215 for (int j = i - 1; j >= 0; --j) { 1216 if (supported & (1 << j)) { 1217 cpu->cfg.max_satp_mode = j; 1218 return; 1219 } 1220 } 1221 } 1222 } 1223 } 1224 return; 1225 } 1226 1227 satp_mode_map_max = satp_mode_max_from_map(cpu->satp_modes.map); 1228 1229 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1230 if (satp_mode_map_max > cpu->cfg.max_satp_mode) { 1231 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1232 satp_mode_str(satp_mode_map_max, rv32), 1233 satp_mode_str(cpu->cfg.max_satp_mode, rv32)); 1234 return; 1235 } 1236 1237 /* 1238 * Make sure the user did not ask for an invalid configuration as per 1239 * the specification. 1240 */ 1241 if (!rv32) { 1242 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1243 if (!(cpu->satp_modes.map & (1 << i)) && 1244 (cpu->satp_modes.init & (1 << i)) && 1245 (supported & (1 << i))) { 1246 error_setg(errp, "cannot disable %s satp mode if %s " 1247 "is enabled", satp_mode_str(i, false), 1248 satp_mode_str(satp_mode_map_max, false)); 1249 return; 1250 } 1251 } 1252 } 1253 1254 cpu->cfg.max_satp_mode = satp_mode_map_max; 1255 } 1256 #endif 1257 1258 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1259 { 1260 Error *local_err = NULL; 1261 1262 #ifndef CONFIG_USER_ONLY 1263 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1264 if (local_err != NULL) { 1265 error_propagate(errp, local_err); 1266 return; 1267 } 1268 #endif 1269 1270 if (tcg_enabled()) { 1271 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1272 if (local_err != NULL) { 1273 error_propagate(errp, local_err); 1274 return; 1275 } 1276 riscv_tcg_cpu_finalize_dynamic_decoder(cpu); 1277 } else if (kvm_enabled()) { 1278 riscv_kvm_cpu_finalize_features(cpu, &local_err); 1279 if (local_err != NULL) { 1280 error_propagate(errp, local_err); 1281 return; 1282 } 1283 } 1284 } 1285 1286 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1287 { 1288 CPUState *cs = CPU(dev); 1289 RISCVCPU *cpu = RISCV_CPU(dev); 1290 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1291 Error *local_err = NULL; 1292 1293 cpu_exec_realizefn(cs, &local_err); 1294 if (local_err != NULL) { 1295 error_propagate(errp, local_err); 1296 return; 1297 } 1298 1299 riscv_cpu_finalize_features(cpu, &local_err); 1300 if (local_err != NULL) { 1301 error_propagate(errp, local_err); 1302 return; 1303 } 1304 1305 riscv_cpu_register_gdb_regs_for_features(cs); 1306 1307 #ifndef CONFIG_USER_ONLY 1308 if (cpu->cfg.debug) { 1309 riscv_trigger_realize(&cpu->env); 1310 } 1311 #endif 1312 1313 qemu_init_vcpu(cs); 1314 cpu_reset(cs); 1315 1316 mcc->parent_realize(dev, errp); 1317 } 1318 1319 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1320 { 1321 if (tcg_enabled()) { 1322 return riscv_cpu_tcg_compatible(cpu); 1323 } 1324 1325 return true; 1326 } 1327 1328 #ifndef CONFIG_USER_ONLY 1329 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1330 void *opaque, Error **errp) 1331 { 1332 RISCVSATPModes *satp_modes = opaque; 1333 uint8_t satp = satp_mode_from_str(name); 1334 bool value; 1335 1336 value = satp_modes->map & (1 << satp); 1337 1338 visit_type_bool(v, name, &value, errp); 1339 } 1340 1341 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1342 void *opaque, Error **errp) 1343 { 1344 RISCVSATPModes *satp_modes = opaque; 1345 uint8_t satp = satp_mode_from_str(name); 1346 bool value; 1347 1348 if 
(!visit_type_bool(v, name, &value, errp)) { 1349 return; 1350 } 1351 1352 satp_modes->map = deposit32(satp_modes->map, satp, 1, value); 1353 satp_modes->init |= 1 << satp; 1354 } 1355 1356 void riscv_add_satp_mode_properties(Object *obj) 1357 { 1358 RISCVCPU *cpu = RISCV_CPU(obj); 1359 1360 if (cpu->env.misa_mxl == MXL_RV32) { 1361 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1362 cpu_riscv_set_satp, NULL, &cpu->satp_modes); 1363 } else { 1364 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1365 cpu_riscv_set_satp, NULL, &cpu->satp_modes); 1366 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1367 cpu_riscv_set_satp, NULL, &cpu->satp_modes); 1368 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1369 cpu_riscv_set_satp, NULL, &cpu->satp_modes); 1370 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1371 cpu_riscv_set_satp, NULL, &cpu->satp_modes); 1372 } 1373 } 1374 1375 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1376 { 1377 RISCVCPU *cpu = RISCV_CPU(opaque); 1378 CPURISCVState *env = &cpu->env; 1379 1380 if (irq < IRQ_LOCAL_MAX) { 1381 switch (irq) { 1382 case IRQ_U_SOFT: 1383 case IRQ_S_SOFT: 1384 case IRQ_VS_SOFT: 1385 case IRQ_M_SOFT: 1386 case IRQ_U_TIMER: 1387 case IRQ_S_TIMER: 1388 case IRQ_VS_TIMER: 1389 case IRQ_M_TIMER: 1390 case IRQ_U_EXT: 1391 case IRQ_VS_EXT: 1392 case IRQ_M_EXT: 1393 if (kvm_enabled()) { 1394 kvm_riscv_set_irq(cpu, irq, level); 1395 } else { 1396 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1397 } 1398 break; 1399 case IRQ_S_EXT: 1400 if (kvm_enabled()) { 1401 kvm_riscv_set_irq(cpu, irq, level); 1402 } else { 1403 env->external_seip = level; 1404 riscv_cpu_update_mip(env, 1 << irq, 1405 BOOL_TO_MASK(level | env->software_seip)); 1406 } 1407 break; 1408 default: 1409 g_assert_not_reached(); 1410 } 1411 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1412 /* Require H-extension for handling guest local interrupts */ 1413 if (!riscv_has_ext(env, RVH)) { 1414 g_assert_not_reached(); 1415 } 1416 1417 /* Compute bit position in HGEIP CSR */ 1418 irq = irq - IRQ_LOCAL_MAX + 1; 1419 if (env->geilen < irq) { 1420 g_assert_not_reached(); 1421 } 1422 1423 /* Update HGEIP CSR */ 1424 env->hgeip &= ~((target_ulong)1 << irq); 1425 if (level) { 1426 env->hgeip |= (target_ulong)1 << irq; 1427 } 1428 1429 /* Update mip.SGEIP bit */ 1430 riscv_cpu_update_mip(env, MIP_SGEIP, 1431 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1432 } else { 1433 g_assert_not_reached(); 1434 } 1435 } 1436 1437 static void riscv_cpu_set_nmi(void *opaque, int irq, int level) 1438 { 1439 riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level); 1440 } 1441 #endif /* CONFIG_USER_ONLY */ 1442 1443 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1444 { 1445 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1446 } 1447 1448 static void riscv_cpu_post_init(Object *obj) 1449 { 1450 accel_cpu_instance_init(CPU(obj)); 1451 } 1452 1453 static void riscv_cpu_init(Object *obj) 1454 { 1455 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); 1456 RISCVCPU *cpu = RISCV_CPU(obj); 1457 CPURISCVState *env = &cpu->env; 1458 1459 env->misa_mxl = mcc->def->misa_mxl_max; 1460 1461 #ifndef CONFIG_USER_ONLY 1462 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1463 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1464 qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi, 1465 "riscv.cpu.rnmi", RNMI_MAX); 1466 #endif /* CONFIG_USER_ONLY */ 1467 1468 general_user_opts = g_hash_table_new(g_str_hash, g_str_equal); 1469 
1470 /* 1471 * The timer and performance counters extensions were supported 1472 * in QEMU before they were added as discrete extensions in the 1473 * ISA. To keep compatibility we'll always default them to 'true' 1474 * for all CPUs. Each accelerator will decide what to do when 1475 * users disable them. 1476 */ 1477 RISCV_CPU(obj)->cfg.ext_zicntr = true; 1478 RISCV_CPU(obj)->cfg.ext_zihpm = true; 1479 1480 /* Default values for non-bool cpu properties */ 1481 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16); 1482 cpu->cfg.vlenb = 128 >> 3; 1483 cpu->cfg.elen = 64; 1484 cpu->cfg.cbom_blocksize = 64; 1485 cpu->cfg.cbop_blocksize = 64; 1486 cpu->cfg.cboz_blocksize = 64; 1487 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 1488 cpu->cfg.max_satp_mode = -1; 1489 1490 env->misa_ext_mask = env->misa_ext = mcc->def->misa_ext; 1491 riscv_cpu_cfg_merge(&cpu->cfg, &mcc->def->cfg); 1492 1493 if (mcc->def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) { 1494 cpu->env.priv_ver = mcc->def->priv_spec; 1495 } 1496 if (mcc->def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) { 1497 cpu->env.vext_ver = mcc->def->vext_spec; 1498 } 1499 } 1500 1501 static void riscv_bare_cpu_init(Object *obj) 1502 { 1503 RISCVCPU *cpu = RISCV_CPU(obj); 1504 1505 /* 1506 * Bare CPUs do not inherit the timer and performance 1507 * counters from the parent class (see riscv_cpu_init() 1508 * for info on why the parent enables them). 1509 * 1510 * Users have to explicitly enable these counters for 1511 * bare CPUs. 1512 */ 1513 cpu->cfg.ext_zicntr = false; 1514 cpu->cfg.ext_zihpm = false; 1515 1516 /* Set to QEMU's first supported priv version */ 1517 cpu->env.priv_ver = PRIV_VERSION_1_10_0; 1518 1519 /* 1520 * Support all available satp_mode settings. The default 1521 * value will be set to MBARE if the user doesn't set 1522 * satp_mode manually (see set_satp_mode_default()). 1523 */ 1524 #ifndef CONFIG_USER_ONLY 1525 set_satp_mode_max_supported(RISCV_CPU(obj), 1526 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 1527 VM_1_10_SV32 : VM_1_10_SV57); 1528 #endif 1529 } 1530 1531 typedef struct misa_ext_info { 1532 const char *name; 1533 const char *description; 1534 } MISAExtInfo; 1535 1536 #define MISA_INFO_IDX(_bit) \ 1537 __builtin_ctz(_bit) 1538 1539 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1540 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1541 1542 static const MISAExtInfo misa_ext_info_arr[] = { 1543 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1544 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1545 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1546 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1547 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1548 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1549 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1550 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1551 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1552 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1553 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1554 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1555 MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)") 1556 }; 1557 1558 static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc) 1559 { 1560 CPUClass *cc = CPU_CLASS(mcc); 1561 1562 /* Validate that MISA_MXL is set properly. 
*/ 1563 switch (mcc->def->misa_mxl_max) { 1564 #ifdef TARGET_RISCV64 1565 case MXL_RV64: 1566 case MXL_RV128: 1567 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1568 break; 1569 #endif 1570 case MXL_RV32: 1571 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1572 break; 1573 default: 1574 g_assert_not_reached(); 1575 } 1576 } 1577 1578 static int riscv_validate_misa_info_idx(uint32_t bit) 1579 { 1580 int idx; 1581 1582 /* 1583 * Our lowest valid input (RVA) is 1 and 1584 * __builtin_ctz() is UB with zero. 1585 */ 1586 g_assert(bit != 0); 1587 idx = MISA_INFO_IDX(bit); 1588 1589 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1590 return idx; 1591 } 1592 1593 const char *riscv_get_misa_ext_name(uint32_t bit) 1594 { 1595 int idx = riscv_validate_misa_info_idx(bit); 1596 const char *val = misa_ext_info_arr[idx].name; 1597 1598 g_assert(val != NULL); 1599 return val; 1600 } 1601 1602 const char *riscv_get_misa_ext_description(uint32_t bit) 1603 { 1604 int idx = riscv_validate_misa_info_idx(bit); 1605 const char *val = misa_ext_info_arr[idx].description; 1606 1607 g_assert(val != NULL); 1608 return val; 1609 } 1610 1611 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \ 1612 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ 1613 .enabled = _defval} 1614 1615 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { 1616 /* Defaults for standard extensions */ 1617 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), 1618 MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false), 1619 MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false), 1620 MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false), 1621 MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false), 1622 MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false), 1623 MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false), 1624 MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false), 1625 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), 1626 MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false), 1627 MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false), 1628 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), 1629 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), 1630 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), 1631 MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false), 1632 MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false), 1633 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), 1634 MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), 1635 MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false), 1636 MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), 1637 MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), 1638 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), 1639 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), 1640 MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false), 1641 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), 1642 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false), 1643 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false), 1644 MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false), 1645 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false), 1646 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false), 1647 MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false), 1648 MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false), 1649 MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false), 1650 MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false), 1651 MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false), 1652 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), 1653 MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false), 1654 MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false), 1655 MULTI_EXT_CFG_BOOL("supm", ext_supm, false), 1656 1657 MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false), 1658 
MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false), 1659 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), 1660 MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false), 1661 MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false), 1662 MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false), 1663 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1664 MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false), 1665 MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false), 1666 MULTI_EXT_CFG_BOOL("svade", ext_svade, false), 1667 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1668 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1669 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1670 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1671 MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true), 1672 1673 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1674 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1675 1676 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1677 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1678 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1679 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1680 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1681 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1682 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1683 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1684 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1685 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1686 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1687 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1688 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1689 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1690 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1691 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1692 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1693 MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false), 1694 1695 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1696 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1697 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1698 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1699 1700 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1701 MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), 1702 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1703 1704 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1705 1706 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1707 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1708 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1709 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1710 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1711 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1712 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1713 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1714 1715 /* Vector cryptography extensions */ 1716 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1717 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1718 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false), 1719 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1720 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1721 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1722 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1723 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1724 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1725 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1726 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1727 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1728 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1729 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1730 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1731 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1732 1733 { }, 1734 }; 1735 1736 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 
1737 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1738 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1739 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1740 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1741 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1742 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1743 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1744 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1745 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1746 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1747 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1748 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1749 1750 { }, 1751 }; 1752 1753 /* These are experimental so mark with 'x-' */ 1754 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1755 MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false), 1756 1757 { }, 1758 }; 1759 1760 /* 1761 * 'Named features' is the name we give to extensions that we 1762 * don't want to expose to users. They are either immutable 1763 * (always enabled/disable) or they'll vary depending on 1764 * the resulting CPU state. They have riscv,isa strings 1765 * and priv_ver like regular extensions. 1766 */ 1767 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { 1768 MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), 1769 MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true), 1770 MULTI_EXT_CFG_BOOL("sha", ext_sha, true), 1771 MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true), 1772 1773 { }, 1774 }; 1775 1776 /* Deprecated entries marked for future removal */ 1777 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1778 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1779 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1780 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1781 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1782 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1783 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1784 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1785 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1786 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1787 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1788 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1789 1790 { }, 1791 }; 1792 1793 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, 1794 Error **errp) 1795 { 1796 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1797 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", 1798 cpuname, propname); 1799 } 1800 1801 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1802 void *opaque, Error **errp) 1803 { 1804 RISCVCPU *cpu = RISCV_CPU(obj); 1805 uint8_t pmu_num, curr_pmu_num; 1806 uint32_t pmu_mask; 1807 1808 visit_type_uint8(v, name, &pmu_num, errp); 1809 1810 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); 1811 1812 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { 1813 cpu_set_prop_err(cpu, name, errp); 1814 error_append_hint(errp, "Current '%s' val: %u\n", 1815 name, curr_pmu_num); 1816 return; 1817 } 1818 1819 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1820 error_setg(errp, "Number of counters exceeds maximum available"); 1821 return; 1822 } 1823 1824 if (pmu_num == 0) { 1825 pmu_mask = 0; 1826 } else { 1827 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1828 } 1829 1830 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1831 cpu->cfg.pmu_mask = pmu_mask; 1832 
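    /* Record the deprecated pmu-num option under its pmu-mask equivalent. */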
cpu_option_add_user_setting("pmu-mask", pmu_mask); 1833 } 1834 1835 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1836 void *opaque, Error **errp) 1837 { 1838 RISCVCPU *cpu = RISCV_CPU(obj); 1839 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1840 1841 visit_type_uint8(v, name, &pmu_num, errp); 1842 } 1843 1844 static const PropertyInfo prop_pmu_num = { 1845 .type = "int8", 1846 .description = "pmu-num", 1847 .get = prop_pmu_num_get, 1848 .set = prop_pmu_num_set, 1849 }; 1850 1851 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1852 void *opaque, Error **errp) 1853 { 1854 RISCVCPU *cpu = RISCV_CPU(obj); 1855 uint32_t value; 1856 uint8_t pmu_num; 1857 1858 visit_type_uint32(v, name, &value, errp); 1859 1860 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1861 cpu_set_prop_err(cpu, name, errp); 1862 error_append_hint(errp, "Current '%s' val: %x\n", 1863 name, cpu->cfg.pmu_mask); 1864 return; 1865 } 1866 1867 pmu_num = ctpop32(value); 1868 1869 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1870 error_setg(errp, "Number of counters exceeds maximum available"); 1871 return; 1872 } 1873 1874 cpu_option_add_user_setting(name, value); 1875 cpu->cfg.pmu_mask = value; 1876 } 1877 1878 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1879 void *opaque, Error **errp) 1880 { 1881 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1882 1883 visit_type_uint8(v, name, &pmu_mask, errp); 1884 } 1885 1886 static const PropertyInfo prop_pmu_mask = { 1887 .type = "int8", 1888 .description = "pmu-mask", 1889 .get = prop_pmu_mask_get, 1890 .set = prop_pmu_mask_set, 1891 }; 1892 1893 static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1894 void *opaque, Error **errp) 1895 { 1896 RISCVCPU *cpu = RISCV_CPU(obj); 1897 bool value; 1898 1899 visit_type_bool(v, name, &value, errp); 1900 1901 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1902 cpu_set_prop_err(cpu, "mmu", errp); 1903 return; 1904 } 1905 1906 cpu_option_add_user_setting(name, value); 1907 cpu->cfg.mmu = value; 1908 } 1909 1910 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1911 void *opaque, Error **errp) 1912 { 1913 bool value = RISCV_CPU(obj)->cfg.mmu; 1914 1915 visit_type_bool(v, name, &value, errp); 1916 } 1917 1918 static const PropertyInfo prop_mmu = { 1919 .type = "bool", 1920 .description = "mmu", 1921 .get = prop_mmu_get, 1922 .set = prop_mmu_set, 1923 }; 1924 1925 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1926 void *opaque, Error **errp) 1927 { 1928 RISCVCPU *cpu = RISCV_CPU(obj); 1929 bool value; 1930 1931 visit_type_bool(v, name, &value, errp); 1932 1933 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1934 cpu_set_prop_err(cpu, name, errp); 1935 return; 1936 } 1937 1938 cpu_option_add_user_setting(name, value); 1939 cpu->cfg.pmp = value; 1940 } 1941 1942 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1943 void *opaque, Error **errp) 1944 { 1945 bool value = RISCV_CPU(obj)->cfg.pmp; 1946 1947 visit_type_bool(v, name, &value, errp); 1948 } 1949 1950 static const PropertyInfo prop_pmp = { 1951 .type = "bool", 1952 .description = "pmp", 1953 .get = prop_pmp_get, 1954 .set = prop_pmp_set, 1955 }; 1956 1957 static int priv_spec_from_str(const char *priv_spec_str) 1958 { 1959 int priv_version = -1; 1960 1961 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) { 1962 priv_version = PRIV_VERSION_1_13_0; 1963 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 
1964 priv_version = PRIV_VERSION_1_12_0; 1965 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1966 priv_version = PRIV_VERSION_1_11_0; 1967 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1968 priv_version = PRIV_VERSION_1_10_0; 1969 } 1970 1971 return priv_version; 1972 } 1973 1974 const char *priv_spec_to_str(int priv_version) 1975 { 1976 switch (priv_version) { 1977 case PRIV_VERSION_1_10_0: 1978 return PRIV_VER_1_10_0_STR; 1979 case PRIV_VERSION_1_11_0: 1980 return PRIV_VER_1_11_0_STR; 1981 case PRIV_VERSION_1_12_0: 1982 return PRIV_VER_1_12_0_STR; 1983 case PRIV_VERSION_1_13_0: 1984 return PRIV_VER_1_13_0_STR; 1985 default: 1986 return NULL; 1987 } 1988 } 1989 1990 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1991 void *opaque, Error **errp) 1992 { 1993 RISCVCPU *cpu = RISCV_CPU(obj); 1994 g_autofree char *value = NULL; 1995 int priv_version = -1; 1996 1997 visit_type_str(v, name, &value, errp); 1998 1999 priv_version = priv_spec_from_str(value); 2000 if (priv_version < 0) { 2001 error_setg(errp, "Unsupported privilege spec version '%s'", value); 2002 return; 2003 } 2004 2005 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 2006 cpu_set_prop_err(cpu, name, errp); 2007 error_append_hint(errp, "Current '%s' val: %s\n", name, 2008 object_property_get_str(obj, name, NULL)); 2009 return; 2010 } 2011 2012 cpu_option_add_user_setting(name, priv_version); 2013 cpu->env.priv_ver = priv_version; 2014 } 2015 2016 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 2017 void *opaque, Error **errp) 2018 { 2019 RISCVCPU *cpu = RISCV_CPU(obj); 2020 const char *value = priv_spec_to_str(cpu->env.priv_ver); 2021 2022 visit_type_str(v, name, (char **)&value, errp); 2023 } 2024 2025 static const PropertyInfo prop_priv_spec = { 2026 .type = "str", 2027 .description = "priv_spec", 2028 /* FIXME enum? */ 2029 .get = prop_priv_spec_get, 2030 .set = prop_priv_spec_set, 2031 }; 2032 2033 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 2034 void *opaque, Error **errp) 2035 { 2036 RISCVCPU *cpu = RISCV_CPU(obj); 2037 g_autofree char *value = NULL; 2038 2039 visit_type_str(v, name, &value, errp); 2040 2041 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 2042 error_setg(errp, "Unsupported vector spec version '%s'", value); 2043 return; 2044 } 2045 2046 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 2047 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 2048 } 2049 2050 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 2051 void *opaque, Error **errp) 2052 { 2053 const char *value = VEXT_VER_1_00_0_STR; 2054 2055 visit_type_str(v, name, (char **)&value, errp); 2056 } 2057 2058 static const PropertyInfo prop_vext_spec = { 2059 .type = "str", 2060 .description = "vext_spec", 2061 /* FIXME enum? 
*/ 2062 .get = prop_vext_spec_get, 2063 .set = prop_vext_spec_set, 2064 }; 2065 2066 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 2067 void *opaque, Error **errp) 2068 { 2069 RISCVCPU *cpu = RISCV_CPU(obj); 2070 uint16_t cpu_vlen = cpu->cfg.vlenb << 3; 2071 uint16_t value; 2072 2073 if (!visit_type_uint16(v, name, &value, errp)) { 2074 return; 2075 } 2076 2077 if (!is_power_of_2(value)) { 2078 error_setg(errp, "Vector extension VLEN must be power of 2"); 2079 return; 2080 } 2081 2082 if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) { 2083 cpu_set_prop_err(cpu, name, errp); 2084 error_append_hint(errp, "Current '%s' val: %u\n", 2085 name, cpu_vlen); 2086 return; 2087 } 2088 2089 cpu_option_add_user_setting(name, value); 2090 cpu->cfg.vlenb = value >> 3; 2091 } 2092 2093 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 2094 void *opaque, Error **errp) 2095 { 2096 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 2097 2098 visit_type_uint16(v, name, &value, errp); 2099 } 2100 2101 static const PropertyInfo prop_vlen = { 2102 .type = "uint16", 2103 .description = "vlen", 2104 .get = prop_vlen_get, 2105 .set = prop_vlen_set, 2106 }; 2107 2108 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 2109 void *opaque, Error **errp) 2110 { 2111 RISCVCPU *cpu = RISCV_CPU(obj); 2112 uint16_t value; 2113 2114 if (!visit_type_uint16(v, name, &value, errp)) { 2115 return; 2116 } 2117 2118 if (!is_power_of_2(value)) { 2119 error_setg(errp, "Vector extension ELEN must be power of 2"); 2120 return; 2121 } 2122 2123 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { 2124 cpu_set_prop_err(cpu, name, errp); 2125 error_append_hint(errp, "Current '%s' val: %u\n", 2126 name, cpu->cfg.elen); 2127 return; 2128 } 2129 2130 cpu_option_add_user_setting(name, value); 2131 cpu->cfg.elen = value; 2132 } 2133 2134 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 2135 void *opaque, Error **errp) 2136 { 2137 uint16_t value = RISCV_CPU(obj)->cfg.elen; 2138 2139 visit_type_uint16(v, name, &value, errp); 2140 } 2141 2142 static const PropertyInfo prop_elen = { 2143 .type = "uint16", 2144 .description = "elen", 2145 .get = prop_elen_get, 2146 .set = prop_elen_set, 2147 }; 2148 2149 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 2150 void *opaque, Error **errp) 2151 { 2152 RISCVCPU *cpu = RISCV_CPU(obj); 2153 uint16_t value; 2154 2155 if (!visit_type_uint16(v, name, &value, errp)) { 2156 return; 2157 } 2158 2159 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 2160 cpu_set_prop_err(cpu, name, errp); 2161 error_append_hint(errp, "Current '%s' val: %u\n", 2162 name, cpu->cfg.cbom_blocksize); 2163 return; 2164 } 2165 2166 cpu_option_add_user_setting(name, value); 2167 cpu->cfg.cbom_blocksize = value; 2168 } 2169 2170 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 2171 void *opaque, Error **errp) 2172 { 2173 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 2174 2175 visit_type_uint16(v, name, &value, errp); 2176 } 2177 2178 static const PropertyInfo prop_cbom_blksize = { 2179 .type = "uint16", 2180 .description = "cbom_blocksize", 2181 .get = prop_cbom_blksize_get, 2182 .set = prop_cbom_blksize_set, 2183 }; 2184 2185 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 2186 void *opaque, Error **errp) 2187 { 2188 RISCVCPU *cpu = RISCV_CPU(obj); 2189 uint16_t value; 2190 2191 if (!visit_type_uint16(v, name, &value, errp)) { 2192 return; 2193 } 
2194 2195 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 2196 cpu_set_prop_err(cpu, name, errp); 2197 error_append_hint(errp, "Current '%s' val: %u\n", 2198 name, cpu->cfg.cbop_blocksize); 2199 return; 2200 } 2201 2202 cpu_option_add_user_setting(name, value); 2203 cpu->cfg.cbop_blocksize = value; 2204 } 2205 2206 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, 2207 void *opaque, Error **errp) 2208 { 2209 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; 2210 2211 visit_type_uint16(v, name, &value, errp); 2212 } 2213 2214 static const PropertyInfo prop_cbop_blksize = { 2215 .type = "uint16", 2216 .description = "cbop_blocksize", 2217 .get = prop_cbop_blksize_get, 2218 .set = prop_cbop_blksize_set, 2219 }; 2220 2221 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, 2222 void *opaque, Error **errp) 2223 { 2224 RISCVCPU *cpu = RISCV_CPU(obj); 2225 uint16_t value; 2226 2227 if (!visit_type_uint16(v, name, &value, errp)) { 2228 return; 2229 } 2230 2231 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { 2232 cpu_set_prop_err(cpu, name, errp); 2233 error_append_hint(errp, "Current '%s' val: %u\n", 2234 name, cpu->cfg.cboz_blocksize); 2235 return; 2236 } 2237 2238 cpu_option_add_user_setting(name, value); 2239 cpu->cfg.cboz_blocksize = value; 2240 } 2241 2242 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, 2243 void *opaque, Error **errp) 2244 { 2245 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; 2246 2247 visit_type_uint16(v, name, &value, errp); 2248 } 2249 2250 static const PropertyInfo prop_cboz_blksize = { 2251 .type = "uint16", 2252 .description = "cboz_blocksize", 2253 .get = prop_cboz_blksize_get, 2254 .set = prop_cboz_blksize_set, 2255 }; 2256 2257 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, 2258 void *opaque, Error **errp) 2259 { 2260 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2261 RISCVCPU *cpu = RISCV_CPU(obj); 2262 uint32_t prev_val = cpu->cfg.mvendorid; 2263 uint32_t value; 2264 2265 if (!visit_type_uint32(v, name, &value, errp)) { 2266 return; 2267 } 2268 2269 if (!dynamic_cpu && prev_val != value) { 2270 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2271 object_get_typename(obj), prev_val); 2272 return; 2273 } 2274 2275 cpu->cfg.mvendorid = value; 2276 } 2277 2278 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2279 void *opaque, Error **errp) 2280 { 2281 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2282 2283 visit_type_uint32(v, name, &value, errp); 2284 } 2285 2286 static const PropertyInfo prop_mvendorid = { 2287 .type = "uint32", 2288 .description = "mvendorid", 2289 .get = prop_mvendorid_get, 2290 .set = prop_mvendorid_set, 2291 }; 2292 2293 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2294 void *opaque, Error **errp) 2295 { 2296 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2297 RISCVCPU *cpu = RISCV_CPU(obj); 2298 uint64_t prev_val = cpu->cfg.mimpid; 2299 uint64_t value; 2300 2301 if (!visit_type_uint64(v, name, &value, errp)) { 2302 return; 2303 } 2304 2305 if (!dynamic_cpu && prev_val != value) { 2306 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2307 object_get_typename(obj), prev_val); 2308 return; 2309 } 2310 2311 cpu->cfg.mimpid = value; 2312 } 2313 2314 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2315 void *opaque, Error **errp) 2316 { 2317 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2318 2319 
    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .type = "uint64",
    .description = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1LL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_marchid = {
    .type = "uint64",
    .description = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};

/*
 * RVA22U64 defines some 'named features' that are cache
 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
 */
static RISCVCPUProfile RVA22U64 = {
    .u_parent = NULL,
    .s_parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * The remaining features/extensions come from RVA22U64.
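 *
 * As an illustration (assumed typical usage, not something this file
 * enforces): a command line along the lines of
 *
 *     qemu-system-riscv64 -cpu rva22s64 ...
 *
 * picks the profile CPU registered at the end of this file, which
 * starts from the rv64i bare CPU and enables this profile, so the
 * mandatory RVA22U64 extensions plus the ones listed below end up
 * enabled.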
 */
static RISCVCPUProfile RVA22S64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * All mandatory extensions from RVA22U64 are present
 * in RVA23U64 so we set RVA22U64 as a parent. We need to
 * declare just the newly added mandatory extensions.
 */
static RISCVCPUProfile RVA23U64 = {
    .u_parent = &RVA22U64,
    .s_parent = NULL,
    .name = "rva23u64",
    .misa_ext = RVV,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
        CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
        CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
        CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
        CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
        CPU_CFG_OFFSET(ext_supm),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA23U64, RVA23S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw, Ssu64xl
 *
 * The remaining features/extensions come from RVA23U64.
 */
static RISCVCPUProfile RVA23S64 = {
    .u_parent = &RVA23U64,
    .s_parent = &RVA22S64,
    .name = "rva23s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_13_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* New in RVA23S64 */
        CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
        CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),

        /* Named features: Sha */
        CPU_CFG_OFFSET(ext_sha),

        RISCV_PROFILE_EXT_LIST_END
    }
};

RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    &RVA23U64,
    &RVA23S64,
    NULL,
};

static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};
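/*
 * Illustrative note (an assumption about how these rules are consumed,
 * not something stated in this table): the implied-extension rules are
 * walked while CPU features are finalized, so a command line such as
 *
 *     qemu-system-riscv64 -cpu rv64,zcb=true
 *
 * is expected to end up with Zca enabled as well, since ZCB_IMPLIED
 * above lists ext_zca in implied_multi_exts.
 */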
2559 static RISCVCPUImpliedExtsRule ZCD_IMPLIED = { 2560 .ext = CPU_CFG_OFFSET(ext_zcd), 2561 .implied_misa_exts = RVD, 2562 .implied_multi_exts = { 2563 CPU_CFG_OFFSET(ext_zca), 2564 2565 RISCV_IMPLIED_EXTS_RULE_END 2566 }, 2567 }; 2568 2569 static RISCVCPUImpliedExtsRule ZCE_IMPLIED = { 2570 .ext = CPU_CFG_OFFSET(ext_zce), 2571 .implied_multi_exts = { 2572 CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp), 2573 CPU_CFG_OFFSET(ext_zcmt), 2574 2575 RISCV_IMPLIED_EXTS_RULE_END 2576 }, 2577 }; 2578 2579 static RISCVCPUImpliedExtsRule ZCF_IMPLIED = { 2580 .ext = CPU_CFG_OFFSET(ext_zcf), 2581 .implied_misa_exts = RVF, 2582 .implied_multi_exts = { 2583 CPU_CFG_OFFSET(ext_zca), 2584 2585 RISCV_IMPLIED_EXTS_RULE_END 2586 }, 2587 }; 2588 2589 static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = { 2590 .ext = CPU_CFG_OFFSET(ext_zcmp), 2591 .implied_multi_exts = { 2592 CPU_CFG_OFFSET(ext_zca), 2593 2594 RISCV_IMPLIED_EXTS_RULE_END 2595 }, 2596 }; 2597 2598 static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = { 2599 .ext = CPU_CFG_OFFSET(ext_zcmt), 2600 .implied_multi_exts = { 2601 CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr), 2602 2603 RISCV_IMPLIED_EXTS_RULE_END 2604 }, 2605 }; 2606 2607 static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = { 2608 .ext = CPU_CFG_OFFSET(ext_zdinx), 2609 .implied_multi_exts = { 2610 CPU_CFG_OFFSET(ext_zfinx), 2611 2612 RISCV_IMPLIED_EXTS_RULE_END 2613 }, 2614 }; 2615 2616 static RISCVCPUImpliedExtsRule ZFA_IMPLIED = { 2617 .ext = CPU_CFG_OFFSET(ext_zfa), 2618 .implied_misa_exts = RVF, 2619 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2620 }; 2621 2622 static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = { 2623 .ext = CPU_CFG_OFFSET(ext_zfbfmin), 2624 .implied_misa_exts = RVF, 2625 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2626 }; 2627 2628 static RISCVCPUImpliedExtsRule ZFH_IMPLIED = { 2629 .ext = CPU_CFG_OFFSET(ext_zfh), 2630 .implied_multi_exts = { 2631 CPU_CFG_OFFSET(ext_zfhmin), 2632 2633 RISCV_IMPLIED_EXTS_RULE_END 2634 }, 2635 }; 2636 2637 static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = { 2638 .ext = CPU_CFG_OFFSET(ext_zfhmin), 2639 .implied_misa_exts = RVF, 2640 .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END }, 2641 }; 2642 2643 static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = { 2644 .ext = CPU_CFG_OFFSET(ext_zfinx), 2645 .implied_multi_exts = { 2646 CPU_CFG_OFFSET(ext_zicsr), 2647 2648 RISCV_IMPLIED_EXTS_RULE_END 2649 }, 2650 }; 2651 2652 static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = { 2653 .ext = CPU_CFG_OFFSET(ext_zhinx), 2654 .implied_multi_exts = { 2655 CPU_CFG_OFFSET(ext_zhinxmin), 2656 2657 RISCV_IMPLIED_EXTS_RULE_END 2658 }, 2659 }; 2660 2661 static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = { 2662 .ext = CPU_CFG_OFFSET(ext_zhinxmin), 2663 .implied_multi_exts = { 2664 CPU_CFG_OFFSET(ext_zfinx), 2665 2666 RISCV_IMPLIED_EXTS_RULE_END 2667 }, 2668 }; 2669 2670 static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = { 2671 .ext = CPU_CFG_OFFSET(ext_zicntr), 2672 .implied_multi_exts = { 2673 CPU_CFG_OFFSET(ext_zicsr), 2674 2675 RISCV_IMPLIED_EXTS_RULE_END 2676 }, 2677 }; 2678 2679 static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = { 2680 .ext = CPU_CFG_OFFSET(ext_zihpm), 2681 .implied_multi_exts = { 2682 CPU_CFG_OFFSET(ext_zicsr), 2683 2684 RISCV_IMPLIED_EXTS_RULE_END 2685 }, 2686 }; 2687 2688 static RISCVCPUImpliedExtsRule ZK_IMPLIED = { 2689 .ext = CPU_CFG_OFFSET(ext_zk), 2690 .implied_multi_exts = { 2691 CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr), 2692 CPU_CFG_OFFSET(ext_zkt), 2693 2694 RISCV_IMPLIED_EXTS_RULE_END 2695 }, 2696 }; 2697 2698 static 
RISCVCPUImpliedExtsRule ZKN_IMPLIED = { 2699 .ext = CPU_CFG_OFFSET(ext_zkn), 2700 .implied_multi_exts = { 2701 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2702 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne), 2703 CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh), 2704 2705 RISCV_IMPLIED_EXTS_RULE_END 2706 }, 2707 }; 2708 2709 static RISCVCPUImpliedExtsRule ZKS_IMPLIED = { 2710 .ext = CPU_CFG_OFFSET(ext_zks), 2711 .implied_multi_exts = { 2712 CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc), 2713 CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed), 2714 CPU_CFG_OFFSET(ext_zksh), 2715 2716 RISCV_IMPLIED_EXTS_RULE_END 2717 }, 2718 }; 2719 2720 static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = { 2721 .ext = CPU_CFG_OFFSET(ext_zvbb), 2722 .implied_multi_exts = { 2723 CPU_CFG_OFFSET(ext_zvkb), 2724 2725 RISCV_IMPLIED_EXTS_RULE_END 2726 }, 2727 }; 2728 2729 static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = { 2730 .ext = CPU_CFG_OFFSET(ext_zve32f), 2731 .implied_misa_exts = RVF, 2732 .implied_multi_exts = { 2733 CPU_CFG_OFFSET(ext_zve32x), 2734 2735 RISCV_IMPLIED_EXTS_RULE_END 2736 }, 2737 }; 2738 2739 static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = { 2740 .ext = CPU_CFG_OFFSET(ext_zve32x), 2741 .implied_multi_exts = { 2742 CPU_CFG_OFFSET(ext_zicsr), 2743 2744 RISCV_IMPLIED_EXTS_RULE_END 2745 }, 2746 }; 2747 2748 static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = { 2749 .ext = CPU_CFG_OFFSET(ext_zve64d), 2750 .implied_misa_exts = RVD, 2751 .implied_multi_exts = { 2752 CPU_CFG_OFFSET(ext_zve64f), 2753 2754 RISCV_IMPLIED_EXTS_RULE_END 2755 }, 2756 }; 2757 2758 static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = { 2759 .ext = CPU_CFG_OFFSET(ext_zve64f), 2760 .implied_misa_exts = RVF, 2761 .implied_multi_exts = { 2762 CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x), 2763 2764 RISCV_IMPLIED_EXTS_RULE_END 2765 }, 2766 }; 2767 2768 static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = { 2769 .ext = CPU_CFG_OFFSET(ext_zve64x), 2770 .implied_multi_exts = { 2771 CPU_CFG_OFFSET(ext_zve32x), 2772 2773 RISCV_IMPLIED_EXTS_RULE_END 2774 }, 2775 }; 2776 2777 static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = { 2778 .ext = CPU_CFG_OFFSET(ext_zvfbfmin), 2779 .implied_multi_exts = { 2780 CPU_CFG_OFFSET(ext_zve32f), 2781 2782 RISCV_IMPLIED_EXTS_RULE_END 2783 }, 2784 }; 2785 2786 static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = { 2787 .ext = CPU_CFG_OFFSET(ext_zvfbfwma), 2788 .implied_multi_exts = { 2789 CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin), 2790 2791 RISCV_IMPLIED_EXTS_RULE_END 2792 }, 2793 }; 2794 2795 static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = { 2796 .ext = CPU_CFG_OFFSET(ext_zvfh), 2797 .implied_multi_exts = { 2798 CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin), 2799 2800 RISCV_IMPLIED_EXTS_RULE_END 2801 }, 2802 }; 2803 2804 static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = { 2805 .ext = CPU_CFG_OFFSET(ext_zvfhmin), 2806 .implied_multi_exts = { 2807 CPU_CFG_OFFSET(ext_zve32f), 2808 2809 RISCV_IMPLIED_EXTS_RULE_END 2810 }, 2811 }; 2812 2813 static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = { 2814 .ext = CPU_CFG_OFFSET(ext_zvkn), 2815 .implied_multi_exts = { 2816 CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb), 2817 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2818 2819 RISCV_IMPLIED_EXTS_RULE_END 2820 }, 2821 }; 2822 2823 static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = { 2824 .ext = CPU_CFG_OFFSET(ext_zvknc), 2825 .implied_multi_exts = { 2826 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc), 2827 2828 RISCV_IMPLIED_EXTS_RULE_END 2829 }, 2830 }; 2831 2832 static 
RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = { 2833 .ext = CPU_CFG_OFFSET(ext_zvkng), 2834 .implied_multi_exts = { 2835 CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg), 2836 2837 RISCV_IMPLIED_EXTS_RULE_END 2838 }, 2839 }; 2840 2841 static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = { 2842 .ext = CPU_CFG_OFFSET(ext_zvknhb), 2843 .implied_multi_exts = { 2844 CPU_CFG_OFFSET(ext_zve64x), 2845 2846 RISCV_IMPLIED_EXTS_RULE_END 2847 }, 2848 }; 2849 2850 static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = { 2851 .ext = CPU_CFG_OFFSET(ext_zvks), 2852 .implied_multi_exts = { 2853 CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh), 2854 CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt), 2855 2856 RISCV_IMPLIED_EXTS_RULE_END 2857 }, 2858 }; 2859 2860 static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = { 2861 .ext = CPU_CFG_OFFSET(ext_zvksc), 2862 .implied_multi_exts = { 2863 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc), 2864 2865 RISCV_IMPLIED_EXTS_RULE_END 2866 }, 2867 }; 2868 2869 static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { 2870 .ext = CPU_CFG_OFFSET(ext_zvksg), 2871 .implied_multi_exts = { 2872 CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg), 2873 2874 RISCV_IMPLIED_EXTS_RULE_END 2875 }, 2876 }; 2877 2878 static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = { 2879 .ext = CPU_CFG_OFFSET(ext_ssccfg), 2880 .implied_multi_exts = { 2881 CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind), 2882 CPU_CFG_OFFSET(ext_smcdeleg), 2883 2884 RISCV_IMPLIED_EXTS_RULE_END 2885 }, 2886 }; 2887 2888 static RISCVCPUImpliedExtsRule SUPM_IMPLIED = { 2889 .ext = CPU_CFG_OFFSET(ext_supm), 2890 .implied_multi_exts = { 2891 CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm), 2892 2893 RISCV_IMPLIED_EXTS_RULE_END 2894 }, 2895 }; 2896 2897 static RISCVCPUImpliedExtsRule SSPM_IMPLIED = { 2898 .ext = CPU_CFG_OFFSET(ext_sspm), 2899 .implied_multi_exts = { 2900 CPU_CFG_OFFSET(ext_smnpm), 2901 2902 RISCV_IMPLIED_EXTS_RULE_END 2903 }, 2904 }; 2905 2906 static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = { 2907 .ext = CPU_CFG_OFFSET(ext_smctr), 2908 .implied_misa_exts = RVS, 2909 .implied_multi_exts = { 2910 CPU_CFG_OFFSET(ext_sscsrind), 2911 2912 RISCV_IMPLIED_EXTS_RULE_END 2913 }, 2914 }; 2915 2916 static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = { 2917 .ext = CPU_CFG_OFFSET(ext_ssctr), 2918 .implied_misa_exts = RVS, 2919 .implied_multi_exts = { 2920 CPU_CFG_OFFSET(ext_sscsrind), 2921 2922 RISCV_IMPLIED_EXTS_RULE_END 2923 }, 2924 }; 2925 2926 RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { 2927 &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED, 2928 &RVM_IMPLIED, &RVV_IMPLIED, NULL 2929 }; 2930 2931 RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { 2932 &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED, 2933 &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED, 2934 &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED, 2935 &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED, 2936 &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED, 2937 &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED, 2938 &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED, 2939 &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED, 2940 &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, 2941 &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, 2942 &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, 2943 &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED, 2944 &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED, 2945 NULL 2946 }; 2947 2948 static const Property riscv_cpu_properties[] = { 2949 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 2950 2951 {.name = "pmu-mask", 
.info = &prop_pmu_mask}, 2952 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 2953 2954 {.name = "mmu", .info = &prop_mmu}, 2955 {.name = "pmp", .info = &prop_pmp}, 2956 2957 {.name = "priv_spec", .info = &prop_priv_spec}, 2958 {.name = "vext_spec", .info = &prop_vext_spec}, 2959 2960 {.name = "vlen", .info = &prop_vlen}, 2961 {.name = "elen", .info = &prop_elen}, 2962 2963 {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, 2964 {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, 2965 {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, 2966 2967 {.name = "mvendorid", .info = &prop_mvendorid}, 2968 {.name = "mimpid", .info = &prop_mimpid}, 2969 {.name = "marchid", .info = &prop_marchid}, 2970 2971 #ifndef CONFIG_USER_ONLY 2972 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 2973 DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, 2974 DEFAULT_RNMI_IRQVEC), 2975 DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, 2976 DEFAULT_RNMI_EXCPVEC), 2977 #endif 2978 2979 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 2980 2981 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2982 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2983 DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), 2984 2985 /* 2986 * write_misa() is marked as experimental for now so mark 2987 * it with -x and default to 'false'. 2988 */ 2989 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2990 }; 2991 2992 #if defined(TARGET_RISCV64) 2993 static void rva22u64_profile_cpu_init(Object *obj) 2994 { 2995 rv64i_bare_cpu_init(obj); 2996 2997 RVA22U64.enabled = true; 2998 } 2999 3000 static void rva22s64_profile_cpu_init(Object *obj) 3001 { 3002 rv64i_bare_cpu_init(obj); 3003 3004 RVA22S64.enabled = true; 3005 } 3006 3007 static void rva23u64_profile_cpu_init(Object *obj) 3008 { 3009 rv64i_bare_cpu_init(obj); 3010 3011 RVA23U64.enabled = true; 3012 } 3013 3014 static void rva23s64_profile_cpu_init(Object *obj) 3015 { 3016 rv64i_bare_cpu_init(obj); 3017 3018 RVA23S64.enabled = true; 3019 } 3020 #endif 3021 3022 static const gchar *riscv_gdb_arch_name(CPUState *cs) 3023 { 3024 RISCVCPU *cpu = RISCV_CPU(cs); 3025 CPURISCVState *env = &cpu->env; 3026 3027 switch (riscv_cpu_mxl(env)) { 3028 case MXL_RV32: 3029 return "riscv:rv32"; 3030 case MXL_RV64: 3031 case MXL_RV128: 3032 return "riscv:rv64"; 3033 default: 3034 g_assert_not_reached(); 3035 } 3036 } 3037 3038 #ifndef CONFIG_USER_ONLY 3039 static int64_t riscv_get_arch_id(CPUState *cs) 3040 { 3041 RISCVCPU *cpu = RISCV_CPU(cs); 3042 3043 return cpu->env.mhartid; 3044 } 3045 3046 #include "hw/core/sysemu-cpu-ops.h" 3047 3048 static const struct SysemuCPUOps riscv_sysemu_ops = { 3049 .has_work = riscv_cpu_has_work, 3050 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 3051 .write_elf64_note = riscv_cpu_write_elf64_note, 3052 .write_elf32_note = riscv_cpu_write_elf32_note, 3053 .legacy_vmsd = &vmstate_riscv_cpu, 3054 }; 3055 #endif 3056 3057 static void riscv_cpu_common_class_init(ObjectClass *c, const void *data) 3058 { 3059 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3060 CPUClass *cc = CPU_CLASS(c); 3061 DeviceClass *dc = DEVICE_CLASS(c); 3062 ResettableClass *rc = RESETTABLE_CLASS(c); 3063 3064 device_class_set_parent_realize(dc, riscv_cpu_realize, 3065 &mcc->parent_realize); 3066 3067 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 3068 &mcc->parent_phases); 3069 3070 
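    /* Common CPUClass hooks: naming, state dump, PC accessors, gdb support */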
cc->class_by_name = riscv_cpu_class_by_name; 3071 cc->dump_state = riscv_cpu_dump_state; 3072 cc->set_pc = riscv_cpu_set_pc; 3073 cc->get_pc = riscv_cpu_get_pc; 3074 cc->gdb_read_register = riscv_cpu_gdb_read_register; 3075 cc->gdb_write_register = riscv_cpu_gdb_write_register; 3076 cc->gdb_stop_before_watchpoint = true; 3077 cc->disas_set_info = riscv_cpu_disas_set_info; 3078 #ifndef CONFIG_USER_ONLY 3079 cc->sysemu_ops = &riscv_sysemu_ops; 3080 cc->get_arch_id = riscv_get_arch_id; 3081 #endif 3082 cc->gdb_arch_name = riscv_gdb_arch_name; 3083 #ifdef CONFIG_TCG 3084 cc->tcg_ops = &riscv_tcg_ops; 3085 #endif /* CONFIG_TCG */ 3086 3087 device_class_set_props(dc, riscv_cpu_properties); 3088 } 3089 3090 static void riscv_cpu_class_base_init(ObjectClass *c, const void *data) 3091 { 3092 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 3093 RISCVCPUClass *pcc = RISCV_CPU_CLASS(object_class_get_parent(c)); 3094 3095 if (pcc->def) { 3096 mcc->def = g_memdup2(pcc->def, sizeof(*pcc->def)); 3097 } else { 3098 mcc->def = g_new0(RISCVCPUDef, 1); 3099 } 3100 3101 if (data) { 3102 const RISCVCPUDef *def = data; 3103 if (def->misa_mxl_max) { 3104 assert(def->misa_mxl_max <= MXL_RV128); 3105 mcc->def->misa_mxl_max = def->misa_mxl_max; 3106 } 3107 if (def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) { 3108 assert(def->priv_spec <= PRIV_VERSION_LATEST); 3109 mcc->def->priv_spec = def->priv_spec; 3110 } 3111 if (def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) { 3112 assert(def->vext_spec != 0); 3113 mcc->def->vext_spec = def->vext_spec; 3114 } 3115 mcc->def->misa_ext |= def->misa_ext; 3116 3117 riscv_cpu_cfg_merge(&mcc->def->cfg, &def->cfg); 3118 } 3119 3120 if (!object_class_is_abstract(c)) { 3121 riscv_cpu_validate_misa_mxl(mcc); 3122 } 3123 } 3124 3125 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 3126 int max_str_len) 3127 { 3128 const RISCVIsaExtData *edata; 3129 char *old = *isa_str; 3130 char *new = *isa_str; 3131 3132 for (edata = isa_edata_arr; edata && edata->name; edata++) { 3133 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3134 new = g_strconcat(old, "_", edata->name, NULL); 3135 g_free(old); 3136 old = new; 3137 } 3138 } 3139 3140 *isa_str = new; 3141 } 3142 3143 char *riscv_isa_string(RISCVCPU *cpu) 3144 { 3145 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3146 int i; 3147 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 3148 char *isa_str = g_new(char, maxlen); 3149 int xlen = riscv_cpu_max_xlen(mcc); 3150 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); 3151 3152 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3153 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3154 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 3155 } 3156 } 3157 *p = '\0'; 3158 if (!cpu->cfg.short_isa_string) { 3159 riscv_isa_string_ext(cpu, &isa_str, maxlen); 3160 } 3161 return isa_str; 3162 } 3163 3164 #ifndef CONFIG_USER_ONLY 3165 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) 3166 { 3167 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); 3168 char **extensions = g_new(char *, maxlen); 3169 3170 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 3171 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 3172 extensions[*count] = g_new(char, 2); 3173 snprintf(extensions[*count], 2, "%c", 3174 qemu_tolower(riscv_single_letter_exts[i])); 3175 (*count)++; 3176 } 3177 } 3178 3179 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { 3180 if 
(isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 3181 extensions[*count] = g_strdup(edata->name); 3182 (*count)++; 3183 } 3184 } 3185 3186 return extensions; 3187 } 3188 3189 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) 3190 { 3191 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 3192 const size_t maxlen = sizeof("rv128i"); 3193 g_autofree char *isa_base = g_new(char, maxlen); 3194 g_autofree char *riscv_isa; 3195 char **isa_extensions; 3196 int count = 0; 3197 int xlen = riscv_cpu_max_xlen(mcc); 3198 3199 riscv_isa = riscv_isa_string(cpu); 3200 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); 3201 3202 snprintf(isa_base, maxlen, "rv%di", xlen); 3203 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); 3204 3205 isa_extensions = riscv_isa_extensions_list(cpu, &count); 3206 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", 3207 isa_extensions, count); 3208 3209 for (int i = 0; i < count; i++) { 3210 g_free(isa_extensions[i]); 3211 } 3212 3213 g_free(isa_extensions); 3214 } 3215 #endif 3216 3217 #define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max_, initfn) \ 3218 { \ 3219 .name = (type_name), \ 3220 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 3221 .instance_init = (initfn), \ 3222 .class_data = &(const RISCVCPUDef) { \ 3223 .misa_mxl_max = (misa_mxl_max_), \ 3224 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \ 3225 .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \ 3226 .cfg.max_satp_mode = -1, \ 3227 }, \ 3228 } 3229 3230 #define DEFINE_VENDOR_CPU(type_name, misa_mxl_max_, initfn) \ 3231 { \ 3232 .name = (type_name), \ 3233 .parent = TYPE_RISCV_VENDOR_CPU, \ 3234 .instance_init = (initfn), \ 3235 .class_data = &(const RISCVCPUDef) { \ 3236 .misa_mxl_max = (misa_mxl_max_), \ 3237 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \ 3238 .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \ 3239 .cfg.max_satp_mode = -1, \ 3240 }, \ 3241 } 3242 3243 #define DEFINE_BARE_CPU(type_name, misa_mxl_max_, initfn) \ 3244 { \ 3245 .name = (type_name), \ 3246 .parent = TYPE_RISCV_BARE_CPU, \ 3247 .instance_init = (initfn), \ 3248 .class_data = &(const RISCVCPUDef) { \ 3249 .misa_mxl_max = (misa_mxl_max_), \ 3250 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \ 3251 .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \ 3252 .cfg.max_satp_mode = -1, \ 3253 }, \ 3254 } 3255 3256 #define DEFINE_PROFILE_CPU(type_name, misa_mxl_max_, initfn) \ 3257 { \ 3258 .name = (type_name), \ 3259 .parent = TYPE_RISCV_BARE_CPU, \ 3260 .instance_init = (initfn), \ 3261 .class_data = &(const RISCVCPUDef) { \ 3262 .misa_mxl_max = (misa_mxl_max_), \ 3263 .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \ 3264 .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \ 3265 .cfg.max_satp_mode = -1, \ 3266 }, \ 3267 } 3268 3269 static const TypeInfo riscv_cpu_type_infos[] = { 3270 { 3271 .name = TYPE_RISCV_CPU, 3272 .parent = TYPE_CPU, 3273 .instance_size = sizeof(RISCVCPU), 3274 .instance_align = __alignof(RISCVCPU), 3275 .instance_init = riscv_cpu_init, 3276 .instance_post_init = riscv_cpu_post_init, 3277 .abstract = true, 3278 .class_size = sizeof(RISCVCPUClass), 3279 .class_init = riscv_cpu_common_class_init, 3280 .class_base_init = riscv_cpu_class_base_init, 3281 }, 3282 { 3283 .name = TYPE_RISCV_DYNAMIC_CPU, 3284 .parent = TYPE_RISCV_CPU, 3285 .abstract = true, 3286 }, 3287 { 3288 .name = TYPE_RISCV_VENDOR_CPU, 3289 .parent = TYPE_RISCV_CPU, 3290 .abstract = true, 3291 }, 3292 { 3293 .name = TYPE_RISCV_BARE_CPU, 3294 .parent = TYPE_RISCV_CPU, 3295 .instance_init = riscv_bare_cpu_init, 3296 .abstract = true, 3297 }, 3298 #if 
defined(TARGET_RISCV32) 3299 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), 3300 #elif defined(TARGET_RISCV64) 3301 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), 3302 #endif 3303 3304 #if defined(TARGET_RISCV32) || \ 3305 (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3306 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), 3307 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), 3308 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), 3309 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), 3310 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), 3311 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), 3312 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), 3313 #endif 3314 3315 #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) 3316 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), 3317 #endif 3318 3319 #if defined(TARGET_RISCV64) 3320 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), 3321 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), 3322 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), 3323 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), 3324 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), 3325 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), 3326 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), 3327 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, 3328 MXL_RV64, rv64_xiangshan_nanhu_cpu_init), 3329 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 3330 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), 3331 #endif /* CONFIG_TCG && !CONFIG_USER_ONLY */ 3332 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), 3333 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), 3334 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), 3335 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), 3336 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init), 3337 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init), 3338 #endif /* TARGET_RISCV64 */ 3339 }; 3340 3341 DEFINE_TYPES(riscv_cpu_type_infos) 3342
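/*
 * Worked example for riscv_isa_string() and riscv_isa_write_fdt() above
 * (illustrative values only, not tied to a particular CPU model): an
 * RV64 CPU with misa IMAFDC and only Zicsr and Zifencei enabled among
 * the multi-letter extensions would produce
 *
 *     riscv,isa = "rv64imafdc_zicsr_zifencei";
 *     riscv,isa-base = "rv64i";
 *     riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
 *                            "zicsr", "zifencei";
 *
 * following the single-letter order in riscv_single_letter_exts and
 * the multi-letter order in isa_edata_arr.
 */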