/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "exec/target_page.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "accel/accel-cpu-target.h"
#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#include "system/tcg.h"
#include "exec/icount.h"
#endif

/*
 * Hash that stores user set extensions. Keys are the cfg offset
 * (multi-letter exts) or the MISA bit (single-letter exts); values are
 * the user-chosen bool stuffed into the pointer.
 */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

/* ext -> implied-extension rule lookup tables, built once per process. */
static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

/* True if the user explicitly set this multi-letter extension. */
static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

/* True if the user explicitly set this MISA (single-letter) extension. */
static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

/* Record the user's choice for a multi-letter extension. */
static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

/* Record the user's choice for a MISA extension bit. */
static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

/* Set or clear a MISA bit in both misa_ext and misa_ext_mask. */
static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

/* Like priv_spec_to_str(), but asserts that the version is a known one. */
static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

/*
 * Assemble the (pc, flags) pair that selects a translation block for
 * the current CPU state. Every piece of state that affects translation
 * must be folded into TB_FLAGS here.
 */
static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs)
{
    CPURISCVState *env = cpu_env(cs);
    RISCVCPU *cpu = env_archcpu(env);
    RISCVExtStatus fs, vs;
    uint32_t flags = 0;
    bool pm_signext = riscv_cpu_virt_mem_enabled(env);

    if (cpu->cfg.ext_zve32x) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
         * to true only when maxsz >= 8 bytes.
         */

        /* lmul encoded as in DisasContext::lmul */
        int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
        uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
        uint32_t maxsz = vlmax << vsew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
                           FIELD_EX64(env->vtype, VTYPE, VTA));
        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
                           FIELD_EX64(env->vtype, VTYPE, VMA));
        flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
    } else {
        /* No vector support: treat vtype as illegal. */
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

    if (cpu_get_fcfien(env)) {
        /*
         * For Forward CFI, only the expectation of a lpad at
         * the start of the block is tracked via env->elp. env->elp
         * is turned on during jalr translation.
         */
        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
    }

    if (cpu_get_bcfien(env)) {
        flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
    }

#ifdef CONFIG_USER_ONLY
    /* User mode: FPU and vector unit are always usable. */
    fs = EXT_STATUS_DIRTY;
    vs = EXT_STATUS_DIRTY;
#else
    flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);

    flags |= riscv_env_mmu_index(env, 0);
    fs = get_field(env->mstatus, MSTATUS_FS);
    vs = get_field(env->mstatus, MSTATUS_VS);

    if (env->virt_enabled) {
        flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
        /*
         * Merge DISABLED and !DIRTY states using MIN.
         * We will set both fields when dirtying.
         */
        fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
        vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
    }

    /* With Zfinx, floating point is enabled/disabled by Smstateen. */
    if (!riscv_has_ext(env, RVF)) {
        fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
             ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
    }

    if (cpu->cfg.debug && !icount_enabled()) {
        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
    flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
    flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
    flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);

    return (TCGTBCPUState){
        /* In RV32 the upper PC bits are not architecturally visible. */
        .pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc,
        .flags = flags
    };
}

/*
 * Re-synchronize env->pc from a TB after a lookup. Only valid when the
 * TB is not PC-relative; with CF_PCREL the PC is recovered through
 * riscv_restore_state_to_opc() instead.
 */
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            /* Sign-extend into the RV64 container for RV32 guests. */
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

/*
 * Restore CPU state from per-insn data recorded at translation time:
 * data[0] = pc (page offset only when CF_PCREL), data[1] = faulting
 * instruction bits, data[2] = exception-specific uw2 payload.
 */
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        /* data[0] holds only the in-page offset; keep the current page. */
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
    env->excp_uw2 = data[2];
}

/* TCG accelerator hooks for the RISC-V target. */
const TCGCPUOps riscv_tcg_ops = {
    .mttcg_supported = true,
    /* No guest-mandated baseline memory ordering beyond per-insn rules. */
    .guest_default_memory_order = 0,

    .initialize = riscv_translate_init,
    .translate_code = riscv_translate_code,
    .get_tb_cpu_state = riscv_get_tb_cpu_state,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,
    .mmu_index = riscv_cpu_mmu_index,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .cpu_exec_reset = cpu_reset,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

/*
 * Return the minimum priv spec version required by the extension at
 * @ext_offset. Aborts if the offset is not listed in isa_edata_arr.
 */
static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

/*
 * Return the user-visible name of the extension or named feature at
 * @ext_offset. Aborts if the offset is unknown.
 */
static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    /* Not a regular extension: search the named-feature table. */
    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

/* True if @ext_offset refers to a named feature rather than an ISA ext. */
static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

/* Apply the cfg side effects of turning on the named feature @feat_offset. */
static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    switch (feat_offset) {
    case CPU_CFG_OFFSET(ext_zic64b):
        /* zic64b == all cache-block operations use 64-byte blocks. */
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
        break;
    case CPU_CFG_OFFSET(ext_sha):
        if (!cpu_misa_ext_is_user_set(RVH)) {
            riscv_cpu_write_misa_bit(cpu, RVH, true);
        }
        /* fallthrough */
    case CPU_CFG_OFFSET(ext_ssstateen):
        cpu->cfg.ext_smstateen = true;
        break;
    }
}

/*
 * Raise env->priv_ver to the minimum version required by the extension
 * at @ext_offset, if it is currently lower.
 */
static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

/*
 * Enable/disable an extension as a side effect of another setting,
 * without overriding an explicit user choice and without enabling
 * extensions newer than the configured priv spec.
 */
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    /* The user's explicit setting always wins. */
    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

/* Validate MISA bits against the selected priv spec version. */
static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

/* Check VLEN/ELEN bounds for the V (vector) extension configuration. */
static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

/*
 * Turn off every enabled extension whose minimum priv spec version is
 * newer than the CPU's configured priv_ver, warning the user per ext.
 */
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);

            /*
             * Do not show user warnings for named features that users
             * can't enable/disable in the command line. See commit
             * 68c9e54bea for more info.
             */
            if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
                continue;
            }
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

/*
 * Recompute the named features (priv version markers, zic64b, ssstateen,
 * sha, ziccrse) that are derived from other cfg fields.
 */
static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    /* zic64b is active iff all cache-block sizes are 64 bytes. */
    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64;

    cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;

    cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
                       cpu->cfg.ext_ssstateen;

    cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
}

/*
 * RVG = IMAFD + Zicsr + Zifencei. Enable any missing member that the
 * user did not explicitly disable; warn about the ones they did.
 */
static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

/*
 * RVB = Zba + Zbb + Zbs. Enable any member the user did not explicitly
 * disable; warn about the ones they did.
 */
static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extensions requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        /* Zfinx reuses the integer registers; F is mutually exclusive. */
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extensions require Zca");
        return;
    }

    if (mcc->def->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    /* zicntr/zihpm: silently drop unless the user asked for them. */
    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    /* No zihpm means no programmable counters at all. */
    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    if (mcc->def->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
        error_setg(errp, "svukte is not supported for RV32");
        return;
    }

    if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
        (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
            cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
            error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
            return;
        }
        cpu->cfg.ext_smctr = false;
        cpu->cfg.ext_ssctr = false;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
/*
 * True if the CPU's maximum satp mode satisfies @profile's requirement;
 * warn (when @send_warn) about the mismatch otherwise.
 */
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = cpu->cfg.max_satp_mode;

    assert(satp_max >= 0);
    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

/*
 * A profile is only enabled if its parent profile (if any) is enabled
 * too; propagate the parent's enabled state down to @profile.
 */
static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
                                           RISCVCPUProfile *profile,
                                           RISCVCPUProfile *parent)
{
    const char *parent_name;
    bool parent_enabled;

    if (!profile->enabled || !parent) {
        return;
    }

    parent_name = parent->name;
    parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL);
    profile->enabled = parent_enabled;
}

/*
 * Recompute whether @profile is satisfied by the current CPU config
 * (satp mode, priv spec, MISA bits, multi-letter exts), warning on each
 * unmet requirement when the user explicitly enabled the profile.
 */
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec > env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i
= 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    /* A profile also requires its parent profiles to be enabled. */
    riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
    riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
}

/* Re-validate every known profile against the current configuration. */
static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

/*
 * Build the ext -> implied-rule hash tables and per-hart 'applied'
 * bitmaps. The rule tables are global, so this runs only once.
 */
static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}

/*
 * Recursively enable everything implied by @rule on @cpu. The per-hart
 * 'enabled' bitmap (system mode only) guards against re-applying a rule
 * and bounds the recursion.
 */
static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        /* Mark this rule as applied for this hart. */
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        /* Zcf only exists on RV32 and requires F. */
        if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf has a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}

/* Apply all implied-extension rules that match the current config. */
static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

/*
 * Finalize the CPU feature set: apply implied rules, validate MISA vs
 * priv spec, recompute named features/profiles, cross-check extensions
 * and set up the PMU. Called at realize time, before translation starts.
 */
void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* Sscofpmf needs a timer for counter-overflow interrupts. */
        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

/*
 * Collect the decoder functions whose guard predicate accepts the final
 * CPU config; the translator walks this list per instruction.
 */
void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

/* Every CPU model except the KVM-only 'host' CPU works with TCG. */
bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);

    if (mcc->def->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_setg(errp,
                   "128-bit RISC-V currently does not work with Multi "
                   "Threaded TCG. Please use: -accel tcg,thread=single");
        return false;
    }

    CPURISCVState *env = &cpu->env;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one.
*/ 1181 if (riscv_has_ext(env, RVH)) { 1182 env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP; 1183 } 1184 #endif 1185 1186 return true; 1187 } 1188 1189 typedef struct RISCVCPUMisaExtConfig { 1190 target_ulong misa_bit; 1191 bool enabled; 1192 } RISCVCPUMisaExtConfig; 1193 1194 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1195 void *opaque, Error **errp) 1196 { 1197 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1198 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1199 RISCVCPU *cpu = RISCV_CPU(obj); 1200 CPURISCVState *env = &cpu->env; 1201 bool vendor_cpu = riscv_cpu_is_vendor(obj); 1202 bool prev_val, value; 1203 1204 if (!visit_type_bool(v, name, &value, errp)) { 1205 return; 1206 } 1207 1208 cpu_misa_ext_add_user_opt(misa_bit, value); 1209 1210 prev_val = env->misa_ext & misa_bit; 1211 1212 if (value == prev_val) { 1213 return; 1214 } 1215 1216 if (value) { 1217 if (vendor_cpu) { 1218 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1219 error_setg(errp, "'%s' CPU does not allow enabling extensions", 1220 cpuname); 1221 return; 1222 } 1223 1224 if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) { 1225 /* 1226 * Note: the 'priv_spec' command line option, if present, 1227 * will take precedence over this priv_ver bump. 
1228 */ 1229 env->priv_ver = PRIV_VERSION_1_12_0; 1230 } 1231 } 1232 1233 riscv_cpu_write_misa_bit(cpu, misa_bit, value); 1234 } 1235 1236 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1237 void *opaque, Error **errp) 1238 { 1239 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1240 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1241 RISCVCPU *cpu = RISCV_CPU(obj); 1242 CPURISCVState *env = &cpu->env; 1243 bool value; 1244 1245 value = env->misa_ext & misa_bit; 1246 1247 visit_type_bool(v, name, &value, errp); 1248 } 1249 1250 #define MISA_CFG(_bit, _enabled) \ 1251 {.misa_bit = _bit, .enabled = _enabled} 1252 1253 static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 1254 MISA_CFG(RVA, true), 1255 MISA_CFG(RVC, true), 1256 MISA_CFG(RVD, true), 1257 MISA_CFG(RVF, true), 1258 MISA_CFG(RVI, true), 1259 MISA_CFG(RVE, false), 1260 MISA_CFG(RVM, true), 1261 MISA_CFG(RVS, true), 1262 MISA_CFG(RVU, true), 1263 MISA_CFG(RVH, true), 1264 MISA_CFG(RVV, false), 1265 MISA_CFG(RVG, false), 1266 MISA_CFG(RVB, false), 1267 }; 1268 1269 /* 1270 * We do not support user choice tracking for MISA 1271 * extensions yet because, so far, we do not silently 1272 * change MISA bits during realize() (RVG enables MISA 1273 * bits but the user is warned about it). 
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        /* Vendor CPUs keep their own MISA defaults; generic CPUs use ours. */
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}

/*
 * QOM setter for a profile property: toggles the profile together with
 * everything it mandates — parent profiles, MISA bits, multi-letter
 * extensions, the priv spec version and (system mode) the satp mode.
 */
static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    /* Propagate the change to the profiles this one builds upon. */
    if (profile->u_parent != NULL) {
        object_property_set_bool(obj, profile->u_parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->s_parent != NULL) {
        object_property_set_bool(obj, profile->s_parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        object_property_set_bool(obj, "mmu", true, NULL);
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    /* Flip every MISA bit the profile mandates. */
    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    /* Flip every multi-letter extension the profile mandates. */
    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

/* QOM getter: report whether the profile is currently enabled. */
static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

/* Register one boolean QOM property per known RISC-V profile. */
static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}

/*
 * Deprecated extension property names are spelled with a capital first
 * letter; the lower-case spelling is the supported one.
 * NOTE(review): <ctype.h> functions require an argument representable
 * as unsigned char; consider isupper((unsigned char)ext_name[0]).
 */
static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated in the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}

/*
 * QOM setter for a multi-letter extension property. Records the user's
 * choice, warns on deprecated spellings, rejects enabling extensions on
 * vendor CPUs, and bumps the priv spec version when needed.
 */
static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    /* No change requested: nothing to do. */
    if (value == prev_val) {
        return;
    }

    /* Vendor CPUs have a fixed ISA: extensions may only be disabled. */
    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

/* QOM getter: report whether the multi-letter extension is enabled. */
static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

/* Register one extension property and, for generic CPUs, its default. */
static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    /* Vendor CPUs and deprecated spellings keep their existing values. */
    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set def val directly instead of using
     * object_property_set_bool() to save the set()
     * callback hash for user inputs.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

/* Register properties for every entry of a NULL-name-terminated array. */
static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG and RVV that are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility concerns.
     * Disable those, the user can still opt in to them on the command line.
     */
    cpu->cfg.ext_svade = false;

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    /* Zcf only exists on RV32. */
    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }

    /*
     * TODO: ext_smrnmi requires OpenSBI changes that our current
     * image does not have. Disable it for now.
     */
    if (cpu->cfg.ext_smrnmi) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
    }

    /*
     * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup
     * to avoid generating a double trap. OpenSBI does not currently support it,
     * disable it for now.
     */
    if (cpu->cfg.ext_smdbltrp) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
    }
}

/* True for the "max" CPU model (all ratified non-vendor extensions). */
static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

/*
 * Per-CPU TCG accel instance init: (re)creates the user-option hash
 * tables, lazily creates the implied-rules tables, and registers all
 * user-facing QOM properties.
 */
static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);

    if (!misa_ext_implied_rules) {
        misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    if (!multi_ext_implied_rules) {
        multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

/* Wire the TCG accel hooks into the AccelCPUClass. */
static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);