/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "exec/target_page.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "accel/accel-cpu-target.h"
#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

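/*
 * Restore CPU state when unwinding to a guest instruction boundary:
 * data[0] holds the pc (only the in-page offset when CF_PCREL is in use),
 * data[1] the instruction bits for env->bins and data[2] env->excp_uw2.
 */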
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
    env->excp_uw2 = data[2];
}

static const TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .translate_code = riscv_translate_code,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,
    .mmu_index = riscv_cpu_mmu_index,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    switch (feat_offset) {
    case CPU_CFG_OFFSET(ext_zic64b):
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
        break;
    case CPU_CFG_OFFSET(ext_sha):
        if (!cpu_misa_ext_is_user_set(RVH)) {
            riscv_cpu_write_misa_bit(cpu, RVH, true);
        }
        /* fallthrough */
    case CPU_CFG_OFFSET(ext_ssstateen):
        cpu->cfg.ext_smstateen = true;
        break;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

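/*
 * Enable/disable an extension on behalf of another setting (e.g. an
 * implied rule), but never override an explicit user choice and never
 * enable an extension that the current priv spec version is too old for.
 */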
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);

            /*
             * Do not show user warnings for named features that users
             * can't enable/disable in the command line. See commit
             * 68c9e54bea for more info.
             */
            if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
                continue;
            }
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

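/*
 * Named features are not user-configurable: derive their values from
 * the priv spec version and the regular MISA/extension state.
 */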
static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64;

    cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;

    cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
                       cpu->cfg.ext_ssstateen;

    cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
}

static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extensions requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

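    /*
     * Zfinx performs floating-point operations in the integer register
     * file, so it cannot coexist with the F extension.
     */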
    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extensions require Zca");
        return;
    }

    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(errp,
                   "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    if (mcc->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
        error_setg(errp, "svukte is not supported for RV32");
        return;
    }

    if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
        (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
            cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
            error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
            return;
        }
        cpu->cfg.ext_smctr = false;
        cpu->cfg.ext_ssctr = false;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
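/*
 * A profile mandates a minimum satp mode. Fail the profile check, and
 * optionally warn the user, if the strongest satp mode supported by the
 * hart is weaker than the one the profile requires.
 */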
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
                                           RISCVCPUProfile *profile,
                                           RISCVCPUProfile *parent)
{
    const char *parent_name;
    bool parent_enabled;

    if (!profile->enabled || !parent) {
        return;
    }

    parent_name = parent->name;
    parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL);
    profile->enabled = parent_enabled;
}

static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec > env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
    riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

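/*
 * Populate the global hash tables that map a MISA bit or a multi-letter
 * extension to its implied-extensions rule. This only needs to happen
 * once, no matter how many CPUs are instantiated.
 */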
static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}

static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}

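/*
 * Apply every implied-extensions rule whose triggering MISA bit or
 * multi-letter extension is enabled on this CPU.
 */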
static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

    if (mcc->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_setg(errp,
                   "128-bit RISC-V currently does not work with Multi "
                   "Threaded TCG. Please use: -accel tcg,thread=single");
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

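/*
 * QOM setter for a single-letter MISA extension property: record the
 * user's choice and update misa_ext/misa_ext_mask. Vendor CPUs may
 * disable extensions but are not allowed to enable new ones.
 */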
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
1169 */ 1170 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 1171 { 1172 bool use_def_vals = riscv_cpu_is_generic(cpu_obj); 1173 int i; 1174 1175 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 1176 const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 1177 int bit = misa_cfg->misa_bit; 1178 const char *name = riscv_get_misa_ext_name(bit); 1179 const char *desc = riscv_get_misa_ext_description(bit); 1180 1181 /* Check if KVM already created the property */ 1182 if (object_property_find(cpu_obj, name)) { 1183 continue; 1184 } 1185 1186 object_property_add(cpu_obj, name, "bool", 1187 cpu_get_misa_ext_cfg, 1188 cpu_set_misa_ext_cfg, 1189 NULL, (void *)misa_cfg); 1190 object_property_set_description(cpu_obj, name, desc); 1191 if (use_def_vals) { 1192 riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit, 1193 misa_cfg->enabled); 1194 } 1195 } 1196 } 1197 1198 static void cpu_set_profile(Object *obj, Visitor *v, const char *name, 1199 void *opaque, Error **errp) 1200 { 1201 RISCVCPUProfile *profile = opaque; 1202 RISCVCPU *cpu = RISCV_CPU(obj); 1203 bool value; 1204 int i, ext_offset; 1205 1206 if (riscv_cpu_is_vendor(obj)) { 1207 error_setg(errp, "Profile %s is not available for vendor CPUs", 1208 profile->name); 1209 return; 1210 } 1211 1212 if (cpu->env.misa_mxl != MXL_RV64) { 1213 error_setg(errp, "Profile %s only available for 64 bit CPUs", 1214 profile->name); 1215 return; 1216 } 1217 1218 if (!visit_type_bool(v, name, &value, errp)) { 1219 return; 1220 } 1221 1222 profile->user_set = true; 1223 profile->enabled = value; 1224 1225 if (profile->u_parent != NULL) { 1226 object_property_set_bool(obj, profile->u_parent->name, 1227 profile->enabled, NULL); 1228 } 1229 1230 if (profile->s_parent != NULL) { 1231 object_property_set_bool(obj, profile->s_parent->name, 1232 profile->enabled, NULL); 1233 } 1234 1235 if (profile->enabled) { 1236 cpu->env.priv_ver = profile->priv_spec; 1237 } 1238 1239 #ifndef CONFIG_USER_ONLY 1240 if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { 1241 object_property_set_bool(obj, "mmu", true, NULL); 1242 const char *satp_prop = satp_mode_str(profile->satp_mode, 1243 riscv_cpu_is_32bit(cpu)); 1244 object_property_set_bool(obj, satp_prop, profile->enabled, NULL); 1245 } 1246 #endif 1247 1248 for (i = 0; misa_bits[i] != 0; i++) { 1249 uint32_t bit = misa_bits[i]; 1250 1251 if (!(profile->misa_ext & bit)) { 1252 continue; 1253 } 1254 1255 if (bit == RVI && !profile->enabled) { 1256 /* 1257 * Disabling profiles will not disable the base 1258 * ISA RV64I. 
1259 */ 1260 continue; 1261 } 1262 1263 cpu_misa_ext_add_user_opt(bit, profile->enabled); 1264 riscv_cpu_write_misa_bit(cpu, bit, profile->enabled); 1265 } 1266 1267 for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { 1268 ext_offset = profile->ext_offsets[i]; 1269 1270 if (profile->enabled) { 1271 if (cpu_cfg_offset_is_named_feat(ext_offset)) { 1272 riscv_cpu_enable_named_feat(cpu, ext_offset); 1273 } 1274 1275 cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset); 1276 } 1277 1278 cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled); 1279 isa_ext_update_enabled(cpu, ext_offset, profile->enabled); 1280 } 1281 } 1282 1283 static void cpu_get_profile(Object *obj, Visitor *v, const char *name, 1284 void *opaque, Error **errp) 1285 { 1286 RISCVCPUProfile *profile = opaque; 1287 bool value = profile->enabled; 1288 1289 visit_type_bool(v, name, &value, errp); 1290 } 1291 1292 static void riscv_cpu_add_profiles(Object *cpu_obj) 1293 { 1294 for (int i = 0; riscv_profiles[i] != NULL; i++) { 1295 const RISCVCPUProfile *profile = riscv_profiles[i]; 1296 1297 object_property_add(cpu_obj, profile->name, "bool", 1298 cpu_get_profile, cpu_set_profile, 1299 NULL, (void *)profile); 1300 1301 /* 1302 * CPUs might enable a profile right from the start. 1303 * Enable its mandatory extensions right away in this 1304 * case. 1305 */ 1306 if (profile->enabled) { 1307 object_property_set_bool(cpu_obj, profile->name, true, NULL); 1308 } 1309 } 1310 } 1311 1312 static bool cpu_ext_is_deprecated(const char *ext_name) 1313 { 1314 return isupper(ext_name[0]); 1315 } 1316 1317 /* 1318 * String will be allocated in the heap. Caller is responsible 1319 * for freeing it. 1320 */ 1321 static char *cpu_ext_to_lower(const char *ext_name) 1322 { 1323 char *ret = g_malloc0(strlen(ext_name) + 1); 1324 1325 strcpy(ret, ext_name); 1326 ret[0] = tolower(ret[0]); 1327 1328 return ret; 1329 } 1330 1331 static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, 1332 void *opaque, Error **errp) 1333 { 1334 const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; 1335 RISCVCPU *cpu = RISCV_CPU(obj); 1336 bool vendor_cpu = riscv_cpu_is_vendor(obj); 1337 bool prev_val, value; 1338 1339 if (!visit_type_bool(v, name, &value, errp)) { 1340 return; 1341 } 1342 1343 if (cpu_ext_is_deprecated(multi_ext_cfg->name)) { 1344 g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name); 1345 1346 warn_report("CPU property '%s' is deprecated. 
static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set def val directly instead of using
     * object_property_set_bool() to save the set()
     * callback hash for user inputs.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG and RVV, which are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility concerns.
     * Disable those, the user can still opt in to them on the command line.
     */
1460 */ 1461 cpu->cfg.ext_svade = false; 1462 1463 /* set vector version */ 1464 env->vext_ver = VEXT_VERSION_1_00_0; 1465 1466 /* Zfinx is not compatible with F. Disable it */ 1467 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false); 1468 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false); 1469 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false); 1470 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false); 1471 1472 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false); 1473 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false); 1474 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false); 1475 1476 if (env->misa_mxl != MXL_RV32) { 1477 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false); 1478 } 1479 1480 /* 1481 * TODO: ext_smrnmi requires OpenSBI changes that our current 1482 * image does not have. Disable it for now. 1483 */ 1484 if (cpu->cfg.ext_smrnmi) { 1485 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false); 1486 } 1487 1488 /* 1489 * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup 1490 * to avoid generating a double trap. OpenSBI does not currently support it, 1491 * disable it for now. 1492 */ 1493 if (cpu->cfg.ext_smdbltrp) { 1494 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false); 1495 } 1496 } 1497 1498 static bool riscv_cpu_has_max_extensions(Object *cpu_obj) 1499 { 1500 return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL; 1501 } 1502 1503 static void riscv_tcg_cpu_instance_init(CPUState *cs) 1504 { 1505 RISCVCPU *cpu = RISCV_CPU(cs); 1506 Object *obj = OBJECT(cpu); 1507 1508 misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); 1509 multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); 1510 1511 if (!misa_ext_implied_rules) { 1512 misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal); 1513 } 1514 1515 if (!multi_ext_implied_rules) { 1516 multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal); 1517 } 1518 1519 riscv_cpu_add_user_properties(obj); 1520 1521 if (riscv_cpu_has_max_extensions(obj)) { 1522 riscv_init_max_cpu_extensions(obj); 1523 } 1524 } 1525 1526 static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) 1527 { 1528 /* 1529 * All cpus use the same set of operations. 1530 */ 1531 cc->tcg_ops = &riscv_tcg_ops; 1532 } 1533 1534 static void riscv_tcg_cpu_class_init(CPUClass *cc) 1535 { 1536 cc->init_accel_cpu = riscv_tcg_cpu_init_ops; 1537 } 1538 1539 static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data) 1540 { 1541 AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); 1542 1543 acc->cpu_class_init = riscv_tcg_cpu_class_init; 1544 acc->cpu_instance_init = riscv_tcg_cpu_instance_init; 1545 acc->cpu_target_realize = riscv_tcg_cpu_realize; 1546 } 1547 1548 static const TypeInfo riscv_tcg_cpu_accel_type_info = { 1549 .name = ACCEL_CPU_NAME("tcg"), 1550 1551 .parent = TYPE_ACCEL_CPU, 1552 .class_init = riscv_tcg_cpu_accel_class_init, 1553 .abstract = true, 1554 }; 1555 1556 static void riscv_tcg_cpu_accel_register_types(void) 1557 { 1558 type_register_static(&riscv_tcg_cpu_accel_type_info); 1559 } 1560 type_init(riscv_tcg_cpu_accel_register_types); 1561