/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "accel/accel-cpu-target.h"
#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
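        /*
         * With PC-relative translation, data[0] only holds the in-page
         * offset of the PC; rebuild the full value from the page base
         * currently in env->pc.
         */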
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
    env->excp_uw2 = data[2];
}

static const TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .translate_code = riscv_translate_code,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,
    .mmu_index = riscv_cpu_mmu_index,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    switch (feat_offset) {
    case CPU_CFG_OFFSET(ext_zic64b):
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
        break;
    case CPU_CFG_OFFSET(ext_sha):
        if (!cpu_misa_ext_is_user_set(RVH)) {
            riscv_cpu_write_misa_bit(cpu, RVH, true);
        }
        /* fallthrough */
    case CPU_CFG_OFFSET(ext_ssstateen):
        cpu->cfg.ext_smstateen = true;
        break;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);

            /*
             * Do not show user warnings for named features that users
             * can't enable/disable in the command line. See commit
             * 68c9e54bea for more info.
             */
            if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
                continue;
            }
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64;

    cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;

    cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
                       cpu->cfg.ext_ssstateen;

    cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
}

static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
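 * Hard incompatibilities are reported through 'errp'; extensions that
 * merely cannot be honored (e.g. zicntr without zicsr) are silently
 * disabled unless the user enabled them explicitly.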
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extension requires Zca extension");
        return;
    }

    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    if (mcc->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
        error_setg(errp, "svukte is not supported for RV32");
        return;
    }

    if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
        (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
            cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
            error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
            return;
        }
        cpu->cfg.ext_smctr = false;
        cpu->cfg.ext_ssctr = false;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
                                           RISCVCPUProfile *profile,
                                           RISCVCPUProfile *parent)
{
    const char *parent_name;
    bool parent_enabled;

    if (!profile->enabled || !parent) {
        return;
    }

    parent_name = parent->name;
    parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL);
    profile->enabled = parent_enabled;
}

static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec > env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
    riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
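    /* Set once the shared implied-rule hash tables have been populated. */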
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}

static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}

static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

    if (mcc->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_setg(errp,
                   "128-bit RISC-V currently does not work with Multi "
                   "Threaded TCG. Please use: -accel tcg,thread=single");
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}

static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    if (profile->u_parent != NULL) {
        object_property_set_bool(obj, profile->u_parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->s_parent != NULL) {
        object_property_set_bool(obj, profile->s_parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        object_property_set_bool(obj, "mmu", true, NULL);
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated on the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}

static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set the default value directly instead of using
     * object_property_set_bool(), which would trigger the set()
     * callback and record the default in the user-options hash.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG and RVV, which are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility concerns.
     * Disable those; the user can still opt in to them on the command line.
     */
    cpu->cfg.ext_svade = false;

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }

    /*
     * TODO: ext_smrnmi requires OpenSBI changes that our current
     * image does not have. Disable it for now.
     */
    if (cpu->cfg.ext_smrnmi) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
    }

    /*
     * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on
     * startup to avoid generating a double trap. OpenSBI does not
     * currently support it; disable it for now.
     */
    if (cpu->cfg.ext_smdbltrp) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);

    if (!misa_ext_implied_rules) {
        misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    if (!multi_ext_implied_rules) {
        multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All cpus use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void riscv_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
}

static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = riscv_tcg_cpu_class_init;
    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);