/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

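/*
 * Called when TCG unwinds out of a translation block (e.g. after an
 * exception) to rebuild architectural state from the values recorded at
 * translate time: data[0] carries the pc (only the in-page offset when
 * CF_PCREL is in use), data[1] the instruction bits stored in env->bins
 * and data[2] the extra exception unwind word stored in env->excp_uw2.
 */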
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
    env->excp_uw2 = data[2];
}

static const TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    switch (feat_offset) {
    case CPU_CFG_OFFSET(ext_zic64b):
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
        break;
    case CPU_CFG_OFFSET(ext_ssstateen):
        cpu->cfg.ext_smstateen = true;
        break;
    }
}

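/*
 * Raise env->priv_ver to the minimum privileged spec version that
 * introduced the extension at @ext_offset, so that enabling an extension
 * does not leave the hart advertising an older spec than the extension
 * requires.
 */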
static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

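/*
 * Example: selecting an older spec via the 'priv_spec' property while
 * keeping the default extension set will land here with extensions
 * enabled whose minimum spec version is newer; those are switched off
 * below with a warning instead of failing realize.
 */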
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);

            /*
             * Do not show user warnings for named features that users
             * can't enable/disable in the command line. See commit
             * 68c9e54bea for more info.
             */
            if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
                continue;
            }
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64;

    cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;
}

static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

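/*
 * The B extension is shorthand for Zba + Zbb + Zbs: enable whichever of
 * the three the user left untouched and warn about any the user
 * explicitly disabled, mirroring the RVG handling above.
 */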
static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extensions require Zca");
        return;
    }

    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    if (mcc->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
        error_setg(errp, "svukte is not supported for RV32");
        return;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

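/*
 * Profile support: a profile is only reported as enabled if, after all
 * user overrides, the CPU still satisfies its required satp mode, priv
 * spec version, MISA bits and multi-letter extensions, as well as those
 * of its parent profile.
 */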
#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool parent_enabled, profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec != env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    if (profile->parent != NULL) {
        parent_enabled = object_property_get_bool(OBJECT(cpu),
                                                  profile->parent->name,
                                                  NULL);
        profile->enabled = profile->enabled && parent_enabled;
    }
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

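/*
 * Implied-extension rules: each rule in riscv_misa_ext_implied_rules and
 * riscv_multi_ext_implied_rules names one extension plus everything it
 * implies. Index them by extension here so cpu_enable_implied_rule() can
 * chase implications transitively; the per-hart 'enabled' bitmap keeps a
 * rule from being applied more than once for the same hart.
 */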
static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}

static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}

static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return false;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

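/*
 * Single-letter (MISA) extensions are exposed as boolean QOM properties
 * named after the letter, so a command line such as '-cpu rv64,v=on,h=off'
 * toggles them; the setter below keeps misa_ext/misa_ext_mask and the
 * user-option hash in sync.
 */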
typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}

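/*
 * Profiles are boolean properties as well, e.g. '-cpu rv64,rva22u64=on'
 * (property names come from riscv_profiles[]). Setting one cascades to
 * its parent profile and marks every mandatory MISA bit and multi-letter
 * extension as if the user had set it individually.
 */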
static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    if (profile->parent != NULL) {
        object_property_set_bool(obj, profile->parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        object_property_set_bool(obj, "mmu", true, NULL);
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated in the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}

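/*
 * Setter for the multi-letter extension properties, e.g.
 * '-cpu rv64,zba=on,zbb=off'. Every explicit write is remembered in
 * multi_ext_user_opts so later automatic updates never override a user
 * choice.
 */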
static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set the default value directly instead of going through
     * object_property_set_bool(): the set() callback records every
     * write in the user-options hash, which must only track values
     * the user set explicitly.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}

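/*
 * Usage note: '-cpu max' resolves to a CPU type for which
 * riscv_cpu_has_max_extensions() is true, so riscv_tcg_cpu_instance_init()
 * below calls riscv_init_max_cpu_extensions(); individual features can
 * still be trimmed on the command line, e.g. '-cpu max,v=off'.
 */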
/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG, RVJ and RVV that are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility concerns.
     * Disable those; the user can still opt in to them on the command line.
     */
    cpu->cfg.ext_svade = false;

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);

    if (!misa_ext_implied_rules) {
        misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    if (!multi_ext_implied_rules) {
        multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All cpus use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void riscv_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
}

static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = riscv_tcg_cpu_class_init;
    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);