/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
    env->excp_uw2 = data[2];
}
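
/*
 * TCG hooks for the RISC-V frontend. The entries inside the
 * #ifndef CONFIG_USER_ONLY block below are only used in system emulation,
 * where MMU faults, interrupts, failed bus transactions, unaligned
 * accesses and debug exceptions are handled by the CPU model.
 */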
static const TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .translate_code = riscv_translate_code,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    switch (feat_offset) {
    case CPU_CFG_OFFSET(ext_zic64b):
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
        break;
    case CPU_CFG_OFFSET(ext_ssstateen):
        cpu->cfg.ext_smstateen = true;
        break;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}
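
/*
 * Flip an extension as a side effect of another setting, but only if the
 * user did not set it explicitly on the command line and, when enabling,
 * only if the current priv spec version is recent enough for it.
 */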
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);

            /*
             * Do not show user warnings for named features that users
             * can't enable/disable in the command line. See commit
             * 68c9e54bea for more info.
             */
            if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
                continue;
            }
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}
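
/*
 * Named features are not user-flippable extensions; they mirror other
 * settings. zic64b, for instance, is only true when all three
 * cache-block sizes (cbom, cbop, cboz) are 64 bytes.
 */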
static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64;

    cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;
}

static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}
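
/*
 * Illustrative, hypothetical invocation: with "-cpu rv64,g=true,zicsr=false"
 * the user explicitly disables a member of RVG, so riscv_cpu_validate_g()
 * leaves it disabled and emits the "RVG mandates disabled extension"
 * warning instead of silently re-enabling it.
 */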

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extensions requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extensions require Zca");
        return;
    }

    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    if (mcc->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
        error_setg(errp, "svukte is not supported for RV32");
        return;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool parent_enabled, profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec != env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    if (profile->parent != NULL) {
        parent_enabled = object_property_get_bool(OBJECT(cpu),
                                                  profile->parent->name,
                                                  NULL);
        profile->enabled = profile->enabled && parent_enabled;
    }
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}
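
/*
 * Recursively enable everything a rule implies: first the implied MISA
 * bits (skipping any the user explicitly disabled), then the implied
 * multi-letter extensions, following chained rules as they are found.
 * In system mode a per-hart bitmap prevents re-applying a rule.
 */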
static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}
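
/*
 * Apply every implied rule whose trigger extension is currently enabled.
 * The Zc* rules are handled first since they do not fit the generic
 * rule tables.
 */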
static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return false;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}
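
/*
 * Per-bit MISA properties ("i", "m", "v", ...). The setter records the
 * user's choice, refuses to enable extra extensions on vendor CPUs and
 * bumps the priv spec version when H is enabled on an older one.
 */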
typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};
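
/*
 * Illustrative, hypothetical invocation: "-cpu rv64,v=true,h=false" goes
 * through cpu_set_misa_ext_cfg() twice, recording both choices in
 * misa_ext_user_opts and updating misa_ext/misa_ext_mask accordingly.
 */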

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}
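
/*
 * Profile property setter. Enabling a profile also enables its parent
 * profile, selects the profile's priv spec and satp mode, and turns on
 * every MISA bit and multi-letter extension it mandates; disabling it
 * turns those back off, except for the base ISA. As an illustrative,
 * hypothetical example: "-cpu rv64,rva22u64=true" (assuming the RVA22U64
 * profile is registered in riscv_profiles[]) enables all of that
 * profile's mandatory extensions.
 */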

static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    if (profile->parent != NULL) {
        object_property_set_bool(obj, profile->parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        object_property_set_bool(obj, "mmu", true, NULL);
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated in the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}
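
/*
 * Setter for the multi-letter extension properties registered below.
 * It records the user's choice in the user-options hash, warns when a
 * deprecated capitalized spelling is used, refuses to enable extensions
 * on vendor CPUs and bumps the priv spec version when needed.
 */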
static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set the default value directly instead of going through
     * object_property_set_bool(): the set() callback would record the
     * default in the user-options hash as if the user had set it.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}
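
/*
 * Illustrative, hypothetical invocation: with the properties registered
 * above, something like "-cpu rv64,zba=true,zbb=false" toggles individual
 * extensions, and each choice is recorded so later validation knows it
 * was an explicit user decision.
 */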

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG, RVJ and RVV, which are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility concerns.
     * Disable those, the user can still opt in to them on the command line.
     */
    cpu->cfg.ext_svade = false;

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);

    if (!misa_ext_implied_rules) {
        misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    if (!multi_ext_implied_rules) {
        multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All cpus use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void riscv_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
}

static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = riscv_tcg_cpu_class_init;
    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);