/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}
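
/*
 * Called when unwinding back to the start of an instruction: data[] holds
 * the values recorded at translation time, i.e. the pc (just the in-page
 * offset when CF_PCREL is in use), the instruction bits for env->bins and
 * the env->excp_uw2 value.
 */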
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
    env->excp_uw2 = data[2];
}

static const TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) {
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}
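
/*
 * Enable or disable an extension on behalf of QEMU (implied rules,
 * profiles, etc). Explicit user choices are never overridden, and an
 * extension is not auto-enabled when the current priv spec version is
 * older than the version that introduced it.
 */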
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}
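
/*
 * Named features such as zic64b are not set directly by the user: they are
 * derived here from the priv spec version and the cache block sizes.
 */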
static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    /* zic64b is 1.12 or later */
    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64 &&
                          cpu->cfg.has_priv_1_12;
}

static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extensions require Zca");
        return;
    }

    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif
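
/*
 * Check whether the CPU as currently configured implements everything the
 * profile mandates (satp mode, priv spec version, MISA bits and the
 * extension list), warning about mismatches when the user explicitly
 * enabled the profile. A profile is only reported as enabled if its parent
 * profile is enabled as well.
 */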
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool parent_enabled, profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec != env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    if (profile->parent != NULL) {
        parent_enabled = object_property_get_bool(OBJECT(cpu),
                                                  profile->parent->name,
                                                  NULL);
        profile->enabled = profile->enabled && parent_enabled;
    }
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}
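
/*
 * Apply one implied-extension rule, recursing into the rules of anything it
 * enables. Per-hart bookkeeping in rule->enabled avoids applying the same
 * rule twice for the same hart (system emulation only).
 */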
static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}

static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return false;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;
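
/*
 * QOM setter for a single MISA bit. The choice is recorded in
 * misa_ext_user_opts so later validation knows it came from the user;
 * vendor CPUs are not allowed to enable extensions they do not already
 * have.
 */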
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}

static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    if (profile->parent != NULL) {
        object_property_set_bool(obj, profile->parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        object_property_set_bool(obj, "mmu", true, NULL);
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated in the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}
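
/*
 * QOM setter backing the per-extension boolean properties, which are
 * normally set from the command line (e.g. something like
 * '-cpu rv64,zba=on,zbs=off'). Deprecated capitalized names are still
 * accepted with a warning.
 */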
static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set def val directly instead of using
     * object_property_set_bool() to save the set()
     * callback hash for user inputs.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}
1387 */ 1388 cpu->cfg.ext_svade = false; 1389 1390 /* set vector version */ 1391 env->vext_ver = VEXT_VERSION_1_00_0; 1392 1393 /* Zfinx is not compatible with F. Disable it */ 1394 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false); 1395 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false); 1396 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false); 1397 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false); 1398 1399 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false); 1400 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false); 1401 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false); 1402 1403 if (env->misa_mxl != MXL_RV32) { 1404 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false); 1405 } 1406 } 1407 1408 static bool riscv_cpu_has_max_extensions(Object *cpu_obj) 1409 { 1410 return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL; 1411 } 1412 1413 static void riscv_tcg_cpu_instance_init(CPUState *cs) 1414 { 1415 RISCVCPU *cpu = RISCV_CPU(cs); 1416 Object *obj = OBJECT(cpu); 1417 1418 misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); 1419 multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); 1420 1421 if (!misa_ext_implied_rules) { 1422 misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal); 1423 } 1424 1425 if (!multi_ext_implied_rules) { 1426 multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal); 1427 } 1428 1429 riscv_cpu_add_user_properties(obj); 1430 1431 if (riscv_cpu_has_max_extensions(obj)) { 1432 riscv_init_max_cpu_extensions(obj); 1433 } 1434 } 1435 1436 static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) 1437 { 1438 /* 1439 * All cpus use the same set of operations. 1440 */ 1441 cc->tcg_ops = &riscv_tcg_ops; 1442 } 1443 1444 static void riscv_tcg_cpu_class_init(CPUClass *cc) 1445 { 1446 cc->init_accel_cpu = riscv_tcg_cpu_init_ops; 1447 } 1448 1449 static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data) 1450 { 1451 AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); 1452 1453 acc->cpu_class_init = riscv_tcg_cpu_class_init; 1454 acc->cpu_instance_init = riscv_tcg_cpu_instance_init; 1455 acc->cpu_target_realize = riscv_tcg_cpu_realize; 1456 } 1457 1458 static const TypeInfo riscv_tcg_cpu_accel_type_info = { 1459 .name = ACCEL_CPU_NAME("tcg"), 1460 1461 .parent = TYPE_ACCEL_CPU, 1462 .class_init = riscv_tcg_cpu_accel_class_init, 1463 .abstract = true, 1464 }; 1465 1466 static void riscv_tcg_cpu_accel_register_types(void) 1467 { 1468 type_register_static(&riscv_tcg_cpu_accel_type_info); 1469 } 1470 type_init(riscv_tcg_cpu_accel_register_types); 1471