1 /* 2 * RISC-V emulation for qemu: main translation routines. 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2 or later, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along with 16 * this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 19 #include "qemu/osdep.h" 20 #include "qemu/log.h" 21 #include "cpu.h" 22 #include "tcg/tcg-op.h" 23 #include "exec/helper-proto.h" 24 #include "exec/helper-gen.h" 25 #include "exec/target_page.h" 26 #include "exec/translator.h" 27 #include "exec/translation-block.h" 28 #include "exec/log.h" 29 #include "semihosting/semihost.h" 30 31 #include "internals.h" 32 33 #define HELPER_H "helper.h" 34 #include "exec/helper-info.c.inc" 35 #undef HELPER_H 36 37 #include "tcg/tcg-cpu.h" 38 39 /* global register indices */ 40 static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart; 41 static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */ 42 static TCGv load_res; 43 static TCGv load_val; 44 45 /* 46 * If an operation is being performed on less than TARGET_LONG_BITS, 47 * it may require the inputs to be sign- or zero-extended; which will 48 * depend on the exact operation being performed. 49 */ 50 typedef enum { 51 EXT_NONE, 52 EXT_SIGN, 53 EXT_ZERO, 54 } DisasExtend; 55 56 typedef struct DisasContext { 57 DisasContextBase base; 58 target_ulong cur_insn_len; 59 target_ulong pc_save; 60 target_ulong priv_ver; 61 RISCVMXL misa_mxl_max; 62 RISCVMXL xl; 63 RISCVMXL address_xl; 64 uint32_t misa_ext; 65 uint32_t opcode; 66 RISCVExtStatus mstatus_fs; 67 RISCVExtStatus mstatus_vs; 68 uint32_t mem_idx; 69 uint32_t priv; 70 /* 71 * Remember the rounding mode encoded in the previous fp instruction, 72 * which we have already installed into env->fp_status. Or -1 for 73 * no previous fp instruction. Note that we exit the TB when writing 74 * to any system register, which includes CSR_FRM, so we do not have 75 * to reset this known value. 76 */ 77 int frm; 78 RISCVMXL ol; 79 bool virt_inst_excp; 80 bool virt_enabled; 81 const RISCVCPUConfig *cfg_ptr; 82 /* vector extension */ 83 bool vill; 84 /* 85 * Encode LMUL to lmul as follows: 86 * LMUL vlmul lmul 87 * 1 000 0 88 * 2 001 1 89 * 4 010 2 90 * 8 011 3 91 * - 100 - 92 * 1/8 101 -3 93 * 1/4 110 -2 94 * 1/2 111 -1 95 */ 96 int8_t lmul; 97 uint8_t sew; 98 uint8_t vta; 99 uint8_t vma; 100 bool cfg_vta_all_1s; 101 bool vstart_eq_zero; 102 bool vl_eq_vlmax; 103 CPUState *cs; 104 TCGv zero; 105 /* actual address width */ 106 uint8_t addr_xl; 107 bool addr_signed; 108 /* Ztso */ 109 bool ztso; 110 /* Use icount trigger for native debug */ 111 bool itrigger; 112 /* FRM is known to contain a valid value. */ 113 bool frm_valid; 114 bool insn_start_updated; 115 const GPtrArray *decoders; 116 /* zicfilp extension. fcfi_enabled, lp expected or not */ 117 bool fcfi_enabled; 118 bool fcfi_lp_expected; 119 /* zicfiss extension, if shadow stack was enabled during TB gen */ 120 bool bcfi_enabled; 121 } DisasContext; 122 123 static inline bool has_ext(DisasContext *ctx, uint32_t ext) 124 { 125 return ctx->misa_ext & ext; 126 } 127 128 #ifdef TARGET_RISCV32 129 #define get_xl(ctx) MXL_RV32 130 #elif defined(CONFIG_USER_ONLY) 131 #define get_xl(ctx) MXL_RV64 132 #else 133 #define get_xl(ctx) ((ctx)->xl) 134 #endif 135 136 #ifdef TARGET_RISCV32 137 #define get_address_xl(ctx) MXL_RV32 138 #elif defined(CONFIG_USER_ONLY) 139 #define get_address_xl(ctx) MXL_RV64 140 #else 141 #define get_address_xl(ctx) ((ctx)->address_xl) 142 #endif 143 144 #define mxl_memop(ctx) ((get_xl(ctx) + 1) | MO_TE) 145 146 /* The word size for this machine mode. */ 147 static inline int __attribute__((unused)) get_xlen(DisasContext *ctx) 148 { 149 return 16 << get_xl(ctx); 150 } 151 152 /* The operation length, as opposed to the xlen. */ 153 #ifdef TARGET_RISCV32 154 #define get_ol(ctx) MXL_RV32 155 #else 156 #define get_ol(ctx) ((ctx)->ol) 157 #endif 158 159 static inline int get_olen(DisasContext *ctx) 160 { 161 return 16 << get_ol(ctx); 162 } 163 164 /* The maximum register length */ 165 #ifdef TARGET_RISCV32 166 #define get_xl_max(ctx) MXL_RV32 167 #else 168 #define get_xl_max(ctx) ((ctx)->misa_mxl_max) 169 #endif 170 171 /* 172 * RISC-V requires NaN-boxing of narrower width floating point values. 173 * This applies when a 32-bit value is assigned to a 64-bit FP register. 174 * For consistency and simplicity, we nanbox results even when the RVD 175 * extension is not present. 176 */ 177 static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in) 178 { 179 tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32)); 180 } 181 182 static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in) 183 { 184 tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(16, 48)); 185 } 186 187 /* 188 * A narrow n-bit operation, where n < FLEN, checks that input operands 189 * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1. 190 * If so, the least-significant bits of the input are used, otherwise the 191 * input value is treated as an n-bit canonical NaN (v2.2 section 9.2). 192 * 193 * Here, the result is always nan-boxed, even the canonical nan. 194 */ 195 static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in) 196 { 197 TCGv_i64 t_max = tcg_constant_i64(0xffffffffffff0000ull); 198 TCGv_i64 t_nan = tcg_constant_i64(0xffffffffffff7e00ull); 199 200 tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan); 201 } 202 203 static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in) 204 { 205 TCGv_i64 t_max = tcg_constant_i64(0xffffffff00000000ull); 206 TCGv_i64 t_nan = tcg_constant_i64(0xffffffff7fc00000ull); 207 208 tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan); 209 } 210 211 static void decode_save_opc(DisasContext *ctx, target_ulong excp_uw2) 212 { 213 assert(!ctx->insn_start_updated); 214 ctx->insn_start_updated = true; 215 tcg_set_insn_start_param(ctx->base.insn_start, 1, ctx->opcode); 216 tcg_set_insn_start_param(ctx->base.insn_start, 2, excp_uw2); 217 } 218 219 static void gen_pc_plus_diff(TCGv target, DisasContext *ctx, 220 target_long diff) 221 { 222 target_ulong dest = ctx->base.pc_next + diff; 223 224 assert(ctx->pc_save != -1); 225 if (tb_cflags(ctx->base.tb) & CF_PCREL) { 226 tcg_gen_addi_tl(target, cpu_pc, dest - ctx->pc_save); 227 if (get_xl(ctx) == MXL_RV32) { 228 tcg_gen_ext32s_tl(target, target); 229 } 230 } else { 231 if (get_xl(ctx) == MXL_RV32) { 232 dest = (int32_t)dest; 233 } 234 tcg_gen_movi_tl(target, dest); 235 } 236 } 237 238 static void gen_update_pc(DisasContext *ctx, target_long diff) 239 { 240 gen_pc_plus_diff(cpu_pc, ctx, diff); 241 ctx->pc_save = ctx->base.pc_next + diff; 242 } 243 244 static void generate_exception(DisasContext *ctx, RISCVException excp) 245 { 246 gen_update_pc(ctx, 0); 247 gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp)); 248 ctx->base.is_jmp = DISAS_NORETURN; 249 } 250 251 static void gen_exception_illegal(DisasContext *ctx) 252 { 253 tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), tcg_env, 254 offsetof(CPURISCVState, bins)); 255 if (ctx->virt_inst_excp) { 256 generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT); 257 } else { 258 generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST); 259 } 260 } 261 262 static void gen_exception_inst_addr_mis(DisasContext *ctx, TCGv target) 263 { 264 tcg_gen_st_tl(target, tcg_env, offsetof(CPURISCVState, badaddr)); 265 generate_exception(ctx, RISCV_EXCP_INST_ADDR_MIS); 266 } 267 268 static void lookup_and_goto_ptr(DisasContext *ctx) 269 { 270 #ifndef CONFIG_USER_ONLY 271 if (ctx->itrigger) { 272 gen_helper_itrigger_match(tcg_env); 273 } 274 #endif 275 tcg_gen_lookup_and_goto_ptr(); 276 } 277 278 static void exit_tb(DisasContext *ctx) 279 { 280 #ifndef CONFIG_USER_ONLY 281 if (ctx->itrigger) { 282 gen_helper_itrigger_match(tcg_env); 283 } 284 #endif 285 tcg_gen_exit_tb(NULL, 0); 286 } 287 288 static void gen_goto_tb(DisasContext *ctx, int n, target_long diff) 289 { 290 target_ulong dest = ctx->base.pc_next + diff; 291 292 /* 293 * Under itrigger, instruction executes one by one like singlestep, 294 * direct block chain benefits will be small. 295 */ 296 if (translator_use_goto_tb(&ctx->base, dest) && !ctx->itrigger) { 297 /* 298 * For pcrel, the pc must always be up-to-date on entry to 299 * the linked TB, so that it can use simple additions for all 300 * further adjustments. For !pcrel, the linked TB is compiled 301 * to know its full virtual address, so we can delay the 302 * update to pc to the unlinked path. A long chain of links 303 * can thus avoid many updates to the PC. 304 */ 305 if (tb_cflags(ctx->base.tb) & CF_PCREL) { 306 gen_update_pc(ctx, diff); 307 tcg_gen_goto_tb(n); 308 } else { 309 tcg_gen_goto_tb(n); 310 gen_update_pc(ctx, diff); 311 } 312 tcg_gen_exit_tb(ctx->base.tb, n); 313 } else { 314 gen_update_pc(ctx, diff); 315 lookup_and_goto_ptr(ctx); 316 } 317 } 318 319 /* 320 * Wrappers for getting reg values. 321 * 322 * The $zero register does not have cpu_gpr[0] allocated -- we supply the 323 * constant zero as a source, and an uninitialized sink as destination. 324 * 325 * Further, we may provide an extension for word operations. 326 */ 327 static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext) 328 { 329 TCGv t; 330 331 if (reg_num == 0) { 332 return ctx->zero; 333 } 334 335 switch (get_ol(ctx)) { 336 case MXL_RV32: 337 switch (ext) { 338 case EXT_NONE: 339 break; 340 case EXT_SIGN: 341 t = tcg_temp_new(); 342 tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]); 343 return t; 344 case EXT_ZERO: 345 t = tcg_temp_new(); 346 tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]); 347 return t; 348 default: 349 g_assert_not_reached(); 350 } 351 break; 352 case MXL_RV64: 353 case MXL_RV128: 354 break; 355 default: 356 g_assert_not_reached(); 357 } 358 return cpu_gpr[reg_num]; 359 } 360 361 static TCGv get_gprh(DisasContext *ctx, int reg_num) 362 { 363 assert(get_xl(ctx) == MXL_RV128); 364 if (reg_num == 0) { 365 return ctx->zero; 366 } 367 return cpu_gprh[reg_num]; 368 } 369 370 static TCGv dest_gpr(DisasContext *ctx, int reg_num) 371 { 372 if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) { 373 return tcg_temp_new(); 374 } 375 return cpu_gpr[reg_num]; 376 } 377 378 static TCGv dest_gprh(DisasContext *ctx, int reg_num) 379 { 380 if (reg_num == 0) { 381 return tcg_temp_new(); 382 } 383 return cpu_gprh[reg_num]; 384 } 385 386 static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t) 387 { 388 if (reg_num != 0) { 389 switch (get_ol(ctx)) { 390 case MXL_RV32: 391 tcg_gen_ext32s_tl(cpu_gpr[reg_num], t); 392 break; 393 case MXL_RV64: 394 case MXL_RV128: 395 tcg_gen_mov_tl(cpu_gpr[reg_num], t); 396 break; 397 default: 398 g_assert_not_reached(); 399 } 400 401 if (get_xl_max(ctx) == MXL_RV128) { 402 tcg_gen_sari_tl(cpu_gprh[reg_num], cpu_gpr[reg_num], 63); 403 } 404 } 405 } 406 407 static void gen_set_gpri(DisasContext *ctx, int reg_num, target_long imm) 408 { 409 if (reg_num != 0) { 410 switch (get_ol(ctx)) { 411 case MXL_RV32: 412 tcg_gen_movi_tl(cpu_gpr[reg_num], (int32_t)imm); 413 break; 414 case MXL_RV64: 415 case MXL_RV128: 416 tcg_gen_movi_tl(cpu_gpr[reg_num], imm); 417 break; 418 default: 419 g_assert_not_reached(); 420 } 421 422 if (get_xl_max(ctx) == MXL_RV128) { 423 tcg_gen_movi_tl(cpu_gprh[reg_num], -(imm < 0)); 424 } 425 } 426 } 427 428 static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh) 429 { 430 assert(get_ol(ctx) == MXL_RV128); 431 if (reg_num != 0) { 432 tcg_gen_mov_tl(cpu_gpr[reg_num], rl); 433 tcg_gen_mov_tl(cpu_gprh[reg_num], rh); 434 } 435 } 436 437 static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num) 438 { 439 if (!ctx->cfg_ptr->ext_zfinx) { 440 return cpu_fpr[reg_num]; 441 } 442 443 if (reg_num == 0) { 444 return tcg_constant_i64(0); 445 } 446 switch (get_xl(ctx)) { 447 case MXL_RV32: 448 #ifdef TARGET_RISCV32 449 { 450 TCGv_i64 t = tcg_temp_new_i64(); 451 tcg_gen_ext_i32_i64(t, cpu_gpr[reg_num]); 452 return t; 453 } 454 #else 455 /* fall through */ 456 case MXL_RV64: 457 return cpu_gpr[reg_num]; 458 #endif 459 default: 460 g_assert_not_reached(); 461 } 462 } 463 464 static TCGv_i64 get_fpr_d(DisasContext *ctx, int reg_num) 465 { 466 if (!ctx->cfg_ptr->ext_zfinx) { 467 return cpu_fpr[reg_num]; 468 } 469 470 if (reg_num == 0) { 471 return tcg_constant_i64(0); 472 } 473 switch (get_xl(ctx)) { 474 case MXL_RV32: 475 { 476 TCGv_i64 t = tcg_temp_new_i64(); 477 tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]); 478 return t; 479 } 480 #ifdef TARGET_RISCV64 481 case MXL_RV64: 482 return cpu_gpr[reg_num]; 483 #endif 484 default: 485 g_assert_not_reached(); 486 } 487 } 488 489 static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num) 490 { 491 if (!ctx->cfg_ptr->ext_zfinx) { 492 return cpu_fpr[reg_num]; 493 } 494 495 if (reg_num == 0) { 496 return tcg_temp_new_i64(); 497 } 498 499 switch (get_xl(ctx)) { 500 case MXL_RV32: 501 return tcg_temp_new_i64(); 502 #ifdef TARGET_RISCV64 503 case MXL_RV64: 504 return cpu_gpr[reg_num]; 505 #endif 506 default: 507 g_assert_not_reached(); 508 } 509 } 510 511 /* assume it is nanboxing (for normal) or sign-extended (for zfinx) */ 512 static void gen_set_fpr_hs(DisasContext *ctx, int reg_num, TCGv_i64 t) 513 { 514 if (!ctx->cfg_ptr->ext_zfinx) { 515 tcg_gen_mov_i64(cpu_fpr[reg_num], t); 516 return; 517 } 518 if (reg_num != 0) { 519 switch (get_xl(ctx)) { 520 case MXL_RV32: 521 #ifdef TARGET_RISCV32 522 tcg_gen_extrl_i64_i32(cpu_gpr[reg_num], t); 523 break; 524 #else 525 /* fall through */ 526 case MXL_RV64: 527 tcg_gen_mov_i64(cpu_gpr[reg_num], t); 528 break; 529 #endif 530 default: 531 g_assert_not_reached(); 532 } 533 } 534 } 535 536 static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t) 537 { 538 if (!ctx->cfg_ptr->ext_zfinx) { 539 tcg_gen_mov_i64(cpu_fpr[reg_num], t); 540 return; 541 } 542 543 if (reg_num != 0) { 544 switch (get_xl(ctx)) { 545 case MXL_RV32: 546 #ifdef TARGET_RISCV32 547 tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t); 548 break; 549 #else 550 tcg_gen_ext32s_i64(cpu_gpr[reg_num], t); 551 tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32); 552 break; 553 case MXL_RV64: 554 tcg_gen_mov_i64(cpu_gpr[reg_num], t); 555 break; 556 #endif 557 default: 558 g_assert_not_reached(); 559 } 560 } 561 } 562 563 #ifndef CONFIG_USER_ONLY 564 /* 565 * Direct calls 566 * - jal x1; 567 * - jal x5; 568 * - c.jal. 569 * - cm.jalt. 570 * 571 * Direct jumps 572 * - jal x0; 573 * - c.j; 574 * - cm.jt. 575 * 576 * Other direct jumps 577 * - jal rd where rd != x1 and rd != x5 and rd != x0; 578 */ 579 static void gen_ctr_jal(DisasContext *ctx, int rd, target_ulong imm) 580 { 581 TCGv dest = tcg_temp_new(); 582 TCGv src = tcg_temp_new(); 583 TCGv type; 584 585 /* 586 * If rd is x1 or x5 link registers, treat this as direct call otherwise 587 * its a direct jump. 588 */ 589 if (rd == 1 || rd == 5) { 590 type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL); 591 } else if (rd == 0) { 592 type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP); 593 } else { 594 type = tcg_constant_tl(CTRDATA_TYPE_OTHER_DIRECT_JUMP); 595 } 596 597 gen_pc_plus_diff(dest, ctx, imm); 598 gen_pc_plus_diff(src, ctx, 0); 599 gen_helper_ctr_add_entry(tcg_env, src, dest, type); 600 } 601 #endif 602 603 static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) 604 { 605 TCGv succ_pc = dest_gpr(ctx, rd); 606 607 /* check misaligned: */ 608 if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr, 609 ctx->priv_ver, 610 ctx->misa_ext)) { 611 if ((imm & 0x3) != 0) { 612 TCGv target_pc = tcg_temp_new(); 613 gen_pc_plus_diff(target_pc, ctx, imm); 614 gen_exception_inst_addr_mis(ctx, target_pc); 615 return; 616 } 617 } 618 619 #ifndef CONFIG_USER_ONLY 620 if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) { 621 gen_ctr_jal(ctx, rd, imm); 622 } 623 #endif 624 625 gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len); 626 gen_set_gpr(ctx, rd, succ_pc); 627 628 gen_goto_tb(ctx, 0, imm); /* must use this for safety */ 629 ctx->base.is_jmp = DISAS_NORETURN; 630 } 631 632 /* Compute a canonical address from a register plus offset. */ 633 static TCGv get_address(DisasContext *ctx, int rs1, int imm) 634 { 635 TCGv addr = tcg_temp_new(); 636 TCGv src1 = get_gpr(ctx, rs1, EXT_NONE); 637 638 tcg_gen_addi_tl(addr, src1, imm); 639 if (ctx->addr_signed) { 640 tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl); 641 } else { 642 tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl); 643 } 644 645 return addr; 646 } 647 648 /* Compute a canonical address from a register plus reg offset. */ 649 static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs) 650 { 651 TCGv addr = tcg_temp_new(); 652 TCGv src1 = get_gpr(ctx, rs1, EXT_NONE); 653 654 tcg_gen_add_tl(addr, src1, offs); 655 if (ctx->addr_signed) { 656 tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl); 657 } else { 658 tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl); 659 } 660 661 return addr; 662 } 663 664 #ifndef CONFIG_USER_ONLY 665 /* 666 * We will have already diagnosed disabled state, 667 * and need to turn initial/clean into dirty. 668 */ 669 static void mark_fs_dirty(DisasContext *ctx) 670 { 671 TCGv tmp; 672 673 if (!has_ext(ctx, RVF)) { 674 return; 675 } 676 677 if (ctx->mstatus_fs != EXT_STATUS_DIRTY) { 678 /* Remember the state change for the rest of the TB. */ 679 ctx->mstatus_fs = EXT_STATUS_DIRTY; 680 681 tmp = tcg_temp_new(); 682 tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus)); 683 tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); 684 tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus)); 685 686 if (ctx->virt_enabled) { 687 tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs)); 688 tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); 689 tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs)); 690 } 691 } 692 } 693 #else 694 static inline void mark_fs_dirty(DisasContext *ctx) { } 695 #endif 696 697 #ifndef CONFIG_USER_ONLY 698 /* 699 * We will have already diagnosed disabled state, 700 * and need to turn initial/clean into dirty. 701 */ 702 static void mark_vs_dirty(DisasContext *ctx) 703 { 704 TCGv tmp; 705 706 if (ctx->mstatus_vs != EXT_STATUS_DIRTY) { 707 /* Remember the state change for the rest of the TB. */ 708 ctx->mstatus_vs = EXT_STATUS_DIRTY; 709 710 tmp = tcg_temp_new(); 711 tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus)); 712 tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS); 713 tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus)); 714 715 if (ctx->virt_enabled) { 716 tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs)); 717 tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS); 718 tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs)); 719 } 720 } 721 } 722 #else 723 static inline void mark_vs_dirty(DisasContext *ctx) { } 724 #endif 725 726 static void finalize_rvv_inst(DisasContext *ctx) 727 { 728 mark_vs_dirty(ctx); 729 ctx->vstart_eq_zero = true; 730 } 731 732 static void gen_set_rm(DisasContext *ctx, int rm) 733 { 734 if (ctx->frm == rm) { 735 return; 736 } 737 ctx->frm = rm; 738 739 if (rm == RISCV_FRM_DYN) { 740 /* The helper will return only if frm valid. */ 741 ctx->frm_valid = true; 742 } 743 744 /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */ 745 decode_save_opc(ctx, 0); 746 gen_helper_set_rounding_mode(tcg_env, tcg_constant_i32(rm)); 747 } 748 749 static void gen_set_rm_chkfrm(DisasContext *ctx, int rm) 750 { 751 if (ctx->frm == rm && ctx->frm_valid) { 752 return; 753 } 754 ctx->frm = rm; 755 ctx->frm_valid = true; 756 757 /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */ 758 decode_save_opc(ctx, 0); 759 gen_helper_set_rounding_mode_chkfrm(tcg_env, tcg_constant_i32(rm)); 760 } 761 762 static int ex_plus_1(DisasContext *ctx, int nf) 763 { 764 return nf + 1; 765 } 766 767 #define EX_SH(amount) \ 768 static int ex_shift_##amount(DisasContext *ctx, int imm) \ 769 { \ 770 return imm << amount; \ 771 } 772 EX_SH(1) 773 EX_SH(2) 774 EX_SH(3) 775 EX_SH(4) 776 EX_SH(12) 777 778 #define REQUIRE_EXT(ctx, ext) do { \ 779 if (!has_ext(ctx, ext)) { \ 780 return false; \ 781 } \ 782 } while (0) 783 784 #define REQUIRE_32BIT(ctx) do { \ 785 if (get_xl(ctx) != MXL_RV32) { \ 786 return false; \ 787 } \ 788 } while (0) 789 790 #define REQUIRE_64BIT(ctx) do { \ 791 if (get_xl(ctx) != MXL_RV64) { \ 792 return false; \ 793 } \ 794 } while (0) 795 796 #define REQUIRE_128BIT(ctx) do { \ 797 if (get_xl(ctx) != MXL_RV128) { \ 798 return false; \ 799 } \ 800 } while (0) 801 802 #define REQUIRE_64_OR_128BIT(ctx) do { \ 803 if (get_xl(ctx) == MXL_RV32) { \ 804 return false; \ 805 } \ 806 } while (0) 807 808 #define REQUIRE_EITHER_EXT(ctx, A, B) do { \ 809 if (!ctx->cfg_ptr->ext_##A && \ 810 !ctx->cfg_ptr->ext_##B) { \ 811 return false; \ 812 } \ 813 } while (0) 814 815 static int ex_rvc_register(DisasContext *ctx, int reg) 816 { 817 return 8 + reg; 818 } 819 820 static int ex_sreg_register(DisasContext *ctx, int reg) 821 { 822 return reg < 2 ? reg + 8 : reg + 16; 823 } 824 825 static int ex_rvc_shiftli(DisasContext *ctx, int imm) 826 { 827 /* For RV128 a shamt of 0 means a shift by 64. */ 828 if (get_ol(ctx) == MXL_RV128) { 829 imm = imm ? imm : 64; 830 } 831 return imm; 832 } 833 834 static int ex_rvc_shiftri(DisasContext *ctx, int imm) 835 { 836 /* 837 * For RV128 a shamt of 0 means a shift by 64, furthermore, for right 838 * shifts, the shamt is sign-extended. 839 */ 840 if (get_ol(ctx) == MXL_RV128) { 841 imm = imm | (imm & 32) << 1; 842 imm = imm ? imm : 64; 843 } 844 return imm; 845 } 846 847 /* Include the auto-generated decoder for 32 bit insn */ 848 #include "decode-insn32.c.inc" 849 850 static bool gen_logic_imm_fn(DisasContext *ctx, arg_i *a, 851 void (*func)(TCGv, TCGv, target_long)) 852 { 853 TCGv dest = dest_gpr(ctx, a->rd); 854 TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); 855 856 func(dest, src1, a->imm); 857 858 if (get_xl(ctx) == MXL_RV128) { 859 TCGv src1h = get_gprh(ctx, a->rs1); 860 TCGv desth = dest_gprh(ctx, a->rd); 861 862 func(desth, src1h, -(a->imm < 0)); 863 gen_set_gpr128(ctx, a->rd, dest, desth); 864 } else { 865 gen_set_gpr(ctx, a->rd, dest); 866 } 867 868 return true; 869 } 870 871 static bool gen_logic(DisasContext *ctx, arg_r *a, 872 void (*func)(TCGv, TCGv, TCGv)) 873 { 874 TCGv dest = dest_gpr(ctx, a->rd); 875 TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); 876 TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); 877 878 func(dest, src1, src2); 879 880 if (get_xl(ctx) == MXL_RV128) { 881 TCGv src1h = get_gprh(ctx, a->rs1); 882 TCGv src2h = get_gprh(ctx, a->rs2); 883 TCGv desth = dest_gprh(ctx, a->rd); 884 885 func(desth, src1h, src2h); 886 gen_set_gpr128(ctx, a->rd, dest, desth); 887 } else { 888 gen_set_gpr(ctx, a->rd, dest); 889 } 890 891 return true; 892 } 893 894 static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext, 895 void (*func)(TCGv, TCGv, target_long), 896 void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long)) 897 { 898 TCGv dest = dest_gpr(ctx, a->rd); 899 TCGv src1 = get_gpr(ctx, a->rs1, ext); 900 901 if (get_ol(ctx) < MXL_RV128) { 902 func(dest, src1, a->imm); 903 gen_set_gpr(ctx, a->rd, dest); 904 } else { 905 if (f128 == NULL) { 906 return false; 907 } 908 909 TCGv src1h = get_gprh(ctx, a->rs1); 910 TCGv desth = dest_gprh(ctx, a->rd); 911 912 f128(dest, desth, src1, src1h, a->imm); 913 gen_set_gpr128(ctx, a->rd, dest, desth); 914 } 915 return true; 916 } 917 918 static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext, 919 void (*func)(TCGv, TCGv, TCGv), 920 void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv)) 921 { 922 TCGv dest = dest_gpr(ctx, a->rd); 923 TCGv src1 = get_gpr(ctx, a->rs1, ext); 924 TCGv src2 = tcg_constant_tl(a->imm); 925 926 if (get_ol(ctx) < MXL_RV128) { 927 func(dest, src1, src2); 928 gen_set_gpr(ctx, a->rd, dest); 929 } else { 930 if (f128 == NULL) { 931 return false; 932 } 933 934 TCGv src1h = get_gprh(ctx, a->rs1); 935 TCGv src2h = tcg_constant_tl(-(a->imm < 0)); 936 TCGv desth = dest_gprh(ctx, a->rd); 937 938 f128(dest, desth, src1, src1h, src2, src2h); 939 gen_set_gpr128(ctx, a->rd, dest, desth); 940 } 941 return true; 942 } 943 944 static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext, 945 void (*func)(TCGv, TCGv, TCGv), 946 void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv)) 947 { 948 TCGv dest = dest_gpr(ctx, a->rd); 949 TCGv src1 = get_gpr(ctx, a->rs1, ext); 950 TCGv src2 = get_gpr(ctx, a->rs2, ext); 951 952 if (get_ol(ctx) < MXL_RV128) { 953 func(dest, src1, src2); 954 gen_set_gpr(ctx, a->rd, dest); 955 } else { 956 if (f128 == NULL) { 957 return false; 958 } 959 960 TCGv src1h = get_gprh(ctx, a->rs1); 961 TCGv src2h = get_gprh(ctx, a->rs2); 962 TCGv desth = dest_gprh(ctx, a->rd); 963 964 f128(dest, desth, src1, src1h, src2, src2h); 965 gen_set_gpr128(ctx, a->rd, dest, desth); 966 } 967 return true; 968 } 969 970 static bool gen_arith_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext, 971 void (*f_tl)(TCGv, TCGv, TCGv), 972 void (*f_32)(TCGv, TCGv, TCGv), 973 void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv)) 974 { 975 int olen = get_olen(ctx); 976 977 if (olen != TARGET_LONG_BITS) { 978 if (olen == 32) { 979 f_tl = f_32; 980 } else if (olen != 128) { 981 g_assert_not_reached(); 982 } 983 } 984 return gen_arith(ctx, a, ext, f_tl, f_128); 985 } 986 987 static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext, 988 void (*func)(TCGv, TCGv, target_long), 989 void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long)) 990 { 991 TCGv dest, src1; 992 int max_len = get_olen(ctx); 993 994 if (a->shamt >= max_len) { 995 return false; 996 } 997 998 dest = dest_gpr(ctx, a->rd); 999 src1 = get_gpr(ctx, a->rs1, ext); 1000 1001 if (max_len < 128) { 1002 func(dest, src1, a->shamt); 1003 gen_set_gpr(ctx, a->rd, dest); 1004 } else { 1005 TCGv src1h = get_gprh(ctx, a->rs1); 1006 TCGv desth = dest_gprh(ctx, a->rd); 1007 1008 if (f128 == NULL) { 1009 return false; 1010 } 1011 f128(dest, desth, src1, src1h, a->shamt); 1012 gen_set_gpr128(ctx, a->rd, dest, desth); 1013 } 1014 return true; 1015 } 1016 1017 static bool gen_shift_imm_fn_per_ol(DisasContext *ctx, arg_shift *a, 1018 DisasExtend ext, 1019 void (*f_tl)(TCGv, TCGv, target_long), 1020 void (*f_32)(TCGv, TCGv, target_long), 1021 void (*f_128)(TCGv, TCGv, TCGv, TCGv, 1022 target_long)) 1023 { 1024 int olen = get_olen(ctx); 1025 if (olen != TARGET_LONG_BITS) { 1026 if (olen == 32) { 1027 f_tl = f_32; 1028 } else if (olen != 128) { 1029 g_assert_not_reached(); 1030 } 1031 } 1032 return gen_shift_imm_fn(ctx, a, ext, f_tl, f_128); 1033 } 1034 1035 static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext, 1036 void (*func)(TCGv, TCGv, TCGv)) 1037 { 1038 TCGv dest, src1, src2; 1039 int max_len = get_olen(ctx); 1040 1041 if (a->shamt >= max_len) { 1042 return false; 1043 } 1044 1045 dest = dest_gpr(ctx, a->rd); 1046 src1 = get_gpr(ctx, a->rs1, ext); 1047 src2 = tcg_constant_tl(a->shamt); 1048 1049 func(dest, src1, src2); 1050 1051 gen_set_gpr(ctx, a->rd, dest); 1052 return true; 1053 } 1054 1055 static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext, 1056 void (*func)(TCGv, TCGv, TCGv), 1057 void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv)) 1058 { 1059 TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); 1060 TCGv ext2 = tcg_temp_new(); 1061 int max_len = get_olen(ctx); 1062 1063 tcg_gen_andi_tl(ext2, src2, max_len - 1); 1064 1065 TCGv dest = dest_gpr(ctx, a->rd); 1066 TCGv src1 = get_gpr(ctx, a->rs1, ext); 1067 1068 if (max_len < 128) { 1069 func(dest, src1, ext2); 1070 gen_set_gpr(ctx, a->rd, dest); 1071 } else { 1072 TCGv src1h = get_gprh(ctx, a->rs1); 1073 TCGv desth = dest_gprh(ctx, a->rd); 1074 1075 if (f128 == NULL) { 1076 return false; 1077 } 1078 f128(dest, desth, src1, src1h, ext2); 1079 gen_set_gpr128(ctx, a->rd, dest, desth); 1080 } 1081 return true; 1082 } 1083 1084 static bool gen_shift_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext, 1085 void (*f_tl)(TCGv, TCGv, TCGv), 1086 void (*f_32)(TCGv, TCGv, TCGv), 1087 void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv)) 1088 { 1089 int olen = get_olen(ctx); 1090 if (olen != TARGET_LONG_BITS) { 1091 if (olen == 32) { 1092 f_tl = f_32; 1093 } else if (olen != 128) { 1094 g_assert_not_reached(); 1095 } 1096 } 1097 return gen_shift(ctx, a, ext, f_tl, f_128); 1098 } 1099 1100 static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext, 1101 void (*func)(TCGv, TCGv)) 1102 { 1103 TCGv dest = dest_gpr(ctx, a->rd); 1104 TCGv src1 = get_gpr(ctx, a->rs1, ext); 1105 1106 func(dest, src1); 1107 1108 gen_set_gpr(ctx, a->rd, dest); 1109 return true; 1110 } 1111 1112 static bool gen_unary_per_ol(DisasContext *ctx, arg_r2 *a, DisasExtend ext, 1113 void (*f_tl)(TCGv, TCGv), 1114 void (*f_32)(TCGv, TCGv)) 1115 { 1116 int olen = get_olen(ctx); 1117 1118 if (olen != TARGET_LONG_BITS) { 1119 if (olen == 32) { 1120 f_tl = f_32; 1121 } else { 1122 g_assert_not_reached(); 1123 } 1124 } 1125 return gen_unary(ctx, a, ext, f_tl); 1126 } 1127 1128 static bool gen_amo(DisasContext *ctx, arg_atomic *a, 1129 void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp), 1130 MemOp mop) 1131 { 1132 TCGv dest = dest_gpr(ctx, a->rd); 1133 TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); 1134 MemOp size = mop & MO_SIZE; 1135 1136 if (ctx->cfg_ptr->ext_zama16b && size >= MO_32) { 1137 mop |= MO_ATOM_WITHIN16; 1138 } else { 1139 mop |= MO_ALIGN; 1140 } 1141 1142 decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); 1143 src1 = get_address(ctx, a->rs1, 0); 1144 func(dest, src1, src2, ctx->mem_idx, mop); 1145 1146 gen_set_gpr(ctx, a->rd, dest); 1147 return true; 1148 } 1149 1150 static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop) 1151 { 1152 TCGv dest = get_gpr(ctx, a->rd, EXT_NONE); 1153 TCGv src1 = get_address(ctx, a->rs1, 0); 1154 TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); 1155 1156 decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); 1157 tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop); 1158 1159 gen_set_gpr(ctx, a->rd, dest); 1160 return true; 1161 } 1162 1163 static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) 1164 { 1165 DisasContext *ctx = container_of(dcbase, DisasContext, base); 1166 CPUState *cpu = ctx->cs; 1167 CPURISCVState *env = cpu_env(cpu); 1168 1169 return translator_ldl(env, &ctx->base, pc); 1170 } 1171 1172 #define SS_MMU_INDEX(ctx) (ctx->mem_idx | MMU_IDX_SS_WRITE) 1173 1174 /* Include insn module translation function */ 1175 #include "insn_trans/trans_rvi.c.inc" 1176 #include "insn_trans/trans_rvm.c.inc" 1177 #include "insn_trans/trans_rva.c.inc" 1178 #include "insn_trans/trans_rvf.c.inc" 1179 #include "insn_trans/trans_rvd.c.inc" 1180 #include "insn_trans/trans_rvh.c.inc" 1181 #include "insn_trans/trans_rvv.c.inc" 1182 #include "insn_trans/trans_rvb.c.inc" 1183 #include "insn_trans/trans_rvzicond.c.inc" 1184 #include "insn_trans/trans_rvzacas.c.inc" 1185 #include "insn_trans/trans_rvzabha.c.inc" 1186 #include "insn_trans/trans_rvzawrs.c.inc" 1187 #include "insn_trans/trans_rvzicbo.c.inc" 1188 #include "insn_trans/trans_rvzimop.c.inc" 1189 #include "insn_trans/trans_rvzfa.c.inc" 1190 #include "insn_trans/trans_rvzfh.c.inc" 1191 #include "insn_trans/trans_rvk.c.inc" 1192 #include "insn_trans/trans_rvvk.c.inc" 1193 #include "insn_trans/trans_privileged.c.inc" 1194 #include "insn_trans/trans_svinval.c.inc" 1195 #include "insn_trans/trans_rvbf16.c.inc" 1196 #include "decode-xthead.c.inc" 1197 #include "insn_trans/trans_xthead.c.inc" 1198 #include "insn_trans/trans_xventanacondops.c.inc" 1199 1200 /* Include the auto-generated decoder for 16 bit insn */ 1201 #include "decode-insn16.c.inc" 1202 #include "insn_trans/trans_rvzce.c.inc" 1203 #include "insn_trans/trans_rvzcmop.c.inc" 1204 #include "insn_trans/trans_rvzicfiss.c.inc" 1205 1206 /* Include decoders for factored-out extensions */ 1207 #include "decode-XVentanaCondOps.c.inc" 1208 1209 /* The specification allows for longer insns, but not supported by qemu. */ 1210 #define MAX_INSN_LEN 4 1211 1212 const RISCVDecoder decoder_table[] = { 1213 { always_true_p, decode_insn32 }, 1214 { has_xthead_p, decode_xthead}, 1215 { has_XVentanaCondOps_p, decode_XVentanaCodeOps}, 1216 }; 1217 1218 const size_t decoder_table_size = ARRAY_SIZE(decoder_table); 1219 1220 static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) 1221 { 1222 ctx->virt_inst_excp = false; 1223 ctx->cur_insn_len = insn_len(opcode); 1224 /* Check for compressed insn */ 1225 if (ctx->cur_insn_len == 2) { 1226 ctx->opcode = opcode; 1227 /* 1228 * The Zca extension is added as way to refer to instructions in the C 1229 * extension that do not include the floating-point loads and stores 1230 */ 1231 if ((has_ext(ctx, RVC) || ctx->cfg_ptr->ext_zca) && 1232 decode_insn16(ctx, opcode)) { 1233 return; 1234 } 1235 } else { 1236 uint32_t opcode32 = opcode; 1237 opcode32 = deposit32(opcode32, 16, 16, 1238 translator_lduw(env, &ctx->base, 1239 ctx->base.pc_next + 2)); 1240 ctx->opcode = opcode32; 1241 1242 for (guint i = 0; i < ctx->decoders->len; ++i) { 1243 riscv_cpu_decode_fn func = g_ptr_array_index(ctx->decoders, i); 1244 if (func(ctx, opcode32)) { 1245 return; 1246 } 1247 } 1248 } 1249 1250 gen_exception_illegal(ctx); 1251 } 1252 1253 static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) 1254 { 1255 DisasContext *ctx = container_of(dcbase, DisasContext, base); 1256 CPURISCVState *env = cpu_env(cs); 1257 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); 1258 RISCVCPU *cpu = RISCV_CPU(cs); 1259 uint32_t tb_flags = ctx->base.tb->flags; 1260 1261 ctx->pc_save = ctx->base.pc_first; 1262 ctx->priv = FIELD_EX32(tb_flags, TB_FLAGS, PRIV); 1263 ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX); 1264 ctx->mstatus_fs = FIELD_EX32(tb_flags, TB_FLAGS, FS); 1265 ctx->mstatus_vs = FIELD_EX32(tb_flags, TB_FLAGS, VS); 1266 ctx->priv_ver = env->priv_ver; 1267 ctx->virt_enabled = FIELD_EX32(tb_flags, TB_FLAGS, VIRT_ENABLED); 1268 ctx->misa_ext = env->misa_ext; 1269 ctx->frm = -1; /* unknown rounding mode */ 1270 ctx->cfg_ptr = &(cpu->cfg); 1271 ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL); 1272 ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW); 1273 ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3); 1274 ctx->vta = FIELD_EX32(tb_flags, TB_FLAGS, VTA) && cpu->cfg.rvv_ta_all_1s; 1275 ctx->vma = FIELD_EX32(tb_flags, TB_FLAGS, VMA) && cpu->cfg.rvv_ma_all_1s; 1276 ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s; 1277 ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO); 1278 ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX); 1279 ctx->misa_mxl_max = mcc->def->misa_mxl_max; 1280 ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL); 1281 ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL); 1282 ctx->cs = cs; 1283 if (get_xl(ctx) == MXL_RV32) { 1284 ctx->addr_xl = 32; 1285 ctx->addr_signed = false; 1286 } else { 1287 int pm_pmm = FIELD_EX32(tb_flags, TB_FLAGS, PM_PMM); 1288 ctx->addr_xl = 64 - riscv_pm_get_pmlen(pm_pmm); 1289 ctx->addr_signed = FIELD_EX32(tb_flags, TB_FLAGS, PM_SIGNEXTEND); 1290 } 1291 ctx->ztso = cpu->cfg.ext_ztso; 1292 ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER); 1293 ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED); 1294 ctx->fcfi_lp_expected = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_LP_EXPECTED); 1295 ctx->fcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_ENABLED); 1296 ctx->zero = tcg_constant_tl(0); 1297 ctx->virt_inst_excp = false; 1298 ctx->decoders = cpu->decoders; 1299 } 1300 1301 static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu) 1302 { 1303 } 1304 1305 static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) 1306 { 1307 DisasContext *ctx = container_of(dcbase, DisasContext, base); 1308 target_ulong pc_next = ctx->base.pc_next; 1309 1310 if (tb_cflags(dcbase->tb) & CF_PCREL) { 1311 pc_next &= ~TARGET_PAGE_MASK; 1312 } 1313 1314 tcg_gen_insn_start(pc_next, 0, 0); 1315 ctx->insn_start_updated = false; 1316 } 1317 1318 static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) 1319 { 1320 DisasContext *ctx = container_of(dcbase, DisasContext, base); 1321 CPURISCVState *env = cpu_env(cpu); 1322 uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next); 1323 1324 ctx->ol = ctx->xl; 1325 decode_opc(env, ctx, opcode16); 1326 ctx->base.pc_next += ctx->cur_insn_len; 1327 1328 /* 1329 * If 'fcfi_lp_expected' is still true after processing the instruction, 1330 * then we did not see an 'lpad' instruction, and must raise an exception. 1331 * Insert code to raise the exception at the start of the insn; any other 1332 * code the insn may have emitted will be deleted as dead code following 1333 * the noreturn exception 1334 */ 1335 if (ctx->fcfi_lp_expected) { 1336 /* Emit after insn_start, i.e. before the op following insn_start. */ 1337 tcg_ctx->emit_before_op = QTAILQ_NEXT(ctx->base.insn_start, link); 1338 tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL), 1339 tcg_env, offsetof(CPURISCVState, sw_check_code)); 1340 gen_helper_raise_exception(tcg_env, 1341 tcg_constant_i32(RISCV_EXCP_SW_CHECK)); 1342 tcg_ctx->emit_before_op = NULL; 1343 ctx->base.is_jmp = DISAS_NORETURN; 1344 } 1345 1346 /* Only the first insn within a TB is allowed to cross a page boundary. */ 1347 if (ctx->base.is_jmp == DISAS_NEXT) { 1348 if (ctx->itrigger || !translator_is_same_page(&ctx->base, ctx->base.pc_next)) { 1349 ctx->base.is_jmp = DISAS_TOO_MANY; 1350 } else { 1351 unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK; 1352 1353 if (page_ofs > TARGET_PAGE_SIZE - MAX_INSN_LEN) { 1354 uint16_t next_insn = 1355 translator_lduw(env, &ctx->base, ctx->base.pc_next); 1356 int len = insn_len(next_insn); 1357 1358 if (!translator_is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) { 1359 ctx->base.is_jmp = DISAS_TOO_MANY; 1360 } 1361 } 1362 } 1363 } 1364 } 1365 1366 static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) 1367 { 1368 DisasContext *ctx = container_of(dcbase, DisasContext, base); 1369 1370 switch (ctx->base.is_jmp) { 1371 case DISAS_TOO_MANY: 1372 gen_goto_tb(ctx, 0, 0); 1373 break; 1374 case DISAS_NORETURN: 1375 break; 1376 default: 1377 g_assert_not_reached(); 1378 } 1379 } 1380 1381 static const TranslatorOps riscv_tr_ops = { 1382 .init_disas_context = riscv_tr_init_disas_context, 1383 .tb_start = riscv_tr_tb_start, 1384 .insn_start = riscv_tr_insn_start, 1385 .translate_insn = riscv_tr_translate_insn, 1386 .tb_stop = riscv_tr_tb_stop, 1387 }; 1388 1389 void riscv_translate_code(CPUState *cs, TranslationBlock *tb, 1390 int *max_insns, vaddr pc, void *host_pc) 1391 { 1392 DisasContext ctx; 1393 1394 translator_loop(cs, tb, max_insns, pc, host_pc, &riscv_tr_ops, &ctx.base); 1395 } 1396 1397 void riscv_translate_init(void) 1398 { 1399 int i; 1400 1401 /* 1402 * cpu_gpr[0] is a placeholder for the zero register. Do not use it. 1403 * Use the gen_set_gpr and get_gpr helper functions when accessing regs, 1404 * unless you specifically block reads/writes to reg 0. 1405 */ 1406 cpu_gpr[0] = NULL; 1407 cpu_gprh[0] = NULL; 1408 1409 for (i = 1; i < 32; i++) { 1410 cpu_gpr[i] = tcg_global_mem_new(tcg_env, 1411 offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]); 1412 cpu_gprh[i] = tcg_global_mem_new(tcg_env, 1413 offsetof(CPURISCVState, gprh[i]), riscv_int_regnamesh[i]); 1414 } 1415 1416 for (i = 0; i < 32; i++) { 1417 cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env, 1418 offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]); 1419 } 1420 1421 cpu_pc = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, pc), "pc"); 1422 cpu_vl = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, vl), "vl"); 1423 cpu_vstart = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, vstart), 1424 "vstart"); 1425 load_res = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_res), 1426 "load_res"); 1427 load_val = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_val), 1428 "load_val"); 1429 } 1430