Lines Matching +full:post +full:- +full:processing

5  *  Copyright (c) 2005-2007 CodeSourcery
24 #include "translate-a32.h"
29 #include "exec/helper-proto.h"
33 #include "exec/helper-info.c.inc"
88 /* no-op */ in asimd_imm_const()
156 if (!s->condjmp) { in arm_gen_condlabel()
157 s->condlabel = gen_disas_label(s); in arm_gen_condlabel()
158 s->condjmp = 1; in arm_gen_condlabel()
230 switch (s->mmu_idx) { in get_a32_user_mem_index()
260 return diff + (s->thumb ? 4 : 8); in jmp_diff()
265 assert(s->pc_save != -1); in gen_pc_plus_diff()
266 if (tb_cflags(s->base.tb) & CF_PCREL) { in gen_pc_plus_diff()
267 tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff); in gen_pc_plus_diff()
269 tcg_gen_movi_i32(var, s->pc_curr + diff); in gen_pc_plus_diff()
297 gen_pc_plus_diff(s, tmp, jmp_diff(s, ofs - (s->pc_curr & 3))); in add_reg_for_lit()
314 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3); in store_reg()
315 s->base.is_jmp = DISAS_JUMP; in store_reg()
316 s->pc_save = -1; in store_reg()
318 /* For M-profile SP bits [1:0] are always zero */ in store_reg()
325 * Variant of store_reg which applies v8M stack-limit checks before updating
334 if (s->v8m_stackcheck) { in store_sp_checked()
366 TCGv_i32 tcg_el = tcg_constant_i32(s->current_el); in gen_rebuild_hflags()
383 /* We just completed a step of an insn. Move from Active-not-pending in gen_singlestep_exception()
384 * to Active-pending, and then also take the swstep exception. in gen_singlestep_exception()
393 gen_swstep_exception(s, 1, s->is_ldex); in gen_singlestep_exception()
394 s->base.is_jmp = DISAS_NORETURN; in gen_singlestep_exception()
403 if (s->eci) { in clear_eci_state()
405 s->eci = 0; in clear_eci_state()
440 /* Dual 16-bit add. Result placed in t0, and t1 is marked as dead.
472 /* dest = T0 - T1 + CF - 1. */
507 /* dest = T0 - T1. Compute C, N, V and Z flags */
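
As a stand-alone illustration of the flag computations these comments describe (plain C, not TCG; subs_flags is a name invented here), the NZCV results for dest = T0 - T1 can be sketched as:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative sketch, not QEMU code: NZCV for dest = t0 - t1.
     * ARM carry on subtraction means "no borrow"; SBC (t0 - t1 + CF - 1
     * above) is the same computation with the borrow chained in. */
    static void subs_flags(uint32_t t0, uint32_t t1)
    {
        uint32_t res = t0 - t1;
        int n = res >> 31;
        int z = (res == 0);
        int c = (t0 >= t1);                      /* C = NOT borrow */
        int v = ((t0 ^ t1) & (t0 ^ res)) >> 31;  /* signed overflow */
        printf("%08x - %08x = %08x N=%d Z=%d C=%d V=%d\n",
               t0, t1, res, n, z, c, v);
    }

    int main(void)
    {
        subs_flags(0, 1);          /* borrow: N=1 C=0 */
        subs_flags(5, 5);          /* Z=1 C=1 */
        subs_flags(0x80000000, 1); /* INT_MIN - 1: V=1 */
        return 0;
    }
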
566 shifter_out_im(var, 32 - shift); in gen_arm_shift_im()
578 shifter_out_im(var, shift - 1); in gen_arm_shift_im()
586 shifter_out_im(var, shift - 1); in gen_arm_shift_im()
594 shifter_out_im(var, shift - 1); in gen_arm_shift_im()
669 case 9: /* ls: !C || Z -> !(C && !Z) */ in arm_test_cc()
672 /* CF is 1 for C, so -CF is an all-bits-set mask for C; in arm_test_cc()
673 ZF is non-zero for !Z; so AND the two subexpressions. */ in arm_test_cc()
678 case 10: /* ge: N == V -> N ^ V == 0 */ in arm_test_cc()
679 case 11: /* lt: N != V -> N ^ V != 0 */ in arm_test_cc()
715 cmp->cond = cond; in arm_test_cc()
716 cmp->value = value; in arm_test_cc()
721 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label); in arm_jump_cc()
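
The identities in the ls/ge/lt comments above reduce each condition to a single comparison on materialized flags; a minimal sketch (plain C, invented name test_cc) of those cases:

    #include <assert.h>
    #include <stdbool.h>

    /* Sketch of the identities quoted above, evaluated on flag bits. */
    static bool test_cc(int cc, bool N, bool Z, bool C, bool V)
    {
        switch (cc) {
        case 8:  return C && !Z;     /* hi */
        case 9:  return !(C && !Z);  /* ls: !C || Z == !(C && !Z) */
        case 10: return N == V;      /* ge: N ^ V == 0 */
        case 11: return N != V;      /* lt: N ^ V != 0 */
        default: return true;        /* al */
        }
    }

    int main(void)
    {
        assert(test_cc(9, 0, 1, 1, 0));   /* ls holds when Z is set */
        assert(!test_cc(10, 1, 0, 0, 0)); /* ge fails when N != V */
        return 0;
    }
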
733 if (s->condexec_mask) { in gen_set_condexec()
734 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); in gen_set_condexec()
743 s->pc_save = s->pc_curr + diff; in gen_update_pc()
749 s->base.is_jmp = DISAS_JUMP; in gen_bx()
753 s->pc_save = -1; in gen_bx()
758 * For M-profile CPUs, include logic to detect exception-return
762 * which signals a function return from non-secure state; this can happen
768 * the same behaviour as for a branch to a non-magic address).
770 * In linux-user mode it is unclear what the right behaviour for an
781 * s->base.is_jmp that we need to do the rest of the work later. in gen_bx_excret()
786 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) { in gen_bx_excret()
787 s->base.is_jmp = DISAS_BX_EXCRET; in gen_bx_excret()
809 if (s->ss_active) { in gen_bx_excret_final_code()
816 * At this point in runtime env->regs[15] and env->thumb will hold in gen_bx_excret_final_code()
817 * the exception-return magic number, which do_v7m_exception_exit() in gen_bx_excret_final_code()
819 * the cpu-exec main loop guarantees that we will always go straight in gen_bx_excret_final_code()
820 * from raising the exception to the exception-handling code. in gen_bx_excret_final_code()
836 * - we don't need to do gen_update_pc() because the bxns helper will in gen_bxns()
838 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE in gen_bxns()
841 * is correct in the non-UNPREDICTABLE cases, and we can choose in gen_bxns()
845 s->base.is_jmp = DISAS_EXIT; in gen_bxns()
858 s->base.is_jmp = DISAS_EXIT; in gen_blxns()
915 /* Not needed for user-mode BE32, where we use MO_BE instead. */ in gen_aa32_addr()
916 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) { in gen_aa32_addr()
917 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE))); in gen_aa32_addr()
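
The XOR above is the classic BE32 address munge: sub-word accesses within an aligned 32-bit word are redirected rather than byte-swapped. A sketch of the effect (plain C, invented name):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: SCTLR.B address adjustment for sub-word accesses.
     * size_log2 0 (byte) XORs with 3, size_log2 1 (halfword) with 2;
     * 32-bit accesses are untouched, matching (op & MO_SIZE) < MO_32. */
    static uint32_t be32_addr(uint32_t addr, unsigned size_log2)
    {
        return addr ^ (4 - (1u << size_log2));
    }

    int main(void)
    {
        for (uint32_t a = 0; a < 4; a++) {
            printf("byte @%u -> @%u\n", a, be32_addr(a, 0));
        }
        printf("half @0 -> @%u, half @2 -> @%u\n",
               be32_addr(0, 1), be32_addr(2, 1));
        return 0;
    }
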
947 /* Not needed for user-mode BE32, where we use MO_BE instead. */ in gen_aa32_ld_internal_i64()
948 if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { in gen_aa32_ld_internal_i64()
958 /* Not needed for user-mode BE32, where we use MO_BE instead. */ in gen_aa32_st_internal_i64()
959 if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { in gen_aa32_st_internal_i64()
1019 s->svc_imm = imm16; in gen_hvc()
1021 s->base.is_jmp = DISAS_HVC; in gen_hvc()
1032 s->base.is_jmp = DISAS_SMC; in gen_smc()
1040 s->base.is_jmp = DISAS_NORETURN; in gen_exception_internal_insn()
1063 if (s->aarch64) { in gen_exception_insn_el_v()
1070 s->base.is_jmp = DISAS_NORETURN; in gen_exception_insn_el_v()
1083 if (s->aarch64) { in gen_exception_insn()
1090 s->base.is_jmp = DISAS_NORETURN; in gen_exception_insn()
1098 s->base.is_jmp = DISAS_NORETURN; in gen_exception_bkpt_insn()
1111 s->base.is_jmp = DISAS_EXIT; in gen_lookup_tb()
1126 * (and for consistency with our 32-bit semihosting). in gen_hlt()
1128 if (semihosting_enabled(s->current_el == 0) && in gen_hlt()
1129 (imm == (s->thumb ? 0x3c : 0xf000))) { in gen_hlt()
1155 * Calculate the offset assuming fully little-endian, in neon_element_offset()
1156 * then XOR to account for the order of the 8-byte units. in neon_element_offset()
1159 ofs ^= 8 - element_size; in neon_element_offset()
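
For a concrete picture of that XOR (a sketch, assuming a vector register split into 8-byte units):

    #include <stdio.h>

    /* Sketch of neon_element_offset's fixup: compute the offset as if
     * fully little-endian, then on a big-endian host reverse the order
     * of elements inside each 8-byte unit with a single XOR. */
    static unsigned elt_ofs(unsigned element, unsigned element_size,
                            int host_be)
    {
        unsigned ofs = element * element_size;
        if (host_be) {
            ofs ^= 8 - element_size;
        }
        return ofs;
    }

    int main(void)
    {
        /* 16-bit elements: LE offsets 0,2,4,6 become BE offsets 6,4,2,0. */
        for (unsigned e = 0; e < 4; e++) {
            printf("h%u: le=%u be=%u\n", e, elt_ofs(e, 2, 0), elt_ofs(e, 2, 1));
        }
        return 0;
    }
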
1433 tcg_gen_addi_i32(tmp, tmp, -offset); in gen_iwmmxt_address()
1439 /* Post indexed */ in gen_iwmmxt_address()
1444 tcg_gen_addi_i32(tmp, tmp, -offset); in gen_iwmmxt_address()
2501 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1); in disas_dsp_insn()
2523 if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) { in gen_goto_tb()
2525 * For pcrel, the pc must always be up-to-date on entry to in gen_goto_tb()
2532 if (tb_cflags(s->base.tb) & CF_PCREL) { in gen_goto_tb()
2539 tcg_gen_exit_tb(s->base.tb, n); in gen_goto_tb()
2544 s->base.is_jmp = DISAS_NORETURN; in gen_goto_tb()
2550 if (unlikely(s->ss_active)) { in gen_jmp_tb()
2553 s->base.is_jmp = DISAS_JUMP; in gen_jmp_tb()
2556 switch (s->base.is_jmp) { in gen_jmp_tb()
2580 s->base.is_jmp = DISAS_NORETURN; in gen_jmp_tb()
2628 mask &= aarch32_cpsr_valid_mask(s->features, s->isar); in msr_mask()
2784 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) { in msr_banked_access_decode()
2787 if (s->current_el == 1) { in msr_banked_access_decode()
2817 * mode. However, there is some real-world code that will do in msr_banked_access_decode()
2819 * access. (Notably a standard Cortex-R52 startup code fragment in msr_banked_access_decode()
2823 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 in msr_banked_access_decode()
2824 || (s->current_el < 3 && *regno != 16 && *regno != 17)) { in msr_banked_access_decode()
2856 s->base.is_jmp = DISAS_UPDATE_EXIT; in gen_msr_banked()
2876 s->base.is_jmp = DISAS_UPDATE_EXIT; in gen_mrs_banked()
2896 translator_io_start(&s->base); in gen_rfe()
2898 /* Must exit loop to check un-masked IRQs */ in gen_rfe()
2899 s->base.is_jmp = DISAS_EXIT; in gen_rfe()
2902 /* Generate an old-style exception return. Marks pc as dead. */
2911 0b0000000111100111, /* crn == 9, crm == {c0-c2, c5-c8} */ in aa32_cpreg_encoding_in_impdef_space()
2913 0b1000000111111111, /* crn == 11, crm == {c0-c8, c15} */ in aa32_cpreg_encoding_in_impdef_space()
2917 return (mask[crn - 9] >> crm) & 1; in aa32_cpreg_encoding_in_impdef_space()
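
Pulled out of context, the lookup works like this (sketch; the crn == 10 row is not shown in this listing, so it is left empty here):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the table test above: one 16-bit mask per crn in 9..11,
     * bit crm set when the (crn, crm) encoding is in the IMPDEF space. */
    static bool in_impdef_space(unsigned crn, unsigned crm)
    {
        static const uint16_t mask[3] = {
            0b0000000111100111, /* crn ==  9, crm == {c0-c2, c5-c8} */
            0,                  /* crn == 10: row elided in this listing */
            0b1000000111111111, /* crn == 11, crm == {c0-c8, c15} */
        };
        if (crn < 9 || crn > 11) {
            return false;
        }
        return (mask[crn - 9] >> crm) & 1;
    }

    int main(void)
    {
        return in_impdef_space(9, 5) ? 0 : 1; /* c5 is in the crn==9 set */
    }
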
2926 uint32_t key = ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2); in do_coproc_insn()
2927 const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key); in do_coproc_insn()
2972 if (s->hstr_active && cpnum == 15 && s->current_el == 1) { in do_coproc_insn()
2996 * to assume continue-to-next-instruction. in do_coproc_insn()
2998 s->base.is_jmp = DISAS_NEXT; in do_coproc_insn()
3009 switch (s->current_el) { in do_coproc_insn()
3032 s->ns ? "non-secure" : "secure"); in do_coproc_insn()
3038 crm, opc2, s->ns ? "non-secure" : "secure"); in do_coproc_insn()
3045 if (!cp_access_ok(s->current_el, ri, isread)) { in do_coproc_insn()
3050 if ((s->hstr_active && s->current_el == 0) || ri->accessfn || in do_coproc_insn()
3051 (ri->fgt && s->fgt_active) || in do_coproc_insn()
3066 } else if (ri->type & ARM_CP_RAISES_EXC) { in do_coproc_insn()
3076 switch (ri->type & ARM_CP_SPECIAL_MASK) { in do_coproc_insn()
3086 s->base.is_jmp = DISAS_WFI; in do_coproc_insn()
3093 if (ri->type & ARM_CP_IO) { in do_coproc_insn()
3095 need_exit_tb = translator_io_start(&s->base); in do_coproc_insn()
3103 if (ri->type & ARM_CP_CONST) { in do_coproc_insn()
3104 tmp64 = tcg_constant_i64(ri->resetvalue); in do_coproc_insn()
3105 } else if (ri->readfn) { in do_coproc_insn()
3113 tcg_gen_ld_i64(tmp64, tcg_env, ri->fieldoffset); in do_coproc_insn()
3123 if (ri->type & ARM_CP_CONST) { in do_coproc_insn()
3124 tmp = tcg_constant_i32(ri->resetvalue); in do_coproc_insn()
3125 } else if (ri->readfn) { in do_coproc_insn()
3132 tmp = load_cpu_offset(ri->fieldoffset); in do_coproc_insn()
3145 if (ri->type & ARM_CP_CONST) { in do_coproc_insn()
3156 if (ri->writefn) { in do_coproc_insn()
3162 tcg_gen_st_i64(tmp64, tcg_env, ri->fieldoffset); in do_coproc_insn()
3166 if (ri->writefn) { in do_coproc_insn()
3172 store_cpu_offset(tmp, ri->fieldoffset, 4); in do_coproc_insn()
3177 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { in do_coproc_insn()
3182 gen_rebuild_hflags(s, ri->type & ARM_CP_NEWEL); in do_coproc_insn()
3200 if (extract32(s->c15_cpar, cpnum, 1) == 0) { in disas_xscale_insn()
3213 /* Store a 64-bit value to a register pair. Clobbers val. */
3225 /* load and add a 64-bit value from a register pair. */
3232 /* Load 64-bit value rd:rn. */ in gen_addq()
3257 MemOp opc = size | MO_ALIGN | s->be_data; in gen_load_exclusive()
3259 s->is_ldex = true; in gen_load_exclusive()
3266 * For AArch32, architecturally the 32-bit word at the lowest in gen_load_exclusive()
3268 * the CPU is big-endian. That means we don't want to do a in gen_load_exclusive()
3270 * architecturally 64-bit access, but instead do a 64-bit access in gen_load_exclusive()
3277 if (s->be_data == MO_BE) { in gen_load_exclusive()
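
The rule in the comment above reduces to a half-swap after the single 64-bit access; as a sketch (plain C, invented name):

    #include <stdint.h>

    /* Sketch: for LDREXD the word at the LOWER address is always Rt.
     * A big-endian 64-bit load puts that word in the HIGH half of val,
     * so the halves come back swapped relative to little-endian. */
    static void split_pair(uint64_t val, int big_endian,
                           uint32_t *rt, uint32_t *rt2)
    {
        if (big_endian) {
            *rt  = (uint32_t)(val >> 32);
            *rt2 = (uint32_t)val;
        } else {
            *rt  = (uint32_t)val;
            *rt2 = (uint32_t)(val >> 32);
        }
    }

    int main(void)
    {
        uint32_t rt, rt2;
        split_pair(0x1111111122222222ull, 1, &rt, &rt2);
        return rt == 0x11111111 ? 0 : 1;
    }
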
3294 tcg_gen_movi_i64(cpu_exclusive_addr, -1); in gen_clrex()
3305 MemOp opc = size | MO_ALIGN | s->be_data; in gen_store_exclusive()
3307 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { in gen_store_exclusive()
3329 * For AArch32, architecturally the 32-bit word at the lowest in gen_store_exclusive()
3331 * the CPU is big-endian. Since we're going to treat this as a in gen_store_exclusive()
3332 * single 64-bit BE store, we need to put the two halves in the in gen_store_exclusive()
3335 * SCTLR_B as if for an architectural 64-bit access. in gen_store_exclusive()
3337 if (s->be_data == MO_BE) { in gen_store_exclusive()
3360 tcg_gen_movi_i64(cpu_exclusive_addr, -1); in gen_store_exclusive()
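
The pseudocode comment at 3307 above describes exactly the shape of a compare-and-swap; a hedged sketch of that semantic (hypothetical state struct, not QEMU's cmpxchg plumbing):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Sketch: store-exclusive succeeds (returns 0) only if the monitor
     * is armed for this address AND memory still holds the value seen by
     * the matching load-exclusive; the monitor is consumed either way. */
    struct excl { uintptr_t addr; uint32_t val; }; /* addr == -1: disarmed */

    static int store_excl32(struct excl *st, _Atomic uint32_t *mem,
                            uintptr_t addr, uint32_t newval)
    {
        int fail = 1;
        if (st->addr == addr) {
            uint32_t expected = st->val;
            if (atomic_compare_exchange_strong(mem, &expected, newval)) {
                fail = 0;
            }
        }
        st->addr = (uintptr_t)-1;
        return fail;
    }

    int main(void)
    {
        _Atomic uint32_t mem = 42;
        struct excl st = { (uintptr_t)&mem, 42 };
        return store_excl32(&st, &mem, (uintptr_t)&mem, 7); /* 0: success */
    }
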
3380 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1 in gen_srs()
3382 * - UNDEFINED in Hyp mode in gen_srs()
3383 * - UNPREDICTABLE in User or System mode in gen_srs()
3384 * - UNPREDICTABLE if the specified mode is: in gen_srs()
3385 * -- not implemented in gen_srs()
3386 * -- not a valid mode number in gen_srs()
3387 * -- a mode that's at a higher exception level in gen_srs()
3388 * -- Monitor, if we are Non-secure in gen_srs()
3391 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) { in gen_srs()
3396 if (s->current_el == 0 || s->current_el == 2) { in gen_srs()
3410 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) { in gen_srs()
3415 /* No need to check specifically for "are we non-secure" because in gen_srs()
3416 * we've already made EL0 UNDEF and handled the trap for S-EL1; in gen_srs()
3417 * so if this isn't EL3 then we must be non-secure. in gen_srs()
3419 if (s->current_el != 3) { in gen_srs()
3439 offset = -4; in gen_srs()
3445 offset = -8; in gen_srs()
3462 offset = -8; in gen_srs()
3468 offset = -4; in gen_srs()
3479 s->base.is_jmp = DISAS_UPDATE_EXIT; in gen_srs()
3486 arm_gen_test_cc(cond ^ 1, s->condlabel.label); in arm_skip_unless()
3536 return s->condexec_mask == 0; in t16_setflags()
3541 return (x & 0xff) | (x & 0x100) << (14 - 8); in t16_push_list()
3546 return (x & 0xff) | (x & 0x100) << (15 - 8); in t16_pop_list()
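
These two decoders expand the 9-bit T16 register lists: bits [7:0] are r0-r7, and bit 8 selects LR for PUSH but PC for POP. For example:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t push_list(uint16_t x)
    {
        return (x & 0xff) | (x & 0x100) << (14 - 8);
    }

    static uint16_t pop_list(uint16_t x)
    {
        return (x & 0xff) | (x & 0x100) << (15 - 8);
    }

    int main(void)
    {
        /* Encoding 0x111 = r0, r4 plus the extra-register bit. */
        printf("PUSH {r0,r4,lr}: %04x\n", push_list(0x111)); /* 0x4011 */
        printf("POP  {r0,r4,pc}: %04x\n", pop_list(0x111));  /* 0x8011 */
        return 0;
    }
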
3553 #include "decode-a32.c.inc"
3554 #include "decode-a32-uncond.c.inc"
3555 #include "decode-t32.c.inc"
3556 #include "decode-t16.c.inc"
3570 * to be in the coprocessor-instruction space at all. v8M still in valid_cp()
3589 if (!valid_cp(s, a->cp)) { in trans_MCR()
3592 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2, in trans_MCR()
3593 false, a->rt, 0); in trans_MCR()
3599 if (!valid_cp(s, a->cp)) { in trans_MRC()
3602 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2, in trans_MRC()
3603 true, a->rt, 0); in trans_MRC()
3609 if (!valid_cp(s, a->cp)) { in trans_MCRR()
3612 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0, in trans_MCRR()
3613 false, a->rt, a->rt2); in trans_MCRR()
3619 if (!valid_cp(s, a->cp)) { in trans_MRRC()
3622 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0, in trans_MRRC()
3623 true, a->rt, a->rt2); in trans_MRRC()
3627 /* Helpers to swap operands for reverse-subtract. */
3649 * Helpers for the data processing routines.
3673 if (s->thumb) { in store_reg_kind()
3690 * Data Processing (register)
3701 tmp2 = load_reg(s, a->rm); in op_s_rrr_shi()
3702 gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc); in op_s_rrr_shi()
3703 tmp1 = load_reg(s, a->rn); in op_s_rrr_shi()
3710 return store_reg_kind(s, a->rd, tmp1, kind); in op_s_rrr_shi()
3719 tmp = load_reg(s, a->rm); in op_s_rxr_shi()
3720 gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc); in op_s_rxr_shi()
3726 return store_reg_kind(s, a->rd, tmp, kind); in op_s_rxr_shi()
3730 * Data-processing (register-shifted register)
3741 tmp1 = load_reg(s, a->rs); in op_s_rrr_shr()
3742 tmp2 = load_reg(s, a->rm); in op_s_rrr_shr()
3743 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc); in op_s_rrr_shr()
3744 tmp1 = load_reg(s, a->rn); in op_s_rrr_shr()
3751 return store_reg_kind(s, a->rd, tmp1, kind); in op_s_rrr_shr()
3760 tmp1 = load_reg(s, a->rs); in op_s_rxr_shr()
3761 tmp2 = load_reg(s, a->rm); in op_s_rxr_shr()
3762 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc); in op_s_rxr_shr()
3768 return store_reg_kind(s, a->rd, tmp2, kind); in op_s_rxr_shr()
3772 * Data-processing (immediate)
3777 * Note that logic_cc && a->rot setting CF based on the msb of the
3788 imm = ror32(a->imm, a->rot); in op_s_rri_rot()
3789 if (logic_cc && a->rot) { in op_s_rri_rot()
3792 tmp1 = load_reg(s, a->rn); in op_s_rri_rot()
3799 return store_reg_kind(s, a->rd, tmp1, kind); in op_s_rri_rot()
3809 imm = ror32(a->imm, a->rot); in op_s_rxi_rot()
3810 if (logic_cc && a->rot) { in op_s_rxi_rot()
3820 return store_reg_kind(s, a->rd, tmp, kind); in op_s_rxi_rot()
3847 DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
3848 DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
3849 DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
3850 DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
3852 DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
3853 DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
3854 DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
3855 DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
3862 DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false, in DO_CMP2()
3863 a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL) in DO_CMP2()
3868 * we modify a->s via that parameter before it is used by OP. in DO_CMP2()
3870 DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false, in DO_CMP2()
3873 if (a->rd == 15 && a->s) { in DO_CMP2()
3879 if (IS_USER(s) || s->current_el == 2) { in DO_CMP2()
3884 a->s = 0; in DO_CMP2()
3886 } else if (a->rd == 13 && a->rn == 13) { in DO_CMP2()
3892 DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
3895 if (a->rd == 15 && a->s) {
3901 if (IS_USER(s) || s->current_el == 2) {
3906 a->s = 0;
3908 } else if (a->rd == 13) {
3914 DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
3917 * ORN is only available with T32, so there is no register-shifted-register
3922 return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
3927 return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL); in trans_ORN_rri()
3936 store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm)); in trans_ADR()
3946 store_reg(s, a->rd, tcg_constant_i32(a->imm)); in trans_MOVW()
3958 tmp = load_reg(s, a->rd); in trans_MOVT()
3960 tcg_gen_ori_i32(tmp, tmp, a->imm << 16); in trans_MOVT()
3961 store_reg(s, a->rd, tmp); in trans_MOVT()
3966 * v8.1M MVE wide-shifts
3978 if (a->rdahi == 15) { in do_mve_shl_ri()
3984 a->rdahi == 13) { in do_mve_shl_ri()
3990 if (a->shim == 0) { in do_mve_shl_ri()
3991 a->shim = 32; in do_mve_shl_ri()
3995 rdalo = load_reg(s, a->rdalo); in do_mve_shl_ri()
3996 rdahi = load_reg(s, a->rdahi); in do_mve_shl_ri()
3999 fn(rda, rda, a->shim); in do_mve_shl_ri()
4003 store_reg(s, a->rdalo, rdalo); in do_mve_shl_ri()
4004 store_reg(s, a->rdahi, rdahi); in do_mve_shl_ri()
4063 if (a->rdahi == 15) { in do_mve_shl_rr()
4069 a->rdahi == 13 || a->rm == 13 || a->rm == 15 || in do_mve_shl_rr()
4070 a->rm == a->rdahi || a->rm == a->rdalo) { in do_mve_shl_rr()
4077 rdalo = load_reg(s, a->rdalo); in do_mve_shl_rr()
4078 rdahi = load_reg(s, a->rdahi); in do_mve_shl_rr()
4081 /* The helper takes care of the sign-extension of the low 8 bits of Rm */ in do_mve_shl_rr()
4082 fn(rda, tcg_env, rda, cpu_R[a->rm]); in do_mve_shl_rr()
4086 store_reg(s, a->rdalo, rdalo); in do_mve_shl_rr()
4087 store_reg(s, a->rdahi, rdahi); in do_mve_shl_rr()
4130 a->rda == 13 || a->rda == 15) { in do_mve_sh_ri()
4136 if (a->shim == 0) { in do_mve_sh_ri()
4137 a->shim = 32; in do_mve_sh_ri()
4139 fn(cpu_R[a->rda], cpu_R[a->rda], a->shim); in do_mve_sh_ri()
4182 a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 || in do_mve_sh_rr()
4183 a->rm == a->rda) { in do_mve_sh_rr()
4189 /* The helper takes care of the sign-extension of the low 8 bits of Rm */ in do_mve_sh_rr()
4190 fn(cpu_R[a->rda], tcg_env, cpu_R[a->rda], cpu_R[a->rm]); in do_mve_sh_rr()
4212 t1 = load_reg(s, a->rn); in op_mla()
4213 t2 = load_reg(s, a->rm); in op_mla()
4216 t2 = load_reg(s, a->ra); in op_mla()
4219 if (a->s) { in op_mla()
4222 store_reg(s, a->rd, t1); in op_mla()
4243 t1 = load_reg(s, a->rn); in trans_MLS()
4244 t2 = load_reg(s, a->rm); in trans_MLS()
4246 t2 = load_reg(s, a->ra); in trans_MLS()
4248 store_reg(s, a->rd, t1); in trans_MLS()
4256 t0 = load_reg(s, a->rm); in op_mlal()
4257 t1 = load_reg(s, a->rn); in op_mlal()
4264 t2 = load_reg(s, a->ra); in op_mlal()
4265 t3 = load_reg(s, a->rd); in op_mlal()
4268 if (a->s) { in op_mlal()
4271 store_reg(s, a->ra, t0); in op_mlal()
4272 store_reg(s, a->rd, t1); in op_mlal()
4300 if (s->thumb in trans_UMAAL()
4306 t0 = load_reg(s, a->rm); in trans_UMAAL()
4307 t1 = load_reg(s, a->rn); in trans_UMAAL()
4310 t2 = load_reg(s, a->ra); in trans_UMAAL()
4312 t2 = load_reg(s, a->rd); in trans_UMAAL()
4314 store_reg(s, a->ra, t0); in trans_UMAAL()
4315 store_reg(s, a->rd, t1); in trans_UMAAL()
4327 if (s->thumb in op_qaddsub()
4333 t0 = load_reg(s, a->rm); in op_qaddsub()
4334 t1 = load_reg(s, a->rn); in op_qaddsub()
4343 store_reg(s, a->rd, t0); in op_qaddsub()
4369 if (s->thumb in DO_QADDSUB()
4375 t0 = load_reg(s, a->rn); in DO_QADDSUB()
4376 t1 = load_reg(s, a->rm); in DO_QADDSUB()
4381 store_reg(s, a->rd, t0); in DO_QADDSUB()
4384 t1 = load_reg(s, a->ra); in DO_QADDSUB()
4386 store_reg(s, a->rd, t0); in DO_QADDSUB()
4389 tl = load_reg(s, a->ra); in DO_QADDSUB()
4390 th = load_reg(s, a->rd); in DO_QADDSUB()
4391 /* Sign-extend the 32-bit product to 64 bits. */ in DO_QADDSUB()
4395 store_reg(s, a->ra, tl); in DO_QADDSUB()
4396 store_reg(s, a->rd, th); in DO_QADDSUB()
4435 t0 = load_reg(s, a->rn); in op_smlawx()
4436 t1 = load_reg(s, a->rm); in op_smlawx()
4438 * Since the nominal result is product<47:16>, shift the 16-bit in op_smlawx()
4448 t0 = load_reg(s, a->ra); in op_smlawx()
4451 store_reg(s, a->rd, t1); in op_smlawx()
4475 * When running single-threaded TCG code, use the helper to ensure that in trans_YIELD()
4476 * the next round-robin scheduled vCPU gets a crack. When running in in trans_YIELD()
4480 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { in trans_YIELD()
4482 s->base.is_jmp = DISAS_YIELD; in trans_YIELD()
4490 * When running single-threaded TCG code, use the helper to ensure that in trans_WFE()
4491 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we in trans_WFE()
4496 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { in trans_WFE()
4498 s->base.is_jmp = DISAS_WFE; in trans_WFE()
4507 s->base.is_jmp = DISAS_WFI; in trans_WFI()
4514 * For M-profile, minimal-RAS ESB can be a NOP. in trans_ESB()
4527 if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) { in trans_ESB()
4541 uint32_t val = ror32(a->imm, a->rot * 2); in trans_MSR_imm()
4542 uint32_t mask = msr_mask(s, a->mask, a->r); in trans_MSR_imm()
4544 if (gen_set_psr_im(s, mask, a->r, val)) { in trans_MSR_imm()
4562 t1 = load_reg(s, a->rn); in op_crc32()
4563 t2 = load_reg(s, a->rm); in op_crc32()
4582 store_reg(s, a->rd, t1); in op_crc32()
4608 gen_mrs_banked(s, a->r, a->sysm, a->rd); in DO_CRC32()
4617 gen_msr_banked(s, a->r, a->sysm, a->rn); in trans_MSR_bank()
4628 if (a->r) { in trans_MRS_reg()
4638 store_reg(s, a->rd, tmp); in trans_MRS_reg()
4645 uint32_t mask = msr_mask(s, a->mask, a->r); in trans_MSR_reg()
4650 tmp = load_reg(s, a->rn); in trans_MSR_reg()
4651 if (gen_set_psr(s, mask, a->r, tmp)) { in trans_MSR_reg()
4665 gen_helper_v7m_mrs(tmp, tcg_env, tcg_constant_i32(a->sysm)); in trans_MRS_v7m()
4666 store_reg(s, a->rd, tmp); in trans_MRS_v7m()
4677 addr = tcg_constant_i32((a->mask << 10) | a->sysm); in trans_MSR_v7m()
4678 reg = load_reg(s, a->rn); in trans_MSR_v7m()
4691 gen_bx_excret(s, load_reg(s, a->rm)); in trans_BX()
4702 * TBFLAGS bit on a basically-never-happens case, so call a helper in trans_BXJ()
4709 s->current_el < 2 && s->ns) { in trans_BXJ()
4710 gen_helper_check_bxj_trap(tcg_env, tcg_constant_i32(a->rm)); in trans_BXJ()
4713 gen_bx(s, load_reg(s, a->rm)); in trans_BXJ()
4724 tmp = load_reg(s, a->rm); in trans_BLX_r()
4725 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb); in trans_BLX_r()
4733 * the user-only mode either (in theory you can use them from
4738 if (!s->v8m_secure || IS_USER_ONLY) { in trans_BXNS()
4741 gen_bxns(s, a->rm); in trans_BXNS()
4748 if (!s->v8m_secure || IS_USER_ONLY) { in trans_BLXNS()
4751 gen_blxns(s, a->rm); in trans_BLXNS()
4763 tmp = load_reg(s, a->rm); in trans_CLZ()
4765 store_reg(s, a->rd, tmp); in trans_CLZ()
4780 if (s->current_el == 2) { in trans_ERET()
4792 gen_hlt(s, a->imm); in trans_HLT()
4802 s->eci_handled = true; in trans_BKPT()
4804 semihosting_enabled(s->current_el == 0) && in trans_BKPT()
4805 (a->imm == 0xab)) { in trans_BKPT()
4808 gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false)); in trans_BKPT()
4821 gen_hvc(s, a->imm); in trans_HVC()
4849 * it is executed by a CPU in non-secure state from memory in trans_SG()
4850 * which is Secure & NonSecure-Callable. in trans_SG()
4859 if (s->v8m_secure) { in trans_SG()
4861 s->condexec_cond = 0; in trans_SG()
4862 s->condexec_mask = 0; in trans_SG()
4875 if (a->rd == 13 || a->rd == 15 || a->rn == 15) { in trans_TT()
4880 if (a->A && !s->v8m_secure) { in trans_TT()
4886 addr = load_reg(s, a->rn); in trans_TT()
4888 gen_helper_v7m_tt(tmp, tcg_env, addr, tcg_constant_i32((a->A << 1) | a->T)); in trans_TT()
4889 store_reg(s, a->rd, tmp); in trans_TT()
4915 TCGv_i32 addr = load_reg(s, a->rn); in op_addr_rr_pre()
4917 if (s->v8m_stackcheck && a->rn == 13 && a->w) { in op_addr_rr_pre()
4921 if (a->p) { in op_addr_rr_pre()
4922 TCGv_i32 ofs = load_reg(s, a->rm); in op_addr_rr_pre()
4923 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0); in op_addr_rr_pre()
4924 if (a->u) { in op_addr_rr_pre()
4936 if (!a->p) { in op_addr_rr_post()
4937 TCGv_i32 ofs = load_reg(s, a->rm); in op_addr_rr_post()
4938 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0); in op_addr_rr_post()
4939 if (a->u) { in op_addr_rr_post()
4944 } else if (!a->w) { in op_addr_rr_post()
4947 store_reg(s, a->rn, addr); in op_addr_rr_post()
4953 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w); in op_load_rr()
4967 store_reg_from_load(s, a->rt, tmp); in op_load_rr()
4974 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite; in op_store_rr()
4981 if (s->thumb && a->rn == 15) { in op_store_rr()
4987 tmp = load_reg(s, a->rt); in op_store_rr()
4998 * LDRD is required to be an atomic 64-bit access if the in do_ldrd_load()
4999 * address is 8-aligned, two atomic 32-bit accesses if in do_ldrd_load()
5000 * it's only 4-aligned, and to give an alignment fault in do_ldrd_load()
5001 * if it's not 4-aligned. This is MO_ALIGN_4 | MO_ATOM_SUBALIGN. in do_ldrd_load()
5005 * so we don't get its SCTLR_B check, and instead do a 64-bit access in do_ldrd_load()
5008 * For M-profile, and for A-profile before LPAE, the 64-bit in do_ldrd_load()
5019 MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data; in do_ldrd_load()
5026 if (s->be_data == MO_BE) { in do_ldrd_load()
5042 if (a->rt & 1) { in trans_LDRD_rr()
5048 do_ldrd_load(s, addr, a->rt, a->rt + 1); in trans_LDRD_rr()
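
The alignment/atomicity contract quoted above depends only on the low address bits; a plain-C restatement:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the LDRD rule (MO_ALIGN_4 | MO_ATOM_SUBALIGN). */
    static const char *ldrd_shape(uint32_t addr)
    {
        if (addr % 4 != 0) {
            return "alignment fault";
        }
        return addr % 8 == 0 ? "one atomic 64-bit access"
                             : "two atomic 32-bit accesses";
    }

    int main(void)
    {
        printf("0x1000: %s\n", ldrd_shape(0x1000));
        printf("0x1004: %s\n", ldrd_shape(0x1004));
        printf("0x1002: %s\n", ldrd_shape(0x1002));
        return 0;
    }
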
5058 * STRD is required to be an atomic 64-bit access if the in do_strd_store()
5059 * address is 8-aligned, two atomic 32-bit accesses if in do_strd_store()
5060 * it's only 4-aligned, and to give an alignment fault in do_strd_store()
5061 * if it's not 4-aligned. in do_strd_store()
5065 * so we don't get its SCTLR_B check, and instead do a 64-bit access in do_strd_store()
5069 * As with LDRD, the 64-bit atomicity is not required for in do_strd_store()
5070 * M-profile, or for A-profile before LPAE, and we provide in do_strd_store()
5074 MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data; in do_strd_store()
5080 if (s->be_data == MO_BE) { in do_strd_store()
5095 if (a->rt & 1) { in trans_STRD_rr()
5101 do_strd_store(s, addr, a->rt, a->rt + 1); in trans_STRD_rr()
5113 int ofs = a->imm; in op_addr_ri_pre()
5115 if (!a->u) { in op_addr_ri_pre()
5116 ofs = -ofs; in op_addr_ri_pre()
5119 if (s->v8m_stackcheck && a->rn == 13 && a->w) { in op_addr_ri_pre()
5126 if (!a->u) { in op_addr_ri_pre()
5135 return add_reg_for_lit(s, a->rn, a->p ? ofs : 0); in op_addr_ri_pre()
5142 if (!a->p) { in op_addr_ri_post()
5143 if (a->u) { in op_addr_ri_post()
5144 address_offset = a->imm; in op_addr_ri_post()
5146 address_offset = -a->imm; in op_addr_ri_post()
5148 } else if (!a->w) { in op_addr_ri_post()
5152 store_reg(s, a->rn, addr); in op_addr_ri_post()
5158 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w); in op_load_ri()
5172 store_reg_from_load(s, a->rt, tmp); in op_load_ri()
5179 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite; in op_store_ri()
5186 if (s->thumb && a->rn == 15) { in op_store_ri()
5192 tmp = load_reg(s, a->rt); in op_store_ri()
5206 do_ldrd_load(s, addr, a->rt, rt2); in op_ldrd_ri()
5215 if (!ENABLE_ARCH_5TE || (a->rt & 1)) { in trans_LDRD_ri_a32()
5218 return op_ldrd_ri(s, a, a->rt + 1); in trans_LDRD_ri_a32()
5224 .u = a->u, .w = a->w, .p = a->p, in trans_LDRD_ri_t32()
5225 .rn = a->rn, .rt = a->rt, .imm = a->imm in trans_LDRD_ri_t32()
5227 return op_ldrd_ri(s, &b, a->rt2); in trans_LDRD_ri_t32()
5236 do_strd_store(s, addr, a->rt, rt2); in op_strd_ri()
5244 if (!ENABLE_ARCH_5TE || (a->rt & 1)) { in trans_STRD_ri_a32()
5247 return op_strd_ri(s, a, a->rt + 1); in trans_STRD_ri_a32()
5253 .u = a->u, .w = a->w, .p = a->p, in trans_STRD_ri_t32()
5254 .rn = a->rn, .rt = a->rt, .imm = a->imm in trans_STRD_ri_t32()
5256 return op_strd_ri(s, &b, a->rt2); in trans_STRD_ri_t32()
5298 opc |= s->be_data; in DO_LDST()
5299 addr = load_reg(s, a->rn); in DO_LDST()
5302 tmp = load_reg(s, a->rt2); in DO_LDST()
5305 store_reg(s, a->rt, tmp); in DO_LDST()
5320 * Load/Store Exclusive and Load-Acquire/Store-Release
5330 if (a->rd == 15 || a->rn == 15 || a->rt == 15 in op_strex()
5331 || a->rd == a->rn || a->rd == a->rt in op_strex()
5332 || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13)) in op_strex()
5334 && (a->rt2 == 15 in op_strex()
5335 || a->rd == a->rt2 in op_strex()
5336 || (!v8a && s->thumb && a->rt2 == 13)))) { in op_strex()
5346 load_reg_var(s, addr, a->rn); in op_strex()
5347 tcg_gen_addi_i32(addr, addr, a->imm); in op_strex()
5349 gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop); in op_strex()
5367 if (a->rt & 1) { in trans_STREXD_a32()
5371 a->rt2 = a->rt + 1; in trans_STREXD_a32()
5382 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { in trans_STREXB()
5390 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { in trans_STREXH()
5410 if (a->rt & 1) { in trans_STLEXD_a32()
5414 a->rt2 = a->rt + 1; in trans_STLEXD_a32()
5450 if (a->rn == 15 || a->rt == 15) { in op_stl()
5455 addr = load_reg(s, a->rn); in op_stl()
5456 tmp = load_reg(s, a->rt); in op_stl()
5459 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite); in op_stl()
5486 if (a->rn == 15 || a->rt == 15 in op_ldrex()
5487 || (!v8a && s->thumb && a->rt == 13) in op_ldrex()
5489 && (a->rt2 == 15 || a->rt == a->rt2 in op_ldrex()
5490 || (!v8a && s->thumb && a->rt2 == 13)))) { in op_ldrex()
5496 load_reg_var(s, addr, a->rn); in op_ldrex()
5497 tcg_gen_addi_i32(addr, addr, a->imm); in op_ldrex()
5499 gen_load_exclusive(s, a->rt, a->rt2, addr, mop); in op_ldrex()
5521 if (a->rt & 1) { in trans_LDREXD_a32()
5525 a->rt2 = a->rt + 1; in trans_LDREXD_a32()
5536 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { in trans_LDREXB()
5544 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { in trans_LDREXH()
5564 if (a->rt & 1) { in trans_LDAEXD_a32()
5568 a->rt2 = a->rt + 1; in trans_LDAEXD_a32()
5604 if (a->rn == 15 || a->rt == 15) { in op_lda()
5609 addr = load_reg(s, a->rn); in op_lda()
5612 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel); in op_lda()
5614 store_reg(s, a->rt, tmp); in op_lda()
5646 t1 = load_reg(s, a->rn); in trans_USADA8()
5647 t2 = load_reg(s, a->rm); in trans_USADA8()
5649 if (a->ra != 15) { in trans_USADA8()
5650 t2 = load_reg(s, a->ra); in trans_USADA8()
5653 store_reg(s, a->rd, t1); in trans_USADA8()
5660 int width = a->widthm1 + 1; in op_bfx()
5661 int shift = a->lsb; in op_bfx()
5672 tmp = load_reg(s, a->rn); in op_bfx()
5678 store_reg(s, a->rd, tmp); in op_bfx()
5694 int msb = a->msb, lsb = a->lsb; in trans_BFCI()
5707 width = msb + 1 - lsb; in trans_BFCI()
5708 if (a->rn == 15) { in trans_BFCI()
5713 t_in = load_reg(s, a->rn); in trans_BFCI()
5715 t_rd = load_reg(s, a->rd); in trans_BFCI()
5717 store_reg(s, a->rd, t_rd); in trans_BFCI()
5736 if (s->thumb in op_par_addsub()
5742 t0 = load_reg(s, a->rn); in op_par_addsub()
5743 t1 = load_reg(s, a->rm); in op_par_addsub()
5747 store_reg(s, a->rd, t0); in op_par_addsub()
5758 if (s->thumb in op_par_addsub_ge()
5764 t0 = load_reg(s, a->rn); in op_par_addsub_ge()
5765 t1 = load_reg(s, a->rm); in op_par_addsub_ge()
5771 store_reg(s, a->rd, t0); in op_par_addsub_ge()
5839 int shift = a->imm; in DO_PAR_ADDSUB_GE()
5841 if (s->thumb in DO_PAR_ADDSUB_GE()
5847 tn = load_reg(s, a->rn); in DO_PAR_ADDSUB_GE()
5848 tm = load_reg(s, a->rm); in DO_PAR_ADDSUB_GE()
5849 if (a->tb) { in DO_PAR_ADDSUB_GE()
5861 store_reg(s, a->rd, tn); in DO_PAR_ADDSUB_GE()
5869 int shift = a->imm; in op_sat()
5875 tmp = load_reg(s, a->rn); in op_sat()
5876 if (a->sh) { in op_sat()
5882 gen(tmp, tcg_env, tmp, tcg_constant_i32(a->satimm)); in op_sat()
5884 store_reg(s, a->rd, tmp); in op_sat()
5900 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { in trans_SSAT16()
5908 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { in trans_USAT16()
5924 tmp = load_reg(s, a->rm); in op_xta()
5929 tcg_gen_rotri_i32(tmp, tmp, a->rot * 8); in op_xta()
5932 if (a->rn != 15) { in op_xta()
5933 TCGv_i32 tmp2 = load_reg(s, a->rn); in op_xta()
5936 store_reg(s, a->rd, tmp); in op_xta()
5952 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { in trans_SXTAB16()
5970 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { in trans_UXTAB16()
5980 if (s->thumb in trans_SEL()
5986 t1 = load_reg(s, a->rn); in trans_SEL()
5987 t2 = load_reg(s, a->rm); in trans_SEL()
5991 store_reg(s, a->rd, t1); in trans_SEL()
6000 tmp = load_reg(s, a->rm); in op_rr()
6002 store_reg(s, a->rd, tmp); in op_rr()
6050 t1 = load_reg(s, a->rn); in op_smlad()
6051 t2 = load_reg(s, a->rm); in op_smlad()
6060 * 32-bit subtraction and then a possible 32-bit saturating in op_smlad()
6065 if (a->ra != 15) { in op_smlad()
6066 t2 = load_reg(s, a->ra); in op_smlad()
6069 } else if (a->ra == 15) { in op_smlad()
6070 /* Single saturation-checking addition */ in op_smlad()
6076 * this as two separate add-and-check-overflow steps incorrectly in op_smlad()
6077 * sets Q for cases like (-32768 * -32768) + (-32768 * -32768) + -1. in op_smlad()
6078 * Do all the arithmetic at 64-bits and then check for overflow. in op_smlad()
6088 load_reg_var(s, t2, a->ra); in op_smlad()
6096 * is different from the sign-extension of t1. in op_smlad()
6105 store_reg(s, a->rd, t1); in op_smlad()
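
The Q-flag corner case named in the comment can be checked by hand; this sketch reproduces the arithmetic (plain C):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t p1 = -32768 * -32768;   /* 0x40000000 */
        int32_t p2 = -32768 * -32768;
        int32_t ra = -1;

        /* Two checked 32-bit adds: the first already leaves int32 range. */
        int64_t step1 = (int64_t)p1 + p2;        /* 0x80000000 */
        int q_stepwise = (step1 != (int32_t)step1);

        /* One 64-bit sum, one range check: the total fits, Q stays clear. */
        int64_t total = (int64_t)p1 + p2 + ra;   /* 0x7fffffff */
        int q_64bit = (total != (int32_t)total);

        printf("stepwise Q=%d, 64-bit Q=%d, result=%lld\n",
               q_stepwise, q_64bit, (long long)total);
        return 0;
    }
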
6138 t1 = load_reg(s, a->rn); in op_smlald()
6139 t2 = load_reg(s, a->rm); in op_smlald()
6156 gen_addq(s, l1, a->ra, a->rd); in op_smlald()
6157 gen_storeq_reg(s, a->ra, a->rd, l1); in op_smlald()
6185 if (s->thumb in op_smmla()
6191 t1 = load_reg(s, a->rn); in op_smmla()
6192 t2 = load_reg(s, a->rm); in op_smmla()
6195 if (a->ra != 15) { in op_smmla()
6196 TCGv_i32 t3 = load_reg(s, a->ra); in op_smmla()
6199 * For SMMLS, we need a 64-bit subtract. Borrow caused by in op_smmla()
6200 * a non-zero multiplicand lowpart, and the correct result in op_smmla()
6210 * Adding 0x80000000 to the 64-bit quantity means that we have in op_smmla()
6216 store_reg(s, a->rd, t1); in op_smmla()
6244 if (s->thumb in op_div()
6250 t1 = load_reg(s, a->rn); in op_div()
6251 t2 = load_reg(s, a->rm); in op_div()
6257 store_reg(s, a->rd, t1); in op_div()
6277 TCGv_i32 addr = load_reg(s, a->rn); in op_addr_block_pre()
6279 if (a->b) { in op_addr_block_pre()
6280 if (a->i) { in op_addr_block_pre()
6285 tcg_gen_addi_i32(addr, addr, -(n * 4)); in op_addr_block_pre()
6287 } else if (!a->i && n != 1) { in op_addr_block_pre()
6288 /* post decrement */ in op_addr_block_pre()
6289 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); in op_addr_block_pre()
6292 if (s->v8m_stackcheck && a->rn == 13 && a->w) { in op_addr_block_pre()
6296 * stack limit but the final written-back SP would in op_addr_block_pre()
6313 if (a->w) { in op_addr_block_post()
6315 if (!a->b) { in op_addr_block_post()
6316 if (a->i) { in op_addr_block_post()
6317 /* post increment */ in op_addr_block_post()
6320 /* post decrement */ in op_addr_block_post()
6321 tcg_gen_addi_i32(addr, addr, -(n * 4)); in op_addr_block_post()
6323 } else if (!a->i && n != 1) { in op_addr_block_post()
6325 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); in op_addr_block_post()
6327 store_reg(s, a->rn, addr); in op_addr_block_post()
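
These pre/post helpers walk LDM/STM addresses upward from a start offset determined by the before (b) and increment (i) bits; a sketch of the start-offset table, reconstructed from the fragments above:

    #include <stdio.h>

    /* Sketch: offset of the first transfer from the base register for a
     * block of n registers, per addressing mode. */
    static int block_start(int b, int i, int n)
    {
        if (b) {
            return i ? 4 : -(n * 4);        /* IB / DB */
        }
        return i ? 0 : -((n - 1) * 4);      /* IA / DA */
    }

    int main(void)
    {
        int n = 3;
        printf("IA %+d  IB %+d  DA %+d  DB %+d\n",
               block_start(0, 1, n), block_start(1, 1, n),
               block_start(0, 0, n), block_start(1, 0, n));
        return 0;
    }
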
6334 bool user = a->u; in op_stm()
6346 list = a->list; in op_stm()
6352 * single-register-store, and some in-the-wild (buggy) software in op_stm()
6355 if (n < 1 || a->rn == 15) { in op_stm()
6360 s->eci_handled = true; in op_stm()
6397 if (a->w && (a->list & (1 << a->rn))) { in trans_STM_t32()
6408 bool user = a->u; in do_ldm()
6419 if (extract32(a->list, 15, 1)) { in do_ldm()
6424 if (a->w) { in do_ldm()
6431 list = a->list; in do_ldm()
6437 * single-register-load, and some in-the-wild (buggy) software in do_ldm()
6440 if (n < 1 || a->rn == 15) { in do_ldm()
6445 s->eci_handled = true; in do_ldm()
6461 } else if (i == a->rn) { in do_ldm()
6480 store_reg(s, a->rn, loaded_var); in do_ldm()
6486 translator_io_start(&s->base); in do_ldm()
6488 /* Must exit loop to check un-masked IRQs */ in do_ldm()
6489 s->base.is_jmp = DISAS_EXIT; in do_ldm()
6502 if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) { in trans_LDM_a32()
6512 if (a->w && (a->list & (1 << a->rn))) { in trans_LDM_t32()
6522 a->w = !(a->list & (1 << a->rn)); in trans_LDM_t16()
6535 if (extract32(a->list, 13, 1)) { in trans_CLRM()
6539 if (!a->list) { in trans_CLRM()
6544 s->eci_handled = true; in trans_CLRM()
6548 if (extract32(a->list, i, 1)) { in trans_CLRM()
6553 if (extract32(a->list, 15, 1)) { in trans_CLRM()
6570 gen_jmp(s, jmp_diff(s, a->imm)); in trans_B()
6577 if (a->cond >= 0xe) { in trans_B_cond_thumb()
6580 if (s->condexec_mask) { in trans_B_cond_thumb()
6584 arm_skip_unless(s, a->cond); in trans_B_cond_thumb()
6585 gen_jmp(s, jmp_diff(s, a->imm)); in trans_B_cond_thumb()
6591 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb); in trans_BL()
6592 gen_jmp(s, jmp_diff(s, a->imm)); in trans_BL()
6599 * BLX <imm> would be useless on M-profile; the encoding space in trans_BLX_i()
6607 if (s->thumb && (a->imm & 2)) { in trans_BLX_i()
6610 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb); in trans_BLX_i()
6611 store_cpu_field_constant(!s->thumb, thumb); in trans_BLX_i()
6613 gen_jmp(s, jmp_diff(s, a->imm - (s->pc_curr & 3))); in trans_BLX_i()
6620 gen_pc_plus_diff(s, cpu_R[14], jmp_diff(s, a->imm << 12)); in trans_BL_BLX_prefix()
6629 tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1); in trans_BL_suffix()
6644 tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1); in trans_BLX_suffix()
6654 * M-profile branch future insns. The architecture permits an in trans_BF()
6663 if (a->boff == 0) { in trans_BF()
6673 /* M-profile low-overhead loop start */ in trans_DLS()
6679 if (a->rn == 13 || a->rn == 15) { in trans_DLS()
6688 if (a->size != 4) { in trans_DLS()
6699 tmp = load_reg(s, a->rn); in trans_DLS()
6701 if (a->size != 4) { in trans_DLS()
6703 store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize); in trans_DLS()
6704 s->base.is_jmp = DISAS_UPDATE_NOCHAIN; in trans_DLS()
6711 /* M-profile low-overhead while-loop start */ in trans_WLS()
6718 if (a->rn == 13 || a->rn == 15) { in trans_WLS()
6726 if (s->condexec_mask) { in trans_WLS()
6731 * in the dc->condjmp condition-failed codepath in in trans_WLS()
6736 if (a->size != 4) { in trans_WLS()
6745 * Do the check-and-raise-exception by hand. in trans_WLS()
6747 if (s->fp_excp_el) { in trans_WLS()
6749 syn_uncategorized(), s->fp_excp_el); in trans_WLS()
6755 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel.label); in trans_WLS()
6756 tmp = load_reg(s, a->rn); in trans_WLS()
6758 if (a->size != 4) { in trans_WLS()
6768 store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize); in trans_WLS()
6777 gen_jmp(s, jmp_diff(s, a->imm)); in trans_WLS()
6784 * M-profile low-overhead loop end. The architecture permits an in trans_LE()
6798 if (a->f && a->tp) { in trans_LE()
6801 if (s->condexec_mask) { in trans_LE()
6806 * in the dc->condjmp condition-failed codepath in in trans_LE()
6811 if (a->tp) { in trans_LE()
6817 s->eci_handled = true; in trans_LE()
6823 s->eci_handled = true; in trans_LE()
6831 * can identify not-active purely from our TB state flags, as the in trans_LE()
6840 * the FPU not active. But LE is an unusual case of a non-FP insn in trans_LE()
6843 fpu_active = !s->fp_excp_el && !s->v7m_lspact && !s->v7m_new_fp_ctxt_needed; in trans_LE()
6845 if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) { in trans_LE()
6854 if (a->f) { in trans_LE()
6855 /* Loop-forever: just jump back to the loop start */ in trans_LE()
6856 gen_jmp(s, jmp_diff(s, -a->imm)); in trans_LE()
6861 * Not loop-forever. If LR <= loop-decrement-value this is the last loop. in trans_LE()
6867 if (!a->tp) { in trans_LE()
6869 tcg_gen_addi_i32(cpu_R[14], cpu_R[14], -1); in trans_LE()
6872 * Decrement by 1 << (4 - LTPSIZE). We need to use a TCG local in trans_LE()
6885 gen_jmp(s, jmp_diff(s, -a->imm)); in trans_LE()
6888 if (a->tp) { in trans_LE()
6889 /* Exits from tail-pred loops must reset LTPSIZE to 4 */ in trans_LE()
6900 * M-profile Loop Clear with Tail Predication. Since our implementation in trans_LCTP()
6921 * M-profile Create Vector Tail Predicate. This insn is itself in trans_VCTP()
6926 if (!dc_isar_feature(aa32_mve, s) || a->rn == 13 || a->rn == 15) { in trans_VCTP()
6935 * We pre-calculate the mask length here to avoid having in trans_VCTP()
6937 * We pass the helper "rn <= (1 << (4 - size)) ? (rn << size) : 16". in trans_VCTP()
6940 masklen = load_reg(s, a->rn); in trans_VCTP()
6941 tcg_gen_shli_i32(rn_shifted, masklen, a->size); in trans_VCTP()
6943 masklen, tcg_constant_i32(1 << (4 - a->size)), in trans_VCTP()
6947 s->base.is_jmp = DISAS_UPDATE_NOCHAIN; in trans_VCTP()
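
The expression passed to the helper folds the VCTP mask length down to bytes; restated as a sketch (16-byte MVE vectors assumed):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: a VCTP for rn elements of (1 << size) bytes predicates
     * min(rn << size, 16) bytes of the 16-byte vector. */
    static uint32_t vctp_masklen(uint32_t rn, unsigned size)
    {
        uint32_t max_elements = 1u << (4 - size);  /* 16 >> size */
        return rn <= max_elements ? (rn << size) : 16;
    }

    int main(void)
    {
        printf("%u\n", vctp_masklen(3, 1));  /* 3 halfwords -> 6 bytes */
        printf("%u\n", vctp_masklen(9, 1));  /* > 8 halfwords -> all 16 */
        return 0;
    }
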
6956 tmp = load_reg(s, a->rm); in op_tbranch()
6960 addr = load_reg(s, a->rn); in op_tbranch()
6984 TCGv_i32 tmp = load_reg(s, a->rn); in trans_CBZ()
6987 tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE, in trans_CBZ()
6988 tmp, 0, s->condlabel.label); in trans_CBZ()
6989 gen_jmp(s, jmp_diff(s, a->imm)); in trans_CBZ()
6994 * Supervisor call - both T32 & A32 come here so we need to check
7000 const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456; in trans_SVC()
7003 semihosting_enabled(s->current_el == 0) && in trans_SVC()
7004 (a->imm == semihost_imm)) { in trans_SVC()
7007 if (s->fgt_svc) { in trans_SVC()
7008 uint32_t syndrome = syn_aa32_svc(a->imm, s->thumb); in trans_SVC()
7012 s->svc_imm = a->imm; in trans_SVC()
7013 s->base.is_jmp = DISAS_SWI; in trans_SVC()
7026 /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4 in trans_RFE()
7029 /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0 in trans_RFE()
7041 addr = load_reg(s, a->rn); in trans_RFE()
7042 tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]); in trans_RFE()
7051 if (a->w) { in trans_RFE()
7053 tcg_gen_addi_i32(addr, addr, post_offset[a->pu]); in trans_RFE()
7054 store_reg(s, a->rn, addr); in trans_RFE()
7065 gen_srs(s, a->mode, a->pu, a->w); in trans_SRS()
7083 if (a->imod & 2) { in trans_CPS()
7084 if (a->A) { in trans_CPS()
7087 if (a->I) { in trans_CPS()
7090 if (a->F) { in trans_CPS()
7093 if (a->imod & 1) { in trans_CPS()
7097 if (a->M) { in trans_CPS()
7099 val |= a->mode; in trans_CPS()
7119 tmp = tcg_constant_i32(a->im); in trans_CPS_v7m()
7121 if (a->F) { in trans_CPS_v7m()
7126 if (a->I) { in trans_CPS_v7m()
7136 * Clear-Exclusive, Barriers
7141 if (s->thumb in trans_CLREX()
7171 * self-modifying code correctly and also to take in trans_ISB()
7174 s->base.is_jmp = DISAS_TOO_MANY; in trans_ISB()
7188 s->base.is_jmp = DISAS_TOO_MANY; in trans_SB()
7197 if (a->E != (s->be_data == MO_BE)) { in trans_SETEND()
7199 s->base.is_jmp = DISAS_UPDATE_EXIT; in trans_SETEND()
7225 * If-then
7230 int cond_mask = a->cond_mask; in trans_IT()
7240 s->condexec_cond = (cond_mask >> 4) & 0xe; in trans_IT()
7241 s->condexec_mask = cond_mask & 0x1f; in trans_IT()
7255 if (a->rm == 13) { in trans_CSEL()
7260 if (a->rd == 13 || a->rd == 15 || a->rn == 13 || a->fcond >= 14) { in trans_CSEL()
7268 if (a->rn == 15) { in trans_CSEL()
7271 load_reg_var(s, rn, a->rn); in trans_CSEL()
7273 if (a->rm == 15) { in trans_CSEL()
7276 load_reg_var(s, rm, a->rm); in trans_CSEL()
7279 switch (a->op) { in trans_CSEL()
7295 arm_test_cc(&c, a->fcond); in trans_CSEL()
7298 store_reg(s, a->rd, rn); in trans_CSEL()
7318 if (s->pstate_il) { in disas_arm_insn()
7351 if (extract32(s->c15_cpar, 1, 1)) { in disas_arm_insn()
7393 /* Definitely a 16-bit instruction */ in thumb_insn_is_16bit()
7398 * first half of a 32-bit Thumb insn. Thumb-1 cores might in thumb_insn_is_16bit()
7399 * end up actually treating this as two 16-bit insns, though, in thumb_insn_is_16bit()
7405 * 32-bit insns as 32-bit. in thumb_insn_is_16bit()
7410 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) { in thumb_insn_is_16bit()
7412 * is not on the next page; we merge this into a 32-bit in thumb_insn_is_16bit()
7420 * -- handle as single 16 bit insn in thumb_insn_is_16bit()
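
Stripped of the page-boundary special case discussed above, the 16/32-bit split tests only the top five bits of the first halfword; a sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: first halfwords with bits [15:11] = 0b11101/0b11110/0b11111
     * begin a 32-bit Thumb encoding; everything else is a 16-bit insn.
     * 0x1e (0b11110) is the old Thumb-1 BL prefix from the comment. */
    static bool starts_32bit(uint16_t insn)
    {
        unsigned top5 = insn >> 11;
        return top5 == 0x1d || top5 == 0x1e || top5 == 0x1f;
    }

    int main(void)
    {
        return starts_32bit(0xf000) && !starts_32bit(0xe000) ? 0 : 1;
    }
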
7425 /* Translate a 32-bit thumb instruction. */
7429 * ARMv6-M supports a limited subset of Thumb2 instructions. in disas_thumb2_insn()
7430 * Other Thumb1 architectures allow only 32-bit in disas_thumb2_insn()
7469 * entire wide range of coprocessor-space encodings, so check in disas_thumb2_insn()
7532 /* Return true if the insn at dc->base.pc_next might cross a page boundary. in insn_crosses_page()
7535 * only called if dc->base.pc_next is less than 4 bytes from the page in insn_crosses_page()
7539 uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b); in insn_crosses_page()
7541 return !thumb_insn_is_16bit(s, s->base.pc_next, insn); in insn_crosses_page()
7549 CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb); in arm_tr_init_disas_context()
7552 dc->isar = &cpu->isar; in arm_tr_init_disas_context()
7553 dc->condjmp = 0; in arm_tr_init_disas_context()
7554 dc->pc_save = dc->base.pc_first; in arm_tr_init_disas_context()
7555 dc->aarch64 = false; in arm_tr_init_disas_context()
7556 dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB); in arm_tr_init_disas_context()
7557 dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE; in arm_tr_init_disas_context()
7560 * the CONDEXEC TB flags are CPSR bits [15:10][26:25]. On A-profile this in arm_tr_init_disas_context()
7561 * is always the IT bits. On M-profile, some of the reserved encodings in arm_tr_init_disas_context()
7570 dc->eci = dc->condexec_mask = dc->condexec_cond = 0; in arm_tr_init_disas_context()
7571 dc->eci_handled = false; in arm_tr_init_disas_context()
7573 dc->condexec_mask = (condexec & 0xf) << 1; in arm_tr_init_disas_context()
7574 dc->condexec_cond = condexec >> 4; in arm_tr_init_disas_context()
7577 dc->eci = condexec >> 4; in arm_tr_init_disas_context()
7582 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx); in arm_tr_init_disas_context()
7583 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); in arm_tr_init_disas_context()
7585 dc->user = (dc->current_el == 0); in arm_tr_init_disas_context()
7587 dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); in arm_tr_init_disas_context()
7588 dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM); in arm_tr_init_disas_context()
7589 dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL); in arm_tr_init_disas_context()
7590 dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE); in arm_tr_init_disas_context()
7591 dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC); in arm_tr_init_disas_context()
7594 dc->vfp_enabled = 1; in arm_tr_init_disas_context()
7595 dc->be_data = MO_TE; in arm_tr_init_disas_context()
7596 dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER); in arm_tr_init_disas_context()
7597 dc->v8m_secure = EX_TBFLAG_M32(tb_flags, SECURE); in arm_tr_init_disas_context()
7598 dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK); in arm_tr_init_disas_context()
7599 dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG); in arm_tr_init_disas_context()
7600 dc->v7m_new_fp_ctxt_needed = in arm_tr_init_disas_context()
7602 dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT); in arm_tr_init_disas_context()
7603 dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED); in arm_tr_init_disas_context()
7605 dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B); in arm_tr_init_disas_context()
7606 dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE); in arm_tr_init_disas_context()
7607 dc->ns = EX_TBFLAG_A32(tb_flags, NS); in arm_tr_init_disas_context()
7608 dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN); in arm_tr_init_disas_context()
7610 dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR); in arm_tr_init_disas_context()
7612 dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN); in arm_tr_init_disas_context()
7613 dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE); in arm_tr_init_disas_context()
7615 dc->sme_trap_nonstreaming = in arm_tr_init_disas_context()
7618 dc->lse2 = false; /* applies only to aarch64 */ in arm_tr_init_disas_context()
7619 dc->cp_regs = cpu->cp_regs; in arm_tr_init_disas_context()
7620 dc->features = env->features; in arm_tr_init_disas_context()
7622 /* Single step state. The code-generation logic here is: in arm_tr_init_disas_context()
7624 * generate code with no special handling for single-stepping (except in arm_tr_init_disas_context()
7628 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) in arm_tr_init_disas_context()
7633 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) in arm_tr_init_disas_context()
7637 dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE); in arm_tr_init_disas_context()
7638 dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS); in arm_tr_init_disas_context()
7639 dc->is_ldex = false; in arm_tr_init_disas_context()
7641 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK; in arm_tr_init_disas_context()
7644 if (dc->ss_active) { in arm_tr_init_disas_context()
7645 dc->base.max_insns = 1; in arm_tr_init_disas_context()
7648 /* ARM is a fixed-length ISA. Bound the number of insns to execute in arm_tr_init_disas_context()
7650 if (!dc->thumb) { in arm_tr_init_disas_context()
7651 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; in arm_tr_init_disas_context()
7652 dc->base.max_insns = MIN(dc->base.max_insns, bound); in arm_tr_init_disas_context()
7689 * bits, and none which can write non-static values to them, so in arm_tr_tb_start()
7696 if (dc->condexec_mask || dc->condexec_cond) { in arm_tr_tb_start()
7706 * need to reconstitute the bits from the split-out DisasContext in arm_tr_insn_start()
7710 target_ulong pc_arg = dc->base.pc_next; in arm_tr_insn_start()
7712 if (tb_cflags(dcbase->tb) & CF_PCREL) { in arm_tr_insn_start()
7715 if (dc->eci) { in arm_tr_insn_start()
7716 condexec_bits = dc->eci << 4; in arm_tr_insn_start()
7718 condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); in arm_tr_insn_start()
7721 dc->insn_start_updated = false; in arm_tr_insn_start()
7728 if (dc->base.pc_next >= 0xffff0000) { in arm_check_kernelpage()
7732 dc->base.is_jmp = DISAS_NORETURN; in arm_check_kernelpage()
7741 if (dc->ss_active && !dc->pstate_ss) { in arm_check_ss_active()
7742 /* Singlestep state is Active-pending. in arm_check_ss_active()
7752 assert(dc->base.num_insns == 1); in arm_check_ss_active()
7754 dc->base.is_jmp = DISAS_NORETURN; in arm_check_ss_active()
7763 if (dc->condjmp && in arm_post_translate_insn()
7764 (dc->base.is_jmp == DISAS_NEXT || dc->base.is_jmp == DISAS_TOO_MANY)) { in arm_post_translate_insn()
7765 if (dc->pc_save != dc->condlabel.pc_save) { in arm_post_translate_insn()
7766 gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save); in arm_post_translate_insn()
7768 gen_set_label(dc->condlabel.label); in arm_post_translate_insn()
7769 dc->condjmp = 0; in arm_post_translate_insn()
7777 uint32_t pc = dc->base.pc_next; in arm_tr_translate_insn()
7782 dc->base.pc_next = pc + 4; in arm_tr_translate_insn()
7793 assert(dc->base.num_insns == 1); in arm_tr_translate_insn()
7795 dc->base.is_jmp = DISAS_NORETURN; in arm_tr_translate_insn()
7796 dc->base.pc_next = QEMU_ALIGN_UP(pc, 4); in arm_tr_translate_insn()
7801 dc->base.pc_next = pc + 4; in arm_tr_translate_insn()
7805 dc->pc_curr = pc; in arm_tr_translate_insn()
7806 insn = arm_ldl_code(env, &dc->base, pc, dc->sctlr_b); in arm_tr_translate_insn()
7807 dc->insn = insn; in arm_tr_translate_insn()
7808 dc->base.pc_next = pc + 4; in arm_tr_translate_insn()
7813 /* ARM is a fixed-length ISA. We performed the cross-page check in arm_tr_translate_insn()
7829 * insn is either a 16-bit or a 32-bit instruction; the two are in thumb_insn_is_unconditional()
7830 * distinguishable because for the 16-bit case the top 16 bits in thumb_insn_is_unconditional()
7831 * are zeroes, and that isn't a valid 32-bit encoding. in thumb_insn_is_unconditional()
7867 uint32_t pc = dc->base.pc_next; in thumb_tr_translate_insn()
7872 target_ulong insn_eci_pc_save = -1; in thumb_tr_translate_insn()
7875 assert((dc->base.pc_next & 1) == 0); in thumb_tr_translate_insn()
7878 dc->base.pc_next = pc + 2; in thumb_tr_translate_insn()
7882 dc->pc_curr = pc; in thumb_tr_translate_insn()
7883 insn = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b); in thumb_tr_translate_insn()
7884 is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn); in thumb_tr_translate_insn()
7887 uint32_t insn2 = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b); in thumb_tr_translate_insn()
7891 dc->base.pc_next = pc; in thumb_tr_translate_insn()
7892 dc->insn = insn; in thumb_tr_translate_insn()
7894 if (dc->pstate_il) { in thumb_tr_translate_insn()
7903 if (dc->eci) { in thumb_tr_translate_insn()
7905 * For M-profile continuable instructions, ECI/ICI handling in thumb_tr_translate_insn()
7907 * - interrupt-continuable instructions in thumb_tr_translate_insn()
7915 * - MVE instructions subject to beat-wise execution in thumb_tr_translate_insn()
7922 * - Special cases which don't advance ECI in thumb_tr_translate_insn()
7925 * - all other insns (the common case) in thumb_tr_translate_insn()
7926 * Non-zero ECI/ICI means an INVSTATE UsageFault. in thumb_tr_translate_insn()
7927 * We place a rewind-marker here. Insns in the previous in thumb_tr_translate_insn()
7935 insn_eci_pc_save = dc->pc_save; in thumb_tr_translate_insn()
7938 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) { in thumb_tr_translate_insn()
7939 uint32_t cond = dc->condexec_cond; in thumb_tr_translate_insn()
7957 if (dc->condexec_mask) { in thumb_tr_translate_insn()
7958 dc->condexec_cond = ((dc->condexec_cond & 0xe) | in thumb_tr_translate_insn()
7959 ((dc->condexec_mask >> 4) & 1)); in thumb_tr_translate_insn()
7960 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; in thumb_tr_translate_insn()
7961 if (dc->condexec_mask == 0) { in thumb_tr_translate_insn()
7962 dc->condexec_cond = 0; in thumb_tr_translate_insn()
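
Tracing this advance by hand shows how the mask feeds then/else bits into the condition; a sketch using the same split representation (cond is the condition for the next insn, mask is pre-shifted left by one):

    #include <stdio.h>

    static void trace_it_block(unsigned cond, unsigned mask, int n)
    {
        for (int i = 0; i < n; i++) {
            printf("insn %d executes under cond %x\n", i + 1, cond);
            cond = (cond & 0xe) | ((mask >> 4) & 1);
            mask = (mask << 1) & 0x1f;
            if (mask == 0) {
                cond = 0; /* IT block done */
            }
        }
    }

    int main(void)
    {
        /* ITTE EQ: ITSTATE mask 0b0110, stored pre-shifted as 0b01100.
         * Prints cond 0 (EQ), 0 (EQ), 1 (NE). */
        trace_it_block(0x0, 0b0110 << 1, 3);
        return 0;
    }
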
7966 if (dc->eci && !dc->eci_handled) { in thumb_tr_translate_insn()
7972 dc->pc_save = insn_eci_pc_save; in thumb_tr_translate_insn()
7973 dc->condjmp = 0; in thumb_tr_translate_insn()
7979 /* Thumb is a variable-length ISA. Stop translation when the next insn in thumb_tr_translate_insn()
7986 * see if it's a 16-bit Thumb insn (which will fit in this TB) in thumb_tr_translate_insn()
7987 * or a 32-bit Thumb insn (which won't). in thumb_tr_translate_insn()
7988 * This is to avoid generating a silly TB with a single 16-bit insn in thumb_tr_translate_insn()
7992 if (dc->base.is_jmp == DISAS_NEXT in thumb_tr_translate_insn()
7993 && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE in thumb_tr_translate_insn()
7994 || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3 in thumb_tr_translate_insn()
7996 dc->base.is_jmp = DISAS_TOO_MANY; in thumb_tr_translate_insn()
8004 /* At this stage dc->condjmp will only be set when the skipped in arm_tr_tb_stop()
8008 if (dc->base.is_jmp == DISAS_BX_EXCRET) { in arm_tr_tb_stop()
8011 * handle the single-step vs not and the condition-failed in arm_tr_tb_stop()
8015 } else if (unlikely(dc->ss_active)) { in arm_tr_tb_stop()
8017 switch (dc->base.is_jmp) { in arm_tr_tb_stop()
8020 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); in arm_tr_tb_stop()
8024 gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); in arm_tr_tb_stop()
8047 - Exception generating instructions (bkpt, swi, undefined). in arm_tr_tb_stop()
8048 - Page boundaries. in arm_tr_tb_stop()
8049 - Hardware watchpoints. in arm_tr_tb_stop()
8052 switch (dc->base.is_jmp) { in arm_tr_tb_stop()
8088 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); in arm_tr_tb_stop()
8091 gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); in arm_tr_tb_stop()
8099 if (dc->condjmp) { in arm_tr_tb_stop()
8101 set_disas_label(dc, dc->condlabel); in arm_tr_tb_stop()
8103 if (unlikely(dc->ss_active)) { in arm_tr_tb_stop()