Lines Matching "--extra-cflags"
23 #include "qemu/host-utils.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/helper-proto.h"
26 #include "exec/helper-gen.h"
28 #include "exec/translation-block.h"
33 #include "exec/helper-info.c.inc"
57 /* True if generating pc-relative code. */
66 /* Current flush-to-zero setting for this TB. */
78 #define UNALIGN(C) (C)->unalign
83 /* Target-specific return values from translate_one, indicating the
161 *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name); in alpha_translate_init()
167 if (!ctx->zero) { in load_zero()
168 ctx->zero = tcg_constant_i64(0); in load_zero()
170 return ctx->zero; in load_zero()
175 if (!ctx->sink) { in dest_sink()
176 ctx->sink = tcg_temp_new(); in dest_sink()
178 return ctx->sink; in dest_sink()
183 if (ctx->sink) { in free_context_temps()
184 tcg_gen_discard_i64(ctx->sink); in free_context_temps()
185 ctx->sink = NULL; in free_context_temps()
192 return ctx->ir[reg]; in load_gpr()
204 return ctx->ir[reg]; in load_gpr_lit()
213 return ctx->ir[reg]; in dest_gpr()
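(Alpha's R31 and F31 read as zero and discard writes. load_zero lazily materializes a constant-zero value for a zero-register source, dest_sink hands out a throwaway temporary for a zero-register destination, which free_context_temps discards after each instruction; load_gpr, load_gpr_lit and dest_gpr return the real per-register global otherwise.)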
241 ofs += 3 - (shift / 8); in get_flag_ofs()
260 uint64_t addr = ctx->base.pc_next + disp; in gen_pc_disp()
261 if (ctx->pcrel) { in gen_pc_disp()
262 tcg_gen_addi_i64(dest, cpu_pc, addr - ctx->base.pc_first); in gen_pc_disp()
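(A worked instance, my numbers rather than the source's: with pc_first = 0x1000, pc_next = 0x1008 and disp = 0x20, the non-PCREL path loads the absolute address 0x1028, while the PCREL path emits cpu_pc + 0x28; the displacement from the TB's entry point is mapping-independent, so the add yields the right address wherever the code page is mapped.)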
292 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); in gen_ldf()
299 tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); in gen_ldg()
306 tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); in gen_lds()
312 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); in gen_ldt()
346 dest = ctx->ir[ra]; in gen_load_int()
347 tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op); in gen_load_int()
359 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); in gen_stf()
366 tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); in gen_stg()
373 tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); in gen_sts()
378 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); in gen_stt()
403 tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op); in gen_store_int()
427 tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value); in gen_store_conditional()
433 tcg_gen_movi_i64(ctx->ir[ra], 0); in gen_store_conditional()
437 tcg_gen_movi_i64(cpu_lock_addr, -1); in gen_store_conditional()
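A sketch of the cmpxchg-based emulation these fragments belong to; the variable names (addr, newv, val) and the exact memop are my assumptions, not lines from the file:

    /* The store happens only if memory still holds the value remembered
       at load-locked time; ra then receives 1 on success, 0 on failure. */
    tcg_gen_atomic_cmpxchg_i64(val, addr, cpu_lock_value, newv,
                               ctx->mem_idx, MO_LEUQ | MO_ALIGN);
    tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    /* Success or failure, the reservation is now gone. */
    tcg_gen_movi_i64(cpu_lock_addr, -1);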
443 if (translator_use_goto_tb(&ctx->base, ctx->base.pc_next + disp)) { in gen_goto_tb()
444 /* With PCREL, PC must always be up-to-date. */ in gen_goto_tb()
445 if (ctx->pcrel) { in gen_goto_tb()
452 tcg_gen_exit_tb(ctx->base.tb, idx); in gen_goto_tb()
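For context, the canonical two-exit TCG chaining idiom looks roughly like the following; this is reconstructed from the public TCG API (dest_pc and idx are placeholders), not copied from the file:

    if (translator_use_goto_tb(&ctx->base, dest_pc)) {
        /* Target on the same page: emit a patchable direct jump. */
        tcg_gen_goto_tb(idx);
        /* ...update the PC... */
        tcg_gen_exit_tb(ctx->base.tb, idx);  /* idx names one of the TB's
                                                two chainable exits */
    } else {
        /* ...update the PC... */
        tcg_gen_exit_tb(NULL, 0);            /* unchained exit */
    }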
462 gen_pc_disp(ctx, ctx->ir[ra], 0); in gen_bdirect()
465 /* Notice branch-to-next; used to initialize RA with the PC. */ in gen_bdirect()
493 /* Fold -0.0 for comparison with COND. */
503 /* For <= or >, the -0.0 value directly compares the way we want. */ in gen_fold_mzero()
515 /* For >= or <, map -0.0 to +0.0. */ in gen_fold_mzero()
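A host-side illustration of why the fold is needed when an IEEE bit pattern is compared against zero as a signed integer; a standalone sketch under that assumption, not code from the file:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t mzero = INT64_MIN;     /* bit pattern of -0.0 */

        printf("%d\n", mzero <= 0);    /* 1: agrees with IEEE -0.0 <= +0.0 */
        printf("%d\n", mzero >= 0);    /* 0: but IEEE says -0.0 >= +0.0 */

        /* Hence the fold for >= and <: treat the -0.0 pattern as +0.0. */
        int64_t folded = (mzero == INT64_MIN) ? 0 : mzero;
        printf("%d\n", folded >= 0);   /* 1 */
        return 0;
    }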
560 if (fn11 == ctx->tb_rm) { in gen_qual_roundmode()
563 ctx->tb_rm = fn11; in gen_qual_roundmode()
584 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just in gen_qual_roundmode()
598 if (fn11 == ctx->tb_ftz) { in gen_qual_flushzero()
601 ctx->tb_ftz = fn11; in gen_qual_flushzero()
609 /* Underflow is disabled, force flush-to-zero. */ in gen_qual_flushzero()
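(tb_rm and tb_ftz memoize, per TB, the rounding-mode and flush-to-zero qualifiers last copied into fp_status, so consecutive FP operations with identical qualifiers skip the redundant update; both start out unknown at -1, lines 2887-2889 below, and tb_rm is forced back to -1 after an MT_FPCR, lines 2232-2235 below.)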
683 /* The arithmetic right shift here, plus the sign-extended mask below in gen_cvtlq()
684 yields a sign-extended result without an explicit ext32s_i64. */ in gen_cvtlq()
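The trick restated in host C, assuming the usual arithmetic right shift of signed values (which TCG's sari op guarantees; in C it is implementation-defined but universal):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Field lives in bits <63:32>; one arithmetic shift both moves it
           down and replicates its sign bit, so no separate ext32s step. */
        uint64_t bits = 0xdeadbeefull << 32;
        int64_t  lo   = (int64_t)bits >> 32;
        printf("%016" PRIx64 "\n", (uint64_t)lo);   /* ffffffffdeadbeef */
        return 0;
    }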
882 int pos = (64 - lit * 8) & 0x3f; in gen_ext_h()
885 tcg_gen_deposit_z_i64(vc, va, pos, len - pos); in gen_ext_h()
907 len = 64 - pos; in gen_ext_l()
924 int pos = 64 - (lit & 7) * 8; in gen_ins_h()
927 tcg_gen_extract_i64(vc, va, pos, len - pos); in gen_ins_h()
935 /* The instruction description has us left-shift the byte mask in gen_ins_h()
942 portably by splitting the shift into two parts: shift_count-1 and 1. in gen_ins_h()
943 Arrange for the -1 by using ones-complement instead of in gen_ins_h()
944 twos-complement in the negation: ~(B * 8) & 63. */ in gen_ins_h()
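The same dodge in plain C (the function name is mine). A single shift would need a count of 64 when B is 0, which C and many hosts leave undefined, so the count is built as 63-plus-1:

    #include <stdint.h>
    #include <stdio.h>

    /* Shift right by 64 - 8*b for b in 0..7.  ~(b * 8) & 63 equals
       (64 - 8*b) - 1, so one extra single-bit shift completes the
       count without ever shifting by 64. */
    static uint64_t shr_64_minus_8b(uint64_t x, unsigned b)
    {
        unsigned n = ~(b * 8) & 63;
        return (x >> n) >> 1;
    }

    int main(void)
    {
        printf("%llx\n", (unsigned long long)shr_64_minus_8b(-1ull, 0)); /* 0 */
        printf("%llx\n", (unsigned long long)shr_64_minus_8b(-1ull, 1)); /* ff */
        return 0;
    }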
963 len = 64 - pos; in gen_ins_l()
970 /* The instruction description has us left-shift the byte mask in gen_ins_l()
994 emulated with a right-shift on the expanded byte mask. This in gen_msk_h()
995 requires extra care because for an input <2:0> == 0 we need a in gen_msk_h()
997 splitting the shift into two parts, the variable shift - 1 in gen_msk_h()
1034 ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT); in gen_rx()
1050 /* No-op inside QEMU. */ in gen_call_pal()
1054 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, in gen_call_pal()
1059 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, in gen_call_pal()
1071 if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) { in gen_call_pal()
1075 /* No-op inside QEMU. */ in gen_call_pal()
1079 /* No-op inside QEMU. */ in gen_call_pal()
1083 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, in gen_call_pal()
1088 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, in gen_call_pal()
1093 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, in gen_call_pal()
1101 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); in gen_call_pal()
1106 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK); in gen_call_pal()
1116 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); in gen_call_pal()
1121 tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env, in gen_call_pal()
1126 tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env, in gen_call_pal()
1131 tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env, in gen_call_pal()
1132 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index)); in gen_call_pal()
1138 -offsetof(AlphaCPU, env) + in gen_call_pal()
1140 tcg_gen_movi_i64(ctx->ir[IR_V0], 0); in gen_call_pal()
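The negative offsetof in the loads above is container_of-style arithmetic: env is embedded in AlphaCPU, whose first member is its CPUState parent, so a single combined displacement walks from env back out to a CPUState field. The same idea in plain C, with made-up struct names:

    #include <stddef.h>
    #include <stdio.h>

    struct State { int cpu_index; };
    struct Cpu   { struct State parent; long env; };  /* parent comes first */

    int main(void)
    {
        struct Cpu cpu = { .parent.cpu_index = 3 };
        char *envp = (char *)&cpu.env;
        /* -offsetof(Cpu, env) recovers the container,
           +offsetof(State, cpu_index) then reaches the field. */
        int *idx = (int *)(envp - offsetof(struct Cpu, env)
                                + offsetof(struct State, cpu_index));
        printf("%d\n", *idx);   /* 3 */
        return 0;
    }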
1161 if (ctx->tbflags & ENV_FLAG_PAL_MODE) { in gen_call_pal()
1168 entry = ctx->palbr; in gen_call_pal()
1170 ? 0x2000 + (palcode - 0x80) * 64 in gen_call_pal()
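(Worked through: for palcode 0x83 the offset is 0x2000 + (0x83 - 0x80) * 64 = 0x20c0, so dispatch lands at PAL base + 0x20c0.)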
1199 return offsetof(CPUAlphaState, scratch[pr - 40]); in cpu_pr_data()
1214 /* Accessing the "non-shadow" general registers. */ in gen_mfpr()
1215 regno = regno == 39 ? 25 : regno - 32 + 8; in gen_mfpr()
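(Worked instances of the remap: regno 32 maps to IR 8, regno 38 to IR 14, and the special case regno 39 to IR 25.)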
1225 if (translator_io_start(&ctx->base)) { in gen_mfpr()
1242 are read-zero, write-ignore. */ in gen_mfpr()
1276 -offsetof(AlphaCPU, env) + offsetof(CPUState, halted)); in gen_mtpr()
1286 if (translator_io_start(&ctx->base)) { in gen_mtpr()
1295 /* Changing the PAL base register implies un-chaining all of the TBs in gen_mtpr()
1302 /* Accessing the "non-shadow" general registers. */ in gen_mtpr()
1303 regno = regno == 39 ? 25 : regno - 32 + 8; in gen_mtpr()
1316 are read-zero, write-ignore. */ in gen_mtpr()
1341 if ((ctx->amask & AMASK_##FLAG) == 0) { \
1348 if ((ctx->tbflags & (FLAG)) == 0) { \
1362 if (!(ctx->tbflags & ENV_FLAG_FEN)) { \
1433 /* It's worth special-casing immediate loads. */ in translate_one()
1723 tcg_gen_andi_i64(vc, vb, ~ctx->amask); in translate_one()
1738 tcg_gen_movi_i64(vc, ctx->implver); in translate_one()
2016 /* CVTDG -- TODO */ in translate_one()
2061 /* CVTGD -- TODO */ in translate_one()
2088 /* IEEE floating-point */ in translate_one()
2232 if (ctx->tb_rm == QUAL_RM_D) { in translate_one()
2233 /* Re-do the copy of the rounding mode to fp_status in translate_one()
2235 ctx->tb_rm = -1; in translate_one()
2293 /* No-op. */ in translate_one()
2297 /* No-op. */ in translate_one()
2309 /* No-op */ in translate_one()
2313 /* No-op */ in translate_one()
2318 if (translator_io_start(&ctx->base)) { in translate_one()
2336 /* No-op */ in translate_one()
2340 /* No-op */ in translate_one()
2365 gen_pc_disp(ctx, ctx->ir[ra], 0); in translate_one()
2617 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return in translate_one()
2625 tcg_gen_movi_i64(cpu_lock_addr, -1); in translate_one()
2779 ctx->mem_idx, MO_LESL | MO_ALIGN); in translate_one()
2784 ctx->mem_idx, MO_LEUQ | MO_ALIGN); in translate_one()
2867 ctx->tbflags = ctx->base.tb->flags; in alpha_tr_init_disas_context()
2868 ctx->mem_idx = alpha_env_mmu_index(env); in alpha_tr_init_disas_context()
2869 ctx->pcrel = ctx->base.tb->cflags & CF_PCREL; in alpha_tr_init_disas_context()
2870 ctx->implver = env->implver; in alpha_tr_init_disas_context()
2871 ctx->amask = env->amask; in alpha_tr_init_disas_context()
2874 ctx->ir = cpu_std_ir; in alpha_tr_init_disas_context()
2875 ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN); in alpha_tr_init_disas_context()
2877 ctx->palbr = env->palbr; in alpha_tr_init_disas_context()
2878 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir); in alpha_tr_init_disas_context()
2887 ctx->tb_rm = -1; in alpha_tr_init_disas_context()
2888 /* Similarly for flush-to-zero. */ in alpha_tr_init_disas_context()
2889 ctx->tb_ftz = -1; in alpha_tr_init_disas_context()
2891 ctx->zero = NULL; in alpha_tr_init_disas_context()
2892 ctx->sink = NULL; in alpha_tr_init_disas_context()
2895 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; in alpha_tr_init_disas_context()
2896 ctx->base.max_insns = MIN(ctx->base.max_insns, bound); in alpha_tr_init_disas_context()
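(The bound arithmetic, worked through: TARGET_PAGE_MASK sets every bit above the in-page offset, so pc_first | TARGET_PAGE_MASK is, read as a two's-complement number, minus the bytes remaining on the page; negating and dividing by 4 counts the remaining 4-byte Alpha instructions. With 8 KiB pages and pc_first ending in 0x1ff0, for instance, 16 bytes remain and the TB is capped at 4 instructions.)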
2907 if (ctx->pcrel) { in alpha_tr_insn_start()
2908 tcg_gen_insn_start(dcbase->pc_next & ~TARGET_PAGE_MASK); in alpha_tr_insn_start()
2910 tcg_gen_insn_start(dcbase->pc_next); in alpha_tr_insn_start()
2917 uint32_t insn = translator_ldl(cpu_env(cpu), &ctx->base, in alpha_tr_translate_insn()
2918 ctx->base.pc_next); in alpha_tr_translate_insn()
2920 ctx->base.pc_next += 4; in alpha_tr_translate_insn()
2921 ctx->base.is_jmp = translate_one(ctx, insn); in alpha_tr_translate_insn()
2930 switch (ctx->base.is_jmp) { in alpha_tr_tb_stop()