#ifndef TARGET_ARM_TRANSLATE_H
#define TARGET_ARM_TRANSLATE_H

#include "cpu.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/helper-gen.h"
#include "internals.h"
#include "cpu-features.h"

/* internal defines */

/*
 * Save pc_save across a branch, so that we may restore the value from
 * before the branch at the point the label is emitted.
 */
typedef struct DisasLabel {
    TCGLabel *label;
    target_ulong pc_save;
} DisasLabel;

typedef struct DisasContext {
    DisasContextBase base;
    const ARMISARegisters *isar;

    /* The address of the current instruction being translated. */
    target_ulong pc_curr;
    /*
     * For CF_PCREL, the full value of cpu_pc is not known
     * (although the page offset is known).  For convenience, the
     * translation loop uses the full virtual address that triggered
     * the translation, from base.pc_start through pc_curr.
     * For efficiency, we do not update cpu_pc for every instruction.
     * Instead, pc_save has the value of pc_curr at the time of the
     * last update to cpu_pc, which allows us to compute the addend
     * needed to bring cpu_pc current: pc_curr - pc_save.
     * If cpu_pc now contains the destination of an indirect branch,
     * pc_save contains -1 to indicate that relative updates are no
     * longer possible.
     */
    target_ulong pc_save;
    target_ulong page_start;
    uint32_t insn;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    DisasLabel condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    /* M-profile ECI/ICI exception-continuable instruction state */
    int eci;
    /*
     * trans_ functions for insns which are continuable should set this true
     * after decode (ie after any UNDEF checks)
     */
    bool eci_handled;
    int sctlr_b;
    MemOp be_data;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
    uint8_t tbii;      /* TBI1|TBI0 for insns */
    uint8_t tbid;      /* TBI1|TBI0 for data */
    uint8_t tcma;      /* TCMA1|TCMA0 for MTE */
    bool ns;        /* Use non-secure CPREG bank on access */
    int fp_excp_el; /* FP exception EL or 0 if enabled */
    int sve_excp_el; /* SVE exception EL or 0 if enabled */
    int sme_excp_el; /* SME exception EL or 0 if enabled */
    int vl;          /* current vector length in bytes */
    int svl;         /* current streaming vector length in bytes */
    bool vfp_enabled; /* FP enabled via FPSCR.EN */
    int vec_len;
    int vec_stride;
    bool v7m_handler_mode;
    bool v8m_secure; /* true if v8M and we're in Secure mode */
    bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */
    bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */
    bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */
    bool v7m_lspact; /* FPCCR.LSPACT set */
    /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
     * so that top level loop can generate correct syndrome information.
     */
    uint32_t svc_imm;
    int current_el;
    GHashTable *cp_regs;
    uint64_t features; /* CPU features bits */
    bool aarch64;
    bool thumb;
    bool lse2;
    /*
     * Because unallocated encodings generate different exception syndrome
     * information from traps due to FP being disabled, we can't do a single
     * "is fp access disabled" check at a high level in the decode tree.
     * To help in catching bugs where the access check was forgotten in some
     * code path, we set this flag when the access check is done, and assert
     * that it is set at the point where we actually touch the FP regs.
     *   0: not checked,
     *   1: checked, access ok
     *  -1: checked, access denied
     */
    int8_t fp_access_checked;
    int8_t sve_access_checked;
    /* ARMv8 single-step state (this is distinct from the QEMU gdbstub
     * single-step support).
     */
    bool ss_active;
    bool pstate_ss;
    /* True if the insn just emitted was a load-exclusive instruction
     * (necessary for syndrome information for single step exceptions),
     * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
     */
    bool is_ldex;
    /* True if AccType_UNPRIV should be used for LDTR et al */
    bool unpriv;
    /* True if v8.3-PAuth is active.  */
    bool pauth_active;
    /* True if v8.5-MTE access to tags is enabled; index with is_unpriv.  */
    bool ata[2];
    /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv.  */
    bool mte_active[2];
    /* True with v8.5-BTI and SCTLR_ELx.BT* set.  */
    bool bt;
    /* True if any CP15 access is trapped by HSTR_EL2 */
    bool hstr_active;
    /* True if memory operations require alignment */
    bool align_mem;
    /* True if PSTATE.IL is set */
    bool pstate_il;
    /* True if PSTATE.SM is set. */
    bool pstate_sm;
    /* True if PSTATE.ZA is set. */
    bool pstate_za;
    /* True if non-streaming insns should raise an SME Streaming exception. */
    bool sme_trap_nonstreaming;
    /* True if the current instruction is non-streaming. */
    bool is_nonstreaming;
    /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
    bool mve_no_pred;
    /* True if fine-grained traps are active */
    bool fgt_active;
    /* True if fine-grained trap on SVC is enabled */
    bool fgt_svc;
    /* True if a trap on ERET is enabled (FGT or NV) */
    bool trap_eret;
    /* True if FEAT_LSE2 SCTLR_ELx.nAA is set */
    bool naa;
    /* True if FEAT_NV HCR_EL2.NV is enabled */
    bool nv;
    /* True if NV enabled and HCR_EL2.NV1 is set */
    bool nv1;
    /* True if NV enabled and HCR_EL2.NV2 is set */
    bool nv2;
    /* True if NV2 enabled and NV2 RAM accesses use EL2&0 translation regime */
    bool nv2_mem_e20;
    /* True if NV2 enabled and NV2 RAM accesses are big-endian */
    bool nv2_mem_be;
    /* True if FPCR.AH is 1 (alternate floating point handling) */
    bool fpcr_ah;
    /* True if FPCR.NEP is 1 (FEAT_AFP scalar upper-element result handling) */
    bool fpcr_nep;
    /*
     * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
     *  < 0, set by the current instruction.
     */
    int8_t btype;
    /* A copy of cpu->dcz_blocksize. */
    uint8_t dcz_blocksize;
    /* A copy of cpu->gm_blocksize. */
    uint8_t gm_blocksize;
    /* True if the current insn_start has been updated. */
    bool insn_start_updated;
    /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
    int c15_cpar;
    /* Offset from VNCR_EL2 when FEAT_NV2 redirects this reg to memory */
    uint32_t nv2_redirect_offset;
} DisasContext;
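
/*
 * A rough sketch of how the pc_save addend described above is typically
 * applied when bringing cpu_pc up to date (illustrative only; the real
 * helpers live in translate.c and translate-a64.c):
 *
 *     if (tb_cflags(s->base.tb) & CF_PCREL) {
 *         // only a relative adjustment is possible under CF_PCREL
 *         tcg_gen_addi_i64(cpu_pc, cpu_pc, s->pc_curr - s->pc_save);
 *     } else {
 *         tcg_gen_movi_i64(cpu_pc, s->pc_curr);
 *     }
 *     s->pc_save = s->pc_curr;
 */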

typedef struct DisasCompare {
    TCGCond cond;
    TCGv_i32 value;
} DisasCompare;

/* Share the TCG temporaries common between 32 and 64 bit modes.  */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
extern TCGv_i64 cpu_exclusive_val;

/*
 * Constant expanders for the decoders.
 */
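
/*
 * These are referenced from the decodetree (.decode) files via the
 * "!function=" field modifier; for instance, a field definition along
 * the lines of (illustrative, not taken from any particular file):
 *
 *     %imm  16:5  !function=times_2
 *
 * passes the extracted 5-bit value through times_2() before it is stored
 * in the generated argument structure.
 */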

static inline int negate(DisasContext *s, int x)
{
    return -x;
}

static inline int plus_1(DisasContext *s, int x)
{
    return x + 1;
}

static inline int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static inline int plus_12(DisasContext *s, int x)
{
    return x + 12;
}

static inline int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static inline int times_4(DisasContext *s, int x)
{
    return x * 4;
}

static inline int times_8(DisasContext *s, int x)
{
    return x * 8;
}

static inline int times_2_plus_1(DisasContext *s, int x)
{
    return x * 2 + 1;
}

static inline int rsub_64(DisasContext *s, int x)
{
    return 64 - x;
}

static inline int rsub_32(DisasContext *s, int x)
{
    return 32 - x;
}

static inline int rsub_16(DisasContext *s, int x)
{
    return 16 - x;
}

static inline int rsub_8(DisasContext *s, int x)
{
    return 8 - x;
}

static inline int shl_12(DisasContext *s, int x)
{
    return x << 12;
}

static inline int xor_2(DisasContext *s, int x)
{
    return x ^ 2;
}

static inline int neon_3same_fp_size(DisasContext *s, int x)
{
    /* Convert 0==fp32, 1==fp16 into a MO_* value */
    return MO_32 - x;
}

static inline int arm_dc_feature(DisasContext *dc, int feature)
{
    return (dc->features & (1ULL << feature)) != 0;
}

static inline int get_mem_index(DisasContext *s)
{
    return arm_to_core_mmu_idx(s->mmu_idx);
}

static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
    /* We don't need to save all of the syndrome so we mask and shift
     * out unneeded bits to help the sleb128 encoder do a better job.
     */
    syn &= ARM_INSN_START_WORD2_MASK;
    syn >>= ARM_INSN_START_WORD2_SHIFT;

    /* Check for multiple updates.  */
    assert(!s->insn_start_updated);
    s->insn_start_updated = true;
    tcg_set_insn_start_param(s->base.insn_start, 2, syn);
}

static inline int curr_insn_len(DisasContext *s)
{
    return s->base.pc_next - s->pc_curr;
}

/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
/* CPU state was modified dynamically; exit to main loop for interrupts. */
#define DISAS_UPDATE_EXIT  DISAS_TARGET_1
/* These instructions trap after executing, so the A32/T32 decoder must
 * defer them until after the conditional execution state has been updated.
 * WFI also needs special handling when single-stepping.
 */
#define DISAS_WFI       DISAS_TARGET_2
#define DISAS_SWI       DISAS_TARGET_3
/* WFE */
#define DISAS_WFE       DISAS_TARGET_4
#define DISAS_HVC       DISAS_TARGET_5
#define DISAS_SMC       DISAS_TARGET_6
#define DISAS_YIELD     DISAS_TARGET_7
/* M profile branch which might be an exception return (and so needs
 * custom end-of-TB code)
 */
#define DISAS_BX_EXCRET DISAS_TARGET_8
/*
 * For instructions which want an immediate exit to the main loop, as opposed
 * to attempting to use lookup_and_goto_ptr.  Unlike DISAS_UPDATE_EXIT, this
 * doesn't write the PC on exiting the translation loop so you need to ensure
 * something (gen_a64_update_pc or runtime helper) has done so before we reach
 * return from cpu_tb_exec.
 */
#define DISAS_EXIT      DISAS_TARGET_9
/* CPU state was modified dynamically; no need to exit, but do not chain. */
#define DISAS_UPDATE_NOCHAIN  DISAS_TARGET_10
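
/*
 * A trans_ function that has changed CPU state dynamically requests one of
 * the exit styles above by setting is_jmp; a minimal sketch (the insn and
 * helper names are hypothetical):
 *
 *     static bool trans_FOO(DisasContext *s, arg_FOO *a)
 *     {
 *         gen_helper_foo(tcg_env);             // hypothetical helper
 *         s->base.is_jmp = DISAS_UPDATE_EXIT;  // exit to the main loop
 *         return true;
 *     }
 */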

#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_a64_update_pc(DisasContext *s, target_long diff);
extern const TranslatorOps aarch64_translator_ops;
#else
static inline void a64_translate_init(void)
{
}

static inline void gen_a64_update_pc(DisasContext *s, target_long diff)
{
}
#endif

void arm_test_cc(DisasCompare *cmp, int cc);
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
MemOp pow2_align(unsigned i);
void unallocated_encoding(DisasContext *s);
void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
                           uint32_t syn, uint32_t target_el);
void gen_exception_insn(DisasContext *s, target_long pc_diff,
                        int excp, uint32_t syn);

/* Return state of Alternate Half-precision flag, caller frees result */
static inline TCGv_i32 get_ahp_flag(void)
{
    TCGv_i32 ret = tcg_temp_new_i32();

    tcg_gen_ld_i32(ret, tcg_env, offsetoflow32(CPUARMState, vfp.fpcr));
    tcg_gen_extract_i32(ret, ret, 26, 1);

    return ret;
}

/* Set bits within PSTATE.  */
static inline void set_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
    tcg_gen_ori_i32(p, p, bits);
    tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
}

/* Clear bits within PSTATE.  */
static inline void clear_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
    tcg_gen_andi_i32(p, p, ~bits);
    tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
}

/* If the singlestep state is Active-not-pending, advance to Active-pending. */
static inline void gen_ss_advance(DisasContext *s)
{
    if (s->ss_active) {
        s->pstate_ss = 0;
        clear_pstate_bits(PSTATE_SS);
    }
}

/* Generate an architectural singlestep exception */
static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
{
    /* Fill in the same_el field of the syndrome in the helper. */
    uint32_t syn = syn_swstep(false, isv, ex);
    gen_helper_exception_swstep(tcg_env, tcg_constant_i32(syn));
}

/*
 * Given a VFP floating point constant encoded into an 8 bit immediate in an
 * instruction, expand it to the actual constant value of the specified
 * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8);
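
/*
 * For example, assuming the usual MO_* encoding for @size:
 * vfp_expand_imm(MO_32, 0x70) is 0x3f800000 (1.0f) and
 * vfp_expand_imm(MO_64, 0x70) is 0x3ff0000000000000 (1.0).
 */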

static inline void gen_vfp_absh(TCGv_i32 d, TCGv_i32 s)
{
    tcg_gen_andi_i32(d, s, INT16_MAX);
}

static inline void gen_vfp_abss(TCGv_i32 d, TCGv_i32 s)
{
    tcg_gen_andi_i32(d, s, INT32_MAX);
}

static inline void gen_vfp_absd(TCGv_i64 d, TCGv_i64 s)
{
    tcg_gen_andi_i64(d, s, INT64_MAX);
}

static inline void gen_vfp_negh(TCGv_i32 d, TCGv_i32 s)
{
    tcg_gen_xori_i32(d, s, 1u << 15);
}

static inline void gen_vfp_negs(TCGv_i32 d, TCGv_i32 s)
{
    tcg_gen_xori_i32(d, s, 1u << 31);
}

static inline void gen_vfp_negd(TCGv_i64 d, TCGv_i64 s)
{
    tcg_gen_xori_i64(d, s, 1ull << 63);
}

/* Vector operations shared between ARM and AArch64.  */
void gen_gvec_ceq0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_clt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cgt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cle0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cge0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_neon_sqshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_neon_uqshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_neon_sqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_neon_uqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_neon_sqshli(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     int64_t c, uint32_t opr_sz, uint32_t max_sz);
void gen_neon_uqshli(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     int64_t c, uint32_t opr_sz, uint32_t max_sz);
void gen_neon_sqshlui(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                      int64_t c, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_shadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_shsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uhsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);

void gen_uqadd_bhs(TCGv_i64 res, TCGv_i64 qc,
                   TCGv_i64 a, TCGv_i64 b, MemOp esz);
void gen_uqadd_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_sqadd_bhs(TCGv_i64 res, TCGv_i64 qc,
                   TCGv_i64 a, TCGv_i64 b, MemOp esz);
void gen_sqadd_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_uqsub_bhs(TCGv_i64 res, TCGv_i64 qc,
                   TCGv_i64 a, TCGv_i64 b, MemOp esz);
void gen_uqsub_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_sqsub_bhs(TCGv_i64 res, TCGv_i64 qc,
                   TCGv_i64 a, TCGv_i64 b, MemOp esz);
void gen_sqsub_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ushr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh);
void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh);
void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh);
void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh);

void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sqdmulh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                         uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmulh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_addp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_smaxp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sminp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_umaxp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uminp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_cls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_clz(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cnt(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_rbit(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_rev16(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_rev32(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_rev64(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_saddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uaddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t opr_sz, uint32_t max_sz);

/* These exclusively manipulate the sign bit. */
void gen_gvec_fabs(unsigned vece, uint32_t dofs, uint32_t aofs,
                   uint32_t oprsz, uint32_t maxsz);
void gen_gvec_fneg(unsigned vece, uint32_t dofs, uint32_t aofs,
                   uint32_t oprsz, uint32_t maxsz);

void gen_gvec_urecpe(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ursqrte(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                      uint32_t opr_sz, uint32_t max_sz);

/*
 * Forward to the isar_feature_* tests given a DisasContext pointer.
 */
#define dc_isar_feature(name, ctx) \
    ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
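
/*
 * Typical use in a trans_ function (a sketch; the feature and the rest of
 * the function body are illustrative):
 *
 *     if (!dc_isar_feature(aa64_sve, s)) {
 *         return false;
 *     }
 */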

/* Note that the gvec expanders operate on offsets + sizes.  */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32);
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32);
typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64);
typedef void NeonGenOne64OpEnvFn(TCGv_i64, TCGv_env, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);

/**
 * arm_tbflags_from_tb:
 * @tb: the TranslationBlock
 *
 * Extract the flag values from @tb.
 */
static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
{
    return (CPUARMTBFlags){ tb->flags, tb->cs_base };
}

/**
 * fpstatus_ptr: return TCGv_ptr to the specified fp_status field
 *
 * We have multiple softfloat float_status fields in the Arm CPU state struct
 * (see the comment in cpu.h for details). Return a TCGv_ptr which has
 * been set up to point to the requested field in the CPU state struct.
 */
static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset = offsetof(CPUARMState, vfp.fp_status[flavour]);

    tcg_gen_addi_ptr(statusptr, tcg_env, offset);
    return statusptr;
}
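
/*
 * Typical use, e.g. for a scalar single-precision operation (a sketch; the
 * flavour and helper shown are illustrative):
 *
 *     TCGv_ptr fpst = fpstatus_ptr(FPST_A64);
 *     gen_helper_vfp_adds(dest, src1, src2, fpst);
 */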

/**
 * finalize_memop_atom:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 * @atom: atomicity of the memory operation
 *
 * Build the complete MemOp for a memory operation, including alignment,
 * endianness, and atomicity.
 *
 * If (op & MO_AMASK) then the operation already contains the required
 * alignment, e.g. for AccType_ATOMIC.  Otherwise, this is an optionally
 * unaligned operation, e.g. for AccType_NORMAL.
 *
 * In the latter case, there are configuration bits that require alignment,
 * and this is applied here.  Note that there is no way to indicate that
 * no alignment should ever be enforced; this must be handled manually.
 */
static inline MemOp finalize_memop_atom(DisasContext *s, MemOp opc, MemOp atom)
{
    if (s->align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | atom | s->be_data;
}

/**
 * finalize_memop:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Like finalize_memop_atom, but with default atomicity.
 */
static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
{
    MemOp atom = s->lse2 ? MO_ATOM_WITHIN16 : MO_ATOM_IFALIGN;
    return finalize_memop_atom(s, opc, atom);
}
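
/*
 * Typical use when emitting a guest load (a sketch; the destination and
 * address temporaries are illustrative):
 *
 *     MemOp mop = finalize_memop(s, MO_64);
 *     tcg_gen_qemu_ld_i64(dest, addr, get_mem_index(s), mop);
 */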

/**
 * finalize_memop_pair:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Like finalize_memop_atom, but with atomicity for a pair.
 * C.f. Pseudocode for Mem[], operand ispair.
 */
static inline MemOp finalize_memop_pair(DisasContext *s, MemOp opc)
{
    MemOp atom = s->lse2 ? MO_ATOM_WITHIN16_PAIR : MO_ATOM_IFALIGN_PAIR;
    return finalize_memop_atom(s, opc, atom);
}

/**
 * finalize_memop_asimd:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Like finalize_memop_atom, but with atomicity of AccessType_ASIMD.
 */
static inline MemOp finalize_memop_asimd(DisasContext *s, MemOp opc)
{
    /*
     * In the pseudocode for Mem[], with AccessType_ASIMD, size == 16,
     * if IsAligned(8), the first case provides separate atomicity for
     * the pair of 64-bit accesses.  If !IsAligned(8), the middle cases
     * do not apply, and we're left with the final case of no atomicity.
     * Thus MO_ATOM_IFALIGN_PAIR.
     *
     * For other sizes, normal LSE2 rules apply.
     */
    if ((opc & MO_SIZE) == MO_128) {
        return finalize_memop_atom(s, opc, MO_ATOM_IFALIGN_PAIR);
    }
    return finalize_memop(s, opc);
}

/**
 * asimd_imm_const: Expand an encoded SIMD constant value
 *
 * Expand a SIMD constant value. This is essentially the pseudocode
 * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
 * VMVN and VBIC (when cmode < 14 && op == 1).
 *
 * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
 * callers must catch this; we return the 64-bit constant value defined
 * for AArch64.
 *
 * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
 * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
 * we produce an immediate constant value of 0 in these cases.
 */
uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
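
/*
 * For example, asimd_imm_const(0x12, 0, 0) returns 0x0000001200000012
 * (cmode 0 replicates imm8 into each 32-bit lane), while op == 1 with the
 * same cmode returns the bitwise inverse, 0xffffffedffffffed.
 */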

/*
 * gen_disas_label:
 * Create a label and cache a copy of pc_save.
 */
static inline DisasLabel gen_disas_label(DisasContext *s)
{
    return (DisasLabel){
        .label = gen_new_label(),
        .pc_save = s->pc_save,
    };
}

/*
 * set_disas_label:
 * Emit a label and restore the cached copy of pc_save.
 */
static inline void set_disas_label(DisasContext *s, DisasLabel l)
{
    gen_set_label(l.label);
    s->pc_save = l.pc_save;
}
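
/*
 * Typical pattern for skipping over a conditionally executed path (a sketch;
 * the condition shown is illustrative):
 *
 *     DisasLabel over = gen_disas_label(s);
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, some_reg, 0, over.label);
 *     ...emit the code for the taken path...
 *     set_disas_label(s, over);
 */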

static inline TCGv_ptr gen_lookup_cp_reg(uint32_t key)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    gen_helper_lookup_cp_reg(ret, tcg_env, tcg_constant_i32(key));
    return ret;
}

/*
 * Set and reset rounding mode around another operation.
 */
static inline TCGv_i32 gen_set_rmode(ARMFPRounding rmode, TCGv_ptr fpst)
{
    TCGv_i32 new = tcg_constant_i32(arm_rmode_to_sf(rmode));
    TCGv_i32 old = tcg_temp_new_i32();

    gen_helper_set_rmode(old, new, fpst);
    return old;
}

static inline void gen_restore_rmode(TCGv_i32 old, TCGv_ptr fpst)
{
    gen_helper_set_rmode(old, old, fpst);
}
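
/*
 * Typical use around an operation that needs an explicit rounding mode
 * (a sketch; the helper shown is illustrative):
 *
 *     TCGv_i32 tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
 *     gen_helper_rints(dest, src, fpst);
 *     gen_restore_rmode(tcg_rmode, fpst);
 */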

/*
 * Helpers for implementing sets of trans_* functions.
 * Defer the implementation of NAME to FUNC, with optional extra arguments.
 */
#define TRANS(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { return FUNC(s, __VA_ARGS__); }
#define TRANS_FEAT(NAME, FEAT, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }

#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...)            \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a)      \
    {                                                             \
        s->is_nonstreaming = true;                                \
        return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__);  \
    }
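
/*
 * For example, a hypothetical decodetree pattern FOO gated on SVE could be
 * wired up as (all names illustrative):
 *
 *     TRANS_FEAT(FOO, aa64_sve, do_foo, a, false)
 *
 * which expands to a trans_FOO() that checks dc_isar_feature(aa64_sve, s)
 * and then calls do_foo(s, a, false).
 */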

#endif /* TARGET_ARM_TRANSLATE_H */