/*
 * ARM translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "translate.h"
#include "translate-a32.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "semihosting/semihost.h"
#include "cpregs.h"
#include "exec/helper-proto.h"
#include "exec/target_page.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(aa32_jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

/* These are TCG temporaries used only by the legacy iwMMXt decoder */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* These are TCG globals which alias CPUARMState fields */
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };


/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(tcg_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(tcg_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(tcg_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
{
    /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */
    switch (cmode) {
    case 0: case 1:
        /* no-op */
        break;
    case 2: case 3:
        imm <<= 8;
        break;
    case 4: case 5:
        imm <<= 16;
        break;
    case 6: case 7:
        imm <<= 24;
        break;
    case 8: case 9:
        imm |= imm << 16;
        break;
    case 10: case 11:
        imm = (imm << 8) | (imm << 24);
        break;
    case 12:
        imm = (imm << 8) | 0xff;
        break;
    case 13:
        imm = (imm << 16) | 0xffff;
        break;
    case 14:
        if (op) {
            /*
             * This and cmode == 15 op == 1 are the only cases where
             * the top and bottom 32 bits of the encoded constant differ.
             */
            uint64_t imm64 = 0;
            int n;

            for (n = 0; n < 8; n++) {
                if (imm & (1 << n)) {
                    imm64 |= (0xffULL << (n * 8));
                }
            }
            return imm64;
        }
        imm |= (imm << 8) | (imm << 16) | (imm << 24);
        break;
    case 15:
        if (op) {
            /* Reserved encoding for AArch32; valid for AArch64 */
            uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
            if (imm & 0x80) {
                imm64 |= 0x8000000000000000ULL;
            }
            if (imm & 0x40) {
                imm64 |= 0x3fc0000000000000ULL;
            } else {
                imm64 |= 0x4000000000000000ULL;
            }
            return imm64;
        }
        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
            | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
        break;
    }
    if (op) {
        imm = ~imm;
    }
    return dup_const(MO_32, imm);
}
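/*
 * Illustrative expansions (derived from the cases above):
 *   cmode = 0,  op = 0, imm = 0x5a -> 0x0000005a0000005aULL
 *   cmode = 12, op = 0, imm = 0x5a -> 0x00005aff00005affULL
 *   cmode = 14, op = 1, imm = 0xa1 -> 0xff00ff00000000ffULL
 *     (each set bit of imm selects one all-ones byte)
 */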

/* Generate a label used for skipping this instruction */
void arm_gen_condlabel(DisasContext *s)
{
    if (!s->condjmp) {
        s->condlabel = gen_disas_label(s);
        s->condjmp = 1;
    }
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/*
 * Store var into env + offset to a member with size bytes.
 * Free var after use.
 */
void store_cpu_offset(TCGv_i32 var, int offset, int size)
{
    switch (size) {
    case 1:
        tcg_gen_st8_i32(var, tcg_env, offset);
        break;
    case 4:
        tcg_gen_st_i32(var, tcg_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return arm_to_core_mmu_idx(ARMMMUIdx_E30_0);
    case ARMMMUIdx_E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    default:
        g_assert_not_reached();
    }
}

/* The pc_curr difference for an architectural jump. */
static target_long jmp_diff(DisasContext *s, target_long diff)
{
    return diff + (s->thumb ? 4 : 8);
}
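/*
 * Note: in AArch32 the PC reads as the address of the current instruction
 * plus 8 in ARM state and plus 4 in Thumb state; jmp_diff() folds that
 * pipeline offset into the branch displacement.
 */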

static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff);
    } else {
        tcg_gen_movi_i32(var, s->pc_curr + diff);
    }
}

/* Set a variable to the value of a CPU register.  */
void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        gen_pc_plus_diff(s, var, jmp_diff(s, 0));
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/*
 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
 * This is used for load/store for which use of PC implies (literal),
 * or ADD that implies ADR.
 */
TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    if (reg == 15) {
        /*
         * This address is computed from an aligned PC:
         * subtract off the low bits.
         */
        gen_pc_plus_diff(s, tmp, jmp_diff(s, ofs - (s->pc_curr & 3)));
    } else {
        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
    }
    return tmp;
}
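/*
 * Illustrative example for the PC case: a Thumb literal load at
 * pc_curr == 0x8002 with ofs == 8 yields 0x8002 + 4 + 8 - 2 == 0x800c,
 * i.e. Align(PC, 4) + ofs as the pseudocode requires.
 */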

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
        s->pc_save = -1;
    } else if (reg == 13 && arm_dc_feature(s, ARM_FEATURE_M)) {
        /* For M-profile SP bits [1:0] are always zero */
        tcg_gen_andi_i32(var, var, ~3);
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(tcg_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    gen_helper_cpsr_write(tcg_env, var, tcg_constant_i32(mask));
}

static void gen_rebuild_hflags(DisasContext *s, bool new_el)
{
    bool m_profile = arm_dc_feature(s, ARM_FEATURE_M);

    if (new_el) {
        if (m_profile) {
            gen_helper_rebuild_hflags_m32_newel(tcg_env);
        } else {
            gen_helper_rebuild_hflags_a32_newel(tcg_env);
        }
    } else {
        TCGv_i32 tcg_el = tcg_constant_i32(s->current_el);
        if (m_profile) {
            gen_helper_rebuild_hflags_m32(tcg_env, tcg_el);
        } else {
            gen_helper_rebuild_hflags_a32(tcg_env, tcg_el);
        }
    }
}

static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

void clear_eci_state(DisasContext *s)
{
    /*
     * Clear any ECI/ICI state: used when a load multiple/store
     * multiple insn executes.
     */
    if (s->eci) {
        store_cpu_field_constant(0, condexec_bits);
        s->eci = 0;
    }
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
}
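/*
 * On return from gen_smul_dual(), a holds the signed product of the two
 * low halfwords and b holds the signed product of the two high halfwords,
 * ready for the dual-multiply (SMUAD/SMUSD style) insns to combine.
 */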

/* Byteswap each halfword.  */
void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_constant_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(dest, var, tmp);
}
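/* Illustrative example: gen_rev16 turns 0x11223344 into 0x22114433. */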

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
{
    tcg_gen_bswap16_i32(var, var, TCG_BSWAP_OS);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(dest, t0, tmp);
}
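/*
 * Illustrative example: gen_add16(d, 0x0001ffff, 0x00010001) produces
 * 0x00020000 -- each halfword is summed independently and the carry out
 * of the low halfword is discarded rather than propagated.
 */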

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0, t1, cpu_CF);

    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
}
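/*
 * This relies on the identity T0 - T1 - (1 - CF) == T0 + ~T1 + CF, so the
 * SBC flag computation can reuse gen_adc_CC() with the second operand
 * inverted.
 */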

#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmpd = tcg_temp_new_i32();                               \
    TCGv_i32 tmp1 = tcg_temp_new_i32();                               \
    TCGv_i32 zero = tcg_constant_i32(0);                              \
    tcg_gen_andi_i32(tmp1, t1, 0x1f);                                 \
    tcg_gen_##name##_i32(tmpd, t0, tmp1);                             \
    tcg_gen_andi_i32(tmp1, t1, 0xe0);                                 \
    tcg_gen_movcond_i32(TCG_COND_NE, dest, tmp1, zero, zero, tmpd);   \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tcg_gen_umin_i32(tmp1, tmp1, tcg_constant_i32(31));
    tcg_gen_sar_i32(dest, t0, tmp1);
}
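/*
 * For register-specified shifts the ARM ARM uses the low byte of the shift
 * register: LSL/LSR by 32 or more yield 0 (handled by the movcond in
 * GEN_SHIFT above), while ASR by 32 or more fills with the sign bit, which
 * is why gen_sar() clamps the shift amount to 31.
 */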

static void shifter_out_im(TCGv_i32 var, int shift)
{
    tcg_gen_extract_i32(cpu_CF, var, shift, 1);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
        }
    }
};

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, tcg_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, tcg_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, tcg_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, tcg_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
}

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
}

void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);

        store_cpu_field_constant(val, condexec_bits);
    }
}

void gen_update_pc(DisasContext *s, target_long diff)
{
    gen_pc_plus_diff(s, cpu_R[15], diff);
    s->pc_save = s->pc_curr + diff;
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
    s->pc_save = -1;
}

/*
 * Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 * The Security Extension also requires us to check for the FNC_RETURN
 * which signals a function return from non-secure state; this can happen
 * in both Handler and Thread mode.
 * To avoid having to do multiple comparisons in inline generated code,
 * we make the check we do here loose, so it will match for EXC_RETURN
 * in Thread mode. For system emulation do_v7m_exception_exit() checks
 * for these spurious cases and returns without doing anything (giving
 * the same behaviour as for a branch to a non-magic address).
 *
 * In linux-user mode it is unclear what the right behaviour for an
 * attempted FNC_RETURN should be, because in real hardware this will go
 * directly to Secure code (ie not the Linux kernel) which will then treat
 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
 * attempt behave the way it would on a CPU without the security extension,
 * which is to say "like a normal branch". That means we can simply treat
 * all branches as normal with no magic address behaviour.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
#ifndef CONFIG_USER_ONLY
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
#endif
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    DisasLabel excret_label = gen_disas_label(s);
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label.label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (s->ss_active) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    set_disas_label(s, excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_update_pc() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(tcg_env, var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_update_pc(s, curr_insn_len(s));
    gen_helper_v7m_blxns(tcg_env, var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

MemOp pow2_align(unsigned i)
{
    static const MemOp mop_align[] = {
        0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16, MO_ALIGN_32
    };
    g_assert(i < ARRAY_SIZE(mop_align));
    return mop_align[i];
}
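/*
 * The argument is log2 of the required alignment in bytes, so for example
 * pow2_align(2) yields MO_ALIGN_4 (a 4-byte alignment requirement).
 */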

/*
 * Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
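/*
 * The XOR above implements legacy BE32 (SCTLR.B) byte swizzling for
 * sub-word accesses: for example a byte access to address A becomes an
 * access to A ^ 3, and a halfword access becomes an access to A ^ 2.
 */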

/*
 * Internal routines are used for NEON cases where the endianness
 * and/or alignment has already been taken into account and manipulated.
 */
void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
                              TCGv_i32 a32, int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
}

void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
                              TCGv_i32 a32, int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
}

void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
                              TCGv_i32 a32, int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    tcg_gen_qemu_ld_i64(val, addr, index, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
                              TCGv_i32 a32, int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
}

void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                     int index, MemOp opc)
{
    gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
}

void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                     int index, MemOp opc)
{
    gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
}

void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                     int index, MemOp opc)
{
    gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc));
}

void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                     int index, MemOp opc)
{
    gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc));
}

#define DO_GEN_LD(SUFF, OPC)                                            \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,    \
                                     TCGv_i32 a32, int index)          \
{                                                                       \
    gen_aa32_ld_i32(s, val, a32, index, OPC);                           \
}

#define DO_GEN_ST(SUFF, OPC)                                            \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,    \
                                     TCGv_i32 a32, int index)          \
{                                                                       \
    gen_aa32_st_i32(s, val, a32, index, OPC);                           \
}

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_update_pc(s, 0);
    gen_helper_pre_hvc(tcg_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_update_pc(s, curr_insn_len(s));
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    gen_update_pc(s, 0);
    gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa32_smc()));
    gen_update_pc(s, curr_insn_len(s));
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int excp)
{
    gen_set_condexec(s);
    gen_update_pc(s, 0);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_el_v(int excp, uint32_t syndrome, TCGv_i32 tcg_el)
{
    gen_helper_exception_with_syndrome_el(tcg_env, tcg_constant_i32(excp),
                                          tcg_constant_i32(syndrome), tcg_el);
}

static void gen_exception_el(int excp, uint32_t syndrome, uint32_t target_el)
{
    gen_exception_el_v(excp, syndrome, tcg_constant_i32(target_el));
}

static void gen_exception(int excp, uint32_t syndrome)
{
    gen_helper_exception_with_syndrome(tcg_env, tcg_constant_i32(excp),
                                       tcg_constant_i32(syndrome));
}

static void gen_exception_insn_el_v(DisasContext *s, target_long pc_diff,
                                    int excp, uint32_t syn, TCGv_i32 tcg_el)
{
    if (s->aarch64) {
        gen_a64_update_pc(s, pc_diff);
    } else {
        gen_set_condexec(s);
        gen_update_pc(s, pc_diff);
    }
    gen_exception_el_v(excp, syn, tcg_el);
    s->base.is_jmp = DISAS_NORETURN;
}

void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
                           uint32_t syn, uint32_t target_el)
{
    gen_exception_insn_el_v(s, pc_diff, excp, syn,
                            tcg_constant_i32(target_el));
}

void gen_exception_insn(DisasContext *s, target_long pc_diff,
                        int excp, uint32_t syn)
{
    if (s->aarch64) {
        gen_a64_update_pc(s, pc_diff);
    } else {
        gen_set_condexec(s);
        gen_update_pc(s, pc_diff);
    }
    gen_exception(excp, syn);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
    gen_set_condexec(s);
    gen_update_pc(s, 0);
    gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syn));
    s->base.is_jmp = DISAS_NORETURN;
}

void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
void gen_lookup_tb(DisasContext *s)
{
    gen_pc_plus_diff(s, cpu_R[15], curr_insn_len(s));
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled(s->current_el == 0) &&
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, EXCP_SEMIHOST);
        return;
    }

    unallocated_encoding(s);
}

/*
 * Return the offset of a "full" NEON Dreg.
 */
long neon_full_reg_offset(unsigned reg)
{
    return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
}

/*
 * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
long neon_element_offset(int reg, int element, MemOp memop)
{
    int element_size = 1 << (memop & MO_SIZE);
    int ofs = element * element_size;
#if HOST_BIG_ENDIAN
    /*
     * Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_full_reg_offset(reg) + ofs;
}
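/*
 * Illustrative example: on a big-endian host, element 0 of a Dreg with
 * MO_16 lands at byte offset 6 within the 8-byte unit (0 ^ (8 - 2)), so
 * the least significant element still maps onto the least significant
 * bytes of the host's 64-bit value.
 */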

/* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return neon_element_offset(reg, 0, MO_64);
    } else {
        return neon_element_offset(reg >> 1, reg & 1, MO_32);
    }
}

void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
{
    long off = neon_element_offset(reg, ele, memop);

    switch (memop) {
    case MO_SB:
        tcg_gen_ld8s_i32(dest, tcg_env, off);
        break;
    case MO_UB:
        tcg_gen_ld8u_i32(dest, tcg_env, off);
        break;
    case MO_SW:
        tcg_gen_ld16s_i32(dest, tcg_env, off);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(dest, tcg_env, off);
        break;
    case MO_UL:
    case MO_SL:
        tcg_gen_ld_i32(dest, tcg_env, off);
        break;
    default:
        g_assert_not_reached();
    }
}

void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
{
    long off = neon_element_offset(reg, ele, memop);

    switch (memop) {
    case MO_SL:
        tcg_gen_ld32s_i64(dest, tcg_env, off);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(dest, tcg_env, off);
        break;
    case MO_UQ:
        tcg_gen_ld_i64(dest, tcg_env, off);
        break;
    default:
        g_assert_not_reached();
    }
}

void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
{
    long off = neon_element_offset(reg, ele, memop);

    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(src, tcg_env, off);
        break;
    case MO_16:
        tcg_gen_st16_i32(src, tcg_env, off);
        break;
    case MO_32:
        tcg_gen_st_i32(src, tcg_env, off);
        break;
    default:
        g_assert_not_reached();
    }
}

void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
{
    long off = neon_element_offset(reg, ele, memop);

    switch (memop) {
    case MO_32:
        tcg_gen_st32_i64(src, tcg_env, off);
        break;
    case MO_64:
        tcg_gen_st_i64(src, tcg_env, off);
        break;
    default:
        g_assert_not_reached();
    }
}

#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21)) {
            store_reg(s, rd, tmp);
        }
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                         /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                            /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {             /* WLDRH */
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                    } else {                            /* WLDRB */
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                    } else {                            /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                    }
                }
            }
        }
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                                 /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                                 /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                                 /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                                 /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                                 /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                                 /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
1805 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1806 if (((insn >> 6) & 3) == 3)
1807 return 1;
1808 rd = (insn >> 12) & 0xf;
1809 wrd = (insn >> 16) & 0xf;
1810 tmp = load_reg(s, rd);
1811 gen_op_iwmmxt_movq_M0_wRn(wrd);
1812 switch ((insn >> 6) & 3) {
1813 case 0:
1814 tmp2 = tcg_constant_i32(0xff);
1815 tmp3 = tcg_constant_i32((insn & 7) << 3);
1816 break;
1817 case 1:
1818 tmp2 = tcg_constant_i32(0xffff);
1819 tmp3 = tcg_constant_i32((insn & 3) << 4);
1820 break;
1821 case 2:
1822 tmp2 = tcg_constant_i32(0xffffffff);
1823 tmp3 = tcg_constant_i32((insn & 1) << 5);
1824 break;
1825 default:
1826 g_assert_not_reached();
1827 }
1828 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1829 gen_op_iwmmxt_movq_wRn_M0(wrd);
1830 gen_op_iwmmxt_set_mup();
1831 break;
1832 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1833 rd = (insn >> 12) & 0xf;
1834 wrd = (insn >> 16) & 0xf;
1835 if (rd == 15 || ((insn >> 22) & 3) == 3)
1836 return 1;
1837 gen_op_iwmmxt_movq_M0_wRn(wrd);
1838 tmp = tcg_temp_new_i32();
1839 switch ((insn >> 22) & 3) {
1840 case 0:
1841 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1842 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1843 if (insn & 8) {
1844 tcg_gen_ext8s_i32(tmp, tmp);
1845 } else {
1846 tcg_gen_andi_i32(tmp, tmp, 0xff);
1847 }
1848 break;
1849 case 1:
1850 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1851 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1852 if (insn & 8) {
1853 tcg_gen_ext16s_i32(tmp, tmp);
1854 } else {
1855 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1856 }
1857 break;
1858 case 2:
1859 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1860 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1861 break;
1862 }
1863 store_reg(s, rd, tmp);
1864 break;
1865 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1866 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1867 return 1;
1868 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
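/*
 * TEXTRC: move the 4-bit SIMD flag group for the selected wCASF
 * field up to bits [31:28], so that gen_set_nzcv() copies it into
 * the CPSR NZCV flags.
 */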
1869 switch ((insn >> 22) & 3) {
1870 case 0:
1871 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1872 break;
1873 case 1:
1874 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1875 break;
1876 case 2:
1877 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1878 break;
1879 }
1880 tcg_gen_shli_i32(tmp, tmp, 28);
1881 gen_set_nzcv(tmp);
1882 break;
1883 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1884 if (((insn >> 6) & 3) == 3)
1885 return 1;
1886 rd = (insn >> 12) & 0xf;
1887 wrd = (insn >> 16) & 0xf;
1888 tmp = load_reg(s, rd);
1889 switch ((insn >> 6) & 3) {
1890 case 0:
1891 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1892 break;
1893 case 1:
1894 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1895 break;
1896 case 2:
1897 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1898 break;
1899 }
1900 gen_op_iwmmxt_movq_wRn_M0(wrd);
1901 gen_op_iwmmxt_set_mup();
1902 break;
1903 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1904 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1905 return 1;
1906 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1907 tmp2 = tcg_temp_new_i32();
1908 tcg_gen_mov_i32(tmp2, tmp);
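/*
 * TANDC: AND the per-field flag nibbles of wCASF together by
 * repeated shift-and-AND, so the final top nibble (and thus NZCV)
 * reflects the conjunction of the flags of every field.
 */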
1909 switch ((insn >> 22) & 3) {
1910 case 0:
1911 for (i = 0; i < 7; i ++) {
1912 tcg_gen_shli_i32(tmp2, tmp2, 4);
1913 tcg_gen_and_i32(tmp, tmp, tmp2);
1914 }
1915 break;
1916 case 1:
1917 for (i = 0; i < 3; i ++) {
1918 tcg_gen_shli_i32(tmp2, tmp2, 8);
1919 tcg_gen_and_i32(tmp, tmp, tmp2);
1920 }
1921 break;
1922 case 2:
1923 tcg_gen_shli_i32(tmp2, tmp2, 16);
1924 tcg_gen_and_i32(tmp, tmp, tmp2);
1925 break;
1926 }
1927 gen_set_nzcv(tmp);
1928 break;
1929 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1930 wrd = (insn >> 12) & 0xf;
1931 rd0 = (insn >> 16) & 0xf;
1932 gen_op_iwmmxt_movq_M0_wRn(rd0);
1933 switch ((insn >> 22) & 3) {
1934 case 0:
1935 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1936 break;
1937 case 1:
1938 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1939 break;
1940 case 2:
1941 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1942 break;
1943 case 3:
1944 return 1;
1945 }
1946 gen_op_iwmmxt_movq_wRn_M0(wrd);
1947 gen_op_iwmmxt_set_mup();
1948 break;
1949 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1950 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1951 return 1;
1952 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1953 tmp2 = tcg_temp_new_i32();
1954 tcg_gen_mov_i32(tmp2, tmp);
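/*
 * TORC: as for TANDC above, but OR the per-field flag nibbles of
 * wCASF together, so NZCV reflects the disjunction of all fields.
 */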
1955 switch ((insn >> 22) & 3) {
1956 case 0:
1957 for (i = 0; i < 7; i ++) {
1958 tcg_gen_shli_i32(tmp2, tmp2, 4);
1959 tcg_gen_or_i32(tmp, tmp, tmp2);
1960 }
1961 break;
1962 case 1:
1963 for (i = 0; i < 3; i ++) {
1964 tcg_gen_shli_i32(tmp2, tmp2, 8);
1965 tcg_gen_or_i32(tmp, tmp, tmp2);
1966 }
1967 break;
1968 case 2:
1969 tcg_gen_shli_i32(tmp2, tmp2, 16);
1970 tcg_gen_or_i32(tmp, tmp, tmp2);
1971 break;
1972 }
1973 gen_set_nzcv(tmp);
1974 break;
1975 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1976 rd = (insn >> 12) & 0xf;
1977 rd0 = (insn >> 16) & 0xf;
1978 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1979 return 1;
1980 gen_op_iwmmxt_movq_M0_wRn(rd0);
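/*
 * TMOVMSK: the msbb/msbw/msbl helpers gather the most significant
 * bit of each byte/halfword/word element of wRn into the low bits
 * of the destination core register.
 */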
1981 tmp = tcg_temp_new_i32();
1982 switch ((insn >> 22) & 3) {
1983 case 0:
1984 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1985 break;
1986 case 1:
1987 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1988 break;
1989 case 2:
1990 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1991 break;
1992 }
1993 store_reg(s, rd, tmp);
1994 break;
1995 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1996 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1997 wrd = (insn >> 12) & 0xf;
1998 rd0 = (insn >> 16) & 0xf;
1999 rd1 = (insn >> 0) & 0xf;
2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
2001 switch ((insn >> 22) & 3) {
2002 case 0:
2003 if (insn & (1 << 21))
2004 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2005 else
2006 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2007 break;
2008 case 1:
2009 if (insn & (1 << 21))
2010 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2011 else
2012 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2013 break;
2014 case 2:
2015 if (insn & (1 << 21))
2016 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2017 else
2018 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2019 break;
2020 case 3:
2021 return 1;
2022 }
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2028 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2029 wrd = (insn >> 12) & 0xf;
2030 rd0 = (insn >> 16) & 0xf;
2031 gen_op_iwmmxt_movq_M0_wRn(rd0);
2032 switch ((insn >> 22) & 3) {
2033 case 0:
2034 if (insn & (1 << 21))
2035 gen_op_iwmmxt_unpacklsb_M0();
2036 else
2037 gen_op_iwmmxt_unpacklub_M0();
2038 break;
2039 case 1:
2040 if (insn & (1 << 21))
2041 gen_op_iwmmxt_unpacklsw_M0();
2042 else
2043 gen_op_iwmmxt_unpackluw_M0();
2044 break;
2045 case 2:
2046 if (insn & (1 << 21))
2047 gen_op_iwmmxt_unpacklsl_M0();
2048 else
2049 gen_op_iwmmxt_unpacklul_M0();
2050 break;
2051 case 3:
2052 return 1;
2053 }
2054 gen_op_iwmmxt_movq_wRn_M0(wrd);
2055 gen_op_iwmmxt_set_mup();
2056 gen_op_iwmmxt_set_cup();
2057 break;
2058 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2059 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2060 wrd = (insn >> 12) & 0xf;
2061 rd0 = (insn >> 16) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
2063 switch ((insn >> 22) & 3) {
2064 case 0:
2065 if (insn & (1 << 21))
2066 gen_op_iwmmxt_unpackhsb_M0();
2067 else
2068 gen_op_iwmmxt_unpackhub_M0();
2069 break;
2070 case 1:
2071 if (insn & (1 << 21))
2072 gen_op_iwmmxt_unpackhsw_M0();
2073 else
2074 gen_op_iwmmxt_unpackhuw_M0();
2075 break;
2076 case 2:
2077 if (insn & (1 << 21))
2078 gen_op_iwmmxt_unpackhsl_M0();
2079 else
2080 gen_op_iwmmxt_unpackhul_M0();
2081 break;
2082 case 3:
2083 return 1;
2084 }
2085 gen_op_iwmmxt_movq_wRn_M0(wrd);
2086 gen_op_iwmmxt_set_mup();
2087 gen_op_iwmmxt_set_cup();
2088 break;
2089 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2090 case 0x214: case 0x614: case 0xa14: case 0xe14:
2091 if (((insn >> 22) & 3) == 0)
2092 return 1;
2093 wrd = (insn >> 12) & 0xf;
2094 rd0 = (insn >> 16) & 0xf;
2095 gen_op_iwmmxt_movq_M0_wRn(rd0);
2096 tmp = tcg_temp_new_i32();
2097 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2098 return 1;
2099 }
2100 switch ((insn >> 22) & 3) {
2101 case 1:
2102 gen_helper_iwmmxt_srlw(cpu_M0, tcg_env, cpu_M0, tmp);
2103 break;
2104 case 2:
2105 gen_helper_iwmmxt_srll(cpu_M0, tcg_env, cpu_M0, tmp);
2106 break;
2107 case 3:
2108 gen_helper_iwmmxt_srlq(cpu_M0, tcg_env, cpu_M0, tmp);
2109 break;
2110 }
2111 gen_op_iwmmxt_movq_wRn_M0(wrd);
2112 gen_op_iwmmxt_set_mup();
2113 gen_op_iwmmxt_set_cup();
2114 break;
2115 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2116 case 0x014: case 0x414: case 0x814: case 0xc14:
2117 if (((insn >> 22) & 3) == 0)
2118 return 1;
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 gen_op_iwmmxt_movq_M0_wRn(rd0);
2122 tmp = tcg_temp_new_i32();
2123 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2124 return 1;
2125 }
2126 switch ((insn >> 22) & 3) {
2127 case 1:
2128 gen_helper_iwmmxt_sraw(cpu_M0, tcg_env, cpu_M0, tmp);
2129 break;
2130 case 2:
2131 gen_helper_iwmmxt_sral(cpu_M0, tcg_env, cpu_M0, tmp);
2132 break;
2133 case 3:
2134 gen_helper_iwmmxt_sraq(cpu_M0, tcg_env, cpu_M0, tmp);
2135 break;
2136 }
2137 gen_op_iwmmxt_movq_wRn_M0(wrd);
2138 gen_op_iwmmxt_set_mup();
2139 gen_op_iwmmxt_set_cup();
2140 break;
2141 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2142 case 0x114: case 0x514: case 0x914: case 0xd14:
2143 if (((insn >> 22) & 3) == 0)
2144 return 1;
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 tmp = tcg_temp_new_i32();
2149 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2150 return 1;
2151 }
2152 switch ((insn >> 22) & 3) {
2153 case 1:
2154 gen_helper_iwmmxt_sllw(cpu_M0, tcg_env, cpu_M0, tmp);
2155 break;
2156 case 2:
2157 gen_helper_iwmmxt_slll(cpu_M0, tcg_env, cpu_M0, tmp);
2158 break;
2159 case 3:
2160 gen_helper_iwmmxt_sllq(cpu_M0, tcg_env, cpu_M0, tmp);
2161 break;
2162 }
2163 gen_op_iwmmxt_movq_wRn_M0(wrd);
2164 gen_op_iwmmxt_set_mup();
2165 gen_op_iwmmxt_set_cup();
2166 break;
2167 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2168 case 0x314: case 0x714: case 0xb14: case 0xf14:
2169 if (((insn >> 22) & 3) == 0)
2170 return 1;
2171 wrd = (insn >> 12) & 0xf;
2172 rd0 = (insn >> 16) & 0xf;
2173 gen_op_iwmmxt_movq_M0_wRn(rd0);
2174 tmp = tcg_temp_new_i32();
2175 switch ((insn >> 22) & 3) {
2176 case 1:
2177 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2178 return 1;
2179 }
2180 gen_helper_iwmmxt_rorw(cpu_M0, tcg_env, cpu_M0, tmp);
2181 break;
2182 case 2:
2183 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2184 return 1;
2185 }
2186 gen_helper_iwmmxt_rorl(cpu_M0, tcg_env, cpu_M0, tmp);
2187 break;
2188 case 3:
2189 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2190 return 1;
2191 }
2192 gen_helper_iwmmxt_rorq(cpu_M0, tcg_env, cpu_M0, tmp);
2193 break;
2194 }
2195 gen_op_iwmmxt_movq_wRn_M0(wrd);
2196 gen_op_iwmmxt_set_mup();
2197 gen_op_iwmmxt_set_cup();
2198 break;
2199 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2200 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2201 wrd = (insn >> 12) & 0xf;
2202 rd0 = (insn >> 16) & 0xf;
2203 rd1 = (insn >> 0) & 0xf;
2204 gen_op_iwmmxt_movq_M0_wRn(rd0);
2205 switch ((insn >> 22) & 3) {
2206 case 0:
2207 if (insn & (1 << 21))
2208 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2209 else
2210 gen_op_iwmmxt_minub_M0_wRn(rd1);
2211 break;
2212 case 1:
2213 if (insn & (1 << 21))
2214 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2215 else
2216 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2217 break;
2218 case 2:
2219 if (insn & (1 << 21))
2220 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2221 else
2222 gen_op_iwmmxt_minul_M0_wRn(rd1);
2223 break;
2224 case 3:
2225 return 1;
2226 }
2227 gen_op_iwmmxt_movq_wRn_M0(wrd);
2228 gen_op_iwmmxt_set_mup();
2229 break;
2230 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2231 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2232 wrd = (insn >> 12) & 0xf;
2233 rd0 = (insn >> 16) & 0xf;
2234 rd1 = (insn >> 0) & 0xf;
2235 gen_op_iwmmxt_movq_M0_wRn(rd0);
2236 switch ((insn >> 22) & 3) {
2237 case 0:
2238 if (insn & (1 << 21))
2239 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2240 else
2241 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2242 break;
2243 case 1:
2244 if (insn & (1 << 21))
2245 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2246 else
2247 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2248 break;
2249 case 2:
2250 if (insn & (1 << 21))
2251 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2252 else
2253 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2254 break;
2255 case 3:
2256 return 1;
2257 }
2258 gen_op_iwmmxt_movq_wRn_M0(wrd);
2259 gen_op_iwmmxt_set_mup();
2260 break;
2261 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2262 case 0x402: case 0x502: case 0x602: case 0x702:
2263 wrd = (insn >> 12) & 0xf;
2264 rd0 = (insn >> 16) & 0xf;
2265 rd1 = (insn >> 0) & 0xf;
2266 gen_op_iwmmxt_movq_M0_wRn(rd0);
2267 iwmmxt_load_reg(cpu_V1, rd1);
2268 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1,
2269 tcg_constant_i32((insn >> 20) & 3));
2270 gen_op_iwmmxt_movq_wRn_M0(wrd);
2271 gen_op_iwmmxt_set_mup();
2272 break;
2273 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2274 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2275 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2276 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2277 wrd = (insn >> 12) & 0xf;
2278 rd0 = (insn >> 16) & 0xf;
2279 rd1 = (insn >> 0) & 0xf;
2280 gen_op_iwmmxt_movq_M0_wRn(rd0);
2281 switch ((insn >> 20) & 0xf) {
2282 case 0x0:
2283 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2284 break;
2285 case 0x1:
2286 gen_op_iwmmxt_subub_M0_wRn(rd1);
2287 break;
2288 case 0x3:
2289 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2290 break;
2291 case 0x4:
2292 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2293 break;
2294 case 0x5:
2295 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2296 break;
2297 case 0x7:
2298 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2299 break;
2300 case 0x8:
2301 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2302 break;
2303 case 0x9:
2304 gen_op_iwmmxt_subul_M0_wRn(rd1);
2305 break;
2306 case 0xb:
2307 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2308 break;
2309 default:
2310 return 1;
2311 }
2312 gen_op_iwmmxt_movq_wRn_M0(wrd);
2313 gen_op_iwmmxt_set_mup();
2314 gen_op_iwmmxt_set_cup();
2315 break;
2316 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2317 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2318 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2319 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2320 wrd = (insn >> 12) & 0xf;
2321 rd0 = (insn >> 16) & 0xf;
2322 gen_op_iwmmxt_movq_M0_wRn(rd0);
2323 tmp = tcg_constant_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2324 gen_helper_iwmmxt_shufh(cpu_M0, tcg_env, cpu_M0, tmp);
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2328 break;
2329 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2330 case 0x418: case 0x518: case 0x618: case 0x718:
2331 case 0x818: case 0x918: case 0xa18: case 0xb18:
2332 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2333 wrd = (insn >> 12) & 0xf;
2334 rd0 = (insn >> 16) & 0xf;
2335 rd1 = (insn >> 0) & 0xf;
2336 gen_op_iwmmxt_movq_M0_wRn(rd0);
2337 switch ((insn >> 20) & 0xf) {
2338 case 0x0:
2339 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2340 break;
2341 case 0x1:
2342 gen_op_iwmmxt_addub_M0_wRn(rd1);
2343 break;
2344 case 0x3:
2345 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2346 break;
2347 case 0x4:
2348 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2349 break;
2350 case 0x5:
2351 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2352 break;
2353 case 0x7:
2354 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2355 break;
2356 case 0x8:
2357 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2358 break;
2359 case 0x9:
2360 gen_op_iwmmxt_addul_M0_wRn(rd1);
2361 break;
2362 case 0xb:
2363 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2364 break;
2365 default:
2366 return 1;
2367 }
2368 gen_op_iwmmxt_movq_wRn_M0(wrd);
2369 gen_op_iwmmxt_set_mup();
2370 gen_op_iwmmxt_set_cup();
2371 break;
2372 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2373 case 0x408: case 0x508: case 0x608: case 0x708:
2374 case 0x808: case 0x908: case 0xa08: case 0xb08:
2375 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2376 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2377 return 1;
2378 wrd = (insn >> 12) & 0xf;
2379 rd0 = (insn >> 16) & 0xf;
2380 rd1 = (insn >> 0) & 0xf;
2381 gen_op_iwmmxt_movq_M0_wRn(rd0);
2382 switch ((insn >> 22) & 3) {
2383 case 1:
2384 if (insn & (1 << 21))
2385 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2386 else
2387 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2388 break;
2389 case 2:
2390 if (insn & (1 << 21))
2391 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2392 else
2393 gen_op_iwmmxt_packul_M0_wRn(rd1);
2394 break;
2395 case 3:
2396 if (insn & (1 << 21))
2397 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2398 else
2399 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2400 break;
2401 }
2402 gen_op_iwmmxt_movq_wRn_M0(wrd);
2403 gen_op_iwmmxt_set_mup();
2404 gen_op_iwmmxt_set_cup();
2405 break;
2406 case 0x201: case 0x203: case 0x205: case 0x207:
2407 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2408 case 0x211: case 0x213: case 0x215: case 0x217:
2409 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2410 wrd = (insn >> 5) & 0xf;
2411 rd0 = (insn >> 12) & 0xf;
2412 rd1 = (insn >> 0) & 0xf;
2413 if (rd0 == 0xf || rd1 == 0xf)
2414 return 1;
2415 gen_op_iwmmxt_movq_M0_wRn(wrd);
2416 tmp = load_reg(s, rd0);
2417 tmp2 = load_reg(s, rd1);
2418 switch ((insn >> 16) & 0xf) {
2419 case 0x0: /* TMIA */
2420 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2421 break;
2422 case 0x8: /* TMIAPH */
2423 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2424 break;
2425 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2426 if (insn & (1 << 16))
2427 tcg_gen_shri_i32(tmp, tmp, 16);
2428 if (insn & (1 << 17))
2429 tcg_gen_shri_i32(tmp2, tmp2, 16);
2430 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2431 break;
2432 default:
2433 return 1;
2434 }
2435 gen_op_iwmmxt_movq_wRn_M0(wrd);
2436 gen_op_iwmmxt_set_mup();
2437 break;
2438 default:
2439 return 1;
2440 }
2441
2442 return 0;
2443 }
2444
2445 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2446 (i.e. an undefined instruction). */
2447 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2448 {
2449 int acc, rd0, rd1, rdhi, rdlo;
2450 TCGv_i32 tmp, tmp2;
2451
2452 if ((insn & 0x0ff00f10) == 0x0e200010) {
2453 /* Multiply with Internal Accumulate Format */
2454 rd0 = (insn >> 12) & 0xf;
2455 rd1 = insn & 0xf;
2456 acc = (insn >> 5) & 7;
2457
2458 if (acc != 0)
2459 return 1;
2460
2461 tmp = load_reg(s, rd0);
2462 tmp2 = load_reg(s, rd1);
2463 switch ((insn >> 16) & 0xf) {
2464 case 0x0: /* MIA */
2465 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2466 break;
2467 case 0x8: /* MIAPH */
2468 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2469 break;
2470 case 0xc: /* MIABB */
2471 case 0xd: /* MIABT */
2472 case 0xe: /* MIATB */
2473 case 0xf: /* MIATT */
2474 if (insn & (1 << 16))
2475 tcg_gen_shri_i32(tmp, tmp, 16);
2476 if (insn & (1 << 17))
2477 tcg_gen_shri_i32(tmp2, tmp2, 16);
2478 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2479 break;
2480 default:
2481 return 1;
2482 }
2483
2484 gen_op_iwmmxt_movq_wRn_M0(acc);
2485 return 0;
2486 }
2487
2488 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2489 /* Internal Accumulator Access Format */
2490 rdhi = (insn >> 16) & 0xf;
2491 rdlo = (insn >> 12) & 0xf;
2492 acc = insn & 7;
2493
2494 if (acc != 0)
2495 return 1;
2496
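/*
 * acc0 is a 40-bit accumulator: MRA returns its low word in RdLo and
 * bits [39:32] in RdHi (hence the 0xff mask below), while MAR rebuilds
 * it from the RdHi:RdLo pair.
 */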
2497 if (insn & ARM_CP_RW_BIT) { /* MRA */
2498 iwmmxt_load_reg(cpu_V0, acc);
2499 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2500 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
2501 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2502 } else { /* MAR */
2503 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2504 iwmmxt_store_reg(cpu_V0, acc);
2505 }
2506 return 0;
2507 }
2508
2509 return 1;
2510 }
2511
2512 static void gen_goto_ptr(void)
2513 {
2514 tcg_gen_lookup_and_goto_ptr();
2515 }
2516
2517 /* This will end the TB but doesn't guarantee we'll return to
2518 * cpu_loop_exec. Any live exit_requests will be processed as we
2519 * enter the next TB.
2520 */
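/*
 * Note that 'diff' is a byte offset relative to s->pc_curr, not an
 * absolute target address.
 */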
2521 static void gen_goto_tb(DisasContext *s, int n, target_long diff)
2522 {
2523 if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) {
2524 /*
2525 * For pcrel, the pc must always be up-to-date on entry to
2526 * the linked TB, so that it can use simple additions for all
2527 * further adjustments. For !pcrel, the linked TB is compiled
2528 * to know its full virtual address, so we can delay the
2529 * update to pc to the unlinked path. A long chain of links
2530 * can thus avoid many updates to the PC.
2531 */
2532 if (tb_cflags(s->base.tb) & CF_PCREL) {
2533 gen_update_pc(s, diff);
2534 tcg_gen_goto_tb(n);
2535 } else {
2536 tcg_gen_goto_tb(n);
2537 gen_update_pc(s, diff);
2538 }
2539 tcg_gen_exit_tb(s->base.tb, n);
2540 } else {
2541 gen_update_pc(s, diff);
2542 gen_goto_ptr();
2543 }
2544 s->base.is_jmp = DISAS_NORETURN;
2545 }
2546
2547 /* Jump, specifying which TB number to use if we gen_goto_tb() */
2548 static void gen_jmp_tb(DisasContext *s, target_long diff, int tbno)
2549 {
2550 if (unlikely(s->ss_active)) {
2551 /* An indirect jump so that we still trigger the debug exception. */
2552 gen_update_pc(s, diff);
2553 s->base.is_jmp = DISAS_JUMP;
2554 return;
2555 }
2556 switch (s->base.is_jmp) {
2557 case DISAS_NEXT:
2558 case DISAS_TOO_MANY:
2559 case DISAS_NORETURN:
2560 /*
2561 * The normal case: just go to the destination TB.
2562 * NB: NORETURN happens if we generate code like
2563 * gen_brcondi(l);
2564 * gen_jmp();
2565 * gen_set_label(l);
2566 * gen_jmp();
2567 * on the second call to gen_jmp().
2568 */
2569 gen_goto_tb(s, tbno, diff);
2570 break;
2571 case DISAS_UPDATE_NOCHAIN:
2572 case DISAS_UPDATE_EXIT:
2573 /*
2574 * We already decided we're leaving the TB for some other reason.
2575 * Avoid using goto_tb so we really do exit back to the main loop
2576 * and don't chain to another TB.
2577 */
2578 gen_update_pc(s, diff);
2579 gen_goto_ptr();
2580 s->base.is_jmp = DISAS_NORETURN;
2581 break;
2582 default:
2583 /*
2584 * We shouldn't be emitting code for a jump and also have
2585 * is_jmp set to one of the special cases like DISAS_SWI.
2586 */
2587 g_assert_not_reached();
2588 }
2589 }
2590
2591 static inline void gen_jmp(DisasContext *s, target_long diff)
2592 {
2593 gen_jmp_tb(s, diff, 0);
2594 }
2595
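/*
 * gen_mulxy() implements the signed 16x16->32 multiply used by the
 * SMULxy/SMLAxy-style operations: x and y pick the top (1) or bottom
 * (0) halfword of t0 and t1 respectively, sign-extended before the
 * multiply; e.g. a "TB" variant corresponds to x = 1, y = 0.
 */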
2596 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2597 {
2598 if (x)
2599 tcg_gen_sari_i32(t0, t0, 16);
2600 else
2601 gen_sxth(t0);
2602 if (y)
2603 tcg_gen_sari_i32(t1, t1, 16);
2604 else
2605 gen_sxth(t1);
2606 tcg_gen_mul_i32(t0, t0, t1);
2607 }
2608
2609 /* Return the mask of PSR bits set by an MSR instruction. */
2610 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2611 {
2612 uint32_t mask = 0;
2613
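/*
 * The four flags bits correspond to the <c,x,s,f> field mask of MSR:
 * bit 0 is the control field PSR[7:0], bit 1 the extension field
 * PSR[15:8], bit 2 the status field PSR[23:16] and bit 3 the flags
 * field PSR[31:24].
 */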
2614 if (flags & (1 << 0)) {
2615 mask |= 0xff;
2616 }
2617 if (flags & (1 << 1)) {
2618 mask |= 0xff00;
2619 }
2620 if (flags & (1 << 2)) {
2621 mask |= 0xff0000;
2622 }
2623 if (flags & (1 << 3)) {
2624 mask |= 0xff000000;
2625 }
2626
2627 /* Mask out undefined and reserved bits. */
2628 mask &= aarch32_cpsr_valid_mask(s->features, s->isar);
2629
2630 /* Mask out execution state. */
2631 if (!spsr) {
2632 mask &= ~CPSR_EXEC;
2633 }
2634
2635 /* Mask out privileged bits. */
2636 if (IS_USER(s)) {
2637 mask &= CPSR_USER;
2638 }
2639 return mask;
2640 }
2641
2642 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
2643 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
2644 {
2645 TCGv_i32 tmp;
2646 if (spsr) {
2647 /* ??? This is also undefined in system mode. */
2648 if (IS_USER(s))
2649 return 1;
2650
2651 tmp = load_cpu_field(spsr);
2652 tcg_gen_andi_i32(tmp, tmp, ~mask);
2653 tcg_gen_andi_i32(t0, t0, mask);
2654 tcg_gen_or_i32(tmp, tmp, t0);
2655 store_cpu_field(tmp, spsr);
2656 } else {
2657 gen_set_cpsr(t0, mask);
2658 }
2659 gen_lookup_tb(s);
2660 return 0;
2661 }
2662
2663 /* Returns nonzero if access to the PSR is not permitted. */
2664 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
2665 {
2666 TCGv_i32 tmp;
2667 tmp = tcg_temp_new_i32();
2668 tcg_gen_movi_i32(tmp, val);
2669 return gen_set_psr(s, mask, spsr, tmp);
2670 }
2671
2672 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
2673 int *tgtmode, int *regno)
2674 {
2675 /* Decode the r and sysm fields of MSR/MRS banked accesses into
2676 * the target mode and register number, and identify the various
2677 * unpredictable cases.
2678 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
2679 * + executed in user mode
2680 * + using R15 as the src/dest register
2681 * + accessing an unimplemented register
2682 * + accessing a register that's inaccessible at current PL/security state*
2683 * + accessing a register that you could access with a different insn
2684 * We choose to UNDEF in all these cases.
2685 * Since we don't know which of the various AArch32 modes we are in
2686 * we have to defer some checks to runtime.
2687 * Accesses to Monitor mode registers from Secure EL1 (which implies
2688 * that EL3 is AArch64) must trap to EL3.
2689 *
2690 * If the access checks fail this function will emit code to take
2691 * an exception and return false. Otherwise it will return true,
2692 * and set *tgtmode and *regno appropriately.
2693 */
2694 /* These instructions are present only in ARMv8, or in ARMv7 with the
2695 * Virtualization Extensions.
2696 */
2697 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
2698 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
2699 goto undef;
2700 }
2701
2702 if (IS_USER(s) || rn == 15) {
2703 goto undef;
2704 }
2705
2706 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
2707 * of registers into (r, sysm).
2708 */
2709 if (r) {
2710 /* SPSRs for other modes */
2711 switch (sysm) {
2712 case 0xe: /* SPSR_fiq */
2713 *tgtmode = ARM_CPU_MODE_FIQ;
2714 break;
2715 case 0x10: /* SPSR_irq */
2716 *tgtmode = ARM_CPU_MODE_IRQ;
2717 break;
2718 case 0x12: /* SPSR_svc */
2719 *tgtmode = ARM_CPU_MODE_SVC;
2720 break;
2721 case 0x14: /* SPSR_abt */
2722 *tgtmode = ARM_CPU_MODE_ABT;
2723 break;
2724 case 0x16: /* SPSR_und */
2725 *tgtmode = ARM_CPU_MODE_UND;
2726 break;
2727 case 0x1c: /* SPSR_mon */
2728 *tgtmode = ARM_CPU_MODE_MON;
2729 break;
2730 case 0x1e: /* SPSR_hyp */
2731 *tgtmode = ARM_CPU_MODE_HYP;
2732 break;
2733 default: /* unallocated */
2734 goto undef;
2735 }
2736 /* We arbitrarily assign SPSR a register number of 16. */
2737 *regno = 16;
2738 } else {
2739 /* general purpose registers for other modes */
2740 switch (sysm) {
2741 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
2742 *tgtmode = ARM_CPU_MODE_USR;
2743 *regno = sysm + 8;
2744 break;
2745 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
2746 *tgtmode = ARM_CPU_MODE_FIQ;
2747 *regno = sysm;
2748 break;
2749 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
2750 *tgtmode = ARM_CPU_MODE_IRQ;
2751 *regno = sysm & 1 ? 13 : 14;
2752 break;
2753 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
2754 *tgtmode = ARM_CPU_MODE_SVC;
2755 *regno = sysm & 1 ? 13 : 14;
2756 break;
2757 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
2758 *tgtmode = ARM_CPU_MODE_ABT;
2759 *regno = sysm & 1 ? 13 : 14;
2760 break;
2761 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
2762 *tgtmode = ARM_CPU_MODE_UND;
2763 *regno = sysm & 1 ? 13 : 14;
2764 break;
2765 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
2766 *tgtmode = ARM_CPU_MODE_MON;
2767 *regno = sysm & 1 ? 13 : 14;
2768 break;
2769 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
2770 *tgtmode = ARM_CPU_MODE_HYP;
2771 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
2772 *regno = sysm & 1 ? 13 : 17;
2773 break;
2774 default: /* unallocated */
2775 goto undef;
2776 }
2777 }
2778
2779 /* Catch the 'accessing inaccessible register' cases we can detect
2780 * at translate time.
2781 */
2782 switch (*tgtmode) {
2783 case ARM_CPU_MODE_MON:
2784 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
2785 goto undef;
2786 }
2787 if (s->current_el == 1) {
2788 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
2789 * then accesses to Mon registers trap to Secure EL2, if it exists,
2790 * otherwise EL3.
2791 */
2792 TCGv_i32 tcg_el;
2793
2794 if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
2795 dc_isar_feature(aa64_sel2, s)) {
2796 /* Target EL is EL<3 minus SCR_EL3.EEL2> */
2797 tcg_el = load_cpu_field_low32(cp15.scr_el3);
2798 tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
2799 tcg_gen_addi_i32(tcg_el, tcg_el, 3);
2800 } else {
2801 tcg_el = tcg_constant_i32(3);
2802 }
2803
2804 gen_exception_insn_el_v(s, 0, EXCP_UDEF,
2805 syn_uncategorized(), tcg_el);
2806 return false;
2807 }
2808 break;
2809 case ARM_CPU_MODE_HYP:
2810 /*
2811 * r13_hyp can only be accessed from Monitor mode, and so we
2812 * can forbid accesses from EL2 or below.
2813 * elr_hyp can be accessed also from Hyp mode, so forbid
2814 * accesses from EL0 or EL1.
2815 * SPSR_hyp is supposed to be in the same category as r13_hyp
2816 * and UNPREDICTABLE if accessed from anything except Monitor
2817 * mode. However there is some real-world code that will do
2818 * it because at least some hardware happens to permit the
2819 * access. (Notably a standard Cortex-R52 startup code fragment
2820 * does this.) So we permit SPSR_hyp from Hyp mode also, to allow
2821 * this (incorrect) guest code to run.
2822 */
2823 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2
2824 || (s->current_el < 3 && *regno != 16 && *regno != 17)) {
2825 goto undef;
2826 }
2827 break;
2828 default:
2829 break;
2830 }
2831
2832 return true;
2833
2834 undef:
2835 /* If we get here then some access check did not pass */
2836 gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
2837 return false;
2838 }
2839
2840 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
2841 {
2842 TCGv_i32 tcg_reg;
2843 int tgtmode = 0, regno = 0;
2844
2845 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2846 return;
2847 }
2848
2849 /* Sync state because msr_banked() can raise exceptions */
2850 gen_set_condexec(s);
2851 gen_update_pc(s, 0);
2852 tcg_reg = load_reg(s, rn);
2853 gen_helper_msr_banked(tcg_env, tcg_reg,
2854 tcg_constant_i32(tgtmode),
2855 tcg_constant_i32(regno));
2856 s->base.is_jmp = DISAS_UPDATE_EXIT;
2857 }
2858
2859 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
2860 {
2861 TCGv_i32 tcg_reg;
2862 int tgtmode = 0, regno = 0;
2863
2864 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2865 return;
2866 }
2867
2868 /* Sync state because mrs_banked() can raise exceptions */
2869 gen_set_condexec(s);
2870 gen_update_pc(s, 0);
2871 tcg_reg = tcg_temp_new_i32();
2872 gen_helper_mrs_banked(tcg_reg, tcg_env,
2873 tcg_constant_i32(tgtmode),
2874 tcg_constant_i32(regno));
2875 store_reg(s, rn, tcg_reg);
2876 s->base.is_jmp = DISAS_UPDATE_EXIT;
2877 }
2878
2879 /* Store value to PC as for an exception return (i.e. don't
2880 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
2881 * will do the masking based on the new value of the Thumb bit.
2882 */
2883 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
2884 {
2885 tcg_gen_mov_i32(cpu_R[15], pc);
2886 }
2887
2888 /* Generate a v6 exception return. Marks both values as dead. */
2889 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2890 {
2891 store_pc_exc_ret(s, pc);
2892 /* The cpsr_write_eret helper will mask the low bits of PC
2893 * appropriately depending on the new Thumb bit, so it must
2894 * be called after storing the new PC.
2895 */
2896 translator_io_start(&s->base);
2897 gen_helper_cpsr_write_eret(tcg_env, cpsr);
2898 /* Must exit loop to check un-masked IRQs */
2899 s->base.is_jmp = DISAS_EXIT;
2900 }
2901
2902 /* Generate an old-style exception return. Marks pc as dead. */
2903 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
2904 {
2905 gen_rfe(s, pc, load_cpu_field(spsr));
2906 }
2907
2908 static bool aa32_cpreg_encoding_in_impdef_space(uint8_t crn, uint8_t crm)
2909 {
2910 static const uint16_t mask[3] = {
2911 0b0000000111100111, /* crn == 9, crm == {c0-c2, c5-c8} */
2912 0b0000000100010011, /* crn == 10, crm == {c0, c1, c4, c8} */
2913 0b1000000111111111, /* crn == 11, crm == {c0-c8, c15} */
2914 };
2915
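/*
 * For example, crn == 9, crm == 5 hits bit 5 of mask[0] above and so
 * is treated as an IMPDEF encoding.
 */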
2916 if (crn >= 9 && crn <= 11) {
2917 return (mask[crn - 9] >> crm) & 1;
2918 }
2919 return false;
2920 }
2921
2922 static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
2923 int opc1, int crn, int crm, int opc2,
2924 bool isread, int rt, int rt2)
2925 {
2926 uint32_t key = ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2);
2927 const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
2928 TCGv_ptr tcg_ri = NULL;
2929 bool need_exit_tb = false;
2930 uint32_t syndrome;
2931
2932 /*
2933 * Note that since we are an implementation which takes an
2934 * exception on a trapped conditional instruction only if the
2935 * instruction passes its condition code check, we can take
2936 * advantage of the clause in the ARM ARM that allows us to set
2937 * the COND field in the instruction to 0xE in all cases.
2938 * We could fish the actual condition out of the insn (ARM)
2939 * or the condexec bits (Thumb) but it isn't necessary.
2940 */
2941 switch (cpnum) {
2942 case 14:
2943 if (is64) {
2944 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
2945 isread, false);
2946 } else {
2947 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
2948 rt, isread, false);
2949 }
2950 break;
2951 case 15:
2952 if (is64) {
2953 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
2954 isread, false);
2955 } else {
2956 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
2957 rt, isread, false);
2958 }
2959 break;
2960 default:
2961 /*
2962 * ARMv8 defines that only coprocessors 14 and 15 exist,
2963 * so this can only happen if this is an ARMv7 or earlier CPU,
2964 * in which case the syndrome information won't actually be
2965 * guest visible.
2966 */
2967 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
2968 syndrome = syn_uncategorized();
2969 break;
2970 }
2971
2972 if (s->hstr_active && cpnum == 15 && s->current_el == 1) {
2973 /*
2974 * At EL1, check for a HSTR_EL2 trap, which must take precedence
2975 * over the UNDEF for "no such register" or the UNDEF for "access
2976 * permissions forbid this EL1 access". HSTR_EL2 traps from EL0
2977 * only happen if the cpreg doesn't UNDEF at EL0, so we do those in
2978 * access_check_cp_reg(), after the checks for whether the access
2979 * configurably trapped to EL1.
2980 */
2981 uint32_t maskbit = is64 ? crm : crn;
2982
2983 if (maskbit != 4 && maskbit != 14) {
2984 /* T4 and T14 are RES0 so never cause traps */
2985 TCGv_i32 t;
2986 DisasLabel over = gen_disas_label(s);
2987
2988 t = load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2));
2989 tcg_gen_andi_i32(t, t, 1u << maskbit);
2990 tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label);
2991
2992 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
2993 /*
2994 * gen_exception_insn() will set is_jmp to DISAS_NORETURN,
2995 * but since we're conditionally branching over it, we want
2996 * to assume continue-to-next-instruction.
2997 */
2998 s->base.is_jmp = DISAS_NEXT;
2999 set_disas_label(s, over);
3000 }
3001 }
3002
3003 if (cpnum == 15 && aa32_cpreg_encoding_in_impdef_space(crn, crm)) {
3004 /*
3005 * Check for TIDCP trap, which must take precedence over the UNDEF
3006 * for "no such register" etc. It shares precedence with HSTR,
3007 * but raises the same exception, so order doesn't matter.
3008 */
3009 switch (s->current_el) {
3010 case 0:
3011 if (arm_dc_feature(s, ARM_FEATURE_AARCH64)
3012 && dc_isar_feature(aa64_tidcp1, s)) {
3013 gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
3014 }
3015 break;
3016 case 1:
3017 gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
3018 break;
3019 }
3020 }
3021
3022 if (!ri) {
3023 /*
3024 * Unknown register; this might be a guest error or a QEMU
3025 * unimplemented feature.
3026 */
3027 if (is64) {
3028 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
3029 "64 bit system register cp:%d opc1: %d crm:%d "
3030 "(%s)\n",
3031 isread ? "read" : "write", cpnum, opc1, crm,
3032 s->ns ? "non-secure" : "secure");
3033 } else {
3034 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
3035 "system register cp:%d opc1:%d crn:%d crm:%d "
3036 "opc2:%d (%s)\n",
3037 isread ? "read" : "write", cpnum, opc1, crn,
3038 crm, opc2, s->ns ? "non-secure" : "secure");
3039 }
3040 unallocated_encoding(s);
3041 return;
3042 }
3043
3044 /* Check access permissions */
3045 if (!cp_access_ok(s->current_el, ri, isread)) {
3046 unallocated_encoding(s);
3047 return;
3048 }
3049
3050 if ((s->hstr_active && s->current_el == 0) || ri->accessfn ||
3051 (ri->fgt && s->fgt_active) ||
3052 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
3053 /*
3054 * Emit code to perform further access permissions checks at
3055 * runtime; this may result in an exception.
3056 * Note that on XScale all cp0..c13 registers do an access check
3057 * call in order to handle c15_cpar.
3058 */
3059 gen_set_condexec(s);
3060 gen_update_pc(s, 0);
3061 tcg_ri = tcg_temp_new_ptr();
3062 gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
3063 tcg_constant_i32(key),
3064 tcg_constant_i32(syndrome),
3065 tcg_constant_i32(isread));
3066 } else if (ri->type & ARM_CP_RAISES_EXC) {
3067 /*
3068 * The readfn or writefn might raise an exception;
3069 * synchronize the CPU state in case it does.
3070 */
3071 gen_set_condexec(s);
3072 gen_update_pc(s, 0);
3073 }
3074
3075 /* Handle special cases first */
3076 switch (ri->type & ARM_CP_SPECIAL_MASK) {
3077 case 0:
3078 break;
3079 case ARM_CP_NOP:
3080 return;
3081 case ARM_CP_WFI:
3082 if (isread) {
3083 unallocated_encoding(s);
3084 } else {
3085 gen_update_pc(s, curr_insn_len(s));
3086 s->base.is_jmp = DISAS_WFI;
3087 }
3088 return;
3089 default:
3090 g_assert_not_reached();
3091 }
3092
3093 if (ri->type & ARM_CP_IO) {
3094 /* I/O operations must end the TB here (whether read or write) */
3095 need_exit_tb = translator_io_start(&s->base);
3096 }
3097
3098 if (isread) {
3099 /* Read */
3100 if (is64) {
3101 TCGv_i64 tmp64;
3102 TCGv_i32 tmp;
3103 if (ri->type & ARM_CP_CONST) {
3104 tmp64 = tcg_constant_i64(ri->resetvalue);
3105 } else if (ri->readfn) {
3106 if (!tcg_ri) {
3107 tcg_ri = gen_lookup_cp_reg(key);
3108 }
3109 tmp64 = tcg_temp_new_i64();
3110 gen_helper_get_cp_reg64(tmp64, tcg_env, tcg_ri);
3111 } else {
3112 tmp64 = tcg_temp_new_i64();
3113 tcg_gen_ld_i64(tmp64, tcg_env, ri->fieldoffset);
3114 }
3115 tmp = tcg_temp_new_i32();
3116 tcg_gen_extrl_i64_i32(tmp, tmp64);
3117 store_reg(s, rt, tmp);
3118 tmp = tcg_temp_new_i32();
3119 tcg_gen_extrh_i64_i32(tmp, tmp64);
3120 store_reg(s, rt2, tmp);
3121 } else {
3122 TCGv_i32 tmp;
3123 if (ri->type & ARM_CP_CONST) {
3124 tmp = tcg_constant_i32(ri->resetvalue);
3125 } else if (ri->readfn) {
3126 if (!tcg_ri) {
3127 tcg_ri = gen_lookup_cp_reg(key);
3128 }
3129 tmp = tcg_temp_new_i32();
3130 gen_helper_get_cp_reg(tmp, tcg_env, tcg_ri);
3131 } else {
3132 tmp = load_cpu_offset(ri->fieldoffset);
3133 }
3134 if (rt == 15) {
3135 /* Destination register of r15 for 32 bit loads sets
3136 * the condition codes from the high 4 bits of the value
3137 */
3138 gen_set_nzcv(tmp);
3139 } else {
3140 store_reg(s, rt, tmp);
3141 }
3142 }
3143 } else {
3144 /* Write */
3145 if (ri->type & ARM_CP_CONST) {
3146 /* If not forbidden by access permissions, treat as WI */
3147 return;
3148 }
3149
3150 if (is64) {
3151 TCGv_i32 tmplo, tmphi;
3152 TCGv_i64 tmp64 = tcg_temp_new_i64();
3153 tmplo = load_reg(s, rt);
3154 tmphi = load_reg(s, rt2);
3155 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
3156 if (ri->writefn) {
3157 if (!tcg_ri) {
3158 tcg_ri = gen_lookup_cp_reg(key);
3159 }
3160 gen_helper_set_cp_reg64(tcg_env, tcg_ri, tmp64);
3161 } else {
3162 tcg_gen_st_i64(tmp64, tcg_env, ri->fieldoffset);
3163 }
3164 } else {
3165 TCGv_i32 tmp = load_reg(s, rt);
3166 if (ri->writefn) {
3167 if (!tcg_ri) {
3168 tcg_ri = gen_lookup_cp_reg(key);
3169 }
3170 gen_helper_set_cp_reg(tcg_env, tcg_ri, tmp);
3171 } else {
3172 store_cpu_offset(tmp, ri->fieldoffset, 4);
3173 }
3174 }
3175 }
3176
3177 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
3178 /*
3179 * A write to any coprocessor register that ends a TB
3180 * must rebuild the hflags for the next TB.
3181 */
3182 gen_rebuild_hflags(s, ri->type & ARM_CP_NEWEL);
3183 /*
3184 * We default to ending the TB on a coprocessor register write,
3185 * but allow this to be suppressed by the register definition
3186 * (usually only necessary to work around guest bugs).
3187 */
3188 need_exit_tb = true;
3189 }
3190 if (need_exit_tb) {
3191 gen_lookup_tb(s);
3192 }
3193 }
3194
3195 /* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */
3196 static void disas_xscale_insn(DisasContext *s, uint32_t insn)
3197 {
3198 int cpnum = (insn >> 8) & 0xf;
3199
3200 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
3201 unallocated_encoding(s);
3202 } else if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
3203 if (disas_iwmmxt_insn(s, insn)) {
3204 unallocated_encoding(s);
3205 }
3206 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
3207 if (disas_dsp_insn(s, insn)) {
3208 unallocated_encoding(s);
3209 }
3210 }
3211 }
3212
3213 /* Store a 64-bit value to a register pair. Clobbers val. */
3214 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
3215 {
3216 TCGv_i32 tmp;
3217 tmp = tcg_temp_new_i32();
3218 tcg_gen_extrl_i64_i32(tmp, val);
3219 store_reg(s, rlow, tmp);
3220 tmp = tcg_temp_new_i32();
3221 tcg_gen_extrh_i64_i32(tmp, val);
3222 store_reg(s, rhigh, tmp);
3223 }
3224
3225 /* Load and add a 64-bit value from a register pair. */
3226 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
3227 {
3228 TCGv_i64 tmp;
3229 TCGv_i32 tmpl;
3230 TCGv_i32 tmph;
3231
3232 /* Load the 64-bit value rhigh:rlow. */
3233 tmpl = load_reg(s, rlow);
3234 tmph = load_reg(s, rhigh);
3235 tmp = tcg_temp_new_i64();
3236 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
3237 tcg_gen_add_i64(val, val, tmp);
3238 }
3239
3240 /* Set N and Z flags from hi|lo. */
3241 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
3242 {
3243 tcg_gen_mov_i32(cpu_NF, hi);
3244 tcg_gen_or_i32(cpu_ZF, lo, hi);
3245 }
3246
3247 /* Load/Store exclusive instructions are implemented by remembering
3248 the value/address loaded, and seeing if these are the same
3249 when the store is performed. This should be sufficient to implement
3250 the architecturally mandated semantics, and avoids having to monitor
3251 regular stores. The compare vs the remembered value is done during
3252 the cmpxchg operation, but we must compare the addresses manually. */
3253 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
3254 TCGv_i32 addr, int size)
3255 {
3256 TCGv_i32 tmp = tcg_temp_new_i32();
3257 MemOp opc = size | MO_ALIGN | s->be_data;
3258
3259 s->is_ldex = true;
3260
3261 if (size == 3) {
3262 TCGv_i32 tmp2 = tcg_temp_new_i32();
3263 TCGv_i64 t64 = tcg_temp_new_i64();
3264
3265 /*
3266 * For AArch32, architecturally the 32-bit word at the lowest
3267 * address is always Rt and the one at addr+4 is Rt2, even if
3268 * the CPU is big-endian. That means we don't want to do a
3269 * gen_aa32_ld_i64(), which checks SCTLR_B as if for an
3270 * architecturally 64-bit access, but instead do a 64-bit access
3271 * using MO_BE if appropriate and then split the two halves.
3272 */
3273 TCGv taddr = gen_aa32_addr(s, addr, opc);
3274
3275 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
3276 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3277 if (s->be_data == MO_BE) {
3278 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
3279 } else {
3280 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
3281 }
3282 store_reg(s, rt2, tmp2);
3283 } else {
3284 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
3285 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
3286 }
3287
3288 store_reg(s, rt, tmp);
3289 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
3290 }
3291
3292 static void gen_clrex(DisasContext *s)
3293 {
3294 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
3295 }
3296
3297 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
3298 TCGv_i32 addr, int size)
3299 {
3300 TCGv_i32 t0, t1, t2;
3301 TCGv_i64 extaddr;
3302 TCGv taddr;
3303 TCGLabel *done_label;
3304 TCGLabel *fail_label;
3305 MemOp opc = size | MO_ALIGN | s->be_data;
3306
3307 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
3308 [addr] = {Rt};
3309 {Rd} = 0;
3310 } else {
3311 {Rd} = 1;
3312 } */
3313 fail_label = gen_new_label();
3314 done_label = gen_new_label();
3315 extaddr = tcg_temp_new_i64();
3316 tcg_gen_extu_i32_i64(extaddr, addr);
3317 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
3318
3319 taddr = gen_aa32_addr(s, addr, opc);
3320 t0 = tcg_temp_new_i32();
3321 t1 = load_reg(s, rt);
3322 if (size == 3) {
3323 TCGv_i64 o64 = tcg_temp_new_i64();
3324 TCGv_i64 n64 = tcg_temp_new_i64();
3325
3326 t2 = load_reg(s, rt2);
3327
3328 /*
3329 * For AArch32, architecturally the 32-bit word at the lowest
3330 * address is always Rt and the one at addr+4 is Rt2, even if
3331 * the CPU is big-endian. Since we're going to treat this as a
3332 * single 64-bit BE store, we need to put the two halves in the
3333 * opposite order for BE to LE, so that they end up in the right
3334 * places. We don't want gen_aa32_st_i64, because that checks
3335 * SCTLR_B as if for an architectural 64-bit access.
3336 */
3337 if (s->be_data == MO_BE) {
3338 tcg_gen_concat_i32_i64(n64, t2, t1);
3339 } else {
3340 tcg_gen_concat_i32_i64(n64, t1, t2);
3341 }
3342
3343 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
3344 get_mem_index(s), opc);
3345
3346 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
3347 tcg_gen_extrl_i64_i32(t0, o64);
3348 } else {
3349 t2 = tcg_temp_new_i32();
3350 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
3351 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
3352 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
3353 }
3354 tcg_gen_mov_i32(cpu_R[rd], t0);
3355 tcg_gen_br(done_label);
3356
3357 gen_set_label(fail_label);
3358 tcg_gen_movi_i32(cpu_R[rd], 1);
3359 gen_set_label(done_label);
3360 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
3361 }
3362
3363 /* gen_srs:
3365 * @s: DisasContext
3366 * @mode: mode field from insn (which stack to store to)
3367 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
3368 * @writeback: true if writeback bit set
3369 *
3370 * Generate code for the SRS (Store Return State) insn.
3371 */
3372 static void gen_srs(DisasContext *s,
3373 uint32_t mode, uint32_t amode, bool writeback)
3374 {
3375 int32_t offset;
3376 TCGv_i32 addr, tmp;
3377 bool undef = false;
3378
3379 /* SRS is:
3380 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
3381 * and specified mode is monitor mode
3382 * - UNDEFINED in Hyp mode
3383 * - UNPREDICTABLE in User or System mode
3384 * - UNPREDICTABLE if the specified mode is:
3385 * -- not implemented
3386 * -- not a valid mode number
3387 * -- a mode that's at a higher exception level
3388 * -- Monitor, if we are Non-secure
3389 * For the UNPREDICTABLE cases we choose to UNDEF.
3390 */
3391 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
3392 gen_exception_insn_el(s, 0, EXCP_UDEF, syn_uncategorized(), 3);
3393 return;
3394 }
3395
3396 if (s->current_el == 0 || s->current_el == 2) {
3397 undef = true;
3398 }
3399
3400 switch (mode) {
3401 case ARM_CPU_MODE_USR:
3402 case ARM_CPU_MODE_FIQ:
3403 case ARM_CPU_MODE_IRQ:
3404 case ARM_CPU_MODE_SVC:
3405 case ARM_CPU_MODE_ABT:
3406 case ARM_CPU_MODE_UND:
3407 case ARM_CPU_MODE_SYS:
3408 break;
3409 case ARM_CPU_MODE_HYP:
3410 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
3411 undef = true;
3412 }
3413 break;
3414 case ARM_CPU_MODE_MON:
3415 /* No need to check specifically for "are we non-secure" because
3416 * we've already made EL0 UNDEF and handled the trap for S-EL1;
3417 * so if this isn't EL3 then we must be non-secure.
3418 */
3419 if (s->current_el != 3) {
3420 undef = true;
3421 }
3422 break;
3423 default:
3424 undef = true;
3425 }
3426
3427 if (undef) {
3428 unallocated_encoding(s);
3429 return;
3430 }
3431
3432 addr = tcg_temp_new_i32();
3433 /* get_r13_banked() will raise an exception if called from System mode */
3434 gen_set_condexec(s);
3435 gen_update_pc(s, 0);
3436 gen_helper_get_r13_banked(addr, tcg_env, tcg_constant_i32(mode));
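/*
 * 'offset' places the first word (LR) for the requested addressing
 * mode; SPSR is then stored at the next word up. The writeback case
 * below adjusts the banked SP by +/-8, as for a two-register store.
 */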
3437 switch (amode) {
3438 case 0: /* DA */
3439 offset = -4;
3440 break;
3441 case 1: /* IA */
3442 offset = 0;
3443 break;
3444 case 2: /* DB */
3445 offset = -8;
3446 break;
3447 case 3: /* IB */
3448 offset = 4;
3449 break;
3450 default:
3451 g_assert_not_reached();
3452 }
3453 tcg_gen_addi_i32(addr, addr, offset);
3454 tmp = load_reg(s, 14);
3455 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
3456 tmp = load_cpu_field(spsr);
3457 tcg_gen_addi_i32(addr, addr, 4);
3458 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
3459 if (writeback) {
3460 switch (amode) {
3461 case 0:
3462 offset = -8;
3463 break;
3464 case 1:
3465 offset = 4;
3466 break;
3467 case 2:
3468 offset = -4;
3469 break;
3470 case 3:
3471 offset = 0;
3472 break;
3473 default:
3474 g_assert_not_reached();
3475 }
3476 tcg_gen_addi_i32(addr, addr, offset);
3477 gen_helper_set_r13_banked(tcg_env, tcg_constant_i32(mode), addr);
3478 }
3479 s->base.is_jmp = DISAS_UPDATE_EXIT;
3480 }
3481
3482 /* Skip this instruction if the ARM condition is false */
3483 static void arm_skip_unless(DisasContext *s, uint32_t cond)
3484 {
3485 arm_gen_condlabel(s);
3486 arm_gen_test_cc(cond ^ 1, s->condlabel.label);
3487 }
3488
3489
3490 /*
3491 * Constant expanders used by T16/T32 decode
3492 */
3493
3494 /* Return only the rotation part of T32ExpandImm. */
3495 static int t32_expandimm_rot(DisasContext *s, int x)
3496 {
3497 return x & 0xc00 ? extract32(x, 7, 5) : 0;
3498 }
3499
3500 /* Return the unrotated immediate from T32ExpandImm. */
3501 static int t32_expandimm_imm(DisasContext *s, int x)
3502 {
3503 uint32_t imm = extract32(x, 0, 8);
3504
3505 switch (extract32(x, 8, 4)) {
3506 case 0: /* XY */
3507 /* Nothing to do. */
3508 break;
3509 case 1: /* 00XY00XY */
3510 imm *= 0x00010001;
3511 break;
3512 case 2: /* XY00XY00 */
3513 imm *= 0x01000100;
3514 break;
3515 case 3: /* XYXYXYXY */
3516 imm *= 0x01010101;
3517 break;
3518 default:
3519 /* Rotated constant. */
3520 imm |= 0x80;
3521 break;
3522 }
3523 return imm;
3524 }
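/*
 * Worked example for the two expanders above: for the 12-bit field
 * 0x1ab, t32_expandimm_rot() returns 0 (no rotation) and
 * t32_expandimm_imm() returns 0x00ab00ab (the 00XY00XY pattern).
 */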
3525
3526 static int t32_branch24(DisasContext *s, int x)
3527 {
3528 /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */
3529 x ^= !(x < 0) * (3 << 21);
3530 /* Append the final zero. */
3531 return x << 1;
3532 }
3533
3534 static int t16_setflags(DisasContext *s)
3535 {
3536 return s->condexec_mask == 0;
3537 }
3538
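/*
 * In the T16 PUSH/POP encodings bit 8 of the register list stands for
 * LR (PUSH) or PC (POP); these expanders move it up to bit 14 or 15
 * respectively so the result is an ordinary 16-bit register list.
 */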
3539 static int t16_push_list(DisasContext *s, int x)
3540 {
3541 return (x & 0xff) | (x & 0x100) << (14 - 8);
3542 }
3543
3544 static int t16_pop_list(DisasContext *s, int x)
3545 {
3546 return (x & 0xff) | (x & 0x100) << (15 - 8);
3547 }
3548
3549 /*
3550 * Include the generated decoders.
3551 */
3552
3553 #include "decode-a32.c.inc"
3554 #include "decode-a32-uncond.c.inc"
3555 #include "decode-t32.c.inc"
3556 #include "decode-t16.c.inc"
3557
3558 static bool valid_cp(DisasContext *s, int cp)
3559 {
3560 /*
3561 * Return true if this coprocessor field indicates something
3562 * that's really a possible coprocessor.
3563 * For v7 and earlier, coprocessors 8..15 were reserved for Arm use,
3564 * and of those only cp14 and cp15 were used for registers.
3565 * cp10 and cp11 were used for VFP and Neon, whose decode is
3566 * dealt with elsewhere. With the advent of fp16, cp9 is also
3567 * now part of VFP.
3568 * For v8A and later, the encoding has been tightened so that
3569 * only cp14 and cp15 are valid, and other values aren't considered
3570 * to be in the coprocessor-instruction space at all. v8M still
3571 * permits coprocessors 0..7.
3572 * For XScale, we must not decode the XScale cp0, cp1 space as
3573 * a standard coprocessor insn, because we want to fall through to
3574 * the legacy disas_xscale_insn() decoder after decodetree is done.
3575 */
3576 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cp == 0 || cp == 1)) {
3577 return false;
3578 }
3579
3580 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
3581 !arm_dc_feature(s, ARM_FEATURE_M)) {
3582 return cp >= 14;
3583 }
3584 return cp < 8 || cp >= 14;
3585 }
3586
3587 static bool trans_MCR(DisasContext *s, arg_MCR *a)
3588 {
3589 if (!valid_cp(s, a->cp)) {
3590 return false;
3591 }
3592 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
3593 false, a->rt, 0);
3594 return true;
3595 }
3596
3597 static bool trans_MRC(DisasContext *s, arg_MRC *a)
3598 {
3599 if (!valid_cp(s, a->cp)) {
3600 return false;
3601 }
3602 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
3603 true, a->rt, 0);
3604 return true;
3605 }
3606
3607 static bool trans_MCRR(DisasContext *s, arg_MCRR *a)
3608 {
3609 if (!valid_cp(s, a->cp)) {
3610 return false;
3611 }
3612 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
3613 false, a->rt, a->rt2);
3614 return true;
3615 }
3616
3617 static bool trans_MRRC(DisasContext *s, arg_MRRC *a)
3618 {
3619 if (!valid_cp(s, a->cp)) {
3620 return false;
3621 }
3622 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
3623 true, a->rt, a->rt2);
3624 return true;
3625 }
3626
3627 /* Helpers to swap operands for reverse-subtract. */
3628 static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
3629 {
3630 tcg_gen_sub_i32(dst, b, a);
3631 }
3632
3633 static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
3634 {
3635 gen_sub_CC(dst, b, a);
3636 }
3637
3638 static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
3639 {
3640 gen_sub_carry(dest, b, a);
3641 }
3642
3643 static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
3644 {
3645 gen_sbc_CC(dest, b, a);
3646 }
3647
3648 /*
3649 * Helpers for the data processing routines.
3650 *
3651 * After the computation store the results back.
3652 * This may be suppressed altogether (STREG_NONE), require a runtime
3653 * check against the stack limits (STREG_SP_CHECK), or generate an
3654 * exception return (STREG_EXC_RET); the common case (STREG_NORMAL) just stores into a register.
3655 *
3656 * Always return true, indicating success for a trans_* function.
3657 */
3658 typedef enum {
3659 STREG_NONE,
3660 STREG_NORMAL,
3661 STREG_SP_CHECK,
3662 STREG_EXC_RET,
3663 } StoreRegKind;
3664
3665 static bool store_reg_kind(DisasContext *s, int rd,
3666 TCGv_i32 val, StoreRegKind kind)
3667 {
3668 switch (kind) {
3669 case STREG_NONE:
3670 return true;
3671 case STREG_NORMAL:
3672 /* See ALUWritePC: Interworking only from a32 mode. */
3673 if (s->thumb) {
3674 store_reg(s, rd, val);
3675 } else {
3676 store_reg_bx(s, rd, val);
3677 }
3678 return true;
3679 case STREG_SP_CHECK:
3680 store_sp_checked(s, val);
3681 return true;
3682 case STREG_EXC_RET:
3683 gen_exception_return(s, val);
3684 return true;
3685 }
3686 g_assert_not_reached();
3687 }
3688
3689 /*
3690 * Data-processing (register)
3691 *
3692 * Operate, with set flags, one register source,
3693 * one immediate shifted register source, and a destination.
3694 */
3695 static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
3696 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3697 int logic_cc, StoreRegKind kind)
3698 {
3699 TCGv_i32 tmp1, tmp2;
3700
3701 tmp2 = load_reg(s, a->rm);
3702 gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
3703 tmp1 = load_reg(s, a->rn);
3704
3705 gen(tmp1, tmp1, tmp2);
3706
3707 if (logic_cc) {
3708 gen_logic_CC(tmp1);
3709 }
3710 return store_reg_kind(s, a->rd, tmp1, kind);
3711 }
3712
3713 static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
3714 void (*gen)(TCGv_i32, TCGv_i32),
3715 int logic_cc, StoreRegKind kind)
3716 {
3717 TCGv_i32 tmp;
3718
3719 tmp = load_reg(s, a->rm);
3720 gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
3721
3722 gen(tmp, tmp);
3723 if (logic_cc) {
3724 gen_logic_CC(tmp);
3725 }
3726 return store_reg_kind(s, a->rd, tmp, kind);
3727 }
3728
3729 /*
3730 * Data-processing (register-shifted register)
3731 *
3732 * Operate, with set flags, one register source,
3733 * one register shifted register source, and a destination.
3734 */
3735 static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
3736 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3737 int logic_cc, StoreRegKind kind)
3738 {
3739 TCGv_i32 tmp1, tmp2;
3740
3741 tmp1 = load_reg(s, a->rs);
3742 tmp2 = load_reg(s, a->rm);
3743 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
3744 tmp1 = load_reg(s, a->rn);
3745
3746 gen(tmp1, tmp1, tmp2);
3747
3748 if (logic_cc) {
3749 gen_logic_CC(tmp1);
3750 }
3751 return store_reg_kind(s, a->rd, tmp1, kind);
3752 }
3753
3754 static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
3755 void (*gen)(TCGv_i32, TCGv_i32),
3756 int logic_cc, StoreRegKind kind)
3757 {
3758 TCGv_i32 tmp1, tmp2;
3759
3760 tmp1 = load_reg(s, a->rs);
3761 tmp2 = load_reg(s, a->rm);
3762 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
3763
3764 gen(tmp2, tmp2);
3765 if (logic_cc) {
3766 gen_logic_CC(tmp2);
3767 }
3768 return store_reg_kind(s, a->rd, tmp2, kind);
3769 }
3770
3771 /*
3772 * Data-processing (immediate)
3773 *
3774 * Operate, with set flags, one register source,
3775 * one rotated immediate, and a destination.
3776 *
3777 * Note that logic_cc && a->rot setting CF based on the msb of the
3778 * immediate is the reason why we must pass in the unrotated form
3779 * of the immediate.
3780 */
3781 static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
3782 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
3783 int logic_cc, StoreRegKind kind)
3784 {
3785 TCGv_i32 tmp1;
3786 uint32_t imm;
3787
3788 imm = ror32(a->imm, a->rot);
3789 if (logic_cc && a->rot) {
3790 tcg_gen_movi_i32(cpu_CF, imm >> 31);
3791 }
3792 tmp1 = load_reg(s, a->rn);
3793
3794 gen(tmp1, tmp1, tcg_constant_i32(imm));
3795
3796 if (logic_cc) {
3797 gen_logic_CC(tmp1);
3798 }
3799 return store_reg_kind(s, a->rd, tmp1, kind);
3800 }
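/*
 * Example of the carry-out rule noted above: "ANDS r0, r1, #0x80000000" is
 * encoded as imm8 = 0x02 rotated right by 2, so ror32(2, 2) = 0x80000000
 * and, because the rotation is non-zero, CF is set from bit 31 of the
 * rotated immediate before gen_logic_CC() updates N and Z from the result.
 */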
3801
3802 static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
3803 void (*gen)(TCGv_i32, TCGv_i32),
3804 int logic_cc, StoreRegKind kind)
3805 {
3806 TCGv_i32 tmp;
3807 uint32_t imm;
3808
3809 imm = ror32(a->imm, a->rot);
3810 if (logic_cc && a->rot) {
3811 tcg_gen_movi_i32(cpu_CF, imm >> 31);
3812 }
3813
3814 tmp = tcg_temp_new_i32();
3815 gen(tmp, tcg_constant_i32(imm));
3816
3817 if (logic_cc) {
3818 gen_logic_CC(tmp);
3819 }
3820 return store_reg_kind(s, a->rd, tmp, kind);
3821 }
3822
3823 #define DO_ANY3(NAME, OP, L, K) \
3824 static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \
3825 { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \
3826 static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \
3827 { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \
3828 static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \
3829 { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }
3830
3831 #define DO_ANY2(NAME, OP, L, K) \
3832 static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
3833 { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); } \
3834 static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
3835 { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \
3836 static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
3837 { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
3838
3839 #define DO_CMP2(NAME, OP, L) \
3840 static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
3841 { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
3842 static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
3843 { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
3844 static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
3845 { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
3846
3847 DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
3848 DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
3849 DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
3850 DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
3851
3852 DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
3853 DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
3854 DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
3855 DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
3856
3857 DO_CMP2(TST, tcg_gen_and_i32, true)
3858 DO_CMP2(TEQ, tcg_gen_xor_i32, true)
3859 DO_CMP2(CMN, gen_add_CC, false)
3860 DO_CMP2(CMP, gen_sub_CC, false)
3861
3862 DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
3863 a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
3864
3865 /*
3866 * Note for the computation of StoreRegKind we return out of the
3867 * middle of the functions that are expanded by DO_ANY3, and that
3868 * we modify a->s via that parameter before it is used by OP.
3869 */
3870 DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
3871 ({
3872 StoreRegKind ret = STREG_NORMAL;
3873 if (a->rd == 15 && a->s) {
3874 /*
3875 * See ALUExceptionReturn:
3876 * In User mode, UNPREDICTABLE; we choose UNDEF.
3877 * In Hyp mode, UNDEFINED.
3878 */
3879 if (IS_USER(s) || s->current_el == 2) {
3880 unallocated_encoding(s);
3881 return true;
3882 }
3883 /* There is no writeback of nzcv to PSTATE. */
3884 a->s = 0;
3885 ret = STREG_EXC_RET;
3886 } else if (a->rd == 13 && a->rn == 13) {
3887 ret = STREG_SP_CHECK;
3888 }
3889 ret;
3890 }))
3891
3892 DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
3893 ({
3894 StoreRegKind ret = STREG_NORMAL;
3895 if (a->rd == 15 && a->s) {
3896 /*
3897 * See ALUExceptionReturn:
3898 * In User mode, UNPREDICTABLE; we choose UNDEF.
3899 * In Hyp mode, UNDEFINED.
3900 */
3901 if (IS_USER(s) || s->current_el == 2) {
3902 unallocated_encoding(s);
3903 return true;
3904 }
3905 /* There is no writeback of nzcv to PSTATE. */
3906 a->s = 0;
3907 ret = STREG_EXC_RET;
3908 } else if (a->rd == 13) {
3909 ret = STREG_SP_CHECK;
3910 }
3911 ret;
3912 }))
3913
3914 DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
3915
3916 /*
3917 * ORN is only available with T32, so there is no register-shifted-register
3918 * form of the insn. Using the DO_ANY3 macro would create an unused function.
3919 */
3920 static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
3921 {
3922 return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
3923 }
3924
3925 static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
3926 {
3927 return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
3928 }
3929
3930 #undef DO_ANY3
3931 #undef DO_ANY2
3932 #undef DO_CMP2
3933
3934 static bool trans_ADR(DisasContext *s, arg_ri *a)
3935 {
3936 store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
3937 return true;
3938 }
3939
3940 static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
3941 {
3942 if (!ENABLE_ARCH_6T2) {
3943 return false;
3944 }
3945
3946 store_reg(s, a->rd, tcg_constant_i32(a->imm));
3947 return true;
3948 }
3949
3950 static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
3951 {
3952 TCGv_i32 tmp;
3953
3954 if (!ENABLE_ARCH_6T2) {
3955 return false;
3956 }
3957
3958 tmp = load_reg(s, a->rd);
3959 tcg_gen_ext16u_i32(tmp, tmp);
3960 tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
3961 store_reg(s, a->rd, tmp);
3962 return true;
3963 }
3964
3965 /*
3966 * v8.1M MVE wide-shifts
3967 */
3968 static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
3969 WideShiftImmFn *fn)
3970 {
3971 TCGv_i64 rda;
3972 TCGv_i32 rdalo, rdahi;
3973
3974 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
3975 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
3976 return false;
3977 }
3978 if (a->rdahi == 15) {
3979 /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
3980 return false;
3981 }
3982 if (!dc_isar_feature(aa32_mve, s) ||
3983 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
3984 a->rdahi == 13) {
3985 /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
3986 unallocated_encoding(s);
3987 return true;
3988 }
3989
3990 if (a->shim == 0) {
3991 a->shim = 32;
3992 }
3993
3994 rda = tcg_temp_new_i64();
3995 rdalo = load_reg(s, a->rdalo);
3996 rdahi = load_reg(s, a->rdahi);
3997 tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
3998
3999 fn(rda, rda, a->shim);
4000
4001 tcg_gen_extrl_i64_i32(rdalo, rda);
4002 tcg_gen_extrh_i64_i32(rdahi, rda);
4003 store_reg(s, a->rdalo, rdalo);
4004 store_reg(s, a->rdahi, rdahi);
4005
4006 return true;
4007 }
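/*
 * Usage note for the helper above: the MVE long shifts such as
 * "asrl r0, r1, #8" treat the register pair RdaHi:RdaLo (here r1:r0) as one
 * 64-bit value, which is why the two halves are concatenated into an i64,
 * shifted, and then split back into the low and high registers.
 */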
4008
4009 static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4010 {
4011 return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
4012 }
4013
4014 static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4015 {
4016 return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
4017 }
4018
4019 static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4020 {
4021 return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
4022 }
4023
4024 static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
4025 {
4026 gen_helper_mve_sqshll(r, tcg_env, n, tcg_constant_i32(shift));
4027 }
4028
4029 static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4030 {
4031 return do_mve_shl_ri(s, a, gen_mve_sqshll);
4032 }
4033
4034 static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
4035 {
4036 gen_helper_mve_uqshll(r, tcg_env, n, tcg_constant_i32(shift));
4037 }
4038
4039 static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
4040 {
4041 return do_mve_shl_ri(s, a, gen_mve_uqshll);
4042 }
4043
4044 static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4045 {
4046 return do_mve_shl_ri(s, a, gen_srshr64_i64);
4047 }
4048
4049 static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
4050 {
4051 return do_mve_shl_ri(s, a, gen_urshr64_i64);
4052 }
4053
4054 static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
4055 {
4056 TCGv_i64 rda;
4057 TCGv_i32 rdalo, rdahi;
4058
4059 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4060 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4061 return false;
4062 }
4063 if (a->rdahi == 15) {
4064 /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
4065 return false;
4066 }
4067 if (!dc_isar_feature(aa32_mve, s) ||
4068 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4069 a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
4070 a->rm == a->rdahi || a->rm == a->rdalo) {
4071 /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
4072 unallocated_encoding(s);
4073 return true;
4074 }
4075
4076 rda = tcg_temp_new_i64();
4077 rdalo = load_reg(s, a->rdalo);
4078 rdahi = load_reg(s, a->rdahi);
4079 tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
4080
4081 /* The helper takes care of the sign-extension of the low 8 bits of Rm */
4082 fn(rda, tcg_env, rda, cpu_R[a->rm]);
4083
4084 tcg_gen_extrl_i64_i32(rdalo, rda);
4085 tcg_gen_extrh_i64_i32(rdahi, rda);
4086 store_reg(s, a->rdalo, rdalo);
4087 store_reg(s, a->rdahi, rdahi);
4088
4089 return true;
4090 }
4091
4092 static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
4093 {
4094 return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
4095 }
4096
4097 static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
4098 {
4099 return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
4100 }
4101
4102 static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
4103 {
4104 return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
4105 }
4106
4107 static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
4108 {
4109 return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
4110 }
4111
4112 static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
4113 {
4114 return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
4115 }
4116
4117 static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
4118 {
4119 return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
4120 }
4121
4122 static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
4123 {
4124 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4125 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4126 return false;
4127 }
4128 if (!dc_isar_feature(aa32_mve, s) ||
4129 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4130 a->rda == 13 || a->rda == 15) {
4131 /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
4132 unallocated_encoding(s);
4133 return true;
4134 }
4135
4136 if (a->shim == 0) {
4137 a->shim = 32;
4138 }
4139 fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
4140
4141 return true;
4142 }
4143
4144 static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
4145 {
4146 return do_mve_sh_ri(s, a, gen_urshr32_i32);
4147 }
4148
4149 static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
4150 {
4151 return do_mve_sh_ri(s, a, gen_srshr32_i32);
4152 }
4153
4154 static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
4155 {
4156 gen_helper_mve_sqshl(r, tcg_env, n, tcg_constant_i32(shift));
4157 }
4158
4159 static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
4160 {
4161 return do_mve_sh_ri(s, a, gen_mve_sqshl);
4162 }
4163
4164 static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
4165 {
4166 gen_helper_mve_uqshl(r, tcg_env, n, tcg_constant_i32(shift));
4167 }
4168
4169 static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
4170 {
4171 return do_mve_sh_ri(s, a, gen_mve_uqshl);
4172 }
4173
4174 static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
4175 {
4176 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
4177 /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
4178 return false;
4179 }
4180 if (!dc_isar_feature(aa32_mve, s) ||
4181 !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
4182 a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
4183 a->rm == a->rda) {
4184 /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
4185 unallocated_encoding(s);
4186 return true;
4187 }
4188
4189 /* The helper takes care of the sign-extension of the low 8 bits of Rm */
4190 fn(cpu_R[a->rda], tcg_env, cpu_R[a->rda], cpu_R[a->rm]);
4191 return true;
4192 }
4193
4194 static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
4195 {
4196 return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
4197 }
4198
4199 static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
4200 {
4201 return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
4202 }
4203
4204 /*
4205 * Multiply and multiply accumulate
4206 */
4207
4208 static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
4209 {
4210 TCGv_i32 t1, t2;
4211
4212 t1 = load_reg(s, a->rn);
4213 t2 = load_reg(s, a->rm);
4214 tcg_gen_mul_i32(t1, t1, t2);
4215 if (add) {
4216 t2 = load_reg(s, a->ra);
4217 tcg_gen_add_i32(t1, t1, t2);
4218 }
4219 if (a->s) {
4220 gen_logic_CC(t1);
4221 }
4222 store_reg(s, a->rd, t1);
4223 return true;
4224 }
4225
4226 static bool trans_MUL(DisasContext *s, arg_MUL *a)
4227 {
4228 return op_mla(s, a, false);
4229 }
4230
4231 static bool trans_MLA(DisasContext *s, arg_MLA *a)
4232 {
4233 return op_mla(s, a, true);
4234 }
4235
4236 static bool trans_MLS(DisasContext *s, arg_MLS *a)
4237 {
4238 TCGv_i32 t1, t2;
4239
4240 if (!ENABLE_ARCH_6T2) {
4241 return false;
4242 }
4243 t1 = load_reg(s, a->rn);
4244 t2 = load_reg(s, a->rm);
4245 tcg_gen_mul_i32(t1, t1, t2);
4246 t2 = load_reg(s, a->ra);
4247 tcg_gen_sub_i32(t1, t2, t1);
4248 store_reg(s, a->rd, t1);
4249 return true;
4250 }
4251
4252 static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
4253 {
4254 TCGv_i32 t0, t1, t2, t3;
4255
4256 t0 = load_reg(s, a->rm);
4257 t1 = load_reg(s, a->rn);
4258 if (uns) {
4259 tcg_gen_mulu2_i32(t0, t1, t0, t1);
4260 } else {
4261 tcg_gen_muls2_i32(t0, t1, t0, t1);
4262 }
4263 if (add) {
4264 t2 = load_reg(s, a->ra);
4265 t3 = load_reg(s, a->rd);
4266 tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
4267 }
4268 if (a->s) {
4269 gen_logicq_cc(t0, t1);
4270 }
4271 store_reg(s, a->ra, t0);
4272 store_reg(s, a->rd, t1);
4273 return true;
4274 }
4275
4276 static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
4277 {
4278 return op_mlal(s, a, true, false);
4279 }
4280
4281 static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
4282 {
4283 return op_mlal(s, a, false, false);
4284 }
4285
4286 static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
4287 {
4288 return op_mlal(s, a, true, true);
4289 }
4290
4291 static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
4292 {
4293 return op_mlal(s, a, false, true);
4294 }
4295
4296 static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
4297 {
4298 TCGv_i32 t0, t1, t2, zero;
4299
4300 if (s->thumb
4301 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4302 : !ENABLE_ARCH_6) {
4303 return false;
4304 }
4305
4306 t0 = load_reg(s, a->rm);
4307 t1 = load_reg(s, a->rn);
4308 tcg_gen_mulu2_i32(t0, t1, t0, t1);
4309 zero = tcg_constant_i32(0);
4310 t2 = load_reg(s, a->ra);
4311 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
4312 t2 = load_reg(s, a->rd);
4313 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
4314 store_reg(s, a->ra, t0);
4315 store_reg(s, a->rd, t1);
4316 return true;
4317 }
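/*
 * Note on UMAAL above: the double accumulate cannot overflow 64 bits, since
 * the largest possible value is
 * (2^32 - 1) * (2^32 - 1) + (2^32 - 1) + (2^32 - 1) = 2^64 - 1,
 * so two plain add2 operations with a zero high word are sufficient.
 */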
4318
4319 /*
4320 * Saturating addition and subtraction
4321 */
4322
4323 static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
4324 {
4325 TCGv_i32 t0, t1;
4326
4327 if (s->thumb
4328 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4329 : !ENABLE_ARCH_5TE) {
4330 return false;
4331 }
4332
4333 t0 = load_reg(s, a->rm);
4334 t1 = load_reg(s, a->rn);
4335 if (doub) {
4336 gen_helper_add_saturate(t1, tcg_env, t1, t1);
4337 }
4338 if (add) {
4339 gen_helper_add_saturate(t0, tcg_env, t0, t1);
4340 } else {
4341 gen_helper_sub_saturate(t0, tcg_env, t0, t1);
4342 }
4343 store_reg(s, a->rd, t0);
4344 return true;
4345 }
4346
4347 #define DO_QADDSUB(NAME, ADD, DOUB) \
4348 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
4349 { \
4350 return op_qaddsub(s, a, ADD, DOUB); \
4351 }
4352
4353 DO_QADDSUB(QADD, true, false)
4354 DO_QADDSUB(QSUB, false, false)
4355 DO_QADDSUB(QDADD, true, true)
4356 DO_QADDSUB(QDSUB, false, true)
4357
4358 #undef DO_QADDSUB
4359
4360 /*
4361 * Halfword multiply and multiply accumulate
4362 */
4363
4364 static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
4365 int add_long, bool nt, bool mt)
4366 {
4367 TCGv_i32 t0, t1, tl, th;
4368
4369 if (s->thumb
4370 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
4371 : !ENABLE_ARCH_5TE) {
4372 return false;
4373 }
4374
4375 t0 = load_reg(s, a->rn);
4376 t1 = load_reg(s, a->rm);
4377 gen_mulxy(t0, t1, nt, mt);
4378
4379 switch (add_long) {
4380 case 0:
4381 store_reg(s, a->rd, t0);
4382 break;
4383 case 1:
4384 t1 = load_reg(s, a->ra);
4385 gen_helper_add_setq(t0, tcg_env, t0, t1);
4386 store_reg(s, a->rd, t0);
4387 break;
4388 case 2:
4389 tl = load_reg(s, a->ra);
4390 th = load_reg(s, a->rd);
4391 /* Sign-extend the 32-bit product to 64 bits. */
4392 t1 = tcg_temp_new_i32();
4393 tcg_gen_sari_i32(t1, t0, 31);
4394 tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
4395 store_reg(s, a->ra, tl);
4396 store_reg(s, a->rd, th);
4397 break;
4398 default:
4399 g_assert_not_reached();
4400 }
4401 return true;
4402 }
4403
4404 #define DO_SMLAX(NAME, add, nt, mt) \
4405 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
4406 { \
4407 return op_smlaxxx(s, a, add, nt, mt); \
4408 }
4409
4410 DO_SMLAX(SMULBB, 0, 0, 0)
4411 DO_SMLAX(SMULBT, 0, 0, 1)
4412 DO_SMLAX(SMULTB, 0, 1, 0)
4413 DO_SMLAX(SMULTT, 0, 1, 1)
4414
4415 DO_SMLAX(SMLABB, 1, 0, 0)
4416 DO_SMLAX(SMLABT, 1, 0, 1)
4417 DO_SMLAX(SMLATB, 1, 1, 0)
4418 DO_SMLAX(SMLATT, 1, 1, 1)
4419
4420 DO_SMLAX(SMLALBB, 2, 0, 0)
4421 DO_SMLAX(SMLALBT, 2, 0, 1)
4422 DO_SMLAX(SMLALTB, 2, 1, 0)
4423 DO_SMLAX(SMLALTT, 2, 1, 1)
4424
4425 #undef DO_SMLAX
4426
4427 static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
4428 {
4429 TCGv_i32 t0, t1;
4430
4431 if (!ENABLE_ARCH_5TE) {
4432 return false;
4433 }
4434
4435 t0 = load_reg(s, a->rn);
4436 t1 = load_reg(s, a->rm);
4437 /*
4438 * Since the nominal result is product<47:16>, shift the 16-bit
4439 * input up by 16 bits, so that the result is at product<63:32>.
4440 */
4441 if (mt) {
4442 tcg_gen_andi_i32(t1, t1, 0xffff0000);
4443 } else {
4444 tcg_gen_shli_i32(t1, t1, 16);
4445 }
4446 tcg_gen_muls2_i32(t0, t1, t0, t1);
4447 if (add) {
4448 t0 = load_reg(s, a->ra);
4449 gen_helper_add_setq(t1, tcg_env, t1, t0);
4450 }
4451 store_reg(s, a->rd, t1);
4452 return true;
4453 }
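/*
 * Worked example of the shift trick above: SMULWB with Rn = 0x00010000 and
 * Rm<15:0> = 2 should produce (65536 * 2)<47:16> = 2.  Pre-shifting the
 * 16-bit operand gives 65536 * 0x20000 = 0x200000000, whose high 32 bits
 * (t1 after the muls2) are indeed 2.
 */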
4454
4455 #define DO_SMLAWX(NAME, add, mt) \
4456 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
4457 { \
4458 return op_smlawx(s, a, add, mt); \
4459 }
4460
4461 DO_SMLAWX(SMULWB, 0, 0)
4462 DO_SMLAWX(SMULWT, 0, 1)
4463 DO_SMLAWX(SMLAWB, 1, 0)
4464 DO_SMLAWX(SMLAWT, 1, 1)
4465
4466 #undef DO_SMLAWX
4467
4468 /*
4469 * MSR (immediate) and hints
4470 */
4471
4472 static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
4473 {
4474 /*
4475 * When running single-threaded TCG code, use the helper to ensure that
4476 * the next round-robin scheduled vCPU gets a crack. When running in
4477 * MTTCG we don't generate jumps to the helper as it won't affect the
4478 * scheduling of other vCPUs.
4479 */
4480 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4481 gen_update_pc(s, curr_insn_len(s));
4482 s->base.is_jmp = DISAS_YIELD;
4483 }
4484 return true;
4485 }
4486
4487 static bool trans_WFE(DisasContext *s, arg_WFE *a)
4488 {
4489 /*
4490 * When running single-threaded TCG code, use the helper to ensure that
4491 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4492 * just skip this instruction. Currently the SEV/SEVL instructions,
4493 * which are *one* of many ways to wake the CPU from WFE, are not
4494 * implemented so we can't sleep like WFI does.
4495 */
4496 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4497 gen_update_pc(s, curr_insn_len(s));
4498 s->base.is_jmp = DISAS_WFE;
4499 }
4500 return true;
4501 }
4502
4503 static bool trans_WFI(DisasContext *s, arg_WFI *a)
4504 {
4505 /* For WFI, halt the vCPU until an IRQ. */
4506 gen_update_pc(s, curr_insn_len(s));
4507 s->base.is_jmp = DISAS_WFI;
4508 return true;
4509 }
4510
4511 static bool trans_ESB(DisasContext *s, arg_ESB *a)
4512 {
4513 /*
4514 * For M-profile, minimal-RAS ESB can be a NOP.
4515 * Without RAS, we must implement this as NOP.
4516 */
4517 if (!arm_dc_feature(s, ARM_FEATURE_M) && dc_isar_feature(aa32_ras, s)) {
4518 /*
4519 * QEMU does not have a source of physical SErrors,
4520 * so we are only concerned with virtual SErrors.
4521 * The pseudocode in the ARM for this case is
4522 * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
4523 * AArch32.vESBOperation();
4524 * Most of the condition can be evaluated at translation time.
4525 * Test for EL2 present, and defer test for SEL2 to runtime.
4526 */
4527 if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
4528 gen_helper_vesb(tcg_env);
4529 }
4530 }
4531 return true;
4532 }
4533
4534 static bool trans_NOP(DisasContext *s, arg_NOP *a)
4535 {
4536 return true;
4537 }
4538
4539 static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
4540 {
4541 uint32_t val = ror32(a->imm, a->rot * 2);
4542 uint32_t mask = msr_mask(s, a->mask, a->r);
4543
4544 if (gen_set_psr_im(s, mask, a->r, val)) {
4545 unallocated_encoding(s);
4546 }
4547 return true;
4548 }
4549
4550 /*
4551 * Cyclic Redundancy Check
4552 */
4553
4554 static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
4555 {
4556 TCGv_i32 t1, t2, t3;
4557
4558 if (!dc_isar_feature(aa32_crc32, s)) {
4559 return false;
4560 }
4561
4562 t1 = load_reg(s, a->rn);
4563 t2 = load_reg(s, a->rm);
4564 switch (sz) {
4565 case MO_8:
4566 gen_uxtb(t2);
4567 break;
4568 case MO_16:
4569 gen_uxth(t2);
4570 break;
4571 case MO_32:
4572 break;
4573 default:
4574 g_assert_not_reached();
4575 }
4576 t3 = tcg_constant_i32(1 << sz);
4577 if (c) {
4578 gen_helper_crc32c(t1, t1, t2, t3);
4579 } else {
4580 gen_helper_crc32(t1, t1, t2, t3);
4581 }
4582 store_reg(s, a->rd, t1);
4583 return true;
4584 }
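/*
 * The third helper operand above is the operand size in bytes
 * (1 << MO_8 == 1, 1 << MO_16 == 2, 1 << MO_32 == 4); the crc32 helpers use
 * it to decide how many bytes of Rm to fold into the running checksum.
 */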
4585
4586 #define DO_CRC32(NAME, c, sz) \
4587 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
4588 { return op_crc32(s, a, c, sz); }
4589
4590 DO_CRC32(CRC32B, false, MO_8)
4591 DO_CRC32(CRC32H, false, MO_16)
4592 DO_CRC32(CRC32W, false, MO_32)
4593 DO_CRC32(CRC32CB, true, MO_8)
4594 DO_CRC32(CRC32CH, true, MO_16)
4595 DO_CRC32(CRC32CW, true, MO_32)
4596
4597 #undef DO_CRC32
4598
4599 /*
4600 * Miscellaneous instructions
4601 */
4602
4603 static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
4604 {
4605 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4606 return false;
4607 }
4608 gen_mrs_banked(s, a->r, a->sysm, a->rd);
4609 return true;
4610 }
4611
4612 static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
4613 {
4614 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4615 return false;
4616 }
4617 gen_msr_banked(s, a->r, a->sysm, a->rn);
4618 return true;
4619 }
4620
4621 static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
4622 {
4623 TCGv_i32 tmp;
4624
4625 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4626 return false;
4627 }
4628 if (a->r) {
4629 if (IS_USER(s)) {
4630 unallocated_encoding(s);
4631 return true;
4632 }
4633 tmp = load_cpu_field(spsr);
4634 } else {
4635 tmp = tcg_temp_new_i32();
4636 gen_helper_cpsr_read(tmp, tcg_env);
4637 }
4638 store_reg(s, a->rd, tmp);
4639 return true;
4640 }
4641
4642 static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
4643 {
4644 TCGv_i32 tmp;
4645 uint32_t mask = msr_mask(s, a->mask, a->r);
4646
4647 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4648 return false;
4649 }
4650 tmp = load_reg(s, a->rn);
4651 if (gen_set_psr(s, mask, a->r, tmp)) {
4652 unallocated_encoding(s);
4653 }
4654 return true;
4655 }
4656
4657 static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
4658 {
4659 TCGv_i32 tmp;
4660
4661 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
4662 return false;
4663 }
4664 tmp = tcg_temp_new_i32();
4665 gen_helper_v7m_mrs(tmp, tcg_env, tcg_constant_i32(a->sysm));
4666 store_reg(s, a->rd, tmp);
4667 return true;
4668 }
4669
4670 static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
4671 {
4672 TCGv_i32 addr, reg;
4673
4674 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
4675 return false;
4676 }
4677 addr = tcg_constant_i32((a->mask << 10) | a->sysm);
4678 reg = load_reg(s, a->rn);
4679 gen_helper_v7m_msr(tcg_env, addr, reg);
4680 /* If we wrote to CONTROL, the EL might have changed */
4681 gen_rebuild_hflags(s, true);
4682 gen_lookup_tb(s);
4683 return true;
4684 }
4685
4686 static bool trans_BX(DisasContext *s, arg_BX *a)
4687 {
4688 if (!ENABLE_ARCH_4T) {
4689 return false;
4690 }
4691 gen_bx_excret(s, load_reg(s, a->rm));
4692 return true;
4693 }
4694
4695 static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
4696 {
4697 if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
4698 return false;
4699 }
4700 /*
4701 * v7A allows BXJ to be trapped via HSTR.TJDBX. We don't waste a
4702 * TBFLAGS bit on a basically-never-happens case, so call a helper
4703 * function to check for the trap and raise the exception if needed
4704 * (passing it the register number for the syndrome value).
4705 * v8A doesn't have this HSTR bit.
4706 */
4707 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4708 arm_dc_feature(s, ARM_FEATURE_EL2) &&
4709 s->current_el < 2 && s->ns) {
4710 gen_helper_check_bxj_trap(tcg_env, tcg_constant_i32(a->rm));
4711 }
4712 /* Trivial implementation equivalent to bx. */
4713 gen_bx(s, load_reg(s, a->rm));
4714 return true;
4715 }
4716
4717 static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
4718 {
4719 TCGv_i32 tmp;
4720
4721 if (!ENABLE_ARCH_5) {
4722 return false;
4723 }
4724 tmp = load_reg(s, a->rm);
4725 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
4726 gen_bx(s, tmp);
4727 return true;
4728 }
4729
4730 /*
4731 * BXNS/BLXNS: only exist for v8M with the security extensions,
4732 * and always UNDEF if NonSecure. We don't implement these in
4733 * the user-only mode either (in theory you can use them from
4734 * Secure User mode but they are too tied in to system emulation).
4735 */
4736 static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
4737 {
4738 if (!s->v8m_secure || IS_USER_ONLY) {
4739 unallocated_encoding(s);
4740 } else {
4741 gen_bxns(s, a->rm);
4742 }
4743 return true;
4744 }
4745
4746 static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
4747 {
4748 if (!s->v8m_secure || IS_USER_ONLY) {
4749 unallocated_encoding(s);
4750 } else {
4751 gen_blxns(s, a->rm);
4752 }
4753 return true;
4754 }
4755
4756 static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
4757 {
4758 TCGv_i32 tmp;
4759
4760 if (!ENABLE_ARCH_5) {
4761 return false;
4762 }
4763 tmp = load_reg(s, a->rm);
4764 tcg_gen_clzi_i32(tmp, tmp, 32);
4765 store_reg(s, a->rd, tmp);
4766 return true;
4767 }
4768
4769 static bool trans_ERET(DisasContext *s, arg_ERET *a)
4770 {
4771 TCGv_i32 tmp;
4772
4773 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
4774 return false;
4775 }
4776 if (IS_USER(s)) {
4777 unallocated_encoding(s);
4778 return true;
4779 }
4780 if (s->current_el == 2) {
4781 /* ERET from Hyp uses ELR_Hyp, not LR */
4782 tmp = load_cpu_field_low32(elr_el[2]);
4783 } else {
4784 tmp = load_reg(s, 14);
4785 }
4786 gen_exception_return(s, tmp);
4787 return true;
4788 }
4789
4790 static bool trans_HLT(DisasContext *s, arg_HLT *a)
4791 {
4792 gen_hlt(s, a->imm);
4793 return true;
4794 }
4795
4796 static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
4797 {
4798 if (!ENABLE_ARCH_5) {
4799 return false;
4800 }
4801 /* BKPT is OK with ECI set and leaves it untouched */
4802 s->eci_handled = true;
4803 if (arm_dc_feature(s, ARM_FEATURE_M) &&
4804 semihosting_enabled(s->current_el == 0) &&
4805 (a->imm == 0xab)) {
4806 gen_exception_internal_insn(s, EXCP_SEMIHOST);
4807 } else {
4808 gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
4809 }
4810 return true;
4811 }
4812
4813 static bool trans_HVC(DisasContext *s, arg_HVC *a)
4814 {
4815 if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
4816 return false;
4817 }
4818 if (IS_USER(s)) {
4819 unallocated_encoding(s);
4820 } else {
4821 gen_hvc(s, a->imm);
4822 }
4823 return true;
4824 }
4825
4826 static bool trans_SMC(DisasContext *s, arg_SMC *a)
4827 {
4828 if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
4829 return false;
4830 }
4831 if (IS_USER(s)) {
4832 unallocated_encoding(s);
4833 } else {
4834 gen_smc(s);
4835 }
4836 return true;
4837 }
4838
4839 static bool trans_SG(DisasContext *s, arg_SG *a)
4840 {
4841 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
4842 !arm_dc_feature(s, ARM_FEATURE_V8)) {
4843 return false;
4844 }
4845 /*
4846 * SG (v8M only)
4847 * The bulk of the behaviour for this instruction is implemented
4848 * in v7m_handle_execute_nsc(), which deals with the insn when
4849 * it is executed by a CPU in non-secure state from memory
4850 * which is Secure & NonSecure-Callable.
4851 * Here we only need to handle the remaining cases:
4852 * * in NS memory (including the "security extension not
4853 * implemented" case) : NOP
4854 * * in S memory but CPU already secure (clear IT bits)
4855 * We know that the attribute for the memory this insn is
4856 * in must match the current CPU state, because otherwise
4857 * get_phys_addr_pmsav8 would have generated an exception.
4858 */
4859 if (s->v8m_secure) {
4860 /* Like the IT insn, we don't need to generate any code */
4861 s->condexec_cond = 0;
4862 s->condexec_mask = 0;
4863 }
4864 return true;
4865 }
4866
4867 static bool trans_TT(DisasContext *s, arg_TT *a)
4868 {
4869 TCGv_i32 addr, tmp;
4870
4871 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
4872 !arm_dc_feature(s, ARM_FEATURE_V8)) {
4873 return false;
4874 }
4875 if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
4876 /* We UNDEF for these UNPREDICTABLE cases */
4877 unallocated_encoding(s);
4878 return true;
4879 }
4880 if (a->A && !s->v8m_secure) {
4881 /* This case is UNDEFINED. */
4882 unallocated_encoding(s);
4883 return true;
4884 }
4885
4886 addr = load_reg(s, a->rn);
4887 tmp = tcg_temp_new_i32();
4888 gen_helper_v7m_tt(tmp, tcg_env, addr, tcg_constant_i32((a->A << 1) | a->T));
4889 store_reg(s, a->rd, tmp);
4890 return true;
4891 }
4892
4893 /*
4894 * Load/store register index
4895 */
4896
4897 static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
4898 {
4899 ISSInfo ret;
4900
4901 /* ISS not valid if writeback */
4902 if (p && !w) {
4903 ret = rd;
4904 if (curr_insn_len(s) == 2) {
4905 ret |= ISSIs16Bit;
4906 }
4907 } else {
4908 ret = ISSInvalid;
4909 }
4910 return ret;
4911 }
4912
4913 static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
4914 {
4915 TCGv_i32 addr = load_reg(s, a->rn);
4916
4917 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
4918 gen_helper_v8m_stackcheck(tcg_env, addr);
4919 }
4920
4921 if (a->p) {
4922 TCGv_i32 ofs = load_reg(s, a->rm);
4923 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
4924 if (a->u) {
4925 tcg_gen_add_i32(addr, addr, ofs);
4926 } else {
4927 tcg_gen_sub_i32(addr, addr, ofs);
4928 }
4929 }
4930 return addr;
4931 }
4932
4933 static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
4934 TCGv_i32 addr)
4935 {
4936 if (!a->p) {
4937 TCGv_i32 ofs = load_reg(s, a->rm);
4938 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
4939 if (a->u) {
4940 tcg_gen_add_i32(addr, addr, ofs);
4941 } else {
4942 tcg_gen_sub_i32(addr, addr, ofs);
4943 }
4944 } else if (!a->w) {
4945 return;
4946 }
4947 store_reg(s, a->rn, addr);
4948 }
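/*
 * Taken together, op_addr_rr_pre/op_addr_rr_post implement the three A32
 * addressing forms: "ldr r0, [r1, r2]" (p=1, w=0) uses the offset address
 * with no writeback, "ldr r0, [r1, r2]!" (p=1, w=1) writes the offset
 * address back to r1, and "ldr r0, [r1], r2" (p=0) accesses [r1] and then
 * updates r1 with the offset afterwards.
 */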
4949
4950 static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
4951 MemOp mop, int mem_idx)
4952 {
4953 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
4954 TCGv_i32 addr, tmp;
4955
4956 addr = op_addr_rr_pre(s, a);
4957
4958 tmp = tcg_temp_new_i32();
4959 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
4960 disas_set_da_iss(s, mop, issinfo);
4961
4962 /*
4963 * Perform base writeback before the loaded value to
4964 * ensure correct behavior with overlapping index registers.
4965 */
4966 op_addr_rr_post(s, a, addr);
4967 store_reg_from_load(s, a->rt, tmp);
4968 return true;
4969 }
4970
4971 static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
4972 MemOp mop, int mem_idx)
4973 {
4974 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
4975 TCGv_i32 addr, tmp;
4976
4977 /*
4978 * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
4979 * is either UNPREDICTABLE or has defined behaviour
4980 */
4981 if (s->thumb && a->rn == 15) {
4982 return false;
4983 }
4984
4985 addr = op_addr_rr_pre(s, a);
4986
4987 tmp = load_reg(s, a->rt);
4988 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
4989 disas_set_da_iss(s, mop, issinfo);
4990
4991 op_addr_rr_post(s, a, addr);
4992 return true;
4993 }
4994
4995 static void do_ldrd_load(DisasContext *s, TCGv_i32 addr, int rt, int rt2)
4996 {
4997 /*
4998 * LDRD is required to be an atomic 64-bit access if the
4999 * address is 8-aligned, two atomic 32-bit accesses if
5000 * it's only 4-aligned, and to give an alignment fault
5001 * if it's not 4-aligned. This is MO_ALIGN_4 | MO_ATOM_SUBALIGN.
5002 * Rt is always the word from the lower address, and Rt2 the
5003 * data from the higher address, regardless of endianness.
5004 * So (like gen_load_exclusive) we avoid gen_aa32_ld_i64()
5005 * so we don't get its SCTLR_B check, and instead do a 64-bit access
5006 * using MO_BE if appropriate and then split the two halves.
5007 *
5008 * For M-profile, and for A-profile before LPAE, the 64-bit
5009 * atomicity is not required. We could model that using
5010 * the looser MO_ATOM_IFALIGN_PAIR, but providing a higher
5011 * level of atomicity than required is harmless (we would not
5012 * currently generate better code for IFALIGN_PAIR here).
5013 *
5014 * This also gives us the correct behaviour of not updating
5015 * rt if the load of rt2 faults; this is required for cases
5016 * like "ldrd r2, r3, [r2]" where rt is also the base register.
5017 */
5018 int mem_idx = get_mem_index(s);
5019 MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data;
5020 TCGv taddr = gen_aa32_addr(s, addr, opc);
5021 TCGv_i64 t64 = tcg_temp_new_i64();
5022 TCGv_i32 tmp = tcg_temp_new_i32();
5023 TCGv_i32 tmp2 = tcg_temp_new_i32();
5024
5025 tcg_gen_qemu_ld_i64(t64, taddr, mem_idx, opc);
5026 if (s->be_data == MO_BE) {
5027 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
5028 } else {
5029 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
5030 }
5031 store_reg(s, rt, tmp);
5032 store_reg(s, rt2, tmp2);
5033 }
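/*
 * Note on the final split above: with a little-endian 64-bit load the word
 * from the lower address ends up in the low half of t64, so tmp (Rt) takes
 * the low half; with MO_BE it ends up in the high half instead, which is
 * why the extr destinations are swapped.  Either way Rt receives the word
 * from the lower address, as required.
 */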
5034
5035 static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
5036 {
5037 TCGv_i32 addr;
5038
5039 if (!ENABLE_ARCH_5TE) {
5040 return false;
5041 }
5042 if (a->rt & 1) {
5043 unallocated_encoding(s);
5044 return true;
5045 }
5046 addr = op_addr_rr_pre(s, a);
5047
5048 do_ldrd_load(s, addr, a->rt, a->rt + 1);
5049
5050 /* LDRD w/ base writeback is undefined if the registers overlap. */
5051 op_addr_rr_post(s, a, addr);
5052 return true;
5053 }
5054
5055 static void do_strd_store(DisasContext *s, TCGv_i32 addr, int rt, int rt2)
5056 {
5057 /*
5058 * STRD is required to be an atomic 64-bit access if the
5059 * address is 8-aligned, two atomic 32-bit accesses if
5060 * it's only 4-aligned, and to give an alignment fault
5061 * if it's not 4-aligned.
5062 * Rt is always the word from the lower address, and Rt2 the
5063 * data from the higher address, regardless of endianness.
5064 * So (like gen_store_exclusive) we avoid gen_aa32_st_i64()
5065 * so we don't get its SCTLR_B check, and instead do a 64-bit access
5066 * using MO_BE if appropriate, using a value constructed
5067 * by putting the two halves together in the right order.
5068 *
5069 * As with LDRD, the 64-bit atomicity is not required for
5070 * M-profile, or for A-profile before LPAE, and we provide
5071 * the higher guarantee always for simplicity.
5072 */
5073 int mem_idx = get_mem_index(s);
5074 MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data;
5075 TCGv taddr = gen_aa32_addr(s, addr, opc);
5076 TCGv_i32 t1 = load_reg(s, rt);
5077 TCGv_i32 t2 = load_reg(s, rt2);
5078 TCGv_i64 t64 = tcg_temp_new_i64();
5079
5080 if (s->be_data == MO_BE) {
5081 tcg_gen_concat_i32_i64(t64, t2, t1);
5082 } else {
5083 tcg_gen_concat_i32_i64(t64, t1, t2);
5084 }
5085 tcg_gen_qemu_st_i64(t64, taddr, mem_idx, opc);
5086 }
5087
5088 static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
5089 {
5090 TCGv_i32 addr;
5091
5092 if (!ENABLE_ARCH_5TE) {
5093 return false;
5094 }
5095 if (a->rt & 1) {
5096 unallocated_encoding(s);
5097 return true;
5098 }
5099 addr = op_addr_rr_pre(s, a);
5100
5101 do_strd_store(s, addr, a->rt, a->rt + 1);
5102
5103 op_addr_rr_post(s, a, addr);
5104 return true;
5105 }
5106
5107 /*
5108 * Load/store immediate index
5109 */
5110
5111 static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
5112 {
5113 int ofs = a->imm;
5114
5115 if (!a->u) {
5116 ofs = -ofs;
5117 }
5118
5119 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
5120 /*
5121 * Stackcheck. Here we know 'addr' is the current SP;
5122 * U is set if we're moving SP up, else down. It is
5123 * UNKNOWN whether the limit check triggers when SP starts
5124 * below the limit and ends up above it; we chose to do so.
5125 */
5126 if (!a->u) {
5127 TCGv_i32 newsp = tcg_temp_new_i32();
5128 tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
5129 gen_helper_v8m_stackcheck(tcg_env, newsp);
5130 } else {
5131 gen_helper_v8m_stackcheck(tcg_env, cpu_R[13]);
5132 }
5133 }
5134
5135 return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
5136 }
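/*
 * add_reg_for_lit() is used here rather than load_reg() so that the
 * rn == 15 literal forms (e.g. "ldr r0, [pc, #imm]") get the word-aligned
 * PC as the base address, as LDR (literal) requires.
 */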
5137
5138 static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
5139 TCGv_i32 addr)
5140 {
5141 int address_offset = 0;
5142 if (!a->p) {
5143 if (a->u) {
5144 address_offset = a->imm;
5145 } else {
5146 address_offset = -a->imm;
5147 }
5148 } else if (!a->w) {
5149 return;
5150 }
5151 tcg_gen_addi_i32(addr, addr, address_offset);
5152 store_reg(s, a->rn, addr);
5153 }
5154
5155 static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
5156 MemOp mop, int mem_idx)
5157 {
5158 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
5159 TCGv_i32 addr, tmp;
5160
5161 addr = op_addr_ri_pre(s, a);
5162
5163 tmp = tcg_temp_new_i32();
5164 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
5165 disas_set_da_iss(s, mop, issinfo);
5166
5167 /*
5168 * Perform base writeback before the loaded value to
5169 * ensure correct behavior with overlapping index registers.
5170 */
5171 op_addr_ri_post(s, a, addr);
5172 store_reg_from_load(s, a->rt, tmp);
5173 return true;
5174 }
5175
5176 static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
5177 MemOp mop, int mem_idx)
5178 {
5179 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
5180 TCGv_i32 addr, tmp;
5181
5182 /*
5183 * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
5184 * is either UNPREDICTABLE or has defined behaviour
5185 */
5186 if (s->thumb && a->rn == 15) {
5187 return false;
5188 }
5189
5190 addr = op_addr_ri_pre(s, a);
5191
5192 tmp = load_reg(s, a->rt);
5193 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
5194 disas_set_da_iss(s, mop, issinfo);
5195
5196 op_addr_ri_post(s, a, addr);
5197 return true;
5198 }
5199
5200 static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
5201 {
5202 TCGv_i32 addr;
5203
5204 addr = op_addr_ri_pre(s, a);
5205
5206 do_ldrd_load(s, addr, a->rt, rt2);
5207
5208 /* LDRD w/ base writeback is undefined if the registers overlap. */
5209 op_addr_ri_post(s, a, addr);
5210 return true;
5211 }
5212
5213 static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
5214 {
5215 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
5216 return false;
5217 }
5218 return op_ldrd_ri(s, a, a->rt + 1);
5219 }
5220
5221 static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
5222 {
5223 arg_ldst_ri b = {
5224 .u = a->u, .w = a->w, .p = a->p,
5225 .rn = a->rn, .rt = a->rt, .imm = a->imm
5226 };
5227 return op_ldrd_ri(s, &b, a->rt2);
5228 }
5229
5230 static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
5231 {
5232 TCGv_i32 addr;
5233
5234 addr = op_addr_ri_pre(s, a);
5235
5236 do_strd_store(s, addr, a->rt, rt2);
5237
5238 op_addr_ri_post(s, a, addr);
5239 return true;
5240 }
5241
5242 static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
5243 {
5244 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
5245 return false;
5246 }
5247 return op_strd_ri(s, a, a->rt + 1);
5248 }
5249
5250 static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
5251 {
5252 arg_ldst_ri b = {
5253 .u = a->u, .w = a->w, .p = a->p,
5254 .rn = a->rn, .rt = a->rt, .imm = a->imm
5255 };
5256 return op_strd_ri(s, &b, a->rt2);
5257 }
5258
5259 #define DO_LDST(NAME, WHICH, MEMOP) \
5260 static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
5261 { \
5262 return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
5263 } \
5264 static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
5265 { \
5266 return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
5267 } \
5268 static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
5269 { \
5270 return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
5271 } \
5272 static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
5273 { \
5274 return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
5275 }
5276
5277 DO_LDST(LDR, load, MO_UL)
5278 DO_LDST(LDRB, load, MO_UB)
5279 DO_LDST(LDRH, load, MO_UW)
5280 DO_LDST(LDRSB, load, MO_SB)
5281 DO_LDST(LDRSH, load, MO_SW)
5282
5283 DO_LDST(STR, store, MO_UL)
5284 DO_LDST(STRB, store, MO_UB)
5285 DO_LDST(STRH, store, MO_UW)
5286
5287 #undef DO_LDST
5288
5289 /*
5290 * Synchronization primitives
5291 */
5292
5293 static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
5294 {
5295 TCGv_i32 addr, tmp;
5296 TCGv taddr;
5297
5298 opc |= s->be_data;
5299 addr = load_reg(s, a->rn);
5300 taddr = gen_aa32_addr(s, addr, opc);
5301
5302 tmp = load_reg(s, a->rt2);
5303 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
5304
5305 store_reg(s, a->rt, tmp);
5306 return true;
5307 }
5308
5309 static bool trans_SWP(DisasContext *s, arg_SWP *a)
5310 {
5311 return op_swp(s, a, MO_UL | MO_ALIGN);
5312 }
5313
5314 static bool trans_SWPB(DisasContext *s, arg_SWP *a)
5315 {
5316 return op_swp(s, a, MO_UB);
5317 }
5318
5319 /*
5320 * Load/Store Exclusive and Load-Acquire/Store-Release
5321 */
5322
5323 static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
5324 {
5325 TCGv_i32 addr;
5326 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
5327 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
5328
5329 /* We UNDEF for these UNPREDICTABLE cases. */
5330 if (a->rd == 15 || a->rn == 15 || a->rt == 15
5331 || a->rd == a->rn || a->rd == a->rt
5332 || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
5333 || (mop == MO_64
5334 && (a->rt2 == 15
5335 || a->rd == a->rt2
5336 || (!v8a && s->thumb && a->rt2 == 13)))) {
5337 unallocated_encoding(s);
5338 return true;
5339 }
5340
5341 if (rel) {
5342 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
5343 }
5344
5345 addr = tcg_temp_new_i32();
5346 load_reg_var(s, addr, a->rn);
5347 tcg_gen_addi_i32(addr, addr, a->imm);
5348
5349 gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
5350 return true;
5351 }
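/*
 * gen_store_exclusive() also writes the status result into Rd (0 if the
 * store succeeded, 1 if the exclusive monitor did not permit it), giving
 * the architectural STREX behaviour.
 */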
5352
5353 static bool trans_STREX(DisasContext *s, arg_STREX *a)
5354 {
5355 if (!ENABLE_ARCH_6) {
5356 return false;
5357 }
5358 return op_strex(s, a, MO_32, false);
5359 }
5360
5361 static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
5362 {
5363 if (!ENABLE_ARCH_6K) {
5364 return false;
5365 }
5366 /* We UNDEF for these UNPREDICTABLE cases. */
5367 if (a->rt & 1) {
5368 unallocated_encoding(s);
5369 return true;
5370 }
5371 a->rt2 = a->rt + 1;
5372 return op_strex(s, a, MO_64, false);
5373 }
5374
5375 static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
5376 {
5377 return op_strex(s, a, MO_64, false);
5378 }
5379
5380 static bool trans_STREXB(DisasContext *s, arg_STREX *a)
5381 {
5382 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5383 return false;
5384 }
5385 return op_strex(s, a, MO_8, false);
5386 }
5387
5388 static bool trans_STREXH(DisasContext *s, arg_STREX *a)
5389 {
5390 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5391 return false;
5392 }
5393 return op_strex(s, a, MO_16, false);
5394 }
5395
5396 static bool trans_STLEX(DisasContext *s, arg_STREX *a)
5397 {
5398 if (!ENABLE_ARCH_8) {
5399 return false;
5400 }
5401 return op_strex(s, a, MO_32, true);
5402 }
5403
5404 static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
5405 {
5406 if (!ENABLE_ARCH_8) {
5407 return false;
5408 }
5409 /* We UNDEF for these UNPREDICTABLE cases. */
5410 if (a->rt & 1) {
5411 unallocated_encoding(s);
5412 return true;
5413 }
5414 a->rt2 = a->rt + 1;
5415 return op_strex(s, a, MO_64, true);
5416 }
5417
5418 static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
5419 {
5420 if (!ENABLE_ARCH_8) {
5421 return false;
5422 }
5423 return op_strex(s, a, MO_64, true);
5424 }
5425
5426 static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
5427 {
5428 if (!ENABLE_ARCH_8) {
5429 return false;
5430 }
5431 return op_strex(s, a, MO_8, true);
5432 }
5433
5434 static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
5435 {
5436 if (!ENABLE_ARCH_8) {
5437 return false;
5438 }
5439 return op_strex(s, a, MO_16, true);
5440 }
5441
5442 static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
5443 {
5444 TCGv_i32 addr, tmp;
5445
5446 if (!ENABLE_ARCH_8) {
5447 return false;
5448 }
5449 /* We UNDEF for these UNPREDICTABLE cases. */
5450 if (a->rn == 15 || a->rt == 15) {
5451 unallocated_encoding(s);
5452 return true;
5453 }
5454
5455 addr = load_reg(s, a->rn);
5456 tmp = load_reg(s, a->rt);
5457 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
5458 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
5459 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
5460
5461 return true;
5462 }
5463
5464 static bool trans_STL(DisasContext *s, arg_STL *a)
5465 {
5466 return op_stl(s, a, MO_UL);
5467 }
5468
5469 static bool trans_STLB(DisasContext *s, arg_STL *a)
5470 {
5471 return op_stl(s, a, MO_UB);
5472 }
5473
5474 static bool trans_STLH(DisasContext *s, arg_STL *a)
5475 {
5476 return op_stl(s, a, MO_UW);
5477 }
5478
5479 static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
5480 {
5481 TCGv_i32 addr;
5482 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
5483 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
5484
5485 /* We UNDEF for these UNPREDICTABLE cases. */
5486 if (a->rn == 15 || a->rt == 15
5487 || (!v8a && s->thumb && a->rt == 13)
5488 || (mop == MO_64
5489 && (a->rt2 == 15 || a->rt == a->rt2
5490 || (!v8a && s->thumb && a->rt2 == 13)))) {
5491 unallocated_encoding(s);
5492 return true;
5493 }
5494
5495 addr = tcg_temp_new_i32();
5496 load_reg_var(s, addr, a->rn);
5497 tcg_gen_addi_i32(addr, addr, a->imm);
5498
5499 gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
5500
5501 if (acq) {
5502 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
5503 }
5504 return true;
5505 }
5506
5507 static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
5508 {
5509 if (!ENABLE_ARCH_6) {
5510 return false;
5511 }
5512 return op_ldrex(s, a, MO_32, false);
5513 }
5514
5515 static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
5516 {
5517 if (!ENABLE_ARCH_6K) {
5518 return false;
5519 }
5520 /* We UNDEF for these UNPREDICTABLE cases. */
5521 if (a->rt & 1) {
5522 unallocated_encoding(s);
5523 return true;
5524 }
5525 a->rt2 = a->rt + 1;
5526 return op_ldrex(s, a, MO_64, false);
5527 }
5528
5529 static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
5530 {
5531 return op_ldrex(s, a, MO_64, false);
5532 }
5533
5534 static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
5535 {
5536 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5537 return false;
5538 }
5539 return op_ldrex(s, a, MO_8, false);
5540 }
5541
5542 static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
5543 {
5544 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
5545 return false;
5546 }
5547 return op_ldrex(s, a, MO_16, false);
5548 }
5549
5550 static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
5551 {
5552 if (!ENABLE_ARCH_8) {
5553 return false;
5554 }
5555 return op_ldrex(s, a, MO_32, true);
5556 }
5557
5558 static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
5559 {
5560 if (!ENABLE_ARCH_8) {
5561 return false;
5562 }
5563 /* We UNDEF for these UNPREDICTABLE cases. */
5564 if (a->rt & 1) {
5565 unallocated_encoding(s);
5566 return true;
5567 }
5568 a->rt2 = a->rt + 1;
5569 return op_ldrex(s, a, MO_64, true);
5570 }
5571
5572 static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
5573 {
5574 if (!ENABLE_ARCH_8) {
5575 return false;
5576 }
5577 return op_ldrex(s, a, MO_64, true);
5578 }
5579
5580 static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
5581 {
5582 if (!ENABLE_ARCH_8) {
5583 return false;
5584 }
5585 return op_ldrex(s, a, MO_8, true);
5586 }
5587
5588 static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
5589 {
5590 if (!ENABLE_ARCH_8) {
5591 return false;
5592 }
5593 return op_ldrex(s, a, MO_16, true);
5594 }
5595
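/*
 * Load-Acquire (LDA/LDAB/LDAH): an ordinary aligned load followed by
 * a barrier, the counterpart of op_stl() above; these are not
 * exclusive accesses, so the exclusive monitor is not involved.
 */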
5596 static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
5597 {
5598 TCGv_i32 addr, tmp;
5599
5600 if (!ENABLE_ARCH_8) {
5601 return false;
5602 }
5603 /* We UNDEF for these UNPREDICTABLE cases. */
5604 if (a->rn == 15 || a->rt == 15) {
5605 unallocated_encoding(s);
5606 return true;
5607 }
5608
5609 addr = load_reg(s, a->rn);
5610 tmp = tcg_temp_new_i32();
5611 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
5612 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
5613
5614 store_reg(s, a->rt, tmp);
5615 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
5616 return true;
5617 }
5618
5619 static bool trans_LDA(DisasContext *s, arg_LDA *a)
5620 {
5621 return op_lda(s, a, MO_UL);
5622 }
5623
5624 static bool trans_LDAB(DisasContext *s, arg_LDA *a)
5625 {
5626 return op_lda(s, a, MO_UB);
5627 }
5628
5629 static bool trans_LDAH(DisasContext *s, arg_LDA *a)
5630 {
5631 return op_lda(s, a, MO_UW);
5632 }
5633
5634 /*
5635 * Media instructions
5636 */
5637
5638 static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
5639 {
5640 TCGv_i32 t1, t2;
5641
5642 if (!ENABLE_ARCH_6) {
5643 return false;
5644 }
5645
5646 t1 = load_reg(s, a->rn);
5647 t2 = load_reg(s, a->rm);
5648 gen_helper_usad8(t1, t1, t2);
5649 if (a->ra != 15) {
5650 t2 = load_reg(s, a->ra);
5651 tcg_gen_add_i32(t1, t1, t2);
5652 }
5653 store_reg(s, a->rd, t1);
5654 return true;
5655 }
5656
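/*
 * UBFX/SBFX map directly onto the TCG extract/sextract ops; for
 * example UBFX with lsb == 8 and widthm1 == 7 copies bits [15:8] of
 * Rn into the low byte of Rd and zeroes the rest.
 */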
5657 static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
5658 {
5659 TCGv_i32 tmp;
5660 int width = a->widthm1 + 1;
5661 int shift = a->lsb;
5662
5663 if (!ENABLE_ARCH_6T2) {
5664 return false;
5665 }
5666 if (shift + width > 32) {
5667 /* UNPREDICTABLE; we choose to UNDEF */
5668 unallocated_encoding(s);
5669 return true;
5670 }
5671
5672 tmp = load_reg(s, a->rn);
5673 if (u) {
5674 tcg_gen_extract_i32(tmp, tmp, shift, width);
5675 } else {
5676 tcg_gen_sextract_i32(tmp, tmp, shift, width);
5677 }
5678 store_reg(s, a->rd, tmp);
5679 return true;
5680 }
5681
5682 static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
5683 {
5684 return op_bfx(s, a, false);
5685 }
5686
5687 static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
5688 {
5689 return op_bfx(s, a, true);
5690 }
5691
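/*
 * BFC and BFI share an encoding, with Rn == 15 meaning "insert
 * zeroes" (BFC). Either way this is a deposit of <width> bits into
 * Rd at <lsb>, leaving the remaining bits of Rd unchanged.
 */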
5692 static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
5693 {
5694 int msb = a->msb, lsb = a->lsb;
5695 TCGv_i32 t_in, t_rd;
5696 int width;
5697
5698 if (!ENABLE_ARCH_6T2) {
5699 return false;
5700 }
5701 if (msb < lsb) {
5702 /* UNPREDICTABLE; we choose to UNDEF */
5703 unallocated_encoding(s);
5704 return true;
5705 }
5706
5707 width = msb + 1 - lsb;
5708 if (a->rn == 15) {
5709 /* BFC */
5710 t_in = tcg_constant_i32(0);
5711 } else {
5712 /* BFI */
5713 t_in = load_reg(s, a->rn);
5714 }
5715 t_rd = load_reg(s, a->rd);
5716 tcg_gen_deposit_i32(t_rd, t_rd, t_in, lsb, width);
5717 store_reg(s, a->rd, t_rd);
5718 return true;
5719 }
5720
5721 static bool trans_UDF(DisasContext *s, arg_UDF *a)
5722 {
5723 unallocated_encoding(s);
5724 return true;
5725 }
5726
5727 /*
5728 * Parallel addition and subtraction
5729 */
5730
5731 static bool op_par_addsub(DisasContext *s, arg_rrr *a,
5732 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
5733 {
5734 TCGv_i32 t0, t1;
5735
5736 if (s->thumb
5737 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5738 : !ENABLE_ARCH_6) {
5739 return false;
5740 }
5741
5742 t0 = load_reg(s, a->rn);
5743 t1 = load_reg(s, a->rm);
5744
5745 gen(t0, t0, t1);
5746
5747 store_reg(s, a->rd, t0);
5748 return true;
5749 }
5750
5751 static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
5752 void (*gen)(TCGv_i32, TCGv_i32,
5753 TCGv_i32, TCGv_ptr))
5754 {
5755 TCGv_i32 t0, t1;
5756 TCGv_ptr ge;
5757
5758 if (s->thumb
5759 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5760 : !ENABLE_ARCH_6) {
5761 return false;
5762 }
5763
5764 t0 = load_reg(s, a->rn);
5765 t1 = load_reg(s, a->rm);
5766
5767 ge = tcg_temp_new_ptr();
5768 tcg_gen_addi_ptr(ge, tcg_env, offsetof(CPUARMState, GE));
5769 gen(t0, t0, t1, ge);
5770
5771 store_reg(s, a->rd, t0);
5772 return true;
5773 }
5774
5775 #define DO_PAR_ADDSUB(NAME, helper) \
5776 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
5777 { \
5778 return op_par_addsub(s, a, helper); \
5779 }
5780
5781 #define DO_PAR_ADDSUB_GE(NAME, helper) \
5782 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
5783 { \
5784 return op_par_addsub_ge(s, a, helper); \
5785 }
5786
5787 DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
5788 DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
5789 DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
5790 DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
5791 DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
5792 DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)
5793
5794 DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
5795 DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
5796 DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
5797 DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
5798 DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
5799 DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)
5800
5801 DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
5802 DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
5803 DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
5804 DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
5805 DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
5806 DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)
5807
5808 DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
5809 DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
5810 DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
5811 DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
5812 DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
5813 DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)
5814
5815 DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
5816 DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
5817 DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
5818 DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
5819 DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
5820 DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)
5821
5822 DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
5823 DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
5824 DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
5825 DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
5826 DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
5827 DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)
5828
5829 #undef DO_PAR_ADDSUB
5830 #undef DO_PAR_ADDSUB_GE
5831
5832 /*
5833 * Packing, unpacking, saturation, and reversal
5834 */
5835
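/*
 * PKHBT: Rd<15:0> = Rn<15:0>, Rd<31:16> = (Rm LSL shift)<31:16>.
 * PKHTB: Rd<31:16> = Rn<31:16>, Rd<15:0> = (Rm ASR shift)<15:0>,
 * where an encoded shift of 0 means ASR #32; shifting by 31 instead
 * yields the same low halfword and keeps the TCG shift in range.
 */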
5836 static bool trans_PKH(DisasContext *s, arg_PKH *a)
5837 {
5838 TCGv_i32 tn, tm;
5839 int shift = a->imm;
5840
5841 if (s->thumb
5842 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5843 : !ENABLE_ARCH_6) {
5844 return false;
5845 }
5846
5847 tn = load_reg(s, a->rn);
5848 tm = load_reg(s, a->rm);
5849 if (a->tb) {
5850 /* PKHTB */
5851 if (shift == 0) {
5852 shift = 31;
5853 }
5854 tcg_gen_sari_i32(tm, tm, shift);
5855 tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
5856 } else {
5857 /* PKHBT */
5858 tcg_gen_shli_i32(tm, tm, shift);
5859 tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
5860 }
5861 store_reg(s, a->rd, tn);
5862 return true;
5863 }
5864
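/*
 * SSAT/USAT and the 16-bit forms: optionally shift Rn (ASR, with an
 * encoded shift of 0 meaning 31, or LSL), then let the helper
 * saturate to the requested width and set QF if saturation occurred.
 */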
5865 static bool op_sat(DisasContext *s, arg_sat *a,
5866 void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
5867 {
5868 TCGv_i32 tmp;
5869 int shift = a->imm;
5870
5871 if (!ENABLE_ARCH_6) {
5872 return false;
5873 }
5874
5875 tmp = load_reg(s, a->rn);
5876 if (a->sh) {
5877 tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
5878 } else {
5879 tcg_gen_shli_i32(tmp, tmp, shift);
5880 }
5881
5882 gen(tmp, tcg_env, tmp, tcg_constant_i32(a->satimm));
5883
5884 store_reg(s, a->rd, tmp);
5885 return true;
5886 }
5887
5888 static bool trans_SSAT(DisasContext *s, arg_sat *a)
5889 {
5890 return op_sat(s, a, gen_helper_ssat);
5891 }
5892
5893 static bool trans_USAT(DisasContext *s, arg_sat *a)
5894 {
5895 return op_sat(s, a, gen_helper_usat);
5896 }
5897
5898 static bool trans_SSAT16(DisasContext *s, arg_sat *a)
5899 {
5900 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5901 return false;
5902 }
5903 return op_sat(s, a, gen_helper_ssat16);
5904 }
5905
5906 static bool trans_USAT16(DisasContext *s, arg_sat *a)
5907 {
5908 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5909 return false;
5910 }
5911 return op_sat(s, a, gen_helper_usat16);
5912 }
5913
5914 static bool op_xta(DisasContext *s, arg_rrr_rot *a,
5915 void (*gen_extract)(TCGv_i32, TCGv_i32),
5916 void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
5917 {
5918 TCGv_i32 tmp;
5919
5920 if (!ENABLE_ARCH_6) {
5921 return false;
5922 }
5923
5924 tmp = load_reg(s, a->rm);
5925 /*
5926 * TODO: In many cases we could do a shift instead of a rotate.
5927 * Combined with a simple extend, that becomes an extract.
5928 */
5929 tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
5930 gen_extract(tmp, tmp);
5931
5932 if (a->rn != 15) {
5933 TCGv_i32 tmp2 = load_reg(s, a->rn);
5934 gen_add(tmp, tmp, tmp2);
5935 }
5936 store_reg(s, a->rd, tmp);
5937 return true;
5938 }
5939
5940 static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
5941 {
5942 return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
5943 }
5944
5945 static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
5946 {
5947 return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
5948 }
5949
5950 static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
5951 {
5952 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5953 return false;
5954 }
5955 return op_xta(s, a, gen_helper_sxtb16, gen_add16);
5956 }
5957
5958 static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
5959 {
5960 return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
5961 }
5962
5963 static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
5964 {
5965 return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
5966 }
5967
5968 static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
5969 {
5970 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
5971 return false;
5972 }
5973 return op_xta(s, a, gen_helper_uxtb16, gen_add16);
5974 }
5975
5976 static bool trans_SEL(DisasContext *s, arg_rrr *a)
5977 {
5978 TCGv_i32 t1, t2, t3;
5979
5980 if (s->thumb
5981 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5982 : !ENABLE_ARCH_6) {
5983 return false;
5984 }
5985
5986 t1 = load_reg(s, a->rn);
5987 t2 = load_reg(s, a->rm);
5988 t3 = tcg_temp_new_i32();
5989 tcg_gen_ld_i32(t3, tcg_env, offsetof(CPUARMState, GE));
5990 gen_helper_sel_flags(t1, t3, t1, t2);
5991 store_reg(s, a->rd, t1);
5992 return true;
5993 }
5994
5995 static bool op_rr(DisasContext *s, arg_rr *a,
5996 void (*gen)(TCGv_i32, TCGv_i32))
5997 {
5998 TCGv_i32 tmp;
5999
6000 tmp = load_reg(s, a->rm);
6001 gen(tmp, tmp);
6002 store_reg(s, a->rd, tmp);
6003 return true;
6004 }
6005
6006 static bool trans_REV(DisasContext *s, arg_rr *a)
6007 {
6008 if (!ENABLE_ARCH_6) {
6009 return false;
6010 }
6011 return op_rr(s, a, tcg_gen_bswap32_i32);
6012 }
6013
6014 static bool trans_REV16(DisasContext *s, arg_rr *a)
6015 {
6016 if (!ENABLE_ARCH_6) {
6017 return false;
6018 }
6019 return op_rr(s, a, gen_rev16);
6020 }
6021
6022 static bool trans_REVSH(DisasContext *s, arg_rr *a)
6023 {
6024 if (!ENABLE_ARCH_6) {
6025 return false;
6026 }
6027 return op_rr(s, a, gen_revsh);
6028 }
6029
6030 static bool trans_RBIT(DisasContext *s, arg_rr *a)
6031 {
6032 if (!ENABLE_ARCH_6T2) {
6033 return false;
6034 }
6035 return op_rr(s, a, gen_helper_rbit);
6036 }
6037
6038 /*
6039 * Signed multiply, signed and unsigned divide
6040 */
6041
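/*
 * SMLAD/SMLSD and the 'X' variants: two signed 16x16->32 multiplies
 * of the halfword pairs of Rn and Rm (optionally swapping the halves
 * of Rm first), then add or subtract the two products, accumulate Ra
 * and set Q if the accumulation overflows.
 */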
6042 static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
6043 {
6044 TCGv_i32 t1, t2;
6045
6046 if (!ENABLE_ARCH_6) {
6047 return false;
6048 }
6049
6050 t1 = load_reg(s, a->rn);
6051 t2 = load_reg(s, a->rm);
6052 if (m_swap) {
6053 gen_swap_half(t2, t2);
6054 }
6055 gen_smul_dual(t1, t2);
6056
6057 if (sub) {
6058 /*
6059 * This subtraction cannot overflow, so we can do a simple
6060 * 32-bit subtraction and then a possible 32-bit saturating
6061 * addition of Ra.
6062 */
6063 tcg_gen_sub_i32(t1, t1, t2);
6064
6065 if (a->ra != 15) {
6066 t2 = load_reg(s, a->ra);
6067 gen_helper_add_setq(t1, tcg_env, t1, t2);
6068 }
6069 } else if (a->ra == 15) {
6070 /* Single saturation-checking addition */
6071 gen_helper_add_setq(t1, tcg_env, t1, t2);
6072 } else {
6073 /*
6074 * We need to add the products and Ra together and then
6075 * determine whether the final result overflowed. Doing
6076 * this as two separate add-and-check-overflow steps incorrectly
6077 * sets Q for cases like (-32768 * -32768) + (-32768 * -32768) + -1.
6078 * Do all the arithmetic at 64-bits and then check for overflow.
6079 */
6080 TCGv_i64 p64, q64;
6081 TCGv_i32 t3, qf, one;
6082
6083 p64 = tcg_temp_new_i64();
6084 q64 = tcg_temp_new_i64();
6085 tcg_gen_ext_i32_i64(p64, t1);
6086 tcg_gen_ext_i32_i64(q64, t2);
6087 tcg_gen_add_i64(p64, p64, q64);
6088 load_reg_var(s, t2, a->ra);
6089 tcg_gen_ext_i32_i64(q64, t2);
6090 tcg_gen_add_i64(p64, p64, q64);
6091
6092 tcg_gen_extr_i64_i32(t1, t2, p64);
6093 /*
6094 * t1 is the low half of the result which goes into Rd.
6095 * We have overflow and must set Q if the high half (t2)
6096 * is different from the sign-extension of t1.
6097 */
6098 t3 = tcg_temp_new_i32();
6099 tcg_gen_sari_i32(t3, t1, 31);
6100 qf = load_cpu_field(QF);
6101 one = tcg_constant_i32(1);
6102 tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf);
6103 store_cpu_field(qf, QF);
6104 }
6105 store_reg(s, a->rd, t1);
6106 return true;
6107 }
6108
6109 static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
6110 {
6111 return op_smlad(s, a, false, false);
6112 }
6113
6114 static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
6115 {
6116 return op_smlad(s, a, true, false);
6117 }
6118
6119 static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
6120 {
6121 return op_smlad(s, a, false, true);
6122 }
6123
6124 static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
6125 {
6126 return op_smlad(s, a, true, true);
6127 }
6128
6129 static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
6130 {
6131 TCGv_i32 t1, t2;
6132 TCGv_i64 l1, l2;
6133
6134 if (!ENABLE_ARCH_6) {
6135 return false;
6136 }
6137
6138 t1 = load_reg(s, a->rn);
6139 t2 = load_reg(s, a->rm);
6140 if (m_swap) {
6141 gen_swap_half(t2, t2);
6142 }
6143 gen_smul_dual(t1, t2);
6144
6145 l1 = tcg_temp_new_i64();
6146 l2 = tcg_temp_new_i64();
6147 tcg_gen_ext_i32_i64(l1, t1);
6148 tcg_gen_ext_i32_i64(l2, t2);
6149
6150 if (sub) {
6151 tcg_gen_sub_i64(l1, l1, l2);
6152 } else {
6153 tcg_gen_add_i64(l1, l1, l2);
6154 }
6155
6156 gen_addq(s, l1, a->ra, a->rd);
6157 gen_storeq_reg(s, a->ra, a->rd, l1);
6158 return true;
6159 }
6160
6161 static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
6162 {
6163 return op_smlald(s, a, false, false);
6164 }
6165
6166 static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
6167 {
6168 return op_smlald(s, a, true, false);
6169 }
6170
6171 static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
6172 {
6173 return op_smlald(s, a, false, true);
6174 }
6175
6176 static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
6177 {
6178 return op_smlald(s, a, true, true);
6179 }
6180
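/*
 * SMMLA/SMMLS and the rounding 'R' forms: a signed 32x32->64 multiply
 * of which only the high word is kept. tcg_gen_muls2_i32() leaves the
 * low half in t2 and the high half in t1; the low half is only needed
 * for the borrow in the SMMLS case and for the rounding increment.
 */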
6181 static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
6182 {
6183 TCGv_i32 t1, t2;
6184
6185 if (s->thumb
6186 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
6187 : !ENABLE_ARCH_6) {
6188 return false;
6189 }
6190
6191 t1 = load_reg(s, a->rn);
6192 t2 = load_reg(s, a->rm);
6193 tcg_gen_muls2_i32(t2, t1, t1, t2);
6194
6195 if (a->ra != 15) {
6196 TCGv_i32 t3 = load_reg(s, a->ra);
6197 if (sub) {
6198 /*
6199 * For SMMLS we need a true 64-bit subtract: a non-zero lowpart of
6200 * the product generates the borrow into the high word, and it also
6201 * provides the correct result lowpart for the rounding step below.
6202 */
6203 tcg_gen_sub2_i32(t2, t1, tcg_constant_i32(0), t3, t2, t1);
6204 } else {
6205 tcg_gen_add_i32(t1, t1, t3);
6206 }
6207 }
6208 if (round) {
6209 /*
6210 * Adding 0x80000000 to the 64-bit quantity means that we have
6211 * carry in to the high word when the low word has the msb set.
6212 */
6213 tcg_gen_shri_i32(t2, t2, 31);
6214 tcg_gen_add_i32(t1, t1, t2);
6215 }
6216 store_reg(s, a->rd, t1);
6217 return true;
6218 }
6219
6220 static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
6221 {
6222 return op_smmla(s, a, false, false);
6223 }
6224
6225 static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
6226 {
6227 return op_smmla(s, a, true, false);
6228 }
6229
6230 static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
6231 {
6232 return op_smmla(s, a, false, true);
6233 }
6234
6235 static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
6236 {
6237 return op_smmla(s, a, true, true);
6238 }
6239
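/*
 * SDIV/UDIV use helpers rather than the TCG division ops so that the
 * architected results for division by zero (and INT_MIN / -1 for
 * SDIV) are produced, and so that the M-profile divide-by-zero trap
 * can be raised when it is enabled.
 */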
6240 static bool op_div(DisasContext *s, arg_rrr *a, bool u)
6241 {
6242 TCGv_i32 t1, t2;
6243
6244 if (s->thumb
6245 ? !dc_isar_feature(aa32_thumb_div, s)
6246 : !dc_isar_feature(aa32_arm_div, s)) {
6247 return false;
6248 }
6249
6250 t1 = load_reg(s, a->rn);
6251 t2 = load_reg(s, a->rm);
6252 if (u) {
6253 gen_helper_udiv(t1, tcg_env, t1, t2);
6254 } else {
6255 gen_helper_sdiv(t1, tcg_env, t1, t2);
6256 }
6257 store_reg(s, a->rd, t1);
6258 return true;
6259 }
6260
6261 static bool trans_SDIV(DisasContext *s, arg_rrr *a)
6262 {
6263 return op_div(s, a, false);
6264 }
6265
6266 static bool trans_UDIV(DisasContext *s, arg_rrr *a)
6267 {
6268 return op_div(s, a, true);
6269 }
6270
6271 /*
6272 * Block data transfer
6273 */
6274
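/*
 * LDM/STM address handling: a->b and a->i are the P (before) and
 * U (increment) bits of the encoding. op_addr_block_pre() returns the
 * lowest address that will be accessed so the transfer loop can
 * always walk upwards by 4; op_addr_block_post() applies writeback.
 */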
6275 static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
6276 {
6277 TCGv_i32 addr = load_reg(s, a->rn);
6278
6279 if (a->b) {
6280 if (a->i) {
6281 /* pre increment */
6282 tcg_gen_addi_i32(addr, addr, 4);
6283 } else {
6284 /* pre decrement */
6285 tcg_gen_addi_i32(addr, addr, -(n * 4));
6286 }
6287 } else if (!a->i && n != 1) {
6288 /* post decrement */
6289 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6290 }
6291
6292 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
6293 /*
6294 * If the writeback is incrementing SP rather than
6295 * decrementing it, and the initial SP is below the
6296 * stack limit but the final written-back SP would
6297 * be above, then we must not perform any memory
6298 * accesses, but it is IMPDEF whether we generate
6299 * an exception. We choose to do so in this case.
6300 * At this point 'addr' is the lowest address, so
6301 * either the original SP (if incrementing) or our
6302 * final SP (if decrementing), so that's what we check.
6303 */
6304 gen_helper_v8m_stackcheck(tcg_env, addr);
6305 }
6306
6307 return addr;
6308 }
6309
6310 static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
6311 TCGv_i32 addr, int n)
6312 {
6313 if (a->w) {
6314 /* write back */
6315 if (!a->b) {
6316 if (a->i) {
6317 /* post increment */
6318 tcg_gen_addi_i32(addr, addr, 4);
6319 } else {
6320 /* post decrement */
6321 tcg_gen_addi_i32(addr, addr, -(n * 4));
6322 }
6323 } else if (!a->i && n != 1) {
6324 /* pre decrement */
6325 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6326 }
6327 store_reg(s, a->rn, addr);
6328 }
6329 }
6330
6331 static bool op_stm(DisasContext *s, arg_ldst_block *a)
6332 {
6333 int i, j, n, list, mem_idx;
6334 bool user = a->u;
6335 TCGv_i32 addr, tmp;
6336
6337 if (user) {
6338 /* STM (user) */
6339 if (IS_USER(s)) {
6340 /* Only usable in supervisor mode. */
6341 unallocated_encoding(s);
6342 return true;
6343 }
6344 }
6345
6346 list = a->list;
6347 n = ctpop16(list);
6348 /*
6349 * This is UNPREDICTABLE for n < 1 in all encodings, and we choose
6350 * to UNDEF. In the T32 STM encoding n == 1 is also UNPREDICTABLE,
6351 * but hardware treats it like the A32 version and implements the
6352 * single-register-store, and some in-the-wild (buggy) software
6353 * assumes that, so we don't UNDEF on that case.
6354 */
6355 if (n < 1 || a->rn == 15) {
6356 unallocated_encoding(s);
6357 return true;
6358 }
6359
6360 s->eci_handled = true;
6361
6362 addr = op_addr_block_pre(s, a, n);
6363 mem_idx = get_mem_index(s);
6364
6365 for (i = j = 0; i < 16; i++) {
6366 if (!(list & (1 << i))) {
6367 continue;
6368 }
6369
6370 if (user && i != 15) {
6371 tmp = tcg_temp_new_i32();
6372 gen_helper_get_user_reg(tmp, tcg_env, tcg_constant_i32(i));
6373 } else {
6374 tmp = load_reg(s, i);
6375 }
6376 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
6377
6378 /* No need to add after the last transfer. */
6379 if (++j != n) {
6380 tcg_gen_addi_i32(addr, addr, 4);
6381 }
6382 }
6383
6384 op_addr_block_post(s, a, addr, n);
6385 clear_eci_state(s);
6386 return true;
6387 }
6388
6389 static bool trans_STM(DisasContext *s, arg_ldst_block *a)
6390 {
6391 return op_stm(s, a);
6392 }
6393
6394 static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
6395 {
6396 /* Writeback register in register list is UNPREDICTABLE for T32. */
6397 if (a->w && (a->list & (1 << a->rn))) {
6398 unallocated_encoding(s);
6399 return true;
6400 }
6401 return op_stm(s, a);
6402 }
6403
6404 static bool do_ldm(DisasContext *s, arg_ldst_block *a)
6405 {
6406 int i, j, n, list, mem_idx;
6407 bool loaded_base;
6408 bool user = a->u;
6409 bool exc_return = false;
6410 TCGv_i32 addr, tmp, loaded_var;
6411
6412 if (user) {
6413 /* LDM (user), LDM (exception return) */
6414 if (IS_USER(s)) {
6415 /* Only usable in supervisor mode. */
6416 unallocated_encoding(s);
6417 return true;
6418 }
6419 if (extract32(a->list, 15, 1)) {
6420 exc_return = true;
6421 user = false;
6422 } else {
6423 /* LDM (user) does not allow writeback. */
6424 if (a->w) {
6425 unallocated_encoding(s);
6426 return true;
6427 }
6428 }
6429 }
6430
6431 list = a->list;
6432 n = ctpop16(list);
6433 /*
6434 * This is UNPREDICTABLE for n < 1 in all encodings, and we choose
6435 * to UNDEF. In the T32 LDM encoding n == 1 is also UNPREDICTABLE,
6436 * but hardware treats it like the A32 version and implements the
6437 * single-register-load, and some in-the-wild (buggy) software
6438 * assumes that, so we don't UNDEF on that case.
6439 */
6440 if (n < 1 || a->rn == 15) {
6441 unallocated_encoding(s);
6442 return true;
6443 }
6444
6445 s->eci_handled = true;
6446
6447 addr = op_addr_block_pre(s, a, n);
6448 mem_idx = get_mem_index(s);
6449 loaded_base = false;
6450 loaded_var = NULL;
6451
6452 for (i = j = 0; i < 16; i++) {
6453 if (!(list & (1 << i))) {
6454 continue;
6455 }
6456
6457 tmp = tcg_temp_new_i32();
6458 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
6459 if (user) {
6460 gen_helper_set_user_reg(tcg_env, tcg_constant_i32(i), tmp);
6461 } else if (i == a->rn) {
6462 loaded_var = tmp;
6463 loaded_base = true;
6464 } else if (i == 15 && exc_return) {
6465 store_pc_exc_ret(s, tmp);
6466 } else {
6467 store_reg_from_load(s, i, tmp);
6468 }
6469
6470 /* No need to add after the last transfer. */
6471 if (++j != n) {
6472 tcg_gen_addi_i32(addr, addr, 4);
6473 }
6474 }
6475
6476 op_addr_block_post(s, a, addr, n);
6477
6478 if (loaded_base) {
6479 /* Note that we reject base == pc above. */
6480 store_reg(s, a->rn, loaded_var);
6481 }
6482
6483 if (exc_return) {
6484 /* Restore CPSR from SPSR. */
6485 tmp = load_cpu_field(spsr);
6486 translator_io_start(&s->base);
6487 gen_helper_cpsr_write_eret(tcg_env, tmp);
6488 /* Must exit loop to check un-masked IRQs */
6489 s->base.is_jmp = DISAS_EXIT;
6490 }
6491 clear_eci_state(s);
6492 return true;
6493 }
6494
6495 static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
6496 {
6497 /*
6498 * Writeback register in register list is UNPREDICTABLE
6499 * for ArchVersion() >= 7. Prior to v7, A32 would write
6500 * an UNKNOWN value to the base register.
6501 */
6502 if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
6503 unallocated_encoding(s);
6504 return true;
6505 }
6506 return do_ldm(s, a);
6507 }
6508
6509 static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
6510 {
6511 /* Writeback register in register list is UNPREDICTABLE for T32. */
6512 if (a->w && (a->list & (1 << a->rn))) {
6513 unallocated_encoding(s);
6514 return true;
6515 }
6516 return do_ldm(s, a);
6517 }
6518
6519 static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
6520 {
6521 /* Writeback is conditional on the base register not being loaded. */
6522 a->w = !(a->list & (1 << a->rn));
6523 return do_ldm(s, a);
6524 }
6525
6526 static bool trans_CLRM(DisasContext *s, arg_CLRM *a)
6527 {
6528 int i;
6529 TCGv_i32 zero;
6530
6531 if (!dc_isar_feature(aa32_m_sec_state, s)) {
6532 return false;
6533 }
6534
6535 if (extract32(a->list, 13, 1)) {
6536 return false;
6537 }
6538
6539 if (!a->list) {
6540 /* UNPREDICTABLE; we choose to UNDEF */
6541 return false;
6542 }
6543
6544 s->eci_handled = true;
6545
6546 zero = tcg_constant_i32(0);
6547 for (i = 0; i < 15; i++) {
6548 if (extract32(a->list, i, 1)) {
6549 /* Clear R[i] */
6550 tcg_gen_mov_i32(cpu_R[i], zero);
6551 }
6552 }
6553 if (extract32(a->list, 15, 1)) {
6554 /*
6555 * Clear APSR (by calling the MSR helper with the same argument
6556 * as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0)
6557 */
6558 gen_helper_v7m_msr(tcg_env, tcg_constant_i32(0xc00), zero);
6559 }
6560 clear_eci_state(s);
6561 return true;
6562 }
6563
6564 /*
6565 * Branch, branch with link
6566 */
6567
6568 static bool trans_B(DisasContext *s, arg_i *a)
6569 {
6570 gen_jmp(s, jmp_diff(s, a->imm));
6571 return true;
6572 }
6573
6574 static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
6575 {
6576 /* This has cond from encoding, required to be outside IT block. */
6577 if (a->cond >= 0xe) {
6578 return false;
6579 }
6580 if (s->condexec_mask) {
6581 unallocated_encoding(s);
6582 return true;
6583 }
6584 arm_skip_unless(s, a->cond);
6585 gen_jmp(s, jmp_diff(s, a->imm));
6586 return true;
6587 }
6588
6589 static bool trans_BL(DisasContext *s, arg_i *a)
6590 {
6591 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
6592 gen_jmp(s, jmp_diff(s, a->imm));
6593 return true;
6594 }
6595
6596 static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
6597 {
6598 /*
6599 * BLX <imm> would be useless on M-profile; the encoding space
6600 * is used for other insns from v8.1M onward, and UNDEFs before that.
6601 */
6602 if (arm_dc_feature(s, ARM_FEATURE_M)) {
6603 return false;
6604 }
6605
6606 /* For A32, ARM_FEATURE_V5 is checked near the start of the uncond block. */
6607 if (s->thumb && (a->imm & 2)) {
6608 return false;
6609 }
6610 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
6611 store_cpu_field_constant(!s->thumb, thumb);
6612 /* This jump is computed from an aligned PC: subtract off the low bits. */
6613 gen_jmp(s, jmp_diff(s, a->imm - (s->pc_curr & 3)));
6614 return true;
6615 }
6616
6617 static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
6618 {
6619 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6620 gen_pc_plus_diff(s, cpu_R[14], jmp_diff(s, a->imm << 12));
6621 return true;
6622 }
6623
6624 static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
6625 {
6626 TCGv_i32 tmp = tcg_temp_new_i32();
6627
6628 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6629 tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
6630 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
6631 gen_bx(s, tmp);
6632 return true;
6633 }
6634
6635 static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
6636 {
6637 TCGv_i32 tmp;
6638
6639 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
6640 if (!ENABLE_ARCH_5) {
6641 return false;
6642 }
6643 tmp = tcg_temp_new_i32();
6644 tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
6645 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
6646 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
6647 gen_bx(s, tmp);
6648 return true;
6649 }
6650
6651 static bool trans_BF(DisasContext *s, arg_BF *a)
6652 {
6653 /*
6654 * M-profile branch future insns. The architecture permits an
6655 * implementation to implement these as NOPs (equivalent to
6656 * discarding the LO_BRANCH_INFO cache immediately), and we
6657 * take that IMPDEF option because for QEMU a "real" implementation
6658 * would be complicated and wouldn't execute any faster.
6659 */
6660 if (!dc_isar_feature(aa32_lob, s)) {
6661 return false;
6662 }
6663 if (a->boff == 0) {
6664 /* SEE "Related encodings" (loop insns) */
6665 return false;
6666 }
6667 /* Handle as NOP */
6668 return true;
6669 }
6670
6671 static bool trans_DLS(DisasContext *s, arg_DLS *a)
6672 {
6673 /* M-profile low-overhead loop start */
6674 TCGv_i32 tmp;
6675
6676 if (!dc_isar_feature(aa32_lob, s)) {
6677 return false;
6678 }
6679 if (a->rn == 13 || a->rn == 15) {
6680 /*
6681 * For DLSTP rn == 15 is a related encoding (LCTP); the
6682 * other cases caught by this condition are all
6683 * CONSTRAINED UNPREDICTABLE: we choose to UNDEF
6684 */
6685 return false;
6686 }
6687
6688 if (a->size != 4) {
6689 /* DLSTP */
6690 if (!dc_isar_feature(aa32_mve, s)) {
6691 return false;
6692 }
6693 if (!vfp_access_check(s)) {
6694 return true;
6695 }
6696 }
6697
6698 /* Not a while loop: set LR to the count, and set LTPSIZE for DLSTP */
6699 tmp = load_reg(s, a->rn);
6700 store_reg(s, 14, tmp);
6701 if (a->size != 4) {
6702 /* DLSTP: set FPSCR.LTPSIZE */
6703 store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize);
6704 s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
6705 }
6706 return true;
6707 }
6708
6709 static bool trans_WLS(DisasContext *s, arg_WLS *a)
6710 {
6711 /* M-profile low-overhead while-loop start */
6712 TCGv_i32 tmp;
6713 DisasLabel nextlabel;
6714
6715 if (!dc_isar_feature(aa32_lob, s)) {
6716 return false;
6717 }
6718 if (a->rn == 13 || a->rn == 15) {
6719 /*
6720 * For WLSTP rn == 15 is a related encoding (LE); the
6721 * other cases caught by this condition are all
6722 * CONSTRAINED UNPREDICTABLE: we choose to UNDEF
6723 */
6724 return false;
6725 }
6726 if (s->condexec_mask) {
6727 /*
6728 * WLS in an IT block is CONSTRAINED UNPREDICTABLE;
6729 * we choose to UNDEF, because otherwise our use of
6730 * gen_goto_tb(1) would clash with the use of TB exit 1
6731 * in the dc->condjmp condition-failed codepath in
6732 * arm_tr_tb_stop() and we'd get an assertion.
6733 */
6734 return false;
6735 }
6736 if (a->size != 4) {
6737 /* WLSTP */
6738 if (!dc_isar_feature(aa32_mve, s)) {
6739 return false;
6740 }
6741 /*
6742 * We need to check that the FPU is enabled here, but mustn't
6743 * call vfp_access_check() to do that because we don't want to
6744 * do the lazy state preservation in the "loop count is zero" case.
6745 * Do the check-and-raise-exception by hand.
6746 */
6747 if (s->fp_excp_el) {
6748 gen_exception_insn_el(s, 0, EXCP_NOCP,
6749 syn_uncategorized(), s->fp_excp_el);
6750 return true;
6751 }
6752 }
6753
6754 nextlabel = gen_disas_label(s);
6755 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel.label);
6756 tmp = load_reg(s, a->rn);
6757 store_reg(s, 14, tmp);
6758 if (a->size != 4) {
6759 /*
6760 * WLSTP: set FPSCR.LTPSIZE. This requires that we do the
6761 * lazy state preservation, new FP context creation, etc,
6762 * that vfp_access_check() does. We know that the actual
6763 * access check will succeed (ie it won't generate code that
6764 * throws an exception) because we did that check by hand earlier.
6765 */
6766 bool ok = vfp_access_check(s);
6767 assert(ok);
6768 store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize);
6769 /*
6770 * LTPSIZE updated, but MVE_NO_PRED will always be the same thing (0)
6771 * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK.
6772 */
6773 }
6774 gen_jmp_tb(s, curr_insn_len(s), 1);
6775
6776 set_disas_label(s, nextlabel);
6777 gen_jmp(s, jmp_diff(s, a->imm));
6778 return true;
6779 }
6780
6781 static bool trans_LE(DisasContext *s, arg_LE *a)
6782 {
6783 /*
6784 * M-profile low-overhead loop end. The architecture permits an
6785 * implementation to discard the LO_BRANCH_INFO cache at any time,
6786 * and we take the IMPDEF option to never set it in the first place
6787 * (equivalent to always discarding it immediately), because for QEMU
6788 * a "real" implementation would be complicated and wouldn't execute
6789 * any faster.
6790 */
6791 TCGv_i32 tmp;
6792 DisasLabel loopend;
6793 bool fpu_active;
6794
6795 if (!dc_isar_feature(aa32_lob, s)) {
6796 return false;
6797 }
6798 if (a->f && a->tp) {
6799 return false;
6800 }
6801 if (s->condexec_mask) {
6802 /*
6803 * LE in an IT block is CONSTRAINED UNPREDICTABLE;
6804 * we choose to UNDEF, because otherwise our use of
6805 * gen_goto_tb(1) would clash with the use of TB exit 1
6806 * in the dc->condjmp condition-failed codepath in
6807 * arm_tr_tb_stop() and we'd get an assertion.
6808 */
6809 return false;
6810 }
6811 if (a->tp) {
6812 /* LETP */
6813 if (!dc_isar_feature(aa32_mve, s)) {
6814 return false;
6815 }
6816 if (!vfp_access_check(s)) {
6817 s->eci_handled = true;
6818 return true;
6819 }
6820 }
6821
6822 /* LE/LETP is OK with ECI set and leaves it untouched */
6823 s->eci_handled = true;
6824
6825 /*
6826 * With MVE, LTPSIZE might not be 4, and we must emit an INVSTATE
6827 * UsageFault exception for the LE insn in that case. Note that we
6828 * are not directly checking FPSCR.LTPSIZE but instead check the
6829 * pseudocode LTPSIZE() function, which returns 4 if the FPU is
6830 * not currently active (ie ActiveFPState() returns false). We
6831 * can identify not-active purely from our TB state flags, as the
6832 * FPU is active only if:
6833 * the FPU is enabled
6834 * AND lazy state preservation is not active
6835 * AND we do not need a new fp context (this is the ASPEN/FPCA check)
6836 *
6837 * Usually we don't need to care about this distinction between
6838 * LTPSIZE and FPSCR.LTPSIZE, because the code in vfp_access_check()
6839 * will either take an exception or clear the conditions that make
6840 * the FPU not active. But LE is an unusual case of a non-FP insn
6841 * that looks at LTPSIZE.
6842 */
6843 fpu_active = !s->fp_excp_el && !s->v7m_lspact && !s->v7m_new_fp_ctxt_needed;
6844
6845 if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) {
6846 /* Need to do a runtime check for LTPSIZE != 4 */
6847 DisasLabel skipexc = gen_disas_label(s);
6848 tmp = load_cpu_field(v7m.ltpsize);
6849 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label);
6850 gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
6851 set_disas_label(s, skipexc);
6852 }
6853
6854 if (a->f) {
6855 /* Loop-forever: just jump back to the loop start */
6856 gen_jmp(s, jmp_diff(s, -a->imm));
6857 return true;
6858 }
6859
6860 /*
6861 * Not loop-forever. If LR <= loop-decrement-value this is the last loop.
6862 * For LE, we know at this point that LTPSIZE must be 4 and the
6863 * loop decrement value is 1. For LETP we need to calculate the decrement
6864 * value from LTPSIZE.
6865 */
6866 loopend = gen_disas_label(s);
6867 if (!a->tp) {
6868 tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend.label);
6869 tcg_gen_addi_i32(cpu_R[14], cpu_R[14], -1);
6870 } else {
6871 /*
6872 * Decrement by 1 << (4 - LTPSIZE). We need to use a TCG local
6873 * so that decr stays live after the brcondi.
6874 */
6875 TCGv_i32 decr = tcg_temp_new_i32();
6876 TCGv_i32 ltpsize = load_cpu_field(v7m.ltpsize);
6877 tcg_gen_sub_i32(decr, tcg_constant_i32(4), ltpsize);
6878 tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr);
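        /*
         * e.g. LTPSIZE == 1 (16-bit elements) gives decr == 8, i.e. one
         * 128-bit vector's worth of halfwords per iteration.
         */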
6879
6880 tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend.label);
6881
6882 tcg_gen_sub_i32(cpu_R[14], cpu_R[14], decr);
6883 }
6884 /* Jump back to the loop start */
6885 gen_jmp(s, jmp_diff(s, -a->imm));
6886
6887 set_disas_label(s, loopend);
6888 if (a->tp) {
6889 /* Exits from tail-pred loops must reset LTPSIZE to 4 */
6890 store_cpu_field(tcg_constant_i32(4), v7m.ltpsize);
6891 }
6892 /* End TB, continuing to following insn */
6893 gen_jmp_tb(s, curr_insn_len(s), 1);
6894 return true;
6895 }
6896
6897 static bool trans_LCTP(DisasContext *s, arg_LCTP *a)
6898 {
6899 /*
6900 * M-profile Loop Clear with Tail Predication. Since our implementation
6901 * doesn't cache branch information, all we need to do is reset
6902 * FPSCR.LTPSIZE to 4.
6903 */
6904
6905 if (!dc_isar_feature(aa32_lob, s) ||
6906 !dc_isar_feature(aa32_mve, s)) {
6907 return false;
6908 }
6909
6910 if (!vfp_access_check(s)) {
6911 return true;
6912 }
6913
6914 store_cpu_field_constant(4, v7m.ltpsize);
6915 return true;
6916 }
6917
6918 static bool trans_VCTP(DisasContext *s, arg_VCTP *a)
6919 {
6920 /*
6921 * M-profile Create Vector Tail Predicate. This insn is itself
6922 * predicated and is subject to beatwise execution.
6923 */
6924 TCGv_i32 rn_shifted, masklen;
6925
6926 if (!dc_isar_feature(aa32_mve, s) || a->rn == 13 || a->rn == 15) {
6927 return false;
6928 }
6929
6930 if (!mve_eci_check(s) || !vfp_access_check(s)) {
6931 return true;
6932 }
6933
6934 /*
6935 * We pre-calculate the mask length here to avoid having
6936 * to have multiple helpers specialized for size.
6937 * We pass the helper "rn <= (1 << (4 - size)) ? (rn << size) : 16".
6938 */
6939 rn_shifted = tcg_temp_new_i32();
6940 masklen = load_reg(s, a->rn);
6941 tcg_gen_shli_i32(rn_shifted, masklen, a->size);
6942 tcg_gen_movcond_i32(TCG_COND_LEU, masklen,
6943 masklen, tcg_constant_i32(1 << (4 - a->size)),
6944 rn_shifted, tcg_constant_i32(16));
6945 gen_helper_mve_vctp(tcg_env, masklen);
6946 /* This insn updates predication bits */
6947 s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
6948 mve_update_eci(s);
6949 return true;
6950 }
6951
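/*
 * TBB/TBH table branch: load an unsigned byte or halfword from
 * Rn + Rm (Rm doubled for TBH), double the loaded value and add it to
 * the PC (address of this insn + 4) to form the branch target, so the
 * table holds forward offsets measured in halfwords.
 */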
6952 static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
6953 {
6954 TCGv_i32 addr, tmp;
6955
6956 tmp = load_reg(s, a->rm);
6957 if (half) {
6958 tcg_gen_add_i32(tmp, tmp, tmp);
6959 }
6960 addr = load_reg(s, a->rn);
6961 tcg_gen_add_i32(addr, addr, tmp);
6962
6963 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
6964
6965 tcg_gen_add_i32(tmp, tmp, tmp);
6966 gen_pc_plus_diff(s, addr, jmp_diff(s, 0));
6967 tcg_gen_add_i32(tmp, tmp, addr);
6968 store_reg(s, 15, tmp);
6969 return true;
6970 }
6971
6972 static bool trans_TBB(DisasContext *s, arg_tbranch *a)
6973 {
6974 return op_tbranch(s, a, false);
6975 }
6976
6977 static bool trans_TBH(DisasContext *s, arg_tbranch *a)
6978 {
6979 return op_tbranch(s, a, true);
6980 }
6981
6982 static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
6983 {
6984 TCGv_i32 tmp = load_reg(s, a->rn);
6985
6986 arm_gen_condlabel(s);
6987 tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
6988 tmp, 0, s->condlabel.label);
6989 gen_jmp(s, jmp_diff(s, a->imm));
6990 return true;
6991 }
6992
6993 /*
6994 * Supervisor call - both T32 & A32 come here so we need to check
6995 * which mode we are in when checking for semihosting.
6996 */
6997
6998 static bool trans_SVC(DisasContext *s, arg_SVC *a)
6999 {
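    /*
     * 0xab (Thumb) and 0x123456 (A32) are the standard semihosting
     * SVC immediates.
     */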
7000 const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;
7001
7002 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
7003 semihosting_enabled(s->current_el == 0) &&
7004 (a->imm == semihost_imm)) {
7005 gen_exception_internal_insn(s, EXCP_SEMIHOST);
7006 } else {
7007 if (s->fgt_svc) {
7008 uint32_t syndrome = syn_aa32_svc(a->imm, s->thumb);
7009 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
7010 } else {
7011 gen_update_pc(s, curr_insn_len(s));
7012 s->svc_imm = a->imm;
7013 s->base.is_jmp = DISAS_SWI;
7014 }
7015 }
7016 return true;
7017 }
7018
7019 /*
7020 * Unconditional system instructions
7021 */
7022
7023 static bool trans_RFE(DisasContext *s, arg_RFE *a)
7024 {
7025 static const int8_t pre_offset[4] = {
7026 /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
7027 };
7028 static const int8_t post_offset[4] = {
7029 /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
7030 };
7031 TCGv_i32 addr, t1, t2;
7032
7033 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7034 return false;
7035 }
7036 if (IS_USER(s)) {
7037 unallocated_encoding(s);
7038 return true;
7039 }
7040
7041 addr = load_reg(s, a->rn);
7042 tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);
7043
7044 /* Load PC into tmp and CPSR into tmp2. */
7045 t1 = tcg_temp_new_i32();
7046 gen_aa32_ld_i32(s, t1, addr, get_mem_index(s), MO_UL | MO_ALIGN);
7047 tcg_gen_addi_i32(addr, addr, 4);
7048 t2 = tcg_temp_new_i32();
7049 gen_aa32_ld_i32(s, t2, addr, get_mem_index(s), MO_UL | MO_ALIGN);
7050
7051 if (a->w) {
7052 /* Base writeback. */
7053 tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
7054 store_reg(s, a->rn, addr);
7055 }
7056 gen_rfe(s, t1, t2);
7057 return true;
7058 }
7059
7060 static bool trans_SRS(DisasContext *s, arg_SRS *a)
7061 {
7062 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7063 return false;
7064 }
7065 gen_srs(s, a->mode, a->pu, a->w);
7066 return true;
7067 }
7068
7069 static bool trans_CPS(DisasContext *s, arg_CPS *a)
7070 {
7071 uint32_t mask, val;
7072
7073 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
7074 return false;
7075 }
7076 if (IS_USER(s)) {
7077 /* Implemented as NOP in user mode. */
7078 return true;
7079 }
7080 /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
7081
7082 mask = val = 0;
7083 if (a->imod & 2) {
7084 if (a->A) {
7085 mask |= CPSR_A;
7086 }
7087 if (a->I) {
7088 mask |= CPSR_I;
7089 }
7090 if (a->F) {
7091 mask |= CPSR_F;
7092 }
7093 if (a->imod & 1) {
7094 val |= mask;
7095 }
7096 }
7097 if (a->M) {
7098 mask |= CPSR_M;
7099 val |= a->mode;
7100 }
7101 if (mask) {
7102 gen_set_psr_im(s, mask, 0, val);
7103 }
7104 return true;
7105 }
7106
7107 static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
7108 {
7109 TCGv_i32 tmp, addr;
7110
7111 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
7112 return false;
7113 }
7114 if (IS_USER(s)) {
7115 /* Implemented as NOP in user mode. */
7116 return true;
7117 }
7118
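    /*
     * The constants passed to the MSR helper below are v7-M SYSm
     * register numbers: 16 is PRIMASK and 19 is FAULTMASK.
     */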
7119 tmp = tcg_constant_i32(a->im);
7120 /* FAULTMASK */
7121 if (a->F) {
7122 addr = tcg_constant_i32(19);
7123 gen_helper_v7m_msr(tcg_env, addr, tmp);
7124 }
7125 /* PRIMASK */
7126 if (a->I) {
7127 addr = tcg_constant_i32(16);
7128 gen_helper_v7m_msr(tcg_env, addr, tmp);
7129 }
7130 gen_rebuild_hflags(s, false);
7131 gen_lookup_tb(s);
7132 return true;
7133 }
7134
7135 /*
7136 * Clear-Exclusive, Barriers
7137 */
7138
7139 static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
7140 {
7141 if (s->thumb
7142 ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
7143 : !ENABLE_ARCH_6K) {
7144 return false;
7145 }
7146 gen_clrex(s);
7147 return true;
7148 }
7149
7150 static bool trans_DSB(DisasContext *s, arg_DSB *a)
7151 {
7152 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
7153 return false;
7154 }
7155 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7156 return true;
7157 }
7158
7159 static bool trans_DMB(DisasContext *s, arg_DMB *a)
7160 {
7161 return trans_DSB(s, NULL);
7162 }
7163
7164 static bool trans_ISB(DisasContext *s, arg_ISB *a)
7165 {
7166 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
7167 return false;
7168 }
7169 /*
7170 * We need to break the TB after this insn to execute
7171 * self-modifying code correctly and also to take
7172 * any pending interrupts immediately.
7173 */
7174 s->base.is_jmp = DISAS_TOO_MANY;
7175 return true;
7176 }
7177
7178 static bool trans_SB(DisasContext *s, arg_SB *a)
7179 {
7180 if (!dc_isar_feature(aa32_sb, s)) {
7181 return false;
7182 }
7183 /*
7184 * TODO: There is no speculation barrier opcode
7185 * for TCG; MB and end the TB instead.
7186 */
7187 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7188 s->base.is_jmp = DISAS_TOO_MANY;
7189 return true;
7190 }
7191
7192 static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
7193 {
7194 if (!ENABLE_ARCH_6) {
7195 return false;
7196 }
7197 if (a->E != (s->be_data == MO_BE)) {
7198 gen_helper_setend(tcg_env);
7199 s->base.is_jmp = DISAS_UPDATE_EXIT;
7200 }
7201 return true;
7202 }
7203
7204 /*
7205 * Preload instructions
7206 * All are nops, contingent on the appropriate arch level.
7207 */
7208
7209 static bool trans_PLD(DisasContext *s, arg_PLD *a)
7210 {
7211 return ENABLE_ARCH_5TE;
7212 }
7213
7214 static bool trans_PLDW(DisasContext *s, arg_PLDW *a)
7215 {
7216 return arm_dc_feature(s, ARM_FEATURE_V7MP);
7217 }
7218
7219 static bool trans_PLI(DisasContext *s, arg_PLI *a)
7220 {
7221 return ENABLE_ARCH_7;
7222 }
7223
7224 /*
7225 * If-then
7226 */
7227
7228 static bool trans_IT(DisasContext *s, arg_IT *a)
7229 {
7230 int cond_mask = a->cond_mask;
7231
7232 /*
7233 * No actual code generated for this insn, just setup state.
7234 *
7235 * Combinations of firstcond and mask which set up an 0b1111
7236 * condition are UNPREDICTABLE; we take the CONSTRAINED
7237 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
7238 * i.e. both meaning "execute always".
7239 */
7240 s->condexec_cond = (cond_mask >> 4) & 0xe;
7241 s->condexec_mask = cond_mask & 0x1f;
7242 return true;
7243 }
7244
7245 /* v8.1M CSEL/CSINC/CSNEG/CSINV */
7246 static bool trans_CSEL(DisasContext *s, arg_CSEL *a)
7247 {
7248 TCGv_i32 rn, rm;
7249 DisasCompare c;
7250
7251 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
7252 return false;
7253 }
7254
7255 if (a->rm == 13) {
7256 /* SEE "Related encodings" (MVE shifts) */
7257 return false;
7258 }
7259
7260 if (a->rd == 13 || a->rd == 15 || a->rn == 13 || a->fcond >= 14) {
7261 /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
7262 return false;
7263 }
7264
7265 /* In this insn input reg fields of 0b1111 mean "zero", not "PC" */
7266 rn = tcg_temp_new_i32();
7267 rm = tcg_temp_new_i32();
7268 if (a->rn == 15) {
7269 tcg_gen_movi_i32(rn, 0);
7270 } else {
7271 load_reg_var(s, rn, a->rn);
7272 }
7273 if (a->rm == 15) {
7274 tcg_gen_movi_i32(rm, 0);
7275 } else {
7276 load_reg_var(s, rm, a->rm);
7277 }
7278
7279 switch (a->op) {
7280 case 0: /* CSEL */
7281 break;
7282 case 1: /* CSINC */
7283 tcg_gen_addi_i32(rm, rm, 1);
7284 break;
7285 case 2: /* CSINV */
7286 tcg_gen_not_i32(rm, rm);
7287 break;
7288 case 3: /* CSNEG */
7289 tcg_gen_neg_i32(rm, rm);
7290 break;
7291 default:
7292 g_assert_not_reached();
7293 }
7294
7295 arm_test_cc(&c, a->fcond);
7296 tcg_gen_movcond_i32(c.cond, rn, c.value, tcg_constant_i32(0), rn, rm);
7297
7298 store_reg(s, a->rd, rn);
7299 return true;
7300 }
7301
7302 /*
7303 * Legacy decoder.
7304 */
7305
7306 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7307 {
7308 unsigned int cond = insn >> 28;
7309
7310 /* M variants do not implement ARM mode; this must raise the INVSTATE
7311 * UsageFault exception.
7312 */
7313 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7314 gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
7315 return;
7316 }
7317
7318 if (s->pstate_il) {
7319 /*
7320 * Illegal execution state. This has priority over BTI
7321 * exceptions, but comes after instruction abort exceptions.
7322 */
7323 gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
7324 return;
7325 }
7326
7327 if (cond == 0xf) {
7328 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7329 * choose to UNDEF. In ARMv5 and above the space is used
7330 * for miscellaneous unconditional instructions.
7331 */
7332 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
7333 unallocated_encoding(s);
7334 return;
7335 }
7336
7337 /* Unconditional instructions. */
7338 /* TODO: Perhaps merge these into one decodetree output file. */
7339 if (disas_a32_uncond(s, insn) ||
7340 disas_vfp_uncond(s, insn) ||
7341 disas_neon_dp(s, insn) ||
7342 disas_neon_ls(s, insn) ||
7343 disas_neon_shared(s, insn)) {
7344 return;
7345 }
7346 /* fall back to legacy decoder */
7347
7348 if ((insn & 0x0e000f00) == 0x0c000100) {
7349 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7350 /* iWMMXt register transfer. */
7351 if (extract32(s->c15_cpar, 1, 1)) {
7352 if (!disas_iwmmxt_insn(s, insn)) {
7353 return;
7354 }
7355 }
7356 }
7357 }
7358 goto illegal_op;
7359 }
7360 if (cond != 0xe) {
7361 /* if not always execute, we generate a conditional jump to
7362 next instruction */
7363 arm_skip_unless(s, cond);
7364 }
7365
7366 /* TODO: Perhaps merge these into one decodetree output file. */
7367 if (disas_a32(s, insn) ||
7368 disas_vfp(s, insn)) {
7369 return;
7370 }
7371 /* fall back to legacy decoder */
7372 /* TODO: convert xscale/iwmmxt decoder to decodetree ?? */
7373 if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7374 if (((insn & 0x0c000e00) == 0x0c000000)
7375 && ((insn & 0x03000000) != 0x03000000)) {
7376 /* Coprocessor insn, coprocessor 0 or 1 */
7377 disas_xscale_insn(s, insn);
7378 return;
7379 }
7380 }
7381
7382 illegal_op:
7383 unallocated_encoding(s);
7384 }
7385
7386 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
7387 {
7388 /*
7389 * Return true if this is a 16 bit instruction. We must be precise
7390 * about this (matching the decode).
7391 */
7392 if ((insn >> 11) < 0x1d) {
7393 /* Definitely a 16-bit instruction */
7394 return true;
7395 }
7396
7397 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
7398 * first half of a 32-bit Thumb insn. Thumb-1 cores might
7399 * end up actually treating this as two 16-bit insns, though,
7400 * if it's half of a bl/blx pair that might span a page boundary.
7401 */
7402 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
7403 arm_dc_feature(s, ARM_FEATURE_M)) {
7404 /* Thumb2 cores (including all M profile ones) always treat
7405 * 32-bit insns as 32-bit.
7406 */
7407 return false;
7408 }
7409
7410 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
7411 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
7412 * is not on the next page; we merge this into a 32-bit
7413 * insn.
7414 */
7415 return false;
7416 }
7417 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
7418 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
7419 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
7420 * -- handle as single 16 bit insn
7421 */
7422 return true;
7423 }
7424
7425 /* Translate a 32-bit thumb instruction. */
7426 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
7427 {
7428 /*
7429 * ARMv6-M supports a limited subset of Thumb2 instructions.
7430 * Other Thumb1 architectures allow only 32-bit
7431 * combined BL/BLX prefix and suffix.
7432 */
7433 if (arm_dc_feature(s, ARM_FEATURE_M) &&
7434 !arm_dc_feature(s, ARM_FEATURE_V7)) {
7435 int i;
7436 bool found = false;
7437 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
7438 0xf3b08040 /* dsb */,
7439 0xf3b08050 /* dmb */,
7440 0xf3b08060 /* isb */,
7441 0xf3e08000 /* mrs */,
7442 0xf000d000 /* bl */};
7443 static const uint32_t armv6m_mask[] = {0xffe0d000,
7444 0xfff0d0f0,
7445 0xfff0d0f0,
7446 0xfff0d0f0,
7447 0xffe0d000,
7448 0xf800d000};
7449
7450 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
7451 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
7452 found = true;
7453 break;
7454 }
7455 }
7456 if (!found) {
7457 goto illegal_op;
7458 }
7459 } else if ((insn & 0xf800e800) != 0xf000e800) {
7460 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
7461 unallocated_encoding(s);
7462 return;
7463 }
7464 }
7465
7466 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7467 /*
7468 * NOCP takes precedence over any UNDEF for (almost) the
7469 * entire wide range of coprocessor-space encodings, so check
7470 * for it first before proceeding to actually decode eg VFP
7471 * insns. This decode also handles the few insns which are
7472 * in copro space but do not have NOCP checks (eg VLLDM, VLSTM).
7473 */
7474 if (disas_m_nocp(s, insn)) {
7475 return;
7476 }
7477 }
7478
7479 if ((insn & 0xef000000) == 0xef000000) {
7480 /*
7481 * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
7482 * transform into
7483 * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
7484 */
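        /*
         * Illustrative sketch (example values, not from the original
         * source): T32 0xef012345 (bit 28 clear) becomes A32 0xf2012345,
         * while T32 0xff012345 (bit 28 set) becomes A32 0xf3012345 --
         * bit 28 of the T32 word supplies the "p" bit at A32 bit 24,
         * and the top nibble is forced to 0b1111.
         */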
        uint32_t a32_insn = (insn & 0xe2ffffff) |
            ((insn & (1 << 28)) >> 4) | (1 << 28);

        if (disas_neon_dp(s, a32_insn)) {
            return;
        }
    }

    if ((insn & 0xff100000) == 0xf9000000) {
        /*
         * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
         * transform into
         * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
         */
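        /*
         * Illustrative sketch (example value, not from the original
         * source): T32 0xf9212345 keeps its low 24 bits and has its
         * top byte rewritten from 0xf9 to 0xf4, giving A32 0xf4212345.
         */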
        uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;

        if (disas_neon_ls(s, a32_insn)) {
            return;
        }
    }

    /*
     * TODO: Perhaps merge these into one decodetree output file.
     * Note disas_vfp is written for a32 with cond field in the
     * top nibble. The t32 encoding requires 0xe in the top nibble.
     */
    if (disas_t32(s, insn) ||
        disas_vfp_uncond(s, insn) ||
        disas_neon_shared(s, insn) ||
        disas_mve(s, insn) ||
        ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
        return;
    }

 illegal_op:
    unallocated_encoding(s);
}

static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
    if (!disas_t16(s, insn)) {
        unallocated_encoding(s);
    }
}

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->base.pc_next might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->base.pc_next is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b);

    return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}

static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cs);
    ARMCPU *cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->condjmp = 0;
    dc->pc_save = dc->base.pc_first;
    dc->aarch64 = false;
    dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB);
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    condexec = EX_TBFLAG_AM32(tb_flags, CONDEXEC);
    /*
     * the CONDEXEC TB flags are CPSR bits [15:10][26:25]. On A-profile this
     * is always the IT bits. On M-profile, some of the reserved encodings
     * of IT are used instead to indicate either ICI or ECI, which
     * indicate partial progress of a restartable insn that was interrupted
     * partway through by an exception:
     *  * if CONDEXEC[3:0] != 0b0000 : CONDEXEC is IT bits
     *  * if CONDEXEC[3:0] == 0b0000 : CONDEXEC is ICI or ECI bits
     * In all cases CONDEXEC == 0 means "not in IT block or restartable
     * insn, behave normally".
     */
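    /*
     * Illustrative note (ours, not from the original source): a CONDEXEC
     * value of 0x81 therefore decodes below as IT bits with cond 0b1000
     * and mask nibble 0b0001, while 0x20 on M-profile decodes as ECI/ICI
     * value 0b0010 with no IT block active.
     */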
    dc->eci = dc->condexec_mask = dc->condexec_cond = 0;
    dc->eci_handled = false;
    if (condexec & 0xf) {
        dc->condexec_mask = (condexec & 0xf) << 1;
        dc->condexec_cond = condexec >> 4;
    } else {
        if (arm_feature(env, ARM_FEATURE_M)) {
            dc->eci = condexec >> 4;
        }
    }

    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
    dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);

    if (arm_feature(env, ARM_FEATURE_M)) {
        dc->vfp_enabled = 1;
        dc->be_data = MO_TE;
        dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER);
        dc->v8m_secure = EX_TBFLAG_M32(tb_flags, SECURE);
        dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK);
        dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG);
        dc->v7m_new_fp_ctxt_needed =
            EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED);
        dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT);
        dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED);
    } else {
        dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B);
        dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE);
        dc->ns = EX_TBFLAG_A32(tb_flags, NS);
        dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN);
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR);
        } else {
            dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
            dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
        }
        dc->sme_trap_nonstreaming =
            EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
    }
    dc->lse2 = false; /* applies only to aarch64 */
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA. Bound the number of insns to execute
       to those left on the page. */
    if (!dc->thumb) {
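        /*
         * Arithmetic sketch (our own explanatory note): TARGET_PAGE_MASK
         * is a negative value with the page-offset bits clear, so
         * (pc_first | TARGET_PAGE_MASK), read as a signed value, equals
         * page_offset - TARGET_PAGE_SIZE; negating it gives the bytes
         * remaining on the page, and dividing by 4 converts that into a
         * count of fixed-size A32 instructions.
         */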
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    cpu_V0 = tcg_temp_new_i64();
    cpu_V1 = tcg_temp_new_i64();
    cpu_M0 = tcg_temp_new_i64();
}

static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        store_cpu_field_constant(0, condexec_bits);
    }
}

static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    /*
     * The ECI/ICI bits share PSR bits with the IT bits, so we
     * need to reconstitute the bits from the split-out DisasContext
     * fields here.
     */
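    /*
     * Packing sketch (our own explanatory note): dc->eci already holds
     * ECI/ICI shifted down by 4, so e.g. an ECI value of 0b0101 is
     * recorded as condexec_bits 0b0101_0000; because its low nibble is
     * zero, arm_tr_init_disas_context will later decode it back into
     * dc->eci rather than into IT bits.
     */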
    uint32_t condexec_bits;
    target_ulong pc_arg = dc->base.pc_next;

    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    if (dc->eci) {
        condexec_bits = dc->eci << 4;
    } else {
        condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
    }
    tcg_gen_insn_start(pc_arg, condexec_bits, 0);
    dc->insn_start_updated = false;
}

static bool arm_check_kernelpage(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page. */
    if (dc->base.pc_next >= 0xffff0000) {
        /* We always get here via a jump, so we know we are not in a
         * conditional execution block.
         */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif
    return false;
}

static bool arm_check_ss_active(DisasContext *dc)
{
    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_swstep_exception(dc, 0, 0);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}

static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp &&
        (dc->base.is_jmp == DISAS_NEXT || dc->base.is_jmp == DISAS_TOO_MANY)) {
        if (dc->pc_save != dc->condlabel.pc_save) {
            gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save);
        }
        gen_set_label(dc->condlabel.label);
        dc->condjmp = 0;
    }
}

static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    uint32_t pc = dc->base.pc_next;
    unsigned int insn;

    /* Singlestep exceptions have the highest priority. */
    if (arm_check_ss_active(dc)) {
        dc->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault. This has priority over the instruction abort
         * that we would receive from a translation fault via arm_ldl_code
         * (or the execution of the kernelpage entrypoint). This should only
         * be possible after an indirect branch, at the start of the TB.
         */
        assert(dc->base.num_insns == 1);
        gen_helper_exception_pc_alignment(tcg_env, tcg_constant_vaddr(pc));
        dc->base.is_jmp = DISAS_NORETURN;
        dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    if (arm_check_kernelpage(dc)) {
        dc->base.pc_next = pc + 4;
        return;
    }

    dc->pc_curr = pc;
    insn = arm_ldl_code(env, &dc->base, pc, dc->sctlr_b);
    dc->insn = insn;
    dc->base.pc_next = pc + 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA. We performed the cross-page check
       in init_disas_context by adjusting max_insns. */
}

static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}

static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    uint32_t pc = dc->base.pc_next;
    uint32_t insn;
    bool is_16bit;
    /* TCG op to rewind to if this turns out to be an invalid ECI state */
    TCGOp *insn_eci_rewind = NULL;
    target_ulong insn_eci_pc_save = -1;

    /* Misaligned thumb PC is architecturally impossible. */
    assert((dc->base.pc_next & 1) == 0);

    if (arm_check_ss_active(dc) || arm_check_kernelpage(dc)) {
        dc->base.pc_next = pc + 2;
        return;
    }

    dc->pc_curr = pc;
    insn = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
    pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b);
        insn = insn << 16 | insn2;
        pc += 2;
    }
    dc->base.pc_next = pc;
    dc->insn = insn;

    if (dc->pstate_il) {
        /*
         * Illegal execution state. This has priority over BTI
         * exceptions, but comes after instruction abort exceptions.
         */
        gen_exception_insn(dc, 0, EXCP_UDEF, syn_illegalstate());
        return;
    }

    if (dc->eci) {
        /*
         * For M-profile continuable instructions, ECI/ICI handling
         * falls into these cases:
         *  - interrupt-continuable instructions
         *     These are the various load/store multiple insns (both
         *     integer and fp). The ICI bits indicate the register
         *     where the load/store can resume. We make the IMPDEF
         *     choice to always do "instruction restart", ie ignore
         *     the ICI value and always execute the ldm/stm from the
         *     start. So all we need to do is zero PSR.ICI if the
         *     insn executes.
         *  - MVE instructions subject to beat-wise execution
         *     Here the ECI bits indicate which beats have already been
         *     executed, and we must honour this. Each insn of this
         *     type will handle it correctly. We will update PSR.ECI
         *     in the helper function for the insn (some ECI values
         *     mean that the following insn also has been partially
         *     executed).
         *  - Special cases which don't advance ECI
         *     The insns LE, LETP and BKPT leave the ECI/ICI state
         *     bits untouched.
         *  - all other insns (the common case)
         *     Non-zero ECI/ICI means an INVSTATE UsageFault.
         *     We place a rewind-marker here. Insns in the previous
         *     three categories will set a flag in the DisasContext.
         *     If the flag isn't set after we call disas_thumb_insn()
         *     or disas_thumb2_insn() then we know we have a "some other
         *     insn" case. We will rewind to the marker (ie throwing away
         *     all the generated code) and instead emit "take exception".
         */
        insn_eci_rewind = tcg_last_op();
        insn_eci_pc_save = dc->pc_save;
    }

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn. Note that both 0xe and 0xf mean
         * "always"; 0xf is not "never".
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition. */
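    /*
     * Worked example (our own note, using the usual IT semantics): for an
     * "ITE EQ" block the first insn executes if EQ; the shift below then
     * copies the top bit of the mask into the low bit of the condition,
     * so the second insn sees NE (0b0001). For "ITT EQ" that bit is 0 and
     * the second insn stays EQ.
     */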
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    if (dc->eci && !dc->eci_handled) {
        /*
         * Insn wasn't valid for ECI/ICI at all: undo what we
         * just generated and instead emit an exception
         */
        tcg_remove_ops_after(insn_eci_rewind);
        dc->pc_save = insn_eci_pc_save;
        dc->condjmp = 0;
        gen_exception_insn(dc, 0, EXCP_INVSTATE, syn_uncategorized());
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA. Stop translation when the next insn
     * will touch a new page. This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
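    /*
     * Concrete illustration (our own note, assuming 4 KiB pages): if
     * pc_next sits at page offset 0xffe, the first test below fails but
     * the second passes, so insn_crosses_page() peeks at that halfword
     * to decide whether a 32-bit insn would spill onto the next page.
     */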
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception_el(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE_EXIT:
        case DISAS_UPDATE_NOCHAIN:
            gen_update_pc(dc, curr_insn_len(dc));
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, curr_insn_len(dc));
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_update_pc(dc, curr_insn_len(dc));
            /* fall through */
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE_EXIT:
            gen_update_pc(dc, curr_insn_len(dc));
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(tcg_env, tcg_constant_i32(curr_insn_len(dc)));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(tcg_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(tcg_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
            break;
        case DISAS_HVC:
            gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception_el(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        set_disas_label(dc, dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(dc->ss_active)) {
            gen_update_pc(dc, curr_insn_len(dc));
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, curr_insn_len(dc));
        }
    }
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start = arm_tr_tb_start,
    .insn_start = arm_tr_insn_start,
    .translate_insn = arm_tr_translate_insn,
    .tb_stop = arm_tr_tb_stop,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start = arm_tr_tb_start,
    .insn_start = arm_tr_insn_start,
    .translate_insn = thumb_tr_translate_insn,
    .tb_stop = arm_tr_tb_stop,
};

void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc = { };
    const TranslatorOps *ops = &arm_translator_ops;
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(tb);

    if (EX_TBFLAG_AM32(tb_flags, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (EX_TBFLAG_ANY(tb_flags, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(cpu, tb, max_insns, pc, host_pc, ops, &dc.base);
}