/*
 * ARM translation: AArch32 Neon instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "translate.h"
#include "translate-a32.h"

/* Include the generated Neon decoder */
#include "decode-neon-dp.c.inc"
#include "decode-neon-ls.c.inc"
#include "decode-neon-shared.c.inc"

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, tcg_env, vfp_reg_offset(dp, reg));
    return ret;
}

static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, tcg_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, tcg_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, tcg_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, tcg_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, tcg_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, tcg_env, offset);
        break;
    case MO_UQ:
        tcg_gen_ld_i64(var, tcg_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, tcg_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, tcg_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, tcg_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, tcg_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, tcg_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, tcg_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, tcg_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static bool do_neon_ddda(DisasContext *s, int q, int vd, int vn, int vm,
                         int data, gen_helper_gvec_4 *fn_gvec)
{
    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
        return false;
    }

    /*
     * UNDEF accesses to odd registers for each bit of Q.
     * Q will be 0b111 for all Q-reg instructions, otherwise
     * when we have mixed Q- and D-reg inputs.
     */
    if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    int opr_sz = q ? 16 : 8;
    tcg_gen_gvec_4_ool(vfp_reg_offset(1, vd),
                       vfp_reg_offset(1, vn),
                       vfp_reg_offset(1, vm),
                       vfp_reg_offset(1, vd),
                       opr_sz, opr_sz, data, fn_gvec);
    return true;
}

static bool do_neon_ddda_env(DisasContext *s, int q, int vd, int vn, int vm,
                             int data, gen_helper_gvec_4_ptr *fn_gvec)
{
    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
        return false;
    }

    /*
     * UNDEF accesses to odd registers for each bit of Q.
     * Q will be 0b111 for all Q-reg instructions, otherwise
     * when we have mixed Q- and D-reg inputs.
     */
    if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    int opr_sz = q ? 16 : 8;
    tcg_gen_gvec_4_ptr(vfp_reg_offset(1, vd),
                       vfp_reg_offset(1, vn),
                       vfp_reg_offset(1, vm),
                       vfp_reg_offset(1, vd),
                       tcg_env,
                       opr_sz, opr_sz, data, fn_gvec);
    return true;
}

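/*
 * Note: callers of this helper pass FPST_STD or FPST_STD_F16 because
 * AArch32 Neon floating-point always uses the "standard FPSCR" value
 * (flush-to-zero, default NaN), not the application FPSCR.
 */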
static bool do_neon_ddda_fpst(DisasContext *s, int q, int vd, int vn, int vm,
                              int data, ARMFPStatusFlavour fp_flavour,
                              gen_helper_gvec_4_ptr *fn_gvec_ptr)
{
    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
        return false;
    }

    /*
     * UNDEF accesses to odd registers for each bit of Q.
     * Q will be 0b111 for all Q-reg instructions, otherwise
     * when we have mixed Q- and D-reg inputs.
     */
    if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    int opr_sz = q ? 16 : 8;
    TCGv_ptr fpst = fpstatus_ptr(fp_flavour);

    tcg_gen_gvec_4_ptr(vfp_reg_offset(1, vd),
                       vfp_reg_offset(1, vn),
                       vfp_reg_offset(1, vm),
                       vfp_reg_offset(1, vd),
                       fpst, opr_sz, opr_sz, data, fn_gvec_ptr);
    return true;
}

static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
{
    if (!dc_isar_feature(aa32_vcma, s)) {
        return false;
    }
    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
        return do_neon_ddda_fpst(s, a->q * 7, a->vd, a->vn, a->vm, a->rot,
                                 FPST_STD_F16, gen_helper_gvec_fcmlah);
    }
    return do_neon_ddda_fpst(s, a->q * 7, a->vd, a->vn, a->vm, a->rot,
                             FPST_STD, gen_helper_gvec_fcmlas);
}

static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
{
    int opr_sz;
    TCGv_ptr fpst;
    gen_helper_gvec_3_ptr *fn_gvec_ptr;

    if (!dc_isar_feature(aa32_vcma, s)
        || (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s))) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
    fn_gvec_ptr = (a->size == MO_16) ?
        gen_helper_gvec_fcaddh : gen_helper_gvec_fcadds;
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(1, a->vn),
                       vfp_reg_offset(1, a->vm),
                       fpst, opr_sz, opr_sz, a->rot,
                       fn_gvec_ptr);
    return true;
}

static bool trans_VSDOT(DisasContext *s, arg_VSDOT *a)
{
    if (!dc_isar_feature(aa32_dp, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_sdot_b);
}

static bool trans_VUDOT(DisasContext *s, arg_VUDOT *a)
{
    if (!dc_isar_feature(aa32_dp, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_udot_b);
}

static bool trans_VUSDOT(DisasContext *s, arg_VUSDOT *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_usdot_b);
}

static bool trans_VDOT_b16(DisasContext *s, arg_VDOT_b16 *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_env(s, a->q * 7, a->vd, a->vn, a->vm, 0,
                            gen_helper_gvec_bfdot);
}

static bool trans_VFML(DisasContext *s, arg_VFML *a)
{
    int opr_sz;

    if (!dc_isar_feature(aa32_fhm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        (a->vd & 0x10)) {
        return false;
    }

    if (a->vd & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(a->q, a->vn),
                       vfp_reg_offset(a->q, a->vm),
                       tcg_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
                       gen_helper_gvec_fmlal_a32);
    return true;
}

static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
{
    int data = (a->index << 2) | a->rot;

    if (!dc_isar_feature(aa32_vcma, s)) {
        return false;
    }
    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
        return do_neon_ddda_fpst(s, a->q * 6, a->vd, a->vn, a->vm, data,
                                 FPST_STD_F16, gen_helper_gvec_fcmlah_idx);
    }
    return do_neon_ddda_fpst(s, a->q * 6, a->vd, a->vn, a->vm, data,
                             FPST_STD, gen_helper_gvec_fcmlas_idx);
}

static bool trans_VSDOT_scalar(DisasContext *s, arg_VSDOT_scalar *a)
{
    if (!dc_isar_feature(aa32_dp, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                        gen_helper_gvec_sdot_idx_b);
}

static bool trans_VUDOT_scalar(DisasContext *s, arg_VUDOT_scalar *a)
{
    if (!dc_isar_feature(aa32_dp, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                        gen_helper_gvec_udot_idx_b);
}

static bool trans_VUSDOT_scalar(DisasContext *s, arg_VUSDOT_scalar *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                        gen_helper_gvec_usdot_idx_b);
}

static bool trans_VSUDOT_scalar(DisasContext *s, arg_VSUDOT_scalar *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                        gen_helper_gvec_sudot_idx_b);
}

static bool trans_VDOT_b16_scal(DisasContext *s, arg_VDOT_b16_scal *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_env(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
                            gen_helper_gvec_bfdot_idx);
}

static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
{
    int opr_sz;

    if (!dc_isar_feature(aa32_fhm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) {
        return false;
    }

    if (a->vd & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(a->q, a->vn),
                       vfp_reg_offset(a->q, a->rm),
                       tcg_env, opr_sz, opr_sz,
                       (a->index << 2) | a->s, /* is_2 == 0 */
                       gen_helper_gvec_fmlal_idx_a32);
    return true;
}

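/*
 * Indexed by the 'itype' field of a load/store-multiple insn:
 * 'interleave' is the number of elements in each structure (so it
 * distinguishes VLD1/VLD2/VLD3/VLD4), 'spacing' is the register
 * stride between the elements of one structure, and 'nregs' is how
 * many times that pattern repeats across consecutive registers;
 * the total number of D registers touched is nregs * interleave.
 */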
static struct {
    int nregs;
    int interleave;
    int spacing;
} const neon_ls_element_type[11] = {
    {1, 4, 1},
    {1, 4, 2},
    {4, 1, 1},
    {2, 2, 2},
    {1, 3, 1},
    {1, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {1, 2, 1},
    {1, 2, 2},
    {2, 1, 1}
};

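/*
 * Post-indexed writeback of the base register: rm == 15 means no
 * writeback, rm == 13 means "increment by the transfer size", and
 * any other rm means "increment by the value of register rm".
 */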
static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn,
                                      int stride)
{
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
        }
        store_reg(s, rn, base);
    }
}

static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
{
    /* Neon load/store multiple structures */
    int nregs, interleave, spacing, reg, n;
    MemOp mop, align, endian;
    int mmu_idx = get_mem_index(s);
    int size = a->size;
    TCGv_i64 tmp64;
    TCGv_i32 addr;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }
    if (a->itype > 10) {
        return false;
    }
    /* Catch UNDEF cases for bad values of align field */
    switch (a->itype & 0xc) {
    case 4:
        if (a->align >= 2) {
            return false;
        }
        break;
    case 8:
        if (a->align == 3) {
            return false;
        }
        break;
    default:
        break;
    }
    nregs = neon_ls_element_type[a->itype].nregs;
    interleave = neon_ls_element_type[a->itype].interleave;
    spacing = neon_ls_element_type[a->itype].spacing;
    if (size == 3 && (interleave | spacing) != 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For our purposes, bytes are always little-endian. */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    /* Enforce alignment requested by the instruction */
    if (a->align) {
        align = pow2_align(a->align + 2); /* 4 << a->align */
    } else {
        align = s->align_mem ? MO_ALIGN : 0;
    }

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    if (interleave == 1 && endian == MO_LE) {
        /* Retain any natural alignment. */
        if (align == MO_ALIGN) {
            align = pow2_align(size);
        }
        size = 3;
    }

    tmp64 = tcg_temp_new_i64();
    addr = tcg_temp_new_i32();
    load_reg_var(s, addr, a->rn);

    mop = endian | size | align;
    for (reg = 0; reg < nregs; reg++) {
        for (n = 0; n < 8 >> size; n++) {
            int xs;
            for (xs = 0; xs < interleave; xs++) {
                int tt = a->vd + reg + spacing * xs;

                if (a->l) {
                    gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx, mop);
                    neon_store_element64(tt, n, size, tmp64);
                } else {
                    neon_load_element64(tmp64, tt, n, size);
                    gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop);
                }
                tcg_gen_addi_i32(addr, addr, 1 << size);

                /* Subsequent memory operations inherit alignment */
                mop &= ~MO_AMASK;
            }
        }
    }

    gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
    return true;
}

static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
{
    /* Neon load single structure to all lanes */
    int reg, stride, vec_size;
    int vd = a->vd;
    int size = a->size;
    int nregs = a->n + 1;
    TCGv_i32 addr, tmp;
    MemOp mop, align;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    align = 0;
    if (size == 3) {
        if (nregs != 4 || a->a == 0) {
            return false;
        }
        /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
        size = MO_32;
        align = MO_ALIGN_16;
    } else if (a->a) {
        switch (nregs) {
        case 1:
            if (size == 0) {
                return false;
            }
            align = MO_ALIGN;
            break;
        case 2:
            align = pow2_align(size + 1);
            break;
        case 3:
            return false;
        case 4:
            if (size == 2) {
                align = pow2_align(3);
            } else {
                align = pow2_align(size + 2);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * VLD1 to all lanes: T bit indicates how many Dregs to write.
     * VLD2/3/4 to all lanes: T bit indicates register stride.
     */
    stride = a->t ? 2 : 1;
    vec_size = nregs == 1 ? stride * 8 : 8;
    mop = size | align;
    tmp = tcg_temp_new_i32();
    addr = tcg_temp_new_i32();
    load_reg_var(s, addr, a->rn);
    for (reg = 0; reg < nregs; reg++) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
        if ((vd & 1) && vec_size == 16) {
            /*
             * We cannot write 16 bytes at once because the
             * destination is unaligned.
             */
            tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
                                 8, 8, tmp);
            tcg_gen_gvec_mov(0, neon_full_reg_offset(vd + 1),
                             neon_full_reg_offset(vd), 8, 8);
        } else {
            tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
                                 vec_size, vec_size, tmp);
        }
        tcg_gen_addi_i32(addr, addr, 1 << size);
        vd += stride;

        /* Subsequent memory operations inherit alignment */
        mop &= ~MO_AMASK;
    }

    gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs);

    return true;
}

static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
{
    /* Neon load/store single structure to one lane */
    int reg;
    int nregs = a->n + 1;
    int vd = a->vd;
    TCGv_i32 addr, tmp;
    MemOp mop;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    /* Catch the UNDEF cases. This is unavoidably a bit messy. */
    switch (nregs) {
    case 1:
        if (a->stride != 1) {
            return false;
        }
        if (((a->align & (1 << a->size)) != 0) ||
            (a->size == 2 && (a->align == 1 || a->align == 2))) {
            return false;
        }
        break;
    case 2:
        if (a->size == 2 && (a->align & 2) != 0) {
            return false;
        }
        break;
    case 3:
        if (a->align != 0) {
            return false;
        }
        break;
    case 4:
        if (a->size == 2 && a->align == 3) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }
    if ((vd + a->stride * (nregs - 1)) > 31) {
        /*
         * Attempts to write off the end of the register file are
         * UNPREDICTABLE; we choose to UNDEF because otherwise we would
         * access off the end of the array that holds the register data.
         */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Pick up SCTLR settings */
    mop = finalize_memop(s, a->size);

    if (a->align) {
        MemOp align_op;

        switch (nregs) {
        case 1:
            /* For VLD1, use natural alignment. */
            align_op = MO_ALIGN;
            break;
        case 2:
            /* For VLD2, use double alignment. */
            align_op = pow2_align(a->size + 1);
            break;
        case 4:
            if (a->size == MO_32) {
                /*
                 * For VLD4.32, align = 1 is double alignment, align = 2 is
                 * quad alignment; align = 3 is rejected above.
                 */
                align_op = pow2_align(a->size + a->align);
            } else {
                /* For VLD4.8 and VLD4.16, we want quad alignment. */
                align_op = pow2_align(a->size + 2);
            }
            break;
        default:
            /* For VLD3, the alignment field is zero and rejected above. */
            g_assert_not_reached();
        }

        mop = (mop & ~MO_AMASK) | align_op;
    }

    tmp = tcg_temp_new_i32();
    addr = tcg_temp_new_i32();
    load_reg_var(s, addr, a->rn);

    for (reg = 0; reg < nregs; reg++) {
        if (a->l) {
            gen_aa32_ld_internal_i32(s, tmp, addr, get_mem_index(s), mop);
            neon_store_element(vd, a->reg_idx, a->size, tmp);
        } else { /* Store */
            neon_load_element(tmp, vd, a->reg_idx, a->size);
            gen_aa32_st_internal_i32(s, tmp, addr, get_mem_index(s), mop);
        }
        vd += a->stride;
        tcg_gen_addi_i32(addr, addr, 1 << a->size);

        /* Subsequent memory operations inherit alignment */
        mop &= ~MO_AMASK;
    }

    gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs);

    return true;
}

static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
{
    int vec_size = a->q ? 16 : 8;
    int rd_ofs = neon_full_reg_offset(a->vd);
    int rn_ofs = neon_full_reg_offset(a->vn);
    int rm_ofs = neon_full_reg_offset(a->vm);

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fn(a->size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
    return true;
}

#define DO_3SAME(INSN, FUNC)                                            \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        return do_3same(s, a, FUNC);                                    \
    }

DO_3SAME(VADD, tcg_gen_gvec_add)
DO_3SAME(VSUB, tcg_gen_gvec_sub)
DO_3SAME(VAND, tcg_gen_gvec_and)
DO_3SAME(VBIC, tcg_gen_gvec_andc)
DO_3SAME(VORR, tcg_gen_gvec_or)
DO_3SAME(VORN, tcg_gen_gvec_orc)
DO_3SAME(VEOR, tcg_gen_gvec_xor)
DO_3SAME(VSHL_S, gen_gvec_sshl)
DO_3SAME(VSHL_U, gen_gvec_ushl)
DO_3SAME(VQADD_S, gen_gvec_sqadd_qc)
DO_3SAME(VQADD_U, gen_gvec_uqadd_qc)
DO_3SAME(VQSUB_S, gen_gvec_sqsub_qc)
DO_3SAME(VQSUB_U, gen_gvec_uqsub_qc)
DO_3SAME(VRSHL_S, gen_gvec_srshl)
DO_3SAME(VRSHL_U, gen_gvec_urshl)
DO_3SAME(VQSHL_S, gen_neon_sqshl)
DO_3SAME(VQSHL_U, gen_neon_uqshl)
DO_3SAME(VQRSHL_S, gen_neon_sqrshl)
DO_3SAME(VQRSHL_U, gen_neon_uqrshl)

/* These insns are all gvec_bitsel but with the inputs in various orders. */
#define DO_3SAME_BITSEL(INSN, O1, O2, O3)                               \
    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,         \
                                uint32_t rn_ofs, uint32_t rm_ofs,       \
                                uint32_t oprsz, uint32_t maxsz)         \
    {                                                                   \
        tcg_gen_gvec_bitsel(vece, rd_ofs, O1, O2, O3, oprsz, maxsz);    \
    }                                                                   \
    DO_3SAME(INSN, gen_##INSN##_3s)

DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)

#define DO_3SAME_NO_SZ_3(INSN, FUNC)                                    \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (a->size == 3) {                                             \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, FUNC);                                    \
    }

DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
DO_3SAME_NO_SZ_3(VMUL, tcg_gen_gvec_mul)
DO_3SAME_NO_SZ_3(VMLA, gen_gvec_mla)
DO_3SAME_NO_SZ_3(VMLS, gen_gvec_mls)
DO_3SAME_NO_SZ_3(VTST, gen_gvec_cmtst)
DO_3SAME_NO_SZ_3(VABD_S, gen_gvec_sabd)
DO_3SAME_NO_SZ_3(VABA_S, gen_gvec_saba)
DO_3SAME_NO_SZ_3(VABD_U, gen_gvec_uabd)
DO_3SAME_NO_SZ_3(VABA_U, gen_gvec_uaba)
DO_3SAME_NO_SZ_3(VPADD, gen_gvec_addp)
DO_3SAME_NO_SZ_3(VPMAX_S, gen_gvec_smaxp)
DO_3SAME_NO_SZ_3(VPMIN_S, gen_gvec_sminp)
DO_3SAME_NO_SZ_3(VPMAX_U, gen_gvec_umaxp)
DO_3SAME_NO_SZ_3(VPMIN_U, gen_gvec_uminp)
DO_3SAME_NO_SZ_3(VHADD_S, gen_gvec_shadd)
DO_3SAME_NO_SZ_3(VHADD_U, gen_gvec_uhadd)
DO_3SAME_NO_SZ_3(VHSUB_S, gen_gvec_shsub)
DO_3SAME_NO_SZ_3(VHSUB_U, gen_gvec_uhsub)
DO_3SAME_NO_SZ_3(VRHADD_S, gen_gvec_srhadd)
DO_3SAME_NO_SZ_3(VRHADD_U, gen_gvec_urhadd)

#define DO_3SAME_CMP(INSN, COND)                                        \
    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,         \
                                uint32_t rn_ofs, uint32_t rm_ofs,       \
                                uint32_t oprsz, uint32_t maxsz)         \
    {                                                                   \
        tcg_gen_gvec_cmp(COND, vece, rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz); \
    }                                                                   \
    DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)

DO_3SAME_CMP(VCGT_S, TCG_COND_GT)
DO_3SAME_CMP(VCGT_U, TCG_COND_GTU)
DO_3SAME_CMP(VCGE_S, TCG_COND_GE)
DO_3SAME_CMP(VCGE_U, TCG_COND_GEU)
DO_3SAME_CMP(VCEQ, TCG_COND_EQ)

#define WRAP_OOL_FN(WRAPNAME, FUNC)                                        \
    static void WRAPNAME(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,  \
                         uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz)  \
    {                                                                      \
        tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, 0, FUNC); \
    }

WRAP_OOL_FN(gen_VMUL_p_3s, gen_helper_gvec_pmul_b)

static bool trans_VMUL_p_3s(DisasContext *s, arg_3same *a)
{
    if (a->size != 0) {
        return false;
    }
    return do_3same(s, a, gen_VMUL_p_3s);
}

#define DO_VQRDMLAH(INSN, FUNC)                                         \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (!dc_isar_feature(aa32_rdm, s)) {                            \
            return false;                                               \
        }                                                               \
        if (a->size != 1 && a->size != 2) {                             \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, FUNC);                                    \
    }

DO_VQRDMLAH(VQRDMLAH, gen_gvec_sqrdmlah_qc)
DO_VQRDMLAH(VQRDMLSH, gen_gvec_sqrdmlsh_qc)

#define DO_SHA1(NAME, FUNC)                                             \
    WRAP_OOL_FN(gen_##NAME##_3s, FUNC)                                  \
    static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (!dc_isar_feature(aa32_sha1, s)) {                           \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, gen_##NAME##_3s);                         \
    }

DO_SHA1(SHA1C, gen_helper_crypto_sha1c)
DO_SHA1(SHA1P, gen_helper_crypto_sha1p)
DO_SHA1(SHA1M, gen_helper_crypto_sha1m)
DO_SHA1(SHA1SU0, gen_helper_crypto_sha1su0)

#define DO_SHA2(NAME, FUNC)                                             \
    WRAP_OOL_FN(gen_##NAME##_3s, FUNC)                                  \
    static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (!dc_isar_feature(aa32_sha2, s)) {                           \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, gen_##NAME##_3s);                         \
    }

DO_SHA2(SHA256H, gen_helper_crypto_sha256h)
DO_SHA2(SHA256H2, gen_helper_crypto_sha256h2)
DO_SHA2(SHA256SU1, gen_helper_crypto_sha256su1)

/*
 * Some helper functions need to be passed the tcg_env. In order
 * to use those with the gvec APIs like tcg_gen_gvec_3() we need
 * to create wrapper functions whose prototype is a NeonGenTwoOpFn()
 * and which call a NeonGenTwoOpEnvFn().
 */
#define WRAP_ENV_FN(WRAPNAME, FUNC)                                     \
    static void WRAPNAME(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m)            \
    {                                                                   \
        FUNC(d, tcg_env, n, m);                                         \
    }

#define DO_3SAME_VQDMULH(INSN, FUNC)                                    \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    { return a->size >= 1 && a->size <= 2 && do_3same(s, a, FUNC); }

DO_3SAME_VQDMULH(VQDMULH, gen_gvec_sqdmulh_qc)
DO_3SAME_VQDMULH(VQRDMULH, gen_gvec_sqrdmulh_qc)

#define WRAP_FP_GVEC(WRAPNAME, FPST, FUNC)                              \
    static void WRAPNAME(unsigned vece, uint32_t rd_ofs,                \
                         uint32_t rn_ofs, uint32_t rm_ofs,              \
                         uint32_t oprsz, uint32_t maxsz)                \
    {                                                                   \
        TCGv_ptr fpst = fpstatus_ptr(FPST);                             \
        tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst,                \
                           oprsz, maxsz, 0, FUNC);                      \
    }

#define DO_3S_FP_GVEC(INSN,SFUNC,HFUNC)                                 \
    WRAP_FP_GVEC(gen_##INSN##_fp32_3s, FPST_STD, SFUNC)                 \
    WRAP_FP_GVEC(gen_##INSN##_fp16_3s, FPST_STD_F16, HFUNC)             \
    static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a)     \
    {                                                                   \
        if (a->size == MO_16) {                                         \
            if (!dc_isar_feature(aa32_fp16_arith, s)) {                 \
                return false;                                           \
            }                                                           \
            return do_3same(s, a, gen_##INSN##_fp16_3s);                \
        }                                                               \
        return do_3same(s, a, gen_##INSN##_fp32_3s);                    \
    }


DO_3S_FP_GVEC(VADD, gen_helper_gvec_fadd_s, gen_helper_gvec_fadd_h)
DO_3S_FP_GVEC(VSUB, gen_helper_gvec_fsub_s, gen_helper_gvec_fsub_h)
DO_3S_FP_GVEC(VABD, gen_helper_gvec_fabd_s, gen_helper_gvec_fabd_h)
DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s, gen_helper_gvec_fmul_h)
DO_3S_FP_GVEC(VCEQ, gen_helper_gvec_fceq_s, gen_helper_gvec_fceq_h)
DO_3S_FP_GVEC(VCGE, gen_helper_gvec_fcge_s, gen_helper_gvec_fcge_h)
DO_3S_FP_GVEC(VCGT, gen_helper_gvec_fcgt_s, gen_helper_gvec_fcgt_h)
DO_3S_FP_GVEC(VACGE, gen_helper_gvec_facge_s, gen_helper_gvec_facge_h)
DO_3S_FP_GVEC(VACGT, gen_helper_gvec_facgt_s, gen_helper_gvec_facgt_h)
DO_3S_FP_GVEC(VMAX, gen_helper_gvec_fmax_s, gen_helper_gvec_fmax_h)
DO_3S_FP_GVEC(VMIN, gen_helper_gvec_fmin_s, gen_helper_gvec_fmin_h)
DO_3S_FP_GVEC(VMLA, gen_helper_gvec_fmla_s, gen_helper_gvec_fmla_h)
DO_3S_FP_GVEC(VMLS, gen_helper_gvec_fmls_s, gen_helper_gvec_fmls_h)
DO_3S_FP_GVEC(VFMA, gen_helper_gvec_vfma_s, gen_helper_gvec_vfma_h)
DO_3S_FP_GVEC(VFMS, gen_helper_gvec_vfms_s, gen_helper_gvec_vfms_h)
DO_3S_FP_GVEC(VRECPS, gen_helper_gvec_recps_nf_s, gen_helper_gvec_recps_nf_h)
DO_3S_FP_GVEC(VRSQRTS, gen_helper_gvec_rsqrts_nf_s, gen_helper_gvec_rsqrts_nf_h)
DO_3S_FP_GVEC(VPADD, gen_helper_gvec_faddp_s, gen_helper_gvec_faddp_h)
DO_3S_FP_GVEC(VPMAX, gen_helper_gvec_fmaxp_s, gen_helper_gvec_fmaxp_h)
DO_3S_FP_GVEC(VPMIN, gen_helper_gvec_fminp_s, gen_helper_gvec_fminp_h)

WRAP_FP_GVEC(gen_VMAXNM_fp32_3s, FPST_STD, gen_helper_gvec_fmaxnum_s)
WRAP_FP_GVEC(gen_VMAXNM_fp16_3s, FPST_STD_F16, gen_helper_gvec_fmaxnum_h)
WRAP_FP_GVEC(gen_VMINNM_fp32_3s, FPST_STD, gen_helper_gvec_fminnum_s)
WRAP_FP_GVEC(gen_VMINNM_fp16_3s, FPST_STD_F16, gen_helper_gvec_fminnum_h)

static bool trans_VMAXNM_fp_3s(DisasContext *s, arg_3same *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
        return do_3same(s, a, gen_VMAXNM_fp16_3s);
    }
    return do_3same(s, a, gen_VMAXNM_fp32_3s);
}

static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
        return do_3same(s, a, gen_VMINNM_fp16_3s);
    }
    return do_3same(s, a, gen_VMINNM_fp32_3s);
}

static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
{
    /* Handle a 2-reg-shift insn which can be vectorized. */
    int vec_size = a->q ? 16 : 8;
    int rd_ofs = neon_full_reg_offset(a->vd);
    int rm_ofs = neon_full_reg_offset(a->vm);

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fn(a->size, rd_ofs, rm_ofs, a->shift, vec_size, vec_size);
    return true;
}

#define DO_2SH(INSN, FUNC)                                              \
    static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a)  \
    {                                                                   \
        return do_vector_2sh(s, a, FUNC);                               \
    }                                                                   \

DO_2SH(VSHL, tcg_gen_gvec_shli)
DO_2SH(VSLI, gen_gvec_sli)
DO_2SH(VSRI, gen_gvec_sri)
DO_2SH(VSRA_S, gen_gvec_ssra)
DO_2SH(VSRA_U, gen_gvec_usra)
DO_2SH(VRSHR_S, gen_gvec_srshr)
DO_2SH(VRSHR_U, gen_gvec_urshr)
DO_2SH(VRSRA_S, gen_gvec_srsra)
DO_2SH(VRSRA_U, gen_gvec_ursra)
DO_2SH(VSHR_S, gen_gvec_sshr)
DO_2SH(VSHR_U, gen_gvec_ushr)
DO_2SH(VQSHLU, gen_neon_sqshlui)
DO_2SH(VQSHL_U, gen_neon_uqshli)
DO_2SH(VQSHL_S, gen_neon_sqshli)

static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
                                NeonGenTwo64OpFn *shiftfn,
                                NeonGenOne64OpEnvFn *narrowfn)
{
    /* 2-reg-and-shift narrowing-shift operations, size == 3 case */
    TCGv_i64 constimm, rm1, rm2, rd;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vm & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * This is always a right shift, and the shiftfn is always a
     * left-shift helper, which thus needs the negated shift count.
     */
    constimm = tcg_constant_i64(-a->shift);
    rm1 = tcg_temp_new_i64();
    rm2 = tcg_temp_new_i64();
    rd = tcg_temp_new_i64();

    /* Load both inputs first to avoid potential overwrite if rm == rd */
    read_neon_element64(rm1, a->vm, 0, MO_64);
    read_neon_element64(rm2, a->vm, 1, MO_64);

    shiftfn(rm1, rm1, constimm);
    narrowfn(rd, tcg_env, rm1);
    write_neon_element64(rd, a->vd, 0, MO_32);

    shiftfn(rm2, rm2, constimm);
    narrowfn(rd, tcg_env, rm2);
    write_neon_element64(rd, a->vd, 1, MO_32);

    return true;
}

static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
                                NeonGenTwoOpFn *shiftfn,
                                NeonGenOne64OpEnvFn *narrowfn)
{
    /* 2-reg-and-shift narrowing-shift operations, size < 3 case */
    TCGv_i32 constimm, rm1, rm2, rm3, rm4;
    TCGv_i64 rtmp;
    uint32_t imm;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vm & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * This is always a right shift, and the shiftfn is always a
     * left-shift helper, which thus needs the negated shift count
     * duplicated into each lane of the immediate value.
     */
    if (a->size == 1) {
        imm = (uint16_t)(-a->shift);
        imm |= imm << 16;
    } else {
        /* size == 2 */
        imm = -a->shift;
    }
    constimm = tcg_constant_i32(imm);

    /* Load all inputs first to avoid potential overwrite */
    rm1 = tcg_temp_new_i32();
    rm2 = tcg_temp_new_i32();
    rm3 = tcg_temp_new_i32();
    rm4 = tcg_temp_new_i32();
    read_neon_element32(rm1, a->vm, 0, MO_32);
    read_neon_element32(rm2, a->vm, 1, MO_32);
    read_neon_element32(rm3, a->vm, 2, MO_32);
    read_neon_element32(rm4, a->vm, 3, MO_32);
    rtmp = tcg_temp_new_i64();

    shiftfn(rm1, rm1, constimm);
    shiftfn(rm2, rm2, constimm);

    tcg_gen_concat_i32_i64(rtmp, rm1, rm2);

    narrowfn(rtmp, tcg_env, rtmp);
    write_neon_element64(rtmp, a->vd, 0, MO_32);

    shiftfn(rm3, rm3, constimm);
    shiftfn(rm4, rm4, constimm);

    tcg_gen_concat_i32_i64(rtmp, rm3, rm4);

    narrowfn(rtmp, tcg_env, rtmp);
    write_neon_element64(rtmp, a->vd, 1, MO_32);
    return true;
}

#define DO_2SN_64(INSN, FUNC, NARROWFUNC)                               \
    static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a)  \
    {                                                                   \
        return do_2shift_narrow_64(s, a, FUNC, NARROWFUNC);             \
    }
#define DO_2SN_32(INSN, FUNC, NARROWFUNC)                               \
    static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a)  \
    {                                                                   \
        return do_2shift_narrow_32(s, a, FUNC, NARROWFUNC);             \
    }

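/*
 * Adapters giving the plain (non-saturating) narrowing operations the
 * NeonGenOne64OpEnvFn signature expected above; the env argument is
 * unused. Plain VSHRN can use the unsigned shift and narrow helpers
 * even for signed inputs, because only the low half of each shifted
 * element is kept and those bits do not depend on the sign extension.
 */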
static void gen_neon_narrow_u32(TCGv_i64 dest, TCGv_ptr env, TCGv_i64 src)
{
    tcg_gen_ext32u_i64(dest, src);
}

static void gen_neon_narrow_u16(TCGv_i64 dest, TCGv_ptr env, TCGv_i64 src)
{
    gen_helper_neon_narrow_u16(dest, src);
}

static void gen_neon_narrow_u8(TCGv_i64 dest, TCGv_ptr env, TCGv_i64 src)
{
    gen_helper_neon_narrow_u8(dest, src);
}

DO_2SN_64(VSHRN_64, gen_ushl_i64, gen_neon_narrow_u32)
DO_2SN_32(VSHRN_32, gen_ushl_i32, gen_neon_narrow_u16)
DO_2SN_32(VSHRN_16, gen_helper_neon_shl_u16, gen_neon_narrow_u8)

DO_2SN_64(VRSHRN_64, gen_helper_neon_rshl_u64, gen_neon_narrow_u32)
DO_2SN_32(VRSHRN_32, gen_helper_neon_rshl_u32, gen_neon_narrow_u16)
DO_2SN_32(VRSHRN_16, gen_helper_neon_rshl_u16, gen_neon_narrow_u8)

DO_2SN_64(VQSHRUN_64, gen_sshl_i64, gen_helper_neon_unarrow_sat32)
DO_2SN_32(VQSHRUN_32, gen_sshl_i32, gen_helper_neon_unarrow_sat16)
DO_2SN_32(VQSHRUN_16, gen_helper_neon_shl_s16, gen_helper_neon_unarrow_sat8)

DO_2SN_64(VQRSHRUN_64, gen_helper_neon_rshl_s64, gen_helper_neon_unarrow_sat32)
DO_2SN_32(VQRSHRUN_32, gen_helper_neon_rshl_s32, gen_helper_neon_unarrow_sat16)
DO_2SN_32(VQRSHRUN_16, gen_helper_neon_rshl_s16, gen_helper_neon_unarrow_sat8)
DO_2SN_64(VQSHRN_S64, gen_sshl_i64, gen_helper_neon_narrow_sat_s32)
DO_2SN_32(VQSHRN_S32, gen_sshl_i32, gen_helper_neon_narrow_sat_s16)
DO_2SN_32(VQSHRN_S16, gen_helper_neon_shl_s16, gen_helper_neon_narrow_sat_s8)

DO_2SN_64(VQRSHRN_S64, gen_helper_neon_rshl_s64, gen_helper_neon_narrow_sat_s32)
DO_2SN_32(VQRSHRN_S32, gen_helper_neon_rshl_s32, gen_helper_neon_narrow_sat_s16)
DO_2SN_32(VQRSHRN_S16, gen_helper_neon_rshl_s16, gen_helper_neon_narrow_sat_s8)

DO_2SN_64(VQSHRN_U64, gen_ushl_i64, gen_helper_neon_narrow_sat_u32)
DO_2SN_32(VQSHRN_U32, gen_ushl_i32, gen_helper_neon_narrow_sat_u16)
DO_2SN_32(VQSHRN_U16, gen_helper_neon_shl_u16, gen_helper_neon_narrow_sat_u8)

DO_2SN_64(VQRSHRN_U64, gen_helper_neon_rshl_u64, gen_helper_neon_narrow_sat_u32)
DO_2SN_32(VQRSHRN_U32, gen_helper_neon_rshl_u32, gen_helper_neon_narrow_sat_u16)
DO_2SN_32(VQRSHRN_U16, gen_helper_neon_rshl_u16, gen_helper_neon_narrow_sat_u8)

static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
                         NeonGenWidenFn *widenfn, bool u)
{
    TCGv_i64 tmp;
    TCGv_i32 rm0, rm1;
    uint64_t widen_mask = 0;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vd & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * This is a widen-and-shift operation. The shift is always less
     * than the width of the source type, so after widening the input
     * vector we can simply shift the whole 64-bit widened register,
     * and then clear the potential overflow bits resulting from left
     * bits of the narrow input appearing as right bits of the left
     * neighbour narrow input. Calculate a mask of bits to clear.
     */
    if ((a->shift != 0) && (a->size < 2 || u)) {
        int esize = 8 << a->size;
        widen_mask = MAKE_64BIT_MASK(0, esize);
        widen_mask >>= esize - a->shift;
        widen_mask = dup_const(a->size + 1, widen_mask);
    }

    rm0 = tcg_temp_new_i32();
    rm1 = tcg_temp_new_i32();
    read_neon_element32(rm0, a->vm, 0, MO_32);
    read_neon_element32(rm1, a->vm, 1, MO_32);
    tmp = tcg_temp_new_i64();

    widenfn(tmp, rm0);
    if (a->shift != 0) {
        tcg_gen_shli_i64(tmp, tmp, a->shift);
        tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
    }
    write_neon_element64(tmp, a->vd, 0, MO_64);

    widenfn(tmp, rm1);
    if (a->shift != 0) {
        tcg_gen_shli_i64(tmp, tmp, a->shift);
        tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
    }
    write_neon_element64(tmp, a->vd, 1, MO_64);
    return true;
}

static bool trans_VSHLL_S_2sh(DisasContext *s, arg_2reg_shift *a)
{
    static NeonGenWidenFn * const widenfn[] = {
        gen_helper_neon_widen_s8,
        gen_helper_neon_widen_s16,
        tcg_gen_ext_i32_i64,
    };
    return do_vshll_2sh(s, a, widenfn[a->size], false);
}

static bool trans_VSHLL_U_2sh(DisasContext *s, arg_2reg_shift *a)
{
    static NeonGenWidenFn * const widenfn[] = {
        gen_helper_neon_widen_u8,
        gen_helper_neon_widen_u16,
        tcg_gen_extu_i32_i64,
    };
    return do_vshll_2sh(s, a, widenfn[a->size], true);
}

static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
                      gen_helper_gvec_2_ptr *fn)
{
    /* FP operations in 2-reg-and-shift group */
    int vec_size = a->q ? 16 : 8;
    int rd_ofs = neon_full_reg_offset(a->vd);
    int rm_ofs = neon_full_reg_offset(a->vm);
    TCGv_ptr fpst;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
    tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, vec_size, vec_size, a->shift, fn);
    return true;
}

#define DO_FP_2SH(INSN, FUNC)                                           \
    static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a)  \
    {                                                                   \
        return do_fp_2sh(s, a, FUNC);                                   \
    }

DO_FP_2SH(VCVT_SF, gen_helper_gvec_vcvt_sf)
DO_FP_2SH(VCVT_UF, gen_helper_gvec_vcvt_uf)
DO_FP_2SH(VCVT_FS, gen_helper_gvec_vcvt_rz_fs)
DO_FP_2SH(VCVT_FU, gen_helper_gvec_vcvt_rz_fu)

DO_FP_2SH(VCVT_SH, gen_helper_gvec_vcvt_sh)
DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh)
DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_rz_hs)
DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_rz_hu)

static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
                        GVecGen2iFn *fn)
{
    uint64_t imm;
    int reg_ofs, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (a->vd & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    reg_ofs = neon_full_reg_offset(a->vd);
    vec_size = a->q ? 16 : 8;
    imm = asimd_imm_const(a->imm, a->cmode, a->op);

    fn(MO_64, reg_ofs, reg_ofs, imm, vec_size, vec_size);
    return true;
}

static void gen_VMOV_1r(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, c);
}

static bool trans_Vimm_1r(DisasContext *s, arg_1reg_imm *a)
{
    /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
    GVecGen2iFn *fn;

    if ((a->cmode & 1) && a->cmode < 12) {
        /* for op=1, the imm will be inverted, so BIC becomes AND. */
        fn = a->op ? tcg_gen_gvec_andi : tcg_gen_gvec_ori;
    } else {
        /* There is one unallocated cmode/op combination in this space */
        if (a->cmode == 15 && a->op == 1) {
            return false;
        }
        fn = gen_VMOV_1r;
    }
    return do_1reg_imm(s, a, fn);
}

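/*
 * Convention for the src1_mop/src2_mop arguments below: a value >= 0
 * is a MemOp and means "read the input directly as a 64-bit element,
 * sign- or zero-extending as that MemOp requires"; -1 means "read a
 * 32-bit element and widen it with widenfn".
 */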
static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
                           NeonGenWidenFn *widenfn,
                           NeonGenTwo64OpFn *opfn,
                           int src1_mop, int src2_mop)
{
    /* 3-regs different lengths, prewidening case (VADDL/VSUBL/VADDW/VSUBW) */
    TCGv_i64 rn0_64, rn1_64, rm_64;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!opfn) {
        /* size == 3 case, which is an entirely different insn group */
        return false;
    }

    if ((a->vd & 1) || (src1_mop == MO_UQ && (a->vn & 1))) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rn0_64 = tcg_temp_new_i64();
    rn1_64 = tcg_temp_new_i64();
    rm_64 = tcg_temp_new_i64();

    if (src1_mop >= 0) {
        read_neon_element64(rn0_64, a->vn, 0, src1_mop);
    } else {
        TCGv_i32 tmp = tcg_temp_new_i32();
        read_neon_element32(tmp, a->vn, 0, MO_32);
        widenfn(rn0_64, tmp);
    }
    if (src2_mop >= 0) {
        read_neon_element64(rm_64, a->vm, 0, src2_mop);
    } else {
        TCGv_i32 tmp = tcg_temp_new_i32();
        read_neon_element32(tmp, a->vm, 0, MO_32);
        widenfn(rm_64, tmp);
    }

    opfn(rn0_64, rn0_64, rm_64);

    /*
     * Load second pass inputs before storing the first pass result, to
     * avoid incorrect results if a narrow input overlaps with the result.
     */
    if (src1_mop >= 0) {
        read_neon_element64(rn1_64, a->vn, 1, src1_mop);
    } else {
        TCGv_i32 tmp = tcg_temp_new_i32();
        read_neon_element32(tmp, a->vn, 1, MO_32);
        widenfn(rn1_64, tmp);
    }
    if (src2_mop >= 0) {
        read_neon_element64(rm_64, a->vm, 1, src2_mop);
    } else {
        TCGv_i32 tmp = tcg_temp_new_i32();
        read_neon_element32(tmp, a->vm, 1, MO_32);
        widenfn(rm_64, tmp);
    }

    write_neon_element64(rn0_64, a->vd, 0, MO_64);

    opfn(rn1_64, rn1_64, rm_64);
    write_neon_element64(rn1_64, a->vd, 1, MO_64);

    return true;
}

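/*
 * DO_PREWIDEN parameters: S is the sign letter selecting the widen
 * helpers, OP the add/sub operation, SRC1WIDE is true for the
 * VADDW/VSUBW forms whose first source is already double-width, and
 * SIGN is the extension applied when a 32-bit input is read directly
 * as a 64-bit element (the MO_32 case needs no separate widen helper).
 */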
#define DO_PREWIDEN(INSN, S, OP, SRC1WIDE, SIGN)                        \
    static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a)        \
    {                                                                   \
        static NeonGenWidenFn * const widenfn[] = {                     \
            gen_helper_neon_widen_##S##8,                               \
            gen_helper_neon_widen_##S##16,                              \
            NULL, NULL,                                                 \
        };                                                              \
        static NeonGenTwo64OpFn * const addfn[] = {                     \
            tcg_gen_vec_##OP##16_i64,                                   \
            tcg_gen_vec_##OP##32_i64,                                   \
            tcg_gen_##OP##_i64,                                         \
            NULL,                                                       \
        };                                                              \
        int narrow_mop = a->size == MO_32 ? MO_32 | SIGN : -1;          \
        return do_prewiden_3d(s, a, widenfn[a->size], addfn[a->size],   \
                              SRC1WIDE ? MO_UQ : narrow_mop,            \
                              narrow_mop);                              \
    }

DO_PREWIDEN(VADDL_S, s, add, false, MO_SIGN)
DO_PREWIDEN(VADDL_U, u, add, false, 0)
DO_PREWIDEN(VSUBL_S, s, sub, false, MO_SIGN)
DO_PREWIDEN(VSUBL_U, u, sub, false, 0)
DO_PREWIDEN(VADDW_S, s, add, true, MO_SIGN)
DO_PREWIDEN(VADDW_U, u, add, true, 0)
DO_PREWIDEN(VSUBW_S, s, sub, true, MO_SIGN)
DO_PREWIDEN(VSUBW_U, u, sub, true, 0)

static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
                         NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn)
{
    /* 3-regs different lengths, narrowing (VADDHN/VSUBHN/VRADDHN/VRSUBHN) */
    TCGv_i64 rn_64, rm_64;
    TCGv_i32 rd0, rd1;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!opfn || !narrowfn) {
        /* size == 3 case, which is an entirely different insn group */
        return false;
    }

    if ((a->vn | a->vm) & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rn_64 = tcg_temp_new_i64();
    rm_64 = tcg_temp_new_i64();
    rd0 = tcg_temp_new_i32();
    rd1 = tcg_temp_new_i32();

    read_neon_element64(rn_64, a->vn, 0, MO_64);
    read_neon_element64(rm_64, a->vm, 0, MO_64);

    opfn(rn_64, rn_64, rm_64);

    narrowfn(rd0, rn_64);

    read_neon_element64(rn_64, a->vn, 1, MO_64);
    read_neon_element64(rm_64, a->vm, 1, MO_64);

    opfn(rn_64, rn_64, rm_64);

    narrowfn(rd1, rn_64);

    write_neon_element32(rd0, a->vd, 0, MO_32);
    write_neon_element32(rd1, a->vd, 1, MO_32);

    return true;
}

#define DO_NARROW_3D(INSN, OP, NARROWTYPE, EXTOP)                       \
    static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a)        \
    {                                                                   \
        static NeonGenTwo64OpFn * const addfn[] = {                     \
            tcg_gen_vec_##OP##16_i64,                                   \
            tcg_gen_vec_##OP##32_i64,                                   \
            tcg_gen_##OP##_i64,                                         \
            NULL,                                                       \
        };                                                              \
        static NeonGenNarrowFn * const narrowfn[] = {                   \
            gen_helper_neon_##NARROWTYPE##_high_u8,                     \
            gen_helper_neon_##NARROWTYPE##_high_u16,                    \
            EXTOP,                                                      \
            NULL,                                                       \
        };                                                              \
        return do_narrow_3d(s, a, addfn[a->size], narrowfn[a->size]);   \
    }

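/*
 * For the rounding variants, add half the weight of the discarded
 * low half (1 << 31 in this 64-to-32 case) before taking the high half.
 */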
static void gen_narrow_round_high_u32(TCGv_i32 rd, TCGv_i64 rn)
{
    tcg_gen_addi_i64(rn, rn, 1u << 31);
    tcg_gen_extrh_i64_i32(rd, rn);
}

DO_NARROW_3D(VADDHN, add, narrow, tcg_gen_extrh_i64_i32)
DO_NARROW_3D(VSUBHN, sub, narrow, tcg_gen_extrh_i64_i32)
DO_NARROW_3D(VRADDHN, add, narrow_round, gen_narrow_round_high_u32)
DO_NARROW_3D(VRSUBHN, sub, narrow_round, gen_narrow_round_high_u32)

static bool do_long_3d(DisasContext *s, arg_3diff *a,
                       NeonGenTwoOpWidenFn *opfn,
                       NeonGenTwo64OpFn *accfn)
{
    /*
     * 3-regs different lengths, long operations.
     * These perform an operation on two inputs that returns a double-width
     * result, and then possibly perform an accumulation operation of
     * that result into the double-width destination.
     */
    TCGv_i64 rd0, rd1, tmp;
    TCGv_i32 rn, rm;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!opfn) {
        /* size == 3 case, which is an entirely different insn group */
        return false;
    }

    if (a->vd & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rd0 = tcg_temp_new_i64();
    rd1 = tcg_temp_new_i64();

    rn = tcg_temp_new_i32();
    rm = tcg_temp_new_i32();
    read_neon_element32(rn, a->vn, 0, MO_32);
    read_neon_element32(rm, a->vm, 0, MO_32);
    opfn(rd0, rn, rm);

    read_neon_element32(rn, a->vn, 1, MO_32);
    read_neon_element32(rm, a->vm, 1, MO_32);
    opfn(rd1, rn, rm);

    /* Don't store results until after all loads: they might overlap */
    if (accfn) {
        tmp = tcg_temp_new_i64();
        read_neon_element64(tmp, a->vd, 0, MO_64);
        accfn(rd0, tmp, rd0);
        read_neon_element64(tmp, a->vd, 1, MO_64);
        accfn(rd1, tmp, rd1);
    }

    write_neon_element64(rd0, a->vd, 0, MO_64);
    write_neon_element64(rd1, a->vd, 1, MO_64);

    return true;
}

static bool trans_VABDL_S_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        gen_helper_neon_abdl_s16,
        gen_helper_neon_abdl_s32,
        gen_helper_neon_abdl_s64,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], NULL);
}

static bool trans_VABDL_U_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        gen_helper_neon_abdl_u16,
        gen_helper_neon_abdl_u32,
        gen_helper_neon_abdl_u64,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], NULL);
}

static bool trans_VABAL_S_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        gen_helper_neon_abdl_s16,
        gen_helper_neon_abdl_s32,
        gen_helper_neon_abdl_s64,
        NULL,
    };
    static NeonGenTwo64OpFn * const addfn[] = {
        tcg_gen_vec_add16_i64,
        tcg_gen_vec_add32_i64,
        tcg_gen_add_i64,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
}

static bool trans_VABAL_U_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        gen_helper_neon_abdl_u16,
        gen_helper_neon_abdl_u32,
        gen_helper_neon_abdl_u64,
        NULL,
    };
    static NeonGenTwo64OpFn * const addfn[] = {
        tcg_gen_vec_add16_i64,
        tcg_gen_vec_add32_i64,
        tcg_gen_add_i64,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
}

static void gen_mull_s32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();

    tcg_gen_muls2_i32(lo, hi, rn, rm);
    tcg_gen_concat_i32_i64(rd, lo, hi);
}

static void gen_mull_u32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();

    tcg_gen_mulu2_i32(lo, hi, rn, rm);
    tcg_gen_concat_i32_i64(rd, lo, hi);
}

static bool trans_VMULL_S_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        gen_helper_neon_mull_s8,
        gen_helper_neon_mull_s16,
        gen_mull_s32,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], NULL);
}

static bool trans_VMULL_U_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        gen_helper_neon_mull_u8,
        gen_helper_neon_mull_u16,
        gen_mull_u32,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], NULL);
}

#define DO_VMLAL(INSN,MULL,ACC)                                         \
    static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a)        \
    {                                                                   \
        static NeonGenTwoOpWidenFn * const opfn[] = {                   \
            gen_helper_neon_##MULL##8,                                  \
            gen_helper_neon_##MULL##16,                                 \
            gen_##MULL##32,                                             \
            NULL,                                                       \
        };                                                              \
        static NeonGenTwo64OpFn * const accfn[] = {                     \
            tcg_gen_vec_##ACC##16_i64,                                  \
            tcg_gen_vec_##ACC##32_i64,                                  \
            tcg_gen_##ACC##_i64,                                        \
            NULL,                                                       \
        };                                                              \
        return do_long_3d(s, a, opfn[a->size], accfn[a->size]);         \
    }

DO_VMLAL(VMLAL_S,mull_s,add)
DO_VMLAL(VMLAL_U,mull_u,add)
DO_VMLAL(VMLSL_S,mull_s,sub)
DO_VMLAL(VMLSL_U,mull_u,sub)

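/*
 * VQDMULL doubles the product; this is implemented by feeding the
 * product into the saturating-add helper as both addends (rd = rd + rd
 * with saturation), which also sets QC if the doubling overflows.
 */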
1856 static void gen_VQDMULL_16(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
1857 {
1858 gen_helper_neon_mull_s16(rd, rn, rm);
1859 gen_helper_neon_addl_saturate_s32(rd, tcg_env, rd, rd);
1860 }
1861
gen_VQDMULL_32(TCGv_i64 rd,TCGv_i32 rn,TCGv_i32 rm)1862 static void gen_VQDMULL_32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
1863 {
1864 gen_mull_s32(rd, rn, rm);
1865 gen_helper_neon_addl_saturate_s64(rd, tcg_env, rd, rd);
1866 }
1867
static bool trans_VQDMULL_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_VQDMULL_16,
        gen_VQDMULL_32,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], NULL);
}

static void gen_VQDMLAL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    gen_helper_neon_addl_saturate_s32(rd, tcg_env, rn, rm);
}

static void gen_VQDMLAL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    gen_helper_neon_addl_saturate_s64(rd, tcg_env, rn, rm);
}

static bool trans_VQDMLAL_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_VQDMULL_16,
        gen_VQDMULL_32,
        NULL,
    };
    static NeonGenTwo64OpFn * const accfn[] = {
        NULL,
        gen_VQDMLAL_acc_16,
        gen_VQDMLAL_acc_32,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
}

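/*
 * VQDMLSL must subtract the doubled product from the accumulator:
 * negating the product first lets us reuse the saturating-add helper
 * for the subtraction.
 */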
static void gen_VQDMLSL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    gen_helper_neon_negl_u32(rm, rm);
    gen_helper_neon_addl_saturate_s32(rd, tcg_env, rn, rm);
}

static void gen_VQDMLSL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_neg_i64(rm, rm);
    gen_helper_neon_addl_saturate_s64(rd, tcg_env, rn, rm);
}

static bool trans_VQDMLSL_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_VQDMULL_16,
        gen_VQDMULL_32,
        NULL,
    };
    static NeonGenTwo64OpFn * const accfn[] = {
        NULL,
        gen_VQDMLSL_acc_16,
        gen_VQDMLSL_acc_32,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
}

static bool trans_VMULL_P_3d(DisasContext *s, arg_3diff *a)
{
    gen_helper_gvec_3 *fn_gvec;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (a->vd & 1) {
        return false;
    }

    switch (a->size) {
    case 0:
        /* 8x8->16 polynomial multiply: always present with Neon */
        fn_gvec = gen_helper_neon_pmull_h;
        break;
    case 2:
        /* 64x64->128 polynomial multiply: needs the pmull extension */
        if (!dc_isar_feature(aa32_pmull, s)) {
            return false;
        }
        fn_gvec = gen_helper_gvec_pmull_q;
        break;
    default:
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tcg_gen_gvec_3_ool(neon_full_reg_offset(a->vd),
                       neon_full_reg_offset(a->vn),
                       neon_full_reg_offset(a->vm),
                       16, 16, 0, fn_gvec);
    return true;
}

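/*
 * Splat one 16-bit half of a 32-bit value into both halves, used to
 * broadcast a 16-bit scalar operand across a working register.
 */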
static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
}

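/*
 * Load a scalar operand into a 32-bit temp, duplicating a 16-bit
 * scalar into both halves. 'reg' encodes both the register and the
 * lane: for 16-bit scalars bits [2:0] are Dm, bit 3 selects the half
 * and bit 4 the 32-bit word; for 32-bit scalars bits [3:0] are Dm and
 * bit 4 selects the word.
 */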
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (size == MO_16) {
        read_neon_element32(tmp, reg & 7, reg >> 4, MO_32);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        read_neon_element32(tmp, reg & 15, reg >> 4, MO_32);
    }
    return tmp;
}

static bool do_2scalar(DisasContext *s, arg_2scalar *a,
                       NeonGenTwoOpFn *opfn, NeonGenTwoOpFn *accfn)
{
    /*
     * Two registers and a scalar: perform an operation between
     * the input elements and the scalar, and then possibly
     * perform an accumulation operation of that result into the
     * destination.
     */
    TCGv_i32 scalar, tmp;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!opfn) {
        /* Bad size (including size == 3, which is a different insn group) */
        return false;
    }

    if (a->q && ((a->vd | a->vn) & 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    scalar = neon_get_scalar(a->size, a->vm);
    tmp = tcg_temp_new_i32();

    for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
        read_neon_element32(tmp, a->vn, pass, MO_32);
        opfn(tmp, tmp, scalar);
        if (accfn) {
            TCGv_i32 rd = tcg_temp_new_i32();
            read_neon_element32(rd, a->vd, pass, MO_32);
            accfn(tmp, rd, tmp);
        }
        write_neon_element32(tmp, a->vd, pass, MO_32);
    }
    return true;
}

static bool trans_VMUL_2sc(DisasContext *s, arg_2scalar *a)
{
    /*
     * Integer multiply (and the add/sub accumulations below) give the
     * same low-half result for signed and unsigned inputs, so the _u
     * helpers serve for all element signedness.
     */
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        gen_helper_neon_mul_u16,
        tcg_gen_mul_i32,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], NULL);
}

static bool trans_VMLA_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        gen_helper_neon_mul_u16,
        tcg_gen_mul_i32,
        NULL,
    };
    static NeonGenTwoOpFn * const accfn[] = {
        NULL,
        gen_helper_neon_add_u16,
        tcg_gen_add_i32,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
}

static bool trans_VMLS_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        gen_helper_neon_mul_u16,
        tcg_gen_mul_i32,
        NULL,
    };
    static NeonGenTwoOpFn * const accfn[] = {
        NULL,
        gen_helper_neon_sub_u16,
        tcg_gen_sub_i32,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
}

static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
                              gen_helper_gvec_3_ptr *fn)
{
    /* Two registers and a scalar, using gvec */
    int vec_size = a->q ? 16 : 8;
    int rd_ofs = neon_full_reg_offset(a->vd);
    int rn_ofs = neon_full_reg_offset(a->vn);
    int rm_ofs;
    int idx;
    TCGv_ptr fpstatus;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!fn) {
        /* Bad size (including size == 3, which is a different insn group) */
        return false;
    }

    if (a->q && ((a->vd | a->vn) & 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * a->vm is M:Vm, which encodes both register and index: the low
     * (size + 2) bits are the register number and the bits above
     * those are the lane index.
     */
    idx = extract32(a->vm, a->size + 2, 2);
    a->vm = extract32(a->vm, 0, a->size + 2);
    rm_ofs = neon_full_reg_offset(a->vm);

    fpstatus = fpstatus_ptr(a->size == 1 ? FPST_STD_F16 : FPST_STD);
    tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpstatus,
                       vec_size, vec_size, idx, fn);
    return true;
}

#define DO_VMUL_F_2sc(NAME, FUNC)                                       \
    static bool trans_##NAME##_F_2sc(DisasContext *s, arg_2scalar *a)   \
    {                                                                   \
        static gen_helper_gvec_3_ptr * const opfn[] = {                 \
            NULL,                                                       \
            gen_helper_##FUNC##_h,                                      \
            gen_helper_##FUNC##_s,                                      \
            NULL,                                                       \
        };                                                              \
        if (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s)) { \
            return false;                                               \
        }                                                               \
        return do_2scalar_fp_vec(s, a, opfn[a->size]);                  \
    }

DO_VMUL_F_2sc(VMUL, gvec_fmul_idx)
DO_VMUL_F_2sc(VMLA, gvec_fmla_nf_idx)
DO_VMUL_F_2sc(VMLS, gvec_fmls_nf_idx)

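/*
 * The qdmulh/qrdmulh helpers need tcg_env to report saturation, so
 * wrap them to the plain two-operand signature that do_2scalar()
 * expects.
 */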
WRAP_ENV_FN(gen_VQDMULH_16, gen_helper_neon_qdmulh_s16)
WRAP_ENV_FN(gen_VQDMULH_32, gen_helper_neon_qdmulh_s32)
WRAP_ENV_FN(gen_VQRDMULH_16, gen_helper_neon_qrdmulh_s16)
WRAP_ENV_FN(gen_VQRDMULH_32, gen_helper_neon_qrdmulh_s32)

static bool trans_VQDMULH_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        gen_VQDMULH_16,
        gen_VQDMULH_32,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], NULL);
}

static bool trans_VQRDMULH_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        gen_VQRDMULH_16,
        gen_VQRDMULH_32,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], NULL);
}

static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a,
                            NeonGenThreeOpEnvFn *opfn)
{
    /*
     * VQRDMLAH/VQRDMLSH: this is like do_2scalar, but the opfn
     * performs a kind of fused op-then-accumulate using a helper
     * function that takes all of rd, rn and the scalar at once.
     */
    TCGv_i32 scalar, rn, rd;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!dc_isar_feature(aa32_rdm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!opfn) {
        /* Bad size (including size == 3, which is a different insn group) */
        return false;
    }

    if (a->q && ((a->vd | a->vn) & 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    scalar = neon_get_scalar(a->size, a->vm);
    rn = tcg_temp_new_i32();
    rd = tcg_temp_new_i32();

    for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
        read_neon_element32(rn, a->vn, pass, MO_32);
        read_neon_element32(rd, a->vd, pass, MO_32);
        opfn(rd, tcg_env, rn, scalar, rd);
        write_neon_element32(rd, a->vd, pass, MO_32);
    }
    return true;
}

static bool trans_VQRDMLAH_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenThreeOpEnvFn * const opfn[] = {
        NULL,
        gen_helper_neon_qrdmlah_s16,
        gen_helper_neon_qrdmlah_s32,
        NULL,
    };
    return do_vqrdmlah_2sc(s, a, opfn[a->size]);
}

static bool trans_VQRDMLSH_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenThreeOpEnvFn * const opfn[] = {
        NULL,
        gen_helper_neon_qrdmlsh_s16,
        gen_helper_neon_qrdmlsh_s32,
        NULL,
    };
    return do_vqrdmlah_2sc(s, a, opfn[a->size]);
}

static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
                            NeonGenTwoOpWidenFn *opfn,
                            NeonGenTwo64OpFn *accfn)
{
    /*
     * Two registers and a scalar, long operations: perform an
     * operation on the input elements and the scalar which produces
     * a double-width result, and then possibly perform an accumulation
     * operation of that result into the destination.
     */
    TCGv_i32 scalar, rn;
    TCGv_i64 rn0_64, rn1_64;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!opfn) {
        /* Bad size (including size == 3, which is a different insn group) */
        return false;
    }

    if (a->vd & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    scalar = neon_get_scalar(a->size, a->vm);

    /* Load all inputs before writing any outputs, in case of overlap */
    rn = tcg_temp_new_i32();
    read_neon_element32(rn, a->vn, 0, MO_32);
    rn0_64 = tcg_temp_new_i64();
    opfn(rn0_64, rn, scalar);

    read_neon_element32(rn, a->vn, 1, MO_32);
    rn1_64 = tcg_temp_new_i64();
    opfn(rn1_64, rn, scalar);

    if (accfn) {
        TCGv_i64 t64 = tcg_temp_new_i64();
        read_neon_element64(t64, a->vd, 0, MO_64);
        accfn(rn0_64, t64, rn0_64);
        read_neon_element64(t64, a->vd, 1, MO_64);
        accfn(rn1_64, t64, rn1_64);
    }

    write_neon_element64(rn0_64, a->vd, 0, MO_64);
    write_neon_element64(rn1_64, a->vd, 1, MO_64);
    return true;
}

static bool trans_VMULL_S_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_helper_neon_mull_s16,
        gen_mull_s32,
        NULL,
    };

    return do_2scalar_long(s, a, opfn[a->size], NULL);
}

static bool trans_VMULL_U_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_helper_neon_mull_u16,
        gen_mull_u32,
        NULL,
    };

    return do_2scalar_long(s, a, opfn[a->size], NULL);
}

#define DO_VMLAL_2SC(INSN, MULL, ACC)                                   \
    static bool trans_##INSN##_2sc(DisasContext *s, arg_2scalar *a)     \
    {                                                                   \
        static NeonGenTwoOpWidenFn * const opfn[] = {                   \
            NULL,                                                       \
            gen_helper_neon_##MULL##16,                                 \
            gen_##MULL##32,                                             \
            NULL,                                                       \
        };                                                              \
        static NeonGenTwo64OpFn * const accfn[] = {                     \
            NULL,                                                       \
            tcg_gen_vec_##ACC##32_i64,                                  \
            tcg_gen_##ACC##_i64,                                        \
            NULL,                                                       \
        };                                                              \
        return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]);    \
    }

DO_VMLAL_2SC(VMLAL_S, mull_s, add)
DO_VMLAL_2SC(VMLAL_U, mull_u, add)
DO_VMLAL_2SC(VMLSL_S, mull_s, sub)
DO_VMLAL_2SC(VMLSL_U, mull_u, sub)

static bool trans_VQDMULL_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_VQDMULL_16,
        gen_VQDMULL_32,
        NULL,
    };

    return do_2scalar_long(s, a, opfn[a->size], NULL);
}

static bool trans_VQDMLAL_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_VQDMULL_16,
        gen_VQDMULL_32,
        NULL,
    };
    static NeonGenTwo64OpFn * const accfn[] = {
        NULL,
        gen_VQDMLAL_acc_16,
        gen_VQDMLAL_acc_32,
        NULL,
    };

    return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]);
}

static bool trans_VQDMLSL_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        NULL,
        gen_VQDMULL_16,
        gen_VQDMULL_32,
        NULL,
    };
    static NeonGenTwo64OpFn * const accfn[] = {
        NULL,
        gen_VQDMLSL_acc_16,
        gen_VQDMLSL_acc_32,
        NULL,
    };

    return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]);
}

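/*
 * VEXT extracts a vector of bytes starting at byte #imm of the
 * concatenation <Vm:Vn>: e.g. a D-register VEXT with #imm == 3 yields
 * bytes [10:3] of the 128-bit value Vm:Vn. This maps directly onto
 * tcg_gen_extract2_i64; the Q-register form chains two such extracts.
 */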
static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    if (a->imm > 7 && !a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (!a->q) {
        /* Extract 64 bits from <Vm:Vn> */
        TCGv_i64 left, right, dest;

        left = tcg_temp_new_i64();
        right = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        read_neon_element64(right, a->vn, 0, MO_64);
        read_neon_element64(left, a->vm, 0, MO_64);
        tcg_gen_extract2_i64(dest, right, left, a->imm * 8);
        write_neon_element64(dest, a->vd, 0, MO_64);
    } else {
        /* Extract 128 bits from <Vm+1:Vm:Vn+1:Vn> */
        TCGv_i64 left, middle, right, destleft, destright;

        left = tcg_temp_new_i64();
        middle = tcg_temp_new_i64();
        right = tcg_temp_new_i64();
        destleft = tcg_temp_new_i64();
        destright = tcg_temp_new_i64();

        if (a->imm < 8) {
            read_neon_element64(right, a->vn, 0, MO_64);
            read_neon_element64(middle, a->vn, 1, MO_64);
            tcg_gen_extract2_i64(destright, right, middle, a->imm * 8);
            read_neon_element64(left, a->vm, 0, MO_64);
            tcg_gen_extract2_i64(destleft, middle, left, a->imm * 8);
        } else {
            read_neon_element64(right, a->vn, 1, MO_64);
            read_neon_element64(middle, a->vm, 0, MO_64);
            tcg_gen_extract2_i64(destright, right, middle, (a->imm - 8) * 8);
            read_neon_element64(left, a->vm, 1, MO_64);
            tcg_gen_extract2_i64(destleft, middle, left, (a->imm - 8) * 8);
        }

        write_neon_element64(destright, a->vd, 0, MO_64);
        write_neon_element64(destleft, a->vd, 1, MO_64);
    }
    return true;
}

static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
{
    TCGv_i64 val, def;
    TCGv_i32 desc;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn + a->len + 1) > 32) {
        /*
         * This is UNPREDICTABLE; we choose to UNDEF to avoid the
         * helper function running off the end of the register file.
         */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Pack the table base register and table length into the descriptor */
    desc = tcg_constant_i32((a->vn << 2) | a->len);
    def = tcg_temp_new_i64();
    if (a->op) {
        read_neon_element64(def, a->vd, 0, MO_64);
    } else {
        tcg_gen_movi_i64(def, 0);
    }
    val = tcg_temp_new_i64();
    read_neon_element64(val, a->vm, 0, MO_64);

    gen_helper_neon_tbl(val, tcg_env, desc, val, def);
    write_neon_element64(val, a->vd, 0, MO_64);
    return true;
}

static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vd & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tcg_gen_gvec_dup_mem(a->size, neon_full_reg_offset(a->vd),
                         neon_element_offset(a->vm, a->index, a->size),
                         a->q ? 16 : 8, a->q ? 16 : 8);
    return true;
}

typedef void ZipFn(TCGv_ptr, TCGv_ptr);

static bool do_zip_uzp(DisasContext *s, arg_2misc *a,
                       ZipFn *fn)
{
    TCGv_ptr pd, pm;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (!fn) {
        /* Bad size or size/q combination */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    pd = vfp_reg_ptr(true, a->vd);
    pm = vfp_reg_ptr(true, a->vm);
    fn(pd, pm);
    return true;
}

static bool trans_VUZP(DisasContext *s, arg_2misc *a)
{
    static ZipFn * const fn[2][4] = {
        {
            gen_helper_neon_unzip8,
            gen_helper_neon_unzip16,
            NULL,
            NULL,
        }, {
            gen_helper_neon_qunzip8,
            gen_helper_neon_qunzip16,
            gen_helper_neon_qunzip32,
            NULL,
        }
    };
    return do_zip_uzp(s, a, fn[a->q][a->size]);
}

static bool trans_VZIP(DisasContext *s, arg_2misc *a)
{
    static ZipFn * const fn[2][4] = {
        {
            gen_helper_neon_zip8,
            gen_helper_neon_zip16,
            NULL,
            NULL,
        }, {
            gen_helper_neon_qzip8,
            gen_helper_neon_qzip16,
            gen_helper_neon_qzip32,
            NULL,
        }
    };
    return do_zip_uzp(s, a, fn[a->q][a->size]);
}

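/*
 * Narrowing moves: each narrowfn takes a 64-bit input and returns the
 * narrowed result in the low 32 bits of an i64, so the two halves of
 * the destination D register are written back with MO_32.
 */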
static bool do_vmovn(DisasContext *s, arg_2misc *a,
                     NeonGenOne64OpEnvFn *narrowfn)
{
    TCGv_i64 rm, rd0, rd1;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vm & 1) {
        return false;
    }

    if (!narrowfn) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rm = tcg_temp_new_i64();
    rd0 = tcg_temp_new_i64();
    rd1 = tcg_temp_new_i64();

    read_neon_element64(rm, a->vm, 0, MO_64);
    narrowfn(rd0, tcg_env, rm);
    read_neon_element64(rm, a->vm, 1, MO_64);
    narrowfn(rd1, tcg_env, rm);
    write_neon_element64(rd0, a->vd, 0, MO_32);
    write_neon_element64(rd1, a->vd, 1, MO_32);
    return true;
}

#define DO_VMOVN(INSN, FUNC)                                    \
    static bool trans_##INSN(DisasContext *s, arg_2misc *a)     \
    {                                                           \
        static NeonGenOne64OpEnvFn * const narrowfn[] = {       \
            FUNC##8,                                            \
            FUNC##16,                                           \
            FUNC##32,                                           \
            NULL,                                               \
        };                                                      \
        return do_vmovn(s, a, narrowfn[a->size]);               \
    }

DO_VMOVN(VMOVN, gen_neon_narrow_u)
DO_VMOVN(VQMOVUN, gen_helper_neon_unarrow_sat)
DO_VMOVN(VQMOVN_S, gen_helper_neon_narrow_sat_s)
DO_VMOVN(VQMOVN_U, gen_helper_neon_narrow_sat_u)

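/*
 * VSHLL (maximum shift): widen each element and shift left by the
 * source element width (8 << size bits). Because the widening is
 * zero-extended, a single 64-bit shift moves every element into place
 * without spilling into its neighbour, and the resulting bit pattern
 * is the same for the signed and unsigned forms.
 */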
static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
{
    TCGv_i32 rm0, rm1;
    TCGv_i64 rd;
    static NeonGenWidenFn * const widenfns[] = {
        gen_helper_neon_widen_u8,
        gen_helper_neon_widen_u16,
        tcg_gen_extu_i32_i64,
        NULL,
    };
    NeonGenWidenFn *widenfn = widenfns[a->size];

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vd & 1) {
        return false;
    }

    if (!widenfn) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rd = tcg_temp_new_i64();
    rm0 = tcg_temp_new_i32();
    rm1 = tcg_temp_new_i32();

    read_neon_element32(rm0, a->vm, 0, MO_32);
    read_neon_element32(rm1, a->vm, 1, MO_32);

    widenfn(rd, rm0);
    tcg_gen_shli_i64(rd, rd, 8 << a->size);
    write_neon_element64(rd, a->vd, 0, MO_64);
    widenfn(rd, rm1);
    tcg_gen_shli_i64(rd, rd, 8 << a->size);
    write_neon_element64(rd, a->vd, 1, MO_64);
    return true;
}

static bool trans_VCVT_B16_F32(DisasContext *s, arg_2misc *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 dst0, dst1;

    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm & 1) || (a->size != 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_STD);
    tmp = tcg_temp_new_i64();
    dst0 = tcg_temp_new_i32();
    dst1 = tcg_temp_new_i32();

    read_neon_element64(tmp, a->vm, 0, MO_64);
    gen_helper_bfcvt_pair(dst0, tmp, fpst);

    read_neon_element64(tmp, a->vm, 1, MO_64);
    gen_helper_bfcvt_pair(dst1, tmp, fpst);

    write_neon_element32(dst0, a->vd, 0, MO_32);
    write_neon_element32(dst1, a->vd, 1, MO_32);
    return true;
}

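/*
 * Half <-> single conversions on a whole D/Q register's worth of
 * elements, with f16 values packed two per 32-bit element. All reads
 * of the source complete before the first result is written back, so
 * the conversion remains correct if vd and vm overlap.
 */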
static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp, tmp, tmp2, tmp3;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON) ||
        !dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm & 1) || (a->size != 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_STD);
    ahp = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vm, 0, MO_32);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
    tmp2 = tcg_temp_new_i32();
    read_neon_element32(tmp2, a->vm, 1, MO_32);
    gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
    tcg_gen_shli_i32(tmp2, tmp2, 16);
    tcg_gen_or_i32(tmp2, tmp2, tmp);
    read_neon_element32(tmp, a->vm, 2, MO_32);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
    tmp3 = tcg_temp_new_i32();
    read_neon_element32(tmp3, a->vm, 3, MO_32);
    write_neon_element32(tmp2, a->vd, 0, MO_32);
    gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
    tcg_gen_shli_i32(tmp3, tmp3, 16);
    tcg_gen_or_i32(tmp3, tmp3, tmp);
    write_neon_element32(tmp3, a->vd, 1, MO_32);
    return true;
}

static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp, tmp, tmp2, tmp3;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON) ||
        !dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vd & 1) || (a->size != 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_STD);
    ahp = get_ahp_flag();
    tmp3 = tcg_temp_new_i32();
    tmp2 = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vm, 0, MO_32);
    read_neon_element32(tmp2, a->vm, 1, MO_32);
    tcg_gen_ext16u_i32(tmp3, tmp);
    gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
    write_neon_element32(tmp3, a->vd, 0, MO_32);
    tcg_gen_shri_i32(tmp, tmp, 16);
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
    write_neon_element32(tmp, a->vd, 1, MO_32);
    tcg_gen_ext16u_i32(tmp3, tmp2);
    gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
    write_neon_element32(tmp3, a->vd, 2, MO_32);
    tcg_gen_shri_i32(tmp2, tmp2, 16);
    gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
    write_neon_element32(tmp2, a->vd, 3, MO_32);
    return true;
}

static bool do_2misc_vec(DisasContext *s, arg_2misc *a, GVecGen2Fn *fn)
{
    int vec_size = a->q ? 16 : 8;
    int rd_ofs = neon_full_reg_offset(a->vd);
    int rm_ofs = neon_full_reg_offset(a->vm);

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->size == 3) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fn(a->size, rd_ofs, rm_ofs, vec_size, vec_size);

    return true;
}

#define DO_2MISC_VEC(INSN, FN)                                  \
    static bool trans_##INSN(DisasContext *s, arg_2misc *a)     \
    {                                                           \
        return do_2misc_vec(s, a, FN);                          \
    }

DO_2MISC_VEC(VNEG, tcg_gen_gvec_neg)
DO_2MISC_VEC(VABS, tcg_gen_gvec_abs)
DO_2MISC_VEC(VCEQ0, gen_gvec_ceq0)
DO_2MISC_VEC(VCGT0, gen_gvec_cgt0)
DO_2MISC_VEC(VCLE0, gen_gvec_cle0)
DO_2MISC_VEC(VCGE0, gen_gvec_cge0)
DO_2MISC_VEC(VCLT0, gen_gvec_clt0)
DO_2MISC_VEC(VCLS, gen_gvec_cls)
DO_2MISC_VEC(VCLZ, gen_gvec_clz)
DO_2MISC_VEC(VREV64, gen_gvec_rev64)
DO_2MISC_VEC(VPADDL_S, gen_gvec_saddlp)
DO_2MISC_VEC(VPADDL_U, gen_gvec_uaddlp)
DO_2MISC_VEC(VPADAL_S, gen_gvec_sadalp)
DO_2MISC_VEC(VPADAL_U, gen_gvec_uadalp)

static bool trans_VMVN(DisasContext *s, arg_2misc *a)
{
    if (a->size != 0) {
        return false;
    }
    return do_2misc_vec(s, a, tcg_gen_gvec_not);
}

static bool trans_VCNT(DisasContext *s, arg_2misc *a)
{
    if (a->size != 0) {
        return false;
    }
    return do_2misc_vec(s, a, gen_gvec_cnt);
}

static bool trans_VREV16(DisasContext *s, arg_2misc *a)
{
    if (a->size != 0) {
        return false;
    }
    return do_2misc_vec(s, a, gen_gvec_rev16);
}

static bool trans_VREV32(DisasContext *s, arg_2misc *a)
{
    if (a->size != 0 && a->size != 1) {
        return false;
    }
    return do_2misc_vec(s, a, gen_gvec_rev32);
}

#define WRAP_2M_3_OOL_FN(WRAPNAME, FUNC, DATA)                          \
    static void WRAPNAME(unsigned vece, uint32_t rd_ofs,                \
                         uint32_t rm_ofs, uint32_t oprsz,               \
                         uint32_t maxsz)                                \
    {                                                                   \
        tcg_gen_gvec_3_ool(rd_ofs, rd_ofs, rm_ofs, oprsz, maxsz,        \
                           DATA, FUNC);                                 \
    }

#define WRAP_2M_2_OOL_FN(WRAPNAME, FUNC, DATA)                          \
    static void WRAPNAME(unsigned vece, uint32_t rd_ofs,                \
                         uint32_t rm_ofs, uint32_t oprsz,               \
                         uint32_t maxsz)                                \
    {                                                                   \
        tcg_gen_gvec_2_ool(rd_ofs, rm_ofs, oprsz, maxsz, DATA, FUNC);   \
    }

WRAP_2M_3_OOL_FN(gen_AESE, gen_helper_crypto_aese, 0)
WRAP_2M_3_OOL_FN(gen_AESD, gen_helper_crypto_aesd, 0)
WRAP_2M_2_OOL_FN(gen_AESMC, gen_helper_crypto_aesmc, 0)
WRAP_2M_2_OOL_FN(gen_AESIMC, gen_helper_crypto_aesimc, 0)
WRAP_2M_2_OOL_FN(gen_SHA1H, gen_helper_crypto_sha1h, 0)
WRAP_2M_2_OOL_FN(gen_SHA1SU1, gen_helper_crypto_sha1su1, 0)
WRAP_2M_2_OOL_FN(gen_SHA256SU0, gen_helper_crypto_sha256su0, 0)

#define DO_2M_CRYPTO(INSN, FEATURE, SIZE)                               \
    static bool trans_##INSN(DisasContext *s, arg_2misc *a)             \
    {                                                                   \
        if (!dc_isar_feature(FEATURE, s) || a->size != SIZE) {          \
            return false;                                               \
        }                                                               \
        return do_2misc_vec(s, a, gen_##INSN);                          \
    }

DO_2M_CRYPTO(AESE, aa32_aes, 0)
DO_2M_CRYPTO(AESD, aa32_aes, 0)
DO_2M_CRYPTO(AESMC, aa32_aes, 0)
DO_2M_CRYPTO(AESIMC, aa32_aes, 0)
DO_2M_CRYPTO(SHA1H, aa32_sha1, 2)
DO_2M_CRYPTO(SHA1SU1, aa32_sha1, 2)
DO_2M_CRYPTO(SHA256SU0, aa32_sha2, 2)

static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn)
{
    TCGv_i32 tmp;
    int pass;

    /* Handle a 2-reg-misc operation by iterating 32 bits at a time */
    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!fn) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
        read_neon_element32(tmp, a->vm, pass, MO_32);
        fn(tmp, tmp);
        write_neon_element32(tmp, a->vd, pass, MO_32);
    }
    return true;
}

static bool trans_VABS_F(DisasContext *s, arg_2misc *a)
{
    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
    } else if (a->size != MO_32) {
        return false;
    }
    return do_2misc_vec(s, a, gen_gvec_fabs);
}

static bool trans_VNEG_F(DisasContext *s, arg_2misc *a)
{
    if (a->size == MO_16) {
        if (!dc_isar_feature(aa32_fp16_arith, s)) {
            return false;
        }
    } else if (a->size != MO_32) {
        return false;
    }
    return do_2misc_vec(s, a, gen_gvec_fneg);
}

static bool trans_VRECPE(DisasContext *s, arg_2misc *a)
{
    if (a->size != 2) {
        return false;
    }
    return do_2misc_vec(s, a, gen_gvec_urecpe);
}

static bool trans_VRSQRTE(DisasContext *s, arg_2misc *a)
{
    if (a->size != 2) {
        return false;
    }
    return do_2misc_vec(s, a, gen_gvec_ursqrte);
}

#define WRAP_1OP_ENV_FN(WRAPNAME, FUNC)                 \
    static void WRAPNAME(TCGv_i32 d, TCGv_i32 m)        \
    {                                                   \
        FUNC(d, tcg_env, m);                            \
    }

WRAP_1OP_ENV_FN(gen_VQABS_s8, gen_helper_neon_qabs_s8)
WRAP_1OP_ENV_FN(gen_VQABS_s16, gen_helper_neon_qabs_s16)
WRAP_1OP_ENV_FN(gen_VQABS_s32, gen_helper_neon_qabs_s32)
WRAP_1OP_ENV_FN(gen_VQNEG_s8, gen_helper_neon_qneg_s8)
WRAP_1OP_ENV_FN(gen_VQNEG_s16, gen_helper_neon_qneg_s16)
WRAP_1OP_ENV_FN(gen_VQNEG_s32, gen_helper_neon_qneg_s32)

static bool trans_VQABS(DisasContext *s, arg_2misc *a)
{
    static NeonGenOneOpFn * const fn[] = {
        gen_VQABS_s8,
        gen_VQABS_s16,
        gen_VQABS_s32,
        NULL,
    };
    return do_2misc(s, a, fn[a->size]);
}

static bool trans_VQNEG(DisasContext *s, arg_2misc *a)
{
    static NeonGenOneOpFn * const fn[] = {
        gen_VQNEG_s8,
        gen_VQNEG_s16,
        gen_VQNEG_s32,
        NULL,
    };
    return do_2misc(s, a, fn[a->size]);
}

#define DO_2MISC_FP_VEC(INSN, HFUNC, SFUNC)                             \
    static void gen_##INSN(unsigned vece, uint32_t rd_ofs,              \
                           uint32_t rm_ofs,                             \
                           uint32_t oprsz, uint32_t maxsz)              \
    {                                                                   \
        static gen_helper_gvec_2_ptr * const fns[4] = {                 \
            NULL, HFUNC, SFUNC, NULL,                                   \
        };                                                              \
        TCGv_ptr fpst;                                                  \
        fpst = fpstatus_ptr(vece == MO_16 ? FPST_STD_F16 : FPST_STD);   \
        tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz, 0,       \
                           fns[vece]);                                  \
    }                                                                   \
    static bool trans_##INSN(DisasContext *s, arg_2misc *a)             \
    {                                                                   \
        if (a->size == MO_16) {                                         \
            if (!dc_isar_feature(aa32_fp16_arith, s)) {                 \
                return false;                                           \
            }                                                           \
        } else if (a->size != MO_32) {                                  \
            return false;                                               \
        }                                                               \
        return do_2misc_vec(s, a, gen_##INSN);                          \
    }

DO_2MISC_FP_VEC(VRECPE_F, gen_helper_gvec_frecpe_h, gen_helper_gvec_frecpe_s)
DO_2MISC_FP_VEC(VRSQRTE_F, gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s)
DO_2MISC_FP_VEC(VCGT0_F, gen_helper_gvec_fcgt0_h, gen_helper_gvec_fcgt0_s)
DO_2MISC_FP_VEC(VCGE0_F, gen_helper_gvec_fcge0_h, gen_helper_gvec_fcge0_s)
DO_2MISC_FP_VEC(VCEQ0_F, gen_helper_gvec_fceq0_h, gen_helper_gvec_fceq0_s)
DO_2MISC_FP_VEC(VCLT0_F, gen_helper_gvec_fclt0_h, gen_helper_gvec_fclt0_s)
DO_2MISC_FP_VEC(VCLE0_F, gen_helper_gvec_fcle0_h, gen_helper_gvec_fcle0_s)
DO_2MISC_FP_VEC(VCVT_FS, gen_helper_gvec_sstoh, gen_helper_gvec_sitos)
DO_2MISC_FP_VEC(VCVT_FU, gen_helper_gvec_ustoh, gen_helper_gvec_uitos)
DO_2MISC_FP_VEC(VCVT_SF, gen_helper_gvec_tosszh, gen_helper_gvec_tosizs)
DO_2MISC_FP_VEC(VCVT_UF, gen_helper_gvec_touszh, gen_helper_gvec_touizs)

DO_2MISC_FP_VEC(VRINTX_impl, gen_helper_gvec_vrintx_h, gen_helper_gvec_vrintx_s)

static bool trans_VRINTX(DisasContext *s, arg_2misc *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }
    return trans_VRINTX_impl(s, a);
}

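/*
 * Conversions and roundings taking an explicit rounding mode: the mode
 * is passed to the gvec helper in the 'data' argument, after
 * translation from the architectural FPROUNDING_* encoding by
 * arm_rmode_to_sf().
 */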
#define DO_VEC_RMODE(INSN, RMODE, OP)                                   \
    static void gen_##INSN(unsigned vece, uint32_t rd_ofs,              \
                           uint32_t rm_ofs,                             \
                           uint32_t oprsz, uint32_t maxsz)              \
    {                                                                   \
        static gen_helper_gvec_2_ptr * const fns[4] = {                 \
            NULL,                                                       \
            gen_helper_gvec_##OP##h,                                    \
            gen_helper_gvec_##OP##s,                                    \
            NULL,                                                       \
        };                                                              \
        TCGv_ptr fpst;                                                  \
        fpst = fpstatus_ptr(vece == 1 ? FPST_STD_F16 : FPST_STD);       \
        tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz,          \
                           arm_rmode_to_sf(RMODE), fns[vece]);          \
    }                                                                   \
    static bool trans_##INSN(DisasContext *s, arg_2misc *a)             \
    {                                                                   \
        if (!arm_dc_feature(s, ARM_FEATURE_V8)) {                       \
            return false;                                               \
        }                                                               \
        if (a->size == MO_16) {                                         \
            if (!dc_isar_feature(aa32_fp16_arith, s)) {                 \
                return false;                                           \
            }                                                           \
        } else if (a->size != MO_32) {                                  \
            return false;                                               \
        }                                                               \
        return do_2misc_vec(s, a, gen_##INSN);                          \
    }

DO_VEC_RMODE(VCVTAU, FPROUNDING_TIEAWAY, vcvt_rm_u)
DO_VEC_RMODE(VCVTAS, FPROUNDING_TIEAWAY, vcvt_rm_s)
DO_VEC_RMODE(VCVTNU, FPROUNDING_TIEEVEN, vcvt_rm_u)
DO_VEC_RMODE(VCVTNS, FPROUNDING_TIEEVEN, vcvt_rm_s)
DO_VEC_RMODE(VCVTPU, FPROUNDING_POSINF, vcvt_rm_u)
DO_VEC_RMODE(VCVTPS, FPROUNDING_POSINF, vcvt_rm_s)
DO_VEC_RMODE(VCVTMU, FPROUNDING_NEGINF, vcvt_rm_u)
DO_VEC_RMODE(VCVTMS, FPROUNDING_NEGINF, vcvt_rm_s)

DO_VEC_RMODE(VRINTN, FPROUNDING_TIEEVEN, vrint_rm_)
DO_VEC_RMODE(VRINTA, FPROUNDING_TIEAWAY, vrint_rm_)
DO_VEC_RMODE(VRINTZ, FPROUNDING_ZERO, vrint_rm_)
DO_VEC_RMODE(VRINTM, FPROUNDING_NEGINF, vrint_rm_)
DO_VEC_RMODE(VRINTP, FPROUNDING_POSINF, vrint_rm_)

static bool trans_VSWP(DisasContext *s, arg_2misc *a)
{
    TCGv_i64 rm, rd;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->size != 0) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rm = tcg_temp_new_i64();
    rd = tcg_temp_new_i64();
    for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
        read_neon_element64(rm, a->vm, pass, MO_64);
        read_neon_element64(rd, a->vd, pass, MO_64);
        write_neon_element64(rm, a->vd, pass, MO_64);
        write_neon_element64(rd, a->vm, pass, MO_64);
    }
    return true;
}

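/*
 * Helpers for VTRN on 8- and 16-bit elements: interleave the lanes of
 * two 32-bit chunks so that, together with the read/write ordering in
 * trans_VTRN below, the odd elements of Vd are exchanged with the
 * corresponding even elements of Vm.
 */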
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);
}

static bool trans_VTRN(DisasContext *s, arg_2misc *a)
{
    TCGv_i32 tmp, tmp2;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vd | a->vm) & a->q) {
        return false;
    }

    if (a->size == 3) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tmp2 = tcg_temp_new_i32();
    if (a->size == MO_32) {
        /* 32-bit elements: exchange Vm[even] directly with Vd[odd] */
        for (pass = 0; pass < (a->q ? 4 : 2); pass += 2) {
            read_neon_element32(tmp, a->vm, pass, MO_32);
            read_neon_element32(tmp2, a->vd, pass + 1, MO_32);
            write_neon_element32(tmp2, a->vm, pass, MO_32);
            write_neon_element32(tmp, a->vd, pass + 1, MO_32);
        }
    } else {
        for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
            read_neon_element32(tmp, a->vm, pass, MO_32);
            read_neon_element32(tmp2, a->vd, pass, MO_32);
            if (a->size == MO_8) {
                gen_neon_trn_u8(tmp, tmp2);
            } else {
                gen_neon_trn_u16(tmp, tmp2);
            }
            write_neon_element32(tmp2, a->vm, pass, MO_32);
            write_neon_element32(tmp, a->vd, pass, MO_32);
        }
    }
    return true;
}

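/*
 * Integer matrix multiply-accumulate (i8mm) and bfloat16 operations:
 * these all map onto whole-vector gvec helpers via do_neon_ddda and
 * its env/fpst-taking variants.
 */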
static bool trans_VSMMLA(DisasContext *s, arg_VSMMLA *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_smmla_b);
}

static bool trans_VUMMLA(DisasContext *s, arg_VUMMLA *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_ummla_b);
}

static bool trans_VUSMMLA(DisasContext *s, arg_VUSMMLA *a)
{
    if (!dc_isar_feature(aa32_i8mm, s)) {
        return false;
    }
    return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
                        gen_helper_gvec_usmmla_b);
}

static bool trans_VMMLA_b16(DisasContext *s, arg_VMMLA_b16 *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_env(s, 7, a->vd, a->vn, a->vm, 0,
                            gen_helper_gvec_bfmmla);
}

static bool trans_VFMA_b16(DisasContext *s, arg_VFMA_b16 *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_fpst(s, 7, a->vd, a->vn, a->vm, a->q, FPST_STD,
                             gen_helper_gvec_bfmlal);
}

static bool trans_VFMA_b16_scal(DisasContext *s, arg_VFMA_b16_scal *a)
{
    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }
    return do_neon_ddda_fpst(s, 6, a->vd, a->vn, a->vm,
                             (a->index << 1) | a->q, FPST_STD,
                             gen_helper_gvec_bfmlal_idx);
}