/* In gen_gvec_sqdmulh_qc(): */
    tcg_debug_assert(vece >= 1 && vece <= 2);
    gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);

/* In gen_gvec_sqrdmulh_qc(): */
    tcg_debug_assert(vece >= 1 && vece <= 2);
    gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);

/* In gen_gvec_sqrdmlah_qc(): */
    tcg_debug_assert(vece >= 1 && vece <= 2);
    gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);

/* In gen_gvec_sqrdmlsh_qc(): */
    tcg_debug_assert(vece >= 1 && vece <= 2);
    gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
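These four expanders validate the element size (vece 1 = 16-bit lanes, 2 = 32-bit lanes) and dispatch to per-size saturating-multiply helpers. As a rough scalar model of the 16-bit lane operation (the function name and framing are illustrative, not QEMU API; the real helpers fold saturation into the QC flag):

#include <stdint.h>

static int16_t sqdmulh16(int16_t a, int16_t b, int *qc)
{
    int64_t p = 2 * (int64_t)a * b;      /* doubling product */
    int64_t r = p >> 16;                 /* keep the high half; SQRDMULH adds
                                          * (1 << 15) to p before the shift */
    if (r != (int16_t)r) {               /* only -32768 * -32768 overflows */
        *qc = 1;
        return INT16_MAX;
    }
    return (int16_t)r;
}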
/* In GEN_CMP0(): */
    shift = MIN(shift, (8 << vece) - 1);
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}
/*
 * In gen_gvec_ssra(): tszimm encoding produces immediates in the range
 * [1..esize].  Shifts by esize are still architecturally valid; clamp to
 * esize - 1, which for a signed shift already yields all sign bits.
 */
    shift = MIN(shift, (8 << vece) - 1);
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}
/* In gen_gvec_usra(): tszimm encoding produces immediates in the range [1..esize]. */
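Both families only add an accumulate step to an ordinary shift. A scalar sketch of one 32-bit lane (illustrative names, not QEMU API):

static int32_t ssra32(int32_t d, int32_t a, int sh)    /* 1 <= sh <= 32 */
{
    sh = sh > 31 ? 31 : sh;        /* same clamp as gen_gvec_ssra() */
    return d + (a >> sh);
}

static uint32_t usra32(uint32_t d, uint32_t a, int sh) /* 1 <= sh <= 32 */
{
    return d + (sh == 32 ? 0 : a >> sh);   /* unsigned shift by esize is 0 */
}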
/*
 * Shift one less than the requested amount, and the low bit is
 * the rounding bit.  For the 8 and 16-bit operations, because we
 * mask the low bit, we can perform a normal integer shift instead
 * of a vector shift.
 */
static void gen_srshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, sh - 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_sar8i_i64(d, a, sh);
    tcg_gen_vec_add8_i64(d, d, t);
}

static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, sh - 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_sar16i_i64(d, a, sh);
    tcg_gen_vec_add16_i64(d, d, t);
}
void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_extract_i32(t, a, sh - 1, 1);
    tcg_gen_sari_i32(d, a, sh);
    tcg_gen_add_i32(d, d, t);
}

void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extract_i64(t, a, sh - 1, 1);
    tcg_gen_sari_i64(d, a, sh);
    tcg_gen_add_i64(d, d, t);
}
static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec ones = tcg_constant_vec_matching(d, vece, 1);

    tcg_gen_shri_vec(vece, t, a, sh - 1);
    tcg_gen_and_vec(vece, t, t, ones);
    tcg_gen_sari_vec(vece, d, a, sh);
    tcg_gen_add_vec(vece, d, d, t);
}
/*
 * In gen_gvec_srshr(): tszimm encoding produces immediates in the range
 * [1..esize].  A rounding shift by esize itself is always zero:
 *   (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
 */
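The shri/andi pairs above implement the standard rounding-shift identity: add back the last bit shifted out instead of widening the lane. A scalar check (illustrative, not QEMU API):

static int32_t srshr32_ref(int32_t x, int sh)     /* 1 <= sh <= 31 */
{
    int round = ((uint32_t)x >> (sh - 1)) & 1;    /* the rounding bit */
    /* equals (int32_t)(((int64_t)x + (1ll << (sh - 1))) >> sh) */
    return (x >> sh) + round;
}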
static void gen_srsra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    gen_srshr8_i64(t, a, sh);
    tcg_gen_vec_add8_i64(d, d, t);
}

static void gen_srsra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    gen_srshr16_i64(t, a, sh);
    tcg_gen_vec_add16_i64(d, d, t);
}

static void gen_srsra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
    TCGv_i32 t = tcg_temp_new_i32();

    gen_srshr32_i32(t, a, sh);
    tcg_gen_add_i32(d, d, t);
}

static void gen_srsra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    gen_srshr64_i64(t, a, sh);
    tcg_gen_add_i64(d, d, t);
}

static void gen_srsra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    gen_srshr_vec(vece, t, a, sh);
    tcg_gen_add_vec(vece, d, d, t);
}
/*
 * In gen_gvec_srsra(): tszimm encoding produces immediates in the range
 * [1..esize].  With rounding, a shift by esize produces
 *   (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0,
 * i.e. always zero, so the accumulate leaves Rd unchanged.
 */
static void gen_urshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, sh - 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_shr8i_i64(d, a, sh);
    tcg_gen_vec_add8_i64(d, d, t);
}

static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, sh - 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_shr16i_i64(d, a, sh);
    tcg_gen_vec_add16_i64(d, d, t);
}
void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
    TCGv_i32 t;

    /* A shift by the full input width leaves only the rounding bit. */
    if (sh == 32) {
        tcg_gen_extract_i32(d, a, sh - 1, 1);
        return;
    }
    t = tcg_temp_new_i32();
    tcg_gen_extract_i32(t, a, sh - 1, 1);
    tcg_gen_shri_i32(d, a, sh);
    tcg_gen_add_i32(d, d, t);
}

void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extract_i64(t, a, sh - 1, 1);
    tcg_gen_shri_i64(d, a, sh);
    tcg_gen_add_i64(d, d, t);
}
static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec ones = tcg_constant_vec_matching(d, vece, 1);

    tcg_gen_shri_vec(vece, t, a, shift - 1);
    tcg_gen_and_vec(vece, t, t, ones);
    tcg_gen_shri_vec(vece, d, a, shift);
    tcg_gen_add_vec(vece, d, d, t);
}
/*
 * In gen_gvec_urshr(): tszimm encoding produces immediates in the range
 * [1..esize].  For shift == esize the unsigned result is zero, but with
 * rounding this produces a copy of the most significant bit:
 */
    tcg_gen_gvec_shri(vece, rd_ofs, rm_ofs, shift - 1, opr_sz, max_sz);
static void gen_ursra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    if (sh == 8) {
        tcg_gen_vec_shr8i_i64(t, a, 7);   /* only the rounding bit survives */
    } else {
        gen_urshr8_i64(t, a, sh);
    }
    tcg_gen_vec_add8_i64(d, d, t);
}

static void gen_ursra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    if (sh == 16) {
        tcg_gen_vec_shr16i_i64(t, a, 15);
    } else {
        gen_urshr16_i64(t, a, sh);
    }
    tcg_gen_vec_add16_i64(d, d, t);
}

static void gen_ursra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
    TCGv_i32 t = tcg_temp_new_i32();

    if (sh == 32) {
        tcg_gen_shri_i32(t, a, 31);
    } else {
        gen_urshr32_i32(t, a, sh);
    }
    tcg_gen_add_i32(d, d, t);
}

static void gen_ursra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    if (sh == 64) {
        tcg_gen_shri_i64(t, a, 63);
    } else {
        gen_urshr64_i64(t, a, sh);
    }
    tcg_gen_add_i64(d, d, t);
}

static void gen_ursra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    if (sh == (8 << vece)) {
        tcg_gen_shri_vec(vece, t, a, sh - 1);
    } else {
        gen_urshr_vec(vece, t, a, sh);
    }
    tcg_gen_add_vec(vece, d, d, t);
}
/* In gen_gvec_ursra(): tszimm encoding produces immediates in the range [1..esize]. */
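Putting the pieces together for one unsigned 8-bit lane, including the sh == esize special case where only the rounding bit survives (illustrative, not QEMU API):

static uint8_t ursra8(uint8_t d, uint8_t x, int sh)   /* 1 <= sh <= 8 */
{
    uint8_t r = (sh == 8) ? (x >> 7)   /* rounding bit == the msb */
                          : (uint8_t)((x >> sh) + ((x >> (sh - 1)) & 1));
    return (uint8_t)(d + r);
}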
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
    tcg_gen_shri_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);
}
/* In gen_gvec_sri(): tszimm encoding produces immediates in the range [1..esize]. */
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_shli_vec(vece, t, a, sh);
    tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);
}
/* In gen_gvec_sli(): tszimm encoding produces immediates in the range [0..esize-1]. */
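SRI and SLI replace only the bits that the shifted value can reach, preserving the rest of the destination. A scalar model of an 8-bit lane (illustrative, not QEMU API):

static uint8_t sri8(uint8_t d, uint8_t x, int sh)     /* 1 <= sh <= 8 */
{
    uint8_t mask = 0xff >> sh;                 /* bits the shift can reach */
    return (uint8_t)((d & ~mask) | ((x >> sh) & mask));
}

static uint8_t sli8(uint8_t d, uint8_t x, int sh)     /* 0 <= sh <= 7 */
{
    uint8_t mask = (uint8_t)(0xff << sh);
    return (uint8_t)((d & ~mask) | ((uint8_t)(x << sh) & mask));
}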
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_negsetcond_i32(TCG_COND_TSTNE, d, a, b);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_negsetcond_i64(TCG_COND_TSTNE, d, a, b);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_cmp_vec(TCG_COND_TSTNE, vece, d, a, b);
}
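CMTST is a bit-test compare: a lane becomes all ones when the operands share any set bit, which is what the TSTNE condition expresses. Scalar equivalent (illustrative):

static int32_t cmtst32(int32_t a, int32_t b)
{
    return (a & b) != 0 ? -1 : 0;   /* negsetcond == -(setcond) */
}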
/*
 * In gen_ushl_vec(): for element sizes wider than MO_8, the shift count
 * has already been masked to a byte and so a signed compare works; for
 * MO_8 an unsigned compare is required.  Other tcg hosts have a full set
 * of comparisons and do not care.
 */
/* In gen_sshl_vec(): bound rsh so an out-of-range right shift yields -1
 * (all sign bits) rather than wrapping. */
    max = tcg_constant_vec_matching(dst, vece, (8 << vece) - 1);
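For the variable-shift ops the count is a signed byte: positive shifts left, negative shifts right, and out-of-range counts must saturate the effect rather than wrap. A scalar sketch of signed SSHL on one 8-bit lane (illustrative, not QEMU API):

static int8_t sshl8(int8_t x, int8_t count)
{
    if (count >= 8) {
        return 0;                  /* left shift out of range flushes to 0 */
    }
    if (count >= 0) {
        return (int8_t)(x << count);
    }
    if (count <= -8) {
        count = -7;                /* clamp: right shift fills with sign */
    }
    return (int8_t)(x >> -count);
}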
void gen_uqadd_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
{
    uint64_t max = MAKE_64BIT_MASK(0, 8 << esz);
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_add_i64(tmp, a, b);
    tcg_gen_umin_i64(res, tmp, tcg_constant_i64(max));
    tcg_gen_xor_i64(tmp, tmp, res);
    tcg_gen_or_i64(qc, qc, tmp);
}

void gen_uqadd_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_add_i64(t, a, b);
    tcg_gen_movcond_i64(TCG_COND_LTU, res, t, a,
                        tcg_constant_i64(UINT64_MAX), t);
    tcg_gen_xor_i64(t, t, res);
    tcg_gen_or_i64(qc, qc, t);
}

static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);

    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_xor_vec(vece, x, x, t);
    tcg_gen_or_vec(vece, qc, qc, x);
}
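The wrapped/saturated XOR trick shows up in all of these: compute both the wrapping result and the saturating result, and OR any difference into QC. Scalar model of one unsigned 8-bit lane (illustrative, not QEMU API):

#include <stdint.h>

static uint8_t uqadd8(uint8_t a, uint8_t b, int *qc)
{
    unsigned sum = (unsigned)a + b;        /* widened, so it cannot wrap */
    uint8_t res = sum > 0xff ? 0xff : (uint8_t)sum;
    *qc |= (sum != res);                   /* saturation sets QC */
    return res;
}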
void gen_sqadd_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
{
    int64_t max = MAKE_64BIT_MASK(0, (8 << esz) - 1);
    int64_t min = -1ll - max;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_add_i64(tmp, a, b);
    tcg_gen_smin_i64(res, tmp, tcg_constant_i64(max));
    tcg_gen_smax_i64(res, res, tcg_constant_i64(min));
    tcg_gen_xor_i64(tmp, tmp, res);
    tcg_gen_or_i64(qc, qc, tmp);
}

void gen_sqadd_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_add_i64(t0, a, b);

    /* Compute signed overflow indication into T1. */
    tcg_gen_xor_i64(t1, a, b);
    tcg_gen_xor_i64(t2, t0, a);
    tcg_gen_andc_i64(t1, t2, t1);

    /* Compute saturated value into T2: INT64_MAX if a >= 0, else INT64_MIN. */
    tcg_gen_sari_i64(t2, a, 63);
    tcg_gen_xor_i64(t2, t2, tcg_constant_i64(INT64_MAX));

    tcg_gen_movcond_i64(TCG_COND_LT, res, t1, tcg_constant_i64(0), t2, t0);
    tcg_gen_xor_i64(t0, t0, res);
    tcg_gen_or_i64(qc, qc, t0);
}

static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);

    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_xor_vec(vece, x, x, t);
    tcg_gen_or_vec(vece, qc, qc, x);
}
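gen_sqadd_d's three temporaries implement the classic sign-bit overflow test. A scalar equivalent for the 64-bit lane (illustrative, not QEMU API):

#include <stdint.h>

static int64_t sqadd64(int64_t a, int64_t b, uint64_t *qc)
{
    int64_t r = (int64_t)((uint64_t)a + (uint64_t)b);  /* wrapping add */
    /* Overflow iff a and b have the same sign and r has the other one:
     * (~(a ^ b) & (a ^ r)) < 0 mirrors the xor/xor/andc sequence. */
    if ((~(a ^ b) & (a ^ r)) < 0) {
        int64_t sat = (a < 0) ? INT64_MIN : INT64_MAX; /* (a >> 63) ^ INT64_MAX */
        *qc |= (uint64_t)(r ^ sat);   /* any set bit records saturation */
        r = sat;
    }
    return r;
}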
void gen_uqsub_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_sub_i64(tmp, a, b);
    tcg_gen_smax_i64(res, tmp, tcg_constant_i64(0));
    tcg_gen_xor_i64(tmp, tmp, res);
    tcg_gen_or_i64(qc, qc, tmp);
}

void gen_uqsub_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_sub_i64(t, a, b);
    tcg_gen_movcond_i64(TCG_COND_LTU, res, a, b, tcg_constant_i64(0), t);
    tcg_gen_xor_i64(t, t, res);
    tcg_gen_or_i64(qc, qc, t);
}

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);

    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_xor_vec(vece, x, x, t);
    tcg_gen_or_vec(vece, qc, qc, x);
}
void gen_sqsub_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
{
    int64_t max = MAKE_64BIT_MASK(0, (8 << esz) - 1);
    int64_t min = -1ll - max;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_sub_i64(tmp, a, b);
    tcg_gen_smin_i64(res, tmp, tcg_constant_i64(max));
    tcg_gen_smax_i64(res, res, tcg_constant_i64(min));
    tcg_gen_xor_i64(tmp, tmp, res);
    tcg_gen_or_i64(qc, qc, tmp);
}

void gen_sqsub_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_sub_i64(t0, a, b);

    /* Compute signed overflow indication into T1. */
    tcg_gen_xor_i64(t1, a, b);
    tcg_gen_xor_i64(t2, t0, a);
    tcg_gen_and_i64(t1, t1, t2);

    /* Compute saturated value into T2: INT64_MAX if a >= 0, else INT64_MIN. */
    tcg_gen_sari_i64(t2, a, 63);
    tcg_gen_xor_i64(t2, t2, tcg_constant_i64(INT64_MAX));

    tcg_gen_movcond_i64(TCG_COND_LT, res, t1, tcg_constant_i64(0), t2, t0);
    tcg_gen_xor_i64(t0, t0, res);
    tcg_gen_or_i64(qc, qc, t0);
}

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);

    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_xor_vec(vece, x, x, t);
    tcg_gen_or_vec(vece, qc, qc, x);
}
static void gen_sabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_sub_i32(t, a, b);
    tcg_gen_sub_i32(d, b, a);
    tcg_gen_movcond_i32(TCG_COND_LT, d, a, b, d, t);
}

static void gen_sabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_sub_i64(t, a, b);
    tcg_gen_sub_i64(d, b, a);
    tcg_gen_movcond_i64(TCG_COND_LT, d, a, b, d, t);
}

static void gen_sabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_smin_vec(vece, t, a, b);
    tcg_gen_smax_vec(vece, d, a, b);
    tcg_gen_sub_vec(vece, d, d, t);
}

static void gen_uabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_sub_i32(t, a, b);
    tcg_gen_sub_i32(d, b, a);
    tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, d, t);
}

static void gen_uabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_sub_i64(t, a, b);
    tcg_gen_sub_i64(d, b, a);
    tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, d, t);
}

static void gen_uabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_umin_vec(vece, t, a, b);
    tcg_gen_umax_vec(vece, d, a, b);
    tcg_gen_sub_vec(vece, d, d, t);
}
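Absolute difference falls out of min/max with no branches, which is why the vector forms use smin/smax or umin/umax followed by a subtract:
    |a - b| == max(a, b) - min(a, b)
Scalar check (illustrative):

static uint32_t uabd32(uint32_t a, uint32_t b)
{
    return (a > b) ? a - b : b - a;    /* == umax - umin */
}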
static void gen_saba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();
    gen_sabd_i32(t, a, b);
    tcg_gen_add_i32(d, d, t);
}

static void gen_saba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();
    gen_sabd_i64(t, a, b);
    tcg_gen_add_i64(d, d, t);
}

static void gen_saba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    gen_sabd_vec(vece, t, a, b);
    tcg_gen_add_vec(vece, d, d, t);
}

static void gen_uaba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();
    gen_uabd_i32(t, a, b);
    tcg_gen_add_i32(d, d, t);
}

static void gen_uaba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();
    gen_uabd_i64(t, a, b);
    tcg_gen_add_i64(d, d, t);
}

static void gen_uaba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    gen_uabd_vec(vece, t, a, b);
    tcg_gen_add_vec(vece, d, d, t);
}
/* SHADD: signed halving add, (a + b) >> 1 per lane, without widening. */
static void gen_shadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_and_i64(t, a, b);                      /* carry out of bit 0 */
    tcg_gen_vec_sar8i_i64(a, a, 1);
    tcg_gen_vec_sar8i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_add8_i64(d, a, b);
    tcg_gen_vec_add8_i64(d, d, t);
}

static void gen_shadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_and_i64(t, a, b);
    tcg_gen_vec_sar16i_i64(a, a, 1);
    tcg_gen_vec_sar16i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_add16_i64(d, a, b);
    tcg_gen_vec_add16_i64(d, d, t);
}

static void gen_shadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_and_i32(t, a, b);
    tcg_gen_sari_i32(a, a, 1);
    tcg_gen_sari_i32(b, b, 1);
    tcg_gen_andi_i32(t, t, 1);
    tcg_gen_add_i32(d, a, b);
    tcg_gen_add_i32(d, d, t);
}

static void gen_shadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_and_vec(vece, t, a, b);
    tcg_gen_sari_vec(vece, a, a, 1);
    tcg_gen_sari_vec(vece, b, b, 1);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
    tcg_gen_add_vec(vece, d, a, b);
    tcg_gen_add_vec(vece, d, d, t);
}
/* UHADD: unsigned halving add; same carry trick with logical shifts. */
static void gen_uhadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_and_i64(t, a, b);
    tcg_gen_vec_shr8i_i64(a, a, 1);
    tcg_gen_vec_shr8i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_add8_i64(d, a, b);
    tcg_gen_vec_add8_i64(d, d, t);
}

static void gen_uhadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_and_i64(t, a, b);
    tcg_gen_vec_shr16i_i64(a, a, 1);
    tcg_gen_vec_shr16i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_add16_i64(d, a, b);
    tcg_gen_vec_add16_i64(d, d, t);
}

static void gen_uhadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_and_i32(t, a, b);
    tcg_gen_shri_i32(a, a, 1);
    tcg_gen_shri_i32(b, b, 1);
    tcg_gen_andi_i32(t, t, 1);
    tcg_gen_add_i32(d, a, b);
    tcg_gen_add_i32(d, d, t);
}

static void gen_uhadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_and_vec(vece, t, a, b);
    tcg_gen_shri_vec(vece, a, a, 1);
    tcg_gen_shri_vec(vece, b, b, 1);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
    tcg_gen_add_vec(vece, d, a, b);
    tcg_gen_add_vec(vece, d, d, t);
}
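All of the halving adds rest on one carry-save identity, evaluated without widening the lane:
    (a + b) >> 1 == (a >> 1) + (b >> 1) + (a & b & 1)
The only information lost by shifting first is the carry out of bit 0, and that carry is generated exactly when both low bits are set. Scalar form of the unsigned case (illustrative):

static uint8_t uhadd8(uint8_t a, uint8_t b)
{
    return (uint8_t)((a >> 1) + (b >> 1) + (a & b & 1));
}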
/* SHSUB: signed halving subtract, (a - b) >> 1 per lane. */
static void gen_shsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andc_i64(t, b, a);                     /* borrow out of bit 0 */
    tcg_gen_vec_sar8i_i64(a, a, 1);
    tcg_gen_vec_sar8i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_sub8_i64(d, a, b);
    tcg_gen_vec_sub8_i64(d, d, t);
}

static void gen_shsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andc_i64(t, b, a);
    tcg_gen_vec_sar16i_i64(a, a, 1);
    tcg_gen_vec_sar16i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_sub16_i64(d, a, b);
    tcg_gen_vec_sub16_i64(d, d, t);
}

static void gen_shsub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andc_i32(t, b, a);
    tcg_gen_sari_i32(a, a, 1);
    tcg_gen_sari_i32(b, b, 1);
    tcg_gen_andi_i32(t, t, 1);
    tcg_gen_sub_i32(d, a, b);
    tcg_gen_sub_i32(d, d, t);
}

static void gen_shsub_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_andc_vec(vece, t, b, a);
    tcg_gen_sari_vec(vece, a, a, 1);
    tcg_gen_sari_vec(vece, b, b, 1);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
    tcg_gen_sub_vec(vece, d, a, b);
    tcg_gen_sub_vec(vece, d, d, t);
}
/* UHSUB: unsigned halving subtract; same borrow trick with logical shifts. */
static void gen_uhsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andc_i64(t, b, a);
    tcg_gen_vec_shr8i_i64(a, a, 1);
    tcg_gen_vec_shr8i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_sub8_i64(d, a, b);
    tcg_gen_vec_sub8_i64(d, d, t);
}

static void gen_uhsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andc_i64(t, b, a);
    tcg_gen_vec_shr16i_i64(a, a, 1);
    tcg_gen_vec_shr16i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_sub16_i64(d, a, b);
    tcg_gen_vec_sub16_i64(d, d, t);
}

static void gen_uhsub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andc_i32(t, b, a);
    tcg_gen_shri_i32(a, a, 1);
    tcg_gen_shri_i32(b, b, 1);
    tcg_gen_andi_i32(t, t, 1);
    tcg_gen_sub_i32(d, a, b);
    tcg_gen_sub_i32(d, d, t);
}

static void gen_uhsub_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_andc_vec(vece, t, b, a);
    tcg_gen_shri_vec(vece, a, a, 1);
    tcg_gen_shri_vec(vece, b, b, 1);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
    tcg_gen_sub_vec(vece, d, a, b);
    tcg_gen_sub_vec(vece, d, d, t);
}
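The halving subtracts use the matching borrow-save identity, with andc producing the borrow out of bit 0 (set when the low bit of b is set and the low bit of a is clear):
    (a - b) >> 1 == (a >> 1) - (b >> 1) - (~a & b & 1)
Scalar form of the unsigned case, in wrapping lane arithmetic (illustrative):

static uint8_t uhsub8(uint8_t a, uint8_t b)
{
    return (uint8_t)((a >> 1) - (b >> 1) - (~a & b & 1));
}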
/* SRHADD: signed rounding halving add; OR supplies carry plus rounding. */
static void gen_srhadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_or_i64(t, a, b);
    tcg_gen_vec_sar8i_i64(a, a, 1);
    tcg_gen_vec_sar8i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_add8_i64(d, a, b);
    tcg_gen_vec_add8_i64(d, d, t);
}

static void gen_srhadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_or_i64(t, a, b);
    tcg_gen_vec_sar16i_i64(a, a, 1);
    tcg_gen_vec_sar16i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_add16_i64(d, a, b);
    tcg_gen_vec_add16_i64(d, d, t);
}

static void gen_srhadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_or_i32(t, a, b);
    tcg_gen_sari_i32(a, a, 1);
    tcg_gen_sari_i32(b, b, 1);
    tcg_gen_andi_i32(t, t, 1);
    tcg_gen_add_i32(d, a, b);
    tcg_gen_add_i32(d, d, t);
}

static void gen_srhadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_or_vec(vece, t, a, b);
    tcg_gen_sari_vec(vece, a, a, 1);
    tcg_gen_sari_vec(vece, b, b, 1);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
    tcg_gen_add_vec(vece, d, a, b);
    tcg_gen_add_vec(vece, d, d, t);
}
/* URHADD: unsigned rounding halving add. */
static void gen_urhadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_or_i64(t, a, b);
    tcg_gen_vec_shr8i_i64(a, a, 1);
    tcg_gen_vec_shr8i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_add8_i64(d, a, b);
    tcg_gen_vec_add8_i64(d, d, t);
}

static void gen_urhadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_or_i64(t, a, b);
    tcg_gen_vec_shr16i_i64(a, a, 1);
    tcg_gen_vec_shr16i_i64(b, b, 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_add16_i64(d, a, b);
    tcg_gen_vec_add16_i64(d, d, t);
}

static void gen_urhadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_or_i32(t, a, b);
    tcg_gen_shri_i32(a, a, 1);
    tcg_gen_shri_i32(b, b, 1);
    tcg_gen_andi_i32(t, t, 1);
    tcg_gen_add_i32(d, a, b);
    tcg_gen_add_i32(d, d, t);
}

static void gen_urhadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_or_vec(vece, t, a, b);
    tcg_gen_shri_vec(vece, a, a, 1);
    tcg_gen_shri_vec(vece, b, b, 1);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
    tcg_gen_add_vec(vece, d, a, b);
    tcg_gen_add_vec(vece, d, d, t);
}
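Rounding halving adds swap the AND for an OR, which folds the +1 rounding increment into the same carry term:
    (a + b + 1) >> 1 == (a >> 1) + (b >> 1) + ((a | b) & 1)
Scalar form of the unsigned case (illustrative):

static uint8_t urhadd8(uint8_t a, uint8_t b)
{
    return (uint8_t)((a >> 1) + (b >> 1) + ((a | b) & 1));
}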
/* In gen_gvec_fabs(): mask off the per-element sign bit. */
    uint64_t s_bit = 1ull << ((8 << vece) - 1);
    tcg_gen_gvec_andi(vece, dofs, aofs, s_bit - 1, oprsz, maxsz);

/* In gen_gvec_fneg(): the same sign-bit constant; FNEG flips it. */
    uint64_t s_bit = 1ull << ((8 << vece) - 1);
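Both expanders treat float lanes as raw bits: FABS clears the sign bit, FNEG flips it. On a 32-bit lane (illustrative, not QEMU API):

static uint32_t fabs_bits32(uint32_t x) { return x & 0x7fffffffu; }  /* s_bit - 1 */
static uint32_t fneg_bits32(uint32_t x) { return x ^ 0x80000000u; }  /* s_bit */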