Lines matching defs: s — each entry below is a source line from the SVE translate functions that references the DisasContext *s argument, prefixed with its line number in the file.

45 static int tszimm_esz(DisasContext *s, int x)
51 static int tszimm_shr(DisasContext *s, int x)
58 int esz = tszimm_esz(s, x);
66 static int tszimm_shl(DisasContext *s, int x)
69 int esz = tszimm_esz(s, x);
77 static inline int expand_imm_sh8s(DisasContext *s, int x)
82 static inline int expand_imm_sh8u(DisasContext *s, int x)
90 static inline int msz_dtype(DisasContext *s, int msz)
107 static bool gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn,
113 if (sve_access_check(s)) {
114 unsigned vsz = vec_full_reg_size(s);
115 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
116 vec_full_reg_offset(s, rn),
122 static bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
129 if (sve_access_check(s)) {
130 unsigned vsz = vec_full_reg_size(s);
133 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
134 vec_full_reg_offset(s, rn),
140 static bool gen_gvec_fpst_ah_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
143 return gen_gvec_fpst_zz(s, fn, a->rd, a->rn, data,
144 select_ah_fpst(s, a->esz));
148 static bool gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
154 if (sve_access_check(s)) {
155 unsigned vsz = vec_full_reg_size(s);
156 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
157 vec_full_reg_offset(s, rn),
158 vec_full_reg_offset(s, rm),
164 static bool gen_gvec_ool_arg_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
167 return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data);
171 static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
178 if (sve_access_check(s)) {
179 unsigned vsz = vec_full_reg_size(s);
182 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
183 vec_full_reg_offset(s, rn),
184 vec_full_reg_offset(s, rm),
190 static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
193 return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data,
197 static bool gen_gvec_fpst_ah_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
200 return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data,
201 select_ah_fpst(s, a->esz));
205 static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
211 if (sve_access_check(s)) {
212 unsigned vsz = vec_full_reg_size(s);
213 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
214 vec_full_reg_offset(s, rn),
215 vec_full_reg_offset(s, rm),
216 vec_full_reg_offset(s, ra),
222 static bool gen_gvec_ool_arg_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
225 return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
228 static bool gen_gvec_ool_arg_zzxz(DisasContext *s, gen_helper_gvec_4 *fn,
231 return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
235 static bool gen_gvec_ptr_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
242 if (sve_access_check(s)) {
243 unsigned vsz = vec_full_reg_size(s);
244 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
245 vec_full_reg_offset(s, rn),
246 vec_full_reg_offset(s, rm),
247 vec_full_reg_offset(s, ra),
253 static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
258 bool ret = gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, status);
262 static bool gen_gvec_env_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
266 return gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, tcg_env);
269 static bool gen_gvec_env_arg_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
272 return gen_gvec_env_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
275 static bool gen_gvec_env_arg_zzxz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
278 return gen_gvec_env_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
282 static bool gen_gvec_fpst_zzzzp(DisasContext *s, gen_helper_gvec_5_ptr *fn,
289 if (sve_access_check(s)) {
290 unsigned vsz = vec_full_reg_size(s);
293 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, rd),
294 vec_full_reg_offset(s, rn),
295 vec_full_reg_offset(s, rm),
296 vec_full_reg_offset(s, ra),
297 pred_full_reg_offset(s, pg),
304 static bool gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
310 if (sve_access_check(s)) {
311 unsigned vsz = vec_full_reg_size(s);
312 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
313 vec_full_reg_offset(s, rn),
314 pred_full_reg_offset(s, pg),
320 static bool gen_gvec_ool_arg_zpz(DisasContext *s, gen_helper_gvec_3 *fn,
323 return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, data);
326 static bool gen_gvec_ool_arg_zpzi(DisasContext *s, gen_helper_gvec_3 *fn,
329 return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm);
332 static bool gen_gvec_fpst_zzp(DisasContext *s, gen_helper_gvec_3_ptr *fn,
339 if (sve_access_check(s)) {
340 unsigned vsz = vec_full_reg_size(s);
343 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
344 vec_full_reg_offset(s, rn),
345 pred_full_reg_offset(s, pg),
351 static bool gen_gvec_fpst_arg_zpz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
355 return gen_gvec_fpst_zzp(s, fn, a->rd, a->rn, a->pg, data, flavour);
359 static bool gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn,
365 if (sve_access_check(s)) {
366 unsigned vsz = vec_full_reg_size(s);
367 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
368 vec_full_reg_offset(s, rn),
369 vec_full_reg_offset(s, rm),
370 pred_full_reg_offset(s, pg),
376 static bool gen_gvec_ool_arg_zpzz(DisasContext *s, gen_helper_gvec_4 *fn,
379 return gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, data);
383 static bool gen_gvec_fpst_zzzp(DisasContext *s, gen_helper_gvec_4_ptr *fn,
390 if (sve_access_check(s)) {
391 unsigned vsz = vec_full_reg_size(s);
394 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
395 vec_full_reg_offset(s, rn),
396 vec_full_reg_offset(s, rm),
397 pred_full_reg_offset(s, pg),
403 static bool gen_gvec_fpst_arg_zpzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
406 return gen_gvec_fpst_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0,
411 static bool gen_gvec_fn_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
417 if (sve_access_check(s)) {
418 unsigned vsz = vec_full_reg_size(s);
419 gvec_fn(esz, vec_full_reg_offset(s, rd),
420 vec_full_reg_offset(s, rn), imm, vsz, vsz);
425 static bool gen_gvec_fn_arg_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
432 return gen_gvec_fn_zzi(s, gvec_fn, a->esz, a->rd, a->rn, a->imm);
436 static bool gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
442 if (sve_access_check(s)) {
443 unsigned vsz = vec_full_reg_size(s);
444 gvec_fn(esz, vec_full_reg_offset(s, rd),
445 vec_full_reg_offset(s, rn),
446 vec_full_reg_offset(s, rm), vsz, vsz);
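The gen_gvec_fn_* wrappers above take an inline gvec expander (GVecGen2iFn/GVecGen3Fn, e.g. tcg_gen_gvec_and) rather than an out-of-line helper pointer. A sketch of the three-operand form, with the elided lines filled in as assumptions:

    static bool gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
                                int esz, int rd, int rn, int rm)
    {
        if (gvec_fn == NULL) {             /* assumed guard */
            return false;
        }
        if (sve_access_check(s)) {
            unsigned vsz = vec_full_reg_size(s);
            /* Expand inline TCG vector ops instead of calling a helper. */
            gvec_fn(esz, vec_full_reg_offset(s, rd),
                    vec_full_reg_offset(s, rn),
                    vec_full_reg_offset(s, rm), vsz, vsz);
        }
        return true;
    }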
451 static bool gen_gvec_fn_arg_zzz(DisasContext *s, GVecGen3Fn *fn,
454 return gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm);
458 static bool gen_gvec_fn_arg_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn,
464 if (sve_access_check(s)) {
465 unsigned vsz = vec_full_reg_size(s);
466 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
467 vec_full_reg_offset(s, a->rn),
468 vec_full_reg_offset(s, a->rm),
469 vec_full_reg_offset(s, a->ra), vsz, vsz);
475 static bool do_mov_z(DisasContext *s, int rd, int rn)
477 if (sve_access_check(s)) {
478 unsigned vsz = vec_full_reg_size(s);
479 tcg_gen_gvec_mov(MO_8, vec_full_reg_offset(s, rd),
480 vec_full_reg_offset(s, rn), vsz, vsz);
486 static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
488 unsigned vsz = vec_full_reg_size(s);
489 tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
493 static bool gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn,
496 if (sve_access_check(s)) {
497 unsigned psz = pred_gvec_reg_size(s);
498 gvec_fn(MO_64, pred_full_reg_offset(s, rd),
499 pred_full_reg_offset(s, rn),
500 pred_full_reg_offset(s, rm), psz, psz);
506 static bool do_mov_p(DisasContext *s, int rd, int rn)
508 if (sve_access_check(s)) {
509 unsigned psz = pred_gvec_reg_size(s);
510 tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd),
511 pred_full_reg_offset(s, rn), psz, psz);
534 static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
555 static bool trans_INVALID(DisasContext *s, arg_INVALID *a)
557 unallocated_encoding(s);
570 static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
572 if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
575 if (sve_access_check(s)) {
576 unsigned vsz = vec_full_reg_size(s);
577 gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
578 vec_full_reg_offset(s, a->rn),
579 vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
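Pieced together, the trans_XAR lines illustrate the standard shape of a trans_* handler in this file: a decode-time feature test, the SVE access check, then a gvec expansion. The complete handler plausibly reads as follows; the `return false`/`return true` paths are assumed from that pattern:

    static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
    {
        if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
            return false;                  /* bad element size or no SVE2 */
        }
        if (sve_access_check(s)) {
            unsigned vsz = vec_full_reg_size(s);
            gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
                         vec_full_reg_offset(s, a->rn),
                         vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
        }
        return true;
    }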
710 static bool do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz)
716 return gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0);
794 s->fpcr_ah ? fabs_ah_fns[a->esz] : fabs_fns[a->esz], a, 0)
805 s->fpcr_ah ? fneg_ah_fns[a->esz] : fneg_fns[a->esz], a, 0)
874 static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
877 unsigned vsz = vec_full_reg_size(s);
885 if (!sve_access_check(s)) {
894 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn));
895 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
898 write_fp_dreg(s, a->rd, temp);
935 static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg,
942 return gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert);
945 static bool do_shift_zpzi(DisasContext *s, arg_rpri_esz *a, bool asr,
957 * For arithmetic right-shift, it's the same as by one less.
965 return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
968 return gen_gvec_ool_arg_zpzi(s, fns[a->esz], a);
1052 static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr,
1060 if (sve_access_check(s)) {
1061 unsigned vsz = vec_full_reg_size(s);
1063 arithmetic right-shift, it's the same as by one less.
1069 do_dupi_z(s, a->rd, 0);
1073 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
1074 vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
1101 static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a,
1104 if (sve_access_check(s)) {
1105 unsigned vsz = vec_full_reg_size(s);
1106 tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd),
1107 vec_full_reg_offset(s, a->ra),
1108 vec_full_reg_offset(s, a->rn),
1109 vec_full_reg_offset(s, a->rm),
1110 pred_full_reg_offset(s, a->pg),
1132 static bool do_index(DisasContext *s, int esz, int rd,
1139 if (!sve_access_check(s)) {
1143 vsz = vec_full_reg_size(s);
1147 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, rd));
1170 tcg_constant_i64(a->imm), cpu_reg(s, a->rm))
1172 cpu_reg(s, a->rn), tcg_constant_i64(a->imm))
1174 cpu_reg(s, a->rn), cpu_reg(s, a->rm))
1180 static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
1182 if (!dc_isar_feature(aa64_sve, s)) {
1185 if (sve_access_check(s)) {
1186 TCGv_i64 rd = cpu_reg_sp(s, a->rd);
1187 TCGv_i64 rn = cpu_reg_sp(s, a->rn);
1188 tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s));
1193 static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a)
1195 if (!dc_isar_feature(aa64_sme, s)) {
1198 if (sme_enabled_check(s)) {
1199 TCGv_i64 rd = cpu_reg_sp(s, a->rd);
1200 TCGv_i64 rn = cpu_reg_sp(s, a->rn);
1201 tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s));
1206 static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
1208 if (!dc_isar_feature(aa64_sve, s)) {
1211 if (sve_access_check(s)) {
1212 TCGv_i64 rd = cpu_reg_sp(s, a->rd);
1213 TCGv_i64 rn = cpu_reg_sp(s, a->rn);
1214 tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s));
1219 static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a)
1221 if (!dc_isar_feature(aa64_sme, s)) {
1224 if (sme_enabled_check(s)) {
1225 TCGv_i64 rd = cpu_reg_sp(s, a->rd);
1226 TCGv_i64 rn = cpu_reg_sp(s, a->rn);
1227 tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s));
1232 static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
1234 if (!dc_isar_feature(aa64_sve, s)) {
1237 if (sve_access_check(s)) {
1238 TCGv_i64 reg = cpu_reg(s, a->rd);
1239 tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s));
1244 static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a)
1246 if (!dc_isar_feature(aa64_sme, s)) {
1249 if (sme_enabled_check(s)) {
1250 TCGv_i64 reg = cpu_reg(s, a->rd);
1251 tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s));
1260 static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
1262 return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
1279 fexpa_fns[a->esz], a->rd, a->rn, s->fpcr_ah)
1286 ftssel_fns[a->esz], a, s->fpcr_ah)
1292 static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
1295 if (!sve_access_check(s)) {
1299 unsigned psz = pred_gvec_reg_size(s);
1300 int dofs = pred_full_reg_offset(s, a->rd);
1301 int nofs = pred_full_reg_offset(s, a->rn);
1302 int mofs = pred_full_reg_offset(s, a->rm);
1303 int gofs = pred_full_reg_offset(s, a->pg);
1305 if (!a->s) {
1338 do_predtest(s, dofs, tofs, psz / 8);
1356 static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a)
1365 if (!dc_isar_feature(aa64_sve, s)) {
1368 if (!a->s) {
1371 return do_mov_p(s, a->rd, a->rn);
1373 return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg);
1375 return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm);
1378 return do_pppp_flags(s, a, &op);
1394 static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a)
1403 if (!dc_isar_feature(aa64_sve, s)) {
1406 if (!a->s && a->pg == a->rn) {
1407 return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm);
1409 return do_pppp_flags(s, a, &op);
1425 static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a)
1434 if (!dc_isar_feature(aa64_sve, s)) {
1438 if (!a->s && a->pg == a->rm) {
1439 return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->pg, a->rn);
1441 return do_pppp_flags(s, a, &op);
1444 static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a)
1446 if (a->s || !dc_isar_feature(aa64_sve, s)) {
1449 if (sve_access_check(s)) {
1450 unsigned psz = pred_gvec_reg_size(s);
1451 tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd),
1452 pred_full_reg_offset(s, a->pg),
1453 pred_full_reg_offset(s, a->rn),
1454 pred_full_reg_offset(s, a->rm), psz, psz);
1472 static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a)
1481 if (!dc_isar_feature(aa64_sve, s)) {
1484 if (!a->s && a->pg == a->rn && a->rn == a->rm) {
1485 return do_mov_p(s, a->rd, a->rn);
1487 return do_pppp_flags(s, a, &op);
1503 static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a)
1512 if (!dc_isar_feature(aa64_sve, s)) {
1515 return do_pppp_flags(s, a, &op);
1531 static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a)
1540 if (!dc_isar_feature(aa64_sve, s)) {
1543 return do_pppp_flags(s, a, &op);
1559 static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a)
1568 if (!dc_isar_feature(aa64_sve, s)) {
1571 return do_pppp_flags(s, a, &op);
1578 static bool trans_PTEST(DisasContext *s, arg_PTEST *a)
1580 if (!dc_isar_feature(aa64_sve, s)) {
1583 if (sve_access_check(s)) {
1584 int nofs = pred_full_reg_offset(s, a->rn);
1585 int gofs = pred_full_reg_offset(s, a->pg);
1586 int words = DIV_ROUND_UP(pred_full_reg_size(s), 8);
1596 do_predtest(s, nofs, gofs, words);
1645 static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
1647 if (!sve_access_check(s)) {
1651 unsigned fullsz = vec_full_reg_size(s);
1652 unsigned ofs = pred_full_reg_offset(s, rd);
1718 TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)
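TRANS_FEAT entries like the PTRUE line above have no body in this file; the macro (from the shared translator headers) stamps out a trans_* function that checks the named ISAR feature and forwards to the given expander. Roughly, and assuming the usual shape of that macro:

    /* Approximate expansion of TRANS_FEAT(PTRUE, aa64_sve, do_predset, ...) */
    static bool trans_PTRUE(DisasContext *s, arg_PTRUE *a)
    {
        return dc_isar_feature(aa64_sve, s)
            && do_predset(s, a->esz, a->rd, a->pat, a->s);
    }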
1720 static bool trans_PTRUE_cnt(DisasContext *s, arg_PTRUE_cnt *a)
1722 if (!dc_isar_feature(aa64_sme2_or_sve2p1, s)) {
1725 if (sve_access_check(s)) {
1730 tcg_gen_gvec_dup_imm(MO_64, pred_full_reg_offset(s, a->rd),
1731 8, size_for_gvec(pred_full_reg_size(s)), val);
1743 static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
1749 .rd = a->rd, .pg = a->pg, .s = a->s,
1753 s->is_nonstreaming = true;
1754 return trans_AND_pppp(s, &alt_a);
1760 static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
1764 if (!sve_access_check(s)) {
1773 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
1776 tcg_gen_addi_ptr(t_pd, tcg_env, pred_full_reg_offset(s, a->rd));
1777 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->rn));
1863 static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn,
1866 unsigned vsz = vec_full_reg_size(s);
1873 tcg_gen_addi_ptr(dptr, tcg_env, vec_full_reg_offset(s, rd));
1874 tcg_gen_addi_ptr(nptr, tcg_env, vec_full_reg_offset(s, rn));
1939 static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a)
1941 if (!dc_isar_feature(aa64_sve, s)) {
1944 if (sve_access_check(s)) {
1945 unsigned fullsz = vec_full_reg_size(s);
1947 tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm);
1952 static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a)
1954 if (!dc_isar_feature(aa64_sve, s)) {
1957 if (sve_access_check(s)) {
1958 unsigned fullsz = vec_full_reg_size(s);
1961 TCGv_i64 reg = cpu_reg(s, a->rd);
1968 static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a)
1970 if (!dc_isar_feature(aa64_sve, s)) {
1973 if (!sve_access_check(s)) {
1977 unsigned fullsz = vec_full_reg_size(s);
1980 TCGv_i64 reg = cpu_reg(s, a->rd);
1995 static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a)
1997 if (!dc_isar_feature(aa64_sve, s)) {
2000 if (!sve_access_check(s)) {
2004 unsigned fullsz = vec_full_reg_size(s);
2007 TCGv_i64 reg = cpu_reg(s, a->rd);
2015 static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
2017 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
2021 unsigned fullsz = vec_full_reg_size(s);
2026 if (sve_access_check(s)) {
2027 tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd),
2028 vec_full_reg_offset(s, a->rn),
2033 do_mov_z(s, a->rd, a->rn);
2038 static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
2040 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
2044 unsigned fullsz = vec_full_reg_size(s);
2049 if (sve_access_check(s)) {
2050 do_sat_addsub_vec(s, a->esz, a->rd, a->rn,
2054 do_mov_z(s, a->rd, a->rn);
2063 static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn)
2071 return gen_gvec_fn_zzi(s, gvec_fn, MO_64, a->rd, a->rn, imm);
2078 static bool trans_DUPM(DisasContext *s, arg_DUPM *a)
2082 if (!dc_isar_feature(aa64_sve, s)) {
2090 if (sve_access_check(s)) {
2091 do_dupi_z(s, a->rd, imm);
2103 static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg,
2111 unsigned vsz = vec_full_reg_size(s);
2117 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, rd));
2118 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, rn));
2119 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
2124 static bool trans_FCPY(DisasContext *s, arg_FCPY *a)
2126 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
2129 if (sve_access_check(s)) {
2132 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(imm));
2137 static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a)
2139 if (!dc_isar_feature(aa64_sve, s)) {
2142 if (sve_access_check(s)) {
2143 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(a->imm));
2148 static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a)
2155 if (!dc_isar_feature(aa64_sve, s)) {
2158 if (sve_access_check(s)) {
2159 unsigned vsz = vec_full_reg_size(s);
2160 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
2161 pred_full_reg_offset(s, a->pg),
2172 static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm)
2174 if (!sve_access_check(s)) {
2178 unsigned vsz = vec_full_reg_size(s);
2181 unsigned d = vec_full_reg_offset(s, rd);
2182 unsigned n = vec_full_reg_offset(s, rn);
2183 unsigned m = vec_full_reg_offset(s, rm);
2205 static bool trans_EXTQ(DisasContext *s, arg_EXTQ *a)
2209 if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
2212 if (!sve_access_check(s)) {
2222 vl = vec_full_reg_size(s);
2223 dofs = vec_full_reg_offset(s, a->rd);
2224 sofs2 = vec_full_reg_offset(s, a->rn);
2258 static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a)
2260 if (!dc_isar_feature(aa64_sve, s)) {
2263 if (sve_access_check(s)) {
2264 unsigned vsz = vec_full_reg_size(s);
2265 tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd),
2266 vsz, vsz, cpu_reg_sp(s, a->rn));
2271 static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
2273 if (!dc_isar_feature(aa64_sve, s)) {
2279 if (sve_access_check(s)) {
2280 unsigned vsz = vec_full_reg_size(s);
2281 unsigned dofs = vec_full_reg_offset(s, a->rd);
2288 unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
2301 static bool trans_DUPQ(DisasContext *s, arg_DUPQ *a)
2305 if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
2308 if (!sve_access_check(s)) {
2312 vl = vec_full_reg_size(s);
2313 dofs = vec_full_reg_offset(s, a->rd);
2314 nofs = vec_reg_offset(s, a->rn, a->imm, a->esz);
2322 static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val)
2329 unsigned vsz = vec_full_reg_size(s);
2334 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, a->rd));
2335 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn));
2340 static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a)
2342 if (!dc_isar_feature(aa64_sve, s)) {
2345 if (sve_access_check(s)) {
2347 tcg_gen_ld_i64(t, tcg_env, vec_reg_offset(s, a->rm, 0, MO_64));
2348 do_insr_i64(s, a, t);
2353 static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a)
2355 if (!dc_isar_feature(aa64_sve, s)) {
2358 if (sve_access_check(s)) {
2359 do_insr_i64(s, a, cpu_reg(s, a->rm));
2403 static bool trans_PMOV_pv(DisasContext *s, arg_PMOV_pv *a)
2412 if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
2415 if (!sve_access_check(s)) {
2419 vl = vec_full_reg_size(s);
2421 tcg_gen_gvec_2_ool(pred_full_reg_offset(s, a->rd),
2422 vec_full_reg_offset(s, a->rn),
2433 pofs = pred_full_reg_offset(s, a->rd);
2434 vofs = vec_full_reg_offset(s, a->rn);
2467 static bool trans_PMOV_vp(DisasContext *s, arg_PMOV_pv *a)
2475 if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
2478 if (!sve_access_check(s)) {
2482 vl = vec_full_reg_size(s);
2490 tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, a->rd),
2491 pred_full_reg_offset(s, a->rn),
2494 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
2495 pred_full_reg_offset(s, a->rn),
2501 static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
2510 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
2513 if (sve_access_check(s)) {
2514 unsigned vsz = vec_full_reg_size(s);
2515 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
2516 vec_full_reg_offset(s, a->rn)
2527 static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd,
2530 if (!sve_access_check(s)) {
2534 unsigned vsz = pred_full_reg_size(s);
2545 tcg_gen_addi_ptr(t_d, tcg_env, pred_full_reg_offset(s, a->rd));
2546 tcg_gen_addi_ptr(t_n, tcg_env, pred_full_reg_offset(s, a->rn));
2547 tcg_gen_addi_ptr(t_m, tcg_env, pred_full_reg_offset(s, a->rm));
2553 static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd,
2556 if (!sve_access_check(s)) {
2560 unsigned vsz = pred_full_reg_size(s);
2565 tcg_gen_addi_ptr(t_d, tcg_env, pred_full_reg_offset(s, a->rd));
2566 tcg_gen_addi_ptr(t_n, tcg_env, pred_full_reg_offset(s, a->rn));
2591 static bool do_interleave_q(DisasContext *s, gen_helper_gvec_3 *fn,
2594 if (sve_access_check(s)) {
2595 unsigned vsz = vec_full_reg_size(s);
2597 unallocated_encoding(s);
2599 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
2600 vec_full_reg_offset(s, a->rn),
2601 vec_full_reg_offset(s, a->rm),
2615 zip_fns[a->esz], a, vec_full_reg_size(s) / 2)
2621 QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2)
2684 static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg)
2692 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
2695 tcg_gen_addi_ptr(t_p, tcg_env, pred_full_reg_offset(s, pg));
2703 static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz)
2705 unsigned vsz = vec_full_reg_size(s);
2718 static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz)
2720 unsigned vsz = vec_full_reg_size(s);
2756 static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last,
2774 return load_esz(p, vec_full_reg_offset(s, rm), esz);
2778 static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before)
2785 if (!sve_access_check(s)) {
2792 find_last_active(s, last, esz, a->pg);
2800 incr_last_active(s, last, esz);
2803 ele = load_last_active(s, last, a->rm, esz);
2805 vsz = vec_full_reg_size(s);
2806 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele);
2814 do_mov_z(s, a->rd, a->rn);
2827 static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm,
2833 find_last_active(s, last, esz, pg);
2840 incr_last_active(s, last, esz);
2848 ele = load_last_active(s, last, rm, esz);
2855 static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before)
2857 if (sve_access_check(s)) {
2859 int ofs = vec_reg_offset(s, a->rd, 0, esz);
2862 do_clast_scalar(s, esz, a->pg, a->rn, before, reg);
2863 write_fp_dreg(s, a->rd, reg);
2872 static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before)
2876 if (!sve_access_check(s)) {
2880 reg = cpu_reg(s, a->rd);
2897 do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg);
2905 static TCGv_i64 do_last_scalar(DisasContext *s, int esz,
2910 find_last_active(s, last, esz, pg);
2912 wrap_last_active(s, last, esz);
2914 incr_last_active(s, last, esz);
2917 return load_last_active(s, last, rm, esz);
2921 static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before)
2923 if (sve_access_check(s)) {
2924 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
2925 write_fp_dreg(s, a->rd, val);
2934 static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before)
2936 if (sve_access_check(s)) {
2937 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
2938 tcg_gen_mov_i64(cpu_reg(s, a->rd), val);
2946 static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a)
2948 if (!dc_isar_feature(aa64_sve, s)) {
2951 if (sve_access_check(s)) {
2952 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn));
2957 static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a)
2959 if (!dc_isar_feature(aa64_sve, s)) {
2962 if (sve_access_check(s)) {
2963 int ofs = vec_reg_offset(s, a->rn, 0, a->esz);
2965 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t);
2996 static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
3006 if (!sve_access_check(s)) {
3010 vsz = vec_full_reg_size(s);
3017 tcg_gen_addi_ptr(pd, tcg_env, pred_full_reg_offset(s, a->rd));
3018 tcg_gen_addi_ptr(zn, tcg_env, vec_full_reg_offset(s, a->rn));
3019 tcg_gen_addi_ptr(zm, tcg_env, vec_full_reg_offset(s, a->rm));
3020 tcg_gen_addi_ptr(pg, tcg_env, pred_full_reg_offset(s, a->pg));
3070 static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a,
3080 if (!sve_access_check(s)) {
3084 vsz = vec_full_reg_size(s);
3090 tcg_gen_addi_ptr(pd, tcg_env, pred_full_reg_offset(s, a->rd));
3091 tcg_gen_addi_ptr(zn, tcg_env, vec_full_reg_offset(s, a->rn));
3092 tcg_gen_addi_ptr(pg, tcg_env, pred_full_reg_offset(s, a->pg));
3125 static bool do_brk3(DisasContext *s, arg_rprr_s *a,
3128 if (!sve_access_check(s)) {
3132 unsigned vsz = pred_full_reg_size(s);
3141 tcg_gen_addi_ptr(d, tcg_env, pred_full_reg_offset(s, a->rd));
3142 tcg_gen_addi_ptr(n, tcg_env, pred_full_reg_offset(s, a->rn));
3143 tcg_gen_addi_ptr(m, tcg_env, pred_full_reg_offset(s, a->rm));
3144 tcg_gen_addi_ptr(g, tcg_env, pred_full_reg_offset(s, a->pg));
3146 if (a->s) {
3156 static bool do_brk2(DisasContext *s, arg_rpr_s *a,
3159 if (!sve_access_check(s)) {
3163 unsigned vsz = pred_full_reg_size(s);
3171 tcg_gen_addi_ptr(d, tcg_env, pred_full_reg_offset(s, a->rd));
3172 tcg_gen_addi_ptr(n, tcg_env, pred_full_reg_offset(s, a->rn));
3173 tcg_gen_addi_ptr(g, tcg_env, pred_full_reg_offset(s, a->pg));
3175 if (a->s) {
3207 static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg)
3209 unsigned psz = pred_full_reg_size(s);
3214 tcg_gen_ld_i64(val, tcg_env, pred_full_reg_offset(s, pn));
3217 tcg_gen_ld_i64(g, tcg_env, pred_full_reg_offset(s, pg));
3236 tcg_gen_addi_ptr(t_pn, tcg_env, pred_full_reg_offset(s, pn));
3237 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
3243 static bool trans_CNTP(DisasContext *s, arg_CNTP *a)
3245 if (!dc_isar_feature(aa64_sve, s)) {
3248 if (sve_access_check(s)) {
3249 do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg);
3254 static bool trans_CNTP_c(DisasContext *s, arg_CNTP_c *a)
3259 if (dc_isar_feature(aa64_sve2p1, s)) {
3260 if (!sve_access_check(s)) {
3263 } else if (dc_isar_feature(aa64_sme2, s)) {
3264 if (!sme_sm_enabled_check(s)) {
3273 pred_full_reg_offset(s, a->rn) ^
3276 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
3280 gen_helper_sve2p1_cntp_c(cpu_reg(s, a->rd), t_png, tcg_constant_i32(desc));
3284 static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a)
3286 if (!dc_isar_feature(aa64_sve, s)) {
3289 if (sve_access_check(s)) {
3290 TCGv_i64 reg = cpu_reg(s, a->rd);
3293 do_cntp(s, val, a->esz, a->pg, a->pg);
3303 static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a)
3305 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
3308 if (sve_access_check(s)) {
3309 unsigned vsz = vec_full_reg_size(s);
3313 do_cntp(s, val, a->esz, a->pg, a->pg);
3314 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
3315 vec_full_reg_offset(s, a->rn), val, vsz, vsz);
3320 static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a)
3322 if (!dc_isar_feature(aa64_sve, s)) {
3325 if (sve_access_check(s)) {
3326 TCGv_i64 reg = cpu_reg(s, a->rd);
3329 do_cntp(s, val, a->esz, a->pg, a->pg);
3335 static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a)
3337 if (!dc_isar_feature(aa64_sve, s)) {
3340 if (sve_access_check(s)) {
3341 TCGv_i64 reg = cpu_reg(s, a->rd);
3344 do_cntp(s, val, a->esz, a->pg, a->pg);
3350 static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a)
3352 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
3355 if (sve_access_check(s)) {
3357 do_cntp(s, val, a->esz, a->pg, a->pg);
3358 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d);
3367 static bool trans_CTERM(DisasContext *s, arg_CTERM *a)
3369 if (!dc_isar_feature(aa64_sve, s)) {
3372 if (!sve_access_check(s)) {
3377 TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf);
3378 TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf);
3395 static bool do_WHILE(DisasContext *s, arg_while *a,
3401 unsigned vsz = vec_full_reg_size(s);
3408 if (!sve_access_check(s)) {
3412 op0 = read_cpu_reg(s, a->rn, 1);
3413 op1 = read_cpu_reg(s, a->rm, 1);
3485 tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd));
3512 static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
3517 unsigned vsz = vec_full_reg_size(s);
3520 if (!dc_isar_feature(aa64_sve2, s)) {
3523 if (!sve_access_check(s)) {
3527 op0 = read_cpu_reg(s, a->rn, 1);
3528 op1 = read_cpu_reg(s, a->rm, 1);
3564 tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd));
3571 static bool do_pext(DisasContext *s, arg_pext *a, int n)
3577 if (!sve_access_check(s)) {
3583 pred_full_reg_offset(s, a->rn) ^
3587 pl = pred_full_reg_size(s);
3598 tcg_gen_addi_ptr(t_pd, tcg_env, pred_full_reg_offset(s, rd));
3611 static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
3613 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
3616 if (sve_access_check(s)) {
3617 unsigned vsz = vec_full_reg_size(s);
3618 int dofs = vec_full_reg_offset(s, a->rd);
3628 static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
3630 if (!dc_isar_feature(aa64_sve, s)) {
3633 if (sve_access_check(s)) {
3634 unsigned vsz = vec_full_reg_size(s);
3635 int dofs = vec_full_reg_offset(s, a->rd);
3643 static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a)
3646 return trans_ADD_zzi(s, a);
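The SUB (immediate) handler has no expansion of its own; as the fragment suggests, it folds into the ADD path. A sketch, where negating the immediate is the assumed missing step:

    static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a)
    {
        /* Assumed: subtracting an immediate is adding its negation. */
        a->imm = -a->imm;
        return trans_ADD_zzi(s, a);
    }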
3649 static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a)
3680 if (!dc_isar_feature(aa64_sve, s)) {
3683 if (sve_access_check(s)) {
3684 unsigned vsz = vec_full_reg_size(s);
3685 tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd),
3686 vec_full_reg_offset(s, a->rn),
3694 static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d)
3696 if (sve_access_check(s)) {
3697 do_sat_addsub_vec(s, a->esz, a->rd, a->rn,
3708 static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn)
3710 if (sve_access_check(s)) {
3711 unsigned vsz = vec_full_reg_size(s);
3712 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
3713 vec_full_reg_offset(s, a->rn),
3893 fmls_idx_fns[a->esz][s->fpcr_ah],
3916 static bool do_reduce(DisasContext *s, arg_rpr_esz *a,
3927 if (!sve_access_check(s)) {
3931 vsz = vec_full_reg_size(s);
3938 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn));
3939 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
3944 write_fp_dreg(s, a->rd, temp);
3965 s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz])
4008 (s->fpcr_ah ? fmaxqv_ah_fns : fmaxqv_fns)[a->esz], a, 0,
4020 (s->fpcr_ah ? fminqv_ah_fns : fminqv_fns)[a->esz], a, 0,
4036 s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ?
4048 s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ?
4055 static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a,
4061 if (sve_access_check(s)) {
4062 unsigned vsz = vec_full_reg_size(s);
4066 tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd),
4067 vec_full_reg_offset(s, a->rn),
4068 pred_full_reg_offset(s, a->pg),
4100 a->imm | (s->fpcr_ah << 3),
4107 static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
4116 unsigned vsz = vec_full_reg_size(s);
4121 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
4124 s->is_nonstreaming = true;
4125 if (!sve_access_check(s)) {
4129 t_val = load_esz(tcg_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
4132 tcg_gen_addi_ptr(t_rm, tcg_env, vec_full_reg_offset(s, a->rm));
4133 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
4139 write_fp_dreg(s, a->rd, t_val);
4164 s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz], a, 0)
4202 s->fpcr_ah ? name##_ah_zpzz_fns[a->esz] : \
4220 static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16,
4223 unsigned vsz = vec_full_reg_size(s);
4230 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, zd));
4231 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, zn));
4232 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
4239 static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm,
4245 if (sve_access_check(s)) {
4246 do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16,
4286 s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz])
4299 static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a,
4305 if (sve_access_check(s)) {
4306 unsigned vsz = vec_full_reg_size(s);
4308 tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd),
4309 vec_full_reg_offset(s, a->rn),
4310 vec_full_reg_offset(s, a->rm),
4311 pred_full_reg_offset(s, a->pg),
4339 a->rd, a->rn, a->rm, a->pg, a->rot | (s->fpcr_ah << 1),
4352 s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz], \
4369 a->rd, a->rn, a->rm, a->ra, a->pg, a->rot | (s->fpcr_ah << 2),
4390 s->fpcr_ah ? FPST_AH : FPST_A64)
4450 static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
4460 if (!sve_access_check(s)) {
4464 vsz = vec_full_reg_size(s);
4468 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
4469 vec_full_reg_offset(s, a->rn),
4470 pred_full_reg_offset(s, a->pg),
4493 a, 0, select_ah_fpst(s, a->esz))
4544 void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
4550 int midx = get_mem_index(s);
4555 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
4556 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
4654 void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
4660 int midx = get_mem_index(s);
4665 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
4666 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
4759 static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
4761 if (!dc_isar_feature(aa64_sve, s)) {
4764 if (sve_access_check(s)) {
4765 int size = vec_full_reg_size(s);
4766 int off = vec_full_reg_offset(s, a->rd);
4767 gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size,
4768 s->align_mem ? MO_ALIGN_16 : MO_UNALN);
4773 static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
4775 if (!dc_isar_feature(aa64_sve, s)) {
4778 if (sve_access_check(s)) {
4779 int size = pred_full_reg_size(s);
4780 int off = pred_full_reg_offset(s, a->rd);
4781 gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size,
4782 s->align_mem ? MO_ALIGN_2 : MO_UNALN);
4787 static bool trans_STR_zri(DisasContext *s, arg_rri *a)
4789 if (!dc_isar_feature(aa64_sve, s)) {
4792 if (sve_access_check(s)) {
4793 int size = vec_full_reg_size(s);
4794 int off = vec_full_reg_offset(s, a->rd);
4795 gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size,
4796 s->align_mem ? MO_ALIGN_16 : MO_UNALN);
4801 static bool trans_STR_pri(DisasContext *s, arg_rri *a)
4803 if (!dc_isar_feature(aa64_sve, s)) {
4806 if (sve_access_check(s)) {
4807 int size = pred_full_reg_size(s);
4808 int off = pred_full_reg_offset(s, a->rd);
4809 gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size,
4810 s->align_mem ? MO_ALIGN_2 : MO_UNALN);
4841 uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs,
4853 if (s->mte_active[0]) {
4854 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
4855 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
4856 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
4864 static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
4871 if (!s->mte_active[0]) {
4872 addr = clean_data_tbi(s, addr);
4880 desc = make_svemte_desc(s, vec_full_reg_size(s), nregs,
4884 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
5037 static void do_ld_zpa(DisasContext *s, int zt, int pg,
5041 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg];
5048 do_mem_zpa(s, zt, pg, addr, dtype, nreg + 1, false, fn);
5051 static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
5060 if (!dc_isar_feature(aa64_sve, s)) {
5065 if (!dc_isar_feature(aa64_sve2p1, s)) {
5068 s->is_nonstreaming = true;
5071 if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
5079 if (sve_access_check(s)) {
5081 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
5082 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5083 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
5088 static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a)
5093 if (!dc_isar_feature(aa64_sve, s)) {
5098 if (!dc_isar_feature(aa64_sve2p1, s)) {
5101 s->is_nonstreaming = true;
5104 if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
5112 if (sve_access_check(s)) {
5113 int vsz = vec_full_reg_size(s);
5117 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
5120 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
5125 static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
5213 if (!dc_isar_feature(aa64_sve, s)) {
5216 s->is_nonstreaming = true;
5217 if (sve_access_check(s)) {
5219 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
5220 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5221 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
5222 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
5227 static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
5315 if (!dc_isar_feature(aa64_sve, s)) {
5318 s->is_nonstreaming = true;
5319 if (sve_access_check(s)) {
5320 int vsz = vec_full_reg_size(s);
5325 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
5326 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
5327 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
5332 static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
5334 unsigned vsz = vec_full_reg_size(s);
5340 if (!s->mte_active[0]) {
5341 addr = clean_data_tbi(s, addr);
5344 poff = pred_full_reg_offset(s, pg);
5366 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
5367 desc = make_svemte_desc(s, 16, 1, dtype_msz(dtype), false, zt);
5372 int doff = vec_full_reg_offset(s, zt);
5377 static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a)
5379 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) {
5382 if (sve_access_check(s)) {
5385 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz);
5386 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5387 do_ldrq(s, a->rd, a->pg, addr, a->dtype);
5392 static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a)
5394 if (!dc_isar_feature(aa64_sve, s)) {
5397 if (sve_access_check(s)) {
5399 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16);
5400 do_ldrq(s, a->rd, a->pg, addr, a->dtype);
5405 static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
5407 unsigned vsz = vec_full_reg_size(s);
5419 unallocated_encoding(s);
5424 if (!s->mte_active[0]) {
5425 addr = clean_data_tbi(s, addr);
5428 poff = pred_full_reg_offset(s, pg);
5450 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
5451 desc = make_svemte_desc(s, 32, 1, dtype_msz(dtype), false, zt);
5459 doff = vec_full_reg_offset(s, zt);
5470 static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
5472 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
5478 s->is_nonstreaming = true;
5479 if (sve_access_check(s)) {
5481 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
5482 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5483 do_ldro(s, a->rd, a->pg, addr, a->dtype);
5488 static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
5490 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
5493 s->is_nonstreaming = true;
5494 if (sve_access_check(s)) {
5496 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
5497 do_ldro(s, a->rd, a->pg, addr, a->dtype);
5503 static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
5505 unsigned vsz = vec_full_reg_size(s);
5506 unsigned psz = pred_full_reg_size(s);
5513 if (!dc_isar_feature(aa64_sve, s)) {
5516 if (!sve_access_check(s)) {
5529 tcg_gen_ld_i64(temp, tcg_env, pred_full_reg_offset(s, a->pg));
5534 find_last_active(s, t32, esz, a->pg);
5540 tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz);
5542 memop = finalize_memop(s, dtype_mop[a->dtype]);
5543 clean_addr = gen_mte_check1(s, temp, false, true, memop);
5544 tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s), memop);
5547 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
5552 return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false);
5555 static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
5684 int be = s->be_data == MO_BE;
5688 fn = fn_single[s->mte_active[0]][be][msz][esz];
5692 fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz];
5695 do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg + 1, true, fn);
5698 static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
5705 if (!dc_isar_feature(aa64_sve, s)) {
5712 if (!dc_isar_feature(aa64_sve2p1, s)) {
5715 s->is_nonstreaming = true;
5717 if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
5726 if (sve_access_check(s)) {
5728 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz);
5729 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5730 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5735 static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a)
5742 if (!dc_isar_feature(aa64_sve, s)) {
5749 if (!dc_isar_feature(aa64_sve2p1, s)) {
5752 s->is_nonstreaming = true;
5754 if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
5763 if (sve_access_check(s)) {
5764 int vsz = vec_full_reg_size(s);
5768 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
5770 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5779 static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
5788 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
5789 tcg_gen_addi_ptr(t_zm, tcg_env, vec_full_reg_offset(s, zm));
5790 tcg_gen_addi_ptr(t_zt, tcg_env, vec_full_reg_offset(s, zt));
5792 desc = make_svemte_desc(s, vec_full_reg_size(s), 1, msz, is_write, scale);
6132 static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
6135 bool be = s->be_data == MO_BE;
6136 bool mte = s->mte_active[0];
6139 ? !dc_isar_feature(aa64_sve, s)
6140 : !dc_isar_feature(aa64_sve2p1, s)) {
6143 s->is_nonstreaming = true;
6144 if (!sve_access_check(s)) {
6164 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
6165 cpu_reg_sp(s, a->rn), a->msz, false, fn);
6169 static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
6172 bool be = s->be_data == MO_BE;
6173 bool mte = s->mte_active[0];
6178 if (!dc_isar_feature(aa64_sve, s)) {
6181 s->is_nonstreaming = true;
6182 if (!sve_access_check(s)) {
6199 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
6204 static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
6207 bool be = s->be_data == MO_BE;
6208 bool mte = s->mte_active[0];
6213 if (!dc_isar_feature(aa64_sve2, s)) {
6216 s->is_nonstreaming = true;
6217 if (!sve_access_check(s)) {
6231 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
6232 cpu_reg(s, a->rm), a->msz, false, fn);
6336 static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
6339 bool be = s->be_data == MO_BE;
6340 bool mte = s->mte_active[0];
6346 ? !dc_isar_feature(aa64_sve, s)
6347 : !dc_isar_feature(aa64_sve2p1, s)) {
6350 s->is_nonstreaming = true;
6351 if (!sve_access_check(s)) {
6368 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
6369 cpu_reg_sp(s, a->rn), a->msz, true, fn);
6373 static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
6376 bool be = s->be_data == MO_BE;
6377 bool mte = s->mte_active[0];
6382 if (!dc_isar_feature(aa64_sve, s)) {
6385 s->is_nonstreaming = true;
6386 if (!sve_access_check(s)) {
6403 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
6408 static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
6411 bool be = s->be_data == MO_BE;
6412 bool mte = s->mte_active[0];
6417 if (!dc_isar_feature(aa64_sve2, s)) {
6420 s->is_nonstreaming = true;
6421 if (!sve_access_check(s)) {
6436 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
6437 cpu_reg(s, a->rm), a->msz, true, fn);
6445 static bool trans_PRF(DisasContext *s, arg_PRF *a)
6447 if (!dc_isar_feature(aa64_sve, s)) {
6451 (void)sve_access_check(s);
6455 static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
6457 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) {
6461 (void)sve_access_check(s);
6465 static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a)
6467 if (!dc_isar_feature(aa64_sve, s)) {
6471 s->is_nonstreaming = true;
6472 (void)sve_access_check(s);
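The three PRF handlers above generate no memory operations: the prefetch itself is treated as a no-op, and only the feature test and the access check (kept for its trap side effects) remain. A sketch of the first one, with the omitted returns assumed:

    static bool trans_PRF(DisasContext *s, arg_PRF *a)
    {
        if (!dc_isar_feature(aa64_sve, s)) {
            return false;
        }
        /* Prefetch is a no-op within QEMU; the check may still trap. */
        (void)sve_access_check(s);
        return true;
    }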
6692 static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
6700 if (!dc_isar_feature(aa64_sve2_pmull128, s)) {
6703 s->is_nonstreaming = true;
6704 } else if (!dc_isar_feature(aa64_sve, s)) {
6707 return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
6825 static bool do_shll_tb(DisasContext *s, arg_rri_esz *a,
6832 if (sve_access_check(s)) {
6833 unsigned vsz = vec_full_reg_size(s);
6834 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
6835 vec_full_reg_offset(s, a->rn),
6938 static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel)
6948 return gen_gvec_ool_arg_zzzz(s, fns[a->esz & 1], a, (a->esz & 2) | sel);
6964 static bool do_narrow_extract(DisasContext *s, arg_rri_esz *a,
6970 if (sve_access_check(s)) {
6971 unsigned vsz = vec_full_reg_size(s);
6972 tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
6973 vec_full_reg_offset(s, a->rn),
7161 static bool do_shr_narrow(DisasContext *s, arg_rri_esz *a,
7168 if (sve_access_check(s)) {
7169 unsigned vsz = vec_full_reg_size(s);
7170 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
7171 vec_full_reg_offset(s, a->rn),
7590 static bool do_fmmla(DisasContext *s, arg_rrrr_esz *a,
7593 if (sve_access_check(s)) {
7594 if (vec_full_reg_size(s) < 4 * memop_size(a->esz)) {
7595 unallocated_encoding(s);
7597 gen_gvec_fpst_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, 0, FPST_A64);
7735 s->fpcr_ah ? FPST_AH : FPST_A64)
7754 static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
7756 return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzzw_s,
7766 static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
7768 return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s,
7798 static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
7800 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal,
7802 s->fpcr_ah ? FPST_AH : FPST_A64);
7808 static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
7810 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx,
7813 s->fpcr_ah ? FPST_AH : FPST_A64);
7819 static bool do_BFMLSL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
7821 if (s->fpcr_ah) {
7822 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_ah_bfmlsl,
7825 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlsl,
7833 static bool do_BFMLSL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
7835 if (s->fpcr_ah) {
7836 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_ah_bfmlsl_idx,
7840 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlsl_idx,
7849 static bool trans_PSEL(DisasContext *s, arg_psel *a)
7851 int vl = vec_full_reg_size(s);
7852 int pl = pred_gvec_reg_size(s);
7857 if (!dc_isar_feature(aa64_sme_or_sve2p1, s)) {
7860 if (!sve_access_check(s)) {
7870 tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm);
7888 tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));
7897 tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd),
7898 pred_full_reg_offset(s, a->pn), tmp, pl, pl);
8004 static bool trans_FCLAMP(DisasContext *s, arg_FCLAMP *a)
8015 ? !dc_isar_feature(aa64_sve_b16b16, s)
8016 : !dc_isar_feature(aa64_sme2_or_sve2p1, s)) {
8022 return gen_gvec_fpst_zzz(s, fn[a->esz], a->rd, a->rn, a->rm, 1,
8033 static bool gen_ldst_c(DisasContext *s, TCGv_i64 addr, int zd, int png,
8061 bool be = s->be_data == MO_BE;
8077 if (strided || !dc_isar_feature(aa64_sve2p1, s)
8078 ? !sme_sm_enabled_check(s)
8079 : !sve_access_check(s)) {
8083 if (!s->mte_active[0]) {
8084 addr = clean_data_tbi(s, addr);
8089 desc = make_svemte_desc(s, vec_full_reg_size(s), 1, esz, is_write, desc);
8094 pred_full_reg_offset(s, png) ^
8098 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, zd));
8104 static bool gen_ldst_zcrr_c(DisasContext *s, arg_zcrr_ldst *a,
8109 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz);
8110 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
8111 return gen_ldst_c(s, addr, a->rd, a->png, a->esz, is_write,
8115 static bool gen_ldst_zcri_c(DisasContext *s, arg_zcri_ldst *a,
8120 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
8121 a->imm * a->nreg * vec_full_reg_size(s));
8122 return gen_ldst_c(s, addr, a->rd, a->png, a->esz, is_write,