1 /* 2 * AArch64 SVE translation 3 * 4 * Copyright (c) 2018 Linaro, Ltd 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "translate.h" 22 #include "translate-a64.h" 23 #include "fpu/softfloat.h" 24 25 26 typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, 27 TCGv_i64, uint32_t, uint32_t); 28 29 typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr, 30 TCGv_ptr, TCGv_i32); 31 typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr, 32 TCGv_ptr, TCGv_ptr, TCGv_i32); 33 34 typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32); 35 typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr, 36 TCGv_ptr, TCGv_i64, TCGv_i32); 37 38 /* 39 * Helpers for extracting complex instruction fields. 40 */ 41 42 /* See e.g. ASR (immediate, predicated). 43 * Returns -1 for unallocated encoding; diagnose later. 44 */ 45 static int tszimm_esz(DisasContext *s, int x) 46 { 47 x >>= 3; /* discard imm3 */ 48 return 31 - clz32(x); 49 } 50 51 static int tszimm_shr(DisasContext *s, int x) 52 { 53 /* 54 * We won't use the tszimm_shr() value if tszimm_esz() returns -1 (the 55 * trans function will check for esz < 0), so we can return any 56 * value we like from here in that case as long as we avoid UB. 57 */ 58 int esz = tszimm_esz(s, x); 59 if (esz < 0) { 60 return esz; 61 } 62 return (16 << esz) - x; 63 } 64 65 /* See e.g. LSL (immediate, predicated). */ 66 static int tszimm_shl(DisasContext *s, int x) 67 { 68 /* As with tszimm_shr(), value will be unused if esz < 0 */ 69 int esz = tszimm_esz(s, x); 70 if (esz < 0) { 71 return esz; 72 } 73 return x - (8 << esz); 74 } 75 76 /* The SH bit is in bit 8. Extract the low 8 and shift. */ 77 static inline int expand_imm_sh8s(DisasContext *s, int x) 78 { 79 return (int8_t)x << (x & 0x100 ? 8 : 0); 80 } 81 82 static inline int expand_imm_sh8u(DisasContext *s, int x) 83 { 84 return (uint8_t)x << (x & 0x100 ? 8 : 0); 85 } 86 87 /* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype) 88 * with unsigned data. C.f. SVE Memory Contiguous Load Group. 89 */ 90 static inline int msz_dtype(DisasContext *s, int msz) 91 { 92 static const uint8_t dtype[4] = { 0, 5, 10, 15 }; 93 return dtype[msz]; 94 } 95 96 /* 97 * Include the generated decoder. 98 */ 99 100 #include "decode-sve.c.inc" 101 102 /* 103 * Implement all of the translator functions referenced by the decoder. 104 */ 105 106 /* Invoke an out-of-line helper on 2 Zregs. */ 107 static bool gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn, 108 int rd, int rn, int data) 109 { 110 if (fn == NULL) { 111 return false; 112 } 113 if (sve_access_check(s)) { 114 unsigned vsz = vec_full_reg_size(s); 115 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd), 116 vec_full_reg_offset(s, rn), 117 vsz, vsz, data, fn); 118 } 119 return true; 120 } 121 122 static bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, 123 int rd, int rn, int data, 124 ARMFPStatusFlavour flavour) 125 { 126 if (fn == NULL) { 127 return false; 128 } 129 if (sve_access_check(s)) { 130 unsigned vsz = vec_full_reg_size(s); 131 TCGv_ptr status = fpstatus_ptr(flavour); 132 133 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd), 134 vec_full_reg_offset(s, rn), 135 status, vsz, vsz, data, fn); 136 } 137 return true; 138 } 139 140 static bool gen_gvec_fpst_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, 141 arg_rr_esz *a, int data) 142 { 143 return gen_gvec_fpst_zz(s, fn, a->rd, a->rn, data, 144 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 145 } 146 147 /* Invoke an out-of-line helper on 3 Zregs. */ 148 static bool gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn, 149 int rd, int rn, int rm, int data) 150 { 151 if (fn == NULL) { 152 return false; 153 } 154 if (sve_access_check(s)) { 155 unsigned vsz = vec_full_reg_size(s); 156 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), 157 vec_full_reg_offset(s, rn), 158 vec_full_reg_offset(s, rm), 159 vsz, vsz, data, fn); 160 } 161 return true; 162 } 163 164 static bool gen_gvec_ool_arg_zzz(DisasContext *s, gen_helper_gvec_3 *fn, 165 arg_rrr_esz *a, int data) 166 { 167 return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data); 168 } 169 170 /* Invoke an out-of-line helper on 3 Zregs, plus float_status. */ 171 static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, 172 int rd, int rn, int rm, 173 int data, ARMFPStatusFlavour flavour) 174 { 175 if (fn == NULL) { 176 return false; 177 } 178 if (sve_access_check(s)) { 179 unsigned vsz = vec_full_reg_size(s); 180 TCGv_ptr status = fpstatus_ptr(flavour); 181 182 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), 183 vec_full_reg_offset(s, rn), 184 vec_full_reg_offset(s, rm), 185 status, vsz, vsz, data, fn); 186 } 187 return true; 188 } 189 190 static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, 191 arg_rrr_esz *a, int data) 192 { 193 return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data, 194 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 195 } 196 197 /* Invoke an out-of-line helper on 4 Zregs. */ 198 static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn, 199 int rd, int rn, int rm, int ra, int data) 200 { 201 if (fn == NULL) { 202 return false; 203 } 204 if (sve_access_check(s)) { 205 unsigned vsz = vec_full_reg_size(s); 206 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), 207 vec_full_reg_offset(s, rn), 208 vec_full_reg_offset(s, rm), 209 vec_full_reg_offset(s, ra), 210 vsz, vsz, data, fn); 211 } 212 return true; 213 } 214 215 static bool gen_gvec_ool_arg_zzzz(DisasContext *s, gen_helper_gvec_4 *fn, 216 arg_rrrr_esz *a, int data) 217 { 218 return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data); 219 } 220 221 static bool gen_gvec_ool_arg_zzxz(DisasContext *s, gen_helper_gvec_4 *fn, 222 arg_rrxr_esz *a) 223 { 224 return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index); 225 } 226 227 /* Invoke an out-of-line helper on 4 Zregs, plus a pointer. */ 228 static bool gen_gvec_ptr_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 229 int rd, int rn, int rm, int ra, 230 int data, TCGv_ptr ptr) 231 { 232 if (fn == NULL) { 233 return false; 234 } 235 if (sve_access_check(s)) { 236 unsigned vsz = vec_full_reg_size(s); 237 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), 238 vec_full_reg_offset(s, rn), 239 vec_full_reg_offset(s, rm), 240 vec_full_reg_offset(s, ra), 241 ptr, vsz, vsz, data, fn); 242 } 243 return true; 244 } 245 246 static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 247 int rd, int rn, int rm, int ra, 248 int data, ARMFPStatusFlavour flavour) 249 { 250 TCGv_ptr status = fpstatus_ptr(flavour); 251 bool ret = gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, status); 252 return ret; 253 } 254 255 static bool gen_gvec_env_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 256 int rd, int rn, int rm, int ra, 257 int data) 258 { 259 return gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, tcg_env); 260 } 261 262 static bool gen_gvec_env_arg_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 263 arg_rrrr_esz *a, int data) 264 { 265 return gen_gvec_env_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data); 266 } 267 268 static bool gen_gvec_env_arg_zzxz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 269 arg_rrxr_esz *a) 270 { 271 return gen_gvec_env_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index); 272 } 273 274 /* Invoke an out-of-line helper on 4 Zregs, 1 Preg, plus fpst. */ 275 static bool gen_gvec_fpst_zzzzp(DisasContext *s, gen_helper_gvec_5_ptr *fn, 276 int rd, int rn, int rm, int ra, int pg, 277 int data, ARMFPStatusFlavour flavour) 278 { 279 if (fn == NULL) { 280 return false; 281 } 282 if (sve_access_check(s)) { 283 unsigned vsz = vec_full_reg_size(s); 284 TCGv_ptr status = fpstatus_ptr(flavour); 285 286 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, rd), 287 vec_full_reg_offset(s, rn), 288 vec_full_reg_offset(s, rm), 289 vec_full_reg_offset(s, ra), 290 pred_full_reg_offset(s, pg), 291 status, vsz, vsz, data, fn); 292 } 293 return true; 294 } 295 296 /* Invoke an out-of-line helper on 2 Zregs and a predicate. */ 297 static bool gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn, 298 int rd, int rn, int pg, int data) 299 { 300 if (fn == NULL) { 301 return false; 302 } 303 if (sve_access_check(s)) { 304 unsigned vsz = vec_full_reg_size(s); 305 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), 306 vec_full_reg_offset(s, rn), 307 pred_full_reg_offset(s, pg), 308 vsz, vsz, data, fn); 309 } 310 return true; 311 } 312 313 static bool gen_gvec_ool_arg_zpz(DisasContext *s, gen_helper_gvec_3 *fn, 314 arg_rpr_esz *a, int data) 315 { 316 return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, data); 317 } 318 319 static bool gen_gvec_ool_arg_zpzi(DisasContext *s, gen_helper_gvec_3 *fn, 320 arg_rpri_esz *a) 321 { 322 return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm); 323 } 324 325 static bool gen_gvec_fpst_zzp(DisasContext *s, gen_helper_gvec_3_ptr *fn, 326 int rd, int rn, int pg, int data, 327 ARMFPStatusFlavour flavour) 328 { 329 if (fn == NULL) { 330 return false; 331 } 332 if (sve_access_check(s)) { 333 unsigned vsz = vec_full_reg_size(s); 334 TCGv_ptr status = fpstatus_ptr(flavour); 335 336 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), 337 vec_full_reg_offset(s, rn), 338 pred_full_reg_offset(s, pg), 339 status, vsz, vsz, data, fn); 340 } 341 return true; 342 } 343 344 static bool gen_gvec_fpst_arg_zpz(DisasContext *s, gen_helper_gvec_3_ptr *fn, 345 arg_rpr_esz *a, int data, 346 ARMFPStatusFlavour flavour) 347 { 348 return gen_gvec_fpst_zzp(s, fn, a->rd, a->rn, a->pg, data, flavour); 349 } 350 351 /* Invoke an out-of-line helper on 3 Zregs and a predicate. */ 352 static bool gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn, 353 int rd, int rn, int rm, int pg, int data) 354 { 355 if (fn == NULL) { 356 return false; 357 } 358 if (sve_access_check(s)) { 359 unsigned vsz = vec_full_reg_size(s); 360 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), 361 vec_full_reg_offset(s, rn), 362 vec_full_reg_offset(s, rm), 363 pred_full_reg_offset(s, pg), 364 vsz, vsz, data, fn); 365 } 366 return true; 367 } 368 369 static bool gen_gvec_ool_arg_zpzz(DisasContext *s, gen_helper_gvec_4 *fn, 370 arg_rprr_esz *a, int data) 371 { 372 return gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, data); 373 } 374 375 /* Invoke an out-of-line helper on 3 Zregs and a predicate. */ 376 static bool gen_gvec_fpst_zzzp(DisasContext *s, gen_helper_gvec_4_ptr *fn, 377 int rd, int rn, int rm, int pg, int data, 378 ARMFPStatusFlavour flavour) 379 { 380 if (fn == NULL) { 381 return false; 382 } 383 if (sve_access_check(s)) { 384 unsigned vsz = vec_full_reg_size(s); 385 TCGv_ptr status = fpstatus_ptr(flavour); 386 387 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), 388 vec_full_reg_offset(s, rn), 389 vec_full_reg_offset(s, rm), 390 pred_full_reg_offset(s, pg), 391 status, vsz, vsz, data, fn); 392 } 393 return true; 394 } 395 396 static bool gen_gvec_fpst_arg_zpzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 397 arg_rprr_esz *a) 398 { 399 return gen_gvec_fpst_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0, 400 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 401 } 402 403 /* Invoke a vector expander on two Zregs and an immediate. */ 404 static bool gen_gvec_fn_zzi(DisasContext *s, GVecGen2iFn *gvec_fn, 405 int esz, int rd, int rn, uint64_t imm) 406 { 407 if (gvec_fn == NULL) { 408 return false; 409 } 410 if (sve_access_check(s)) { 411 unsigned vsz = vec_full_reg_size(s); 412 gvec_fn(esz, vec_full_reg_offset(s, rd), 413 vec_full_reg_offset(s, rn), imm, vsz, vsz); 414 } 415 return true; 416 } 417 418 static bool gen_gvec_fn_arg_zzi(DisasContext *s, GVecGen2iFn *gvec_fn, 419 arg_rri_esz *a) 420 { 421 if (a->esz < 0) { 422 /* Invalid tsz encoding -- see tszimm_esz. */ 423 return false; 424 } 425 return gen_gvec_fn_zzi(s, gvec_fn, a->esz, a->rd, a->rn, a->imm); 426 } 427 428 /* Invoke a vector expander on three Zregs. */ 429 static bool gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn, 430 int esz, int rd, int rn, int rm) 431 { 432 if (gvec_fn == NULL) { 433 return false; 434 } 435 if (sve_access_check(s)) { 436 unsigned vsz = vec_full_reg_size(s); 437 gvec_fn(esz, vec_full_reg_offset(s, rd), 438 vec_full_reg_offset(s, rn), 439 vec_full_reg_offset(s, rm), vsz, vsz); 440 } 441 return true; 442 } 443 444 static bool gen_gvec_fn_arg_zzz(DisasContext *s, GVecGen3Fn *fn, 445 arg_rrr_esz *a) 446 { 447 return gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm); 448 } 449 450 /* Invoke a vector expander on four Zregs. */ 451 static bool gen_gvec_fn_arg_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn, 452 arg_rrrr_esz *a) 453 { 454 if (gvec_fn == NULL) { 455 return false; 456 } 457 if (sve_access_check(s)) { 458 unsigned vsz = vec_full_reg_size(s); 459 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd), 460 vec_full_reg_offset(s, a->rn), 461 vec_full_reg_offset(s, a->rm), 462 vec_full_reg_offset(s, a->ra), vsz, vsz); 463 } 464 return true; 465 } 466 467 /* Invoke a vector move on two Zregs. */ 468 static bool do_mov_z(DisasContext *s, int rd, int rn) 469 { 470 if (sve_access_check(s)) { 471 unsigned vsz = vec_full_reg_size(s); 472 tcg_gen_gvec_mov(MO_8, vec_full_reg_offset(s, rd), 473 vec_full_reg_offset(s, rn), vsz, vsz); 474 } 475 return true; 476 } 477 478 /* Initialize a Zreg with replications of a 64-bit immediate. */ 479 static void do_dupi_z(DisasContext *s, int rd, uint64_t word) 480 { 481 unsigned vsz = vec_full_reg_size(s); 482 tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word); 483 } 484 485 /* Invoke a vector expander on three Pregs. */ 486 static bool gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn, 487 int rd, int rn, int rm) 488 { 489 if (sve_access_check(s)) { 490 unsigned psz = pred_gvec_reg_size(s); 491 gvec_fn(MO_64, pred_full_reg_offset(s, rd), 492 pred_full_reg_offset(s, rn), 493 pred_full_reg_offset(s, rm), psz, psz); 494 } 495 return true; 496 } 497 498 /* Invoke a vector move on two Pregs. */ 499 static bool do_mov_p(DisasContext *s, int rd, int rn) 500 { 501 if (sve_access_check(s)) { 502 unsigned psz = pred_gvec_reg_size(s); 503 tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd), 504 pred_full_reg_offset(s, rn), psz, psz); 505 } 506 return true; 507 } 508 509 /* Set the cpu flags as per a return from an SVE helper. */ 510 static void do_pred_flags(TCGv_i32 t) 511 { 512 tcg_gen_mov_i32(cpu_NF, t); 513 tcg_gen_andi_i32(cpu_ZF, t, 2); 514 tcg_gen_andi_i32(cpu_CF, t, 1); 515 tcg_gen_movi_i32(cpu_VF, 0); 516 } 517 518 /* Subroutines computing the ARM PredTest psuedofunction. */ 519 static void do_predtest1(TCGv_i64 d, TCGv_i64 g) 520 { 521 TCGv_i32 t = tcg_temp_new_i32(); 522 523 gen_helper_sve_predtest1(t, d, g); 524 do_pred_flags(t); 525 } 526 527 static void do_predtest(DisasContext *s, int dofs, int gofs, int words) 528 { 529 TCGv_ptr dptr = tcg_temp_new_ptr(); 530 TCGv_ptr gptr = tcg_temp_new_ptr(); 531 TCGv_i32 t = tcg_temp_new_i32(); 532 533 tcg_gen_addi_ptr(dptr, tcg_env, dofs); 534 tcg_gen_addi_ptr(gptr, tcg_env, gofs); 535 536 gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words)); 537 538 do_pred_flags(t); 539 } 540 541 /* For each element size, the bits within a predicate word that are active. */ 542 const uint64_t pred_esz_masks[5] = { 543 0xffffffffffffffffull, 0x5555555555555555ull, 544 0x1111111111111111ull, 0x0101010101010101ull, 545 0x0001000100010001ull, 546 }; 547 548 static bool trans_INVALID(DisasContext *s, arg_INVALID *a) 549 { 550 unallocated_encoding(s); 551 return true; 552 } 553 554 /* 555 *** SVE Logical - Unpredicated Group 556 */ 557 558 TRANS_FEAT(AND_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_and, a) 559 TRANS_FEAT(ORR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_or, a) 560 TRANS_FEAT(EOR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_xor, a) 561 TRANS_FEAT(BIC_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_andc, a) 562 563 static bool trans_XAR(DisasContext *s, arg_rrri_esz *a) 564 { 565 if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) { 566 return false; 567 } 568 if (sve_access_check(s)) { 569 unsigned vsz = vec_full_reg_size(s); 570 gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd), 571 vec_full_reg_offset(s, a->rn), 572 vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz); 573 } 574 return true; 575 } 576 577 TRANS_FEAT(EOR3, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_gvec_eor3, a) 578 TRANS_FEAT(BCAX, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_gvec_bcax, a) 579 580 static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m, 581 uint32_t a, uint32_t oprsz, uint32_t maxsz) 582 { 583 /* BSL differs from the generic bitsel in argument ordering. */ 584 tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz); 585 } 586 587 TRANS_FEAT(BSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl, a) 588 589 static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) 590 { 591 tcg_gen_andc_i64(n, k, n); 592 tcg_gen_andc_i64(m, m, k); 593 tcg_gen_or_i64(d, n, m); 594 } 595 596 static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n, 597 TCGv_vec m, TCGv_vec k) 598 { 599 tcg_gen_not_vec(vece, n, n); 600 tcg_gen_bitsel_vec(vece, d, k, n, m); 601 } 602 603 static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m, 604 uint32_t a, uint32_t oprsz, uint32_t maxsz) 605 { 606 static const GVecGen4 op = { 607 .fni8 = gen_bsl1n_i64, 608 .fniv = gen_bsl1n_vec, 609 .fno = gen_helper_sve2_bsl1n, 610 .vece = MO_64, 611 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 612 }; 613 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); 614 } 615 616 TRANS_FEAT(BSL1N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl1n, a) 617 618 static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) 619 { 620 /* 621 * Z[dn] = (n & k) | (~m & ~k) 622 * = | ~(m | k) 623 */ 624 tcg_gen_and_i64(n, n, k); 625 if (tcg_op_supported(INDEX_op_orc_i64, TCG_TYPE_I64, 0)) { 626 tcg_gen_or_i64(m, m, k); 627 tcg_gen_orc_i64(d, n, m); 628 } else { 629 tcg_gen_nor_i64(m, m, k); 630 tcg_gen_or_i64(d, n, m); 631 } 632 } 633 634 static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n, 635 TCGv_vec m, TCGv_vec k) 636 { 637 tcg_gen_not_vec(vece, m, m); 638 tcg_gen_bitsel_vec(vece, d, k, n, m); 639 } 640 641 static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m, 642 uint32_t a, uint32_t oprsz, uint32_t maxsz) 643 { 644 static const GVecGen4 op = { 645 .fni8 = gen_bsl2n_i64, 646 .fniv = gen_bsl2n_vec, 647 .fno = gen_helper_sve2_bsl2n, 648 .vece = MO_64, 649 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 650 }; 651 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); 652 } 653 654 TRANS_FEAT(BSL2N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl2n, a) 655 656 static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) 657 { 658 tcg_gen_and_i64(n, n, k); 659 tcg_gen_andc_i64(m, m, k); 660 tcg_gen_nor_i64(d, n, m); 661 } 662 663 static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n, 664 TCGv_vec m, TCGv_vec k) 665 { 666 tcg_gen_bitsel_vec(vece, d, k, n, m); 667 tcg_gen_not_vec(vece, d, d); 668 } 669 670 static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m, 671 uint32_t a, uint32_t oprsz, uint32_t maxsz) 672 { 673 static const GVecGen4 op = { 674 .fni8 = gen_nbsl_i64, 675 .fniv = gen_nbsl_vec, 676 .fno = gen_helper_sve2_nbsl, 677 .vece = MO_64, 678 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 679 }; 680 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); 681 } 682 683 TRANS_FEAT(NBSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_nbsl, a) 684 685 /* 686 *** SVE Integer Arithmetic - Unpredicated Group 687 */ 688 689 TRANS_FEAT(ADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_add, a) 690 TRANS_FEAT(SUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sub, a) 691 TRANS_FEAT(SQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ssadd, a) 692 TRANS_FEAT(SQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sssub, a) 693 TRANS_FEAT(UQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_usadd, a) 694 TRANS_FEAT(UQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ussub, a) 695 696 /* 697 *** SVE Integer Arithmetic - Binary Predicated Group 698 */ 699 700 /* Select active elememnts from Zn and inactive elements from Zm, 701 * storing the result in Zd. 702 */ 703 static bool do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz) 704 { 705 static gen_helper_gvec_4 * const fns[4] = { 706 gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h, 707 gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d 708 }; 709 return gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0); 710 } 711 712 #define DO_ZPZZ(NAME, FEAT, name) \ 713 static gen_helper_gvec_4 * const name##_zpzz_fns[4] = { \ 714 gen_helper_##name##_zpzz_b, gen_helper_##name##_zpzz_h, \ 715 gen_helper_##name##_zpzz_s, gen_helper_##name##_zpzz_d, \ 716 }; \ 717 TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpzz, \ 718 name##_zpzz_fns[a->esz], a, 0) 719 720 DO_ZPZZ(AND_zpzz, aa64_sve, sve_and) 721 DO_ZPZZ(EOR_zpzz, aa64_sve, sve_eor) 722 DO_ZPZZ(ORR_zpzz, aa64_sve, sve_orr) 723 DO_ZPZZ(BIC_zpzz, aa64_sve, sve_bic) 724 725 DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add) 726 DO_ZPZZ(SUB_zpzz, aa64_sve, sve_sub) 727 728 DO_ZPZZ(SMAX_zpzz, aa64_sve, sve_smax) 729 DO_ZPZZ(UMAX_zpzz, aa64_sve, sve_umax) 730 DO_ZPZZ(SMIN_zpzz, aa64_sve, sve_smin) 731 DO_ZPZZ(UMIN_zpzz, aa64_sve, sve_umin) 732 DO_ZPZZ(SABD_zpzz, aa64_sve, sve_sabd) 733 DO_ZPZZ(UABD_zpzz, aa64_sve, sve_uabd) 734 735 DO_ZPZZ(MUL_zpzz, aa64_sve, sve_mul) 736 DO_ZPZZ(SMULH_zpzz, aa64_sve, sve_smulh) 737 DO_ZPZZ(UMULH_zpzz, aa64_sve, sve_umulh) 738 739 DO_ZPZZ(ASR_zpzz, aa64_sve, sve_asr) 740 DO_ZPZZ(LSR_zpzz, aa64_sve, sve_lsr) 741 DO_ZPZZ(LSL_zpzz, aa64_sve, sve_lsl) 742 743 static gen_helper_gvec_4 * const sdiv_fns[4] = { 744 NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d 745 }; 746 TRANS_FEAT(SDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, sdiv_fns[a->esz], a, 0) 747 748 static gen_helper_gvec_4 * const udiv_fns[4] = { 749 NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d 750 }; 751 TRANS_FEAT(UDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, udiv_fns[a->esz], a, 0) 752 753 TRANS_FEAT(SEL_zpzz, aa64_sve, do_sel_z, a->rd, a->rn, a->rm, a->pg, a->esz) 754 755 /* 756 *** SVE Integer Arithmetic - Unary Predicated Group 757 */ 758 759 #define DO_ZPZ(NAME, FEAT, name) \ 760 static gen_helper_gvec_3 * const name##_fns[4] = { \ 761 gen_helper_##name##_b, gen_helper_##name##_h, \ 762 gen_helper_##name##_s, gen_helper_##name##_d, \ 763 }; \ 764 TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpz, name##_fns[a->esz], a, 0) 765 766 DO_ZPZ(CLS, aa64_sve, sve_cls) 767 DO_ZPZ(CLZ, aa64_sve, sve_clz) 768 DO_ZPZ(CNT_zpz, aa64_sve, sve_cnt_zpz) 769 DO_ZPZ(CNOT, aa64_sve, sve_cnot) 770 DO_ZPZ(NOT_zpz, aa64_sve, sve_not_zpz) 771 DO_ZPZ(ABS, aa64_sve, sve_abs) 772 DO_ZPZ(NEG, aa64_sve, sve_neg) 773 DO_ZPZ(RBIT, aa64_sve, sve_rbit) 774 775 static gen_helper_gvec_3 * const fabs_fns[4] = { 776 NULL, gen_helper_sve_fabs_h, 777 gen_helper_sve_fabs_s, gen_helper_sve_fabs_d, 778 }; 779 TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, fabs_fns[a->esz], a, 0) 780 781 static gen_helper_gvec_3 * const fneg_fns[4] = { 782 NULL, gen_helper_sve_fneg_h, 783 gen_helper_sve_fneg_s, gen_helper_sve_fneg_d, 784 }; 785 TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, fneg_fns[a->esz], a, 0) 786 787 static gen_helper_gvec_3 * const sxtb_fns[4] = { 788 NULL, gen_helper_sve_sxtb_h, 789 gen_helper_sve_sxtb_s, gen_helper_sve_sxtb_d, 790 }; 791 TRANS_FEAT(SXTB, aa64_sve, gen_gvec_ool_arg_zpz, sxtb_fns[a->esz], a, 0) 792 793 static gen_helper_gvec_3 * const uxtb_fns[4] = { 794 NULL, gen_helper_sve_uxtb_h, 795 gen_helper_sve_uxtb_s, gen_helper_sve_uxtb_d, 796 }; 797 TRANS_FEAT(UXTB, aa64_sve, gen_gvec_ool_arg_zpz, uxtb_fns[a->esz], a, 0) 798 799 static gen_helper_gvec_3 * const sxth_fns[4] = { 800 NULL, NULL, gen_helper_sve_sxth_s, gen_helper_sve_sxth_d 801 }; 802 TRANS_FEAT(SXTH, aa64_sve, gen_gvec_ool_arg_zpz, sxth_fns[a->esz], a, 0) 803 804 static gen_helper_gvec_3 * const uxth_fns[4] = { 805 NULL, NULL, gen_helper_sve_uxth_s, gen_helper_sve_uxth_d 806 }; 807 TRANS_FEAT(UXTH, aa64_sve, gen_gvec_ool_arg_zpz, uxth_fns[a->esz], a, 0) 808 809 TRANS_FEAT(SXTW, aa64_sve, gen_gvec_ool_arg_zpz, 810 a->esz == 3 ? gen_helper_sve_sxtw_d : NULL, a, 0) 811 TRANS_FEAT(UXTW, aa64_sve, gen_gvec_ool_arg_zpz, 812 a->esz == 3 ? gen_helper_sve_uxtw_d : NULL, a, 0) 813 814 /* 815 *** SVE Integer Reduction Group 816 */ 817 818 typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32); 819 static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a, 820 gen_helper_gvec_reduc *fn) 821 { 822 unsigned vsz = vec_full_reg_size(s); 823 TCGv_ptr t_zn, t_pg; 824 TCGv_i32 desc; 825 TCGv_i64 temp; 826 827 if (fn == NULL) { 828 return false; 829 } 830 if (!sve_access_check(s)) { 831 return true; 832 } 833 834 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 835 temp = tcg_temp_new_i64(); 836 t_zn = tcg_temp_new_ptr(); 837 t_pg = tcg_temp_new_ptr(); 838 839 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn)); 840 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg)); 841 fn(temp, t_zn, t_pg, desc); 842 843 write_fp_dreg(s, a->rd, temp); 844 return true; 845 } 846 847 #define DO_VPZ(NAME, name) \ 848 static gen_helper_gvec_reduc * const name##_fns[4] = { \ 849 gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ 850 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ 851 }; \ 852 TRANS_FEAT(NAME, aa64_sve, do_vpz_ool, a, name##_fns[a->esz]) 853 854 DO_VPZ(ORV, orv) 855 DO_VPZ(ANDV, andv) 856 DO_VPZ(EORV, eorv) 857 858 DO_VPZ(UADDV, uaddv) 859 DO_VPZ(SMAXV, smaxv) 860 DO_VPZ(UMAXV, umaxv) 861 DO_VPZ(SMINV, sminv) 862 DO_VPZ(UMINV, uminv) 863 864 static gen_helper_gvec_reduc * const saddv_fns[4] = { 865 gen_helper_sve_saddv_b, gen_helper_sve_saddv_h, 866 gen_helper_sve_saddv_s, NULL 867 }; 868 TRANS_FEAT(SADDV, aa64_sve, do_vpz_ool, a, saddv_fns[a->esz]) 869 870 #undef DO_VPZ 871 872 /* 873 *** SVE Shift by Immediate - Predicated Group 874 */ 875 876 /* 877 * Copy Zn into Zd, storing zeros into inactive elements. 878 * If invert, store zeros into the active elements. 879 */ 880 static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg, 881 int esz, bool invert) 882 { 883 static gen_helper_gvec_3 * const fns[4] = { 884 gen_helper_sve_movz_b, gen_helper_sve_movz_h, 885 gen_helper_sve_movz_s, gen_helper_sve_movz_d, 886 }; 887 return gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert); 888 } 889 890 static bool do_shift_zpzi(DisasContext *s, arg_rpri_esz *a, bool asr, 891 gen_helper_gvec_3 * const fns[4]) 892 { 893 int max; 894 895 if (a->esz < 0) { 896 /* Invalid tsz encoding -- see tszimm_esz. */ 897 return false; 898 } 899 900 /* 901 * Shift by element size is architecturally valid. 902 * For arithmetic right-shift, it's the same as by one less. 903 * For logical shifts and ASRD, it is a zeroing operation. 904 */ 905 max = 8 << a->esz; 906 if (a->imm >= max) { 907 if (asr) { 908 a->imm = max - 1; 909 } else { 910 return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true); 911 } 912 } 913 return gen_gvec_ool_arg_zpzi(s, fns[a->esz], a); 914 } 915 916 static gen_helper_gvec_3 * const asr_zpzi_fns[4] = { 917 gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h, 918 gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d, 919 }; 920 TRANS_FEAT(ASR_zpzi, aa64_sve, do_shift_zpzi, a, true, asr_zpzi_fns) 921 922 static gen_helper_gvec_3 * const lsr_zpzi_fns[4] = { 923 gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h, 924 gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d, 925 }; 926 TRANS_FEAT(LSR_zpzi, aa64_sve, do_shift_zpzi, a, false, lsr_zpzi_fns) 927 928 static gen_helper_gvec_3 * const lsl_zpzi_fns[4] = { 929 gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h, 930 gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d, 931 }; 932 TRANS_FEAT(LSL_zpzi, aa64_sve, do_shift_zpzi, a, false, lsl_zpzi_fns) 933 934 static gen_helper_gvec_3 * const asrd_fns[4] = { 935 gen_helper_sve_asrd_b, gen_helper_sve_asrd_h, 936 gen_helper_sve_asrd_s, gen_helper_sve_asrd_d, 937 }; 938 TRANS_FEAT(ASRD, aa64_sve, do_shift_zpzi, a, false, asrd_fns) 939 940 static gen_helper_gvec_3 * const sqshl_zpzi_fns[4] = { 941 gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h, 942 gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d, 943 }; 944 TRANS_FEAT(SQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi, 945 a->esz < 0 ? NULL : sqshl_zpzi_fns[a->esz], a) 946 947 static gen_helper_gvec_3 * const uqshl_zpzi_fns[4] = { 948 gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h, 949 gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d, 950 }; 951 TRANS_FEAT(UQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi, 952 a->esz < 0 ? NULL : uqshl_zpzi_fns[a->esz], a) 953 954 static gen_helper_gvec_3 * const srshr_fns[4] = { 955 gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h, 956 gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d, 957 }; 958 TRANS_FEAT(SRSHR, aa64_sve2, gen_gvec_ool_arg_zpzi, 959 a->esz < 0 ? NULL : srshr_fns[a->esz], a) 960 961 static gen_helper_gvec_3 * const urshr_fns[4] = { 962 gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h, 963 gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d, 964 }; 965 TRANS_FEAT(URSHR, aa64_sve2, gen_gvec_ool_arg_zpzi, 966 a->esz < 0 ? NULL : urshr_fns[a->esz], a) 967 968 static gen_helper_gvec_3 * const sqshlu_fns[4] = { 969 gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h, 970 gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d, 971 }; 972 TRANS_FEAT(SQSHLU, aa64_sve2, gen_gvec_ool_arg_zpzi, 973 a->esz < 0 ? NULL : sqshlu_fns[a->esz], a) 974 975 /* 976 *** SVE Bitwise Shift - Predicated Group 977 */ 978 979 #define DO_ZPZW(NAME, name) \ 980 static gen_helper_gvec_4 * const name##_zpzw_fns[4] = { \ 981 gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h, \ 982 gen_helper_sve_##name##_zpzw_s, NULL \ 983 }; \ 984 TRANS_FEAT(NAME##_zpzw, aa64_sve, gen_gvec_ool_arg_zpzz, \ 985 a->esz < 0 ? NULL : name##_zpzw_fns[a->esz], a, 0) 986 987 DO_ZPZW(ASR, asr) 988 DO_ZPZW(LSR, lsr) 989 DO_ZPZW(LSL, lsl) 990 991 #undef DO_ZPZW 992 993 /* 994 *** SVE Bitwise Shift - Unpredicated Group 995 */ 996 997 static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr, 998 void (*gvec_fn)(unsigned, uint32_t, uint32_t, 999 int64_t, uint32_t, uint32_t)) 1000 { 1001 if (a->esz < 0) { 1002 /* Invalid tsz encoding -- see tszimm_esz. */ 1003 return false; 1004 } 1005 if (sve_access_check(s)) { 1006 unsigned vsz = vec_full_reg_size(s); 1007 /* Shift by element size is architecturally valid. For 1008 arithmetic right-shift, it's the same as by one less. 1009 Otherwise it is a zeroing operation. */ 1010 if (a->imm >= 8 << a->esz) { 1011 if (asr) { 1012 a->imm = (8 << a->esz) - 1; 1013 } else { 1014 do_dupi_z(s, a->rd, 0); 1015 return true; 1016 } 1017 } 1018 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd), 1019 vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz); 1020 } 1021 return true; 1022 } 1023 1024 TRANS_FEAT(ASR_zzi, aa64_sve, do_shift_imm, a, true, tcg_gen_gvec_sari) 1025 TRANS_FEAT(LSR_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shri) 1026 TRANS_FEAT(LSL_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shli) 1027 1028 #define DO_ZZW(NAME, name) \ 1029 static gen_helper_gvec_3 * const name##_zzw_fns[4] = { \ 1030 gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h, \ 1031 gen_helper_sve_##name##_zzw_s, NULL \ 1032 }; \ 1033 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_arg_zzz, \ 1034 name##_zzw_fns[a->esz], a, 0) 1035 1036 DO_ZZW(ASR_zzw, asr) 1037 DO_ZZW(LSR_zzw, lsr) 1038 DO_ZZW(LSL_zzw, lsl) 1039 1040 #undef DO_ZZW 1041 1042 /* 1043 *** SVE Integer Multiply-Add Group 1044 */ 1045 1046 static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a, 1047 gen_helper_gvec_5 *fn) 1048 { 1049 if (sve_access_check(s)) { 1050 unsigned vsz = vec_full_reg_size(s); 1051 tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd), 1052 vec_full_reg_offset(s, a->ra), 1053 vec_full_reg_offset(s, a->rn), 1054 vec_full_reg_offset(s, a->rm), 1055 pred_full_reg_offset(s, a->pg), 1056 vsz, vsz, 0, fn); 1057 } 1058 return true; 1059 } 1060 1061 static gen_helper_gvec_5 * const mla_fns[4] = { 1062 gen_helper_sve_mla_b, gen_helper_sve_mla_h, 1063 gen_helper_sve_mla_s, gen_helper_sve_mla_d, 1064 }; 1065 TRANS_FEAT(MLA, aa64_sve, do_zpzzz_ool, a, mla_fns[a->esz]) 1066 1067 static gen_helper_gvec_5 * const mls_fns[4] = { 1068 gen_helper_sve_mls_b, gen_helper_sve_mls_h, 1069 gen_helper_sve_mls_s, gen_helper_sve_mls_d, 1070 }; 1071 TRANS_FEAT(MLS, aa64_sve, do_zpzzz_ool, a, mls_fns[a->esz]) 1072 1073 /* 1074 *** SVE Index Generation Group 1075 */ 1076 1077 static bool do_index(DisasContext *s, int esz, int rd, 1078 TCGv_i64 start, TCGv_i64 incr) 1079 { 1080 unsigned vsz; 1081 TCGv_i32 desc; 1082 TCGv_ptr t_zd; 1083 1084 if (!sve_access_check(s)) { 1085 return true; 1086 } 1087 1088 vsz = vec_full_reg_size(s); 1089 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 1090 t_zd = tcg_temp_new_ptr(); 1091 1092 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, rd)); 1093 if (esz == 3) { 1094 gen_helper_sve_index_d(t_zd, start, incr, desc); 1095 } else { 1096 typedef void index_fn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32); 1097 static index_fn * const fns[3] = { 1098 gen_helper_sve_index_b, 1099 gen_helper_sve_index_h, 1100 gen_helper_sve_index_s, 1101 }; 1102 TCGv_i32 s32 = tcg_temp_new_i32(); 1103 TCGv_i32 i32 = tcg_temp_new_i32(); 1104 1105 tcg_gen_extrl_i64_i32(s32, start); 1106 tcg_gen_extrl_i64_i32(i32, incr); 1107 fns[esz](t_zd, s32, i32, desc); 1108 } 1109 return true; 1110 } 1111 1112 TRANS_FEAT(INDEX_ii, aa64_sve, do_index, a->esz, a->rd, 1113 tcg_constant_i64(a->imm1), tcg_constant_i64(a->imm2)) 1114 TRANS_FEAT(INDEX_ir, aa64_sve, do_index, a->esz, a->rd, 1115 tcg_constant_i64(a->imm), cpu_reg(s, a->rm)) 1116 TRANS_FEAT(INDEX_ri, aa64_sve, do_index, a->esz, a->rd, 1117 cpu_reg(s, a->rn), tcg_constant_i64(a->imm)) 1118 TRANS_FEAT(INDEX_rr, aa64_sve, do_index, a->esz, a->rd, 1119 cpu_reg(s, a->rn), cpu_reg(s, a->rm)) 1120 1121 /* 1122 *** SVE Stack Allocation Group 1123 */ 1124 1125 static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a) 1126 { 1127 if (!dc_isar_feature(aa64_sve, s)) { 1128 return false; 1129 } 1130 if (sve_access_check(s)) { 1131 TCGv_i64 rd = cpu_reg_sp(s, a->rd); 1132 TCGv_i64 rn = cpu_reg_sp(s, a->rn); 1133 tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s)); 1134 } 1135 return true; 1136 } 1137 1138 static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a) 1139 { 1140 if (!dc_isar_feature(aa64_sme, s)) { 1141 return false; 1142 } 1143 if (sme_enabled_check(s)) { 1144 TCGv_i64 rd = cpu_reg_sp(s, a->rd); 1145 TCGv_i64 rn = cpu_reg_sp(s, a->rn); 1146 tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s)); 1147 } 1148 return true; 1149 } 1150 1151 static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) 1152 { 1153 if (!dc_isar_feature(aa64_sve, s)) { 1154 return false; 1155 } 1156 if (sve_access_check(s)) { 1157 TCGv_i64 rd = cpu_reg_sp(s, a->rd); 1158 TCGv_i64 rn = cpu_reg_sp(s, a->rn); 1159 tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s)); 1160 } 1161 return true; 1162 } 1163 1164 static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a) 1165 { 1166 if (!dc_isar_feature(aa64_sme, s)) { 1167 return false; 1168 } 1169 if (sme_enabled_check(s)) { 1170 TCGv_i64 rd = cpu_reg_sp(s, a->rd); 1171 TCGv_i64 rn = cpu_reg_sp(s, a->rn); 1172 tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s)); 1173 } 1174 return true; 1175 } 1176 1177 static bool trans_RDVL(DisasContext *s, arg_RDVL *a) 1178 { 1179 if (!dc_isar_feature(aa64_sve, s)) { 1180 return false; 1181 } 1182 if (sve_access_check(s)) { 1183 TCGv_i64 reg = cpu_reg(s, a->rd); 1184 tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s)); 1185 } 1186 return true; 1187 } 1188 1189 static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a) 1190 { 1191 if (!dc_isar_feature(aa64_sme, s)) { 1192 return false; 1193 } 1194 if (sme_enabled_check(s)) { 1195 TCGv_i64 reg = cpu_reg(s, a->rd); 1196 tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s)); 1197 } 1198 return true; 1199 } 1200 1201 /* 1202 *** SVE Compute Vector Address Group 1203 */ 1204 1205 static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn) 1206 { 1207 return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm); 1208 } 1209 1210 TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32) 1211 TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64) 1212 TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32) 1213 TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32) 1214 1215 /* 1216 *** SVE Integer Misc - Unpredicated Group 1217 */ 1218 1219 static gen_helper_gvec_2 * const fexpa_fns[4] = { 1220 NULL, gen_helper_sve_fexpa_h, 1221 gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d, 1222 }; 1223 TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz, 1224 fexpa_fns[a->esz], a->rd, a->rn, 0) 1225 1226 static gen_helper_gvec_3 * const ftssel_fns[4] = { 1227 NULL, gen_helper_sve_ftssel_h, 1228 gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d, 1229 }; 1230 TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, 1231 ftssel_fns[a->esz], a, 0) 1232 1233 /* 1234 *** SVE Predicate Logical Operations Group 1235 */ 1236 1237 static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a, 1238 const GVecGen4 *gvec_op) 1239 { 1240 if (!sve_access_check(s)) { 1241 return true; 1242 } 1243 1244 unsigned psz = pred_gvec_reg_size(s); 1245 int dofs = pred_full_reg_offset(s, a->rd); 1246 int nofs = pred_full_reg_offset(s, a->rn); 1247 int mofs = pred_full_reg_offset(s, a->rm); 1248 int gofs = pred_full_reg_offset(s, a->pg); 1249 1250 if (!a->s) { 1251 tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op); 1252 return true; 1253 } 1254 1255 if (psz == 8) { 1256 /* Do the operation and the flags generation in temps. */ 1257 TCGv_i64 pd = tcg_temp_new_i64(); 1258 TCGv_i64 pn = tcg_temp_new_i64(); 1259 TCGv_i64 pm = tcg_temp_new_i64(); 1260 TCGv_i64 pg = tcg_temp_new_i64(); 1261 1262 tcg_gen_ld_i64(pn, tcg_env, nofs); 1263 tcg_gen_ld_i64(pm, tcg_env, mofs); 1264 tcg_gen_ld_i64(pg, tcg_env, gofs); 1265 1266 gvec_op->fni8(pd, pn, pm, pg); 1267 tcg_gen_st_i64(pd, tcg_env, dofs); 1268 1269 do_predtest1(pd, pg); 1270 } else { 1271 /* The operation and flags generation is large. The computation 1272 * of the flags depends on the original contents of the guarding 1273 * predicate. If the destination overwrites the guarding predicate, 1274 * then the easiest way to get this right is to save a copy. 1275 */ 1276 int tofs = gofs; 1277 if (a->rd == a->pg) { 1278 tofs = offsetof(CPUARMState, vfp.preg_tmp); 1279 tcg_gen_gvec_mov(0, tofs, gofs, psz, psz); 1280 } 1281 1282 tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op); 1283 do_predtest(s, dofs, tofs, psz / 8); 1284 } 1285 return true; 1286 } 1287 1288 static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1289 { 1290 tcg_gen_and_i64(pd, pn, pm); 1291 tcg_gen_and_i64(pd, pd, pg); 1292 } 1293 1294 static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1295 TCGv_vec pm, TCGv_vec pg) 1296 { 1297 tcg_gen_and_vec(vece, pd, pn, pm); 1298 tcg_gen_and_vec(vece, pd, pd, pg); 1299 } 1300 1301 static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a) 1302 { 1303 static const GVecGen4 op = { 1304 .fni8 = gen_and_pg_i64, 1305 .fniv = gen_and_pg_vec, 1306 .fno = gen_helper_sve_and_pppp, 1307 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1308 }; 1309 1310 if (!dc_isar_feature(aa64_sve, s)) { 1311 return false; 1312 } 1313 if (!a->s) { 1314 if (a->rn == a->rm) { 1315 if (a->pg == a->rn) { 1316 return do_mov_p(s, a->rd, a->rn); 1317 } 1318 return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg); 1319 } else if (a->pg == a->rn || a->pg == a->rm) { 1320 return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm); 1321 } 1322 } 1323 return do_pppp_flags(s, a, &op); 1324 } 1325 1326 static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1327 { 1328 tcg_gen_andc_i64(pd, pn, pm); 1329 tcg_gen_and_i64(pd, pd, pg); 1330 } 1331 1332 static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1333 TCGv_vec pm, TCGv_vec pg) 1334 { 1335 tcg_gen_andc_vec(vece, pd, pn, pm); 1336 tcg_gen_and_vec(vece, pd, pd, pg); 1337 } 1338 1339 static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a) 1340 { 1341 static const GVecGen4 op = { 1342 .fni8 = gen_bic_pg_i64, 1343 .fniv = gen_bic_pg_vec, 1344 .fno = gen_helper_sve_bic_pppp, 1345 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1346 }; 1347 1348 if (!dc_isar_feature(aa64_sve, s)) { 1349 return false; 1350 } 1351 if (!a->s && a->pg == a->rn) { 1352 return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm); 1353 } 1354 return do_pppp_flags(s, a, &op); 1355 } 1356 1357 static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1358 { 1359 tcg_gen_xor_i64(pd, pn, pm); 1360 tcg_gen_and_i64(pd, pd, pg); 1361 } 1362 1363 static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1364 TCGv_vec pm, TCGv_vec pg) 1365 { 1366 tcg_gen_xor_vec(vece, pd, pn, pm); 1367 tcg_gen_and_vec(vece, pd, pd, pg); 1368 } 1369 1370 static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a) 1371 { 1372 static const GVecGen4 op = { 1373 .fni8 = gen_eor_pg_i64, 1374 .fniv = gen_eor_pg_vec, 1375 .fno = gen_helper_sve_eor_pppp, 1376 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1377 }; 1378 1379 if (!dc_isar_feature(aa64_sve, s)) { 1380 return false; 1381 } 1382 /* Alias NOT (predicate) is EOR Pd.B, Pg/Z, Pn.B, Pg.B */ 1383 if (!a->s && a->pg == a->rm) { 1384 return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->pg, a->rn); 1385 } 1386 return do_pppp_flags(s, a, &op); 1387 } 1388 1389 static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a) 1390 { 1391 if (a->s || !dc_isar_feature(aa64_sve, s)) { 1392 return false; 1393 } 1394 if (sve_access_check(s)) { 1395 unsigned psz = pred_gvec_reg_size(s); 1396 tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd), 1397 pred_full_reg_offset(s, a->pg), 1398 pred_full_reg_offset(s, a->rn), 1399 pred_full_reg_offset(s, a->rm), psz, psz); 1400 } 1401 return true; 1402 } 1403 1404 static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1405 { 1406 tcg_gen_or_i64(pd, pn, pm); 1407 tcg_gen_and_i64(pd, pd, pg); 1408 } 1409 1410 static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1411 TCGv_vec pm, TCGv_vec pg) 1412 { 1413 tcg_gen_or_vec(vece, pd, pn, pm); 1414 tcg_gen_and_vec(vece, pd, pd, pg); 1415 } 1416 1417 static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a) 1418 { 1419 static const GVecGen4 op = { 1420 .fni8 = gen_orr_pg_i64, 1421 .fniv = gen_orr_pg_vec, 1422 .fno = gen_helper_sve_orr_pppp, 1423 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1424 }; 1425 1426 if (!dc_isar_feature(aa64_sve, s)) { 1427 return false; 1428 } 1429 if (!a->s && a->pg == a->rn && a->rn == a->rm) { 1430 return do_mov_p(s, a->rd, a->rn); 1431 } 1432 return do_pppp_flags(s, a, &op); 1433 } 1434 1435 static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1436 { 1437 tcg_gen_orc_i64(pd, pn, pm); 1438 tcg_gen_and_i64(pd, pd, pg); 1439 } 1440 1441 static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1442 TCGv_vec pm, TCGv_vec pg) 1443 { 1444 tcg_gen_orc_vec(vece, pd, pn, pm); 1445 tcg_gen_and_vec(vece, pd, pd, pg); 1446 } 1447 1448 static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a) 1449 { 1450 static const GVecGen4 op = { 1451 .fni8 = gen_orn_pg_i64, 1452 .fniv = gen_orn_pg_vec, 1453 .fno = gen_helper_sve_orn_pppp, 1454 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1455 }; 1456 1457 if (!dc_isar_feature(aa64_sve, s)) { 1458 return false; 1459 } 1460 return do_pppp_flags(s, a, &op); 1461 } 1462 1463 static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1464 { 1465 tcg_gen_or_i64(pd, pn, pm); 1466 tcg_gen_andc_i64(pd, pg, pd); 1467 } 1468 1469 static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1470 TCGv_vec pm, TCGv_vec pg) 1471 { 1472 tcg_gen_or_vec(vece, pd, pn, pm); 1473 tcg_gen_andc_vec(vece, pd, pg, pd); 1474 } 1475 1476 static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a) 1477 { 1478 static const GVecGen4 op = { 1479 .fni8 = gen_nor_pg_i64, 1480 .fniv = gen_nor_pg_vec, 1481 .fno = gen_helper_sve_nor_pppp, 1482 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1483 }; 1484 1485 if (!dc_isar_feature(aa64_sve, s)) { 1486 return false; 1487 } 1488 return do_pppp_flags(s, a, &op); 1489 } 1490 1491 static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1492 { 1493 tcg_gen_and_i64(pd, pn, pm); 1494 tcg_gen_andc_i64(pd, pg, pd); 1495 } 1496 1497 static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1498 TCGv_vec pm, TCGv_vec pg) 1499 { 1500 tcg_gen_and_vec(vece, pd, pn, pm); 1501 tcg_gen_andc_vec(vece, pd, pg, pd); 1502 } 1503 1504 static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a) 1505 { 1506 static const GVecGen4 op = { 1507 .fni8 = gen_nand_pg_i64, 1508 .fniv = gen_nand_pg_vec, 1509 .fno = gen_helper_sve_nand_pppp, 1510 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1511 }; 1512 1513 if (!dc_isar_feature(aa64_sve, s)) { 1514 return false; 1515 } 1516 return do_pppp_flags(s, a, &op); 1517 } 1518 1519 /* 1520 *** SVE Predicate Misc Group 1521 */ 1522 1523 static bool trans_PTEST(DisasContext *s, arg_PTEST *a) 1524 { 1525 if (!dc_isar_feature(aa64_sve, s)) { 1526 return false; 1527 } 1528 if (sve_access_check(s)) { 1529 int nofs = pred_full_reg_offset(s, a->rn); 1530 int gofs = pred_full_reg_offset(s, a->pg); 1531 int words = DIV_ROUND_UP(pred_full_reg_size(s), 8); 1532 1533 if (words == 1) { 1534 TCGv_i64 pn = tcg_temp_new_i64(); 1535 TCGv_i64 pg = tcg_temp_new_i64(); 1536 1537 tcg_gen_ld_i64(pn, tcg_env, nofs); 1538 tcg_gen_ld_i64(pg, tcg_env, gofs); 1539 do_predtest1(pn, pg); 1540 } else { 1541 do_predtest(s, nofs, gofs, words); 1542 } 1543 } 1544 return true; 1545 } 1546 1547 /* See the ARM pseudocode DecodePredCount. */ 1548 static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz) 1549 { 1550 unsigned elements = fullsz >> esz; 1551 unsigned bound; 1552 1553 switch (pattern) { 1554 case 0x0: /* POW2 */ 1555 return pow2floor(elements); 1556 case 0x1: /* VL1 */ 1557 case 0x2: /* VL2 */ 1558 case 0x3: /* VL3 */ 1559 case 0x4: /* VL4 */ 1560 case 0x5: /* VL5 */ 1561 case 0x6: /* VL6 */ 1562 case 0x7: /* VL7 */ 1563 case 0x8: /* VL8 */ 1564 bound = pattern; 1565 break; 1566 case 0x9: /* VL16 */ 1567 case 0xa: /* VL32 */ 1568 case 0xb: /* VL64 */ 1569 case 0xc: /* VL128 */ 1570 case 0xd: /* VL256 */ 1571 bound = 16 << (pattern - 9); 1572 break; 1573 case 0x1d: /* MUL4 */ 1574 return elements - elements % 4; 1575 case 0x1e: /* MUL3 */ 1576 return elements - elements % 3; 1577 case 0x1f: /* ALL */ 1578 return elements; 1579 default: /* #uimm5 */ 1580 return 0; 1581 } 1582 return elements >= bound ? bound : 0; 1583 } 1584 1585 /* This handles all of the predicate initialization instructions, 1586 * PTRUE, PFALSE, SETFFR. For PFALSE, we will have set PAT == 32 1587 * so that decode_pred_count returns 0. For SETFFR, we will have 1588 * set RD == 16 == FFR. 1589 */ 1590 static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) 1591 { 1592 if (!sve_access_check(s)) { 1593 return true; 1594 } 1595 1596 unsigned fullsz = vec_full_reg_size(s); 1597 unsigned ofs = pred_full_reg_offset(s, rd); 1598 unsigned numelem, setsz, i; 1599 uint64_t word, lastword; 1600 TCGv_i64 t; 1601 1602 numelem = decode_pred_count(fullsz, pat, esz); 1603 1604 /* Determine what we must store into each bit, and how many. */ 1605 if (numelem == 0) { 1606 lastword = word = 0; 1607 setsz = fullsz; 1608 } else { 1609 setsz = numelem << esz; 1610 lastword = word = pred_esz_masks[esz]; 1611 if (setsz % 64) { 1612 lastword &= MAKE_64BIT_MASK(0, setsz % 64); 1613 } 1614 } 1615 1616 t = tcg_temp_new_i64(); 1617 if (fullsz <= 64) { 1618 tcg_gen_movi_i64(t, lastword); 1619 tcg_gen_st_i64(t, tcg_env, ofs); 1620 goto done; 1621 } 1622 1623 if (word == lastword) { 1624 unsigned maxsz = size_for_gvec(fullsz / 8); 1625 unsigned oprsz = size_for_gvec(setsz / 8); 1626 1627 if (oprsz * 8 == setsz) { 1628 tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word); 1629 goto done; 1630 } 1631 } 1632 1633 setsz /= 8; 1634 fullsz /= 8; 1635 1636 tcg_gen_movi_i64(t, word); 1637 for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) { 1638 tcg_gen_st_i64(t, tcg_env, ofs + i); 1639 } 1640 if (lastword != word) { 1641 tcg_gen_movi_i64(t, lastword); 1642 tcg_gen_st_i64(t, tcg_env, ofs + i); 1643 i += 8; 1644 } 1645 if (i < fullsz) { 1646 tcg_gen_movi_i64(t, 0); 1647 for (; i < fullsz; i += 8) { 1648 tcg_gen_st_i64(t, tcg_env, ofs + i); 1649 } 1650 } 1651 1652 done: 1653 /* PTRUES */ 1654 if (setflag) { 1655 tcg_gen_movi_i32(cpu_NF, -(word != 0)); 1656 tcg_gen_movi_i32(cpu_CF, word == 0); 1657 tcg_gen_movi_i32(cpu_VF, 0); 1658 tcg_gen_mov_i32(cpu_ZF, cpu_NF); 1659 } 1660 return true; 1661 } 1662 1663 TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s) 1664 1665 /* Note pat == 31 is #all, to set all elements. */ 1666 TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve, 1667 do_predset, 0, FFR_PRED_NUM, 31, false) 1668 1669 /* Note pat == 32 is #unimp, to set no elements. */ 1670 TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false) 1671 1672 static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a) 1673 { 1674 /* The path through do_pppp_flags is complicated enough to want to avoid 1675 * duplication. Frob the arguments into the form of a predicated AND. 1676 */ 1677 arg_rprr_s alt_a = { 1678 .rd = a->rd, .pg = a->pg, .s = a->s, 1679 .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM, 1680 }; 1681 1682 s->is_nonstreaming = true; 1683 return trans_AND_pppp(s, &alt_a); 1684 } 1685 1686 TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM) 1687 TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn) 1688 1689 static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, 1690 void (*gen_fn)(TCGv_i32, TCGv_ptr, 1691 TCGv_ptr, TCGv_i32)) 1692 { 1693 if (!sve_access_check(s)) { 1694 return true; 1695 } 1696 1697 TCGv_ptr t_pd = tcg_temp_new_ptr(); 1698 TCGv_ptr t_pg = tcg_temp_new_ptr(); 1699 TCGv_i32 t; 1700 unsigned desc = 0; 1701 1702 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s)); 1703 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 1704 1705 tcg_gen_addi_ptr(t_pd, tcg_env, pred_full_reg_offset(s, a->rd)); 1706 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->rn)); 1707 t = tcg_temp_new_i32(); 1708 1709 gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc)); 1710 1711 do_pred_flags(t); 1712 return true; 1713 } 1714 1715 TRANS_FEAT(PFIRST, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pfirst) 1716 TRANS_FEAT(PNEXT, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pnext) 1717 1718 /* 1719 *** SVE Element Count Group 1720 */ 1721 1722 /* Perform an inline saturating addition of a 32-bit value within 1723 * a 64-bit register. The second operand is known to be positive, 1724 * which halves the comparisons we must perform to bound the result. 1725 */ 1726 static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) 1727 { 1728 int64_t ibound; 1729 1730 /* Use normal 64-bit arithmetic to detect 32-bit overflow. */ 1731 if (u) { 1732 tcg_gen_ext32u_i64(reg, reg); 1733 } else { 1734 tcg_gen_ext32s_i64(reg, reg); 1735 } 1736 if (d) { 1737 tcg_gen_sub_i64(reg, reg, val); 1738 ibound = (u ? 0 : INT32_MIN); 1739 tcg_gen_smax_i64(reg, reg, tcg_constant_i64(ibound)); 1740 } else { 1741 tcg_gen_add_i64(reg, reg, val); 1742 ibound = (u ? UINT32_MAX : INT32_MAX); 1743 tcg_gen_smin_i64(reg, reg, tcg_constant_i64(ibound)); 1744 } 1745 } 1746 1747 /* Similarly with 64-bit values. */ 1748 static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) 1749 { 1750 TCGv_i64 t0 = tcg_temp_new_i64(); 1751 TCGv_i64 t2; 1752 1753 if (u) { 1754 if (d) { 1755 tcg_gen_sub_i64(t0, reg, val); 1756 t2 = tcg_constant_i64(0); 1757 tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0); 1758 } else { 1759 tcg_gen_add_i64(t0, reg, val); 1760 t2 = tcg_constant_i64(-1); 1761 tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0); 1762 } 1763 } else { 1764 TCGv_i64 t1 = tcg_temp_new_i64(); 1765 if (d) { 1766 /* Detect signed overflow for subtraction. */ 1767 tcg_gen_xor_i64(t0, reg, val); 1768 tcg_gen_sub_i64(t1, reg, val); 1769 tcg_gen_xor_i64(reg, reg, t1); 1770 tcg_gen_and_i64(t0, t0, reg); 1771 1772 /* Bound the result. */ 1773 tcg_gen_movi_i64(reg, INT64_MIN); 1774 t2 = tcg_constant_i64(0); 1775 tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1); 1776 } else { 1777 /* Detect signed overflow for addition. */ 1778 tcg_gen_xor_i64(t0, reg, val); 1779 tcg_gen_add_i64(reg, reg, val); 1780 tcg_gen_xor_i64(t1, reg, val); 1781 tcg_gen_andc_i64(t0, t1, t0); 1782 1783 /* Bound the result. */ 1784 tcg_gen_movi_i64(t1, INT64_MAX); 1785 t2 = tcg_constant_i64(0); 1786 tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg); 1787 } 1788 } 1789 } 1790 1791 /* Similarly with a vector and a scalar operand. */ 1792 static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, 1793 TCGv_i64 val, bool u, bool d) 1794 { 1795 unsigned vsz = vec_full_reg_size(s); 1796 TCGv_ptr dptr, nptr; 1797 TCGv_i32 t32, desc; 1798 TCGv_i64 t64; 1799 1800 dptr = tcg_temp_new_ptr(); 1801 nptr = tcg_temp_new_ptr(); 1802 tcg_gen_addi_ptr(dptr, tcg_env, vec_full_reg_offset(s, rd)); 1803 tcg_gen_addi_ptr(nptr, tcg_env, vec_full_reg_offset(s, rn)); 1804 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 1805 1806 switch (esz) { 1807 case MO_8: 1808 t32 = tcg_temp_new_i32(); 1809 tcg_gen_extrl_i64_i32(t32, val); 1810 if (d) { 1811 tcg_gen_neg_i32(t32, t32); 1812 } 1813 if (u) { 1814 gen_helper_sve_uqaddi_b(dptr, nptr, t32, desc); 1815 } else { 1816 gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc); 1817 } 1818 break; 1819 1820 case MO_16: 1821 t32 = tcg_temp_new_i32(); 1822 tcg_gen_extrl_i64_i32(t32, val); 1823 if (d) { 1824 tcg_gen_neg_i32(t32, t32); 1825 } 1826 if (u) { 1827 gen_helper_sve_uqaddi_h(dptr, nptr, t32, desc); 1828 } else { 1829 gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc); 1830 } 1831 break; 1832 1833 case MO_32: 1834 t64 = tcg_temp_new_i64(); 1835 if (d) { 1836 tcg_gen_neg_i64(t64, val); 1837 } else { 1838 tcg_gen_mov_i64(t64, val); 1839 } 1840 if (u) { 1841 gen_helper_sve_uqaddi_s(dptr, nptr, t64, desc); 1842 } else { 1843 gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc); 1844 } 1845 break; 1846 1847 case MO_64: 1848 if (u) { 1849 if (d) { 1850 gen_helper_sve_uqsubi_d(dptr, nptr, val, desc); 1851 } else { 1852 gen_helper_sve_uqaddi_d(dptr, nptr, val, desc); 1853 } 1854 } else if (d) { 1855 t64 = tcg_temp_new_i64(); 1856 tcg_gen_neg_i64(t64, val); 1857 gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc); 1858 } else { 1859 gen_helper_sve_sqaddi_d(dptr, nptr, val, desc); 1860 } 1861 break; 1862 1863 default: 1864 g_assert_not_reached(); 1865 } 1866 } 1867 1868 static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a) 1869 { 1870 if (!dc_isar_feature(aa64_sve, s)) { 1871 return false; 1872 } 1873 if (sve_access_check(s)) { 1874 unsigned fullsz = vec_full_reg_size(s); 1875 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1876 tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm); 1877 } 1878 return true; 1879 } 1880 1881 static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a) 1882 { 1883 if (!dc_isar_feature(aa64_sve, s)) { 1884 return false; 1885 } 1886 if (sve_access_check(s)) { 1887 unsigned fullsz = vec_full_reg_size(s); 1888 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1889 int inc = numelem * a->imm * (a->d ? -1 : 1); 1890 TCGv_i64 reg = cpu_reg(s, a->rd); 1891 1892 tcg_gen_addi_i64(reg, reg, inc); 1893 } 1894 return true; 1895 } 1896 1897 static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a) 1898 { 1899 if (!dc_isar_feature(aa64_sve, s)) { 1900 return false; 1901 } 1902 if (!sve_access_check(s)) { 1903 return true; 1904 } 1905 1906 unsigned fullsz = vec_full_reg_size(s); 1907 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1908 int inc = numelem * a->imm; 1909 TCGv_i64 reg = cpu_reg(s, a->rd); 1910 1911 /* Use normal 64-bit arithmetic to detect 32-bit overflow. */ 1912 if (inc == 0) { 1913 if (a->u) { 1914 tcg_gen_ext32u_i64(reg, reg); 1915 } else { 1916 tcg_gen_ext32s_i64(reg, reg); 1917 } 1918 } else { 1919 do_sat_addsub_32(reg, tcg_constant_i64(inc), a->u, a->d); 1920 } 1921 return true; 1922 } 1923 1924 static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a) 1925 { 1926 if (!dc_isar_feature(aa64_sve, s)) { 1927 return false; 1928 } 1929 if (!sve_access_check(s)) { 1930 return true; 1931 } 1932 1933 unsigned fullsz = vec_full_reg_size(s); 1934 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1935 int inc = numelem * a->imm; 1936 TCGv_i64 reg = cpu_reg(s, a->rd); 1937 1938 if (inc != 0) { 1939 do_sat_addsub_64(reg, tcg_constant_i64(inc), a->u, a->d); 1940 } 1941 return true; 1942 } 1943 1944 static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a) 1945 { 1946 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 1947 return false; 1948 } 1949 1950 unsigned fullsz = vec_full_reg_size(s); 1951 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1952 int inc = numelem * a->imm; 1953 1954 if (inc != 0) { 1955 if (sve_access_check(s)) { 1956 tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd), 1957 vec_full_reg_offset(s, a->rn), 1958 tcg_constant_i64(a->d ? -inc : inc), 1959 fullsz, fullsz); 1960 } 1961 } else { 1962 do_mov_z(s, a->rd, a->rn); 1963 } 1964 return true; 1965 } 1966 1967 static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a) 1968 { 1969 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 1970 return false; 1971 } 1972 1973 unsigned fullsz = vec_full_reg_size(s); 1974 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1975 int inc = numelem * a->imm; 1976 1977 if (inc != 0) { 1978 if (sve_access_check(s)) { 1979 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, 1980 tcg_constant_i64(inc), a->u, a->d); 1981 } 1982 } else { 1983 do_mov_z(s, a->rd, a->rn); 1984 } 1985 return true; 1986 } 1987 1988 /* 1989 *** SVE Bitwise Immediate Group 1990 */ 1991 1992 static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn) 1993 { 1994 uint64_t imm; 1995 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), 1996 extract32(a->dbm, 0, 6), 1997 extract32(a->dbm, 6, 6))) { 1998 return false; 1999 } 2000 return gen_gvec_fn_zzi(s, gvec_fn, MO_64, a->rd, a->rn, imm); 2001 } 2002 2003 TRANS_FEAT(AND_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_andi) 2004 TRANS_FEAT(ORR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_ori) 2005 TRANS_FEAT(EOR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_xori) 2006 2007 static bool trans_DUPM(DisasContext *s, arg_DUPM *a) 2008 { 2009 uint64_t imm; 2010 2011 if (!dc_isar_feature(aa64_sve, s)) { 2012 return false; 2013 } 2014 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), 2015 extract32(a->dbm, 0, 6), 2016 extract32(a->dbm, 6, 6))) { 2017 return false; 2018 } 2019 if (sve_access_check(s)) { 2020 do_dupi_z(s, a->rd, imm); 2021 } 2022 return true; 2023 } 2024 2025 /* 2026 *** SVE Integer Wide Immediate - Predicated Group 2027 */ 2028 2029 /* Implement all merging copies. This is used for CPY (immediate), 2030 * FCPY, CPY (scalar), CPY (SIMD&FP scalar). 2031 */ 2032 static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg, 2033 TCGv_i64 val) 2034 { 2035 typedef void gen_cpy(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); 2036 static gen_cpy * const fns[4] = { 2037 gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h, 2038 gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d, 2039 }; 2040 unsigned vsz = vec_full_reg_size(s); 2041 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 2042 TCGv_ptr t_zd = tcg_temp_new_ptr(); 2043 TCGv_ptr t_zn = tcg_temp_new_ptr(); 2044 TCGv_ptr t_pg = tcg_temp_new_ptr(); 2045 2046 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, rd)); 2047 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, rn)); 2048 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); 2049 2050 fns[esz](t_zd, t_zn, t_pg, val, desc); 2051 } 2052 2053 static bool trans_FCPY(DisasContext *s, arg_FCPY *a) 2054 { 2055 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 2056 return false; 2057 } 2058 if (sve_access_check(s)) { 2059 /* Decode the VFP immediate. */ 2060 uint64_t imm = vfp_expand_imm(a->esz, a->imm); 2061 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(imm)); 2062 } 2063 return true; 2064 } 2065 2066 static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a) 2067 { 2068 if (!dc_isar_feature(aa64_sve, s)) { 2069 return false; 2070 } 2071 if (sve_access_check(s)) { 2072 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(a->imm)); 2073 } 2074 return true; 2075 } 2076 2077 static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a) 2078 { 2079 static gen_helper_gvec_2i * const fns[4] = { 2080 gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h, 2081 gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d, 2082 }; 2083 2084 if (!dc_isar_feature(aa64_sve, s)) { 2085 return false; 2086 } 2087 if (sve_access_check(s)) { 2088 unsigned vsz = vec_full_reg_size(s); 2089 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd), 2090 pred_full_reg_offset(s, a->pg), 2091 tcg_constant_i64(a->imm), 2092 vsz, vsz, 0, fns[a->esz]); 2093 } 2094 return true; 2095 } 2096 2097 /* 2098 *** SVE Permute Extract Group 2099 */ 2100 2101 static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm) 2102 { 2103 if (!sve_access_check(s)) { 2104 return true; 2105 } 2106 2107 unsigned vsz = vec_full_reg_size(s); 2108 unsigned n_ofs = imm >= vsz ? 0 : imm; 2109 unsigned n_siz = vsz - n_ofs; 2110 unsigned d = vec_full_reg_offset(s, rd); 2111 unsigned n = vec_full_reg_offset(s, rn); 2112 unsigned m = vec_full_reg_offset(s, rm); 2113 2114 /* Use host vector move insns if we have appropriate sizes 2115 * and no unfortunate overlap. 2116 */ 2117 if (m != d 2118 && n_ofs == size_for_gvec(n_ofs) 2119 && n_siz == size_for_gvec(n_siz) 2120 && (d != n || n_siz <= n_ofs)) { 2121 tcg_gen_gvec_mov(0, d, n + n_ofs, n_siz, n_siz); 2122 if (n_ofs != 0) { 2123 tcg_gen_gvec_mov(0, d + n_siz, m, n_ofs, n_ofs); 2124 } 2125 } else { 2126 tcg_gen_gvec_3_ool(d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext); 2127 } 2128 return true; 2129 } 2130 2131 TRANS_FEAT(EXT, aa64_sve, do_EXT, a->rd, a->rn, a->rm, a->imm) 2132 TRANS_FEAT(EXT_sve2, aa64_sve2, do_EXT, a->rd, a->rn, (a->rn + 1) % 32, a->imm) 2133 2134 /* 2135 *** SVE Permute - Unpredicated Group 2136 */ 2137 2138 static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a) 2139 { 2140 if (!dc_isar_feature(aa64_sve, s)) { 2141 return false; 2142 } 2143 if (sve_access_check(s)) { 2144 unsigned vsz = vec_full_reg_size(s); 2145 tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd), 2146 vsz, vsz, cpu_reg_sp(s, a->rn)); 2147 } 2148 return true; 2149 } 2150 2151 static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a) 2152 { 2153 if (!dc_isar_feature(aa64_sve, s)) { 2154 return false; 2155 } 2156 if ((a->imm & 0x1f) == 0) { 2157 return false; 2158 } 2159 if (sve_access_check(s)) { 2160 unsigned vsz = vec_full_reg_size(s); 2161 unsigned dofs = vec_full_reg_offset(s, a->rd); 2162 unsigned esz, index; 2163 2164 esz = ctz32(a->imm); 2165 index = a->imm >> (esz + 1); 2166 2167 if ((index << esz) < vsz) { 2168 unsigned nofs = vec_reg_offset(s, a->rn, index, esz); 2169 tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz); 2170 } else { 2171 /* 2172 * While dup_mem handles 128-bit elements, dup_imm does not. 2173 * Thankfully element size doesn't matter for splatting zero. 2174 */ 2175 tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0); 2176 } 2177 } 2178 return true; 2179 } 2180 2181 static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val) 2182 { 2183 typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); 2184 static gen_insr * const fns[4] = { 2185 gen_helper_sve_insr_b, gen_helper_sve_insr_h, 2186 gen_helper_sve_insr_s, gen_helper_sve_insr_d, 2187 }; 2188 unsigned vsz = vec_full_reg_size(s); 2189 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 2190 TCGv_ptr t_zd = tcg_temp_new_ptr(); 2191 TCGv_ptr t_zn = tcg_temp_new_ptr(); 2192 2193 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, a->rd)); 2194 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn)); 2195 2196 fns[a->esz](t_zd, t_zn, val, desc); 2197 } 2198 2199 static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) 2200 { 2201 if (!dc_isar_feature(aa64_sve, s)) { 2202 return false; 2203 } 2204 if (sve_access_check(s)) { 2205 TCGv_i64 t = tcg_temp_new_i64(); 2206 tcg_gen_ld_i64(t, tcg_env, vec_reg_offset(s, a->rm, 0, MO_64)); 2207 do_insr_i64(s, a, t); 2208 } 2209 return true; 2210 } 2211 2212 static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a) 2213 { 2214 if (!dc_isar_feature(aa64_sve, s)) { 2215 return false; 2216 } 2217 if (sve_access_check(s)) { 2218 do_insr_i64(s, a, cpu_reg(s, a->rm)); 2219 } 2220 return true; 2221 } 2222 2223 static gen_helper_gvec_2 * const rev_fns[4] = { 2224 gen_helper_sve_rev_b, gen_helper_sve_rev_h, 2225 gen_helper_sve_rev_s, gen_helper_sve_rev_d 2226 }; 2227 TRANS_FEAT(REV_v, aa64_sve, gen_gvec_ool_zz, rev_fns[a->esz], a->rd, a->rn, 0) 2228 2229 static gen_helper_gvec_3 * const sve_tbl_fns[4] = { 2230 gen_helper_sve_tbl_b, gen_helper_sve_tbl_h, 2231 gen_helper_sve_tbl_s, gen_helper_sve_tbl_d 2232 }; 2233 TRANS_FEAT(TBL, aa64_sve, gen_gvec_ool_arg_zzz, sve_tbl_fns[a->esz], a, 0) 2234 2235 static gen_helper_gvec_4 * const sve2_tbl_fns[4] = { 2236 gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h, 2237 gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d 2238 }; 2239 TRANS_FEAT(TBL_sve2, aa64_sve2, gen_gvec_ool_zzzz, sve2_tbl_fns[a->esz], 2240 a->rd, a->rn, (a->rn + 1) % 32, a->rm, 0) 2241 2242 static gen_helper_gvec_3 * const tbx_fns[4] = { 2243 gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h, 2244 gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d 2245 }; 2246 TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0) 2247 2248 static bool trans_UNPK(DisasContext *s, arg_UNPK *a) 2249 { 2250 static gen_helper_gvec_2 * const fns[4][2] = { 2251 { NULL, NULL }, 2252 { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h }, 2253 { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s }, 2254 { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d }, 2255 }; 2256 2257 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 2258 return false; 2259 } 2260 if (sve_access_check(s)) { 2261 unsigned vsz = vec_full_reg_size(s); 2262 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd), 2263 vec_full_reg_offset(s, a->rn) 2264 + (a->h ? vsz / 2 : 0), 2265 vsz, vsz, 0, fns[a->esz][a->u]); 2266 } 2267 return true; 2268 } 2269 2270 /* 2271 *** SVE Permute - Predicates Group 2272 */ 2273 2274 static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd, 2275 gen_helper_gvec_3 *fn) 2276 { 2277 if (!sve_access_check(s)) { 2278 return true; 2279 } 2280 2281 unsigned vsz = pred_full_reg_size(s); 2282 2283 TCGv_ptr t_d = tcg_temp_new_ptr(); 2284 TCGv_ptr t_n = tcg_temp_new_ptr(); 2285 TCGv_ptr t_m = tcg_temp_new_ptr(); 2286 uint32_t desc = 0; 2287 2288 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz); 2289 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 2290 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd); 2291 2292 tcg_gen_addi_ptr(t_d, tcg_env, pred_full_reg_offset(s, a->rd)); 2293 tcg_gen_addi_ptr(t_n, tcg_env, pred_full_reg_offset(s, a->rn)); 2294 tcg_gen_addi_ptr(t_m, tcg_env, pred_full_reg_offset(s, a->rm)); 2295 2296 fn(t_d, t_n, t_m, tcg_constant_i32(desc)); 2297 return true; 2298 } 2299 2300 static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd, 2301 gen_helper_gvec_2 *fn) 2302 { 2303 if (!sve_access_check(s)) { 2304 return true; 2305 } 2306 2307 unsigned vsz = pred_full_reg_size(s); 2308 TCGv_ptr t_d = tcg_temp_new_ptr(); 2309 TCGv_ptr t_n = tcg_temp_new_ptr(); 2310 uint32_t desc = 0; 2311 2312 tcg_gen_addi_ptr(t_d, tcg_env, pred_full_reg_offset(s, a->rd)); 2313 tcg_gen_addi_ptr(t_n, tcg_env, pred_full_reg_offset(s, a->rn)); 2314 2315 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz); 2316 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 2317 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd); 2318 2319 fn(t_d, t_n, tcg_constant_i32(desc)); 2320 return true; 2321 } 2322 2323 TRANS_FEAT(ZIP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_zip_p) 2324 TRANS_FEAT(ZIP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_zip_p) 2325 TRANS_FEAT(UZP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_uzp_p) 2326 TRANS_FEAT(UZP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_uzp_p) 2327 TRANS_FEAT(TRN1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_trn_p) 2328 TRANS_FEAT(TRN2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_trn_p) 2329 2330 TRANS_FEAT(REV_p, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_rev_p) 2331 TRANS_FEAT(PUNPKLO, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_punpk_p) 2332 TRANS_FEAT(PUNPKHI, aa64_sve, do_perm_pred2, a, 1, gen_helper_sve_punpk_p) 2333 2334 /* 2335 *** SVE Permute - Interleaving Group 2336 */ 2337 2338 static gen_helper_gvec_3 * const zip_fns[4] = { 2339 gen_helper_sve_zip_b, gen_helper_sve_zip_h, 2340 gen_helper_sve_zip_s, gen_helper_sve_zip_d, 2341 }; 2342 TRANS_FEAT(ZIP1_z, aa64_sve, gen_gvec_ool_arg_zzz, 2343 zip_fns[a->esz], a, 0) 2344 TRANS_FEAT(ZIP2_z, aa64_sve, gen_gvec_ool_arg_zzz, 2345 zip_fns[a->esz], a, vec_full_reg_size(s) / 2) 2346 2347 TRANS_FEAT(ZIP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2348 gen_helper_sve2_zip_q, a, 0) 2349 TRANS_FEAT(ZIP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2350 gen_helper_sve2_zip_q, a, 2351 QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2) 2352 2353 static gen_helper_gvec_3 * const uzp_fns[4] = { 2354 gen_helper_sve_uzp_b, gen_helper_sve_uzp_h, 2355 gen_helper_sve_uzp_s, gen_helper_sve_uzp_d, 2356 }; 2357 2358 TRANS_FEAT(UZP1_z, aa64_sve, gen_gvec_ool_arg_zzz, 2359 uzp_fns[a->esz], a, 0) 2360 TRANS_FEAT(UZP2_z, aa64_sve, gen_gvec_ool_arg_zzz, 2361 uzp_fns[a->esz], a, 1 << a->esz) 2362 2363 TRANS_FEAT(UZP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2364 gen_helper_sve2_uzp_q, a, 0) 2365 TRANS_FEAT(UZP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2366 gen_helper_sve2_uzp_q, a, 16) 2367 2368 static gen_helper_gvec_3 * const trn_fns[4] = { 2369 gen_helper_sve_trn_b, gen_helper_sve_trn_h, 2370 gen_helper_sve_trn_s, gen_helper_sve_trn_d, 2371 }; 2372 2373 TRANS_FEAT(TRN1_z, aa64_sve, gen_gvec_ool_arg_zzz, 2374 trn_fns[a->esz], a, 0) 2375 TRANS_FEAT(TRN2_z, aa64_sve, gen_gvec_ool_arg_zzz, 2376 trn_fns[a->esz], a, 1 << a->esz) 2377 2378 TRANS_FEAT(TRN1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2379 gen_helper_sve2_trn_q, a, 0) 2380 TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2381 gen_helper_sve2_trn_q, a, 16) 2382 2383 /* 2384 *** SVE Permute Vector - Predicated Group 2385 */ 2386 2387 static gen_helper_gvec_3 * const compact_fns[4] = { 2388 NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d 2389 }; 2390 TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, 2391 compact_fns[a->esz], a, 0) 2392 2393 /* Call the helper that computes the ARM LastActiveElement pseudocode 2394 * function, scaled by the element size. This includes the not found 2395 * indication; e.g. not found for esz=3 is -8. 2396 */ 2397 static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg) 2398 { 2399 /* Predicate sizes may be smaller and cannot use simd_desc. We cannot 2400 * round up, as we do elsewhere, because we need the exact size. 2401 */ 2402 TCGv_ptr t_p = tcg_temp_new_ptr(); 2403 unsigned desc = 0; 2404 2405 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s)); 2406 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz); 2407 2408 tcg_gen_addi_ptr(t_p, tcg_env, pred_full_reg_offset(s, pg)); 2409 2410 gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc)); 2411 } 2412 2413 /* Increment LAST to the offset of the next element in the vector, 2414 * wrapping around to 0. 2415 */ 2416 static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz) 2417 { 2418 unsigned vsz = vec_full_reg_size(s); 2419 2420 tcg_gen_addi_i32(last, last, 1 << esz); 2421 if (is_power_of_2(vsz)) { 2422 tcg_gen_andi_i32(last, last, vsz - 1); 2423 } else { 2424 TCGv_i32 max = tcg_constant_i32(vsz); 2425 TCGv_i32 zero = tcg_constant_i32(0); 2426 tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last); 2427 } 2428 } 2429 2430 /* If LAST < 0, set LAST to the offset of the last element in the vector. */ 2431 static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz) 2432 { 2433 unsigned vsz = vec_full_reg_size(s); 2434 2435 if (is_power_of_2(vsz)) { 2436 tcg_gen_andi_i32(last, last, vsz - 1); 2437 } else { 2438 TCGv_i32 max = tcg_constant_i32(vsz - (1 << esz)); 2439 TCGv_i32 zero = tcg_constant_i32(0); 2440 tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last); 2441 } 2442 } 2443 2444 /* Load an unsigned element of ESZ from BASE+OFS. */ 2445 static TCGv_i64 load_esz(TCGv_ptr base, int ofs, int esz) 2446 { 2447 TCGv_i64 r = tcg_temp_new_i64(); 2448 2449 switch (esz) { 2450 case 0: 2451 tcg_gen_ld8u_i64(r, base, ofs); 2452 break; 2453 case 1: 2454 tcg_gen_ld16u_i64(r, base, ofs); 2455 break; 2456 case 2: 2457 tcg_gen_ld32u_i64(r, base, ofs); 2458 break; 2459 case 3: 2460 tcg_gen_ld_i64(r, base, ofs); 2461 break; 2462 default: 2463 g_assert_not_reached(); 2464 } 2465 return r; 2466 } 2467 2468 /* Load an unsigned element of ESZ from RM[LAST]. */ 2469 static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last, 2470 int rm, int esz) 2471 { 2472 TCGv_ptr p = tcg_temp_new_ptr(); 2473 2474 /* Convert offset into vector into offset into ENV. 2475 * The final adjustment for the vector register base 2476 * is added via constant offset to the load. 2477 */ 2478 #if HOST_BIG_ENDIAN 2479 /* Adjust for element ordering. See vec_reg_offset. */ 2480 if (esz < 3) { 2481 tcg_gen_xori_i32(last, last, 8 - (1 << esz)); 2482 } 2483 #endif 2484 tcg_gen_ext_i32_ptr(p, last); 2485 tcg_gen_add_ptr(p, p, tcg_env); 2486 2487 return load_esz(p, vec_full_reg_offset(s, rm), esz); 2488 } 2489 2490 /* Compute CLAST for a Zreg. */ 2491 static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before) 2492 { 2493 TCGv_i32 last; 2494 TCGLabel *over; 2495 TCGv_i64 ele; 2496 unsigned vsz, esz = a->esz; 2497 2498 if (!sve_access_check(s)) { 2499 return true; 2500 } 2501 2502 last = tcg_temp_new_i32(); 2503 over = gen_new_label(); 2504 2505 find_last_active(s, last, esz, a->pg); 2506 2507 /* There is of course no movcond for a 2048-bit vector, 2508 * so we must branch over the actual store. 2509 */ 2510 tcg_gen_brcondi_i32(TCG_COND_LT, last, 0, over); 2511 2512 if (!before) { 2513 incr_last_active(s, last, esz); 2514 } 2515 2516 ele = load_last_active(s, last, a->rm, esz); 2517 2518 vsz = vec_full_reg_size(s); 2519 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele); 2520 2521 /* If this insn used MOVPRFX, we may need a second move. */ 2522 if (a->rd != a->rn) { 2523 TCGLabel *done = gen_new_label(); 2524 tcg_gen_br(done); 2525 2526 gen_set_label(over); 2527 do_mov_z(s, a->rd, a->rn); 2528 2529 gen_set_label(done); 2530 } else { 2531 gen_set_label(over); 2532 } 2533 return true; 2534 } 2535 2536 TRANS_FEAT(CLASTA_z, aa64_sve, do_clast_vector, a, false) 2537 TRANS_FEAT(CLASTB_z, aa64_sve, do_clast_vector, a, true) 2538 2539 /* Compute CLAST for a scalar. */ 2540 static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm, 2541 bool before, TCGv_i64 reg_val) 2542 { 2543 TCGv_i32 last = tcg_temp_new_i32(); 2544 TCGv_i64 ele, cmp; 2545 2546 find_last_active(s, last, esz, pg); 2547 2548 /* Extend the original value of last prior to incrementing. */ 2549 cmp = tcg_temp_new_i64(); 2550 tcg_gen_ext_i32_i64(cmp, last); 2551 2552 if (!before) { 2553 incr_last_active(s, last, esz); 2554 } 2555 2556 /* The conceit here is that while last < 0 indicates not found, after 2557 * adjusting for tcg_env->vfp.zregs[rm], it is still a valid address 2558 * from which we can load garbage. We then discard the garbage with 2559 * a conditional move. 2560 */ 2561 ele = load_last_active(s, last, rm, esz); 2562 2563 tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, tcg_constant_i64(0), 2564 ele, reg_val); 2565 } 2566 2567 /* Compute CLAST for a Vreg. */ 2568 static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before) 2569 { 2570 if (sve_access_check(s)) { 2571 int esz = a->esz; 2572 int ofs = vec_reg_offset(s, a->rd, 0, esz); 2573 TCGv_i64 reg = load_esz(tcg_env, ofs, esz); 2574 2575 do_clast_scalar(s, esz, a->pg, a->rn, before, reg); 2576 write_fp_dreg(s, a->rd, reg); 2577 } 2578 return true; 2579 } 2580 2581 TRANS_FEAT(CLASTA_v, aa64_sve, do_clast_fp, a, false) 2582 TRANS_FEAT(CLASTB_v, aa64_sve, do_clast_fp, a, true) 2583 2584 /* Compute CLAST for a Xreg. */ 2585 static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before) 2586 { 2587 TCGv_i64 reg; 2588 2589 if (!sve_access_check(s)) { 2590 return true; 2591 } 2592 2593 reg = cpu_reg(s, a->rd); 2594 switch (a->esz) { 2595 case 0: 2596 tcg_gen_ext8u_i64(reg, reg); 2597 break; 2598 case 1: 2599 tcg_gen_ext16u_i64(reg, reg); 2600 break; 2601 case 2: 2602 tcg_gen_ext32u_i64(reg, reg); 2603 break; 2604 case 3: 2605 break; 2606 default: 2607 g_assert_not_reached(); 2608 } 2609 2610 do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg); 2611 return true; 2612 } 2613 2614 TRANS_FEAT(CLASTA_r, aa64_sve, do_clast_general, a, false) 2615 TRANS_FEAT(CLASTB_r, aa64_sve, do_clast_general, a, true) 2616 2617 /* Compute LAST for a scalar. */ 2618 static TCGv_i64 do_last_scalar(DisasContext *s, int esz, 2619 int pg, int rm, bool before) 2620 { 2621 TCGv_i32 last = tcg_temp_new_i32(); 2622 2623 find_last_active(s, last, esz, pg); 2624 if (before) { 2625 wrap_last_active(s, last, esz); 2626 } else { 2627 incr_last_active(s, last, esz); 2628 } 2629 2630 return load_last_active(s, last, rm, esz); 2631 } 2632 2633 /* Compute LAST for a Vreg. */ 2634 static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before) 2635 { 2636 if (sve_access_check(s)) { 2637 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); 2638 write_fp_dreg(s, a->rd, val); 2639 } 2640 return true; 2641 } 2642 2643 TRANS_FEAT(LASTA_v, aa64_sve, do_last_fp, a, false) 2644 TRANS_FEAT(LASTB_v, aa64_sve, do_last_fp, a, true) 2645 2646 /* Compute LAST for a Xreg. */ 2647 static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before) 2648 { 2649 if (sve_access_check(s)) { 2650 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); 2651 tcg_gen_mov_i64(cpu_reg(s, a->rd), val); 2652 } 2653 return true; 2654 } 2655 2656 TRANS_FEAT(LASTA_r, aa64_sve, do_last_general, a, false) 2657 TRANS_FEAT(LASTB_r, aa64_sve, do_last_general, a, true) 2658 2659 static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a) 2660 { 2661 if (!dc_isar_feature(aa64_sve, s)) { 2662 return false; 2663 } 2664 if (sve_access_check(s)) { 2665 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn)); 2666 } 2667 return true; 2668 } 2669 2670 static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a) 2671 { 2672 if (!dc_isar_feature(aa64_sve, s)) { 2673 return false; 2674 } 2675 if (sve_access_check(s)) { 2676 int ofs = vec_reg_offset(s, a->rn, 0, a->esz); 2677 TCGv_i64 t = load_esz(tcg_env, ofs, a->esz); 2678 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t); 2679 } 2680 return true; 2681 } 2682 2683 static gen_helper_gvec_3 * const revb_fns[4] = { 2684 NULL, gen_helper_sve_revb_h, 2685 gen_helper_sve_revb_s, gen_helper_sve_revb_d, 2686 }; 2687 TRANS_FEAT(REVB, aa64_sve, gen_gvec_ool_arg_zpz, revb_fns[a->esz], a, 0) 2688 2689 static gen_helper_gvec_3 * const revh_fns[4] = { 2690 NULL, NULL, gen_helper_sve_revh_s, gen_helper_sve_revh_d, 2691 }; 2692 TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0) 2693 2694 TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz, 2695 a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0) 2696 2697 TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0) 2698 2699 TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz, 2700 gen_helper_sve_splice, a, a->esz) 2701 2702 TRANS_FEAT(SPLICE_sve2, aa64_sve2, gen_gvec_ool_zzzp, gen_helper_sve_splice, 2703 a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz) 2704 2705 /* 2706 *** SVE Integer Compare - Vectors Group 2707 */ 2708 2709 static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a, 2710 gen_helper_gvec_flags_4 *gen_fn) 2711 { 2712 TCGv_ptr pd, zn, zm, pg; 2713 unsigned vsz; 2714 TCGv_i32 t; 2715 2716 if (gen_fn == NULL) { 2717 return false; 2718 } 2719 if (!sve_access_check(s)) { 2720 return true; 2721 } 2722 2723 vsz = vec_full_reg_size(s); 2724 t = tcg_temp_new_i32(); 2725 pd = tcg_temp_new_ptr(); 2726 zn = tcg_temp_new_ptr(); 2727 zm = tcg_temp_new_ptr(); 2728 pg = tcg_temp_new_ptr(); 2729 2730 tcg_gen_addi_ptr(pd, tcg_env, pred_full_reg_offset(s, a->rd)); 2731 tcg_gen_addi_ptr(zn, tcg_env, vec_full_reg_offset(s, a->rn)); 2732 tcg_gen_addi_ptr(zm, tcg_env, vec_full_reg_offset(s, a->rm)); 2733 tcg_gen_addi_ptr(pg, tcg_env, pred_full_reg_offset(s, a->pg)); 2734 2735 gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0))); 2736 2737 do_pred_flags(t); 2738 return true; 2739 } 2740 2741 #define DO_PPZZ(NAME, name) \ 2742 static gen_helper_gvec_flags_4 * const name##_ppzz_fns[4] = { \ 2743 gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \ 2744 gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \ 2745 }; \ 2746 TRANS_FEAT(NAME##_ppzz, aa64_sve, do_ppzz_flags, \ 2747 a, name##_ppzz_fns[a->esz]) 2748 2749 DO_PPZZ(CMPEQ, cmpeq) 2750 DO_PPZZ(CMPNE, cmpne) 2751 DO_PPZZ(CMPGT, cmpgt) 2752 DO_PPZZ(CMPGE, cmpge) 2753 DO_PPZZ(CMPHI, cmphi) 2754 DO_PPZZ(CMPHS, cmphs) 2755 2756 #undef DO_PPZZ 2757 2758 #define DO_PPZW(NAME, name) \ 2759 static gen_helper_gvec_flags_4 * const name##_ppzw_fns[4] = { \ 2760 gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \ 2761 gen_helper_sve_##name##_ppzw_s, NULL \ 2762 }; \ 2763 TRANS_FEAT(NAME##_ppzw, aa64_sve, do_ppzz_flags, \ 2764 a, name##_ppzw_fns[a->esz]) 2765 2766 DO_PPZW(CMPEQ, cmpeq) 2767 DO_PPZW(CMPNE, cmpne) 2768 DO_PPZW(CMPGT, cmpgt) 2769 DO_PPZW(CMPGE, cmpge) 2770 DO_PPZW(CMPHI, cmphi) 2771 DO_PPZW(CMPHS, cmphs) 2772 DO_PPZW(CMPLT, cmplt) 2773 DO_PPZW(CMPLE, cmple) 2774 DO_PPZW(CMPLO, cmplo) 2775 DO_PPZW(CMPLS, cmpls) 2776 2777 #undef DO_PPZW 2778 2779 /* 2780 *** SVE Integer Compare - Immediate Groups 2781 */ 2782 2783 static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a, 2784 gen_helper_gvec_flags_3 *gen_fn) 2785 { 2786 TCGv_ptr pd, zn, pg; 2787 unsigned vsz; 2788 TCGv_i32 t; 2789 2790 if (gen_fn == NULL) { 2791 return false; 2792 } 2793 if (!sve_access_check(s)) { 2794 return true; 2795 } 2796 2797 vsz = vec_full_reg_size(s); 2798 t = tcg_temp_new_i32(); 2799 pd = tcg_temp_new_ptr(); 2800 zn = tcg_temp_new_ptr(); 2801 pg = tcg_temp_new_ptr(); 2802 2803 tcg_gen_addi_ptr(pd, tcg_env, pred_full_reg_offset(s, a->rd)); 2804 tcg_gen_addi_ptr(zn, tcg_env, vec_full_reg_offset(s, a->rn)); 2805 tcg_gen_addi_ptr(pg, tcg_env, pred_full_reg_offset(s, a->pg)); 2806 2807 gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm))); 2808 2809 do_pred_flags(t); 2810 return true; 2811 } 2812 2813 #define DO_PPZI(NAME, name) \ 2814 static gen_helper_gvec_flags_3 * const name##_ppzi_fns[4] = { \ 2815 gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \ 2816 gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \ 2817 }; \ 2818 TRANS_FEAT(NAME##_ppzi, aa64_sve, do_ppzi_flags, a, \ 2819 name##_ppzi_fns[a->esz]) 2820 2821 DO_PPZI(CMPEQ, cmpeq) 2822 DO_PPZI(CMPNE, cmpne) 2823 DO_PPZI(CMPGT, cmpgt) 2824 DO_PPZI(CMPGE, cmpge) 2825 DO_PPZI(CMPHI, cmphi) 2826 DO_PPZI(CMPHS, cmphs) 2827 DO_PPZI(CMPLT, cmplt) 2828 DO_PPZI(CMPLE, cmple) 2829 DO_PPZI(CMPLO, cmplo) 2830 DO_PPZI(CMPLS, cmpls) 2831 2832 #undef DO_PPZI 2833 2834 /* 2835 *** SVE Partition Break Group 2836 */ 2837 2838 static bool do_brk3(DisasContext *s, arg_rprr_s *a, 2839 gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s) 2840 { 2841 if (!sve_access_check(s)) { 2842 return true; 2843 } 2844 2845 unsigned vsz = pred_full_reg_size(s); 2846 2847 /* Predicate sizes may be smaller and cannot use simd_desc. */ 2848 TCGv_ptr d = tcg_temp_new_ptr(); 2849 TCGv_ptr n = tcg_temp_new_ptr(); 2850 TCGv_ptr m = tcg_temp_new_ptr(); 2851 TCGv_ptr g = tcg_temp_new_ptr(); 2852 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz)); 2853 2854 tcg_gen_addi_ptr(d, tcg_env, pred_full_reg_offset(s, a->rd)); 2855 tcg_gen_addi_ptr(n, tcg_env, pred_full_reg_offset(s, a->rn)); 2856 tcg_gen_addi_ptr(m, tcg_env, pred_full_reg_offset(s, a->rm)); 2857 tcg_gen_addi_ptr(g, tcg_env, pred_full_reg_offset(s, a->pg)); 2858 2859 if (a->s) { 2860 TCGv_i32 t = tcg_temp_new_i32(); 2861 fn_s(t, d, n, m, g, desc); 2862 do_pred_flags(t); 2863 } else { 2864 fn(d, n, m, g, desc); 2865 } 2866 return true; 2867 } 2868 2869 static bool do_brk2(DisasContext *s, arg_rpr_s *a, 2870 gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s) 2871 { 2872 if (!sve_access_check(s)) { 2873 return true; 2874 } 2875 2876 unsigned vsz = pred_full_reg_size(s); 2877 2878 /* Predicate sizes may be smaller and cannot use simd_desc. */ 2879 TCGv_ptr d = tcg_temp_new_ptr(); 2880 TCGv_ptr n = tcg_temp_new_ptr(); 2881 TCGv_ptr g = tcg_temp_new_ptr(); 2882 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz)); 2883 2884 tcg_gen_addi_ptr(d, tcg_env, pred_full_reg_offset(s, a->rd)); 2885 tcg_gen_addi_ptr(n, tcg_env, pred_full_reg_offset(s, a->rn)); 2886 tcg_gen_addi_ptr(g, tcg_env, pred_full_reg_offset(s, a->pg)); 2887 2888 if (a->s) { 2889 TCGv_i32 t = tcg_temp_new_i32(); 2890 fn_s(t, d, n, g, desc); 2891 do_pred_flags(t); 2892 } else { 2893 fn(d, n, g, desc); 2894 } 2895 return true; 2896 } 2897 2898 TRANS_FEAT(BRKPA, aa64_sve, do_brk3, a, 2899 gen_helper_sve_brkpa, gen_helper_sve_brkpas) 2900 TRANS_FEAT(BRKPB, aa64_sve, do_brk3, a, 2901 gen_helper_sve_brkpb, gen_helper_sve_brkpbs) 2902 2903 TRANS_FEAT(BRKA_m, aa64_sve, do_brk2, a, 2904 gen_helper_sve_brka_m, gen_helper_sve_brkas_m) 2905 TRANS_FEAT(BRKB_m, aa64_sve, do_brk2, a, 2906 gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m) 2907 2908 TRANS_FEAT(BRKA_z, aa64_sve, do_brk2, a, 2909 gen_helper_sve_brka_z, gen_helper_sve_brkas_z) 2910 TRANS_FEAT(BRKB_z, aa64_sve, do_brk2, a, 2911 gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z) 2912 2913 TRANS_FEAT(BRKN, aa64_sve, do_brk2, a, 2914 gen_helper_sve_brkn, gen_helper_sve_brkns) 2915 2916 /* 2917 *** SVE Predicate Count Group 2918 */ 2919 2920 static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg) 2921 { 2922 unsigned psz = pred_full_reg_size(s); 2923 2924 if (psz <= 8) { 2925 uint64_t psz_mask; 2926 2927 tcg_gen_ld_i64(val, tcg_env, pred_full_reg_offset(s, pn)); 2928 if (pn != pg) { 2929 TCGv_i64 g = tcg_temp_new_i64(); 2930 tcg_gen_ld_i64(g, tcg_env, pred_full_reg_offset(s, pg)); 2931 tcg_gen_and_i64(val, val, g); 2932 } 2933 2934 /* Reduce the pred_esz_masks value simply to reduce the 2935 * size of the code generated here. 2936 */ 2937 psz_mask = MAKE_64BIT_MASK(0, psz * 8); 2938 tcg_gen_andi_i64(val, val, pred_esz_masks[esz] & psz_mask); 2939 2940 tcg_gen_ctpop_i64(val, val); 2941 } else { 2942 TCGv_ptr t_pn = tcg_temp_new_ptr(); 2943 TCGv_ptr t_pg = tcg_temp_new_ptr(); 2944 unsigned desc = 0; 2945 2946 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz); 2947 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz); 2948 2949 tcg_gen_addi_ptr(t_pn, tcg_env, pred_full_reg_offset(s, pn)); 2950 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); 2951 2952 gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc)); 2953 } 2954 } 2955 2956 static bool trans_CNTP(DisasContext *s, arg_CNTP *a) 2957 { 2958 if (!dc_isar_feature(aa64_sve, s)) { 2959 return false; 2960 } 2961 if (sve_access_check(s)) { 2962 do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg); 2963 } 2964 return true; 2965 } 2966 2967 static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a) 2968 { 2969 if (!dc_isar_feature(aa64_sve, s)) { 2970 return false; 2971 } 2972 if (sve_access_check(s)) { 2973 TCGv_i64 reg = cpu_reg(s, a->rd); 2974 TCGv_i64 val = tcg_temp_new_i64(); 2975 2976 do_cntp(s, val, a->esz, a->pg, a->pg); 2977 if (a->d) { 2978 tcg_gen_sub_i64(reg, reg, val); 2979 } else { 2980 tcg_gen_add_i64(reg, reg, val); 2981 } 2982 } 2983 return true; 2984 } 2985 2986 static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a) 2987 { 2988 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 2989 return false; 2990 } 2991 if (sve_access_check(s)) { 2992 unsigned vsz = vec_full_reg_size(s); 2993 TCGv_i64 val = tcg_temp_new_i64(); 2994 GVecGen2sFn *gvec_fn = a->d ? tcg_gen_gvec_subs : tcg_gen_gvec_adds; 2995 2996 do_cntp(s, val, a->esz, a->pg, a->pg); 2997 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd), 2998 vec_full_reg_offset(s, a->rn), val, vsz, vsz); 2999 } 3000 return true; 3001 } 3002 3003 static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a) 3004 { 3005 if (!dc_isar_feature(aa64_sve, s)) { 3006 return false; 3007 } 3008 if (sve_access_check(s)) { 3009 TCGv_i64 reg = cpu_reg(s, a->rd); 3010 TCGv_i64 val = tcg_temp_new_i64(); 3011 3012 do_cntp(s, val, a->esz, a->pg, a->pg); 3013 do_sat_addsub_32(reg, val, a->u, a->d); 3014 } 3015 return true; 3016 } 3017 3018 static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a) 3019 { 3020 if (!dc_isar_feature(aa64_sve, s)) { 3021 return false; 3022 } 3023 if (sve_access_check(s)) { 3024 TCGv_i64 reg = cpu_reg(s, a->rd); 3025 TCGv_i64 val = tcg_temp_new_i64(); 3026 3027 do_cntp(s, val, a->esz, a->pg, a->pg); 3028 do_sat_addsub_64(reg, val, a->u, a->d); 3029 } 3030 return true; 3031 } 3032 3033 static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a) 3034 { 3035 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3036 return false; 3037 } 3038 if (sve_access_check(s)) { 3039 TCGv_i64 val = tcg_temp_new_i64(); 3040 do_cntp(s, val, a->esz, a->pg, a->pg); 3041 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d); 3042 } 3043 return true; 3044 } 3045 3046 /* 3047 *** SVE Integer Compare Scalars Group 3048 */ 3049 3050 static bool trans_CTERM(DisasContext *s, arg_CTERM *a) 3051 { 3052 if (!dc_isar_feature(aa64_sve, s)) { 3053 return false; 3054 } 3055 if (!sve_access_check(s)) { 3056 return true; 3057 } 3058 3059 TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ); 3060 TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf); 3061 TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf); 3062 TCGv_i64 cmp = tcg_temp_new_i64(); 3063 3064 tcg_gen_setcond_i64(cond, cmp, rn, rm); 3065 tcg_gen_extrl_i64_i32(cpu_NF, cmp); 3066 3067 /* VF = !NF & !CF. */ 3068 tcg_gen_xori_i32(cpu_VF, cpu_NF, 1); 3069 tcg_gen_andc_i32(cpu_VF, cpu_VF, cpu_CF); 3070 3071 /* Both NF and VF actually look at bit 31. */ 3072 tcg_gen_neg_i32(cpu_NF, cpu_NF); 3073 tcg_gen_neg_i32(cpu_VF, cpu_VF); 3074 return true; 3075 } 3076 3077 static bool trans_WHILE(DisasContext *s, arg_WHILE *a) 3078 { 3079 TCGv_i64 op0, op1, t0, t1, tmax; 3080 TCGv_i32 t2; 3081 TCGv_ptr ptr; 3082 unsigned vsz = vec_full_reg_size(s); 3083 unsigned desc = 0; 3084 TCGCond cond; 3085 uint64_t maxval; 3086 /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */ 3087 bool eq = a->eq == a->lt; 3088 3089 /* The greater-than conditions are all SVE2. */ 3090 if (a->lt 3091 ? !dc_isar_feature(aa64_sve, s) 3092 : !dc_isar_feature(aa64_sve2, s)) { 3093 return false; 3094 } 3095 if (!sve_access_check(s)) { 3096 return true; 3097 } 3098 3099 op0 = read_cpu_reg(s, a->rn, 1); 3100 op1 = read_cpu_reg(s, a->rm, 1); 3101 3102 if (!a->sf) { 3103 if (a->u) { 3104 tcg_gen_ext32u_i64(op0, op0); 3105 tcg_gen_ext32u_i64(op1, op1); 3106 } else { 3107 tcg_gen_ext32s_i64(op0, op0); 3108 tcg_gen_ext32s_i64(op1, op1); 3109 } 3110 } 3111 3112 /* For the helper, compress the different conditions into a computation 3113 * of how many iterations for which the condition is true. 3114 */ 3115 t0 = tcg_temp_new_i64(); 3116 t1 = tcg_temp_new_i64(); 3117 3118 if (a->lt) { 3119 tcg_gen_sub_i64(t0, op1, op0); 3120 if (a->u) { 3121 maxval = a->sf ? UINT64_MAX : UINT32_MAX; 3122 cond = eq ? TCG_COND_LEU : TCG_COND_LTU; 3123 } else { 3124 maxval = a->sf ? INT64_MAX : INT32_MAX; 3125 cond = eq ? TCG_COND_LE : TCG_COND_LT; 3126 } 3127 } else { 3128 tcg_gen_sub_i64(t0, op0, op1); 3129 if (a->u) { 3130 maxval = 0; 3131 cond = eq ? TCG_COND_GEU : TCG_COND_GTU; 3132 } else { 3133 maxval = a->sf ? INT64_MIN : INT32_MIN; 3134 cond = eq ? TCG_COND_GE : TCG_COND_GT; 3135 } 3136 } 3137 3138 tmax = tcg_constant_i64(vsz >> a->esz); 3139 if (eq) { 3140 /* Equality means one more iteration. */ 3141 tcg_gen_addi_i64(t0, t0, 1); 3142 3143 /* 3144 * For the less-than while, if op1 is maxval (and the only time 3145 * the addition above could overflow), then we produce an all-true 3146 * predicate by setting the count to the vector length. This is 3147 * because the pseudocode is described as an increment + compare 3148 * loop, and the maximum integer would always compare true. 3149 * Similarly, the greater-than while has the same issue with the 3150 * minimum integer due to the decrement + compare loop. 3151 */ 3152 tcg_gen_movi_i64(t1, maxval); 3153 tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0); 3154 } 3155 3156 /* Bound to the maximum. */ 3157 tcg_gen_umin_i64(t0, t0, tmax); 3158 3159 /* Set the count to zero if the condition is false. */ 3160 tcg_gen_movi_i64(t1, 0); 3161 tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1); 3162 3163 /* Since we're bounded, pass as a 32-bit type. */ 3164 t2 = tcg_temp_new_i32(); 3165 tcg_gen_extrl_i64_i32(t2, t0); 3166 3167 /* Scale elements to bits. */ 3168 tcg_gen_shli_i32(t2, t2, a->esz); 3169 3170 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); 3171 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 3172 3173 ptr = tcg_temp_new_ptr(); 3174 tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd)); 3175 3176 if (a->lt) { 3177 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); 3178 } else { 3179 gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc)); 3180 } 3181 do_pred_flags(t2); 3182 return true; 3183 } 3184 3185 static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) 3186 { 3187 TCGv_i64 op0, op1, diff, t1, tmax; 3188 TCGv_i32 t2; 3189 TCGv_ptr ptr; 3190 unsigned vsz = vec_full_reg_size(s); 3191 unsigned desc = 0; 3192 3193 if (!dc_isar_feature(aa64_sve2, s)) { 3194 return false; 3195 } 3196 if (!sve_access_check(s)) { 3197 return true; 3198 } 3199 3200 op0 = read_cpu_reg(s, a->rn, 1); 3201 op1 = read_cpu_reg(s, a->rm, 1); 3202 3203 tmax = tcg_constant_i64(vsz); 3204 diff = tcg_temp_new_i64(); 3205 3206 if (a->rw) { 3207 /* WHILERW */ 3208 /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */ 3209 t1 = tcg_temp_new_i64(); 3210 tcg_gen_sub_i64(diff, op0, op1); 3211 tcg_gen_sub_i64(t1, op1, op0); 3212 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1); 3213 /* Round down to a multiple of ESIZE. */ 3214 tcg_gen_andi_i64(diff, diff, -1 << a->esz); 3215 /* If op1 == op0, diff == 0, and the condition is always true. */ 3216 tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff); 3217 } else { 3218 /* WHILEWR */ 3219 tcg_gen_sub_i64(diff, op1, op0); 3220 /* Round down to a multiple of ESIZE. */ 3221 tcg_gen_andi_i64(diff, diff, -1 << a->esz); 3222 /* If op0 >= op1, diff <= 0, the condition is always true. */ 3223 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff); 3224 } 3225 3226 /* Bound to the maximum. */ 3227 tcg_gen_umin_i64(diff, diff, tmax); 3228 3229 /* Since we're bounded, pass as a 32-bit type. */ 3230 t2 = tcg_temp_new_i32(); 3231 tcg_gen_extrl_i64_i32(t2, diff); 3232 3233 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); 3234 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 3235 3236 ptr = tcg_temp_new_ptr(); 3237 tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd)); 3238 3239 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); 3240 do_pred_flags(t2); 3241 return true; 3242 } 3243 3244 /* 3245 *** SVE Integer Wide Immediate - Unpredicated Group 3246 */ 3247 3248 static bool trans_FDUP(DisasContext *s, arg_FDUP *a) 3249 { 3250 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3251 return false; 3252 } 3253 if (sve_access_check(s)) { 3254 unsigned vsz = vec_full_reg_size(s); 3255 int dofs = vec_full_reg_offset(s, a->rd); 3256 uint64_t imm; 3257 3258 /* Decode the VFP immediate. */ 3259 imm = vfp_expand_imm(a->esz, a->imm); 3260 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm); 3261 } 3262 return true; 3263 } 3264 3265 static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a) 3266 { 3267 if (!dc_isar_feature(aa64_sve, s)) { 3268 return false; 3269 } 3270 if (sve_access_check(s)) { 3271 unsigned vsz = vec_full_reg_size(s); 3272 int dofs = vec_full_reg_offset(s, a->rd); 3273 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm); 3274 } 3275 return true; 3276 } 3277 3278 TRANS_FEAT(ADD_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_addi, a) 3279 3280 static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a) 3281 { 3282 a->imm = -a->imm; 3283 return trans_ADD_zzi(s, a); 3284 } 3285 3286 static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a) 3287 { 3288 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 }; 3289 static const GVecGen2s op[4] = { 3290 { .fni8 = tcg_gen_vec_sub8_i64, 3291 .fniv = tcg_gen_sub_vec, 3292 .fno = gen_helper_sve_subri_b, 3293 .opt_opc = vecop_list, 3294 .vece = MO_8, 3295 .scalar_first = true }, 3296 { .fni8 = tcg_gen_vec_sub16_i64, 3297 .fniv = tcg_gen_sub_vec, 3298 .fno = gen_helper_sve_subri_h, 3299 .opt_opc = vecop_list, 3300 .vece = MO_16, 3301 .scalar_first = true }, 3302 { .fni4 = tcg_gen_sub_i32, 3303 .fniv = tcg_gen_sub_vec, 3304 .fno = gen_helper_sve_subri_s, 3305 .opt_opc = vecop_list, 3306 .vece = MO_32, 3307 .scalar_first = true }, 3308 { .fni8 = tcg_gen_sub_i64, 3309 .fniv = tcg_gen_sub_vec, 3310 .fno = gen_helper_sve_subri_d, 3311 .opt_opc = vecop_list, 3312 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 3313 .vece = MO_64, 3314 .scalar_first = true } 3315 }; 3316 3317 if (!dc_isar_feature(aa64_sve, s)) { 3318 return false; 3319 } 3320 if (sve_access_check(s)) { 3321 unsigned vsz = vec_full_reg_size(s); 3322 tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd), 3323 vec_full_reg_offset(s, a->rn), 3324 vsz, vsz, tcg_constant_i64(a->imm), &op[a->esz]); 3325 } 3326 return true; 3327 } 3328 3329 TRANS_FEAT(MUL_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_muli, a) 3330 3331 static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d) 3332 { 3333 if (sve_access_check(s)) { 3334 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, 3335 tcg_constant_i64(a->imm), u, d); 3336 } 3337 return true; 3338 } 3339 3340 TRANS_FEAT(SQADD_zzi, aa64_sve, do_zzi_sat, a, false, false) 3341 TRANS_FEAT(UQADD_zzi, aa64_sve, do_zzi_sat, a, true, false) 3342 TRANS_FEAT(SQSUB_zzi, aa64_sve, do_zzi_sat, a, false, true) 3343 TRANS_FEAT(UQSUB_zzi, aa64_sve, do_zzi_sat, a, true, true) 3344 3345 static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn) 3346 { 3347 if (sve_access_check(s)) { 3348 unsigned vsz = vec_full_reg_size(s); 3349 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd), 3350 vec_full_reg_offset(s, a->rn), 3351 tcg_constant_i64(a->imm), vsz, vsz, 0, fn); 3352 } 3353 return true; 3354 } 3355 3356 #define DO_ZZI(NAME, name) \ 3357 static gen_helper_gvec_2i * const name##i_fns[4] = { \ 3358 gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \ 3359 gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \ 3360 }; \ 3361 TRANS_FEAT(NAME##_zzi, aa64_sve, do_zzi_ool, a, name##i_fns[a->esz]) 3362 3363 DO_ZZI(SMAX, smax) 3364 DO_ZZI(UMAX, umax) 3365 DO_ZZI(SMIN, smin) 3366 DO_ZZI(UMIN, umin) 3367 3368 #undef DO_ZZI 3369 3370 static gen_helper_gvec_4 * const dot_fns[2][2] = { 3371 { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h }, 3372 { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h } 3373 }; 3374 TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz, 3375 dot_fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0) 3376 3377 /* 3378 * SVE Multiply - Indexed 3379 */ 3380 3381 TRANS_FEAT(SDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz, 3382 gen_helper_gvec_sdot_idx_b, a) 3383 TRANS_FEAT(SDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz, 3384 gen_helper_gvec_sdot_idx_h, a) 3385 TRANS_FEAT(UDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz, 3386 gen_helper_gvec_udot_idx_b, a) 3387 TRANS_FEAT(UDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz, 3388 gen_helper_gvec_udot_idx_h, a) 3389 3390 TRANS_FEAT(SUDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, 3391 gen_helper_gvec_sudot_idx_b, a) 3392 TRANS_FEAT(USDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, 3393 gen_helper_gvec_usdot_idx_b, a) 3394 3395 #define DO_SVE2_RRX(NAME, FUNC) \ 3396 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \ 3397 a->rd, a->rn, a->rm, a->index) 3398 3399 DO_SVE2_RRX(MUL_zzx_h, gen_helper_gvec_mul_idx_h) 3400 DO_SVE2_RRX(MUL_zzx_s, gen_helper_gvec_mul_idx_s) 3401 DO_SVE2_RRX(MUL_zzx_d, gen_helper_gvec_mul_idx_d) 3402 3403 DO_SVE2_RRX(SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h) 3404 DO_SVE2_RRX(SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s) 3405 DO_SVE2_RRX(SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d) 3406 3407 DO_SVE2_RRX(SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h) 3408 DO_SVE2_RRX(SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s) 3409 DO_SVE2_RRX(SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d) 3410 3411 #undef DO_SVE2_RRX 3412 3413 #define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \ 3414 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \ 3415 a->rd, a->rn, a->rm, (a->index << 1) | TOP) 3416 3417 DO_SVE2_RRX_TB(SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false) 3418 DO_SVE2_RRX_TB(SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false) 3419 DO_SVE2_RRX_TB(SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true) 3420 DO_SVE2_RRX_TB(SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true) 3421 3422 DO_SVE2_RRX_TB(SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false) 3423 DO_SVE2_RRX_TB(SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false) 3424 DO_SVE2_RRX_TB(SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true) 3425 DO_SVE2_RRX_TB(SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true) 3426 3427 DO_SVE2_RRX_TB(UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false) 3428 DO_SVE2_RRX_TB(UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false) 3429 DO_SVE2_RRX_TB(UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true) 3430 DO_SVE2_RRX_TB(UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true) 3431 3432 #undef DO_SVE2_RRX_TB 3433 3434 #define DO_SVE2_RRXR(NAME, FUNC) \ 3435 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzxz, FUNC, a) 3436 3437 DO_SVE2_RRXR(MLA_zzxz_h, gen_helper_gvec_mla_idx_h) 3438 DO_SVE2_RRXR(MLA_zzxz_s, gen_helper_gvec_mla_idx_s) 3439 DO_SVE2_RRXR(MLA_zzxz_d, gen_helper_gvec_mla_idx_d) 3440 3441 DO_SVE2_RRXR(MLS_zzxz_h, gen_helper_gvec_mls_idx_h) 3442 DO_SVE2_RRXR(MLS_zzxz_s, gen_helper_gvec_mls_idx_s) 3443 DO_SVE2_RRXR(MLS_zzxz_d, gen_helper_gvec_mls_idx_d) 3444 3445 DO_SVE2_RRXR(SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h) 3446 DO_SVE2_RRXR(SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s) 3447 DO_SVE2_RRXR(SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d) 3448 3449 DO_SVE2_RRXR(SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h) 3450 DO_SVE2_RRXR(SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s) 3451 DO_SVE2_RRXR(SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d) 3452 3453 #undef DO_SVE2_RRXR 3454 3455 #define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \ 3456 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \ 3457 a->rd, a->rn, a->rm, a->ra, (a->index << 1) | TOP) 3458 3459 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false) 3460 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false) 3461 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true) 3462 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true) 3463 3464 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false) 3465 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false) 3466 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true) 3467 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true) 3468 3469 DO_SVE2_RRXR_TB(SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false) 3470 DO_SVE2_RRXR_TB(SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false) 3471 DO_SVE2_RRXR_TB(SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true) 3472 DO_SVE2_RRXR_TB(SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true) 3473 3474 DO_SVE2_RRXR_TB(UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false) 3475 DO_SVE2_RRXR_TB(UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false) 3476 DO_SVE2_RRXR_TB(UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true) 3477 DO_SVE2_RRXR_TB(UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true) 3478 3479 DO_SVE2_RRXR_TB(SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false) 3480 DO_SVE2_RRXR_TB(SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false) 3481 DO_SVE2_RRXR_TB(SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true) 3482 DO_SVE2_RRXR_TB(SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true) 3483 3484 DO_SVE2_RRXR_TB(UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false) 3485 DO_SVE2_RRXR_TB(UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false) 3486 DO_SVE2_RRXR_TB(UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true) 3487 DO_SVE2_RRXR_TB(UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true) 3488 3489 #undef DO_SVE2_RRXR_TB 3490 3491 #define DO_SVE2_RRXR_ROT(NAME, FUNC) \ 3492 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \ 3493 a->rd, a->rn, a->rm, a->ra, (a->index << 2) | a->rot) 3494 3495 DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h) 3496 DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s) 3497 3498 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h) 3499 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s) 3500 3501 DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s) 3502 DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d) 3503 3504 #undef DO_SVE2_RRXR_ROT 3505 3506 /* 3507 *** SVE Floating Point Multiply-Add Indexed Group 3508 */ 3509 3510 static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub) 3511 { 3512 static gen_helper_gvec_4_ptr * const fns[4] = { 3513 NULL, 3514 gen_helper_gvec_fmla_idx_h, 3515 gen_helper_gvec_fmla_idx_s, 3516 gen_helper_gvec_fmla_idx_d, 3517 }; 3518 return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, 3519 (a->index << 1) | sub, 3520 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 3521 } 3522 3523 TRANS_FEAT(FMLA_zzxz, aa64_sve, do_FMLA_zzxz, a, false) 3524 TRANS_FEAT(FMLS_zzxz, aa64_sve, do_FMLA_zzxz, a, true) 3525 3526 /* 3527 *** SVE Floating Point Multiply Indexed Group 3528 */ 3529 3530 static gen_helper_gvec_3_ptr * const fmul_idx_fns[4] = { 3531 NULL, gen_helper_gvec_fmul_idx_h, 3532 gen_helper_gvec_fmul_idx_s, gen_helper_gvec_fmul_idx_d, 3533 }; 3534 TRANS_FEAT(FMUL_zzx, aa64_sve, gen_gvec_fpst_zzz, 3535 fmul_idx_fns[a->esz], a->rd, a->rn, a->rm, a->index, 3536 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3537 3538 /* 3539 *** SVE Floating Point Fast Reduction Group 3540 */ 3541 3542 typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr, 3543 TCGv_ptr, TCGv_i32); 3544 3545 static bool do_reduce(DisasContext *s, arg_rpr_esz *a, 3546 gen_helper_fp_reduce *fn) 3547 { 3548 unsigned vsz, p2vsz; 3549 TCGv_i32 t_desc; 3550 TCGv_ptr t_zn, t_pg, status; 3551 TCGv_i64 temp; 3552 3553 if (fn == NULL) { 3554 return false; 3555 } 3556 if (!sve_access_check(s)) { 3557 return true; 3558 } 3559 3560 vsz = vec_full_reg_size(s); 3561 p2vsz = pow2ceil(vsz); 3562 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, p2vsz)); 3563 temp = tcg_temp_new_i64(); 3564 t_zn = tcg_temp_new_ptr(); 3565 t_pg = tcg_temp_new_ptr(); 3566 3567 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn)); 3568 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg)); 3569 status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 3570 3571 fn(temp, t_zn, t_pg, status, t_desc); 3572 3573 write_fp_dreg(s, a->rd, temp); 3574 return true; 3575 } 3576 3577 #define DO_VPZ(NAME, name) \ 3578 static gen_helper_fp_reduce * const name##_fns[4] = { \ 3579 NULL, gen_helper_sve_##name##_h, \ 3580 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ 3581 }; \ 3582 TRANS_FEAT(NAME, aa64_sve, do_reduce, a, name##_fns[a->esz]) 3583 3584 DO_VPZ(FADDV, faddv) 3585 DO_VPZ(FMINNMV, fminnmv) 3586 DO_VPZ(FMAXNMV, fmaxnmv) 3587 DO_VPZ(FMINV, fminv) 3588 DO_VPZ(FMAXV, fmaxv) 3589 3590 #undef DO_VPZ 3591 3592 /* 3593 *** SVE Floating Point Unary Operations - Unpredicated Group 3594 */ 3595 3596 static gen_helper_gvec_2_ptr * const frecpe_fns[] = { 3597 NULL, gen_helper_gvec_frecpe_h, 3598 gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d, 3599 }; 3600 TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_arg_zz, frecpe_fns[a->esz], a, 0) 3601 3602 static gen_helper_gvec_2_ptr * const frsqrte_fns[] = { 3603 NULL, gen_helper_gvec_frsqrte_h, 3604 gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d, 3605 }; 3606 TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_arg_zz, frsqrte_fns[a->esz], a, 0) 3607 3608 /* 3609 *** SVE Floating Point Compare with Zero Group 3610 */ 3611 3612 static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a, 3613 gen_helper_gvec_3_ptr *fn) 3614 { 3615 if (fn == NULL) { 3616 return false; 3617 } 3618 if (sve_access_check(s)) { 3619 unsigned vsz = vec_full_reg_size(s); 3620 TCGv_ptr status = 3621 fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 3622 3623 tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd), 3624 vec_full_reg_offset(s, a->rn), 3625 pred_full_reg_offset(s, a->pg), 3626 status, vsz, vsz, 0, fn); 3627 } 3628 return true; 3629 } 3630 3631 #define DO_PPZ(NAME, name) \ 3632 static gen_helper_gvec_3_ptr * const name##_fns[] = { \ 3633 NULL, gen_helper_sve_##name##_h, \ 3634 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ 3635 }; \ 3636 TRANS_FEAT(NAME, aa64_sve, do_ppz_fp, a, name##_fns[a->esz]) 3637 3638 DO_PPZ(FCMGE_ppz0, fcmge0) 3639 DO_PPZ(FCMGT_ppz0, fcmgt0) 3640 DO_PPZ(FCMLE_ppz0, fcmle0) 3641 DO_PPZ(FCMLT_ppz0, fcmlt0) 3642 DO_PPZ(FCMEQ_ppz0, fcmeq0) 3643 DO_PPZ(FCMNE_ppz0, fcmne0) 3644 3645 #undef DO_PPZ 3646 3647 /* 3648 *** SVE floating-point trig multiply-add coefficient 3649 */ 3650 3651 static gen_helper_gvec_3_ptr * const ftmad_fns[4] = { 3652 NULL, gen_helper_sve_ftmad_h, 3653 gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d, 3654 }; 3655 TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz, 3656 ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, 3657 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3658 3659 /* 3660 *** SVE Floating Point Accumulating Reduction Group 3661 */ 3662 3663 static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) 3664 { 3665 typedef void fadda_fn(TCGv_i64, TCGv_i64, TCGv_ptr, 3666 TCGv_ptr, TCGv_ptr, TCGv_i32); 3667 static fadda_fn * const fns[3] = { 3668 gen_helper_sve_fadda_h, 3669 gen_helper_sve_fadda_s, 3670 gen_helper_sve_fadda_d, 3671 }; 3672 unsigned vsz = vec_full_reg_size(s); 3673 TCGv_ptr t_rm, t_pg, t_fpst; 3674 TCGv_i64 t_val; 3675 TCGv_i32 t_desc; 3676 3677 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3678 return false; 3679 } 3680 s->is_nonstreaming = true; 3681 if (!sve_access_check(s)) { 3682 return true; 3683 } 3684 3685 t_val = load_esz(tcg_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz); 3686 t_rm = tcg_temp_new_ptr(); 3687 t_pg = tcg_temp_new_ptr(); 3688 tcg_gen_addi_ptr(t_rm, tcg_env, vec_full_reg_offset(s, a->rm)); 3689 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg)); 3690 t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 3691 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 3692 3693 fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc); 3694 3695 write_fp_dreg(s, a->rd, t_val); 3696 return true; 3697 } 3698 3699 /* 3700 *** SVE Floating Point Arithmetic - Unpredicated Group 3701 */ 3702 3703 #define DO_FP3(NAME, name) \ 3704 static gen_helper_gvec_3_ptr * const name##_fns[4] = { \ 3705 NULL, gen_helper_gvec_##name##_h, \ 3706 gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ 3707 }; \ 3708 TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0) 3709 3710 DO_FP3(FADD_zzz, fadd) 3711 DO_FP3(FSUB_zzz, fsub) 3712 DO_FP3(FMUL_zzz, fmul) 3713 DO_FP3(FRECPS, recps) 3714 DO_FP3(FRSQRTS, rsqrts) 3715 3716 #undef DO_FP3 3717 3718 static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = { 3719 NULL, gen_helper_gvec_ftsmul_h, 3720 gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d 3721 }; 3722 TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz, 3723 ftsmul_fns[a->esz], a, 0) 3724 3725 /* 3726 *** SVE Floating Point Arithmetic - Predicated Group 3727 */ 3728 3729 #define DO_ZPZZ_FP(NAME, FEAT, name) \ 3730 static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ 3731 NULL, gen_helper_##name##_h, \ 3732 gen_helper_##name##_s, gen_helper_##name##_d \ 3733 }; \ 3734 TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, name##_zpzz_fns[a->esz], a) 3735 3736 DO_ZPZZ_FP(FADD_zpzz, aa64_sve, sve_fadd) 3737 DO_ZPZZ_FP(FSUB_zpzz, aa64_sve, sve_fsub) 3738 DO_ZPZZ_FP(FMUL_zpzz, aa64_sve, sve_fmul) 3739 DO_ZPZZ_FP(FMIN_zpzz, aa64_sve, sve_fmin) 3740 DO_ZPZZ_FP(FMAX_zpzz, aa64_sve, sve_fmax) 3741 DO_ZPZZ_FP(FMINNM_zpzz, aa64_sve, sve_fminnum) 3742 DO_ZPZZ_FP(FMAXNM_zpzz, aa64_sve, sve_fmaxnum) 3743 DO_ZPZZ_FP(FABD, aa64_sve, sve_fabd) 3744 DO_ZPZZ_FP(FSCALE, aa64_sve, sve_fscalbn) 3745 DO_ZPZZ_FP(FDIV, aa64_sve, sve_fdiv) 3746 DO_ZPZZ_FP(FMULX, aa64_sve, sve_fmulx) 3747 3748 typedef void gen_helper_sve_fp2scalar(TCGv_ptr, TCGv_ptr, TCGv_ptr, 3749 TCGv_i64, TCGv_ptr, TCGv_i32); 3750 3751 static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16, 3752 TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn) 3753 { 3754 unsigned vsz = vec_full_reg_size(s); 3755 TCGv_ptr t_zd, t_zn, t_pg, status; 3756 TCGv_i32 desc; 3757 3758 t_zd = tcg_temp_new_ptr(); 3759 t_zn = tcg_temp_new_ptr(); 3760 t_pg = tcg_temp_new_ptr(); 3761 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, zd)); 3762 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, zn)); 3763 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); 3764 3765 status = fpstatus_ptr(is_fp16 ? FPST_A64_F16 : FPST_A64); 3766 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 3767 fn(t_zd, t_zn, t_pg, scalar, status, desc); 3768 } 3769 3770 static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, 3771 gen_helper_sve_fp2scalar *fn) 3772 { 3773 if (fn == NULL) { 3774 return false; 3775 } 3776 if (sve_access_check(s)) { 3777 do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16, 3778 tcg_constant_i64(imm), fn); 3779 } 3780 return true; 3781 } 3782 3783 #define DO_FP_IMM(NAME, name, const0, const1) \ 3784 static gen_helper_sve_fp2scalar * const name##_fns[4] = { \ 3785 NULL, gen_helper_sve_##name##_h, \ 3786 gen_helper_sve_##name##_s, \ 3787 gen_helper_sve_##name##_d \ 3788 }; \ 3789 static uint64_t const name##_const[4][2] = { \ 3790 { -1, -1 }, \ 3791 { float16_##const0, float16_##const1 }, \ 3792 { float32_##const0, float32_##const1 }, \ 3793 { float64_##const0, float64_##const1 }, \ 3794 }; \ 3795 TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \ 3796 name##_const[a->esz][a->imm], name##_fns[a->esz]) 3797 3798 DO_FP_IMM(FADD, fadds, half, one) 3799 DO_FP_IMM(FSUB, fsubs, half, one) 3800 DO_FP_IMM(FMUL, fmuls, half, two) 3801 DO_FP_IMM(FSUBR, fsubrs, half, one) 3802 DO_FP_IMM(FMAXNM, fmaxnms, zero, one) 3803 DO_FP_IMM(FMINNM, fminnms, zero, one) 3804 DO_FP_IMM(FMAX, fmaxs, zero, one) 3805 DO_FP_IMM(FMIN, fmins, zero, one) 3806 3807 #undef DO_FP_IMM 3808 3809 static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a, 3810 gen_helper_gvec_4_ptr *fn) 3811 { 3812 if (fn == NULL) { 3813 return false; 3814 } 3815 if (sve_access_check(s)) { 3816 unsigned vsz = vec_full_reg_size(s); 3817 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 3818 tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd), 3819 vec_full_reg_offset(s, a->rn), 3820 vec_full_reg_offset(s, a->rm), 3821 pred_full_reg_offset(s, a->pg), 3822 status, vsz, vsz, 0, fn); 3823 } 3824 return true; 3825 } 3826 3827 #define DO_FPCMP(NAME, name) \ 3828 static gen_helper_gvec_4_ptr * const name##_fns[4] = { \ 3829 NULL, gen_helper_sve_##name##_h, \ 3830 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ 3831 }; \ 3832 TRANS_FEAT(NAME##_ppzz, aa64_sve, do_fp_cmp, a, name##_fns[a->esz]) 3833 3834 DO_FPCMP(FCMGE, fcmge) 3835 DO_FPCMP(FCMGT, fcmgt) 3836 DO_FPCMP(FCMEQ, fcmeq) 3837 DO_FPCMP(FCMNE, fcmne) 3838 DO_FPCMP(FCMUO, fcmuo) 3839 DO_FPCMP(FACGE, facge) 3840 DO_FPCMP(FACGT, facgt) 3841 3842 #undef DO_FPCMP 3843 3844 static gen_helper_gvec_4_ptr * const fcadd_fns[] = { 3845 NULL, gen_helper_sve_fcadd_h, 3846 gen_helper_sve_fcadd_s, gen_helper_sve_fcadd_d, 3847 }; 3848 TRANS_FEAT(FCADD, aa64_sve, gen_gvec_fpst_zzzp, fcadd_fns[a->esz], 3849 a->rd, a->rn, a->rm, a->pg, a->rot, 3850 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3851 3852 #define DO_FMLA(NAME, name) \ 3853 static gen_helper_gvec_5_ptr * const name##_fns[4] = { \ 3854 NULL, gen_helper_sve_##name##_h, \ 3855 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ 3856 }; \ 3857 TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, name##_fns[a->esz], \ 3858 a->rd, a->rn, a->rm, a->ra, a->pg, 0, \ 3859 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3860 3861 DO_FMLA(FMLA_zpzzz, fmla_zpzzz) 3862 DO_FMLA(FMLS_zpzzz, fmls_zpzzz) 3863 DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz) 3864 DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz) 3865 3866 #undef DO_FMLA 3867 3868 static gen_helper_gvec_5_ptr * const fcmla_fns[4] = { 3869 NULL, gen_helper_sve_fcmla_zpzzz_h, 3870 gen_helper_sve_fcmla_zpzzz_s, gen_helper_sve_fcmla_zpzzz_d, 3871 }; 3872 TRANS_FEAT(FCMLA_zpzzz, aa64_sve, gen_gvec_fpst_zzzzp, fcmla_fns[a->esz], 3873 a->rd, a->rn, a->rm, a->ra, a->pg, a->rot, 3874 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3875 3876 static gen_helper_gvec_4_ptr * const fcmla_idx_fns[4] = { 3877 NULL, gen_helper_gvec_fcmlah_idx, gen_helper_gvec_fcmlas_idx, NULL 3878 }; 3879 TRANS_FEAT(FCMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz, fcmla_idx_fns[a->esz], 3880 a->rd, a->rn, a->rm, a->ra, a->index * 4 + a->rot, 3881 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3882 3883 /* 3884 *** SVE Floating Point Unary Operations Predicated Group 3885 */ 3886 3887 TRANS_FEAT(FCVT_sh, aa64_sve, gen_gvec_fpst_arg_zpz, 3888 gen_helper_sve_fcvt_sh, a, 0, FPST_A64) 3889 TRANS_FEAT(FCVT_hs, aa64_sve, gen_gvec_fpst_arg_zpz, 3890 gen_helper_sve_fcvt_hs, a, 0, FPST_A64_F16) 3891 3892 TRANS_FEAT(BFCVT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, 3893 gen_helper_sve_bfcvt, a, 0, FPST_A64) 3894 3895 TRANS_FEAT(FCVT_dh, aa64_sve, gen_gvec_fpst_arg_zpz, 3896 gen_helper_sve_fcvt_dh, a, 0, FPST_A64) 3897 TRANS_FEAT(FCVT_hd, aa64_sve, gen_gvec_fpst_arg_zpz, 3898 gen_helper_sve_fcvt_hd, a, 0, FPST_A64_F16) 3899 TRANS_FEAT(FCVT_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 3900 gen_helper_sve_fcvt_ds, a, 0, FPST_A64) 3901 TRANS_FEAT(FCVT_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 3902 gen_helper_sve_fcvt_sd, a, 0, FPST_A64) 3903 3904 TRANS_FEAT(FCVTZS_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 3905 gen_helper_sve_fcvtzs_hh, a, 0, FPST_A64_F16) 3906 TRANS_FEAT(FCVTZU_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 3907 gen_helper_sve_fcvtzu_hh, a, 0, FPST_A64_F16) 3908 TRANS_FEAT(FCVTZS_hs, aa64_sve, gen_gvec_fpst_arg_zpz, 3909 gen_helper_sve_fcvtzs_hs, a, 0, FPST_A64_F16) 3910 TRANS_FEAT(FCVTZU_hs, aa64_sve, gen_gvec_fpst_arg_zpz, 3911 gen_helper_sve_fcvtzu_hs, a, 0, FPST_A64_F16) 3912 TRANS_FEAT(FCVTZS_hd, aa64_sve, gen_gvec_fpst_arg_zpz, 3913 gen_helper_sve_fcvtzs_hd, a, 0, FPST_A64_F16) 3914 TRANS_FEAT(FCVTZU_hd, aa64_sve, gen_gvec_fpst_arg_zpz, 3915 gen_helper_sve_fcvtzu_hd, a, 0, FPST_A64_F16) 3916 3917 TRANS_FEAT(FCVTZS_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 3918 gen_helper_sve_fcvtzs_ss, a, 0, FPST_A64) 3919 TRANS_FEAT(FCVTZU_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 3920 gen_helper_sve_fcvtzu_ss, a, 0, FPST_A64) 3921 TRANS_FEAT(FCVTZS_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 3922 gen_helper_sve_fcvtzs_sd, a, 0, FPST_A64) 3923 TRANS_FEAT(FCVTZU_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 3924 gen_helper_sve_fcvtzu_sd, a, 0, FPST_A64) 3925 TRANS_FEAT(FCVTZS_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 3926 gen_helper_sve_fcvtzs_ds, a, 0, FPST_A64) 3927 TRANS_FEAT(FCVTZU_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 3928 gen_helper_sve_fcvtzu_ds, a, 0, FPST_A64) 3929 3930 TRANS_FEAT(FCVTZS_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 3931 gen_helper_sve_fcvtzs_dd, a, 0, FPST_A64) 3932 TRANS_FEAT(FCVTZU_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 3933 gen_helper_sve_fcvtzu_dd, a, 0, FPST_A64) 3934 3935 static gen_helper_gvec_3_ptr * const frint_fns[] = { 3936 NULL, 3937 gen_helper_sve_frint_h, 3938 gen_helper_sve_frint_s, 3939 gen_helper_sve_frint_d 3940 }; 3941 TRANS_FEAT(FRINTI, aa64_sve, gen_gvec_fpst_arg_zpz, frint_fns[a->esz], 3942 a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3943 3944 static gen_helper_gvec_3_ptr * const frintx_fns[] = { 3945 NULL, 3946 gen_helper_sve_frintx_h, 3947 gen_helper_sve_frintx_s, 3948 gen_helper_sve_frintx_d 3949 }; 3950 TRANS_FEAT(FRINTX, aa64_sve, gen_gvec_fpst_arg_zpz, frintx_fns[a->esz], 3951 a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 3952 3953 static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, 3954 ARMFPRounding mode, gen_helper_gvec_3_ptr *fn) 3955 { 3956 unsigned vsz; 3957 TCGv_i32 tmode; 3958 TCGv_ptr status; 3959 3960 if (fn == NULL) { 3961 return false; 3962 } 3963 if (!sve_access_check(s)) { 3964 return true; 3965 } 3966 3967 vsz = vec_full_reg_size(s); 3968 status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 3969 tmode = gen_set_rmode(mode, status); 3970 3971 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), 3972 vec_full_reg_offset(s, a->rn), 3973 pred_full_reg_offset(s, a->pg), 3974 status, vsz, vsz, 0, fn); 3975 3976 gen_restore_rmode(tmode, status); 3977 return true; 3978 } 3979 3980 TRANS_FEAT(FRINTN, aa64_sve, do_frint_mode, a, 3981 FPROUNDING_TIEEVEN, frint_fns[a->esz]) 3982 TRANS_FEAT(FRINTP, aa64_sve, do_frint_mode, a, 3983 FPROUNDING_POSINF, frint_fns[a->esz]) 3984 TRANS_FEAT(FRINTM, aa64_sve, do_frint_mode, a, 3985 FPROUNDING_NEGINF, frint_fns[a->esz]) 3986 TRANS_FEAT(FRINTZ, aa64_sve, do_frint_mode, a, 3987 FPROUNDING_ZERO, frint_fns[a->esz]) 3988 TRANS_FEAT(FRINTA, aa64_sve, do_frint_mode, a, 3989 FPROUNDING_TIEAWAY, frint_fns[a->esz]) 3990 3991 static gen_helper_gvec_3_ptr * const frecpx_fns[] = { 3992 NULL, gen_helper_sve_frecpx_h, 3993 gen_helper_sve_frecpx_s, gen_helper_sve_frecpx_d, 3994 }; 3995 TRANS_FEAT(FRECPX, aa64_sve, gen_gvec_fpst_arg_zpz, frecpx_fns[a->esz], 3996 a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3997 3998 static gen_helper_gvec_3_ptr * const fsqrt_fns[] = { 3999 NULL, gen_helper_sve_fsqrt_h, 4000 gen_helper_sve_fsqrt_s, gen_helper_sve_fsqrt_d, 4001 }; 4002 TRANS_FEAT(FSQRT, aa64_sve, gen_gvec_fpst_arg_zpz, fsqrt_fns[a->esz], 4003 a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 4004 4005 TRANS_FEAT(SCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 4006 gen_helper_sve_scvt_hh, a, 0, FPST_A64_F16) 4007 TRANS_FEAT(SCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz, 4008 gen_helper_sve_scvt_sh, a, 0, FPST_A64_F16) 4009 TRANS_FEAT(SCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz, 4010 gen_helper_sve_scvt_dh, a, 0, FPST_A64_F16) 4011 4012 TRANS_FEAT(SCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 4013 gen_helper_sve_scvt_ss, a, 0, FPST_A64) 4014 TRANS_FEAT(SCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4015 gen_helper_sve_scvt_ds, a, 0, FPST_A64) 4016 4017 TRANS_FEAT(SCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4018 gen_helper_sve_scvt_sd, a, 0, FPST_A64) 4019 TRANS_FEAT(SCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 4020 gen_helper_sve_scvt_dd, a, 0, FPST_A64) 4021 4022 TRANS_FEAT(UCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 4023 gen_helper_sve_ucvt_hh, a, 0, FPST_A64_F16) 4024 TRANS_FEAT(UCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz, 4025 gen_helper_sve_ucvt_sh, a, 0, FPST_A64_F16) 4026 TRANS_FEAT(UCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz, 4027 gen_helper_sve_ucvt_dh, a, 0, FPST_A64_F16) 4028 4029 TRANS_FEAT(UCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 4030 gen_helper_sve_ucvt_ss, a, 0, FPST_A64) 4031 TRANS_FEAT(UCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4032 gen_helper_sve_ucvt_ds, a, 0, FPST_A64) 4033 TRANS_FEAT(UCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4034 gen_helper_sve_ucvt_sd, a, 0, FPST_A64) 4035 4036 TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 4037 gen_helper_sve_ucvt_dd, a, 0, FPST_A64) 4038 4039 /* 4040 *** SVE Memory - 32-bit Gather and Unsized Contiguous Group 4041 */ 4042 4043 /* Subroutine loading a vector register at VOFS of LEN bytes. 4044 * The load should begin at the address Rn + IMM. 4045 */ 4046 4047 void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, 4048 int len, int rn, int imm) 4049 { 4050 int len_align = QEMU_ALIGN_DOWN(len, 16); 4051 int len_remain = len % 16; 4052 int nparts = len / 16 + ctpop8(len_remain); 4053 int midx = get_mem_index(s); 4054 TCGv_i64 dirty_addr, clean_addr, t0, t1; 4055 TCGv_i128 t16; 4056 4057 dirty_addr = tcg_temp_new_i64(); 4058 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); 4059 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8); 4060 4061 /* 4062 * Note that unpredicated load/store of vector/predicate registers 4063 * are defined as a stream of bytes, which equates to little-endian 4064 * operations on larger quantities. 4065 * Attempt to keep code expansion to a minimum by limiting the 4066 * amount of unrolling done. 4067 */ 4068 if (nparts <= 4) { 4069 int i; 4070 4071 t0 = tcg_temp_new_i64(); 4072 t1 = tcg_temp_new_i64(); 4073 t16 = tcg_temp_new_i128(); 4074 4075 for (i = 0; i < len_align; i += 16) { 4076 tcg_gen_qemu_ld_i128(t16, clean_addr, midx, 4077 MO_LE | MO_128 | MO_ATOM_NONE); 4078 tcg_gen_extr_i128_i64(t0, t1, t16); 4079 tcg_gen_st_i64(t0, base, vofs + i); 4080 tcg_gen_st_i64(t1, base, vofs + i + 8); 4081 tcg_gen_addi_i64(clean_addr, clean_addr, 16); 4082 } 4083 } else { 4084 TCGLabel *loop = gen_new_label(); 4085 TCGv_ptr tp, i = tcg_temp_new_ptr(); 4086 4087 tcg_gen_movi_ptr(i, 0); 4088 gen_set_label(loop); 4089 4090 t16 = tcg_temp_new_i128(); 4091 tcg_gen_qemu_ld_i128(t16, clean_addr, midx, 4092 MO_LE | MO_128 | MO_ATOM_NONE); 4093 tcg_gen_addi_i64(clean_addr, clean_addr, 16); 4094 4095 tp = tcg_temp_new_ptr(); 4096 tcg_gen_add_ptr(tp, base, i); 4097 tcg_gen_addi_ptr(i, i, 16); 4098 4099 t0 = tcg_temp_new_i64(); 4100 t1 = tcg_temp_new_i64(); 4101 tcg_gen_extr_i128_i64(t0, t1, t16); 4102 4103 tcg_gen_st_i64(t0, tp, vofs); 4104 tcg_gen_st_i64(t1, tp, vofs + 8); 4105 4106 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); 4107 } 4108 4109 /* 4110 * Predicate register loads can be any multiple of 2. 4111 * Note that we still store the entire 64-bit unit into tcg_env. 4112 */ 4113 if (len_remain >= 8) { 4114 t0 = tcg_temp_new_i64(); 4115 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE); 4116 tcg_gen_st_i64(t0, base, vofs + len_align); 4117 len_remain -= 8; 4118 len_align += 8; 4119 if (len_remain) { 4120 tcg_gen_addi_i64(clean_addr, clean_addr, 8); 4121 } 4122 } 4123 if (len_remain) { 4124 t0 = tcg_temp_new_i64(); 4125 switch (len_remain) { 4126 case 2: 4127 case 4: 4128 case 8: 4129 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, 4130 MO_LE | ctz32(len_remain) | MO_ATOM_NONE); 4131 break; 4132 4133 case 6: 4134 t1 = tcg_temp_new_i64(); 4135 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL | MO_ATOM_NONE); 4136 tcg_gen_addi_i64(clean_addr, clean_addr, 4); 4137 tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW | MO_ATOM_NONE); 4138 tcg_gen_deposit_i64(t0, t0, t1, 32, 32); 4139 break; 4140 4141 default: 4142 g_assert_not_reached(); 4143 } 4144 tcg_gen_st_i64(t0, base, vofs + len_align); 4145 } 4146 } 4147 4148 /* Similarly for stores. */ 4149 void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, 4150 int len, int rn, int imm) 4151 { 4152 int len_align = QEMU_ALIGN_DOWN(len, 16); 4153 int len_remain = len % 16; 4154 int nparts = len / 16 + ctpop8(len_remain); 4155 int midx = get_mem_index(s); 4156 TCGv_i64 dirty_addr, clean_addr, t0, t1; 4157 TCGv_i128 t16; 4158 4159 dirty_addr = tcg_temp_new_i64(); 4160 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); 4161 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8); 4162 4163 /* Note that unpredicated load/store of vector/predicate registers 4164 * are defined as a stream of bytes, which equates to little-endian 4165 * operations on larger quantities. There is no nice way to force 4166 * a little-endian store for aarch64_be-linux-user out of line. 4167 * 4168 * Attempt to keep code expansion to a minimum by limiting the 4169 * amount of unrolling done. 4170 */ 4171 if (nparts <= 4) { 4172 int i; 4173 4174 t0 = tcg_temp_new_i64(); 4175 t1 = tcg_temp_new_i64(); 4176 t16 = tcg_temp_new_i128(); 4177 for (i = 0; i < len_align; i += 16) { 4178 tcg_gen_ld_i64(t0, base, vofs + i); 4179 tcg_gen_ld_i64(t1, base, vofs + i + 8); 4180 tcg_gen_concat_i64_i128(t16, t0, t1); 4181 tcg_gen_qemu_st_i128(t16, clean_addr, midx, 4182 MO_LE | MO_128 | MO_ATOM_NONE); 4183 tcg_gen_addi_i64(clean_addr, clean_addr, 16); 4184 } 4185 } else { 4186 TCGLabel *loop = gen_new_label(); 4187 TCGv_ptr tp, i = tcg_temp_new_ptr(); 4188 4189 tcg_gen_movi_ptr(i, 0); 4190 gen_set_label(loop); 4191 4192 t0 = tcg_temp_new_i64(); 4193 t1 = tcg_temp_new_i64(); 4194 tp = tcg_temp_new_ptr(); 4195 tcg_gen_add_ptr(tp, base, i); 4196 tcg_gen_ld_i64(t0, tp, vofs); 4197 tcg_gen_ld_i64(t1, tp, vofs + 8); 4198 tcg_gen_addi_ptr(i, i, 16); 4199 4200 t16 = tcg_temp_new_i128(); 4201 tcg_gen_concat_i64_i128(t16, t0, t1); 4202 4203 tcg_gen_qemu_st_i128(t16, clean_addr, midx, 4204 MO_LE | MO_128 | MO_ATOM_NONE); 4205 tcg_gen_addi_i64(clean_addr, clean_addr, 16); 4206 4207 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); 4208 } 4209 4210 /* Predicate register stores can be any multiple of 2. */ 4211 if (len_remain >= 8) { 4212 t0 = tcg_temp_new_i64(); 4213 tcg_gen_ld_i64(t0, base, vofs + len_align); 4214 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE); 4215 len_remain -= 8; 4216 len_align += 8; 4217 if (len_remain) { 4218 tcg_gen_addi_i64(clean_addr, clean_addr, 8); 4219 } 4220 } 4221 if (len_remain) { 4222 t0 = tcg_temp_new_i64(); 4223 tcg_gen_ld_i64(t0, base, vofs + len_align); 4224 4225 switch (len_remain) { 4226 case 2: 4227 case 4: 4228 case 8: 4229 tcg_gen_qemu_st_i64(t0, clean_addr, midx, 4230 MO_LE | ctz32(len_remain) | MO_ATOM_NONE); 4231 break; 4232 4233 case 6: 4234 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL | MO_ATOM_NONE); 4235 tcg_gen_addi_i64(clean_addr, clean_addr, 4); 4236 tcg_gen_shri_i64(t0, t0, 32); 4237 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW | MO_ATOM_NONE); 4238 break; 4239 4240 default: 4241 g_assert_not_reached(); 4242 } 4243 } 4244 } 4245 4246 static bool trans_LDR_zri(DisasContext *s, arg_rri *a) 4247 { 4248 if (!dc_isar_feature(aa64_sve, s)) { 4249 return false; 4250 } 4251 if (sve_access_check(s)) { 4252 int size = vec_full_reg_size(s); 4253 int off = vec_full_reg_offset(s, a->rd); 4254 gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size); 4255 } 4256 return true; 4257 } 4258 4259 static bool trans_LDR_pri(DisasContext *s, arg_rri *a) 4260 { 4261 if (!dc_isar_feature(aa64_sve, s)) { 4262 return false; 4263 } 4264 if (sve_access_check(s)) { 4265 int size = pred_full_reg_size(s); 4266 int off = pred_full_reg_offset(s, a->rd); 4267 gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size); 4268 } 4269 return true; 4270 } 4271 4272 static bool trans_STR_zri(DisasContext *s, arg_rri *a) 4273 { 4274 if (!dc_isar_feature(aa64_sve, s)) { 4275 return false; 4276 } 4277 if (sve_access_check(s)) { 4278 int size = vec_full_reg_size(s); 4279 int off = vec_full_reg_offset(s, a->rd); 4280 gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size); 4281 } 4282 return true; 4283 } 4284 4285 static bool trans_STR_pri(DisasContext *s, arg_rri *a) 4286 { 4287 if (!dc_isar_feature(aa64_sve, s)) { 4288 return false; 4289 } 4290 if (sve_access_check(s)) { 4291 int size = pred_full_reg_size(s); 4292 int off = pred_full_reg_offset(s, a->rd); 4293 gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size); 4294 } 4295 return true; 4296 } 4297 4298 /* 4299 *** SVE Memory - Contiguous Load Group 4300 */ 4301 4302 /* The memory mode of the dtype. */ 4303 static const MemOp dtype_mop[16] = { 4304 MO_UB, MO_UB, MO_UB, MO_UB, 4305 MO_SL, MO_UW, MO_UW, MO_UW, 4306 MO_SW, MO_SW, MO_UL, MO_UL, 4307 MO_SB, MO_SB, MO_SB, MO_UQ 4308 }; 4309 4310 #define dtype_msz(x) (dtype_mop[x] & MO_SIZE) 4311 4312 /* The vector element size of dtype. */ 4313 static const uint8_t dtype_esz[16] = { 4314 0, 1, 2, 3, 4315 3, 1, 2, 3, 4316 3, 2, 2, 3, 4317 3, 2, 1, 3 4318 }; 4319 4320 uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, 4321 uint32_t msz, bool is_write, uint32_t data) 4322 { 4323 uint32_t sizem1; 4324 uint32_t desc = 0; 4325 4326 /* Assert all of the data fits, with or without MTE enabled. */ 4327 assert(nregs >= 1 && nregs <= 4); 4328 sizem1 = (nregs << msz) - 1; 4329 assert(sizem1 <= R_MTEDESC_SIZEM1_MASK >> R_MTEDESC_SIZEM1_SHIFT); 4330 assert(data < 1u << SVE_MTEDESC_SHIFT); 4331 4332 if (s->mte_active[0]) { 4333 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); 4334 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); 4335 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); 4336 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); 4337 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, sizem1); 4338 desc <<= SVE_MTEDESC_SHIFT; 4339 } 4340 return simd_desc(vsz, vsz, desc | data); 4341 } 4342 4343 static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, 4344 int dtype, uint32_t nregs, bool is_write, 4345 gen_helper_gvec_mem *fn) 4346 { 4347 TCGv_ptr t_pg; 4348 uint32_t desc; 4349 4350 if (!s->mte_active[0]) { 4351 addr = clean_data_tbi(s, addr); 4352 } 4353 4354 /* 4355 * For e.g. LD4, there are not enough arguments to pass all 4 4356 * registers as pointers, so encode the regno into the data field. 4357 * For consistency, do this even for LD1. 4358 */ 4359 desc = make_svemte_desc(s, vec_full_reg_size(s), nregs, 4360 dtype_msz(dtype), is_write, zt); 4361 t_pg = tcg_temp_new_ptr(); 4362 4363 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); 4364 fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); 4365 } 4366 4367 /* Indexed by [mte][be][dtype][nreg] */ 4368 static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = { 4369 { /* mte inactive, little-endian */ 4370 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, 4371 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, 4372 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, 4373 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, 4374 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, 4375 4376 { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL }, 4377 { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r, 4378 gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r }, 4379 { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL }, 4380 { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL }, 4381 4382 { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL }, 4383 { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL }, 4384 { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r, 4385 gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r }, 4386 { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL }, 4387 4388 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, 4389 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, 4390 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, 4391 { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r, 4392 gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } }, 4393 4394 /* mte inactive, big-endian */ 4395 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, 4396 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, 4397 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, 4398 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, 4399 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, 4400 4401 { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL }, 4402 { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r, 4403 gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r }, 4404 { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL }, 4405 { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL }, 4406 4407 { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL }, 4408 { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL }, 4409 { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r, 4410 gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r }, 4411 { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL }, 4412 4413 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, 4414 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, 4415 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, 4416 { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r, 4417 gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } }, 4418 4419 { /* mte active, little-endian */ 4420 { { gen_helper_sve_ld1bb_r_mte, 4421 gen_helper_sve_ld2bb_r_mte, 4422 gen_helper_sve_ld3bb_r_mte, 4423 gen_helper_sve_ld4bb_r_mte }, 4424 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL }, 4425 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL }, 4426 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL }, 4427 4428 { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL }, 4429 { gen_helper_sve_ld1hh_le_r_mte, 4430 gen_helper_sve_ld2hh_le_r_mte, 4431 gen_helper_sve_ld3hh_le_r_mte, 4432 gen_helper_sve_ld4hh_le_r_mte }, 4433 { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL }, 4434 { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL }, 4435 4436 { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL }, 4437 { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL }, 4438 { gen_helper_sve_ld1ss_le_r_mte, 4439 gen_helper_sve_ld2ss_le_r_mte, 4440 gen_helper_sve_ld3ss_le_r_mte, 4441 gen_helper_sve_ld4ss_le_r_mte }, 4442 { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL }, 4443 4444 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL }, 4445 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL }, 4446 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL }, 4447 { gen_helper_sve_ld1dd_le_r_mte, 4448 gen_helper_sve_ld2dd_le_r_mte, 4449 gen_helper_sve_ld3dd_le_r_mte, 4450 gen_helper_sve_ld4dd_le_r_mte } }, 4451 4452 /* mte active, big-endian */ 4453 { { gen_helper_sve_ld1bb_r_mte, 4454 gen_helper_sve_ld2bb_r_mte, 4455 gen_helper_sve_ld3bb_r_mte, 4456 gen_helper_sve_ld4bb_r_mte }, 4457 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL }, 4458 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL }, 4459 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL }, 4460 4461 { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL }, 4462 { gen_helper_sve_ld1hh_be_r_mte, 4463 gen_helper_sve_ld2hh_be_r_mte, 4464 gen_helper_sve_ld3hh_be_r_mte, 4465 gen_helper_sve_ld4hh_be_r_mte }, 4466 { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL }, 4467 { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL }, 4468 4469 { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL }, 4470 { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL }, 4471 { gen_helper_sve_ld1ss_be_r_mte, 4472 gen_helper_sve_ld2ss_be_r_mte, 4473 gen_helper_sve_ld3ss_be_r_mte, 4474 gen_helper_sve_ld4ss_be_r_mte }, 4475 { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL }, 4476 4477 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL }, 4478 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL }, 4479 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL }, 4480 { gen_helper_sve_ld1dd_be_r_mte, 4481 gen_helper_sve_ld2dd_be_r_mte, 4482 gen_helper_sve_ld3dd_be_r_mte, 4483 gen_helper_sve_ld4dd_be_r_mte } } }, 4484 }; 4485 4486 static void do_ld_zpa(DisasContext *s, int zt, int pg, 4487 TCGv_i64 addr, int dtype, int nreg) 4488 { 4489 gen_helper_gvec_mem *fn 4490 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg]; 4491 4492 /* 4493 * While there are holes in the table, they are not 4494 * accessible via the instruction encoding. 4495 */ 4496 assert(fn != NULL); 4497 do_mem_zpa(s, zt, pg, addr, dtype, nreg + 1, false, fn); 4498 } 4499 4500 static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) 4501 { 4502 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { 4503 return false; 4504 } 4505 if (sve_access_check(s)) { 4506 TCGv_i64 addr = tcg_temp_new_i64(); 4507 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); 4508 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 4509 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); 4510 } 4511 return true; 4512 } 4513 4514 static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a) 4515 { 4516 if (!dc_isar_feature(aa64_sve, s)) { 4517 return false; 4518 } 4519 if (sve_access_check(s)) { 4520 int vsz = vec_full_reg_size(s); 4521 int elements = vsz >> dtype_esz[a->dtype]; 4522 TCGv_i64 addr = tcg_temp_new_i64(); 4523 4524 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), 4525 (a->imm * elements * (a->nreg + 1)) 4526 << dtype_msz(a->dtype)); 4527 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); 4528 } 4529 return true; 4530 } 4531 4532 static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) 4533 { 4534 static gen_helper_gvec_mem * const fns[2][2][16] = { 4535 { /* mte inactive, little-endian */ 4536 { gen_helper_sve_ldff1bb_r, 4537 gen_helper_sve_ldff1bhu_r, 4538 gen_helper_sve_ldff1bsu_r, 4539 gen_helper_sve_ldff1bdu_r, 4540 4541 gen_helper_sve_ldff1sds_le_r, 4542 gen_helper_sve_ldff1hh_le_r, 4543 gen_helper_sve_ldff1hsu_le_r, 4544 gen_helper_sve_ldff1hdu_le_r, 4545 4546 gen_helper_sve_ldff1hds_le_r, 4547 gen_helper_sve_ldff1hss_le_r, 4548 gen_helper_sve_ldff1ss_le_r, 4549 gen_helper_sve_ldff1sdu_le_r, 4550 4551 gen_helper_sve_ldff1bds_r, 4552 gen_helper_sve_ldff1bss_r, 4553 gen_helper_sve_ldff1bhs_r, 4554 gen_helper_sve_ldff1dd_le_r }, 4555 4556 /* mte inactive, big-endian */ 4557 { gen_helper_sve_ldff1bb_r, 4558 gen_helper_sve_ldff1bhu_r, 4559 gen_helper_sve_ldff1bsu_r, 4560 gen_helper_sve_ldff1bdu_r, 4561 4562 gen_helper_sve_ldff1sds_be_r, 4563 gen_helper_sve_ldff1hh_be_r, 4564 gen_helper_sve_ldff1hsu_be_r, 4565 gen_helper_sve_ldff1hdu_be_r, 4566 4567 gen_helper_sve_ldff1hds_be_r, 4568 gen_helper_sve_ldff1hss_be_r, 4569 gen_helper_sve_ldff1ss_be_r, 4570 gen_helper_sve_ldff1sdu_be_r, 4571 4572 gen_helper_sve_ldff1bds_r, 4573 gen_helper_sve_ldff1bss_r, 4574 gen_helper_sve_ldff1bhs_r, 4575 gen_helper_sve_ldff1dd_be_r } }, 4576 4577 { /* mte active, little-endian */ 4578 { gen_helper_sve_ldff1bb_r_mte, 4579 gen_helper_sve_ldff1bhu_r_mte, 4580 gen_helper_sve_ldff1bsu_r_mte, 4581 gen_helper_sve_ldff1bdu_r_mte, 4582 4583 gen_helper_sve_ldff1sds_le_r_mte, 4584 gen_helper_sve_ldff1hh_le_r_mte, 4585 gen_helper_sve_ldff1hsu_le_r_mte, 4586 gen_helper_sve_ldff1hdu_le_r_mte, 4587 4588 gen_helper_sve_ldff1hds_le_r_mte, 4589 gen_helper_sve_ldff1hss_le_r_mte, 4590 gen_helper_sve_ldff1ss_le_r_mte, 4591 gen_helper_sve_ldff1sdu_le_r_mte, 4592 4593 gen_helper_sve_ldff1bds_r_mte, 4594 gen_helper_sve_ldff1bss_r_mte, 4595 gen_helper_sve_ldff1bhs_r_mte, 4596 gen_helper_sve_ldff1dd_le_r_mte }, 4597 4598 /* mte active, big-endian */ 4599 { gen_helper_sve_ldff1bb_r_mte, 4600 gen_helper_sve_ldff1bhu_r_mte, 4601 gen_helper_sve_ldff1bsu_r_mte, 4602 gen_helper_sve_ldff1bdu_r_mte, 4603 4604 gen_helper_sve_ldff1sds_be_r_mte, 4605 gen_helper_sve_ldff1hh_be_r_mte, 4606 gen_helper_sve_ldff1hsu_be_r_mte, 4607 gen_helper_sve_ldff1hdu_be_r_mte, 4608 4609 gen_helper_sve_ldff1hds_be_r_mte, 4610 gen_helper_sve_ldff1hss_be_r_mte, 4611 gen_helper_sve_ldff1ss_be_r_mte, 4612 gen_helper_sve_ldff1sdu_be_r_mte, 4613 4614 gen_helper_sve_ldff1bds_r_mte, 4615 gen_helper_sve_ldff1bss_r_mte, 4616 gen_helper_sve_ldff1bhs_r_mte, 4617 gen_helper_sve_ldff1dd_be_r_mte } }, 4618 }; 4619 4620 if (!dc_isar_feature(aa64_sve, s)) { 4621 return false; 4622 } 4623 s->is_nonstreaming = true; 4624 if (sve_access_check(s)) { 4625 TCGv_i64 addr = tcg_temp_new_i64(); 4626 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); 4627 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 4628 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false, 4629 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]); 4630 } 4631 return true; 4632 } 4633 4634 static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a) 4635 { 4636 static gen_helper_gvec_mem * const fns[2][2][16] = { 4637 { /* mte inactive, little-endian */ 4638 { gen_helper_sve_ldnf1bb_r, 4639 gen_helper_sve_ldnf1bhu_r, 4640 gen_helper_sve_ldnf1bsu_r, 4641 gen_helper_sve_ldnf1bdu_r, 4642 4643 gen_helper_sve_ldnf1sds_le_r, 4644 gen_helper_sve_ldnf1hh_le_r, 4645 gen_helper_sve_ldnf1hsu_le_r, 4646 gen_helper_sve_ldnf1hdu_le_r, 4647 4648 gen_helper_sve_ldnf1hds_le_r, 4649 gen_helper_sve_ldnf1hss_le_r, 4650 gen_helper_sve_ldnf1ss_le_r, 4651 gen_helper_sve_ldnf1sdu_le_r, 4652 4653 gen_helper_sve_ldnf1bds_r, 4654 gen_helper_sve_ldnf1bss_r, 4655 gen_helper_sve_ldnf1bhs_r, 4656 gen_helper_sve_ldnf1dd_le_r }, 4657 4658 /* mte inactive, big-endian */ 4659 { gen_helper_sve_ldnf1bb_r, 4660 gen_helper_sve_ldnf1bhu_r, 4661 gen_helper_sve_ldnf1bsu_r, 4662 gen_helper_sve_ldnf1bdu_r, 4663 4664 gen_helper_sve_ldnf1sds_be_r, 4665 gen_helper_sve_ldnf1hh_be_r, 4666 gen_helper_sve_ldnf1hsu_be_r, 4667 gen_helper_sve_ldnf1hdu_be_r, 4668 4669 gen_helper_sve_ldnf1hds_be_r, 4670 gen_helper_sve_ldnf1hss_be_r, 4671 gen_helper_sve_ldnf1ss_be_r, 4672 gen_helper_sve_ldnf1sdu_be_r, 4673 4674 gen_helper_sve_ldnf1bds_r, 4675 gen_helper_sve_ldnf1bss_r, 4676 gen_helper_sve_ldnf1bhs_r, 4677 gen_helper_sve_ldnf1dd_be_r } }, 4678 4679 { /* mte inactive, little-endian */ 4680 { gen_helper_sve_ldnf1bb_r_mte, 4681 gen_helper_sve_ldnf1bhu_r_mte, 4682 gen_helper_sve_ldnf1bsu_r_mte, 4683 gen_helper_sve_ldnf1bdu_r_mte, 4684 4685 gen_helper_sve_ldnf1sds_le_r_mte, 4686 gen_helper_sve_ldnf1hh_le_r_mte, 4687 gen_helper_sve_ldnf1hsu_le_r_mte, 4688 gen_helper_sve_ldnf1hdu_le_r_mte, 4689 4690 gen_helper_sve_ldnf1hds_le_r_mte, 4691 gen_helper_sve_ldnf1hss_le_r_mte, 4692 gen_helper_sve_ldnf1ss_le_r_mte, 4693 gen_helper_sve_ldnf1sdu_le_r_mte, 4694 4695 gen_helper_sve_ldnf1bds_r_mte, 4696 gen_helper_sve_ldnf1bss_r_mte, 4697 gen_helper_sve_ldnf1bhs_r_mte, 4698 gen_helper_sve_ldnf1dd_le_r_mte }, 4699 4700 /* mte inactive, big-endian */ 4701 { gen_helper_sve_ldnf1bb_r_mte, 4702 gen_helper_sve_ldnf1bhu_r_mte, 4703 gen_helper_sve_ldnf1bsu_r_mte, 4704 gen_helper_sve_ldnf1bdu_r_mte, 4705 4706 gen_helper_sve_ldnf1sds_be_r_mte, 4707 gen_helper_sve_ldnf1hh_be_r_mte, 4708 gen_helper_sve_ldnf1hsu_be_r_mte, 4709 gen_helper_sve_ldnf1hdu_be_r_mte, 4710 4711 gen_helper_sve_ldnf1hds_be_r_mte, 4712 gen_helper_sve_ldnf1hss_be_r_mte, 4713 gen_helper_sve_ldnf1ss_be_r_mte, 4714 gen_helper_sve_ldnf1sdu_be_r_mte, 4715 4716 gen_helper_sve_ldnf1bds_r_mte, 4717 gen_helper_sve_ldnf1bss_r_mte, 4718 gen_helper_sve_ldnf1bhs_r_mte, 4719 gen_helper_sve_ldnf1dd_be_r_mte } }, 4720 }; 4721 4722 if (!dc_isar_feature(aa64_sve, s)) { 4723 return false; 4724 } 4725 s->is_nonstreaming = true; 4726 if (sve_access_check(s)) { 4727 int vsz = vec_full_reg_size(s); 4728 int elements = vsz >> dtype_esz[a->dtype]; 4729 int off = (a->imm * elements) << dtype_msz(a->dtype); 4730 TCGv_i64 addr = tcg_temp_new_i64(); 4731 4732 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off); 4733 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false, 4734 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]); 4735 } 4736 return true; 4737 } 4738 4739 static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) 4740 { 4741 unsigned vsz = vec_full_reg_size(s); 4742 TCGv_ptr t_pg; 4743 int poff; 4744 uint32_t desc; 4745 4746 /* Load the first quadword using the normal predicated load helpers. */ 4747 if (!s->mte_active[0]) { 4748 addr = clean_data_tbi(s, addr); 4749 } 4750 4751 poff = pred_full_reg_offset(s, pg); 4752 if (vsz > 16) { 4753 /* 4754 * Zero-extend the first 16 bits of the predicate into a temporary. 4755 * This avoids triggering an assert making sure we don't have bits 4756 * set within a predicate beyond VQ, but we have lowered VQ to 1 4757 * for this load operation. 4758 */ 4759 TCGv_i64 tmp = tcg_temp_new_i64(); 4760 #if HOST_BIG_ENDIAN 4761 poff += 6; 4762 #endif 4763 tcg_gen_ld16u_i64(tmp, tcg_env, poff); 4764 4765 poff = offsetof(CPUARMState, vfp.preg_tmp); 4766 tcg_gen_st_i64(tmp, tcg_env, poff); 4767 } 4768 4769 t_pg = tcg_temp_new_ptr(); 4770 tcg_gen_addi_ptr(t_pg, tcg_env, poff); 4771 4772 gen_helper_gvec_mem *fn 4773 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; 4774 desc = make_svemte_desc(s, 16, 1, dtype_msz(dtype), false, zt); 4775 fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); 4776 4777 /* Replicate that first quadword. */ 4778 if (vsz > 16) { 4779 int doff = vec_full_reg_offset(s, zt); 4780 tcg_gen_gvec_dup_mem(4, doff + 16, doff, vsz - 16, vsz - 16); 4781 } 4782 } 4783 4784 static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a) 4785 { 4786 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { 4787 return false; 4788 } 4789 if (sve_access_check(s)) { 4790 int msz = dtype_msz(a->dtype); 4791 TCGv_i64 addr = tcg_temp_new_i64(); 4792 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz); 4793 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 4794 do_ldrq(s, a->rd, a->pg, addr, a->dtype); 4795 } 4796 return true; 4797 } 4798 4799 static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a) 4800 { 4801 if (!dc_isar_feature(aa64_sve, s)) { 4802 return false; 4803 } 4804 if (sve_access_check(s)) { 4805 TCGv_i64 addr = tcg_temp_new_i64(); 4806 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16); 4807 do_ldrq(s, a->rd, a->pg, addr, a->dtype); 4808 } 4809 return true; 4810 } 4811 4812 static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) 4813 { 4814 unsigned vsz = vec_full_reg_size(s); 4815 unsigned vsz_r32; 4816 TCGv_ptr t_pg; 4817 int poff, doff; 4818 uint32_t desc; 4819 4820 if (vsz < 32) { 4821 /* 4822 * Note that this UNDEFINED check comes after CheckSVEEnabled() 4823 * in the ARM pseudocode, which is the sve_access_check() done 4824 * in our caller. We should not now return false from the caller. 4825 */ 4826 unallocated_encoding(s); 4827 return; 4828 } 4829 4830 /* Load the first octaword using the normal predicated load helpers. */ 4831 if (!s->mte_active[0]) { 4832 addr = clean_data_tbi(s, addr); 4833 } 4834 4835 poff = pred_full_reg_offset(s, pg); 4836 if (vsz > 32) { 4837 /* 4838 * Zero-extend the first 32 bits of the predicate into a temporary. 4839 * This avoids triggering an assert making sure we don't have bits 4840 * set within a predicate beyond VQ, but we have lowered VQ to 2 4841 * for this load operation. 4842 */ 4843 TCGv_i64 tmp = tcg_temp_new_i64(); 4844 #if HOST_BIG_ENDIAN 4845 poff += 4; 4846 #endif 4847 tcg_gen_ld32u_i64(tmp, tcg_env, poff); 4848 4849 poff = offsetof(CPUARMState, vfp.preg_tmp); 4850 tcg_gen_st_i64(tmp, tcg_env, poff); 4851 } 4852 4853 t_pg = tcg_temp_new_ptr(); 4854 tcg_gen_addi_ptr(t_pg, tcg_env, poff); 4855 4856 gen_helper_gvec_mem *fn 4857 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; 4858 desc = make_svemte_desc(s, 32, 1, dtype_msz(dtype), false, zt); 4859 fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); 4860 4861 /* 4862 * Replicate that first octaword. 4863 * The replication happens in units of 32; if the full vector size 4864 * is not a multiple of 32, the final bits are zeroed. 4865 */ 4866 doff = vec_full_reg_offset(s, zt); 4867 vsz_r32 = QEMU_ALIGN_DOWN(vsz, 32); 4868 if (vsz >= 64) { 4869 tcg_gen_gvec_dup_mem(5, doff + 32, doff, vsz_r32 - 32, vsz_r32 - 32); 4870 } 4871 vsz -= vsz_r32; 4872 if (vsz) { 4873 tcg_gen_gvec_dup_imm(MO_64, doff + vsz_r32, vsz, vsz, 0); 4874 } 4875 } 4876 4877 static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a) 4878 { 4879 if (!dc_isar_feature(aa64_sve_f64mm, s)) { 4880 return false; 4881 } 4882 if (a->rm == 31) { 4883 return false; 4884 } 4885 s->is_nonstreaming = true; 4886 if (sve_access_check(s)) { 4887 TCGv_i64 addr = tcg_temp_new_i64(); 4888 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); 4889 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 4890 do_ldro(s, a->rd, a->pg, addr, a->dtype); 4891 } 4892 return true; 4893 } 4894 4895 static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a) 4896 { 4897 if (!dc_isar_feature(aa64_sve_f64mm, s)) { 4898 return false; 4899 } 4900 s->is_nonstreaming = true; 4901 if (sve_access_check(s)) { 4902 TCGv_i64 addr = tcg_temp_new_i64(); 4903 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32); 4904 do_ldro(s, a->rd, a->pg, addr, a->dtype); 4905 } 4906 return true; 4907 } 4908 4909 /* Load and broadcast element. */ 4910 static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) 4911 { 4912 unsigned vsz = vec_full_reg_size(s); 4913 unsigned psz = pred_full_reg_size(s); 4914 unsigned esz = dtype_esz[a->dtype]; 4915 unsigned msz = dtype_msz(a->dtype); 4916 TCGLabel *over; 4917 TCGv_i64 temp, clean_addr; 4918 MemOp memop; 4919 4920 if (!dc_isar_feature(aa64_sve, s)) { 4921 return false; 4922 } 4923 if (!sve_access_check(s)) { 4924 return true; 4925 } 4926 4927 over = gen_new_label(); 4928 4929 /* If the guarding predicate has no bits set, no load occurs. */ 4930 if (psz <= 8) { 4931 /* Reduce the pred_esz_masks value simply to reduce the 4932 * size of the code generated here. 4933 */ 4934 uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8); 4935 temp = tcg_temp_new_i64(); 4936 tcg_gen_ld_i64(temp, tcg_env, pred_full_reg_offset(s, a->pg)); 4937 tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask); 4938 tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over); 4939 } else { 4940 TCGv_i32 t32 = tcg_temp_new_i32(); 4941 find_last_active(s, t32, esz, a->pg); 4942 tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over); 4943 } 4944 4945 /* Load the data. */ 4946 temp = tcg_temp_new_i64(); 4947 tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz); 4948 4949 memop = finalize_memop(s, dtype_mop[a->dtype]); 4950 clean_addr = gen_mte_check1(s, temp, false, true, memop); 4951 tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s), memop); 4952 4953 /* Broadcast to *all* elements. */ 4954 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), 4955 vsz, vsz, temp); 4956 4957 /* Zero the inactive elements. */ 4958 gen_set_label(over); 4959 return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false); 4960 } 4961 4962 static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, 4963 int msz, int esz, int nreg) 4964 { 4965 static gen_helper_gvec_mem * const fn_single[2][2][4][4] = { 4966 { { { gen_helper_sve_st1bb_r, 4967 gen_helper_sve_st1bh_r, 4968 gen_helper_sve_st1bs_r, 4969 gen_helper_sve_st1bd_r }, 4970 { NULL, 4971 gen_helper_sve_st1hh_le_r, 4972 gen_helper_sve_st1hs_le_r, 4973 gen_helper_sve_st1hd_le_r }, 4974 { NULL, NULL, 4975 gen_helper_sve_st1ss_le_r, 4976 gen_helper_sve_st1sd_le_r }, 4977 { NULL, NULL, NULL, 4978 gen_helper_sve_st1dd_le_r } }, 4979 { { gen_helper_sve_st1bb_r, 4980 gen_helper_sve_st1bh_r, 4981 gen_helper_sve_st1bs_r, 4982 gen_helper_sve_st1bd_r }, 4983 { NULL, 4984 gen_helper_sve_st1hh_be_r, 4985 gen_helper_sve_st1hs_be_r, 4986 gen_helper_sve_st1hd_be_r }, 4987 { NULL, NULL, 4988 gen_helper_sve_st1ss_be_r, 4989 gen_helper_sve_st1sd_be_r }, 4990 { NULL, NULL, NULL, 4991 gen_helper_sve_st1dd_be_r } } }, 4992 4993 { { { gen_helper_sve_st1bb_r_mte, 4994 gen_helper_sve_st1bh_r_mte, 4995 gen_helper_sve_st1bs_r_mte, 4996 gen_helper_sve_st1bd_r_mte }, 4997 { NULL, 4998 gen_helper_sve_st1hh_le_r_mte, 4999 gen_helper_sve_st1hs_le_r_mte, 5000 gen_helper_sve_st1hd_le_r_mte }, 5001 { NULL, NULL, 5002 gen_helper_sve_st1ss_le_r_mte, 5003 gen_helper_sve_st1sd_le_r_mte }, 5004 { NULL, NULL, NULL, 5005 gen_helper_sve_st1dd_le_r_mte } }, 5006 { { gen_helper_sve_st1bb_r_mte, 5007 gen_helper_sve_st1bh_r_mte, 5008 gen_helper_sve_st1bs_r_mte, 5009 gen_helper_sve_st1bd_r_mte }, 5010 { NULL, 5011 gen_helper_sve_st1hh_be_r_mte, 5012 gen_helper_sve_st1hs_be_r_mte, 5013 gen_helper_sve_st1hd_be_r_mte }, 5014 { NULL, NULL, 5015 gen_helper_sve_st1ss_be_r_mte, 5016 gen_helper_sve_st1sd_be_r_mte }, 5017 { NULL, NULL, NULL, 5018 gen_helper_sve_st1dd_be_r_mte } } }, 5019 }; 5020 static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = { 5021 { { { gen_helper_sve_st2bb_r, 5022 gen_helper_sve_st2hh_le_r, 5023 gen_helper_sve_st2ss_le_r, 5024 gen_helper_sve_st2dd_le_r }, 5025 { gen_helper_sve_st3bb_r, 5026 gen_helper_sve_st3hh_le_r, 5027 gen_helper_sve_st3ss_le_r, 5028 gen_helper_sve_st3dd_le_r }, 5029 { gen_helper_sve_st4bb_r, 5030 gen_helper_sve_st4hh_le_r, 5031 gen_helper_sve_st4ss_le_r, 5032 gen_helper_sve_st4dd_le_r } }, 5033 { { gen_helper_sve_st2bb_r, 5034 gen_helper_sve_st2hh_be_r, 5035 gen_helper_sve_st2ss_be_r, 5036 gen_helper_sve_st2dd_be_r }, 5037 { gen_helper_sve_st3bb_r, 5038 gen_helper_sve_st3hh_be_r, 5039 gen_helper_sve_st3ss_be_r, 5040 gen_helper_sve_st3dd_be_r }, 5041 { gen_helper_sve_st4bb_r, 5042 gen_helper_sve_st4hh_be_r, 5043 gen_helper_sve_st4ss_be_r, 5044 gen_helper_sve_st4dd_be_r } } }, 5045 { { { gen_helper_sve_st2bb_r_mte, 5046 gen_helper_sve_st2hh_le_r_mte, 5047 gen_helper_sve_st2ss_le_r_mte, 5048 gen_helper_sve_st2dd_le_r_mte }, 5049 { gen_helper_sve_st3bb_r_mte, 5050 gen_helper_sve_st3hh_le_r_mte, 5051 gen_helper_sve_st3ss_le_r_mte, 5052 gen_helper_sve_st3dd_le_r_mte }, 5053 { gen_helper_sve_st4bb_r_mte, 5054 gen_helper_sve_st4hh_le_r_mte, 5055 gen_helper_sve_st4ss_le_r_mte, 5056 gen_helper_sve_st4dd_le_r_mte } }, 5057 { { gen_helper_sve_st2bb_r_mte, 5058 gen_helper_sve_st2hh_be_r_mte, 5059 gen_helper_sve_st2ss_be_r_mte, 5060 gen_helper_sve_st2dd_be_r_mte }, 5061 { gen_helper_sve_st3bb_r_mte, 5062 gen_helper_sve_st3hh_be_r_mte, 5063 gen_helper_sve_st3ss_be_r_mte, 5064 gen_helper_sve_st3dd_be_r_mte }, 5065 { gen_helper_sve_st4bb_r_mte, 5066 gen_helper_sve_st4hh_be_r_mte, 5067 gen_helper_sve_st4ss_be_r_mte, 5068 gen_helper_sve_st4dd_be_r_mte } } }, 5069 }; 5070 gen_helper_gvec_mem *fn; 5071 int be = s->be_data == MO_BE; 5072 5073 if (nreg == 0) { 5074 /* ST1 */ 5075 fn = fn_single[s->mte_active[0]][be][msz][esz]; 5076 } else { 5077 /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */ 5078 assert(msz == esz); 5079 fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz]; 5080 } 5081 assert(fn != NULL); 5082 do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg + 1, true, fn); 5083 } 5084 5085 static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) 5086 { 5087 if (!dc_isar_feature(aa64_sve, s)) { 5088 return false; 5089 } 5090 if (a->rm == 31 || a->msz > a->esz) { 5091 return false; 5092 } 5093 if (sve_access_check(s)) { 5094 TCGv_i64 addr = tcg_temp_new_i64(); 5095 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz); 5096 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 5097 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); 5098 } 5099 return true; 5100 } 5101 5102 static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a) 5103 { 5104 if (!dc_isar_feature(aa64_sve, s)) { 5105 return false; 5106 } 5107 if (a->msz > a->esz) { 5108 return false; 5109 } 5110 if (sve_access_check(s)) { 5111 int vsz = vec_full_reg_size(s); 5112 int elements = vsz >> a->esz; 5113 TCGv_i64 addr = tcg_temp_new_i64(); 5114 5115 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), 5116 (a->imm * elements * (a->nreg + 1)) << a->msz); 5117 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); 5118 } 5119 return true; 5120 } 5121 5122 /* 5123 *** SVE gather loads / scatter stores 5124 */ 5125 5126 static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, 5127 int scale, TCGv_i64 scalar, int msz, bool is_write, 5128 gen_helper_gvec_mem_scatter *fn) 5129 { 5130 TCGv_ptr t_zm = tcg_temp_new_ptr(); 5131 TCGv_ptr t_pg = tcg_temp_new_ptr(); 5132 TCGv_ptr t_zt = tcg_temp_new_ptr(); 5133 uint32_t desc; 5134 5135 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); 5136 tcg_gen_addi_ptr(t_zm, tcg_env, vec_full_reg_offset(s, zm)); 5137 tcg_gen_addi_ptr(t_zt, tcg_env, vec_full_reg_offset(s, zt)); 5138 5139 desc = make_svemte_desc(s, vec_full_reg_size(s), 1, msz, is_write, scale); 5140 fn(tcg_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc)); 5141 } 5142 5143 /* Indexed by [mte][be][ff][xs][u][msz]. */ 5144 static gen_helper_gvec_mem_scatter * const 5145 gather_load_fn32[2][2][2][2][2][3] = { 5146 { /* MTE Inactive */ 5147 { /* Little-endian */ 5148 { { { gen_helper_sve_ldbss_zsu, 5149 gen_helper_sve_ldhss_le_zsu, 5150 NULL, }, 5151 { gen_helper_sve_ldbsu_zsu, 5152 gen_helper_sve_ldhsu_le_zsu, 5153 gen_helper_sve_ldss_le_zsu, } }, 5154 { { gen_helper_sve_ldbss_zss, 5155 gen_helper_sve_ldhss_le_zss, 5156 NULL, }, 5157 { gen_helper_sve_ldbsu_zss, 5158 gen_helper_sve_ldhsu_le_zss, 5159 gen_helper_sve_ldss_le_zss, } } }, 5160 5161 /* First-fault */ 5162 { { { gen_helper_sve_ldffbss_zsu, 5163 gen_helper_sve_ldffhss_le_zsu, 5164 NULL, }, 5165 { gen_helper_sve_ldffbsu_zsu, 5166 gen_helper_sve_ldffhsu_le_zsu, 5167 gen_helper_sve_ldffss_le_zsu, } }, 5168 { { gen_helper_sve_ldffbss_zss, 5169 gen_helper_sve_ldffhss_le_zss, 5170 NULL, }, 5171 { gen_helper_sve_ldffbsu_zss, 5172 gen_helper_sve_ldffhsu_le_zss, 5173 gen_helper_sve_ldffss_le_zss, } } } }, 5174 5175 { /* Big-endian */ 5176 { { { gen_helper_sve_ldbss_zsu, 5177 gen_helper_sve_ldhss_be_zsu, 5178 NULL, }, 5179 { gen_helper_sve_ldbsu_zsu, 5180 gen_helper_sve_ldhsu_be_zsu, 5181 gen_helper_sve_ldss_be_zsu, } }, 5182 { { gen_helper_sve_ldbss_zss, 5183 gen_helper_sve_ldhss_be_zss, 5184 NULL, }, 5185 { gen_helper_sve_ldbsu_zss, 5186 gen_helper_sve_ldhsu_be_zss, 5187 gen_helper_sve_ldss_be_zss, } } }, 5188 5189 /* First-fault */ 5190 { { { gen_helper_sve_ldffbss_zsu, 5191 gen_helper_sve_ldffhss_be_zsu, 5192 NULL, }, 5193 { gen_helper_sve_ldffbsu_zsu, 5194 gen_helper_sve_ldffhsu_be_zsu, 5195 gen_helper_sve_ldffss_be_zsu, } }, 5196 { { gen_helper_sve_ldffbss_zss, 5197 gen_helper_sve_ldffhss_be_zss, 5198 NULL, }, 5199 { gen_helper_sve_ldffbsu_zss, 5200 gen_helper_sve_ldffhsu_be_zss, 5201 gen_helper_sve_ldffss_be_zss, } } } } }, 5202 { /* MTE Active */ 5203 { /* Little-endian */ 5204 { { { gen_helper_sve_ldbss_zsu_mte, 5205 gen_helper_sve_ldhss_le_zsu_mte, 5206 NULL, }, 5207 { gen_helper_sve_ldbsu_zsu_mte, 5208 gen_helper_sve_ldhsu_le_zsu_mte, 5209 gen_helper_sve_ldss_le_zsu_mte, } }, 5210 { { gen_helper_sve_ldbss_zss_mte, 5211 gen_helper_sve_ldhss_le_zss_mte, 5212 NULL, }, 5213 { gen_helper_sve_ldbsu_zss_mte, 5214 gen_helper_sve_ldhsu_le_zss_mte, 5215 gen_helper_sve_ldss_le_zss_mte, } } }, 5216 5217 /* First-fault */ 5218 { { { gen_helper_sve_ldffbss_zsu_mte, 5219 gen_helper_sve_ldffhss_le_zsu_mte, 5220 NULL, }, 5221 { gen_helper_sve_ldffbsu_zsu_mte, 5222 gen_helper_sve_ldffhsu_le_zsu_mte, 5223 gen_helper_sve_ldffss_le_zsu_mte, } }, 5224 { { gen_helper_sve_ldffbss_zss_mte, 5225 gen_helper_sve_ldffhss_le_zss_mte, 5226 NULL, }, 5227 { gen_helper_sve_ldffbsu_zss_mte, 5228 gen_helper_sve_ldffhsu_le_zss_mte, 5229 gen_helper_sve_ldffss_le_zss_mte, } } } }, 5230 5231 { /* Big-endian */ 5232 { { { gen_helper_sve_ldbss_zsu_mte, 5233 gen_helper_sve_ldhss_be_zsu_mte, 5234 NULL, }, 5235 { gen_helper_sve_ldbsu_zsu_mte, 5236 gen_helper_sve_ldhsu_be_zsu_mte, 5237 gen_helper_sve_ldss_be_zsu_mte, } }, 5238 { { gen_helper_sve_ldbss_zss_mte, 5239 gen_helper_sve_ldhss_be_zss_mte, 5240 NULL, }, 5241 { gen_helper_sve_ldbsu_zss_mte, 5242 gen_helper_sve_ldhsu_be_zss_mte, 5243 gen_helper_sve_ldss_be_zss_mte, } } }, 5244 5245 /* First-fault */ 5246 { { { gen_helper_sve_ldffbss_zsu_mte, 5247 gen_helper_sve_ldffhss_be_zsu_mte, 5248 NULL, }, 5249 { gen_helper_sve_ldffbsu_zsu_mte, 5250 gen_helper_sve_ldffhsu_be_zsu_mte, 5251 gen_helper_sve_ldffss_be_zsu_mte, } }, 5252 { { gen_helper_sve_ldffbss_zss_mte, 5253 gen_helper_sve_ldffhss_be_zss_mte, 5254 NULL, }, 5255 { gen_helper_sve_ldffbsu_zss_mte, 5256 gen_helper_sve_ldffhsu_be_zss_mte, 5257 gen_helper_sve_ldffss_be_zss_mte, } } } } }, 5258 }; 5259 5260 /* Note that we overload xs=2 to indicate 64-bit offset. */ 5261 static gen_helper_gvec_mem_scatter * const 5262 gather_load_fn64[2][2][2][3][2][4] = { 5263 { /* MTE Inactive */ 5264 { /* Little-endian */ 5265 { { { gen_helper_sve_ldbds_zsu, 5266 gen_helper_sve_ldhds_le_zsu, 5267 gen_helper_sve_ldsds_le_zsu, 5268 NULL, }, 5269 { gen_helper_sve_ldbdu_zsu, 5270 gen_helper_sve_ldhdu_le_zsu, 5271 gen_helper_sve_ldsdu_le_zsu, 5272 gen_helper_sve_lddd_le_zsu, } }, 5273 { { gen_helper_sve_ldbds_zss, 5274 gen_helper_sve_ldhds_le_zss, 5275 gen_helper_sve_ldsds_le_zss, 5276 NULL, }, 5277 { gen_helper_sve_ldbdu_zss, 5278 gen_helper_sve_ldhdu_le_zss, 5279 gen_helper_sve_ldsdu_le_zss, 5280 gen_helper_sve_lddd_le_zss, } }, 5281 { { gen_helper_sve_ldbds_zd, 5282 gen_helper_sve_ldhds_le_zd, 5283 gen_helper_sve_ldsds_le_zd, 5284 NULL, }, 5285 { gen_helper_sve_ldbdu_zd, 5286 gen_helper_sve_ldhdu_le_zd, 5287 gen_helper_sve_ldsdu_le_zd, 5288 gen_helper_sve_lddd_le_zd, } } }, 5289 5290 /* First-fault */ 5291 { { { gen_helper_sve_ldffbds_zsu, 5292 gen_helper_sve_ldffhds_le_zsu, 5293 gen_helper_sve_ldffsds_le_zsu, 5294 NULL, }, 5295 { gen_helper_sve_ldffbdu_zsu, 5296 gen_helper_sve_ldffhdu_le_zsu, 5297 gen_helper_sve_ldffsdu_le_zsu, 5298 gen_helper_sve_ldffdd_le_zsu, } }, 5299 { { gen_helper_sve_ldffbds_zss, 5300 gen_helper_sve_ldffhds_le_zss, 5301 gen_helper_sve_ldffsds_le_zss, 5302 NULL, }, 5303 { gen_helper_sve_ldffbdu_zss, 5304 gen_helper_sve_ldffhdu_le_zss, 5305 gen_helper_sve_ldffsdu_le_zss, 5306 gen_helper_sve_ldffdd_le_zss, } }, 5307 { { gen_helper_sve_ldffbds_zd, 5308 gen_helper_sve_ldffhds_le_zd, 5309 gen_helper_sve_ldffsds_le_zd, 5310 NULL, }, 5311 { gen_helper_sve_ldffbdu_zd, 5312 gen_helper_sve_ldffhdu_le_zd, 5313 gen_helper_sve_ldffsdu_le_zd, 5314 gen_helper_sve_ldffdd_le_zd, } } } }, 5315 { /* Big-endian */ 5316 { { { gen_helper_sve_ldbds_zsu, 5317 gen_helper_sve_ldhds_be_zsu, 5318 gen_helper_sve_ldsds_be_zsu, 5319 NULL, }, 5320 { gen_helper_sve_ldbdu_zsu, 5321 gen_helper_sve_ldhdu_be_zsu, 5322 gen_helper_sve_ldsdu_be_zsu, 5323 gen_helper_sve_lddd_be_zsu, } }, 5324 { { gen_helper_sve_ldbds_zss, 5325 gen_helper_sve_ldhds_be_zss, 5326 gen_helper_sve_ldsds_be_zss, 5327 NULL, }, 5328 { gen_helper_sve_ldbdu_zss, 5329 gen_helper_sve_ldhdu_be_zss, 5330 gen_helper_sve_ldsdu_be_zss, 5331 gen_helper_sve_lddd_be_zss, } }, 5332 { { gen_helper_sve_ldbds_zd, 5333 gen_helper_sve_ldhds_be_zd, 5334 gen_helper_sve_ldsds_be_zd, 5335 NULL, }, 5336 { gen_helper_sve_ldbdu_zd, 5337 gen_helper_sve_ldhdu_be_zd, 5338 gen_helper_sve_ldsdu_be_zd, 5339 gen_helper_sve_lddd_be_zd, } } }, 5340 5341 /* First-fault */ 5342 { { { gen_helper_sve_ldffbds_zsu, 5343 gen_helper_sve_ldffhds_be_zsu, 5344 gen_helper_sve_ldffsds_be_zsu, 5345 NULL, }, 5346 { gen_helper_sve_ldffbdu_zsu, 5347 gen_helper_sve_ldffhdu_be_zsu, 5348 gen_helper_sve_ldffsdu_be_zsu, 5349 gen_helper_sve_ldffdd_be_zsu, } }, 5350 { { gen_helper_sve_ldffbds_zss, 5351 gen_helper_sve_ldffhds_be_zss, 5352 gen_helper_sve_ldffsds_be_zss, 5353 NULL, }, 5354 { gen_helper_sve_ldffbdu_zss, 5355 gen_helper_sve_ldffhdu_be_zss, 5356 gen_helper_sve_ldffsdu_be_zss, 5357 gen_helper_sve_ldffdd_be_zss, } }, 5358 { { gen_helper_sve_ldffbds_zd, 5359 gen_helper_sve_ldffhds_be_zd, 5360 gen_helper_sve_ldffsds_be_zd, 5361 NULL, }, 5362 { gen_helper_sve_ldffbdu_zd, 5363 gen_helper_sve_ldffhdu_be_zd, 5364 gen_helper_sve_ldffsdu_be_zd, 5365 gen_helper_sve_ldffdd_be_zd, } } } } }, 5366 { /* MTE Active */ 5367 { /* Little-endian */ 5368 { { { gen_helper_sve_ldbds_zsu_mte, 5369 gen_helper_sve_ldhds_le_zsu_mte, 5370 gen_helper_sve_ldsds_le_zsu_mte, 5371 NULL, }, 5372 { gen_helper_sve_ldbdu_zsu_mte, 5373 gen_helper_sve_ldhdu_le_zsu_mte, 5374 gen_helper_sve_ldsdu_le_zsu_mte, 5375 gen_helper_sve_lddd_le_zsu_mte, } }, 5376 { { gen_helper_sve_ldbds_zss_mte, 5377 gen_helper_sve_ldhds_le_zss_mte, 5378 gen_helper_sve_ldsds_le_zss_mte, 5379 NULL, }, 5380 { gen_helper_sve_ldbdu_zss_mte, 5381 gen_helper_sve_ldhdu_le_zss_mte, 5382 gen_helper_sve_ldsdu_le_zss_mte, 5383 gen_helper_sve_lddd_le_zss_mte, } }, 5384 { { gen_helper_sve_ldbds_zd_mte, 5385 gen_helper_sve_ldhds_le_zd_mte, 5386 gen_helper_sve_ldsds_le_zd_mte, 5387 NULL, }, 5388 { gen_helper_sve_ldbdu_zd_mte, 5389 gen_helper_sve_ldhdu_le_zd_mte, 5390 gen_helper_sve_ldsdu_le_zd_mte, 5391 gen_helper_sve_lddd_le_zd_mte, } } }, 5392 5393 /* First-fault */ 5394 { { { gen_helper_sve_ldffbds_zsu_mte, 5395 gen_helper_sve_ldffhds_le_zsu_mte, 5396 gen_helper_sve_ldffsds_le_zsu_mte, 5397 NULL, }, 5398 { gen_helper_sve_ldffbdu_zsu_mte, 5399 gen_helper_sve_ldffhdu_le_zsu_mte, 5400 gen_helper_sve_ldffsdu_le_zsu_mte, 5401 gen_helper_sve_ldffdd_le_zsu_mte, } }, 5402 { { gen_helper_sve_ldffbds_zss_mte, 5403 gen_helper_sve_ldffhds_le_zss_mte, 5404 gen_helper_sve_ldffsds_le_zss_mte, 5405 NULL, }, 5406 { gen_helper_sve_ldffbdu_zss_mte, 5407 gen_helper_sve_ldffhdu_le_zss_mte, 5408 gen_helper_sve_ldffsdu_le_zss_mte, 5409 gen_helper_sve_ldffdd_le_zss_mte, } }, 5410 { { gen_helper_sve_ldffbds_zd_mte, 5411 gen_helper_sve_ldffhds_le_zd_mte, 5412 gen_helper_sve_ldffsds_le_zd_mte, 5413 NULL, }, 5414 { gen_helper_sve_ldffbdu_zd_mte, 5415 gen_helper_sve_ldffhdu_le_zd_mte, 5416 gen_helper_sve_ldffsdu_le_zd_mte, 5417 gen_helper_sve_ldffdd_le_zd_mte, } } } }, 5418 { /* Big-endian */ 5419 { { { gen_helper_sve_ldbds_zsu_mte, 5420 gen_helper_sve_ldhds_be_zsu_mte, 5421 gen_helper_sve_ldsds_be_zsu_mte, 5422 NULL, }, 5423 { gen_helper_sve_ldbdu_zsu_mte, 5424 gen_helper_sve_ldhdu_be_zsu_mte, 5425 gen_helper_sve_ldsdu_be_zsu_mte, 5426 gen_helper_sve_lddd_be_zsu_mte, } }, 5427 { { gen_helper_sve_ldbds_zss_mte, 5428 gen_helper_sve_ldhds_be_zss_mte, 5429 gen_helper_sve_ldsds_be_zss_mte, 5430 NULL, }, 5431 { gen_helper_sve_ldbdu_zss_mte, 5432 gen_helper_sve_ldhdu_be_zss_mte, 5433 gen_helper_sve_ldsdu_be_zss_mte, 5434 gen_helper_sve_lddd_be_zss_mte, } }, 5435 { { gen_helper_sve_ldbds_zd_mte, 5436 gen_helper_sve_ldhds_be_zd_mte, 5437 gen_helper_sve_ldsds_be_zd_mte, 5438 NULL, }, 5439 { gen_helper_sve_ldbdu_zd_mte, 5440 gen_helper_sve_ldhdu_be_zd_mte, 5441 gen_helper_sve_ldsdu_be_zd_mte, 5442 gen_helper_sve_lddd_be_zd_mte, } } }, 5443 5444 /* First-fault */ 5445 { { { gen_helper_sve_ldffbds_zsu_mte, 5446 gen_helper_sve_ldffhds_be_zsu_mte, 5447 gen_helper_sve_ldffsds_be_zsu_mte, 5448 NULL, }, 5449 { gen_helper_sve_ldffbdu_zsu_mte, 5450 gen_helper_sve_ldffhdu_be_zsu_mte, 5451 gen_helper_sve_ldffsdu_be_zsu_mte, 5452 gen_helper_sve_ldffdd_be_zsu_mte, } }, 5453 { { gen_helper_sve_ldffbds_zss_mte, 5454 gen_helper_sve_ldffhds_be_zss_mte, 5455 gen_helper_sve_ldffsds_be_zss_mte, 5456 NULL, }, 5457 { gen_helper_sve_ldffbdu_zss_mte, 5458 gen_helper_sve_ldffhdu_be_zss_mte, 5459 gen_helper_sve_ldffsdu_be_zss_mte, 5460 gen_helper_sve_ldffdd_be_zss_mte, } }, 5461 { { gen_helper_sve_ldffbds_zd_mte, 5462 gen_helper_sve_ldffhds_be_zd_mte, 5463 gen_helper_sve_ldffsds_be_zd_mte, 5464 NULL, }, 5465 { gen_helper_sve_ldffbdu_zd_mte, 5466 gen_helper_sve_ldffhdu_be_zd_mte, 5467 gen_helper_sve_ldffsdu_be_zd_mte, 5468 gen_helper_sve_ldffdd_be_zd_mte, } } } } }, 5469 }; 5470 5471 static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) 5472 { 5473 gen_helper_gvec_mem_scatter *fn = NULL; 5474 bool be = s->be_data == MO_BE; 5475 bool mte = s->mte_active[0]; 5476 5477 if (!dc_isar_feature(aa64_sve, s)) { 5478 return false; 5479 } 5480 s->is_nonstreaming = true; 5481 if (!sve_access_check(s)) { 5482 return true; 5483 } 5484 5485 switch (a->esz) { 5486 case MO_32: 5487 fn = gather_load_fn32[mte][be][a->ff][a->xs][a->u][a->msz]; 5488 break; 5489 case MO_64: 5490 fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz]; 5491 break; 5492 } 5493 assert(fn != NULL); 5494 5495 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz, 5496 cpu_reg_sp(s, a->rn), a->msz, false, fn); 5497 return true; 5498 } 5499 5500 static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) 5501 { 5502 gen_helper_gvec_mem_scatter *fn = NULL; 5503 bool be = s->be_data == MO_BE; 5504 bool mte = s->mte_active[0]; 5505 5506 if (a->esz < a->msz || (a->esz == a->msz && !a->u)) { 5507 return false; 5508 } 5509 if (!dc_isar_feature(aa64_sve, s)) { 5510 return false; 5511 } 5512 s->is_nonstreaming = true; 5513 if (!sve_access_check(s)) { 5514 return true; 5515 } 5516 5517 switch (a->esz) { 5518 case MO_32: 5519 fn = gather_load_fn32[mte][be][a->ff][0][a->u][a->msz]; 5520 break; 5521 case MO_64: 5522 fn = gather_load_fn64[mte][be][a->ff][2][a->u][a->msz]; 5523 break; 5524 } 5525 assert(fn != NULL); 5526 5527 /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x]) 5528 * by loading the immediate into the scalar parameter. 5529 */ 5530 do_mem_zpz(s, a->rd, a->pg, a->rn, 0, 5531 tcg_constant_i64(a->imm << a->msz), a->msz, false, fn); 5532 return true; 5533 } 5534 5535 static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a) 5536 { 5537 gen_helper_gvec_mem_scatter *fn = NULL; 5538 bool be = s->be_data == MO_BE; 5539 bool mte = s->mte_active[0]; 5540 5541 if (a->esz < a->msz + !a->u) { 5542 return false; 5543 } 5544 if (!dc_isar_feature(aa64_sve2, s)) { 5545 return false; 5546 } 5547 s->is_nonstreaming = true; 5548 if (!sve_access_check(s)) { 5549 return true; 5550 } 5551 5552 switch (a->esz) { 5553 case MO_32: 5554 fn = gather_load_fn32[mte][be][0][0][a->u][a->msz]; 5555 break; 5556 case MO_64: 5557 fn = gather_load_fn64[mte][be][0][2][a->u][a->msz]; 5558 break; 5559 } 5560 assert(fn != NULL); 5561 5562 do_mem_zpz(s, a->rd, a->pg, a->rn, 0, 5563 cpu_reg(s, a->rm), a->msz, false, fn); 5564 return true; 5565 } 5566 5567 /* Indexed by [mte][be][xs][msz]. */ 5568 static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = { 5569 { /* MTE Inactive */ 5570 { /* Little-endian */ 5571 { gen_helper_sve_stbs_zsu, 5572 gen_helper_sve_sths_le_zsu, 5573 gen_helper_sve_stss_le_zsu, }, 5574 { gen_helper_sve_stbs_zss, 5575 gen_helper_sve_sths_le_zss, 5576 gen_helper_sve_stss_le_zss, } }, 5577 { /* Big-endian */ 5578 { gen_helper_sve_stbs_zsu, 5579 gen_helper_sve_sths_be_zsu, 5580 gen_helper_sve_stss_be_zsu, }, 5581 { gen_helper_sve_stbs_zss, 5582 gen_helper_sve_sths_be_zss, 5583 gen_helper_sve_stss_be_zss, } } }, 5584 { /* MTE Active */ 5585 { /* Little-endian */ 5586 { gen_helper_sve_stbs_zsu_mte, 5587 gen_helper_sve_sths_le_zsu_mte, 5588 gen_helper_sve_stss_le_zsu_mte, }, 5589 { gen_helper_sve_stbs_zss_mte, 5590 gen_helper_sve_sths_le_zss_mte, 5591 gen_helper_sve_stss_le_zss_mte, } }, 5592 { /* Big-endian */ 5593 { gen_helper_sve_stbs_zsu_mte, 5594 gen_helper_sve_sths_be_zsu_mte, 5595 gen_helper_sve_stss_be_zsu_mte, }, 5596 { gen_helper_sve_stbs_zss_mte, 5597 gen_helper_sve_sths_be_zss_mte, 5598 gen_helper_sve_stss_be_zss_mte, } } }, 5599 }; 5600 5601 /* Note that we overload xs=2 to indicate 64-bit offset. */ 5602 static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = { 5603 { /* MTE Inactive */ 5604 { /* Little-endian */ 5605 { gen_helper_sve_stbd_zsu, 5606 gen_helper_sve_sthd_le_zsu, 5607 gen_helper_sve_stsd_le_zsu, 5608 gen_helper_sve_stdd_le_zsu, }, 5609 { gen_helper_sve_stbd_zss, 5610 gen_helper_sve_sthd_le_zss, 5611 gen_helper_sve_stsd_le_zss, 5612 gen_helper_sve_stdd_le_zss, }, 5613 { gen_helper_sve_stbd_zd, 5614 gen_helper_sve_sthd_le_zd, 5615 gen_helper_sve_stsd_le_zd, 5616 gen_helper_sve_stdd_le_zd, } }, 5617 { /* Big-endian */ 5618 { gen_helper_sve_stbd_zsu, 5619 gen_helper_sve_sthd_be_zsu, 5620 gen_helper_sve_stsd_be_zsu, 5621 gen_helper_sve_stdd_be_zsu, }, 5622 { gen_helper_sve_stbd_zss, 5623 gen_helper_sve_sthd_be_zss, 5624 gen_helper_sve_stsd_be_zss, 5625 gen_helper_sve_stdd_be_zss, }, 5626 { gen_helper_sve_stbd_zd, 5627 gen_helper_sve_sthd_be_zd, 5628 gen_helper_sve_stsd_be_zd, 5629 gen_helper_sve_stdd_be_zd, } } }, 5630 { /* MTE Inactive */ 5631 { /* Little-endian */ 5632 { gen_helper_sve_stbd_zsu_mte, 5633 gen_helper_sve_sthd_le_zsu_mte, 5634 gen_helper_sve_stsd_le_zsu_mte, 5635 gen_helper_sve_stdd_le_zsu_mte, }, 5636 { gen_helper_sve_stbd_zss_mte, 5637 gen_helper_sve_sthd_le_zss_mte, 5638 gen_helper_sve_stsd_le_zss_mte, 5639 gen_helper_sve_stdd_le_zss_mte, }, 5640 { gen_helper_sve_stbd_zd_mte, 5641 gen_helper_sve_sthd_le_zd_mte, 5642 gen_helper_sve_stsd_le_zd_mte, 5643 gen_helper_sve_stdd_le_zd_mte, } }, 5644 { /* Big-endian */ 5645 { gen_helper_sve_stbd_zsu_mte, 5646 gen_helper_sve_sthd_be_zsu_mte, 5647 gen_helper_sve_stsd_be_zsu_mte, 5648 gen_helper_sve_stdd_be_zsu_mte, }, 5649 { gen_helper_sve_stbd_zss_mte, 5650 gen_helper_sve_sthd_be_zss_mte, 5651 gen_helper_sve_stsd_be_zss_mte, 5652 gen_helper_sve_stdd_be_zss_mte, }, 5653 { gen_helper_sve_stbd_zd_mte, 5654 gen_helper_sve_sthd_be_zd_mte, 5655 gen_helper_sve_stsd_be_zd_mte, 5656 gen_helper_sve_stdd_be_zd_mte, } } }, 5657 }; 5658 5659 static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) 5660 { 5661 gen_helper_gvec_mem_scatter *fn; 5662 bool be = s->be_data == MO_BE; 5663 bool mte = s->mte_active[0]; 5664 5665 if (a->esz < a->msz || (a->msz == 0 && a->scale)) { 5666 return false; 5667 } 5668 if (!dc_isar_feature(aa64_sve, s)) { 5669 return false; 5670 } 5671 s->is_nonstreaming = true; 5672 if (!sve_access_check(s)) { 5673 return true; 5674 } 5675 switch (a->esz) { 5676 case MO_32: 5677 fn = scatter_store_fn32[mte][be][a->xs][a->msz]; 5678 break; 5679 case MO_64: 5680 fn = scatter_store_fn64[mte][be][a->xs][a->msz]; 5681 break; 5682 default: 5683 g_assert_not_reached(); 5684 } 5685 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz, 5686 cpu_reg_sp(s, a->rn), a->msz, true, fn); 5687 return true; 5688 } 5689 5690 static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) 5691 { 5692 gen_helper_gvec_mem_scatter *fn = NULL; 5693 bool be = s->be_data == MO_BE; 5694 bool mte = s->mte_active[0]; 5695 5696 if (a->esz < a->msz) { 5697 return false; 5698 } 5699 if (!dc_isar_feature(aa64_sve, s)) { 5700 return false; 5701 } 5702 s->is_nonstreaming = true; 5703 if (!sve_access_check(s)) { 5704 return true; 5705 } 5706 5707 switch (a->esz) { 5708 case MO_32: 5709 fn = scatter_store_fn32[mte][be][0][a->msz]; 5710 break; 5711 case MO_64: 5712 fn = scatter_store_fn64[mte][be][2][a->msz]; 5713 break; 5714 } 5715 assert(fn != NULL); 5716 5717 /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x]) 5718 * by loading the immediate into the scalar parameter. 5719 */ 5720 do_mem_zpz(s, a->rd, a->pg, a->rn, 0, 5721 tcg_constant_i64(a->imm << a->msz), a->msz, true, fn); 5722 return true; 5723 } 5724 5725 static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a) 5726 { 5727 gen_helper_gvec_mem_scatter *fn; 5728 bool be = s->be_data == MO_BE; 5729 bool mte = s->mte_active[0]; 5730 5731 if (a->esz < a->msz) { 5732 return false; 5733 } 5734 if (!dc_isar_feature(aa64_sve2, s)) { 5735 return false; 5736 } 5737 s->is_nonstreaming = true; 5738 if (!sve_access_check(s)) { 5739 return true; 5740 } 5741 5742 switch (a->esz) { 5743 case MO_32: 5744 fn = scatter_store_fn32[mte][be][0][a->msz]; 5745 break; 5746 case MO_64: 5747 fn = scatter_store_fn64[mte][be][2][a->msz]; 5748 break; 5749 default: 5750 g_assert_not_reached(); 5751 } 5752 5753 do_mem_zpz(s, a->rd, a->pg, a->rn, 0, 5754 cpu_reg(s, a->rm), a->msz, true, fn); 5755 return true; 5756 } 5757 5758 /* 5759 * Prefetches 5760 */ 5761 5762 static bool trans_PRF(DisasContext *s, arg_PRF *a) 5763 { 5764 if (!dc_isar_feature(aa64_sve, s)) { 5765 return false; 5766 } 5767 /* Prefetch is a nop within QEMU. */ 5768 (void)sve_access_check(s); 5769 return true; 5770 } 5771 5772 static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a) 5773 { 5774 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { 5775 return false; 5776 } 5777 /* Prefetch is a nop within QEMU. */ 5778 (void)sve_access_check(s); 5779 return true; 5780 } 5781 5782 static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a) 5783 { 5784 if (!dc_isar_feature(aa64_sve, s)) { 5785 return false; 5786 } 5787 /* Prefetch is a nop within QEMU. */ 5788 s->is_nonstreaming = true; 5789 (void)sve_access_check(s); 5790 return true; 5791 } 5792 5793 /* 5794 * Move Prefix 5795 * 5796 * TODO: The implementation so far could handle predicated merging movprfx. 5797 * The helper functions as written take an extra source register to 5798 * use in the operation, but the result is only written when predication 5799 * succeeds. For unpredicated movprfx, we need to rearrange the helpers 5800 * to allow the final write back to the destination to be unconditional. 5801 * For predicated zeroing movprfx, we need to rearrange the helpers to 5802 * allow the final write back to zero inactives. 5803 * 5804 * In the meantime, just emit the moves. 5805 */ 5806 5807 TRANS_FEAT(MOVPRFX, aa64_sve, do_mov_z, a->rd, a->rn) 5808 TRANS_FEAT(MOVPRFX_m, aa64_sve, do_sel_z, a->rd, a->rn, a->rd, a->pg, a->esz) 5809 TRANS_FEAT(MOVPRFX_z, aa64_sve, do_movz_zpz, a->rd, a->rn, a->pg, a->esz, false) 5810 5811 /* 5812 * SVE2 Integer Multiply - Unpredicated 5813 */ 5814 5815 TRANS_FEAT(MUL_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, tcg_gen_gvec_mul, a) 5816 5817 static gen_helper_gvec_3 * const smulh_zzz_fns[4] = { 5818 gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h, 5819 gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d, 5820 }; 5821 TRANS_FEAT(SMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5822 smulh_zzz_fns[a->esz], a, 0) 5823 5824 static gen_helper_gvec_3 * const umulh_zzz_fns[4] = { 5825 gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h, 5826 gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d, 5827 }; 5828 TRANS_FEAT(UMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5829 umulh_zzz_fns[a->esz], a, 0) 5830 5831 TRANS_FEAT(PMUL_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5832 gen_helper_gvec_pmul_b, a, 0) 5833 5834 static gen_helper_gvec_3 * const sqdmulh_zzz_fns[4] = { 5835 gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h, 5836 gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d, 5837 }; 5838 TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5839 sqdmulh_zzz_fns[a->esz], a, 0) 5840 5841 static gen_helper_gvec_3 * const sqrdmulh_zzz_fns[4] = { 5842 gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h, 5843 gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d, 5844 }; 5845 TRANS_FEAT(SQRDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5846 sqrdmulh_zzz_fns[a->esz], a, 0) 5847 5848 /* 5849 * SVE2 Integer - Predicated 5850 */ 5851 5852 static gen_helper_gvec_4 * const sadlp_fns[4] = { 5853 NULL, gen_helper_sve2_sadalp_zpzz_h, 5854 gen_helper_sve2_sadalp_zpzz_s, gen_helper_sve2_sadalp_zpzz_d, 5855 }; 5856 TRANS_FEAT(SADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz, 5857 sadlp_fns[a->esz], a, 0) 5858 5859 static gen_helper_gvec_4 * const uadlp_fns[4] = { 5860 NULL, gen_helper_sve2_uadalp_zpzz_h, 5861 gen_helper_sve2_uadalp_zpzz_s, gen_helper_sve2_uadalp_zpzz_d, 5862 }; 5863 TRANS_FEAT(UADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz, 5864 uadlp_fns[a->esz], a, 0) 5865 5866 /* 5867 * SVE2 integer unary operations (predicated) 5868 */ 5869 5870 TRANS_FEAT(URECPE, aa64_sve2, gen_gvec_ool_arg_zpz, 5871 a->esz == 2 ? gen_helper_sve2_urecpe_s : NULL, a, 0) 5872 5873 TRANS_FEAT(URSQRTE, aa64_sve2, gen_gvec_ool_arg_zpz, 5874 a->esz == 2 ? gen_helper_sve2_ursqrte_s : NULL, a, 0) 5875 5876 static gen_helper_gvec_3 * const sqabs_fns[4] = { 5877 gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h, 5878 gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d, 5879 }; 5880 TRANS_FEAT(SQABS, aa64_sve2, gen_gvec_ool_arg_zpz, sqabs_fns[a->esz], a, 0) 5881 5882 static gen_helper_gvec_3 * const sqneg_fns[4] = { 5883 gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h, 5884 gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d, 5885 }; 5886 TRANS_FEAT(SQNEG, aa64_sve2, gen_gvec_ool_arg_zpz, sqneg_fns[a->esz], a, 0) 5887 5888 DO_ZPZZ(SQSHL, aa64_sve2, sve2_sqshl) 5889 DO_ZPZZ(SQRSHL, aa64_sve2, sve2_sqrshl) 5890 DO_ZPZZ(SRSHL, aa64_sve2, sve2_srshl) 5891 5892 DO_ZPZZ(UQSHL, aa64_sve2, sve2_uqshl) 5893 DO_ZPZZ(UQRSHL, aa64_sve2, sve2_uqrshl) 5894 DO_ZPZZ(URSHL, aa64_sve2, sve2_urshl) 5895 5896 DO_ZPZZ(SHADD, aa64_sve2, sve2_shadd) 5897 DO_ZPZZ(SRHADD, aa64_sve2, sve2_srhadd) 5898 DO_ZPZZ(SHSUB, aa64_sve2, sve2_shsub) 5899 5900 DO_ZPZZ(UHADD, aa64_sve2, sve2_uhadd) 5901 DO_ZPZZ(URHADD, aa64_sve2, sve2_urhadd) 5902 DO_ZPZZ(UHSUB, aa64_sve2, sve2_uhsub) 5903 5904 DO_ZPZZ(ADDP, aa64_sve2, sve2_addp) 5905 DO_ZPZZ(SMAXP, aa64_sve2, sve2_smaxp) 5906 DO_ZPZZ(UMAXP, aa64_sve2, sve2_umaxp) 5907 DO_ZPZZ(SMINP, aa64_sve2, sve2_sminp) 5908 DO_ZPZZ(UMINP, aa64_sve2, sve2_uminp) 5909 5910 DO_ZPZZ(SQADD_zpzz, aa64_sve2, sve2_sqadd) 5911 DO_ZPZZ(UQADD_zpzz, aa64_sve2, sve2_uqadd) 5912 DO_ZPZZ(SQSUB_zpzz, aa64_sve2, sve2_sqsub) 5913 DO_ZPZZ(UQSUB_zpzz, aa64_sve2, sve2_uqsub) 5914 DO_ZPZZ(SUQADD, aa64_sve2, sve2_suqadd) 5915 DO_ZPZZ(USQADD, aa64_sve2, sve2_usqadd) 5916 5917 /* 5918 * SVE2 Widening Integer Arithmetic 5919 */ 5920 5921 static gen_helper_gvec_3 * const saddl_fns[4] = { 5922 NULL, gen_helper_sve2_saddl_h, 5923 gen_helper_sve2_saddl_s, gen_helper_sve2_saddl_d, 5924 }; 5925 TRANS_FEAT(SADDLB, aa64_sve2, gen_gvec_ool_arg_zzz, 5926 saddl_fns[a->esz], a, 0) 5927 TRANS_FEAT(SADDLT, aa64_sve2, gen_gvec_ool_arg_zzz, 5928 saddl_fns[a->esz], a, 3) 5929 TRANS_FEAT(SADDLBT, aa64_sve2, gen_gvec_ool_arg_zzz, 5930 saddl_fns[a->esz], a, 2) 5931 5932 static gen_helper_gvec_3 * const ssubl_fns[4] = { 5933 NULL, gen_helper_sve2_ssubl_h, 5934 gen_helper_sve2_ssubl_s, gen_helper_sve2_ssubl_d, 5935 }; 5936 TRANS_FEAT(SSUBLB, aa64_sve2, gen_gvec_ool_arg_zzz, 5937 ssubl_fns[a->esz], a, 0) 5938 TRANS_FEAT(SSUBLT, aa64_sve2, gen_gvec_ool_arg_zzz, 5939 ssubl_fns[a->esz], a, 3) 5940 TRANS_FEAT(SSUBLBT, aa64_sve2, gen_gvec_ool_arg_zzz, 5941 ssubl_fns[a->esz], a, 2) 5942 TRANS_FEAT(SSUBLTB, aa64_sve2, gen_gvec_ool_arg_zzz, 5943 ssubl_fns[a->esz], a, 1) 5944 5945 static gen_helper_gvec_3 * const sabdl_fns[4] = { 5946 NULL, gen_helper_sve2_sabdl_h, 5947 gen_helper_sve2_sabdl_s, gen_helper_sve2_sabdl_d, 5948 }; 5949 TRANS_FEAT(SABDLB, aa64_sve2, gen_gvec_ool_arg_zzz, 5950 sabdl_fns[a->esz], a, 0) 5951 TRANS_FEAT(SABDLT, aa64_sve2, gen_gvec_ool_arg_zzz, 5952 sabdl_fns[a->esz], a, 3) 5953 5954 static gen_helper_gvec_3 * const uaddl_fns[4] = { 5955 NULL, gen_helper_sve2_uaddl_h, 5956 gen_helper_sve2_uaddl_s, gen_helper_sve2_uaddl_d, 5957 }; 5958 TRANS_FEAT(UADDLB, aa64_sve2, gen_gvec_ool_arg_zzz, 5959 uaddl_fns[a->esz], a, 0) 5960 TRANS_FEAT(UADDLT, aa64_sve2, gen_gvec_ool_arg_zzz, 5961 uaddl_fns[a->esz], a, 3) 5962 5963 static gen_helper_gvec_3 * const usubl_fns[4] = { 5964 NULL, gen_helper_sve2_usubl_h, 5965 gen_helper_sve2_usubl_s, gen_helper_sve2_usubl_d, 5966 }; 5967 TRANS_FEAT(USUBLB, aa64_sve2, gen_gvec_ool_arg_zzz, 5968 usubl_fns[a->esz], a, 0) 5969 TRANS_FEAT(USUBLT, aa64_sve2, gen_gvec_ool_arg_zzz, 5970 usubl_fns[a->esz], a, 3) 5971 5972 static gen_helper_gvec_3 * const uabdl_fns[4] = { 5973 NULL, gen_helper_sve2_uabdl_h, 5974 gen_helper_sve2_uabdl_s, gen_helper_sve2_uabdl_d, 5975 }; 5976 TRANS_FEAT(UABDLB, aa64_sve2, gen_gvec_ool_arg_zzz, 5977 uabdl_fns[a->esz], a, 0) 5978 TRANS_FEAT(UABDLT, aa64_sve2, gen_gvec_ool_arg_zzz, 5979 uabdl_fns[a->esz], a, 3) 5980 5981 static gen_helper_gvec_3 * const sqdmull_fns[4] = { 5982 NULL, gen_helper_sve2_sqdmull_zzz_h, 5983 gen_helper_sve2_sqdmull_zzz_s, gen_helper_sve2_sqdmull_zzz_d, 5984 }; 5985 TRANS_FEAT(SQDMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5986 sqdmull_fns[a->esz], a, 0) 5987 TRANS_FEAT(SQDMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5988 sqdmull_fns[a->esz], a, 3) 5989 5990 static gen_helper_gvec_3 * const smull_fns[4] = { 5991 NULL, gen_helper_sve2_smull_zzz_h, 5992 gen_helper_sve2_smull_zzz_s, gen_helper_sve2_smull_zzz_d, 5993 }; 5994 TRANS_FEAT(SMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5995 smull_fns[a->esz], a, 0) 5996 TRANS_FEAT(SMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5997 smull_fns[a->esz], a, 3) 5998 5999 static gen_helper_gvec_3 * const umull_fns[4] = { 6000 NULL, gen_helper_sve2_umull_zzz_h, 6001 gen_helper_sve2_umull_zzz_s, gen_helper_sve2_umull_zzz_d, 6002 }; 6003 TRANS_FEAT(UMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 6004 umull_fns[a->esz], a, 0) 6005 TRANS_FEAT(UMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 6006 umull_fns[a->esz], a, 3) 6007 6008 static gen_helper_gvec_3 * const eoril_fns[4] = { 6009 gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h, 6010 gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d, 6011 }; 6012 TRANS_FEAT(EORBT, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 2) 6013 TRANS_FEAT(EORTB, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 1) 6014 6015 static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel) 6016 { 6017 static gen_helper_gvec_3 * const fns[4] = { 6018 gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h, 6019 NULL, gen_helper_sve2_pmull_d, 6020 }; 6021 6022 if (a->esz == 0) { 6023 if (!dc_isar_feature(aa64_sve2_pmull128, s)) { 6024 return false; 6025 } 6026 s->is_nonstreaming = true; 6027 } else if (!dc_isar_feature(aa64_sve, s)) { 6028 return false; 6029 } 6030 return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel); 6031 } 6032 6033 TRANS_FEAT(PMULLB, aa64_sve2, do_trans_pmull, a, false) 6034 TRANS_FEAT(PMULLT, aa64_sve2, do_trans_pmull, a, true) 6035 6036 static gen_helper_gvec_3 * const saddw_fns[4] = { 6037 NULL, gen_helper_sve2_saddw_h, 6038 gen_helper_sve2_saddw_s, gen_helper_sve2_saddw_d, 6039 }; 6040 TRANS_FEAT(SADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 0) 6041 TRANS_FEAT(SADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 1) 6042 6043 static gen_helper_gvec_3 * const ssubw_fns[4] = { 6044 NULL, gen_helper_sve2_ssubw_h, 6045 gen_helper_sve2_ssubw_s, gen_helper_sve2_ssubw_d, 6046 }; 6047 TRANS_FEAT(SSUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 0) 6048 TRANS_FEAT(SSUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 1) 6049 6050 static gen_helper_gvec_3 * const uaddw_fns[4] = { 6051 NULL, gen_helper_sve2_uaddw_h, 6052 gen_helper_sve2_uaddw_s, gen_helper_sve2_uaddw_d, 6053 }; 6054 TRANS_FEAT(UADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 0) 6055 TRANS_FEAT(UADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 1) 6056 6057 static gen_helper_gvec_3 * const usubw_fns[4] = { 6058 NULL, gen_helper_sve2_usubw_h, 6059 gen_helper_sve2_usubw_s, gen_helper_sve2_usubw_d, 6060 }; 6061 TRANS_FEAT(USUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 0) 6062 TRANS_FEAT(USUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 1) 6063 6064 static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) 6065 { 6066 int top = imm & 1; 6067 int shl = imm >> 1; 6068 int halfbits = 4 << vece; 6069 6070 if (top) { 6071 if (shl == halfbits) { 6072 tcg_gen_and_vec(vece, d, n, 6073 tcg_constant_vec_matching(d, vece, 6074 MAKE_64BIT_MASK(halfbits, halfbits))); 6075 } else { 6076 tcg_gen_sari_vec(vece, d, n, halfbits); 6077 tcg_gen_shli_vec(vece, d, d, shl); 6078 } 6079 } else { 6080 tcg_gen_shli_vec(vece, d, n, halfbits); 6081 tcg_gen_sari_vec(vece, d, d, halfbits - shl); 6082 } 6083 } 6084 6085 static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm) 6086 { 6087 int halfbits = 4 << vece; 6088 int top = imm & 1; 6089 int shl = (imm >> 1); 6090 int shift; 6091 uint64_t mask; 6092 6093 mask = MAKE_64BIT_MASK(0, halfbits); 6094 mask <<= shl; 6095 mask = dup_const(vece, mask); 6096 6097 shift = shl - top * halfbits; 6098 if (shift < 0) { 6099 tcg_gen_shri_i64(d, n, -shift); 6100 } else { 6101 tcg_gen_shli_i64(d, n, shift); 6102 } 6103 tcg_gen_andi_i64(d, d, mask); 6104 } 6105 6106 static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm) 6107 { 6108 gen_ushll_i64(MO_16, d, n, imm); 6109 } 6110 6111 static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm) 6112 { 6113 gen_ushll_i64(MO_32, d, n, imm); 6114 } 6115 6116 static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm) 6117 { 6118 gen_ushll_i64(MO_64, d, n, imm); 6119 } 6120 6121 static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) 6122 { 6123 int halfbits = 4 << vece; 6124 int top = imm & 1; 6125 int shl = imm >> 1; 6126 6127 if (top) { 6128 if (shl == halfbits) { 6129 tcg_gen_and_vec(vece, d, n, 6130 tcg_constant_vec_matching(d, vece, 6131 MAKE_64BIT_MASK(halfbits, halfbits))); 6132 } else { 6133 tcg_gen_shri_vec(vece, d, n, halfbits); 6134 tcg_gen_shli_vec(vece, d, d, shl); 6135 } 6136 } else { 6137 if (shl == 0) { 6138 tcg_gen_and_vec(vece, d, n, 6139 tcg_constant_vec_matching(d, vece, 6140 MAKE_64BIT_MASK(0, halfbits))); 6141 } else { 6142 tcg_gen_shli_vec(vece, d, n, halfbits); 6143 tcg_gen_shri_vec(vece, d, d, halfbits - shl); 6144 } 6145 } 6146 } 6147 6148 static bool do_shll_tb(DisasContext *s, arg_rri_esz *a, 6149 const GVecGen2i ops[3], bool sel) 6150 { 6151 6152 if (a->esz < 0 || a->esz > 2) { 6153 return false; 6154 } 6155 if (sve_access_check(s)) { 6156 unsigned vsz = vec_full_reg_size(s); 6157 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd), 6158 vec_full_reg_offset(s, a->rn), 6159 vsz, vsz, (a->imm << 1) | sel, 6160 &ops[a->esz]); 6161 } 6162 return true; 6163 } 6164 6165 static const TCGOpcode sshll_list[] = { 6166 INDEX_op_shli_vec, INDEX_op_sari_vec, 0 6167 }; 6168 static const GVecGen2i sshll_ops[3] = { 6169 { .fniv = gen_sshll_vec, 6170 .opt_opc = sshll_list, 6171 .fno = gen_helper_sve2_sshll_h, 6172 .vece = MO_16 }, 6173 { .fniv = gen_sshll_vec, 6174 .opt_opc = sshll_list, 6175 .fno = gen_helper_sve2_sshll_s, 6176 .vece = MO_32 }, 6177 { .fniv = gen_sshll_vec, 6178 .opt_opc = sshll_list, 6179 .fno = gen_helper_sve2_sshll_d, 6180 .vece = MO_64 } 6181 }; 6182 TRANS_FEAT(SSHLLB, aa64_sve2, do_shll_tb, a, sshll_ops, false) 6183 TRANS_FEAT(SSHLLT, aa64_sve2, do_shll_tb, a, sshll_ops, true) 6184 6185 static const TCGOpcode ushll_list[] = { 6186 INDEX_op_shli_vec, INDEX_op_shri_vec, 0 6187 }; 6188 static const GVecGen2i ushll_ops[3] = { 6189 { .fni8 = gen_ushll16_i64, 6190 .fniv = gen_ushll_vec, 6191 .opt_opc = ushll_list, 6192 .fno = gen_helper_sve2_ushll_h, 6193 .vece = MO_16 }, 6194 { .fni8 = gen_ushll32_i64, 6195 .fniv = gen_ushll_vec, 6196 .opt_opc = ushll_list, 6197 .fno = gen_helper_sve2_ushll_s, 6198 .vece = MO_32 }, 6199 { .fni8 = gen_ushll64_i64, 6200 .fniv = gen_ushll_vec, 6201 .opt_opc = ushll_list, 6202 .fno = gen_helper_sve2_ushll_d, 6203 .vece = MO_64 }, 6204 }; 6205 TRANS_FEAT(USHLLB, aa64_sve2, do_shll_tb, a, ushll_ops, false) 6206 TRANS_FEAT(USHLLT, aa64_sve2, do_shll_tb, a, ushll_ops, true) 6207 6208 static gen_helper_gvec_3 * const bext_fns[4] = { 6209 gen_helper_sve2_bext_b, gen_helper_sve2_bext_h, 6210 gen_helper_sve2_bext_s, gen_helper_sve2_bext_d, 6211 }; 6212 TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, 6213 bext_fns[a->esz], a, 0) 6214 6215 static gen_helper_gvec_3 * const bdep_fns[4] = { 6216 gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h, 6217 gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d, 6218 }; 6219 TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, 6220 bdep_fns[a->esz], a, 0) 6221 6222 static gen_helper_gvec_3 * const bgrp_fns[4] = { 6223 gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h, 6224 gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d, 6225 }; 6226 TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, 6227 bgrp_fns[a->esz], a, 0) 6228 6229 static gen_helper_gvec_3 * const cadd_fns[4] = { 6230 gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h, 6231 gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d, 6232 }; 6233 TRANS_FEAT(CADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz, 6234 cadd_fns[a->esz], a, 0) 6235 TRANS_FEAT(CADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz, 6236 cadd_fns[a->esz], a, 1) 6237 6238 static gen_helper_gvec_3 * const sqcadd_fns[4] = { 6239 gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h, 6240 gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d, 6241 }; 6242 TRANS_FEAT(SQCADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz, 6243 sqcadd_fns[a->esz], a, 0) 6244 TRANS_FEAT(SQCADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz, 6245 sqcadd_fns[a->esz], a, 1) 6246 6247 static gen_helper_gvec_4 * const sabal_fns[4] = { 6248 NULL, gen_helper_sve2_sabal_h, 6249 gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d, 6250 }; 6251 TRANS_FEAT(SABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 0) 6252 TRANS_FEAT(SABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 1) 6253 6254 static gen_helper_gvec_4 * const uabal_fns[4] = { 6255 NULL, gen_helper_sve2_uabal_h, 6256 gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d, 6257 }; 6258 TRANS_FEAT(UABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 0) 6259 TRANS_FEAT(UABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 1) 6260 6261 static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel) 6262 { 6263 static gen_helper_gvec_4 * const fns[2] = { 6264 gen_helper_sve2_adcl_s, 6265 gen_helper_sve2_adcl_d, 6266 }; 6267 /* 6268 * Note that in this case the ESZ field encodes both size and sign. 6269 * Split out 'subtract' into bit 1 of the data field for the helper. 6270 */ 6271 return gen_gvec_ool_arg_zzzz(s, fns[a->esz & 1], a, (a->esz & 2) | sel); 6272 } 6273 6274 TRANS_FEAT(ADCLB, aa64_sve2, do_adcl, a, false) 6275 TRANS_FEAT(ADCLT, aa64_sve2, do_adcl, a, true) 6276 6277 TRANS_FEAT(SSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ssra, a) 6278 TRANS_FEAT(USRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_usra, a) 6279 TRANS_FEAT(SRSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_srsra, a) 6280 TRANS_FEAT(URSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ursra, a) 6281 TRANS_FEAT(SRI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sri, a) 6282 TRANS_FEAT(SLI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sli, a) 6283 6284 TRANS_FEAT(SABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_saba, a) 6285 TRANS_FEAT(UABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_uaba, a) 6286 6287 static bool do_narrow_extract(DisasContext *s, arg_rri_esz *a, 6288 const GVecGen2 ops[3]) 6289 { 6290 if (a->esz < 0 || a->esz > MO_32 || a->imm != 0) { 6291 return false; 6292 } 6293 if (sve_access_check(s)) { 6294 unsigned vsz = vec_full_reg_size(s); 6295 tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd), 6296 vec_full_reg_offset(s, a->rn), 6297 vsz, vsz, &ops[a->esz]); 6298 } 6299 return true; 6300 } 6301 6302 static const TCGOpcode sqxtn_list[] = { 6303 INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0 6304 }; 6305 6306 static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6307 { 6308 int halfbits = 4 << vece; 6309 int64_t mask = (1ull << halfbits) - 1; 6310 int64_t min = -1ull << (halfbits - 1); 6311 int64_t max = -min - 1; 6312 6313 tcg_gen_smax_vec(vece, d, n, tcg_constant_vec_matching(d, vece, min)); 6314 tcg_gen_smin_vec(vece, d, d, tcg_constant_vec_matching(d, vece, max)); 6315 tcg_gen_and_vec(vece, d, d, tcg_constant_vec_matching(d, vece, mask)); 6316 } 6317 6318 static const GVecGen2 sqxtnb_ops[3] = { 6319 { .fniv = gen_sqxtnb_vec, 6320 .opt_opc = sqxtn_list, 6321 .fno = gen_helper_sve2_sqxtnb_h, 6322 .vece = MO_16 }, 6323 { .fniv = gen_sqxtnb_vec, 6324 .opt_opc = sqxtn_list, 6325 .fno = gen_helper_sve2_sqxtnb_s, 6326 .vece = MO_32 }, 6327 { .fniv = gen_sqxtnb_vec, 6328 .opt_opc = sqxtn_list, 6329 .fno = gen_helper_sve2_sqxtnb_d, 6330 .vece = MO_64 }, 6331 }; 6332 TRANS_FEAT(SQXTNB, aa64_sve2, do_narrow_extract, a, sqxtnb_ops) 6333 6334 static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6335 { 6336 int halfbits = 4 << vece; 6337 int64_t mask = (1ull << halfbits) - 1; 6338 int64_t min = -1ull << (halfbits - 1); 6339 int64_t max = -min - 1; 6340 6341 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min)); 6342 tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max)); 6343 tcg_gen_shli_vec(vece, n, n, halfbits); 6344 tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n); 6345 } 6346 6347 static const GVecGen2 sqxtnt_ops[3] = { 6348 { .fniv = gen_sqxtnt_vec, 6349 .opt_opc = sqxtn_list, 6350 .load_dest = true, 6351 .fno = gen_helper_sve2_sqxtnt_h, 6352 .vece = MO_16 }, 6353 { .fniv = gen_sqxtnt_vec, 6354 .opt_opc = sqxtn_list, 6355 .load_dest = true, 6356 .fno = gen_helper_sve2_sqxtnt_s, 6357 .vece = MO_32 }, 6358 { .fniv = gen_sqxtnt_vec, 6359 .opt_opc = sqxtn_list, 6360 .load_dest = true, 6361 .fno = gen_helper_sve2_sqxtnt_d, 6362 .vece = MO_64 }, 6363 }; 6364 TRANS_FEAT(SQXTNT, aa64_sve2, do_narrow_extract, a, sqxtnt_ops) 6365 6366 static const TCGOpcode uqxtn_list[] = { 6367 INDEX_op_shli_vec, INDEX_op_umin_vec, 0 6368 }; 6369 6370 static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6371 { 6372 int halfbits = 4 << vece; 6373 int64_t max = (1ull << halfbits) - 1; 6374 6375 tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max)); 6376 } 6377 6378 static const GVecGen2 uqxtnb_ops[3] = { 6379 { .fniv = gen_uqxtnb_vec, 6380 .opt_opc = uqxtn_list, 6381 .fno = gen_helper_sve2_uqxtnb_h, 6382 .vece = MO_16 }, 6383 { .fniv = gen_uqxtnb_vec, 6384 .opt_opc = uqxtn_list, 6385 .fno = gen_helper_sve2_uqxtnb_s, 6386 .vece = MO_32 }, 6387 { .fniv = gen_uqxtnb_vec, 6388 .opt_opc = uqxtn_list, 6389 .fno = gen_helper_sve2_uqxtnb_d, 6390 .vece = MO_64 }, 6391 }; 6392 TRANS_FEAT(UQXTNB, aa64_sve2, do_narrow_extract, a, uqxtnb_ops) 6393 6394 static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6395 { 6396 int halfbits = 4 << vece; 6397 int64_t max = (1ull << halfbits) - 1; 6398 TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); 6399 6400 tcg_gen_umin_vec(vece, n, n, maxv); 6401 tcg_gen_shli_vec(vece, n, n, halfbits); 6402 tcg_gen_bitsel_vec(vece, d, maxv, d, n); 6403 } 6404 6405 static const GVecGen2 uqxtnt_ops[3] = { 6406 { .fniv = gen_uqxtnt_vec, 6407 .opt_opc = uqxtn_list, 6408 .load_dest = true, 6409 .fno = gen_helper_sve2_uqxtnt_h, 6410 .vece = MO_16 }, 6411 { .fniv = gen_uqxtnt_vec, 6412 .opt_opc = uqxtn_list, 6413 .load_dest = true, 6414 .fno = gen_helper_sve2_uqxtnt_s, 6415 .vece = MO_32 }, 6416 { .fniv = gen_uqxtnt_vec, 6417 .opt_opc = uqxtn_list, 6418 .load_dest = true, 6419 .fno = gen_helper_sve2_uqxtnt_d, 6420 .vece = MO_64 }, 6421 }; 6422 TRANS_FEAT(UQXTNT, aa64_sve2, do_narrow_extract, a, uqxtnt_ops) 6423 6424 static const TCGOpcode sqxtun_list[] = { 6425 INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0 6426 }; 6427 6428 static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6429 { 6430 int halfbits = 4 << vece; 6431 int64_t max = (1ull << halfbits) - 1; 6432 6433 tcg_gen_smax_vec(vece, d, n, tcg_constant_vec_matching(d, vece, 0)); 6434 tcg_gen_umin_vec(vece, d, d, tcg_constant_vec_matching(d, vece, max)); 6435 } 6436 6437 static const GVecGen2 sqxtunb_ops[3] = { 6438 { .fniv = gen_sqxtunb_vec, 6439 .opt_opc = sqxtun_list, 6440 .fno = gen_helper_sve2_sqxtunb_h, 6441 .vece = MO_16 }, 6442 { .fniv = gen_sqxtunb_vec, 6443 .opt_opc = sqxtun_list, 6444 .fno = gen_helper_sve2_sqxtunb_s, 6445 .vece = MO_32 }, 6446 { .fniv = gen_sqxtunb_vec, 6447 .opt_opc = sqxtun_list, 6448 .fno = gen_helper_sve2_sqxtunb_d, 6449 .vece = MO_64 }, 6450 }; 6451 TRANS_FEAT(SQXTUNB, aa64_sve2, do_narrow_extract, a, sqxtunb_ops) 6452 6453 static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6454 { 6455 int halfbits = 4 << vece; 6456 int64_t max = (1ull << halfbits) - 1; 6457 TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); 6458 6459 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0)); 6460 tcg_gen_umin_vec(vece, n, n, maxv); 6461 tcg_gen_shli_vec(vece, n, n, halfbits); 6462 tcg_gen_bitsel_vec(vece, d, maxv, d, n); 6463 } 6464 6465 static const GVecGen2 sqxtunt_ops[3] = { 6466 { .fniv = gen_sqxtunt_vec, 6467 .opt_opc = sqxtun_list, 6468 .load_dest = true, 6469 .fno = gen_helper_sve2_sqxtunt_h, 6470 .vece = MO_16 }, 6471 { .fniv = gen_sqxtunt_vec, 6472 .opt_opc = sqxtun_list, 6473 .load_dest = true, 6474 .fno = gen_helper_sve2_sqxtunt_s, 6475 .vece = MO_32 }, 6476 { .fniv = gen_sqxtunt_vec, 6477 .opt_opc = sqxtun_list, 6478 .load_dest = true, 6479 .fno = gen_helper_sve2_sqxtunt_d, 6480 .vece = MO_64 }, 6481 }; 6482 TRANS_FEAT(SQXTUNT, aa64_sve2, do_narrow_extract, a, sqxtunt_ops) 6483 6484 static bool do_shr_narrow(DisasContext *s, arg_rri_esz *a, 6485 const GVecGen2i ops[3]) 6486 { 6487 if (a->esz < 0 || a->esz > MO_32) { 6488 return false; 6489 } 6490 assert(a->imm > 0 && a->imm <= (8 << a->esz)); 6491 if (sve_access_check(s)) { 6492 unsigned vsz = vec_full_reg_size(s); 6493 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd), 6494 vec_full_reg_offset(s, a->rn), 6495 vsz, vsz, a->imm, &ops[a->esz]); 6496 } 6497 return true; 6498 } 6499 6500 static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr) 6501 { 6502 int halfbits = 4 << vece; 6503 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits)); 6504 6505 tcg_gen_shri_i64(d, n, shr); 6506 tcg_gen_andi_i64(d, d, mask); 6507 } 6508 6509 static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6510 { 6511 gen_shrnb_i64(MO_16, d, n, shr); 6512 } 6513 6514 static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6515 { 6516 gen_shrnb_i64(MO_32, d, n, shr); 6517 } 6518 6519 static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6520 { 6521 gen_shrnb_i64(MO_64, d, n, shr); 6522 } 6523 6524 static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) 6525 { 6526 int halfbits = 4 << vece; 6527 uint64_t mask = MAKE_64BIT_MASK(0, halfbits); 6528 6529 tcg_gen_shri_vec(vece, n, n, shr); 6530 tcg_gen_and_vec(vece, d, n, tcg_constant_vec_matching(d, vece, mask)); 6531 } 6532 6533 static const TCGOpcode shrnb_vec_list[] = { INDEX_op_shri_vec, 0 }; 6534 static const GVecGen2i shrnb_ops[3] = { 6535 { .fni8 = gen_shrnb16_i64, 6536 .fniv = gen_shrnb_vec, 6537 .opt_opc = shrnb_vec_list, 6538 .fno = gen_helper_sve2_shrnb_h, 6539 .vece = MO_16 }, 6540 { .fni8 = gen_shrnb32_i64, 6541 .fniv = gen_shrnb_vec, 6542 .opt_opc = shrnb_vec_list, 6543 .fno = gen_helper_sve2_shrnb_s, 6544 .vece = MO_32 }, 6545 { .fni8 = gen_shrnb64_i64, 6546 .fniv = gen_shrnb_vec, 6547 .opt_opc = shrnb_vec_list, 6548 .fno = gen_helper_sve2_shrnb_d, 6549 .vece = MO_64 }, 6550 }; 6551 TRANS_FEAT(SHRNB, aa64_sve2, do_shr_narrow, a, shrnb_ops) 6552 6553 static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr) 6554 { 6555 int halfbits = 4 << vece; 6556 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits)); 6557 6558 tcg_gen_shli_i64(n, n, halfbits - shr); 6559 tcg_gen_andi_i64(n, n, ~mask); 6560 tcg_gen_andi_i64(d, d, mask); 6561 tcg_gen_or_i64(d, d, n); 6562 } 6563 6564 static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6565 { 6566 gen_shrnt_i64(MO_16, d, n, shr); 6567 } 6568 6569 static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6570 { 6571 gen_shrnt_i64(MO_32, d, n, shr); 6572 } 6573 6574 static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6575 { 6576 tcg_gen_shri_i64(n, n, shr); 6577 tcg_gen_deposit_i64(d, d, n, 32, 32); 6578 } 6579 6580 static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) 6581 { 6582 int halfbits = 4 << vece; 6583 uint64_t mask = MAKE_64BIT_MASK(0, halfbits); 6584 6585 tcg_gen_shli_vec(vece, n, n, halfbits - shr); 6586 tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n); 6587 } 6588 6589 static const TCGOpcode shrnt_vec_list[] = { INDEX_op_shli_vec, 0 }; 6590 static const GVecGen2i shrnt_ops[3] = { 6591 { .fni8 = gen_shrnt16_i64, 6592 .fniv = gen_shrnt_vec, 6593 .opt_opc = shrnt_vec_list, 6594 .load_dest = true, 6595 .fno = gen_helper_sve2_shrnt_h, 6596 .vece = MO_16 }, 6597 { .fni8 = gen_shrnt32_i64, 6598 .fniv = gen_shrnt_vec, 6599 .opt_opc = shrnt_vec_list, 6600 .load_dest = true, 6601 .fno = gen_helper_sve2_shrnt_s, 6602 .vece = MO_32 }, 6603 { .fni8 = gen_shrnt64_i64, 6604 .fniv = gen_shrnt_vec, 6605 .opt_opc = shrnt_vec_list, 6606 .load_dest = true, 6607 .fno = gen_helper_sve2_shrnt_d, 6608 .vece = MO_64 }, 6609 }; 6610 TRANS_FEAT(SHRNT, aa64_sve2, do_shr_narrow, a, shrnt_ops) 6611 6612 static const GVecGen2i rshrnb_ops[3] = { 6613 { .fno = gen_helper_sve2_rshrnb_h }, 6614 { .fno = gen_helper_sve2_rshrnb_s }, 6615 { .fno = gen_helper_sve2_rshrnb_d }, 6616 }; 6617 TRANS_FEAT(RSHRNB, aa64_sve2, do_shr_narrow, a, rshrnb_ops) 6618 6619 static const GVecGen2i rshrnt_ops[3] = { 6620 { .fno = gen_helper_sve2_rshrnt_h }, 6621 { .fno = gen_helper_sve2_rshrnt_s }, 6622 { .fno = gen_helper_sve2_rshrnt_d }, 6623 }; 6624 TRANS_FEAT(RSHRNT, aa64_sve2, do_shr_narrow, a, rshrnt_ops) 6625 6626 static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d, 6627 TCGv_vec n, int64_t shr) 6628 { 6629 int halfbits = 4 << vece; 6630 uint64_t max = MAKE_64BIT_MASK(0, halfbits); 6631 6632 tcg_gen_sari_vec(vece, n, n, shr); 6633 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0)); 6634 tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max)); 6635 } 6636 6637 static const TCGOpcode sqshrunb_vec_list[] = { 6638 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0 6639 }; 6640 static const GVecGen2i sqshrunb_ops[3] = { 6641 { .fniv = gen_sqshrunb_vec, 6642 .opt_opc = sqshrunb_vec_list, 6643 .fno = gen_helper_sve2_sqshrunb_h, 6644 .vece = MO_16 }, 6645 { .fniv = gen_sqshrunb_vec, 6646 .opt_opc = sqshrunb_vec_list, 6647 .fno = gen_helper_sve2_sqshrunb_s, 6648 .vece = MO_32 }, 6649 { .fniv = gen_sqshrunb_vec, 6650 .opt_opc = sqshrunb_vec_list, 6651 .fno = gen_helper_sve2_sqshrunb_d, 6652 .vece = MO_64 }, 6653 }; 6654 TRANS_FEAT(SQSHRUNB, aa64_sve2, do_shr_narrow, a, sqshrunb_ops) 6655 6656 static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d, 6657 TCGv_vec n, int64_t shr) 6658 { 6659 int halfbits = 4 << vece; 6660 uint64_t max = MAKE_64BIT_MASK(0, halfbits); 6661 TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); 6662 6663 tcg_gen_sari_vec(vece, n, n, shr); 6664 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0)); 6665 tcg_gen_umin_vec(vece, n, n, maxv); 6666 tcg_gen_shli_vec(vece, n, n, halfbits); 6667 tcg_gen_bitsel_vec(vece, d, maxv, d, n); 6668 } 6669 6670 static const TCGOpcode sqshrunt_vec_list[] = { 6671 INDEX_op_shli_vec, INDEX_op_sari_vec, 6672 INDEX_op_smax_vec, INDEX_op_umin_vec, 0 6673 }; 6674 static const GVecGen2i sqshrunt_ops[3] = { 6675 { .fniv = gen_sqshrunt_vec, 6676 .opt_opc = sqshrunt_vec_list, 6677 .load_dest = true, 6678 .fno = gen_helper_sve2_sqshrunt_h, 6679 .vece = MO_16 }, 6680 { .fniv = gen_sqshrunt_vec, 6681 .opt_opc = sqshrunt_vec_list, 6682 .load_dest = true, 6683 .fno = gen_helper_sve2_sqshrunt_s, 6684 .vece = MO_32 }, 6685 { .fniv = gen_sqshrunt_vec, 6686 .opt_opc = sqshrunt_vec_list, 6687 .load_dest = true, 6688 .fno = gen_helper_sve2_sqshrunt_d, 6689 .vece = MO_64 }, 6690 }; 6691 TRANS_FEAT(SQSHRUNT, aa64_sve2, do_shr_narrow, a, sqshrunt_ops) 6692 6693 static const GVecGen2i sqrshrunb_ops[3] = { 6694 { .fno = gen_helper_sve2_sqrshrunb_h }, 6695 { .fno = gen_helper_sve2_sqrshrunb_s }, 6696 { .fno = gen_helper_sve2_sqrshrunb_d }, 6697 }; 6698 TRANS_FEAT(SQRSHRUNB, aa64_sve2, do_shr_narrow, a, sqrshrunb_ops) 6699 6700 static const GVecGen2i sqrshrunt_ops[3] = { 6701 { .fno = gen_helper_sve2_sqrshrunt_h }, 6702 { .fno = gen_helper_sve2_sqrshrunt_s }, 6703 { .fno = gen_helper_sve2_sqrshrunt_d }, 6704 }; 6705 TRANS_FEAT(SQRSHRUNT, aa64_sve2, do_shr_narrow, a, sqrshrunt_ops) 6706 6707 static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d, 6708 TCGv_vec n, int64_t shr) 6709 { 6710 int halfbits = 4 << vece; 6711 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1); 6712 int64_t min = -max - 1; 6713 int64_t mask = MAKE_64BIT_MASK(0, halfbits); 6714 6715 tcg_gen_sari_vec(vece, n, n, shr); 6716 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min)); 6717 tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max)); 6718 tcg_gen_and_vec(vece, d, n, tcg_constant_vec_matching(d, vece, mask)); 6719 } 6720 6721 static const TCGOpcode sqshrnb_vec_list[] = { 6722 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0 6723 }; 6724 static const GVecGen2i sqshrnb_ops[3] = { 6725 { .fniv = gen_sqshrnb_vec, 6726 .opt_opc = sqshrnb_vec_list, 6727 .fno = gen_helper_sve2_sqshrnb_h, 6728 .vece = MO_16 }, 6729 { .fniv = gen_sqshrnb_vec, 6730 .opt_opc = sqshrnb_vec_list, 6731 .fno = gen_helper_sve2_sqshrnb_s, 6732 .vece = MO_32 }, 6733 { .fniv = gen_sqshrnb_vec, 6734 .opt_opc = sqshrnb_vec_list, 6735 .fno = gen_helper_sve2_sqshrnb_d, 6736 .vece = MO_64 }, 6737 }; 6738 TRANS_FEAT(SQSHRNB, aa64_sve2, do_shr_narrow, a, sqshrnb_ops) 6739 6740 static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d, 6741 TCGv_vec n, int64_t shr) 6742 { 6743 int halfbits = 4 << vece; 6744 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1); 6745 int64_t min = -max - 1; 6746 int64_t mask = MAKE_64BIT_MASK(0, halfbits); 6747 6748 tcg_gen_sari_vec(vece, n, n, shr); 6749 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min)); 6750 tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max)); 6751 tcg_gen_shli_vec(vece, n, n, halfbits); 6752 tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n); 6753 } 6754 6755 static const TCGOpcode sqshrnt_vec_list[] = { 6756 INDEX_op_shli_vec, INDEX_op_sari_vec, 6757 INDEX_op_smax_vec, INDEX_op_smin_vec, 0 6758 }; 6759 static const GVecGen2i sqshrnt_ops[3] = { 6760 { .fniv = gen_sqshrnt_vec, 6761 .opt_opc = sqshrnt_vec_list, 6762 .load_dest = true, 6763 .fno = gen_helper_sve2_sqshrnt_h, 6764 .vece = MO_16 }, 6765 { .fniv = gen_sqshrnt_vec, 6766 .opt_opc = sqshrnt_vec_list, 6767 .load_dest = true, 6768 .fno = gen_helper_sve2_sqshrnt_s, 6769 .vece = MO_32 }, 6770 { .fniv = gen_sqshrnt_vec, 6771 .opt_opc = sqshrnt_vec_list, 6772 .load_dest = true, 6773 .fno = gen_helper_sve2_sqshrnt_d, 6774 .vece = MO_64 }, 6775 }; 6776 TRANS_FEAT(SQSHRNT, aa64_sve2, do_shr_narrow, a, sqshrnt_ops) 6777 6778 static const GVecGen2i sqrshrnb_ops[3] = { 6779 { .fno = gen_helper_sve2_sqrshrnb_h }, 6780 { .fno = gen_helper_sve2_sqrshrnb_s }, 6781 { .fno = gen_helper_sve2_sqrshrnb_d }, 6782 }; 6783 TRANS_FEAT(SQRSHRNB, aa64_sve2, do_shr_narrow, a, sqrshrnb_ops) 6784 6785 static const GVecGen2i sqrshrnt_ops[3] = { 6786 { .fno = gen_helper_sve2_sqrshrnt_h }, 6787 { .fno = gen_helper_sve2_sqrshrnt_s }, 6788 { .fno = gen_helper_sve2_sqrshrnt_d }, 6789 }; 6790 TRANS_FEAT(SQRSHRNT, aa64_sve2, do_shr_narrow, a, sqrshrnt_ops) 6791 6792 static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d, 6793 TCGv_vec n, int64_t shr) 6794 { 6795 int halfbits = 4 << vece; 6796 int64_t max = MAKE_64BIT_MASK(0, halfbits); 6797 6798 tcg_gen_shri_vec(vece, n, n, shr); 6799 tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max)); 6800 } 6801 6802 static const TCGOpcode uqshrnb_vec_list[] = { 6803 INDEX_op_shri_vec, INDEX_op_umin_vec, 0 6804 }; 6805 static const GVecGen2i uqshrnb_ops[3] = { 6806 { .fniv = gen_uqshrnb_vec, 6807 .opt_opc = uqshrnb_vec_list, 6808 .fno = gen_helper_sve2_uqshrnb_h, 6809 .vece = MO_16 }, 6810 { .fniv = gen_uqshrnb_vec, 6811 .opt_opc = uqshrnb_vec_list, 6812 .fno = gen_helper_sve2_uqshrnb_s, 6813 .vece = MO_32 }, 6814 { .fniv = gen_uqshrnb_vec, 6815 .opt_opc = uqshrnb_vec_list, 6816 .fno = gen_helper_sve2_uqshrnb_d, 6817 .vece = MO_64 }, 6818 }; 6819 TRANS_FEAT(UQSHRNB, aa64_sve2, do_shr_narrow, a, uqshrnb_ops) 6820 6821 static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d, 6822 TCGv_vec n, int64_t shr) 6823 { 6824 int halfbits = 4 << vece; 6825 int64_t max = MAKE_64BIT_MASK(0, halfbits); 6826 TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); 6827 6828 tcg_gen_shri_vec(vece, n, n, shr); 6829 tcg_gen_umin_vec(vece, n, n, maxv); 6830 tcg_gen_shli_vec(vece, n, n, halfbits); 6831 tcg_gen_bitsel_vec(vece, d, maxv, d, n); 6832 } 6833 6834 static const TCGOpcode uqshrnt_vec_list[] = { 6835 INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0 6836 }; 6837 static const GVecGen2i uqshrnt_ops[3] = { 6838 { .fniv = gen_uqshrnt_vec, 6839 .opt_opc = uqshrnt_vec_list, 6840 .load_dest = true, 6841 .fno = gen_helper_sve2_uqshrnt_h, 6842 .vece = MO_16 }, 6843 { .fniv = gen_uqshrnt_vec, 6844 .opt_opc = uqshrnt_vec_list, 6845 .load_dest = true, 6846 .fno = gen_helper_sve2_uqshrnt_s, 6847 .vece = MO_32 }, 6848 { .fniv = gen_uqshrnt_vec, 6849 .opt_opc = uqshrnt_vec_list, 6850 .load_dest = true, 6851 .fno = gen_helper_sve2_uqshrnt_d, 6852 .vece = MO_64 }, 6853 }; 6854 TRANS_FEAT(UQSHRNT, aa64_sve2, do_shr_narrow, a, uqshrnt_ops) 6855 6856 static const GVecGen2i uqrshrnb_ops[3] = { 6857 { .fno = gen_helper_sve2_uqrshrnb_h }, 6858 { .fno = gen_helper_sve2_uqrshrnb_s }, 6859 { .fno = gen_helper_sve2_uqrshrnb_d }, 6860 }; 6861 TRANS_FEAT(UQRSHRNB, aa64_sve2, do_shr_narrow, a, uqrshrnb_ops) 6862 6863 static const GVecGen2i uqrshrnt_ops[3] = { 6864 { .fno = gen_helper_sve2_uqrshrnt_h }, 6865 { .fno = gen_helper_sve2_uqrshrnt_s }, 6866 { .fno = gen_helper_sve2_uqrshrnt_d }, 6867 }; 6868 TRANS_FEAT(UQRSHRNT, aa64_sve2, do_shr_narrow, a, uqrshrnt_ops) 6869 6870 #define DO_SVE2_ZZZ_NARROW(NAME, name) \ 6871 static gen_helper_gvec_3 * const name##_fns[4] = { \ 6872 NULL, gen_helper_sve2_##name##_h, \ 6873 gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \ 6874 }; \ 6875 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzz, \ 6876 name##_fns[a->esz], a, 0) 6877 6878 DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb) 6879 DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt) 6880 DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb) 6881 DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt) 6882 6883 DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb) 6884 DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt) 6885 DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb) 6886 DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt) 6887 6888 static gen_helper_gvec_flags_4 * const match_fns[4] = { 6889 gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL 6890 }; 6891 TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz]) 6892 6893 static gen_helper_gvec_flags_4 * const nmatch_fns[4] = { 6894 gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL 6895 }; 6896 TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz]) 6897 6898 static gen_helper_gvec_4 * const histcnt_fns[4] = { 6899 NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d 6900 }; 6901 TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz, 6902 histcnt_fns[a->esz], a, 0) 6903 6904 TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz, 6905 a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0) 6906 6907 DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz) 6908 DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz) 6909 DO_ZPZZ_FP(FMINNMP, aa64_sve2, sve2_fminnmp_zpzz) 6910 DO_ZPZZ_FP(FMAXP, aa64_sve2, sve2_fmaxp_zpzz) 6911 DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz) 6912 6913 /* 6914 * SVE Integer Multiply-Add (unpredicated) 6915 */ 6916 6917 TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, 6918 gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra, 6919 0, FPST_A64) 6920 TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, 6921 gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra, 6922 0, FPST_A64) 6923 6924 static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = { 6925 NULL, gen_helper_sve2_sqdmlal_zzzw_h, 6926 gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d, 6927 }; 6928 TRANS_FEAT(SQDMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6929 sqdmlal_zzzw_fns[a->esz], a, 0) 6930 TRANS_FEAT(SQDMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6931 sqdmlal_zzzw_fns[a->esz], a, 3) 6932 TRANS_FEAT(SQDMLALBT, aa64_sve2, gen_gvec_ool_arg_zzzz, 6933 sqdmlal_zzzw_fns[a->esz], a, 2) 6934 6935 static gen_helper_gvec_4 * const sqdmlsl_zzzw_fns[] = { 6936 NULL, gen_helper_sve2_sqdmlsl_zzzw_h, 6937 gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d, 6938 }; 6939 TRANS_FEAT(SQDMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6940 sqdmlsl_zzzw_fns[a->esz], a, 0) 6941 TRANS_FEAT(SQDMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6942 sqdmlsl_zzzw_fns[a->esz], a, 3) 6943 TRANS_FEAT(SQDMLSLBT, aa64_sve2, gen_gvec_ool_arg_zzzz, 6944 sqdmlsl_zzzw_fns[a->esz], a, 2) 6945 6946 static gen_helper_gvec_4 * const sqrdmlah_fns[] = { 6947 gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h, 6948 gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d, 6949 }; 6950 TRANS_FEAT(SQRDMLAH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz, 6951 sqrdmlah_fns[a->esz], a, 0) 6952 6953 static gen_helper_gvec_4 * const sqrdmlsh_fns[] = { 6954 gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h, 6955 gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d, 6956 }; 6957 TRANS_FEAT(SQRDMLSH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz, 6958 sqrdmlsh_fns[a->esz], a, 0) 6959 6960 static gen_helper_gvec_4 * const smlal_zzzw_fns[] = { 6961 NULL, gen_helper_sve2_smlal_zzzw_h, 6962 gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d, 6963 }; 6964 TRANS_FEAT(SMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6965 smlal_zzzw_fns[a->esz], a, 0) 6966 TRANS_FEAT(SMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6967 smlal_zzzw_fns[a->esz], a, 1) 6968 6969 static gen_helper_gvec_4 * const umlal_zzzw_fns[] = { 6970 NULL, gen_helper_sve2_umlal_zzzw_h, 6971 gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d, 6972 }; 6973 TRANS_FEAT(UMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6974 umlal_zzzw_fns[a->esz], a, 0) 6975 TRANS_FEAT(UMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6976 umlal_zzzw_fns[a->esz], a, 1) 6977 6978 static gen_helper_gvec_4 * const smlsl_zzzw_fns[] = { 6979 NULL, gen_helper_sve2_smlsl_zzzw_h, 6980 gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d, 6981 }; 6982 TRANS_FEAT(SMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6983 smlsl_zzzw_fns[a->esz], a, 0) 6984 TRANS_FEAT(SMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6985 smlsl_zzzw_fns[a->esz], a, 1) 6986 6987 static gen_helper_gvec_4 * const umlsl_zzzw_fns[] = { 6988 NULL, gen_helper_sve2_umlsl_zzzw_h, 6989 gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d, 6990 }; 6991 TRANS_FEAT(UMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6992 umlsl_zzzw_fns[a->esz], a, 0) 6993 TRANS_FEAT(UMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 6994 umlsl_zzzw_fns[a->esz], a, 1) 6995 6996 static gen_helper_gvec_4 * const cmla_fns[] = { 6997 gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h, 6998 gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d, 6999 }; 7000 TRANS_FEAT(CMLA_zzzz, aa64_sve2, gen_gvec_ool_zzzz, 7001 cmla_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot) 7002 7003 static gen_helper_gvec_4 * const cdot_fns[] = { 7004 NULL, NULL, gen_helper_sve2_cdot_zzzz_s, gen_helper_sve2_cdot_zzzz_d 7005 }; 7006 TRANS_FEAT(CDOT_zzzz, aa64_sve2, gen_gvec_ool_zzzz, 7007 cdot_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot) 7008 7009 static gen_helper_gvec_4 * const sqrdcmlah_fns[] = { 7010 gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h, 7011 gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d, 7012 }; 7013 TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz, 7014 sqrdcmlah_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot) 7015 7016 TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, 7017 a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0) 7018 7019 TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz, 7020 gen_helper_crypto_aesmc, a->rd, a->rd, 0) 7021 TRANS_FEAT_NONSTREAMING(AESIMC, aa64_sve2_aes, gen_gvec_ool_zz, 7022 gen_helper_crypto_aesimc, a->rd, a->rd, 0) 7023 7024 TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz, 7025 gen_helper_crypto_aese, a, 0) 7026 TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz, 7027 gen_helper_crypto_aesd, a, 0) 7028 7029 TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, 7030 gen_helper_crypto_sm4e, a, 0) 7031 TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, 7032 gen_helper_crypto_sm4ekey, a, 0) 7033 7034 TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, 7035 gen_gvec_rax1, a) 7036 7037 TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz, 7038 gen_helper_sve2_fcvtnt_sh, a, 0, FPST_A64) 7039 TRANS_FEAT(FCVTNT_ds, aa64_sve2, gen_gvec_fpst_arg_zpz, 7040 gen_helper_sve2_fcvtnt_ds, a, 0, FPST_A64) 7041 7042 TRANS_FEAT(BFCVTNT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, 7043 gen_helper_sve_bfcvtnt, a, 0, FPST_A64) 7044 7045 TRANS_FEAT(FCVTLT_hs, aa64_sve2, gen_gvec_fpst_arg_zpz, 7046 gen_helper_sve2_fcvtlt_hs, a, 0, FPST_A64) 7047 TRANS_FEAT(FCVTLT_sd, aa64_sve2, gen_gvec_fpst_arg_zpz, 7048 gen_helper_sve2_fcvtlt_sd, a, 0, FPST_A64) 7049 7050 TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a, 7051 FPROUNDING_ODD, gen_helper_sve_fcvt_ds) 7052 TRANS_FEAT(FCVTXNT_ds, aa64_sve2, do_frint_mode, a, 7053 FPROUNDING_ODD, gen_helper_sve2_fcvtnt_ds) 7054 7055 static gen_helper_gvec_3_ptr * const flogb_fns[] = { 7056 NULL, gen_helper_flogb_h, 7057 gen_helper_flogb_s, gen_helper_flogb_d 7058 }; 7059 TRANS_FEAT(FLOGB, aa64_sve2, gen_gvec_fpst_arg_zpz, flogb_fns[a->esz], 7060 a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 7061 7062 static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel) 7063 { 7064 return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzzw_s, 7065 a->rd, a->rn, a->rm, a->ra, 7066 (sel << 1) | sub, tcg_env); 7067 } 7068 7069 TRANS_FEAT(FMLALB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, false) 7070 TRANS_FEAT(FMLALT_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, true) 7071 TRANS_FEAT(FMLSLB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, true, false) 7072 TRANS_FEAT(FMLSLT_zzzw, aa64_sve2, do_FMLAL_zzzw, a, true, true) 7073 7074 static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel) 7075 { 7076 return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s, 7077 a->rd, a->rn, a->rm, a->ra, 7078 (a->index << 2) | (sel << 1) | sub, tcg_env); 7079 } 7080 7081 TRANS_FEAT(FMLALB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, false) 7082 TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true) 7083 TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false) 7084 TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true) 7085 7086 TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, 7087 gen_helper_gvec_smmla_b, a, 0) 7088 TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, 7089 gen_helper_gvec_usmmla_b, a, 0) 7090 TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, 7091 gen_helper_gvec_ummla_b, a, 0) 7092 7093 TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_env_arg_zzzz, 7094 gen_helper_gvec_bfdot, a, 0) 7095 TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_env_arg_zzxz, 7096 gen_helper_gvec_bfdot_idx, a) 7097 7098 TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_env_arg_zzzz, 7099 gen_helper_gvec_bfmmla, a, 0) 7100 7101 static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) 7102 { 7103 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal, 7104 a->rd, a->rn, a->rm, a->ra, sel, FPST_A64); 7105 } 7106 7107 TRANS_FEAT(BFMLALB_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, false) 7108 TRANS_FEAT(BFMLALT_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, true) 7109 7110 static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel) 7111 { 7112 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx, 7113 a->rd, a->rn, a->rm, a->ra, 7114 (a->index << 1) | sel, FPST_A64); 7115 } 7116 7117 TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false) 7118 TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true) 7119 7120 static bool trans_PSEL(DisasContext *s, arg_psel *a) 7121 { 7122 int vl = vec_full_reg_size(s); 7123 int pl = pred_gvec_reg_size(s); 7124 int elements = vl >> a->esz; 7125 TCGv_i64 tmp, didx, dbit; 7126 TCGv_ptr ptr; 7127 7128 if (!dc_isar_feature(aa64_sme, s)) { 7129 return false; 7130 } 7131 if (!sve_access_check(s)) { 7132 return true; 7133 } 7134 7135 tmp = tcg_temp_new_i64(); 7136 dbit = tcg_temp_new_i64(); 7137 didx = tcg_temp_new_i64(); 7138 ptr = tcg_temp_new_ptr(); 7139 7140 /* Compute the predicate element. */ 7141 tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm); 7142 if (is_power_of_2(elements)) { 7143 tcg_gen_andi_i64(tmp, tmp, elements - 1); 7144 } else { 7145 tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements)); 7146 } 7147 7148 /* Extract the predicate byte and bit indices. */ 7149 tcg_gen_shli_i64(tmp, tmp, a->esz); 7150 tcg_gen_andi_i64(dbit, tmp, 7); 7151 tcg_gen_shri_i64(didx, tmp, 3); 7152 if (HOST_BIG_ENDIAN) { 7153 tcg_gen_xori_i64(didx, didx, 7); 7154 } 7155 7156 /* Load the predicate word. */ 7157 tcg_gen_trunc_i64_ptr(ptr, didx); 7158 tcg_gen_add_ptr(ptr, ptr, tcg_env); 7159 tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm)); 7160 7161 /* Extract the predicate bit and replicate to MO_64. */ 7162 tcg_gen_shr_i64(tmp, tmp, dbit); 7163 tcg_gen_andi_i64(tmp, tmp, 1); 7164 tcg_gen_neg_i64(tmp, tmp); 7165 7166 /* Apply to either copy the source, or write zeros. */ 7167 tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd), 7168 pred_full_reg_offset(s, a->pn), tmp, pl, pl); 7169 return true; 7170 } 7171 7172 static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) 7173 { 7174 tcg_gen_smax_i32(d, a, n); 7175 tcg_gen_smin_i32(d, d, m); 7176 } 7177 7178 static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a) 7179 { 7180 tcg_gen_smax_i64(d, a, n); 7181 tcg_gen_smin_i64(d, d, m); 7182 } 7183 7184 static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n, 7185 TCGv_vec m, TCGv_vec a) 7186 { 7187 tcg_gen_smax_vec(vece, d, a, n); 7188 tcg_gen_smin_vec(vece, d, d, m); 7189 } 7190 7191 static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, 7192 uint32_t a, uint32_t oprsz, uint32_t maxsz) 7193 { 7194 static const TCGOpcode vecop[] = { 7195 INDEX_op_smin_vec, INDEX_op_smax_vec, 0 7196 }; 7197 static const GVecGen4 ops[4] = { 7198 { .fniv = gen_sclamp_vec, 7199 .fno = gen_helper_gvec_sclamp_b, 7200 .opt_opc = vecop, 7201 .vece = MO_8 }, 7202 { .fniv = gen_sclamp_vec, 7203 .fno = gen_helper_gvec_sclamp_h, 7204 .opt_opc = vecop, 7205 .vece = MO_16 }, 7206 { .fni4 = gen_sclamp_i32, 7207 .fniv = gen_sclamp_vec, 7208 .fno = gen_helper_gvec_sclamp_s, 7209 .opt_opc = vecop, 7210 .vece = MO_32 }, 7211 { .fni8 = gen_sclamp_i64, 7212 .fniv = gen_sclamp_vec, 7213 .fno = gen_helper_gvec_sclamp_d, 7214 .opt_opc = vecop, 7215 .vece = MO_64, 7216 .prefer_i64 = TCG_TARGET_REG_BITS == 64 } 7217 }; 7218 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); 7219 } 7220 7221 TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a) 7222 7223 static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) 7224 { 7225 tcg_gen_umax_i32(d, a, n); 7226 tcg_gen_umin_i32(d, d, m); 7227 } 7228 7229 static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a) 7230 { 7231 tcg_gen_umax_i64(d, a, n); 7232 tcg_gen_umin_i64(d, d, m); 7233 } 7234 7235 static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n, 7236 TCGv_vec m, TCGv_vec a) 7237 { 7238 tcg_gen_umax_vec(vece, d, a, n); 7239 tcg_gen_umin_vec(vece, d, d, m); 7240 } 7241 7242 static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, 7243 uint32_t a, uint32_t oprsz, uint32_t maxsz) 7244 { 7245 static const TCGOpcode vecop[] = { 7246 INDEX_op_umin_vec, INDEX_op_umax_vec, 0 7247 }; 7248 static const GVecGen4 ops[4] = { 7249 { .fniv = gen_uclamp_vec, 7250 .fno = gen_helper_gvec_uclamp_b, 7251 .opt_opc = vecop, 7252 .vece = MO_8 }, 7253 { .fniv = gen_uclamp_vec, 7254 .fno = gen_helper_gvec_uclamp_h, 7255 .opt_opc = vecop, 7256 .vece = MO_16 }, 7257 { .fni4 = gen_uclamp_i32, 7258 .fniv = gen_uclamp_vec, 7259 .fno = gen_helper_gvec_uclamp_s, 7260 .opt_opc = vecop, 7261 .vece = MO_32 }, 7262 { .fni8 = gen_uclamp_i64, 7263 .fniv = gen_uclamp_vec, 7264 .fno = gen_helper_gvec_uclamp_d, 7265 .opt_opc = vecop, 7266 .vece = MO_64, 7267 .prefer_i64 = TCG_TARGET_REG_BITS == 64 } 7268 }; 7269 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); 7270 } 7271 7272 TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a) 7273