/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     * (1) by default, we update every lane in the vector
     * (2) VPT predication stores its state in the VPR register;
     * (3) low-overhead-branch tail predication will mask out part
     *     of the vector on the final iteration of the loop
     * (4) if EPSR.ECI is set then we must execute only some beats
     *     of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        mask &= MAKE_64BIT_MASK(0, masklen);
    }

    if ((env->condexec_bits & 0xf) == 0) {
        /*
         * ECI bits indicate which beats are already executed;
         * we handle this by effectively predicating them out.
         */
        int eci = env->condexec_bits >> 4;
        switch (eci) {
        case ECI_NONE:
            break;
        case ECI_A0:
            mask &= 0xfff0;
            break;
        case ECI_A0A1:
            mask &= 0xff00;
            break;
        case ECI_A0A1A2:
        case ECI_A0A1A2B0:
            mask &= 0xf000;
            break;
        default:
            g_assert_not_reached();
        }
    }

    return mask;
}
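
/*
 * Worked example of the mask semantics above (illustrative only): with
 * VPR.P0 = 0x0f0f and no tail or ECI predication, mve_element_mask()
 * returns 0x0f0f. A 32-bit op then tests bits 0, 4, 8 and 12, so
 * elements 0 and 2 are active while elements 1 and 3 are masked.
 */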

static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    if (mask01 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff;
    }
    if (mask23 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff00;
    }
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}


#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        /*                                                              \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned   \
         * beats so we don't care if we update part of the dest and     \
         * then take an exception.                                      \
         */                                                             \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR

/*
 * The mergemask(D, R, M) macro performs the operation "*D = R" but
 * storing only the bytes which correspond to 1 bits in M,
 * leaving other bytes in *D unchanged. We use _Generic
 * to select the correct implementation based on the type of D.
 */

static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b_data[mask & 3];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b_data[mask & 0xf];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b_data[mask & 0xff];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

#define mergemask(D, R, M)                      \
    _Generic(D,                                 \
             uint8_t *:  mergemask_ub,          \
             int8_t *:   mergemask_sb,          \
             uint16_t *: mergemask_uh,          \
             int16_t *:  mergemask_sh,          \
             uint32_t *: mergemask_uw,          \
             int32_t *:  mergemask_sw,          \
             uint64_t *: mergemask_uq,          \
             int64_t *:  mergemask_sq)(D, R, M)
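
/*
 * For example, with a uint32_t *d the macro dispatches to mergemask_uw():
 * mergemask(&d[H4(e)], r, mask) expands the low 4 bits of mask to a
 * per-byte mask and updates only those bytes of the element, which is
 * how byte-granular predication of wider elements is implemented.
 */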

void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
{
    /*
     * The generated code already replicated an 8 or 16 bit constant
     * into the 32-bit value, so we only need to write the 32-bit
     * value to all elements of the Qreg, allowing for predication.
     */
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        mergemask(&d[H4(e)], val, mask);
    }
    mve_advance_vpt(env);
}

#define DO_1OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask);       \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_CLS_B(N)   (clrsb32(N) - 24)
#define DO_CLS_H(N)   (clrsb32(N) - 16)

DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)

#define DO_CLZ_B(N)   (clz32(N) - 24)
#define DO_CLZ_H(N)   (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)

#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)
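
/*
 * VMVN is element-size agnostic: inverting bits is independent per byte,
 * and mergemask() applies the predicate at byte granularity, so a single
 * 64-bit-per-element pass gives the right answer for all element sizes.
 */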

#define DO_ABS(N)    ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N)  ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N)  ((N) & dup_const(MO_32, 0x7fffffff))

DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)

/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)

#define DO_NEG(N)    (-(N))
#define DO_FNEGH(N)  ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N)  ((N) ^ dup_const(MO_32, 0x80000000))

DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)

/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)

#define DO_2OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask);        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_U(OP, FN)                        \
    DO_2OP(OP##b, 1, uint8_t, FN)               \
    DO_2OP(OP##h, 2, uint16_t, FN)              \
    DO_2OP(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_S(OP, FN)                        \
    DO_2OP(OP##b, 1, int8_t, FN)                \
    DO_2OP(OP##h, 2, int16_t, FN)               \
    DO_2OP(OP##w, 4, int32_t, FN)

/*
 * "Long" operations where two half-sized inputs (taken from either the
 * top or the bottom of the input vector) produce a double-width result.
 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
 */
#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)               \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)],              \
                         m[H##ESIZE(le * 2 + TOP)]);                    \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
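
/*
 * For example, a "bottom" (TOP = 0) halfword operation reads input
 * elements 0, 2, 4 and 6 of each 16-bit source vector and writes the
 * four 32-bit results, while the "top" (TOP = 1) variant reads
 * elements 1, 3, 5 and 7.
 */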

#define DO_2OP_SAT(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat);          \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_SAT_U(OP, FN)                    \
    DO_2OP_SAT(OP##b, 1, uint8_t, FN)           \
    DO_2OP_SAT(OP##h, 2, uint16_t, FN)          \
    DO_2OP_SAT(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_SAT_S(OP, FN)                    \
    DO_2OP_SAT(OP##b, 1, int8_t, FN)            \
    DO_2OP_SAT(OP##h, 2, int16_t, FN)           \
    DO_2OP_SAT(OP##w, 4, int32_t, FN)

#define DO_AND(N, M)  ((N) & (M))
#define DO_BIC(N, M)  ((N) & ~(M))
#define DO_ORR(N, M)  ((N) | (M))
#define DO_ORN(N, M)  ((N) | ~(M))
#define DO_EOR(N, M)  ((N) ^ (M))

DO_2OP(vand, 8, uint64_t, DO_AND)
DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)

#define DO_ADD(N, M) ((N) + (M))
#define DO_SUB(N, M) ((N) - (M))
#define DO_MUL(N, M) ((N) * (M))

DO_2OP_U(vadd, DO_ADD)
DO_2OP_U(vsub, DO_SUB)
DO_2OP_U(vmul, DO_MUL)

DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)

DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)

/*
 * Because the computation type is at least twice as large as required,
 * these work for both signed and unsigned source types.
 */
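/*
 * For example, for byte elements 0xff * 0xff: as unsigned that is
 * 255 * 255 = 65025, whose high byte 0xfe is the desired result; the
 * same bit patterns as int8_t are -1 * -1 = 1, whose high byte is 0.
 * Both answers fall out of the same do_mulh_b() computation below.
 */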
static inline uint8_t do_mulh_b(int32_t n, int32_t m)
{
    return (n * m) >> 8;
}

static inline uint16_t do_mulh_h(int32_t n, int32_t m)
{
    return (n * m) >> 16;
}

static inline uint32_t do_mulh_w(int64_t n, int64_t m)
{
    return (n * m) >> 32;
}

static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
{
    return (n * m + (1U << 7)) >> 8;
}

static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
{
    return (n * m + (1U << 15)) >> 16;
}

static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
{
    return (n * m + (1U << 31)) >> 32;
}

DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)

DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)

#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))

DO_2OP_S(vmaxs, DO_MAX)
DO_2OP_U(vmaxu, DO_MAX)
DO_2OP_S(vmins, DO_MIN)
DO_2OP_U(vminu, DO_MIN)

#define DO_ABD(N, M)  ((N) >= (M) ? (N) - (M) : (M) - (N))

DO_2OP_S(vabds, DO_ABD)
DO_2OP_U(vabdu, DO_ABD)

static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n + m) >> 1;
}

static inline int32_t do_vhadd_s(int32_t n, int32_t m)
{
    return ((int64_t)n + m) >> 1;
}

static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n - m) >> 1;
}

static inline int32_t do_vhsub_s(int32_t n, int32_t m)
{
    return ((int64_t)n - m) >> 1;
}

DO_2OP_S(vhadds, do_vhadd_s)
DO_2OP_U(vhaddu, do_vhadd_u)
DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)
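
/*
 * The halving helpers widen to 64 bits before the add or subtract so the
 * bit shifted out by ">> 1" is the true carry/borrow: for example
 * do_vhadd_u(0xffffffff, 0xffffffff) is 0x1fffffffe >> 1 = 0xffffffff,
 * which a 32-bit intermediate would have truncated.
 */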

#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)

DO_2OP_S(vshls, DO_VSHLS)
DO_2OP_U(vshlu, DO_VSHLU)
DO_2OP_S(vrshls, DO_VRSHLS)
DO_2OP_U(vrshlu, DO_VRSHLU)

#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)

DO_2OP_S(vrhadds, DO_RHADD_S)
DO_2OP_U(vrhaddu, DO_RHADD_U)

static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
                    uint32_t inv, uint32_t carry_in, bool update_flags)
{
    uint16_t mask = mve_element_mask(env);
    unsigned e;

    /* If any additions trigger, we will update flags. */
    if (mask & 0x1111) {
        update_flags = true;
    }

    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        uint64_t r = carry_in;
        r += n[H4(e)];
        r += m[H4(e)] ^ inv;
        if (mask & 1) {
            carry_in = r >> 32;
        }
        mergemask(&d[H4(e)], r, mask);
    }

    if (update_flags) {
        /* Store C, clear NZV. */
        env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
        env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
    }
    mve_advance_vpt(env);
}

void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, 0, carry_in, false);
}

void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, -1, carry_in, false);
}


void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, 0, 0, true);
}

void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, -1, 1, true);
}
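
/*
 * The subtract forms reuse do_vadc() by passing inv = -1 so that each m
 * element is complemented before the add, i.e. n + ~m + carry, which is
 * the usual ARM subtract-with-carry; VSBCI starts with a carry-in of 1,
 * meaning "no initial borrow".
 */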

#define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1)                             \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE r[16 / ESIZE];                                             \
        /* Calculate all results first to avoid overwriting inputs */   \
        for (e = 0; e < 16 / ESIZE; e++) {                              \
            if (!(e & 1)) {                                             \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]);         \
            } else {                                                    \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]);         \
            }                                                           \
        }                                                               \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], r[e], mask);                     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VCADD_ALL(OP, FN0, FN1)              \
    DO_VCADD(OP##b, 1, int8_t, FN0, FN1)        \
    DO_VCADD(OP##h, 2, int16_t, FN0, FN1)       \
    DO_VCADD(OP##w, 4, int32_t, FN0, FN1)

DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
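
/*
 * Element pairs hold complex numbers (real part in the even element,
 * imaginary part in the odd one). Rotating the second operand by 90
 * degrees maps (re, im) to (-im, re), hence the subtract in even lanes
 * and add in odd lanes; the 270-degree rotation is the opposite.
 */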

static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
    if (val > max) {
        *s = true;
        return max;
    } else if (val < min) {
        *s = true;
        return min;
    }
    return val;
}

#define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s)
#define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s)
#define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s)

#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s)
#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s)
#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s)

#define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s)
#define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s)
#define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s)

#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s)
#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)

/*
 * For QDMULH and QRDMULH we simplify "double and shift by esize" into
 * "shift by esize-1", adjusting the QRDMULH rounding constant to match.
 */
#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \
                                        INT8_MIN, INT8_MAX, s)
#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \
                                        INT16_MIN, INT16_MAX, s)
#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \
                                        INT32_MIN, INT32_MAX, s)

#define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \
                                         INT8_MIN, INT8_MAX, s)
#define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \
                                         INT16_MIN, INT16_MAX, s)
#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
                                         INT32_MIN, INT32_MAX, s)
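
/*
 * For example, for halfwords the architectural QRDMULH operation is
 * (2 * n * m + (1 << 15)) >> 16; computing (n * m + (1 << 14)) >> 15
 * instead gives the identical value with one less doubling step.
 */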

DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)

DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)

DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)

/*
 * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
 * and friends wanting a uint32_t* sat and our needing a bool*.
 */
#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp)                        \
    ({                                                                  \
        uint32_t su32 = 0;                                              \
        typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32);  \
        if (su32) {                                                     \
            *satp = true;                                               \
        }                                                               \
        r;                                                              \
    })

#define DO_SQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
#define DO_UQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
#define DO_SQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
#define DO_UQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)

DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)

/*
 * Multiply add dual returning high half
 * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of
 * whether to add the rounding constant, and the pointer to the
 * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant",
 * saturate to twice the input size and return the high half; or
 * (A * B - C * D) etc for VQDMLSDH.
 */
#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN)                \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                void *vm)                               \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            if ((e & 1) == XCHG) {                                      \
                TYPE r = FN(n[H##ESIZE(e)],                             \
                            m[H##ESIZE(e - XCHG)],                      \
                            n[H##ESIZE(e + (1 - 2 * XCHG))],            \
                            m[H##ESIZE(e + (1 - XCHG))],                \
                            ROUND, &sat);                               \
                mergemask(&d[H##ESIZE(e)], r, mask);                    \
                qc |= sat & mask & 1;                                   \
            }                                                           \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant before doubling rather
     * than adding the rounding constant after the doubling.
     */
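    /*
     * If one of the saturating adds does overflow, r holds the wrapped
     * value: a negative r means the true result overflowed upwards, so
     * we saturate to INT32_MAX, and vice versa.
     */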
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /* The same ordering issue as in do_vqdmladh_w applies here too */
    if (ssub64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)

DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)

DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)

DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)

#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask);    \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat),     \
                      mask);                                            \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN)        \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN)       \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
#define DO_2OP_SCALAR_S(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN)         \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN)        \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

/*
 * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
 * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
 * SATMASK specifies which bits of the predicate mask matter for determining
 * whether to propagate a saturation indication into FPSCR.QC -- for
 * the 16x16->32 case we must check only the bit corresponding to the T or B
 * half that we used, but for the 32x32->64 case we propagate if the mask
 * bit is set for either half.
 */
#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn;                                                   \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        bool qc = false;                                                \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            bool sat = false;                                           \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat);    \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
            qc |= sat && (mask & SATMASK);                              \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
{
    int64_t r = ((int64_t)n * m) * 2;
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
}

static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
{
    /* The multiply can't overflow, but the doubling might */
    int64_t r = (int64_t)n * m;
    if (r > INT64_MAX / 2) {
        *sat = true;
        return INT64_MAX;
    } else if (r < INT64_MIN / 2) {
        *sat = true;
        return INT64_MIN;
    } else {
        return r * 2;
    }
}
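
/*
 * For example, INT32_MIN * INT32_MIN is 2^62, which fits in an int64_t,
 * but doubling it to 2^63 would not, so only the doubling step needs
 * the saturation check.
 */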

#define SATMASK16B 1
#define SATMASK16T (1 << 2)
#define SATMASK32 ((1 << 4) | 1)

DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16B)
DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16T)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)

/*
 * Long saturating ops
 */
#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK)  \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                void *vm)                               \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        bool qc = false;                                                \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            bool sat = false;                                           \
            LTYPE op1 = n[H##ESIZE(le * 2 + TOP)];                      \
            LTYPE op2 = m[H##ESIZE(le * 2 + TOP)];                      \
            mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask);     \
            qc |= sat && (mask & SATMASK);                              \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)

static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit8(n);
    if (m < 8) {
        n >>= 8 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit16(n);
    if (m < 16) {
        n >>= 16 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit32(n);
    if (m < 32) {
        n >>= 32 - m;
    }
    return n;
}

DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
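
/*
 * That is, VBRSR bit-reverses the low m bits of each element and drops
 * the rest: for example do_vbrsrb(0x06, 3) reverses the low three bits
 * 0b110 to give 0b011.
 */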

/*
 * Multiply add long dual accumulate ops.
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC)                 \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else {                                                \
                    a EVENACC                                           \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)

DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)

/*
 * Rounding multiply add long dual accumulate high: we must keep
 * a 72-bit internal accumulator value and return the top 64 bits.
 */
#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128)         \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        Int128 acc = int128_lshift(TO128(a), 8);                        \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] * \
                                            m[H##ESIZE(e)]));           \
                } else {                                                \
                    acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \
                                             m[H##ESIZE(e)]));          \
                }                                                       \
                acc = int128_add(acc, int128_make64(1 << 7));           \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return int128_getlo(int128_rshift(acc, 8));                     \
    }

DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64)
DO_LDAVH(vrmlaldavhxsw, 4, int32_t, true, int128_add, int128_add, int128_makes64)

DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64)

DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)