1507b6a50SPeter Maydell /* 2507b6a50SPeter Maydell * M-profile MVE Operations 3507b6a50SPeter Maydell * 4507b6a50SPeter Maydell * Copyright (c) 2021 Linaro, Ltd. 5507b6a50SPeter Maydell * 6507b6a50SPeter Maydell * This library is free software; you can redistribute it and/or 7507b6a50SPeter Maydell * modify it under the terms of the GNU Lesser General Public 8507b6a50SPeter Maydell * License as published by the Free Software Foundation; either 9507b6a50SPeter Maydell * version 2.1 of the License, or (at your option) any later version. 10507b6a50SPeter Maydell * 11507b6a50SPeter Maydell * This library is distributed in the hope that it will be useful, 12507b6a50SPeter Maydell * but WITHOUT ANY WARRANTY; without even the implied warranty of 13507b6a50SPeter Maydell * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14507b6a50SPeter Maydell * Lesser General Public License for more details. 15507b6a50SPeter Maydell * 16507b6a50SPeter Maydell * You should have received a copy of the GNU Lesser General Public 17507b6a50SPeter Maydell * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "fpu/softfloat.h"
#include "crypto/clmul.h"

static uint16_t mve_eci_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector correspond
     * to beats being executed. The mask has 1 bits for executed lanes
     * and 0 bits where ECI says this beat was already executed.
     */
    int eci;

    if ((env->condexec_bits & 0xf) != 0) {
        /* In an IT block: no partial-execution state, all beats run */
        return 0xffff;
    }

    eci = env->condexec_bits >> 4;
    switch (eci) {
    case ECI_NONE:
        return 0xffff;
    case ECI_A0:
        return 0xfff0;
    case ECI_A0A1:
        return 0xff00;
    case ECI_A0A1A2:
    case ECI_A0A1A2B0:
        return 0xf000;
    default:
        g_assert_not_reached();
    }
}

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     * (1) by default, we update every lane in the vector
     * (2) VPT predication stores its state in the VPR register;
     * (3) low-overhead-branch tail predication will mask out part
     *     the vector on the final iteration of the loop
     * (4) if EPSR.ECI is set then we must execute only some beats
     *     of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    /* A zero MASK field means that half of P0 does not predicate */
    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
        mask &= ltpmask;
    }

    /*
     * ECI bits indicate which beats are already executed;
     * we handle this by effectively predicating them out.
     */
    mask &= mve_eci_mask(env);
    return mask;
}

static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;
    uint16_t inv_mask;
    uint16_t eci_mask = mve_eci_mask(env);

    if ((env->condexec_bits & 0xf) == 0) {
        /*
         * Not in an IT block: update the ECI state. A0A1A2B0 advances
         * to A0 (beat 0 of the next insn done); all others to NONE.
         */
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    /* Invert P0 bits if needed, but only for beats we actually executed */
    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    /* Start by assuming we invert all bits corresponding to executed beats */
    inv_mask = eci_mask;
    if (mask01 <= 8) {
        /* MASK01 says don't invert low half of P0 */
        inv_mask &= ~0xff;
    }
    if (mask23 <= 8) {
        /* MASK23 says don't invert high half of P0 */
        inv_mask &= ~0xff00;
    }
    vpr ^= inv_mask;
    /* Only update MASK01 if beat 1 executed */
    if (eci_mask & 0xf0) {
        vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    }
    /* Beat 3 always executes, so update MASK23 */
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}

/* For loads, predicated lanes are zeroed instead of keeping their old values */
/*
 * Contiguous vector load helper generator.
 * OP: helper name suffix; MSIZE: bytes per element in memory;
 * LDTYPE: cpu_*_data_ra load primitive; ESIZE: bytes per element in
 * the vector register; TYPE: element type in the register.
 */
#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned b, e;                                                  \
        /*                                                              \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned   \
         * beats so we don't care if we update part of the dest and     \
         * then take an exception.                                      \
         */                                                             \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (eci_mask & (1 << b)) {                                  \
                d[H##ESIZE(e)] = (mask & (1 << b)) ?                    \
                    cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0;     \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/*
 * Contiguous vector store helper generator; parameters as for DO_VLDR
 * but with STTYPE the cpu_*_data_ra store primitive. Predicated lanes
 * are simply not written to memory.
 */
#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

/* Widening loads: memory element narrower than the register element */
DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

/* Narrowing stores: only the low part of each register element is written */
DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR

/*
 * Gather loads/scatter stores. Here each element of Qm specifies
 * an offset to use from the base register Rm. In the _os_ versions
 * that offset is scaled by the element size.
 * For loads, predicated lanes are zeroed instead of retaining
 * their previous values.
 */
/*
 * Gather load helper generator.
 * OP: helper name suffix; LDTYPE: cpu_*_data_ra load primitive;
 * ESIZE/TYPE: element size/type in the register; OFFTYPE: type of the
 * offset elements in Qm; ADDRFN: macro combining base and offset;
 * WB: if true, write the computed address back into Qm (VLDRW wb forms).
 */
#define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB)        \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        TYPE *d = vd;                                                   \
        OFFTYPE *m = vm;                                                \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
            if (!(eci_mask & 1)) {                                      \
                continue;                                               \
            }                                                           \
            addr = ADDRFN(base, m[H##ESIZE(e)]);                        \
            d[H##ESIZE(e)] = (mask & 1) ?                               \
                cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0;         \
            if (WB) {                                                   \
                m[H##ESIZE(e)] = addr;                                  \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* We know here TYPE is unsigned so always the same as the offset type */
#define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB)                 \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        TYPE *d = vd;                                                   \
        TYPE *m = vm;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
            if (!(eci_mask & 1)) {                                      \
                continue;                                               \
            }                                                           \
            addr = ADDRFN(base, m[H##ESIZE(e)]);                        \
            if (mask & 1) {                                             \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            if (WB) {                                                   \
                m[H##ESIZE(e)] = addr;                                  \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/*
 * 64-bit accesses are slightly different: they are done as two 32-bit
 * accesses, controlled by the predicate mask for the relevant beat,
 * and with a single 32-bit offset in the first of the two Qm elements.
 * Note that for QEMU our IMPDEF AIRCR.ENDIANNESS is always 0 (little).
 * Address writeback happens on the odd beats and updates the address
 * stored in the even-beat element.
 */
#define DO_VLDR64_SG(OP, ADDRFN, WB)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        uint32_t *d = vd;                                               \
        uint32_t *m = vm;                                               \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) {      \
            if (!(eci_mask & 1)) {                                      \
                continue;                                               \
            }                                                           \
            /* Offset lives in the even element of each 64-bit pair */  \
            addr = ADDRFN(base, m[H4(e & ~1)]);                         \
            addr += 4 * (e & 1);                                        \
            d[H4(e)] = (mask & 1) ? cpu_ldl_data_ra(env, addr, GETPC()) : 0; \
            if (WB && (e & 1)) {                                        \
                m[H4(e & ~1)] = addr - 4;                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSTR64_SG(OP, ADDRFN, WB)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        uint32_t *d = vd;                                               \
        uint32_t *m = vm;                                               \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) {      \
            if (!(eci_mask & 1)) {                                      \
                continue;                                               \
            }                                                           \
            /* Offset lives in the even element of each 64-bit pair */  \
            addr = ADDRFN(base, m[H4(e & ~1)]);                         \
            addr += 4 * (e & 1);                                        \
            if (mask & 1) {                                             \
                cpu_stl_data_ra(env, addr, d[H4(e)], GETPC());          \
            }                                                           \
            if (WB && (e & 1)) {                                        \
                m[H4(e & ~1)] = addr - 4;                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Address computation: unscaled, or offset scaled by element size */
#define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET))
#define ADDR_ADD_OSH(BASE, OFFSET) ((BASE) + ((OFFSET) << 1))
#define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2))
#define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3))

DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false)

DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false)

DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false)
DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false)

DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false)
DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false)
DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false)
DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false)
DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false)
DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false)
DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false)

DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false)
DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false)
DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false)
DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false)

/* Writeback forms: the computed addresses are stored back into Qm */
DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true)
DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true)
DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true)
DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)

/*
 * Deinterleaving loads/interleaving stores.
 *
 * For these helpers we are passed the index of the first Qreg
 * (VLD2/VST2 will also access Qn+1, VLD4/VST4 access Qn .. Qn+3)
 * and the value of the base address register Rn.
 * The helpers are specialized for pattern and element size, so
 * for instance vld42h is VLD4 with pattern 2, element size MO_16.
 *
 * These insns are beatwise but not predicated, so we must honour ECI,
 * but need not look at mve_element_mask().
 *
 * The pseudocode implements these insns with multiple memory accesses
 * of the element size, but rules R_VVVG and R_FXDM permit us to make
 * one 32-bit memory access per beat.
 */
/* In all these macros off[beat] encodes the per-beat lane/offset pattern */
#define DO_VLD4B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            /* One byte of the loaded word goes to each of Qn..Qn+3 */  \
            for (e = 0; e < 4; e++, data >>= 8) {                       \
                uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
                qd[H1(off[beat])] = data;                               \
            }                                                           \
        }                                                               \
    }

#define DO_VLD4H(OP, O1, O2)                                            \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O1, O2, O2 };               \
        uint32_t addr, data;                                            \
        int y; /* y counts 0 2 0 2 */                                   \
        uint16_t *qd;                                                   \
        for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) {   \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 8 + (beat & 1) * 4;               \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            /* Low halfword to Qn+y, high halfword to Qn+y+1 */         \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y);             \
            qd[H2(off[beat])] = data;                                   \
            data >>= 16;                                                \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1);         \
            qd[H2(off[beat])] = data;                                   \
        }                                                               \
    }

#define DO_VLD4W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        int y;                                                          \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            y = (beat + (O1 & 2)) & 3;                                  \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y);             \
            qd[H4(off[beat] >> 2)] = data;                              \
        }                                                               \
    }

DO_VLD4B(vld40b, 0, 1, 10, 11)
DO_VLD4B(vld41b, 2, 3, 12, 13)
DO_VLD4B(vld42b, 4, 5, 14, 15)
DO_VLD4B(vld43b, 6, 7, 8, 9)

DO_VLD4H(vld40h, 0, 5)
DO_VLD4H(vld41h, 1, 6)
DO_VLD4H(vld42h, 2, 7)
DO_VLD4H(vld43h, 3, 4)

DO_VLD4W(vld40w, 0, 1, 10, 11)
DO_VLD4W(vld41w, 2, 3, 12, 13)
DO_VLD4W(vld42w, 4, 5, 14, 15)
DO_VLD4W(vld43w, 6, 7, 8, 9)

#define DO_VLD2B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint8_t *qd;                                                    \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 2;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            /* Bytes alternate between Qn and Qn+1 */                   \
            for (e = 0; e < 4; e++, data >>= 8) {                       \
                qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1));    \
                qd[H1(off[beat] + (e >> 1))] = data;                    \
            }                                                           \
        }                                                               \
    }

#define DO_VLD2H(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        int e;                                                          \
        uint16_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            for (e = 0; e < 2; e++, data >>= 16) {                      \
                qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e);         \
                qd[H2(off[beat])] = data;                               \
            }                                                           \
        }                                                               \
    }

#define DO_VLD2W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat];                                    \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1));    \
            qd[H4(off[beat] >> 3)] = data;                              \
        }                                                               \
    }

DO_VLD2B(vld20b, 0, 2, 12, 14)
DO_VLD2B(vld21b, 4, 6, 8, 10)

DO_VLD2H(vld20h, 0, 1, 6, 7)
DO_VLD2H(vld21h, 2, 3, 4, 5)

DO_VLD2W(vld20w, 0, 4, 24, 28)
DO_VLD2W(vld21w, 8, 12, 16, 20)

#define DO_VST4B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = 0;                                                   \
            /* Assemble one byte from each of Qn..Qn+3 into a word */   \
            for (e = 3; e >= 0; e--) {                                  \
                uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
                data = (data << 8) | qd[H1(off[beat])];                 \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST4H(OP, O1, O2)                                            \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O1, O2, O2 };               \
        uint32_t addr, data;                                            \
        int y; /* y counts 0 2 0 2 */                                   \
        uint16_t *qd;                                                   \
        for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) {   \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 8 + (beat & 1) * 4;               \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y);             \
            data = qd[H2(off[beat])];                                   \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1);         \
            data |= qd[H2(off[beat])] << 16;                            \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST4W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        int y;                                                          \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            y = (beat + (O1 & 2)) & 3;                                  \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y);             \
            data = qd[H4(off[beat] >> 2)];                              \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

DO_VST4B(vst40b, 0, 1, 10, 11)
DO_VST4B(vst41b, 2, 3, 12, 13)
DO_VST4B(vst42b, 4, 5, 14, 15)
DO_VST4B(vst43b, 6, 7, 8, 9)

DO_VST4H(vst40h, 0, 5)
DO_VST4H(vst41h, 1, 6)
DO_VST4H(vst42h, 2, 7)
DO_VST4H(vst43h, 3, 4)

DO_VST4W(vst40w, 0, 1, 10, 11)
DO_VST4W(vst41w, 2, 3, 12, 13)
DO_VST4W(vst42w, 4, 5, 14, 15)
DO_VST4W(vst43w, 6, 7, 8, 9)

#define DO_VST2B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint8_t *qd;                                                    \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 2;                                \
            data = 0;                                                   \
            /* Interleave bytes from Qn and Qn+1 into one word */       \
            for (e = 3; e >= 0; e--) {                                  \
                qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1));    \
                data = (data << 8) | qd[H1(off[beat] + (e >> 1))];      \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST2H(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        int e;                                                          \
        uint16_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = 0;                                                   \
            for (e = 1; e >= 0; e--) {                                  \
                qd = (uint16_t
*)aa32_vfp_qreg(env, qnidx + e); \ 673075e7e97SPeter Maydell data = (data << 16) | qd[H2(off[beat])]; \ 674075e7e97SPeter Maydell } \ 675075e7e97SPeter Maydell cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 676075e7e97SPeter Maydell } \ 677075e7e97SPeter Maydell } 678075e7e97SPeter Maydell 679075e7e97SPeter Maydell #define DO_VST2W(OP, O1, O2, O3, O4) \ 680075e7e97SPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 681075e7e97SPeter Maydell uint32_t base) \ 682075e7e97SPeter Maydell { \ 683075e7e97SPeter Maydell int beat; \ 684075e7e97SPeter Maydell uint16_t mask = mve_eci_mask(env); \ 685075e7e97SPeter Maydell static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 686075e7e97SPeter Maydell uint32_t addr, data; \ 687075e7e97SPeter Maydell uint32_t *qd; \ 688075e7e97SPeter Maydell for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 689075e7e97SPeter Maydell if ((mask & 1) == 0) { \ 690075e7e97SPeter Maydell /* ECI says skip this beat */ \ 691075e7e97SPeter Maydell continue; \ 692075e7e97SPeter Maydell } \ 693075e7e97SPeter Maydell addr = base + off[beat]; \ 694075e7e97SPeter Maydell qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \ 695075e7e97SPeter Maydell data = qd[H4(off[beat] >> 3)]; \ 696075e7e97SPeter Maydell cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 697075e7e97SPeter Maydell } \ 698075e7e97SPeter Maydell } 699075e7e97SPeter Maydell 700075e7e97SPeter Maydell DO_VST2B(vst20b, 0, 2, 12, 14) 701075e7e97SPeter Maydell DO_VST2B(vst21b, 4, 6, 8, 10) 702075e7e97SPeter Maydell 703075e7e97SPeter Maydell DO_VST2H(vst20h, 0, 1, 6, 7) 704075e7e97SPeter Maydell DO_VST2H(vst21h, 2, 3, 4, 5) 705075e7e97SPeter Maydell 706075e7e97SPeter Maydell DO_VST2W(vst20w, 0, 4, 24, 28) 707075e7e97SPeter Maydell DO_VST2W(vst21w, 8, 12, 16, 20) 708075e7e97SPeter Maydell 709075e7e97SPeter Maydell /* 7100f0f2bd5SPeter Maydell * The mergemask(D, R, M) macro performs the operation "*D = R" but 7110f0f2bd5SPeter Maydell * storing only the bytes which correspond to 
1 bits in M, 7120f0f2bd5SPeter Maydell * leaving other bytes in *D unchanged. We use _Generic 7130f0f2bd5SPeter Maydell * to select the correct implementation based on the type of D. 7140f0f2bd5SPeter Maydell */ 7150f0f2bd5SPeter Maydell 7160f0f2bd5SPeter Maydell static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask) 7170f0f2bd5SPeter Maydell { 7180f0f2bd5SPeter Maydell if (mask & 1) { 7190f0f2bd5SPeter Maydell *d = r; 7200f0f2bd5SPeter Maydell } 7210f0f2bd5SPeter Maydell } 7220f0f2bd5SPeter Maydell 7230f0f2bd5SPeter Maydell static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask) 7240f0f2bd5SPeter Maydell { 7250f0f2bd5SPeter Maydell mergemask_ub((uint8_t *)d, r, mask); 7260f0f2bd5SPeter Maydell } 7270f0f2bd5SPeter Maydell 7280f0f2bd5SPeter Maydell static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask) 7290f0f2bd5SPeter Maydell { 73005dd14bdSRichard Henderson uint16_t bmask = expand_pred_b(mask); 7310f0f2bd5SPeter Maydell *d = (*d & ~bmask) | (r & bmask); 7320f0f2bd5SPeter Maydell } 7330f0f2bd5SPeter Maydell 7340f0f2bd5SPeter Maydell static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask) 7350f0f2bd5SPeter Maydell { 7360f0f2bd5SPeter Maydell mergemask_uh((uint16_t *)d, r, mask); 7370f0f2bd5SPeter Maydell } 7380f0f2bd5SPeter Maydell 7390f0f2bd5SPeter Maydell static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask) 7400f0f2bd5SPeter Maydell { 74105dd14bdSRichard Henderson uint32_t bmask = expand_pred_b(mask); 7420f0f2bd5SPeter Maydell *d = (*d & ~bmask) | (r & bmask); 7430f0f2bd5SPeter Maydell } 7440f0f2bd5SPeter Maydell 7450f0f2bd5SPeter Maydell static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask) 7460f0f2bd5SPeter Maydell { 7470f0f2bd5SPeter Maydell mergemask_uw((uint32_t *)d, r, mask); 7480f0f2bd5SPeter Maydell } 7490f0f2bd5SPeter Maydell 7500f0f2bd5SPeter Maydell static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask) 7510f0f2bd5SPeter Maydell { 75205dd14bdSRichard Henderson uint64_t bmask = 
expand_pred_b(mask); 7530f0f2bd5SPeter Maydell *d = (*d & ~bmask) | (r & bmask); 7540f0f2bd5SPeter Maydell } 7550f0f2bd5SPeter Maydell 7560f0f2bd5SPeter Maydell static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask) 7570f0f2bd5SPeter Maydell { 7580f0f2bd5SPeter Maydell mergemask_uq((uint64_t *)d, r, mask); 7590f0f2bd5SPeter Maydell } 7600f0f2bd5SPeter Maydell 7610f0f2bd5SPeter Maydell #define mergemask(D, R, M) \ 7620f0f2bd5SPeter Maydell _Generic(D, \ 7630f0f2bd5SPeter Maydell uint8_t *: mergemask_ub, \ 7640f0f2bd5SPeter Maydell int8_t *: mergemask_sb, \ 7650f0f2bd5SPeter Maydell uint16_t *: mergemask_uh, \ 7660f0f2bd5SPeter Maydell int16_t *: mergemask_sh, \ 7670f0f2bd5SPeter Maydell uint32_t *: mergemask_uw, \ 7680f0f2bd5SPeter Maydell int32_t *: mergemask_sw, \ 7690f0f2bd5SPeter Maydell uint64_t *: mergemask_uq, \ 7700f0f2bd5SPeter Maydell int64_t *: mergemask_sq)(D, R, M) 7710f0f2bd5SPeter Maydell 772ab59362fSPeter Maydell void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val) 773ab59362fSPeter Maydell { 774ab59362fSPeter Maydell /* 775ab59362fSPeter Maydell * The generated code already replicated an 8 or 16 bit constant 776ab59362fSPeter Maydell * into the 32-bit value, so we only need to write the 32-bit 777ab59362fSPeter Maydell * value to all elements of the Qreg, allowing for predication. 
778ab59362fSPeter Maydell */ 779ab59362fSPeter Maydell uint32_t *d = vd; 780ab59362fSPeter Maydell uint16_t mask = mve_element_mask(env); 781ab59362fSPeter Maydell unsigned e; 782ab59362fSPeter Maydell for (e = 0; e < 16 / 4; e++, mask >>= 4) { 783ab59362fSPeter Maydell mergemask(&d[H4(e)], val, mask); 784ab59362fSPeter Maydell } 785ab59362fSPeter Maydell mve_advance_vpt(env); 786ab59362fSPeter Maydell } 787ab59362fSPeter Maydell 7880f0f2bd5SPeter Maydell #define DO_1OP(OP, ESIZE, TYPE, FN) \ 7890f0f2bd5SPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 7900f0f2bd5SPeter Maydell { \ 7910f0f2bd5SPeter Maydell TYPE *d = vd, *m = vm; \ 7920f0f2bd5SPeter Maydell uint16_t mask = mve_element_mask(env); \ 7930f0f2bd5SPeter Maydell unsigned e; \ 7940f0f2bd5SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 7950f0f2bd5SPeter Maydell mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \ 7960f0f2bd5SPeter Maydell } \ 7970f0f2bd5SPeter Maydell mve_advance_vpt(env); \ 7980f0f2bd5SPeter Maydell } 7990f0f2bd5SPeter Maydell 8006437f1f7SPeter Maydell #define DO_CLS_B(N) (clrsb32(N) - 24) 8016437f1f7SPeter Maydell #define DO_CLS_H(N) (clrsb32(N) - 16) 8026437f1f7SPeter Maydell 8036437f1f7SPeter Maydell DO_1OP(vclsb, 1, int8_t, DO_CLS_B) 8046437f1f7SPeter Maydell DO_1OP(vclsh, 2, int16_t, DO_CLS_H) 8056437f1f7SPeter Maydell DO_1OP(vclsw, 4, int32_t, clrsb32) 8066437f1f7SPeter Maydell 8070f0f2bd5SPeter Maydell #define DO_CLZ_B(N) (clz32(N) - 24) 8080f0f2bd5SPeter Maydell #define DO_CLZ_H(N) (clz32(N) - 16) 8090f0f2bd5SPeter Maydell 8100f0f2bd5SPeter Maydell DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B) 8110f0f2bd5SPeter Maydell DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H) 8120f0f2bd5SPeter Maydell DO_1OP(vclzw, 4, uint32_t, clz32) 813249b5309SPeter Maydell 814249b5309SPeter Maydell DO_1OP(vrev16b, 2, uint16_t, bswap16) 815249b5309SPeter Maydell DO_1OP(vrev32b, 4, uint32_t, bswap32) 816249b5309SPeter Maydell DO_1OP(vrev32h, 4, uint32_t, hswap32) 
817249b5309SPeter Maydell DO_1OP(vrev64b, 8, uint64_t, bswap64) 818249b5309SPeter Maydell DO_1OP(vrev64h, 8, uint64_t, hswap64) 819249b5309SPeter Maydell DO_1OP(vrev64w, 8, uint64_t, wswap64) 8208abd3c80SPeter Maydell 8218abd3c80SPeter Maydell #define DO_NOT(N) (~(N)) 8228abd3c80SPeter Maydell 8238abd3c80SPeter Maydell DO_1OP(vmvn, 8, uint64_t, DO_NOT) 82459c91773SPeter Maydell 82559c91773SPeter Maydell #define DO_ABS(N) ((N) < 0 ? -(N) : (N)) 82659c91773SPeter Maydell #define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff)) 82759c91773SPeter Maydell #define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff)) 82859c91773SPeter Maydell 82959c91773SPeter Maydell DO_1OP(vabsb, 1, int8_t, DO_ABS) 83059c91773SPeter Maydell DO_1OP(vabsh, 2, int16_t, DO_ABS) 83159c91773SPeter Maydell DO_1OP(vabsw, 4, int32_t, DO_ABS) 83259c91773SPeter Maydell 83359c91773SPeter Maydell /* We can do these 64 bits at a time */ 83459c91773SPeter Maydell DO_1OP(vfabsh, 8, uint64_t, DO_FABSH) 83559c91773SPeter Maydell DO_1OP(vfabss, 8, uint64_t, DO_FABSS) 836399a8c76SPeter Maydell 837399a8c76SPeter Maydell #define DO_NEG(N) (-(N)) 838399a8c76SPeter Maydell #define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000)) 839399a8c76SPeter Maydell #define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000)) 840399a8c76SPeter Maydell 841399a8c76SPeter Maydell DO_1OP(vnegb, 1, int8_t, DO_NEG) 842399a8c76SPeter Maydell DO_1OP(vnegh, 2, int16_t, DO_NEG) 843399a8c76SPeter Maydell DO_1OP(vnegw, 4, int32_t, DO_NEG) 844399a8c76SPeter Maydell 845399a8c76SPeter Maydell /* We can do these 64 bits at a time */ 846399a8c76SPeter Maydell DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH) 847399a8c76SPeter Maydell DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS) 84868245e44SPeter Maydell 849eab84139SPeter Maydell /* 850eab84139SPeter Maydell * 1 operand immediates: Vda is destination and possibly also one source. 851eab84139SPeter Maydell * All these insns work at 64-bit widths. 
852eab84139SPeter Maydell */ 853eab84139SPeter Maydell #define DO_1OP_IMM(OP, FN) \ 854eab84139SPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \ 855eab84139SPeter Maydell { \ 856eab84139SPeter Maydell uint64_t *da = vda; \ 857eab84139SPeter Maydell uint16_t mask = mve_element_mask(env); \ 858eab84139SPeter Maydell unsigned e; \ 859eab84139SPeter Maydell for (e = 0; e < 16 / 8; e++, mask >>= 8) { \ 860eab84139SPeter Maydell mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \ 861eab84139SPeter Maydell } \ 862eab84139SPeter Maydell mve_advance_vpt(env); \ 863eab84139SPeter Maydell } 864eab84139SPeter Maydell 865eab84139SPeter Maydell #define DO_MOVI(N, I) (I) 866eab84139SPeter Maydell #define DO_ANDI(N, I) ((N) & (I)) 867eab84139SPeter Maydell #define DO_ORRI(N, I) ((N) | (I)) 868eab84139SPeter Maydell 869eab84139SPeter Maydell DO_1OP_IMM(vmovi, DO_MOVI) 870eab84139SPeter Maydell DO_1OP_IMM(vandi, DO_ANDI) 871eab84139SPeter Maydell DO_1OP_IMM(vorri, DO_ORRI) 872eab84139SPeter Maydell 87368245e44SPeter Maydell #define DO_2OP(OP, ESIZE, TYPE, FN) \ 87468245e44SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, \ 87568245e44SPeter Maydell void *vd, void *vn, void *vm) \ 87668245e44SPeter Maydell { \ 87768245e44SPeter Maydell TYPE *d = vd, *n = vn, *m = vm; \ 87868245e44SPeter Maydell uint16_t mask = mve_element_mask(env); \ 87968245e44SPeter Maydell unsigned e; \ 88068245e44SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 88168245e44SPeter Maydell mergemask(&d[H##ESIZE(e)], \ 88268245e44SPeter Maydell FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \ 88368245e44SPeter Maydell } \ 88468245e44SPeter Maydell mve_advance_vpt(env); \ 88568245e44SPeter Maydell } 88668245e44SPeter Maydell 8879333fe4dSPeter Maydell /* provide unsigned 2-op helpers for all sizes */ 8889333fe4dSPeter Maydell #define DO_2OP_U(OP, FN) \ 8899333fe4dSPeter Maydell DO_2OP(OP##b, 1, uint8_t, FN) \ 8909333fe4dSPeter Maydell DO_2OP(OP##h, 2, uint16_t, 
FN) \ 8919333fe4dSPeter Maydell DO_2OP(OP##w, 4, uint32_t, FN) 8929333fe4dSPeter Maydell 893cd367ff3SPeter Maydell /* provide signed 2-op helpers for all sizes */ 894cd367ff3SPeter Maydell #define DO_2OP_S(OP, FN) \ 895cd367ff3SPeter Maydell DO_2OP(OP##b, 1, int8_t, FN) \ 896cd367ff3SPeter Maydell DO_2OP(OP##h, 2, int16_t, FN) \ 897cd367ff3SPeter Maydell DO_2OP(OP##w, 4, int32_t, FN) 898cd367ff3SPeter Maydell 899ac6ad1dcSPeter Maydell /* 900ac6ad1dcSPeter Maydell * "Long" operations where two half-sized inputs (taken from either the 901ac6ad1dcSPeter Maydell * top or the bottom of the input vector) produce a double-width result. 902ac6ad1dcSPeter Maydell * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output. 903ac6ad1dcSPeter Maydell */ 904ac6ad1dcSPeter Maydell #define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 905ac6ad1dcSPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 906ac6ad1dcSPeter Maydell { \ 907ac6ad1dcSPeter Maydell LTYPE *d = vd; \ 908ac6ad1dcSPeter Maydell TYPE *n = vn, *m = vm; \ 909ac6ad1dcSPeter Maydell uint16_t mask = mve_element_mask(env); \ 910ac6ad1dcSPeter Maydell unsigned le; \ 911ac6ad1dcSPeter Maydell for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 912ac6ad1dcSPeter Maydell LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], \ 913ac6ad1dcSPeter Maydell m[H##ESIZE(le * 2 + TOP)]); \ 914ac6ad1dcSPeter Maydell mergemask(&d[H##LESIZE(le)], r, mask); \ 915ac6ad1dcSPeter Maydell } \ 916ac6ad1dcSPeter Maydell mve_advance_vpt(env); \ 917ac6ad1dcSPeter Maydell } 918ac6ad1dcSPeter Maydell 919380caf6cSPeter Maydell #define DO_2OP_SAT(OP, ESIZE, TYPE, FN) \ 920380caf6cSPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 921380caf6cSPeter Maydell { \ 922380caf6cSPeter Maydell TYPE *d = vd, *n = vn, *m = vm; \ 923380caf6cSPeter Maydell uint16_t mask = mve_element_mask(env); \ 924380caf6cSPeter Maydell unsigned e; \ 925380caf6cSPeter Maydell 
bool qc = false; \ 926380caf6cSPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 927380caf6cSPeter Maydell bool sat = false; \ 928d54deb2aSPhilippe Mathieu-Daudé TYPE r_ = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \ 929d54deb2aSPhilippe Mathieu-Daudé mergemask(&d[H##ESIZE(e)], r_, mask); \ 930380caf6cSPeter Maydell qc |= sat & mask & 1; \ 931380caf6cSPeter Maydell } \ 932380caf6cSPeter Maydell if (qc) { \ 933380caf6cSPeter Maydell env->vfp.qc[0] = qc; \ 934380caf6cSPeter Maydell } \ 935380caf6cSPeter Maydell mve_advance_vpt(env); \ 936380caf6cSPeter Maydell } 937380caf6cSPeter Maydell 938483da661SPeter Maydell /* provide unsigned 2-op helpers for all sizes */ 939483da661SPeter Maydell #define DO_2OP_SAT_U(OP, FN) \ 940483da661SPeter Maydell DO_2OP_SAT(OP##b, 1, uint8_t, FN) \ 941483da661SPeter Maydell DO_2OP_SAT(OP##h, 2, uint16_t, FN) \ 942483da661SPeter Maydell DO_2OP_SAT(OP##w, 4, uint32_t, FN) 943483da661SPeter Maydell 944483da661SPeter Maydell /* provide signed 2-op helpers for all sizes */ 945483da661SPeter Maydell #define DO_2OP_SAT_S(OP, FN) \ 946483da661SPeter Maydell DO_2OP_SAT(OP##b, 1, int8_t, FN) \ 947483da661SPeter Maydell DO_2OP_SAT(OP##h, 2, int16_t, FN) \ 948483da661SPeter Maydell DO_2OP_SAT(OP##w, 4, int32_t, FN) 949483da661SPeter Maydell 95068245e44SPeter Maydell #define DO_AND(N, M) ((N) & (M)) 95168245e44SPeter Maydell #define DO_BIC(N, M) ((N) & ~(M)) 95268245e44SPeter Maydell #define DO_ORR(N, M) ((N) | (M)) 95368245e44SPeter Maydell #define DO_ORN(N, M) ((N) | ~(M)) 95468245e44SPeter Maydell #define DO_EOR(N, M) ((N) ^ (M)) 95568245e44SPeter Maydell 95668245e44SPeter Maydell DO_2OP(vand, 8, uint64_t, DO_AND) 95768245e44SPeter Maydell DO_2OP(vbic, 8, uint64_t, DO_BIC) 95868245e44SPeter Maydell DO_2OP(vorr, 8, uint64_t, DO_ORR) 95968245e44SPeter Maydell DO_2OP(vorn, 8, uint64_t, DO_ORN) 96068245e44SPeter Maydell DO_2OP(veor, 8, uint64_t, DO_EOR) 9619333fe4dSPeter Maydell 9629333fe4dSPeter Maydell #define DO_ADD(N, M) ((N) + 
(M)) 9639333fe4dSPeter Maydell #define DO_SUB(N, M) ((N) - (M)) 9649333fe4dSPeter Maydell #define DO_MUL(N, M) ((N) * (M)) 9659333fe4dSPeter Maydell 9669333fe4dSPeter Maydell DO_2OP_U(vadd, DO_ADD) 9679333fe4dSPeter Maydell DO_2OP_U(vsub, DO_SUB) 9689333fe4dSPeter Maydell DO_2OP_U(vmul, DO_MUL) 969ba62cc56SPeter Maydell 970ac6ad1dcSPeter Maydell DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL) 971ac6ad1dcSPeter Maydell DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL) 972ac6ad1dcSPeter Maydell DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL) 973ac6ad1dcSPeter Maydell DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL) 974ac6ad1dcSPeter Maydell DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL) 975ac6ad1dcSPeter Maydell DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL) 976ac6ad1dcSPeter Maydell 977ac6ad1dcSPeter Maydell DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL) 978ac6ad1dcSPeter Maydell DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL) 979ac6ad1dcSPeter Maydell DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL) 980ac6ad1dcSPeter Maydell DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL) 981ac6ad1dcSPeter Maydell DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL) 982ac6ad1dcSPeter Maydell DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL) 983ac6ad1dcSPeter Maydell 984ba62cc56SPeter Maydell /* 985c1bd78cbSPeter Maydell * Polynomial multiply. We can always do this generating 64 bits 986c1bd78cbSPeter Maydell * of the result at a time, so we don't need to use DO_2OP_L. 
987c1bd78cbSPeter Maydell */ 9888e3da4c7SRichard Henderson DO_2OP(vmullpbh, 8, uint64_t, clmul_8x4_even) 9898e3da4c7SRichard Henderson DO_2OP(vmullpth, 8, uint64_t, clmul_8x4_odd) 990c6f0dcb1SRichard Henderson DO_2OP(vmullpbw, 8, uint64_t, clmul_16x2_even) 991c6f0dcb1SRichard Henderson DO_2OP(vmullptw, 8, uint64_t, clmul_16x2_odd) 992c1bd78cbSPeter Maydell 993c1bd78cbSPeter Maydell /* 994ba62cc56SPeter Maydell * Because the computation type is at least twice as large as required, 995ba62cc56SPeter Maydell * these work for both signed and unsigned source types. 996ba62cc56SPeter Maydell */ 997ba62cc56SPeter Maydell static inline uint8_t do_mulh_b(int32_t n, int32_t m) 998ba62cc56SPeter Maydell { 999ba62cc56SPeter Maydell return (n * m) >> 8; 1000ba62cc56SPeter Maydell } 1001ba62cc56SPeter Maydell 1002ba62cc56SPeter Maydell static inline uint16_t do_mulh_h(int32_t n, int32_t m) 1003ba62cc56SPeter Maydell { 1004ba62cc56SPeter Maydell return (n * m) >> 16; 1005ba62cc56SPeter Maydell } 1006ba62cc56SPeter Maydell 1007ba62cc56SPeter Maydell static inline uint32_t do_mulh_w(int64_t n, int64_t m) 1008ba62cc56SPeter Maydell { 1009ba62cc56SPeter Maydell return (n * m) >> 32; 1010ba62cc56SPeter Maydell } 1011ba62cc56SPeter Maydell 1012fca87b78SPeter Maydell static inline uint8_t do_rmulh_b(int32_t n, int32_t m) 1013fca87b78SPeter Maydell { 1014fca87b78SPeter Maydell return (n * m + (1U << 7)) >> 8; 1015fca87b78SPeter Maydell } 1016fca87b78SPeter Maydell 1017fca87b78SPeter Maydell static inline uint16_t do_rmulh_h(int32_t n, int32_t m) 1018fca87b78SPeter Maydell { 1019fca87b78SPeter Maydell return (n * m + (1U << 15)) >> 16; 1020fca87b78SPeter Maydell } 1021fca87b78SPeter Maydell 1022fca87b78SPeter Maydell static inline uint32_t do_rmulh_w(int64_t n, int64_t m) 1023fca87b78SPeter Maydell { 1024fca87b78SPeter Maydell return (n * m + (1U << 31)) >> 32; 1025fca87b78SPeter Maydell } 1026fca87b78SPeter Maydell 1027ba62cc56SPeter Maydell DO_2OP(vmulhsb, 1, int8_t, do_mulh_b) 
1028ba62cc56SPeter Maydell DO_2OP(vmulhsh, 2, int16_t, do_mulh_h) 1029ba62cc56SPeter Maydell DO_2OP(vmulhsw, 4, int32_t, do_mulh_w) 1030ba62cc56SPeter Maydell DO_2OP(vmulhub, 1, uint8_t, do_mulh_b) 1031ba62cc56SPeter Maydell DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h) 1032ba62cc56SPeter Maydell DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w) 1033fca87b78SPeter Maydell 1034fca87b78SPeter Maydell DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b) 1035fca87b78SPeter Maydell DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h) 1036fca87b78SPeter Maydell DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w) 1037fca87b78SPeter Maydell DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b) 1038fca87b78SPeter Maydell DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h) 1039fca87b78SPeter Maydell DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w) 1040cd367ff3SPeter Maydell 1041cd367ff3SPeter Maydell #define DO_MAX(N, M) ((N) >= (M) ? (N) : (M)) 1042cd367ff3SPeter Maydell #define DO_MIN(N, M) ((N) >= (M) ? (M) : (N)) 1043cd367ff3SPeter Maydell 1044cd367ff3SPeter Maydell DO_2OP_S(vmaxs, DO_MAX) 1045cd367ff3SPeter Maydell DO_2OP_U(vmaxu, DO_MAX) 1046cd367ff3SPeter Maydell DO_2OP_S(vmins, DO_MIN) 1047cd367ff3SPeter Maydell DO_2OP_U(vminu, DO_MIN) 1048bc67aa8dSPeter Maydell 1049bc67aa8dSPeter Maydell #define DO_ABD(N, M) ((N) >= (M) ? 
(N) - (M) : (M) - (N)) 1050bc67aa8dSPeter Maydell 1051bc67aa8dSPeter Maydell DO_2OP_S(vabds, DO_ABD) 1052bc67aa8dSPeter Maydell DO_2OP_U(vabdu, DO_ABD) 1053abc48e31SPeter Maydell 1054abc48e31SPeter Maydell static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m) 1055abc48e31SPeter Maydell { 1056abc48e31SPeter Maydell return ((uint64_t)n + m) >> 1; 1057abc48e31SPeter Maydell } 1058abc48e31SPeter Maydell 1059abc48e31SPeter Maydell static inline int32_t do_vhadd_s(int32_t n, int32_t m) 1060abc48e31SPeter Maydell { 1061abc48e31SPeter Maydell return ((int64_t)n + m) >> 1; 1062abc48e31SPeter Maydell } 1063abc48e31SPeter Maydell 1064abc48e31SPeter Maydell static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m) 1065abc48e31SPeter Maydell { 1066abc48e31SPeter Maydell return ((uint64_t)n - m) >> 1; 1067abc48e31SPeter Maydell } 1068abc48e31SPeter Maydell 1069abc48e31SPeter Maydell static inline int32_t do_vhsub_s(int32_t n, int32_t m) 1070abc48e31SPeter Maydell { 1071abc48e31SPeter Maydell return ((int64_t)n - m) >> 1; 1072abc48e31SPeter Maydell } 1073abc48e31SPeter Maydell 1074abc48e31SPeter Maydell DO_2OP_S(vhadds, do_vhadd_s) 1075abc48e31SPeter Maydell DO_2OP_U(vhaddu, do_vhadd_u) 1076abc48e31SPeter Maydell DO_2OP_S(vhsubs, do_vhsub_s) 1077abc48e31SPeter Maydell DO_2OP_U(vhsubu, do_vhsub_u) 10781d2386f7SPeter Maydell 10790372cad8SPeter Maydell #define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL) 10800372cad8SPeter Maydell #define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL) 1081bb002345SPeter Maydell #define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL) 1082bb002345SPeter Maydell #define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL) 10830372cad8SPeter Maydell 10840372cad8SPeter Maydell DO_2OP_S(vshls, DO_VSHLS) 10850372cad8SPeter Maydell DO_2OP_U(vshlu, DO_VSHLU) 1086bb002345SPeter Maydell DO_2OP_S(vrshls, DO_VRSHLS) 1087bb002345SPeter Maydell DO_2OP_U(vrshlu, 
DO_VRSHLU) 10880372cad8SPeter Maydell 10891eb987a8SPeter Maydell #define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1) 10901eb987a8SPeter Maydell #define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1) 10911eb987a8SPeter Maydell 10921eb987a8SPeter Maydell DO_2OP_S(vrhadds, DO_RHADD_S) 10931eb987a8SPeter Maydell DO_2OP_U(vrhaddu, DO_RHADD_U) 10941eb987a8SPeter Maydell 109589bc4c4fSPeter Maydell static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m, 109689bc4c4fSPeter Maydell uint32_t inv, uint32_t carry_in, bool update_flags) 109789bc4c4fSPeter Maydell { 109889bc4c4fSPeter Maydell uint16_t mask = mve_element_mask(env); 109989bc4c4fSPeter Maydell unsigned e; 110089bc4c4fSPeter Maydell 110189bc4c4fSPeter Maydell /* If any additions trigger, we will update flags. */ 110289bc4c4fSPeter Maydell if (mask & 0x1111) { 110389bc4c4fSPeter Maydell update_flags = true; 110489bc4c4fSPeter Maydell } 110589bc4c4fSPeter Maydell 110689bc4c4fSPeter Maydell for (e = 0; e < 16 / 4; e++, mask >>= 4) { 110789bc4c4fSPeter Maydell uint64_t r = carry_in; 110889bc4c4fSPeter Maydell r += n[H4(e)]; 110989bc4c4fSPeter Maydell r += m[H4(e)] ^ inv; 111089bc4c4fSPeter Maydell if (mask & 1) { 111189bc4c4fSPeter Maydell carry_in = r >> 32; 111289bc4c4fSPeter Maydell } 111389bc4c4fSPeter Maydell mergemask(&d[H4(e)], r, mask); 111489bc4c4fSPeter Maydell } 111589bc4c4fSPeter Maydell 111689bc4c4fSPeter Maydell if (update_flags) { 111789bc4c4fSPeter Maydell /* Store C, clear NZV. 
*/ 1118a26db547SPeter Maydell env->vfp.fpsr &= ~FPSR_NZCV_MASK; 1119a26db547SPeter Maydell env->vfp.fpsr |= carry_in * FPSR_C; 112089bc4c4fSPeter Maydell } 112189bc4c4fSPeter Maydell mve_advance_vpt(env); 112289bc4c4fSPeter Maydell } 112389bc4c4fSPeter Maydell 112489bc4c4fSPeter Maydell void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm) 112589bc4c4fSPeter Maydell { 1126a26db547SPeter Maydell bool carry_in = env->vfp.fpsr & FPSR_C; 112789bc4c4fSPeter Maydell do_vadc(env, vd, vn, vm, 0, carry_in, false); 112889bc4c4fSPeter Maydell } 112989bc4c4fSPeter Maydell 113089bc4c4fSPeter Maydell void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm) 113189bc4c4fSPeter Maydell { 1132a26db547SPeter Maydell bool carry_in = env->vfp.fpsr & FPSR_C; 113389bc4c4fSPeter Maydell do_vadc(env, vd, vn, vm, -1, carry_in, false); 113489bc4c4fSPeter Maydell } 113589bc4c4fSPeter Maydell 113689bc4c4fSPeter Maydell 113789bc4c4fSPeter Maydell void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm) 113889bc4c4fSPeter Maydell { 113989bc4c4fSPeter Maydell do_vadc(env, vd, vn, vm, 0, 0, true); 114089bc4c4fSPeter Maydell } 114189bc4c4fSPeter Maydell 114289bc4c4fSPeter Maydell void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm) 114389bc4c4fSPeter Maydell { 114489bc4c4fSPeter Maydell do_vadc(env, vd, vn, vm, -1, 1, true); 114589bc4c4fSPeter Maydell } 114689bc4c4fSPeter Maydell 114767ec113bSPeter Maydell #define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \ 114867ec113bSPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 114967ec113bSPeter Maydell { \ 115067ec113bSPeter Maydell TYPE *d = vd, *n = vn, *m = vm; \ 115167ec113bSPeter Maydell uint16_t mask = mve_element_mask(env); \ 115267ec113bSPeter Maydell unsigned e; \ 115367ec113bSPeter Maydell TYPE r[16 / ESIZE]; \ 115467ec113bSPeter Maydell /* Calculate all results first to avoid overwriting inputs */ \ 115567ec113bSPeter Maydell for (e = 0; e < 16 / 
ESIZE; e++) { \ 115667ec113bSPeter Maydell if (!(e & 1)) { \ 115767ec113bSPeter Maydell r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \ 115867ec113bSPeter Maydell } else { \ 115967ec113bSPeter Maydell r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \ 116067ec113bSPeter Maydell } \ 116167ec113bSPeter Maydell } \ 116267ec113bSPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 116367ec113bSPeter Maydell mergemask(&d[H##ESIZE(e)], r[e], mask); \ 116467ec113bSPeter Maydell } \ 116567ec113bSPeter Maydell mve_advance_vpt(env); \ 116667ec113bSPeter Maydell } 116767ec113bSPeter Maydell 116867ec113bSPeter Maydell #define DO_VCADD_ALL(OP, FN0, FN1) \ 116967ec113bSPeter Maydell DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \ 117067ec113bSPeter Maydell DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \ 117167ec113bSPeter Maydell DO_VCADD(OP##w, 4, int32_t, FN0, FN1) 117267ec113bSPeter Maydell 117367ec113bSPeter Maydell DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD) 117467ec113bSPeter Maydell DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB) 11758625693aSPeter Maydell DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s) 11768625693aSPeter Maydell DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s) 117767ec113bSPeter Maydell 117839f2ec85SPeter Maydell static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s) 117939f2ec85SPeter Maydell { 118039f2ec85SPeter Maydell if (val > max) { 118139f2ec85SPeter Maydell *s = true; 118239f2ec85SPeter Maydell return max; 118339f2ec85SPeter Maydell } else if (val < min) { 118439f2ec85SPeter Maydell *s = true; 118539f2ec85SPeter Maydell return min; 118639f2ec85SPeter Maydell } 118739f2ec85SPeter Maydell return val; 118839f2ec85SPeter Maydell } 118939f2ec85SPeter Maydell 119039f2ec85SPeter Maydell #define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s) 119139f2ec85SPeter Maydell #define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s) 119239f2ec85SPeter Maydell #define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + 
m, INT32_MIN, INT32_MAX, s) 119339f2ec85SPeter Maydell 119439f2ec85SPeter Maydell #define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s) 119539f2ec85SPeter Maydell #define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s) 119639f2ec85SPeter Maydell #define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s) 119739f2ec85SPeter Maydell 119839f2ec85SPeter Maydell #define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s) 119939f2ec85SPeter Maydell #define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s) 120039f2ec85SPeter Maydell #define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s) 120139f2ec85SPeter Maydell 120239f2ec85SPeter Maydell #define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s) 120339f2ec85SPeter Maydell #define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s) 120439f2ec85SPeter Maydell #define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s) 12051d2386f7SPeter Maydell 120666c05767SPeter Maydell /* 120766c05767SPeter Maydell * For QDMULH and QRDMULH we simplify "double and shift by esize" into 120866c05767SPeter Maydell * "shift by esize-1", adjusting the QRDMULH rounding constant to match. 
120966c05767SPeter Maydell */ 121066c05767SPeter Maydell #define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \ 121166c05767SPeter Maydell INT8_MIN, INT8_MAX, s) 121266c05767SPeter Maydell #define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \ 121366c05767SPeter Maydell INT16_MIN, INT16_MAX, s) 121466c05767SPeter Maydell #define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \ 121566c05767SPeter Maydell INT32_MIN, INT32_MAX, s) 121666c05767SPeter Maydell 121766c05767SPeter Maydell #define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \ 121866c05767SPeter Maydell INT8_MIN, INT8_MAX, s) 121966c05767SPeter Maydell #define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \ 122066c05767SPeter Maydell INT16_MIN, INT16_MAX, s) 122166c05767SPeter Maydell #define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \ 122266c05767SPeter Maydell INT32_MIN, INT32_MAX, s) 122366c05767SPeter Maydell 1224380caf6cSPeter Maydell DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B) 1225380caf6cSPeter Maydell DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H) 1226380caf6cSPeter Maydell DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W) 1227380caf6cSPeter Maydell 1228380caf6cSPeter Maydell DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B) 1229380caf6cSPeter Maydell DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H) 1230380caf6cSPeter Maydell DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W) 1231380caf6cSPeter Maydell 1232f741707bSPeter Maydell DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B) 1233f741707bSPeter Maydell DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H) 1234f741707bSPeter Maydell DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W) 1235f741707bSPeter Maydell DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B) 1236f741707bSPeter Maydell DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H) 1237f741707bSPeter Maydell DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W) 1238f741707bSPeter Maydell 1239f741707bSPeter Maydell DO_2OP_SAT(vqsubub, 1, uint8_t, 
DO_UQSUB_B) 1240f741707bSPeter Maydell DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H) 1241f741707bSPeter Maydell DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W) 1242f741707bSPeter Maydell DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B) 1243f741707bSPeter Maydell DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H) 1244f741707bSPeter Maydell DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W) 1245f741707bSPeter Maydell 1246483da661SPeter Maydell /* 1247483da661SPeter Maydell * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs() 1248483da661SPeter Maydell * and friends wanting a uint32_t* sat and our needing a bool*. 1249483da661SPeter Maydell */ 1250483da661SPeter Maydell #define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \ 1251483da661SPeter Maydell ({ \ 1252483da661SPeter Maydell uint32_t su32 = 0; \ 1253d54deb2aSPhilippe Mathieu-Daudé typeof(N) qrshl_ret = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \ 1254483da661SPeter Maydell if (su32) { \ 1255483da661SPeter Maydell *satp = true; \ 1256483da661SPeter Maydell } \ 1257d54deb2aSPhilippe Mathieu-Daudé qrshl_ret; \ 1258483da661SPeter Maydell }) 1259483da661SPeter Maydell 1260483da661SPeter Maydell #define DO_SQSHL_OP(N, M, satp) \ 1261483da661SPeter Maydell WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp) 1262483da661SPeter Maydell #define DO_UQSHL_OP(N, M, satp) \ 1263483da661SPeter Maydell WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp) 12649dc868c4SPeter Maydell #define DO_SQRSHL_OP(N, M, satp) \ 12659dc868c4SPeter Maydell WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp) 12669dc868c4SPeter Maydell #define DO_UQRSHL_OP(N, M, satp) \ 12679dc868c4SPeter Maydell WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp) 1268f9ed6174SPeter Maydell #define DO_SUQSHL_OP(N, M, satp) \ 1269f9ed6174SPeter Maydell WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp) 1270483da661SPeter Maydell 1271483da661SPeter Maydell DO_2OP_SAT_S(vqshls, DO_SQSHL_OP) 1272483da661SPeter Maydell DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP) 
12739dc868c4SPeter Maydell DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP) 12749dc868c4SPeter Maydell DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP) 1275483da661SPeter Maydell 1276fd677f80SPeter Maydell /* 1277fd677f80SPeter Maydell * Multiply add dual returning high half 1278fd677f80SPeter Maydell * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of 1279fd677f80SPeter Maydell * whether to add the rounding constant, and the pointer to the 1280fd677f80SPeter Maydell * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant", 1281fd677f80SPeter Maydell * saturate to twice the input size and return the high half; or 1282fd677f80SPeter Maydell * (A * B - C * D) etc for VQDMLSDH. 1283fd677f80SPeter Maydell */ 1284fd677f80SPeter Maydell #define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN) \ 1285fd677f80SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1286fd677f80SPeter Maydell void *vm) \ 1287fd677f80SPeter Maydell { \ 1288fd677f80SPeter Maydell TYPE *d = vd, *n = vn, *m = vm; \ 1289fd677f80SPeter Maydell uint16_t mask = mve_element_mask(env); \ 1290fd677f80SPeter Maydell unsigned e; \ 1291fd677f80SPeter Maydell bool qc = false; \ 1292fd677f80SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1293fd677f80SPeter Maydell bool sat = false; \ 1294fd677f80SPeter Maydell if ((e & 1) == XCHG) { \ 1295d54deb2aSPhilippe Mathieu-Daudé TYPE vqdmladh_ret = FN(n[H##ESIZE(e)], \ 1296fd677f80SPeter Maydell m[H##ESIZE(e - XCHG)], \ 1297fd677f80SPeter Maydell n[H##ESIZE(e + (1 - 2 * XCHG))], \ 1298fd677f80SPeter Maydell m[H##ESIZE(e + (1 - XCHG))], \ 1299fd677f80SPeter Maydell ROUND, &sat); \ 1300d54deb2aSPhilippe Mathieu-Daudé mergemask(&d[H##ESIZE(e)], vqdmladh_ret, mask); \ 1301fd677f80SPeter Maydell qc |= sat & mask & 1; \ 1302fd677f80SPeter Maydell } \ 1303fd677f80SPeter Maydell } \ 1304fd677f80SPeter Maydell if (qc) { \ 1305fd677f80SPeter Maydell env->vfp.qc[0] = qc; \ 1306fd677f80SPeter Maydell } \ 1307fd677f80SPeter 
Maydell mve_advance_vpt(env); \ 1308fd677f80SPeter Maydell } 1309fd677f80SPeter Maydell 1310fd677f80SPeter Maydell static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d, 1311fd677f80SPeter Maydell int round, bool *sat) 1312fd677f80SPeter Maydell { 1313fd677f80SPeter Maydell int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7); 1314fd677f80SPeter Maydell return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8; 1315fd677f80SPeter Maydell } 1316fd677f80SPeter Maydell 1317fd677f80SPeter Maydell static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d, 1318fd677f80SPeter Maydell int round, bool *sat) 1319fd677f80SPeter Maydell { 1320fd677f80SPeter Maydell int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15); 1321fd677f80SPeter Maydell return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16; 1322fd677f80SPeter Maydell } 1323fd677f80SPeter Maydell 1324fd677f80SPeter Maydell static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d, 1325fd677f80SPeter Maydell int round, bool *sat) 1326fd677f80SPeter Maydell { 1327fd677f80SPeter Maydell int64_t m1 = (int64_t)a * b; 1328fd677f80SPeter Maydell int64_t m2 = (int64_t)c * d; 1329fd677f80SPeter Maydell int64_t r; 1330fd677f80SPeter Maydell /* 1331fd677f80SPeter Maydell * Architecturally we should do the entire add, double, round 1332fd677f80SPeter Maydell * and then check for saturation. We do three saturating adds, 1333fd677f80SPeter Maydell * but we need to be careful about the order. If the first 1334fd677f80SPeter Maydell * m1 + m2 saturates then it's impossible for the *2+rc to 1335fd677f80SPeter Maydell * bring it back into the non-saturated range. However, if 1336fd677f80SPeter Maydell * m1 + m2 is negative then it's possible that doing the doubling 1337fd677f80SPeter Maydell * would take the intermediate result below INT64_MAX and the 1338fd677f80SPeter Maydell * addition of the rounding constant then brings it back in range. 
1339fd677f80SPeter Maydell * So we add half the rounding constant before doubling rather 1340fd677f80SPeter Maydell * than adding the rounding constant after the doubling. 1341fd677f80SPeter Maydell */ 1342fd677f80SPeter Maydell if (sadd64_overflow(m1, m2, &r) || 1343fd677f80SPeter Maydell sadd64_overflow(r, (round << 30), &r) || 1344fd677f80SPeter Maydell sadd64_overflow(r, r, &r)) { 1345fd677f80SPeter Maydell *sat = true; 1346fd677f80SPeter Maydell return r < 0 ? INT32_MAX : INT32_MIN; 1347fd677f80SPeter Maydell } 1348fd677f80SPeter Maydell return r >> 32; 1349fd677f80SPeter Maydell } 1350fd677f80SPeter Maydell 135192f11732SPeter Maydell static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d, 135292f11732SPeter Maydell int round, bool *sat) 135392f11732SPeter Maydell { 135492f11732SPeter Maydell int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7); 135592f11732SPeter Maydell return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8; 135692f11732SPeter Maydell } 135792f11732SPeter Maydell 135892f11732SPeter Maydell static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d, 135992f11732SPeter Maydell int round, bool *sat) 136092f11732SPeter Maydell { 136192f11732SPeter Maydell int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15); 136292f11732SPeter Maydell return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16; 136392f11732SPeter Maydell } 136492f11732SPeter Maydell 136592f11732SPeter Maydell static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d, 136692f11732SPeter Maydell int round, bool *sat) 136792f11732SPeter Maydell { 136892f11732SPeter Maydell int64_t m1 = (int64_t)a * b; 136992f11732SPeter Maydell int64_t m2 = (int64_t)c * d; 137092f11732SPeter Maydell int64_t r; 137192f11732SPeter Maydell /* The same ordering issue as in do_vqdmladh_w applies here too */ 137292f11732SPeter Maydell if (ssub64_overflow(m1, m2, &r) || 137392f11732SPeter Maydell sadd64_overflow(r, (round << 30), &r) || 
137492f11732SPeter Maydell sadd64_overflow(r, r, &r)) { 137592f11732SPeter Maydell *sat = true; 137692f11732SPeter Maydell return r < 0 ? INT32_MAX : INT32_MIN; 137792f11732SPeter Maydell } 137892f11732SPeter Maydell return r >> 32; 137992f11732SPeter Maydell } 138092f11732SPeter Maydell 1381fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b) 1382fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h) 1383fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w) 1384fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b) 1385fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h) 1386fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w) 1387fd677f80SPeter Maydell 1388fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b) 1389fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h) 1390fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w) 1391fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b) 1392fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h) 1393fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w) 1394fd677f80SPeter Maydell 139592f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b) 139692f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h) 139792f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w) 139892f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b) 139992f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h) 140092f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w) 140192f11732SPeter Maydell 140292f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, 
do_vqdmlsdh_b) 140392f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h) 140492f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w) 140592f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b) 140692f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h) 140792f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w) 140892f11732SPeter Maydell 1409e51896b3SPeter Maydell #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \ 1410e51896b3SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1411e51896b3SPeter Maydell uint32_t rm) \ 1412e51896b3SPeter Maydell { \ 1413e51896b3SPeter Maydell TYPE *d = vd, *n = vn; \ 1414e51896b3SPeter Maydell TYPE m = rm; \ 1415e51896b3SPeter Maydell uint16_t mask = mve_element_mask(env); \ 1416e51896b3SPeter Maydell unsigned e; \ 1417e51896b3SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1418e51896b3SPeter Maydell mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \ 1419e51896b3SPeter Maydell } \ 1420e51896b3SPeter Maydell mve_advance_vpt(env); \ 1421e51896b3SPeter Maydell } 1422e51896b3SPeter Maydell 142339f2ec85SPeter Maydell #define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \ 142439f2ec85SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 142539f2ec85SPeter Maydell uint32_t rm) \ 142639f2ec85SPeter Maydell { \ 142739f2ec85SPeter Maydell TYPE *d = vd, *n = vn; \ 142839f2ec85SPeter Maydell TYPE m = rm; \ 142939f2ec85SPeter Maydell uint16_t mask = mve_element_mask(env); \ 143039f2ec85SPeter Maydell unsigned e; \ 143139f2ec85SPeter Maydell bool qc = false; \ 143239f2ec85SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 143339f2ec85SPeter Maydell bool sat = false; \ 143439f2ec85SPeter Maydell mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \ 143539f2ec85SPeter Maydell mask); \ 143639f2ec85SPeter Maydell qc |= 
sat & mask & 1; \ 143739f2ec85SPeter Maydell } \ 143839f2ec85SPeter Maydell if (qc) { \ 143939f2ec85SPeter Maydell env->vfp.qc[0] = qc; \ 144039f2ec85SPeter Maydell } \ 144139f2ec85SPeter Maydell mve_advance_vpt(env); \ 144239f2ec85SPeter Maydell } 144339f2ec85SPeter Maydell 14446b895bf8SPeter Maydell /* "accumulating" version where FN takes d as well as n and m */ 14456b895bf8SPeter Maydell #define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ 14466b895bf8SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 14476b895bf8SPeter Maydell uint32_t rm) \ 14486b895bf8SPeter Maydell { \ 14496b895bf8SPeter Maydell TYPE *d = vd, *n = vn; \ 14506b895bf8SPeter Maydell TYPE m = rm; \ 14516b895bf8SPeter Maydell uint16_t mask = mve_element_mask(env); \ 14526b895bf8SPeter Maydell unsigned e; \ 14536b895bf8SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 14546b895bf8SPeter Maydell mergemask(&d[H##ESIZE(e)], \ 14556b895bf8SPeter Maydell FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask); \ 14566b895bf8SPeter Maydell } \ 14576b895bf8SPeter Maydell mve_advance_vpt(env); \ 14586b895bf8SPeter Maydell } 14596b895bf8SPeter Maydell 14608be9a250SPeter Maydell #define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ 14618be9a250SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 14628be9a250SPeter Maydell uint32_t rm) \ 14638be9a250SPeter Maydell { \ 14648be9a250SPeter Maydell TYPE *d = vd, *n = vn; \ 14658be9a250SPeter Maydell TYPE m = rm; \ 14668be9a250SPeter Maydell uint16_t mask = mve_element_mask(env); \ 14678be9a250SPeter Maydell unsigned e; \ 14688be9a250SPeter Maydell bool qc = false; \ 14698be9a250SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 14708be9a250SPeter Maydell bool sat = false; \ 14718be9a250SPeter Maydell mergemask(&d[H##ESIZE(e)], \ 14728be9a250SPeter Maydell FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat), \ 14738be9a250SPeter Maydell mask); \ 14748be9a250SPeter Maydell qc |= sat & mask & 
1; \ 14758be9a250SPeter Maydell } \ 14768be9a250SPeter Maydell if (qc) { \ 14778be9a250SPeter Maydell env->vfp.qc[0] = qc; \ 14788be9a250SPeter Maydell } \ 14798be9a250SPeter Maydell mve_advance_vpt(env); \ 14808be9a250SPeter Maydell } 14818be9a250SPeter Maydell 1482e51896b3SPeter Maydell /* provide unsigned 2-op scalar helpers for all sizes */ 1483e51896b3SPeter Maydell #define DO_2OP_SCALAR_U(OP, FN) \ 1484e51896b3SPeter Maydell DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \ 1485e51896b3SPeter Maydell DO_2OP_SCALAR(OP##h, 2, uint16_t, FN) \ 1486e51896b3SPeter Maydell DO_2OP_SCALAR(OP##w, 4, uint32_t, FN) 1487644f717cSPeter Maydell #define DO_2OP_SCALAR_S(OP, FN) \ 1488644f717cSPeter Maydell DO_2OP_SCALAR(OP##b, 1, int8_t, FN) \ 1489644f717cSPeter Maydell DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \ 1490644f717cSPeter Maydell DO_2OP_SCALAR(OP##w, 4, int32_t, FN) 1491e51896b3SPeter Maydell 14926b895bf8SPeter Maydell #define DO_2OP_ACC_SCALAR_U(OP, FN) \ 14936b895bf8SPeter Maydell DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN) \ 14946b895bf8SPeter Maydell DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN) \ 14956b895bf8SPeter Maydell DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN) 14966b895bf8SPeter Maydell 1497e51896b3SPeter Maydell DO_2OP_SCALAR_U(vadd_scalar, DO_ADD) 149891a358fdSPeter Maydell DO_2OP_SCALAR_U(vsub_scalar, DO_SUB) 149991a358fdSPeter Maydell DO_2OP_SCALAR_U(vmul_scalar, DO_MUL) 1500644f717cSPeter Maydell DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s) 1501644f717cSPeter Maydell DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u) 1502644f717cSPeter Maydell DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s) 1503644f717cSPeter Maydell DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u) 1504e51896b3SPeter Maydell 150539f2ec85SPeter Maydell DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B) 150639f2ec85SPeter Maydell DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H) 150739f2ec85SPeter Maydell DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W) 150839f2ec85SPeter Maydell 
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

/*
 * Doubling multiply-accumulate: compute a * b * 2 + (c << esize),
 * optionally rounded, saturated to twice the element size; the
 * high half of that is the result.
 */
static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
                            int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
                            int round, bool *sat)
{
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MAX and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant and half the "c << esize"
     * before doubling rather than adding the rounding constant after
     * the doubling.
     */
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c << 31;
    int64_t r;
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

/*
 * The *MLAH insns are vector * scalar + vector;
 * the *MLASH insns are vector * vector + scalar
 */
#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S)
#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S)
#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S)
#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S)
#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S)
#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S)

#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S)
#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S)
#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S)
#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S)
#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S)
#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S)

DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W)
15918be9a250SPeter Maydell 15928be9a250SPeter Maydell DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B) 15938be9a250SPeter Maydell DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H) 15948be9a250SPeter Maydell DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W) 15958be9a250SPeter Maydell DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B) 15968be9a250SPeter Maydell DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H) 15978be9a250SPeter Maydell DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W) 15988be9a250SPeter Maydell 1599c69e34c6SPeter Maydell /* Vector by scalar plus vector */ 1600c69e34c6SPeter Maydell #define DO_VMLA(D, N, M) ((N) * (M) + (D)) 1601c69e34c6SPeter Maydell 1602c69e34c6SPeter Maydell DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA) 1603c69e34c6SPeter Maydell 16046b895bf8SPeter Maydell /* Vector by vector plus scalar */ 16056b895bf8SPeter Maydell #define DO_VMLAS(D, N, M) ((N) * (D) + (M)) 16066b895bf8SPeter Maydell 16076b895bf8SPeter Maydell DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS) 16086b895bf8SPeter Maydell 1609a8890353SPeter Maydell /* 1610a8890353SPeter Maydell * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the 1611a8890353SPeter Maydell * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type. 1612a8890353SPeter Maydell * SATMASK specifies which bits of the predicate mask matter for determining 1613a8890353SPeter Maydell * whether to propagate a saturation indication into FPSCR.QC -- for 1614a8890353SPeter Maydell * the 16x16->32 case we must check only the bit corresponding to the T or B 1615a8890353SPeter Maydell * half that we used, but for the 32x32->64 case we propagate if the mask 1616a8890353SPeter Maydell * bit is set for either half. 
1617a8890353SPeter Maydell */ 1618a8890353SPeter Maydell #define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \ 1619a8890353SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1620a8890353SPeter Maydell uint32_t rm) \ 1621a8890353SPeter Maydell { \ 1622a8890353SPeter Maydell LTYPE *d = vd; \ 1623a8890353SPeter Maydell TYPE *n = vn; \ 1624a8890353SPeter Maydell TYPE m = rm; \ 1625a8890353SPeter Maydell uint16_t mask = mve_element_mask(env); \ 1626a8890353SPeter Maydell unsigned le; \ 1627a8890353SPeter Maydell bool qc = false; \ 1628a8890353SPeter Maydell for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 1629a8890353SPeter Maydell bool sat = false; \ 1630a8890353SPeter Maydell LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \ 1631a8890353SPeter Maydell mergemask(&d[H##LESIZE(le)], r, mask); \ 1632a8890353SPeter Maydell qc |= sat && (mask & SATMASK); \ 1633a8890353SPeter Maydell } \ 1634a8890353SPeter Maydell if (qc) { \ 1635a8890353SPeter Maydell env->vfp.qc[0] = qc; \ 1636a8890353SPeter Maydell } \ 1637a8890353SPeter Maydell mve_advance_vpt(env); \ 1638a8890353SPeter Maydell } 1639a8890353SPeter Maydell 1640a8890353SPeter Maydell static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat) 1641a8890353SPeter Maydell { 1642a8890353SPeter Maydell int64_t r = ((int64_t)n * m) * 2; 1643a8890353SPeter Maydell return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat); 1644a8890353SPeter Maydell } 1645a8890353SPeter Maydell 1646a8890353SPeter Maydell static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat) 1647a8890353SPeter Maydell { 1648a8890353SPeter Maydell /* The multiply can't overflow, but the doubling might */ 1649a8890353SPeter Maydell int64_t r = (int64_t)n * m; 1650a8890353SPeter Maydell if (r > INT64_MAX / 2) { 1651a8890353SPeter Maydell *sat = true; 1652a8890353SPeter Maydell return INT64_MAX; 1653a8890353SPeter Maydell } else if (r < INT64_MIN / 2) { 1654a8890353SPeter Maydell *sat 
= true; 1655a8890353SPeter Maydell return INT64_MIN; 1656a8890353SPeter Maydell } else { 1657a8890353SPeter Maydell return r * 2; 1658a8890353SPeter Maydell } 1659a8890353SPeter Maydell } 1660a8890353SPeter Maydell 1661a8890353SPeter Maydell #define SATMASK16B 1 1662a8890353SPeter Maydell #define SATMASK16T (1 << 2) 1663a8890353SPeter Maydell #define SATMASK32 ((1 << 4) | 1) 1664a8890353SPeter Maydell 1665a8890353SPeter Maydell DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \ 1666a8890353SPeter Maydell do_qdmullh, SATMASK16B) 1667a8890353SPeter Maydell DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \ 1668a8890353SPeter Maydell do_qdmullw, SATMASK32) 1669a8890353SPeter Maydell DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \ 1670a8890353SPeter Maydell do_qdmullh, SATMASK16T) 1671a8890353SPeter Maydell DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \ 1672a8890353SPeter Maydell do_qdmullw, SATMASK32) 1673a8890353SPeter Maydell 167443364321SPeter Maydell /* 167543364321SPeter Maydell * Long saturating ops 167643364321SPeter Maydell */ 167743364321SPeter Maydell #define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \ 167843364321SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 167943364321SPeter Maydell void *vm) \ 168043364321SPeter Maydell { \ 168143364321SPeter Maydell LTYPE *d = vd; \ 168243364321SPeter Maydell TYPE *n = vn, *m = vm; \ 168343364321SPeter Maydell uint16_t mask = mve_element_mask(env); \ 168443364321SPeter Maydell unsigned le; \ 168543364321SPeter Maydell bool qc = false; \ 168643364321SPeter Maydell for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 168743364321SPeter Maydell bool sat = false; \ 168843364321SPeter Maydell LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \ 168943364321SPeter Maydell LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \ 169043364321SPeter Maydell mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \ 
        qc |= sat && (mask & SATMASK);                          \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)

/*
 * VBRSR scalar helpers: bit-reverse the element 'n' and then shift the
 * result down so that only the low 'm' (mod 256) bits are significant.
 * m == 0 yields 0; m >= the element width yields the full bit-reversed
 * element.
 */
static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit8(n);
    if (m < 8) {
        n >>= 8 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit16(n);
    if (m < 16) {
        n >>= 16 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit32(n);
    if (m < 32) {
        n >>= 32 - m;
    }
    return n;
}

DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)

/*
 * Multiply add long dual accumulate ops.
 * EVENACC/ODDACC are the accumulate operators (+= or -=) applied to the
 * even and odd element products respectively; XCHG true swaps which 'n'
 * element is paired with each 'm' element (the "exchange" forms).
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC)                 \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else {                                                \
                    a EVENACC                                           \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)

DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)

/*
 * Multiply add dual accumulate ops
 * (32-bit accumulator variant of DO_LDAV above; same XCHG/EVENACC/ODDACC
 * parameter meanings, but products are taken at element width.)
 */
#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint32_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)];     \
                } else {                                                \
                    a EVENACC                                           \
                        n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)];     \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC)           \
    DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC)   \
    DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC)  \
    DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)

#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC)           \
    DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC)  \
    DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)

DO_DAV_S(vmladavs, false, +=, +=)
DO_DAV_U(vmladavu, false, +=, +=)
DO_DAV_S(vmlsdav, false, +=, -=)
DO_DAV_S(vmladavsx, true, +=, +=)
DO_DAV_S(vmlsdavx, true, +=, -=)

/*
 * Rounding multiply add long dual accumulate high. In the pseudocode
 * this is implemented with a 72-bit internal accumulator value of which
 * the top 64 bits are returned. We optimize this to avoid having to
 * use 128-bit arithmetic -- we can do this because the 74-bit accumulator
 * is squashed back into 64-bits after each beat.
 */
#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB)                            \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {                      \
            if (mask & 1) {                                             \
                LTYPE mul;                                              \
                if (e & 1) {                                            \
                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)];        \
                    if (SUB) {                                          \
                        mul = -mul;                                     \
                    }                                                   \
                } else {                                                \
                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)];        \
                }                                                       \
                mul = (mul >> 8) + ((mul >> 7) & 1);                    \
                a += mul;                                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)

DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)

DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)

/* Vector add across vector */
#define DO_VADDV(OP, ESIZE, TYPE)                               \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra)                \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                ra += m[H##ESIZE(e)];                           \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

DO_VADDV(vaddvsb, 1, int8_t)
DO_VADDV(vaddvsh, 2, int16_t)
DO_VADDV(vaddvsw, 4, int32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)

/*
 * Vector max/min across vector. Unlike VADDV, we must
 * read ra as the element size, not its full width.
 * We work with int64_t internally for simplicity.
 */
#define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN)                \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra_in)             \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        int64_t ra = (RATYPE)ra_in;                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                ra = FN(ra, m[H##ESIZE(e)]);                    \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

#define DO_VMAXMINV_U(INSN, FN)                         \
    DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN)       \
    DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN)     \
    DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN)
#define DO_VMAXMINV_S(INSN, FN)                         \
    DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN)         \
    DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN)       \
    DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN)

/*
 * Helpers for max and min of absolute values across vector:
 * note that we only take the absolute value of 'm', not 'n'
 */
static int64_t do_maxa(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MAX(n, m);
}

static int64_t do_mina(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MIN(n, m);
}

DO_VMAXMINV_S(vmaxvs, DO_MAX)
DO_VMAXMINV_U(vmaxvu, DO_MAX)
DO_VMAXMINV_S(vminvs, DO_MIN)
DO_VMAXMINV_U(vminvu, DO_MIN)
/*
 * VMAXAV, VMINAV treat the general purpose input as unsigned
 * and the vector elements as signed.
 */
DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa)
DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa)
DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa)
DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)

/*
 * Vector absolute-difference-and-accumulate across vector: accumulate
 * |n[e] - m[e]| into ra. Differences are computed at int64_t width so
 * they cannot overflow, then truncated to uint32_t for accumulation.
 */
#define DO_VABAV(OP, ESIZE, TYPE)                               \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint32_t ra)      \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm, *n = vn;                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                int64_t n0 = n[H##ESIZE(e)];                    \
                int64_t m0 = m[H##ESIZE(e)];                    \
                uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0);  \
                ra += r;                                        \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }

DO_VABAV(vabavsb, 1, int8_t)
DO_VABAV(vabavsh, 2, int16_t)
DO_VABAV(vabavsw, 4, int32_t)
DO_VABAV(vabavub, 1, uint8_t)
DO_VABAV(vabavuh, 2, uint16_t)
DO_VABAV(vabavuw, 4, uint32_t)

/*
 * Vector add-long across vector: 32-bit elements widened to LTYPE and
 * summed into a 64-bit accumulator.
 */
#define DO_VADDLV(OP, TYPE, LTYPE)                              \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint64_t ra)                \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {              \
            if (mask & 1) {                                     \
                ra += (LTYPE)m[H4(e)];                          \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

DO_VADDLV(vaddlv_s, int32_t, int64_t)
DO_VADDLV(vaddlv_u, uint32_t, uint64_t)

/* Shifts by immediate */
#define DO_2SHIFT(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        TYPE *d = vd, *m = vm;                                  \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)],                          \
                      FN(m[H##ESIZE(e)], shift), mask);         \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

/*
 * Saturating shifts by immediate: FN takes a bool* which it sets on
 * saturation; QC is set only if a saturating lane was predicated active.
 */
#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN)                      \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        TYPE *d = vd, *m = vm;                                  \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        bool qc = false;                                        \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            bool sat = false;                                   \
            mergemask(&d[H##ESIZE(e)],                          \
                      FN(m[H##ESIZE(e)], shift, &sat), mask);   \
            qc |= sat & mask & 1;                               \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

/* provide unsigned 2-op shift helpers for all sizes */
#define DO_2SHIFT_U(OP, FN)                     \
    DO_2SHIFT(OP##b, 1, uint8_t, FN)            \
    DO_2SHIFT(OP##h, 2, uint16_t, FN)           \
    DO_2SHIFT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_S(OP, FN)                     \
    DO_2SHIFT(OP##b, 1, int8_t, FN)             \
    DO_2SHIFT(OP##h, 2, int16_t, FN)            \
    DO_2SHIFT(OP##w, 4, int32_t, FN)

#define DO_2SHIFT_SAT_U(OP, FN)                 \
    DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN)        \
    DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN)       \
    DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_SAT_S(OP, FN)                 \
    DO_2SHIFT_SAT(OP##b, 1, int8_t, FN)         \
    DO_2SHIFT_SAT(OP##h, 2, int16_t, FN)        \
    DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)

DO_2SHIFT_U(vshli_u, DO_VSHLU)
DO_2SHIFT_S(vshli_s, DO_VSHLS)
DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP)
DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP)

/* Shift-and-insert; we always work with 64 bits at a time */
#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN)                    \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        uint64_t *d = vd, *m = vm;                                      \
        uint16_t mask;                                                  \
        uint64_t shiftmask;                                             \
        unsigned e;                                                     \
        if (shift == ESIZE * 8) {                                       \
            /*                                                          \
             * Only VSRI can shift by <dt>; it should mean "don't       \
             * update the destination". The generic logic can't handle  \
             * this because it would try to shift by an out-of-range    \
             * amount, so special case it here.                         \
             */                                                         \
            goto done;                                                  \
        }                                                               \
        assert(shift < ESIZE * 8);                                      \
        mask = mve_element_mask(env);                                   \
        /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */     \
        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift));     \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) {                      \
            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) |       \
                (d[H8(e)] & ~shiftmask);                                \
            mergemask(&d[H8(e)], r, mask);                              \
        }                                                               \
    done:                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))

DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)

/*
 * Long shifts taking half-sized inputs from top or bottom of the input
 * vector and producing a double-width result. ESIZE, TYPE are for
 * the input, and LESIZE, LTYPE for the output.
 * Unlike the normal shift helpers, we do not handle negative shift counts,
 * because the long shift is strictly left-only.
 */
#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE)           \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        LTYPE *d = vd;                                          \
        TYPE *m = vm;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned le;                                            \
        assert(shift <= 16);                                    \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \
            mergemask(&d[H##LESIZE(le)], r, mask);              \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_VSHLL_ALL(OP, TOP)                           \
    DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t)        \
    DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t)      \
    DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t)       \
    DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t)     \

DO_VSHLL_ALL(vshllb, false)
DO_VSHLL_ALL(vshllt, true)

/*
 * Narrowing right shifts, taking a double sized input, shifting it
 * and putting the result in either the top or bottom half of the output.
 * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
 */
#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)       \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            TYPE r = FN(m[H##LESIZE(le)], shift);               \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);     \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_VSHRN_ALL(OP, FN)                                    \
    DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN)        \
    DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN)       \
    DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN)         \
    DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)

/* Unsigned rounding right shift; shifts of 65 or more produce 0. */
static inline uint64_t do_urshr(uint64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else if (sh == 64) {
        return x >> 63;
    } else {
        return 0;
    }
}

/* Signed rounding right shift; shifts of 64 or more produce 0. */
static inline int64_t do_srshr(int64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else {
        /* Rounding the sign bit always produces 0. */
        return 0;
    }
}

DO_VSHRN_ALL(vshrn, DO_SHR)
DO_VSHRN_ALL(vrshrn, do_urshr)

/* Saturate val into [min, max], setting *satp if it was out of range. */
static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
                                 bool *satp)
{
    if (val > max) {
        *satp = true;
        return max;
    } else if (val < min) {
        *satp = true;
        return min;
    } else {
        return val;
    }
}

/* Saturating narrowing right shifts */
#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)   \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        bool qc = false;                                        \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false;                                   \
            TYPE r = FN(m[H##LESIZE(le)], shift, &sat);         \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);     \
            qc |= sat & mask & 1;                               \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_VSHRN_SAT_UB(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN)       \
    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VSHRN_SAT_UH(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN)      \
    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VSHRN_SAT_SB(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN)         \
    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VSHRN_SAT_SH(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN)        \
    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)

#define DO_SHRN_SB(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
#define DO_SHRN_UB(N, M, SATP)                                  \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
#define DO_SHRUN_B(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)

#define DO_SHRN_SH(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
#define DO_SHRN_UH(N, M, SATP)                                  \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
#define DO_SHRUN_H(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)

#define DO_RSHRN_SB(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
#define DO_RSHRN_UB(N, M, SATP)                                 \
    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
#define DO_RSHRUN_B(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)

#define DO_RSHRN_SH(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
#define DO_RSHRN_UH(N, M, SATP)                                 \
    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
#define DO_RSHRUN_H(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)

DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)

DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)

/*
 * Narrowing moves: truncate a double-width input element into the
 * top or bottom half of the corresponding output element pair.
 */
#define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE)           \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            mergemask(&d[H##ESIZE(le * 2 + TOP)],               \
                      m[H##LESIZE(le)], mask);                  \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t)
DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t)

/* Saturating narrowing moves: like DO_VMOVN but FN saturates and may set QC. */
#define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)   \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        bool qc = false;                                        \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false;                                   \
            TYPE r = FN(m[H##LESIZE(le)], &sat);                \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);     \
            qc |= sat & mask & 1;                               \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_VMOVN_SAT_UB(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN)       \
    DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VMOVN_SAT_UH(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN)      \
    DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VMOVN_SAT_SB(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN)         \
    DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VMOVN_SAT_SH(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN)        \
    DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)

#define DO_VQMOVN_SB(N, SATP)                                   \
    do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQMOVN_UB(N, SATP)                                   \
    do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP)
#define DO_VQMOVUN_B(N, SATP)                                   \
    do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP)

#define DO_VQMOVN_SH(N, SATP)                                   \
    do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQMOVN_UH(N, SATP)                                   \
    do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP)
#define DO_VQMOVUN_H(N, SATP)                                   \
    do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP)

DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB)
DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH)
DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB)
DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH)
DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B)
DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H)

uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
                           uint32_t shift)
{
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    uint32_t r;

    /*
     * For each 32-bit element, we shift it left, bringing in the
     * low 'shift' bits of rdm at the bottom. Bits shifted out at
     * the top become the new rdm, if the predicate mask permits.
     * The final rdm value is returned to update the register.
     * shift == 0 here means "shift by 32 bits".
     */
    if (shift == 0) {
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = rdm;
            if (mask & 1) {
                rdm = d[H4(e)];
            }
            mergemask(&d[H4(e)], r, mask);
        }
    } else {
        uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);

        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = (d[H4(e)] << shift) | (rdm & shiftmask);
            if (mask & 1) {
                rdm = d[H4(e)] >> (32 - shift);
            }
            mergemask(&d[H4(e)], r, mask);
        }
    }
    mve_advance_vpt(env);
    return rdm;
}

/* Long scalar shift right: shift count is negated for do_sqrshl_d. */
uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, NULL);
}

/* Saturating variants record saturation in FPSCR.QF via env->QF. */
uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
}

uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
}

/* Operate on 64-bit values, but saturate at 48 bits */
static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
                                    bool round, uint32_t *sat)
{
    int64_t val, extval;

    if (shift <= -48) {
        /* Rounding the sign bit always produces 0. */
        if (round) {
            return 0;
        }
        /* Arithmetic shift right by >= the operand width: all sign bits */
        return src >> 63;
    } else if (shift < 0) {
        if (round) {
            /* Shift one bit less, then round-to-nearest via the low bit */
            src >>= -shift - 1;
            val = (src >> 1) + (src & 1);
        } else {
            val = src >> -shift;
        }
        extval = sextract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        extval = sextract64(src << shift, 0, 48);
        /* Saturate unless shifting back recovers the original value */
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    /* INT48_MAX for positive overflow, INT48_MIN (sign-extended) otherwise */
    return src >= 0 ? MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17);
}

/* Operate on 64-bit values, but saturate at 48 bits */
static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
                                     bool round, uint32_t *sat)
{
    uint64_t val, extval;

    /* With rounding, a shift of exactly -48 can still round up from bit 47 */
    if (shift <= -(48 + round)) {
        return 0;
    } else if (shift < 0) {
        if (round) {
            val = src >> (-shift - 1);
            val = (val >> 1) + (val & 1);
        } else {
            val = src >> -shift;
        }
        extval = extract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        extval = extract64(src << shift, 0, 48);
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    /* UINT48_MAX */
    return MAKE_64BIT_MASK(0, 48);
}

uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
}
/* 32-bit scalar saturating shifts (UQSHL/SQSHL/UQRSHL/SQRSHR) */
uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
}

uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
}

/*
 * VIDUP/VDDUP: fill the vector with offset, offset+imm, ... (FN applied
 * per element); the final offset is returned to write back to Rn.
 */
#define DO_VIDUP(OP, ESIZE, TYPE, FN)                           \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd,       \
                              uint32_t offset, uint32_t imm)    \
    {                                                           \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)], offset, mask);           \
            offset = FN(offset, imm);                           \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return offset;                                          \
    }

/* VIWDUP/VDWDUP: as DO_VIDUP but FN also takes the wrap value from Rm */
#define DO_VIWDUP(OP, ESIZE, TYPE, FN)                          \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd,       \
                              uint32_t offset, uint32_t wrap,   \
                              uint32_t imm)                     \
    {                                                           \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)], offset, mask);           \
            offset = FN(offset, wrap, imm);                     \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return offset;                                          \
    }

#define DO_VIDUP_ALL(OP, FN)                    \
    DO_VIDUP(OP##b, 1, int8_t, FN)              \
    DO_VIDUP(OP##h, 2, int16_t, FN)             \
    DO_VIDUP(OP##w, 4, int32_t, FN)

#define DO_VIWDUP_ALL(OP, FN)                   \
    DO_VIWDUP(OP##b, 1, int8_t, FN)             \
    DO_VIWDUP(OP##h, 2, int16_t, FN)            \
    DO_VIWDUP(OP##w, 4, int32_t, FN)

/* Incrementing-with-wrap step: reset to 0 when the new offset hits wrap */
static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    offset += imm;
    if (offset == wrap) {
        offset = 0;
    }
    return offset;
}

/* Decrementing-with-wrap step: reload wrap before stepping down from 0 */
static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    if (offset == 0) {
        offset = wrap;
    }
    offset -= imm;
    return offset;
}
DO_VIDUP_ALL(vidup, DO_ADD)
DO_VIWDUP_ALL(viwdup, do_add_wrap)
DO_VIWDUP_ALL(vdwdup, do_sub_wrap)

/*
 * Vector comparison.
 * P0 bits for non-executed beats (where eci_mask is 0) are unchanged.
 * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
 * P0 bits otherwise are updated with the results of the comparisons.
 * We must also keep unchanged the MASK fields at the top of v7m.vpr.
 */
#define DO_VCMP(OP, ESIZE, TYPE, FN)                                    \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm)   \
    {                                                                   \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++) {                              \
            bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]);                \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
            emask <<= ESIZE;                                            \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

/* As DO_VCMP, but the second operand is the scalar in Rm (duplicated) */
#define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN)                             \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,             \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *n = vn;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++) {                              \
            bool r = FN(n[H##ESIZE(e)], (TYPE)rm);                      \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
            emask <<= ESIZE;                                            \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

/* Expand vector and scalar forms for all three signed element sizes */
#define DO_VCMP_S(OP, FN)                               \
    DO_VCMP(OP##b, 1, int8_t, FN)                       \
    DO_VCMP(OP##h, 2, int16_t, FN)                      \
    DO_VCMP(OP##w, 4, int32_t, FN)                      \
    DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN)         \
    DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN)        \
    DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN)

/* Expand vector and scalar forms for all three unsigned element sizes */
#define DO_VCMP_U(OP, FN)                               \
    DO_VCMP(OP##b, 1, uint8_t, FN)                      \
    DO_VCMP(OP##h, 2, uint16_t, FN)                     \
    DO_VCMP(OP##w, 4, uint32_t, FN)                     \
    DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN)        \
    DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN)       \
    DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN)
Maydell DO_VCMP(OP##w, 4, uint32_t, FN) \ 2656cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \ 2657cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \ 2658cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN) 2659eff5d9a9SPeter Maydell 2660eff5d9a9SPeter Maydell #define DO_EQ(N, M) ((N) == (M)) 2661eff5d9a9SPeter Maydell #define DO_NE(N, M) ((N) != (M)) 2662eff5d9a9SPeter Maydell #define DO_EQ(N, M) ((N) == (M)) 2663eff5d9a9SPeter Maydell #define DO_EQ(N, M) ((N) == (M)) 2664eff5d9a9SPeter Maydell #define DO_GE(N, M) ((N) >= (M)) 2665eff5d9a9SPeter Maydell #define DO_LT(N, M) ((N) < (M)) 2666eff5d9a9SPeter Maydell #define DO_GT(N, M) ((N) > (M)) 2667eff5d9a9SPeter Maydell #define DO_LE(N, M) ((N) <= (M)) 2668eff5d9a9SPeter Maydell 2669eff5d9a9SPeter Maydell DO_VCMP_U(vcmpeq, DO_EQ) 2670eff5d9a9SPeter Maydell DO_VCMP_U(vcmpne, DO_NE) 2671eff5d9a9SPeter Maydell DO_VCMP_U(vcmpcs, DO_GE) 2672eff5d9a9SPeter Maydell DO_VCMP_U(vcmphi, DO_GT) 2673eff5d9a9SPeter Maydell DO_VCMP_S(vcmpge, DO_GE) 2674eff5d9a9SPeter Maydell DO_VCMP_S(vcmplt, DO_LT) 2675eff5d9a9SPeter Maydell DO_VCMP_S(vcmpgt, DO_GT) 2676eff5d9a9SPeter Maydell DO_VCMP_S(vcmple, DO_LE) 2677c386443bSPeter Maydell 2678c386443bSPeter Maydell void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm) 2679c386443bSPeter Maydell { 2680c386443bSPeter Maydell /* 2681c386443bSPeter Maydell * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n] 2682c386443bSPeter Maydell * but note that whether bytes are written to Qd is still subject 2683c386443bSPeter Maydell * to (all forms of) predication in the usual way. 
void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    /*
     * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n]
     * but note that whether bytes are written to Qd is still subject
     * to (all forms of) predication in the usual way.
     */
    uint64_t *d = vd, *n = vn, *m = vm;
    uint16_t mask = mve_element_mask(env);
    uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
    unsigned e;
    /* Work in 64-bit (8-byte) chunks: select via p0, then store via mask */
    for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) {
        uint64_t r = m[H8(e)];
        mergemask(&r, n[H8(e)], p0);
        mergemask(&d[H8(e)], r, mask);
    }
    mve_advance_vpt(env);
}

void HELPER(mve_vpnot)(CPUARMState *env)
{
    /*
     * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged.
     * P0 bits for predicated lanes in executed bits (where mask is 0) are 0.
     * P0 bits otherwise are inverted.
     * (This is the same logic as VCMP.)
     * This insn is itself subject to predication and to beat-wise execution,
     * and after it executes VPT state advances in the usual way.
     */
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    uint16_t beatpred = ~env->v7m.vpr & mask;
    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask);
    mve_advance_vpt(env);
}

/*
 * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed,
 * otherwise set according to value of Rn. The calculation of
 * newmask here works in the same way as the calculation of the
 * ltpmask in mve_element_mask(), but we have pre-calculated
 * the masklen in the generated code.
 */
void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen)
{
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    uint16_t newmask;

    assert(masklen <= 16);
    /* masklen == 0 would make MAKE_64BIT_MASK undefined; special-case it */
    newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
    newmask &= mask;
    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask);
    mve_advance_vpt(env);
}

/*
 * 1-operand saturating op: FN produces the result and reports saturation
 * via *sat; QC is only set if byte 0 of the element was unpredicated.
 */
#define DO_1OP_SAT(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
/*
 * VQABS/VQNEG: widen to int64_t before abs/negate so that the most
 * negative input (e.g. INT8_MIN) does not overflow, then saturate back.
 */
#define DO_VQABS_B(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQABS_H(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQABS_W(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP)

#define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP)
#define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP)
#define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP)

DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B)
DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H)
DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W)

DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B)
DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H)
DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W)

/*
 * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its
 * absolute value; we then do an unsigned comparison.
 */
#define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN)                \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    {                                                           \
        UTYPE *d = vd;                                          \
        STYPE *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            UTYPE r = DO_ABS(m[H##ESIZE(e)]);                   \
            r = FN(d[H##ESIZE(e)], r);                          \
            mergemask(&d[H##ESIZE(e)], r, mask);                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX)
DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX)
DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX)
DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN)
DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN)
DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN)
/*
 * 2-operand floating point. Note that if an element is partially
 * predicated we must do the FP operation to update the non-predicated
 * bytes, but we must be careful to avoid updating the FP exception
 * state unless byte 0 of the element was unpredicated.
 */
#define DO_2OP_FP(OP, ESIZE, TYPE, FN)                                  \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst);               \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Expand for both the half-precision and single-precision forms */
#define DO_2OP_FP_ALL(OP, FN)                  \
    DO_2OP_FP(OP##h, 2, float16, float16_##FN) \
    DO_2OP_FP(OP##s, 4, float32, float32_##FN)

DO_2OP_FP_ALL(vfadd, add)
DO_2OP_FP_ALL(vfsub, sub)
DO_2OP_FP_ALL(vfmul, mul)

/* Absolute difference: |a - b| */
static inline float16 float16_abd(float16 a, float16 b, float_status *s)
{
    return float16_abs(float16_sub(a, b, s));
}

static inline float32 float32_abd(float32 a, float32 b, float_status *s)
{
    return float32_abs(float32_sub(a, b, s));
}

DO_2OP_FP_ALL(vfabd, abd)
DO_2OP_FP_ALL(vmaxnm, maxnum)
DO_2OP_FP_ALL(vminnm, minnum)

/* maxNum/minNum of the absolute values, for VMAXNMA/VMINNMA */
static inline float16 float16_maxnuma(float16 a, float16 b, float_status *s)
{
    return float16_maxnum(float16_abs(a), float16_abs(b), s);
}

static inline float32 float32_maxnuma(float32 a, float32 b, float_status *s)
{
    return float32_maxnum(float32_abs(a), float32_abs(b), s);
}

static inline float16 float16_minnuma(float16 a, float16 b, float_status *s)
{
    return float16_minnum(float16_abs(a), float16_abs(b), s);
}

static inline float32 float32_minnuma(float32 a, float32 b, float_status *s)
{
    return float32_minnum(float32_abs(a), float32_abs(b), s);
}

DO_2OP_FP_ALL(vmaxnma, maxnuma)
DO_2OP_FP_ALL(vminnma, minnuma)
/*
 * VCADD: even elements get FN0(n[e], m[e+1]), odd elements get
 * FN1(n[e], m[e-1]); the FN0/FN1 add/sub pairing selects the 90 or
 * 270 degree rotation.
 */
#define DO_VCADD_FP(OP, ESIZE, TYPE, FN0, FN1)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r[16 / ESIZE];                                             \
        uint16_t tm, mask = mve_element_mask(env);                      \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        /* Calculate all results first to avoid overwriting inputs */   \
        for (e = 0, tm = mask; e < 16 / ESIZE; e++, tm >>= ESIZE) {     \
            if ((tm & MAKE_64BIT_MASK(0, ESIZE)) == 0) {                \
                r[e] = 0;                                               \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(tm & 1)) {                                            \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            if (!(e & 1)) {                                             \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)], fpst);   \
            } else {                                                    \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)], fpst);   \
            }                                                           \
        }                                                               \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], r[e], mask);                     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VCADD_FP(vfcadd90h, 2, float16, float16_sub, float16_add)
DO_VCADD_FP(vfcadd90s, 4, float32, float32_sub, float32_add)
DO_VCADD_FP(vfcadd270h, 2, float16, float16_add, float16_sub)
DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub) 29123173c0ddSPeter Maydell 29133173c0ddSPeter Maydell #define DO_VFMA(OP, ESIZE, TYPE, CHS) \ 29143173c0ddSPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, \ 29153173c0ddSPeter Maydell void *vd, void *vn, void *vm) \ 29163173c0ddSPeter Maydell { \ 29173173c0ddSPeter Maydell TYPE *d = vd, *n = vn, *m = vm; \ 29183173c0ddSPeter Maydell TYPE r; \ 29193173c0ddSPeter Maydell uint16_t mask = mve_element_mask(env); \ 29203173c0ddSPeter Maydell unsigned e; \ 29213173c0ddSPeter Maydell float_status *fpst; \ 29223173c0ddSPeter Maydell float_status scratch_fpst; \ 29233173c0ddSPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 29243173c0ddSPeter Maydell if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 29253173c0ddSPeter Maydell continue; \ 29263173c0ddSPeter Maydell } \ 2927f81c4698SRichard Henderson fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ 29283173c0ddSPeter Maydell if (!(mask & 1)) { \ 29293173c0ddSPeter Maydell /* We need the result but without updating flags */ \ 29303173c0ddSPeter Maydell scratch_fpst = *fpst; \ 29313173c0ddSPeter Maydell fpst = &scratch_fpst; \ 29323173c0ddSPeter Maydell } \ 29333173c0ddSPeter Maydell r = n[H##ESIZE(e)]; \ 29343173c0ddSPeter Maydell if (CHS) { \ 29353173c0ddSPeter Maydell r = TYPE##_chs(r); \ 29363173c0ddSPeter Maydell } \ 29373173c0ddSPeter Maydell r = TYPE##_muladd(r, m[H##ESIZE(e)], d[H##ESIZE(e)], \ 29383173c0ddSPeter Maydell 0, fpst); \ 29393173c0ddSPeter Maydell mergemask(&d[H##ESIZE(e)], r, mask); \ 29403173c0ddSPeter Maydell } \ 29413173c0ddSPeter Maydell mve_advance_vpt(env); \ 29423173c0ddSPeter Maydell } 29433173c0ddSPeter Maydell 29443173c0ddSPeter Maydell DO_VFMA(vfmah, 2, float16, false) 29453173c0ddSPeter Maydell DO_VFMA(vfmas, 4, float32, false) 29463173c0ddSPeter Maydell DO_VFMA(vfmsh, 2, float16, true) 29473173c0ddSPeter Maydell DO_VFMA(vfmss, 4, float32, true) 2948d3cd965cSPeter Maydell 
/*
 * VCMUL and VCMLA: complex multiply (and optionally accumulate) with a
 * rotation of 0, 90, 180 or 270 degrees (ROT = 0..3). Elements are
 * processed in real/imaginary pairs (e, e+1); the switch below selects
 * which source elements (possibly negated via TYPE##_chs) feed the two
 * FN invocations for the pair. FN is a plain multiply for VCMUL or a
 * muladd (accumulating into d) for VCMLA.
 *
 * Predication: a pair is skipped only when all its predicate bits are
 * zero. Each half of the pair gets its own float_status pointer; if that
 * half's low predicate bit is clear the computation is routed through a
 * scratch copy so the cumulative FP exception flags are unchanged for it.
 */
#define DO_VCMLA(OP, ESIZE, TYPE, ROT, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                   \
                                void *vd, void *vn, void *vm)       \
    {                                                               \
        TYPE *d = vd, *n = vn, *m = vm;                             \
        TYPE r0, r1, e1, e2, e3, e4;                                \
        uint16_t mask = mve_element_mask(env);                      \
        unsigned e;                                                 \
        float_status *fpst0, *fpst1;                                \
        float_status scratch_fpst;                                  \
        /* We loop through pairs of elements at a time */           \
        for (e = 0; e < 16 / ESIZE; e += 2, mask >>= ESIZE * 2) {   \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) {      \
                continue;                                           \
            }                                                       \
            fpst0 = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            fpst1 = fpst0;                                          \
            if (!(mask & 1)) {                                      \
                scratch_fpst = *fpst0;                              \
                fpst0 = &scratch_fpst;                              \
            }                                                       \
            if (!(mask & (1 << ESIZE))) {                           \
                scratch_fpst = *fpst1;                              \
                fpst1 = &scratch_fpst;                              \
            }                                                       \
            switch (ROT) {                                          \
            case 0:                                                 \
                e1 = m[H##ESIZE(e)];                                \
                e2 = n[H##ESIZE(e)];                                \
                e3 = m[H##ESIZE(e + 1)];                            \
                e4 = n[H##ESIZE(e)];                                \
                break;                                              \
            case 1:                                                 \
                e1 = TYPE##_chs(m[H##ESIZE(e + 1)]);                \
                e2 = n[H##ESIZE(e + 1)];                            \
                e3 = m[H##ESIZE(e)];                                \
                e4 = n[H##ESIZE(e + 1)];                            \
                break;                                              \
            case 2:                                                 \
                e1 = TYPE##_chs(m[H##ESIZE(e)]);                    \
                e2 = n[H##ESIZE(e)];                                \
                e3 = TYPE##_chs(m[H##ESIZE(e + 1)]);                \
                e4 = n[H##ESIZE(e)];                                \
                break;                                              \
            case 3:                                                 \
                e1 = m[H##ESIZE(e + 1)];                            \
                e2 = n[H##ESIZE(e + 1)];                            \
                e3 = TYPE##_chs(m[H##ESIZE(e)]);                    \
                e4 = n[H##ESIZE(e + 1)];                            \
                break;                                              \
            default:                                                \
                g_assert_not_reached();                             \
            }                                                       \
            r0 = FN(e2, e1, d[H##ESIZE(e)], fpst0);                 \
            r1 = FN(e4, e3, d[H##ESIZE(e + 1)], fpst1);             \
            mergemask(&d[H##ESIZE(e)], r0, mask);                   \
            mergemask(&d[H##ESIZE(e + 1)], r1, mask >> ESIZE);      \
        }                                                           \
        mve_advance_vpt(env);                                       \
    }

/* VCMUL variants ignore the D (accumulator) argument */
#define DO_VCMULH(N, M, D, S) float16_mul(N, M, S)
#define DO_VCMULS(N, M, D, S) float32_mul(N, M, S)

#define DO_VCMLAH(N, M, D, S) float16_muladd(N, M, D, 0, S)
#define DO_VCMLAS(N, M, D, S) float32_muladd(N, M, D, 0, S)

DO_VCMLA(vcmul0h, 2, float16, 0, DO_VCMULH)
DO_VCMLA(vcmul0s, 4, float32, 0, DO_VCMULS)
DO_VCMLA(vcmul90h, 2, float16, 1, DO_VCMULH)
DO_VCMLA(vcmul90s, 4, float32, 1, DO_VCMULS)
DO_VCMLA(vcmul180h, 2, float16, 2, DO_VCMULH)
DO_VCMLA(vcmul180s, 4, float32, 2, DO_VCMULS)
DO_VCMLA(vcmul270h, 2, float16, 3, DO_VCMULH)
DO_VCMLA(vcmul270s, 4, float32, 3, DO_VCMULS)

DO_VCMLA(vcmla0h, 2, float16, 0, DO_VCMLAH)
DO_VCMLA(vcmla0s, 4, float32, 0, DO_VCMLAS)
DO_VCMLA(vcmla90h, 2, float16, 1, DO_VCMLAH)
DO_VCMLA(vcmla90s, 4, float32, 1, DO_VCMLAS)
DO_VCMLA(vcmla180h, 2, float16, 2, DO_VCMLAH)
DO_VCMLA(vcmla180s, 4, float32, 2, DO_VCMLAS)
DO_VCMLA(vcmla270h, 2, float16, 3, DO_VCMLAH)
DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS)

/*
 * Predicated 2-operand FP vector-by-scalar ops: d[e] = FN(n[e], rm).
 * The scalar arrives as the low ESIZE bytes of the uint32_t rm.
 * Elements with no active predicate bits are skipped; elements with a
 * clear low predicate bit compute through a scratch float_status so the
 * cumulative FP exception flags are not updated for them.
 */
#define DO_2OP_FP_SCALAR(OP, ESIZE, TYPE, FN)                       \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                   \
                                void *vd, void *vn, uint32_t rm)    \
    {                                                               \
        TYPE *d = vd, *n = vn;                                      \
        TYPE r, m = rm;                                             \
        uint16_t mask = mve_element_mask(env);                      \
        unsigned e;                                                 \
        float_status *fpst;                                         \
        float_status scratch_fpst;                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {          \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {          \
                continue;                                           \
            }                                                       \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & 1)) {                                      \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst;                               \
                fpst = &scratch_fpst;                               \
            }                                                       \
            r = FN(n[H##ESIZE(e)], m, fpst);                        \
            mergemask(&d[H##ESIZE(e)], r, mask);                    \
        }                                                           \
        mve_advance_vpt(env);                                       \
    }

/* Instantiate both the float16 and float32 forms of a scalar op */
#define DO_2OP_FP_SCALAR_ALL(OP, FN)                  \
    DO_2OP_FP_SCALAR(OP##h, 2, float16, float16_##FN) \
    DO_2OP_FP_SCALAR(OP##s, 4, float32, float32_##FN)

DO_2OP_FP_SCALAR_ALL(vfadd_scalar, add)
DO_2OP_FP_SCALAR_ALL(vfsub_scalar, sub)
DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul)

/*
 * Predicated FP multiply-accumulate with a scalar operand:
 * d[e] = FN(n[e], scalar, d[e], 0, fpst), where FN is muladd-shaped.
 * Same skip / flag-suppression scheme as DO_2OP_FP_SCALAR above.
 */
#define DO_2OP_FP_ACC_SCALAR(OP, ESIZE, TYPE, FN)                   \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                   \
                                void *vd, void *vn, uint32_t rm)    \
    {                                                               \
        TYPE *d = vd, *n = vn;                                      \
        TYPE r, m = rm;                                             \
        uint16_t mask = mve_element_mask(env);                      \
        unsigned e;                                                 \
        float_status *fpst;                                         \
        float_status scratch_fpst;                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {          \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {          \
                continue;                                           \
            }                                                       \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & 1)) {                                      \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst;                               \
                fpst = &scratch_fpst;                               \
            }                                                       \
            r = FN(n[H##ESIZE(e)], m, d[H##ESIZE(e)], 0, fpst);     \
            mergemask(&d[H##ESIZE(e)], r, mask);                    \
        }                                                           \
        mve_advance_vpt(env);                                       \
    }

/* VFMAS is vector * vector + scalar, so swap op2 and op3 */
#define DO_VFMAS_SCALARH(N, M, D, F, S) float16_muladd(N, D, M, F, S)
#define DO_VFMAS_SCALARS(N, M, D, F, S) float32_muladd(N, D, M, F, S)

/* VFMA is vector * scalar + vector */
DO_2OP_FP_ACC_SCALAR(vfma_scalarh, 2, float16, float16_muladd)
DO_2OP_FP_ACC_SCALAR(vfma_scalars, 4, float32, float32_muladd)
DO_2OP_FP_ACC_SCALAR(vfmas_scalarh, 2, float16, DO_VFMAS_SCALARH)
DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS)

/*
 * Floating point max/min across vector. Folds FN over the active
 * elements starting from the general-purpose accumulator ra_in.
 * Signaling NaNs (in either operand) are silenced and raise
 * InvalidOp before FN is applied; ABS selects the "absolute value"
 * (VMAXNMAV/VMINNMAV) variants.
 */
#define DO_FP_VMAXMINV(OP, ESIZE, TYPE, ABS, FN)                \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra_in)             \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        TYPE ra = (TYPE)ra_in;                                  \
        float_status *fpst =                                    \
            &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                TYPE v = m[H##ESIZE(e)];                        \
                if (TYPE##_is_signaling_nan(ra, fpst)) {        \
                    ra = TYPE##_silence_nan(ra, fpst);          \
                    float_raise(float_flag_invalid, fpst);      \
                }                                               \
                if (TYPE##_is_signaling_nan(v, fpst)) {         \
                    v = TYPE##_silence_nan(v, fpst);            \
                    float_raise(float_flag_invalid, fpst);      \
                }                                               \
                if (ABS) {                                      \
                    v = TYPE##_abs(v);                          \
                }                                               \
                ra = FN(ra, v, fpst);                           \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

#define NOP(X) (X)

DO_FP_VMAXMINV(vmaxnmvh, 2, float16, false, float16_maxnum)
DO_FP_VMAXMINV(vmaxnmvs, 4, float32, false, float32_maxnum)
DO_FP_VMAXMINV(vminnmvh, 2, float16, false, float16_minnum)
DO_FP_VMAXMINV(vminnmvs, 4, float32, false, float32_minnum)
DO_FP_VMAXMINV(vmaxnmavh, 2, float16, true, float16_maxnum)
DO_FP_VMAXMINV(vmaxnmavs, 4, float32, true, float32_maxnum)
DO_FP_VMAXMINV(vminnmavh, 2, float16, true, float16_minnum)
DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum)

/* FP compares; note that all comparisons signal InvalidOp for QNaNs */
/*
 * Vector-by-vector compare: builds a per-byte predicate (beatpred)
 * from the comparison results, then writes it back into VPR, but only
 * for beats actually executed this insn (eci_mask) and only for
 * predicated-active bytes (mask).
 */
#define DO_VCMP_FP(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm)   \
    {                                                                   \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        bool r;                                                         \
        for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) {             \
            if ((mask & emask) == 0) {                                  \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & (1 << (e * ESIZE)))) {                         \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst);               \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

/*
 * Vector-by-scalar compare: identical to DO_VCMP_FP except the second
 * comparison operand is the low ESIZE bytes of the uint32_t rm.
 */
#define DO_VCMP_FP_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,             \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *n = vn;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        bool r;                                                         \
        for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) {             \
            if ((mask & emask) == 0) {                                  \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & (1 << (e * ESIZE)))) {                         \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], (TYPE)rm, fpst);                     \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

/* Instantiate both the vector (VOP) and scalar (SOP) compare forms */
#define DO_VCMP_FP_BOTH(VOP, SOP, ESIZE, TYPE, FN) \
    DO_VCMP_FP(VOP, ESIZE, TYPE, FN)               \
    DO_VCMP_FP_SCALAR(SOP, ESIZE, TYPE, FN)

/*
 * Some care is needed here to get the correct result for the unordered case.
 * Architecturally EQ, GE and GT are defined to be false for unordered, but
 * the NE, LT and LE comparisons are defined as simple logical inverses of
 * EQ, GE and GT and so they must return true for unordered. The softfloat
 * comparison functions float*_{eq,le,lt} all return false for unordered.
 */
#define DO_GE16(X, Y, S) float16_le(Y, X, S)
#define DO_GE32(X, Y, S) float32_le(Y, X, S)
#define DO_GT16(X, Y, S) float16_lt(Y, X, S)
#define DO_GT32(X, Y, S) float32_lt(Y, X, S)

DO_VCMP_FP_BOTH(vfcmpeqh, vfcmpeq_scalarh, 2, float16, float16_eq)
DO_VCMP_FP_BOTH(vfcmpeqs, vfcmpeq_scalars, 4, float32, float32_eq)

DO_VCMP_FP_BOTH(vfcmpneh, vfcmpne_scalarh, 2, float16, !float16_eq)
DO_VCMP_FP_BOTH(vfcmpnes, vfcmpne_scalars, 4, float32, !float32_eq)

DO_VCMP_FP_BOTH(vfcmpgeh, vfcmpge_scalarh, 2, float16, DO_GE16)
DO_VCMP_FP_BOTH(vfcmpges, vfcmpge_scalars, 4, float32, DO_GE32)

DO_VCMP_FP_BOTH(vfcmplth, vfcmplt_scalarh, 2, float16, !DO_GE16)
DO_VCMP_FP_BOTH(vfcmplts, vfcmplt_scalars, 4, float32, !DO_GE32)

DO_VCMP_FP_BOTH(vfcmpgth, vfcmpgt_scalarh, 2, float16, DO_GT16)
DO_VCMP_FP_BOTH(vfcmpgts, vfcmpgt_scalars, 4, float32, DO_GT32)

DO_VCMP_FP_BOTH(vfcmpleh, vfcmple_scalarh, 2, float16, !DO_GT16)
DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32)

/*
 * Predicated fixed-point <-> FP conversions: d[e] = FN(m[e], shift).
 * FN is one of the existing VFP conversion helpers; the *_round_to_zero
 * variants are used for the FP->integer direction. The usual MVE
 * predication applies: fully-inactive elements are skipped, and elements
 * with a clear low predicate bit are computed through a scratch
 * float_status so cumulative FP exception flags are not updated.
 */
#define DO_VCVT_FIXED(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm, \
                                uint32_t shift)                     \
    {                                                               \
        TYPE *d = vd, *m = vm;                                      \
        TYPE r;                                                     \
        uint16_t mask = mve_element_mask(env);                      \
        unsigned e;                                                 \
        float_status *fpst;                                         \
        float_status scratch_fpst;                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {          \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {          \
                continue;                                           \
            }                                                       \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & 1)) {                                      \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst;                               \
                fpst = &scratch_fpst;                               \
            }                                                       \
            r = FN(m[H##ESIZE(e)], shift, fpst);                    \
            mergemask(&d[H##ESIZE(e)], r, mask);                    \
        }                                                           \
        mve_advance_vpt(env);                                       \
    }

DO_VCVT_FIXED(vcvt_sh, 2, int16_t, helper_vfp_shtoh)
DO_VCVT_FIXED(vcvt_uh, 2, uint16_t, helper_vfp_uhtoh)
DO_VCVT_FIXED(vcvt_hs, 2, int16_t, helper_vfp_toshh_round_to_zero)
DO_VCVT_FIXED(vcvt_hu, 2, uint16_t, helper_vfp_touhh_round_to_zero)
DO_VCVT_FIXED(vcvt_sf, 4, int32_t, helper_vfp_sltos)
DO_VCVT_FIXED(vcvt_uf, 4, uint32_t, helper_vfp_ultos)
DO_VCVT_FIXED(vcvt_fs, 4, int32_t, helper_vfp_tosls_round_to_zero)
DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero)

/* VCVT with specified rmode */
/*
 * The requested rounding mode is installed in the shared base
 * float_status for the duration of the loop and the previous mode is
 * restored afterwards; flag-suppressed elements copy the base status
 * (including the overridden rounding mode) into scratch_fpst.
 * FN is called with a fixed-point shift of 0.
 */
#define DO_VCVT_RMODE(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                   \
                                void *vd, void *vm, uint32_t rmode) \
    {                                                               \
        TYPE *d = vd, *m = vm;                                      \
        TYPE r;                                                     \
        uint16_t mask = mve_element_mask(env);                      \
        unsigned e;                                                 \
        float_status *fpst;                                         \
        float_status scratch_fpst;                                  \
        float_status *base_fpst =                                   \
            &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
        uint32_t prev_rmode = get_float_rounding_mode(base_fpst);   \
        set_float_rounding_mode(rmode, base_fpst);                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {          \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {          \
                continue;                                           \
            }                                                       \
            fpst = base_fpst;                                       \
            if (!(mask & 1)) {                                      \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst;                               \
                fpst = &scratch_fpst;                               \
            }                                                       \
            r = FN(m[H##ESIZE(e)], 0, fpst);                        \
            mergemask(&d[H##ESIZE(e)], r, mask);                    \
        }                                                           \
        set_float_rounding_mode(prev_rmode, base_fpst);             \
        mve_advance_vpt(env);                                       \
    }

/* Result element types are unsigned containers for the raw bit patterns */
DO_VCVT_RMODE(vcvt_rm_sh, 2, uint16_t, helper_vfp_toshh)
DO_VCVT_RMODE(vcvt_rm_uh, 2, uint16_t, helper_vfp_touhh)
DO_VCVT_RMODE(vcvt_rm_ss, 4, uint32_t, helper_vfp_tosls)
DO_VCVT_RMODE(vcvt_rm_us, 4, uint32_t, helper_vfp_touls)

/* VRINT reuses DO_VCVT_RMODE; these adapters drop the unused shift arg */
#define DO_VRINT_RM_H(M, F, S) helper_rinth(M, S)
#define DO_VRINT_RM_S(M, F, S) helper_rints(M, S)

DO_VCVT_RMODE(vrint_rm_h, 2, uint16_t, DO_VRINT_RM_H)
DO_VCVT_RMODE(vrint_rm_s, 4, uint32_t, DO_VRINT_RM_S)

/*
 * VCVT between halfprec and singleprec. As usual for halfprec
 * conversions, FZ16 is ignored and AHP is observed.
 */
static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top)
{
    /*
     * Narrow single -> half: the e'th 32-bit source element is written
     * to the top or bottom (per 'top') half of the e'th 32-bit slot of
     * the destination. Flush-to-zero is forced off for the conversion
     * and the previous setting restored afterwards.
     */
    uint16_t *d = vd;
    uint32_t *m = vm;
    uint16_t r;
    uint16_t mask = mve_element_mask(env);
    bool ieee = !(env->vfp.fpcr & FPCR_AHP);  /* AHP set => Arm alt format */
    unsigned e;
    float_status *fpst;
    float_status scratch_fpst;
    float_status *base_fpst = &env->vfp.fp_status[FPST_STD];
    bool old_fz = get_flush_to_zero(base_fpst);
    set_flush_to_zero(false, base_fpst);
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
            continue;
        }
        fpst = base_fpst;
        if (!(mask & 1)) {
            /* We need the result but without updating flags */
            scratch_fpst = *fpst;
            fpst = &scratch_fpst;
        }
        r = float32_to_float16(m[H4(e)], ieee, fpst);
        mergemask(&d[H2(e * 2 + top)], r, mask >> (top * 2));
    }
    set_flush_to_zero(old_fz, base_fpst);
    mve_advance_vpt(env);
}

static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top)
{
    /*
     * Widen half -> single: the top or bottom (per 'top') half of the
     * e'th 32-bit source slot is converted into the e'th 32-bit
     * destination element. Flush-inputs-to-zero is forced off for the
     * conversion and restored afterwards.
     */
    uint32_t *d = vd;
    uint16_t *m = vm;
    uint32_t r;
    uint16_t mask = mve_element_mask(env);
    bool ieee = !(env->vfp.fpcr & FPCR_AHP);  /* AHP set => Arm alt format */
    unsigned e;
    float_status *fpst;
    float_status scratch_fpst;
    float_status *base_fpst = &env->vfp.fp_status[FPST_STD];
    bool old_fiz = get_flush_inputs_to_zero(base_fpst);
    set_flush_inputs_to_zero(false, base_fpst);
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
            continue;
        }
        fpst = base_fpst;
        if (!(mask & (1 << (top * 2)))) {
            /* We need the result but without updating flags */
            scratch_fpst = *fpst;
            fpst = &scratch_fpst;
        }
        r = float16_to_float32(m[H2(e * 2 + top)], ieee, fpst);
        mergemask(&d[H4(e)], r, mask);
    }
    set_flush_inputs_to_zero(old_fiz, base_fpst);
    mve_advance_vpt(env);
}

/* VCVTB/VCVTT entry points: 'top' selects bottom (0) or top (1) halves */
void HELPER(mve_vcvtb_sh)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_sh(env, vd, vm, 0);
}
void HELPER(mve_vcvtt_sh)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_sh(env, vd, vm, 1);
}
void HELPER(mve_vcvtb_hs)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_hs(env, vd, vm, 0);
}
void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_hs(env, vd, vm, 1);
}

/*
 * Predicated 1-operand FP op: d[e] = FN(m[e]), with the usual MVE
 * skip / flag-suppression predication scheme.
 */
#define DO_1OP_FP(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm) \
    {                                                               \
        TYPE *d = vd, *m = vm;                                      \
        TYPE r;                                                     \
        uint16_t mask = mve_element_mask(env);                      \
        unsigned e;                                                 \
        float_status *fpst;                                         \
        float_status scratch_fpst;                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {          \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {          \
                continue;                                           \
            }                                                       \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & 1)) {                                      \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst;                               \
                fpst = &scratch_fpst;                               \
            }                                                       \
            r = FN(m[H##ESIZE(e)], fpst);                           \
            mergemask(&d[H##ESIZE(e)], r, mask);                    \
        }                                                           \
        mve_advance_vpt(env);                                       \
    }

DO_1OP_FP(vrintx_h, 2, float16, float16_round_to_int)
DO_1OP_FP(vrintx_s, 4, float32, float32_round_to_int)