1507b6a50SPeter Maydell /* 2507b6a50SPeter Maydell * M-profile MVE Operations 3507b6a50SPeter Maydell * 4507b6a50SPeter Maydell * Copyright (c) 2021 Linaro, Ltd. 5507b6a50SPeter Maydell * 6507b6a50SPeter Maydell * This library is free software; you can redistribute it and/or 7507b6a50SPeter Maydell * modify it under the terms of the GNU Lesser General Public 8507b6a50SPeter Maydell * License as published by the Free Software Foundation; either 9507b6a50SPeter Maydell * version 2.1 of the License, or (at your option) any later version. 10507b6a50SPeter Maydell * 11507b6a50SPeter Maydell * This library is distributed in the hope that it will be useful, 12507b6a50SPeter Maydell * but WITHOUT ANY WARRANTY; without even the implied warranty of 13507b6a50SPeter Maydell * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14507b6a50SPeter Maydell * Lesser General Public License for more details. 15507b6a50SPeter Maydell * 16507b6a50SPeter Maydell * You should have received a copy of the GNU Lesser General Public 17507b6a50SPeter Maydell * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18507b6a50SPeter Maydell */ 19507b6a50SPeter Maydell 20507b6a50SPeter Maydell #include "qemu/osdep.h" 21507b6a50SPeter Maydell #include "cpu.h" 22507b6a50SPeter Maydell #include "internals.h" 23507b6a50SPeter Maydell #include "vec_internal.h" 24507b6a50SPeter Maydell #include "exec/helper-proto.h" 25507b6a50SPeter Maydell #include "exec/cpu_ldst.h" 26507b6a50SPeter Maydell #include "exec/exec-all.h" 2759c91773SPeter Maydell #include "tcg/tcg.h" 28507b6a50SPeter Maydell 29e0d40070SPeter Maydell static uint16_t mve_eci_mask(CPUARMState *env) 30e0d40070SPeter Maydell { 31e0d40070SPeter Maydell /* 32e0d40070SPeter Maydell * Return the mask of which elements in the MVE vector correspond 33e0d40070SPeter Maydell * to beats being executed. 
The mask has 1 bits for executed lanes 34e0d40070SPeter Maydell * and 0 bits where ECI says this beat was already executed. 35e0d40070SPeter Maydell */ 36e0d40070SPeter Maydell int eci; 37e0d40070SPeter Maydell 38e0d40070SPeter Maydell if ((env->condexec_bits & 0xf) != 0) { 39e0d40070SPeter Maydell return 0xffff; 40e0d40070SPeter Maydell } 41e0d40070SPeter Maydell 42e0d40070SPeter Maydell eci = env->condexec_bits >> 4; 43e0d40070SPeter Maydell switch (eci) { 44e0d40070SPeter Maydell case ECI_NONE: 45e0d40070SPeter Maydell return 0xffff; 46e0d40070SPeter Maydell case ECI_A0: 47e0d40070SPeter Maydell return 0xfff0; 48e0d40070SPeter Maydell case ECI_A0A1: 49e0d40070SPeter Maydell return 0xff00; 50e0d40070SPeter Maydell case ECI_A0A1A2: 51e0d40070SPeter Maydell case ECI_A0A1A2B0: 52e0d40070SPeter Maydell return 0xf000; 53e0d40070SPeter Maydell default: 54e0d40070SPeter Maydell g_assert_not_reached(); 55e0d40070SPeter Maydell } 56e0d40070SPeter Maydell } 57e0d40070SPeter Maydell 58507b6a50SPeter Maydell static uint16_t mve_element_mask(CPUARMState *env) 59507b6a50SPeter Maydell { 60507b6a50SPeter Maydell /* 61507b6a50SPeter Maydell * Return the mask of which elements in the MVE vector should be 62507b6a50SPeter Maydell * updated. This is a combination of multiple things: 63507b6a50SPeter Maydell * (1) by default, we update every lane in the vector 64507b6a50SPeter Maydell * (2) VPT predication stores its state in the VPR register; 65507b6a50SPeter Maydell * (3) low-overhead-branch tail predication will mask out part 66507b6a50SPeter Maydell * the vector on the final iteration of the loop 67507b6a50SPeter Maydell * (4) if EPSR.ECI is set then we must execute only some beats 68507b6a50SPeter Maydell * of the insn 69507b6a50SPeter Maydell * We combine all these into a 16-bit result with the same semantics 70507b6a50SPeter Maydell * as VPR.P0: 0 to mask the lane, 1 if it is active. 
71507b6a50SPeter Maydell * 8-bit vector ops will look at all bits of the result; 72507b6a50SPeter Maydell * 16-bit ops will look at bits 0, 2, 4, ...; 73507b6a50SPeter Maydell * 32-bit ops will look at bits 0, 4, 8 and 12. 74507b6a50SPeter Maydell * Compare pseudocode GetCurInstrBeat(), though that only returns 75507b6a50SPeter Maydell * the 4-bit slice of the mask corresponding to a single beat. 76507b6a50SPeter Maydell */ 77507b6a50SPeter Maydell uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0); 78507b6a50SPeter Maydell 79507b6a50SPeter Maydell if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) { 80507b6a50SPeter Maydell mask |= 0xff; 81507b6a50SPeter Maydell } 82507b6a50SPeter Maydell if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) { 83507b6a50SPeter Maydell mask |= 0xff00; 84507b6a50SPeter Maydell } 85507b6a50SPeter Maydell 86507b6a50SPeter Maydell if (env->v7m.ltpsize < 4 && 87507b6a50SPeter Maydell env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) { 88507b6a50SPeter Maydell /* 89507b6a50SPeter Maydell * Tail predication active, and this is the last loop iteration. 90507b6a50SPeter Maydell * The element size is (1 << ltpsize), and we only want to process 91507b6a50SPeter Maydell * loopcount elements, so we want to retain the least significant 92507b6a50SPeter Maydell * (loopcount * esize) predicate bits and zero out bits above that. 93507b6a50SPeter Maydell */ 94507b6a50SPeter Maydell int masklen = env->regs[14] << env->v7m.ltpsize; 95507b6a50SPeter Maydell assert(masklen <= 16); 963f4f1880SPeter Maydell uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0; 973f4f1880SPeter Maydell mask &= ltpmask; 98507b6a50SPeter Maydell } 99507b6a50SPeter Maydell 100507b6a50SPeter Maydell /* 101507b6a50SPeter Maydell * ECI bits indicate which beats are already executed; 102507b6a50SPeter Maydell * we handle this by effectively predicating them out. 
103507b6a50SPeter Maydell */ 104e0d40070SPeter Maydell mask &= mve_eci_mask(env); 105507b6a50SPeter Maydell return mask; 106507b6a50SPeter Maydell } 107507b6a50SPeter Maydell 108507b6a50SPeter Maydell static void mve_advance_vpt(CPUARMState *env) 109507b6a50SPeter Maydell { 110507b6a50SPeter Maydell /* Advance the VPT and ECI state if necessary */ 111507b6a50SPeter Maydell uint32_t vpr = env->v7m.vpr; 112507b6a50SPeter Maydell unsigned mask01, mask23; 113e3152d02SPeter Maydell uint16_t inv_mask; 114e3152d02SPeter Maydell uint16_t eci_mask = mve_eci_mask(env); 115507b6a50SPeter Maydell 116507b6a50SPeter Maydell if ((env->condexec_bits & 0xf) == 0) { 117507b6a50SPeter Maydell env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ? 118507b6a50SPeter Maydell (ECI_A0 << 4) : (ECI_NONE << 4); 119507b6a50SPeter Maydell } 120507b6a50SPeter Maydell 121507b6a50SPeter Maydell if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) { 122507b6a50SPeter Maydell /* VPT not enabled, nothing to do */ 123507b6a50SPeter Maydell return; 124507b6a50SPeter Maydell } 125507b6a50SPeter Maydell 126e3152d02SPeter Maydell /* Invert P0 bits if needed, but only for beats we actually executed */ 127507b6a50SPeter Maydell mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01); 128507b6a50SPeter Maydell mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23); 129e3152d02SPeter Maydell /* Start by assuming we invert all bits corresponding to executed beats */ 130e3152d02SPeter Maydell inv_mask = eci_mask; 131e3152d02SPeter Maydell if (mask01 <= 8) { 132e3152d02SPeter Maydell /* MASK01 says don't invert low half of P0 */ 133e3152d02SPeter Maydell inv_mask &= ~0xff; 134507b6a50SPeter Maydell } 135e3152d02SPeter Maydell if (mask23 <= 8) { 136e3152d02SPeter Maydell /* MASK23 says don't invert high half of P0 */ 137e3152d02SPeter Maydell inv_mask &= ~0xff00; 138507b6a50SPeter Maydell } 139e3152d02SPeter Maydell vpr ^= inv_mask; 140e3152d02SPeter Maydell /* Only update MASK01 if beat 1 executed */ 
141e3152d02SPeter Maydell if (eci_mask & 0xf0) { 142507b6a50SPeter Maydell vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1); 143e3152d02SPeter Maydell } 144e3152d02SPeter Maydell /* Beat 3 always executes, so update MASK23 */ 145507b6a50SPeter Maydell vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1); 146507b6a50SPeter Maydell env->v7m.vpr = vpr; 147507b6a50SPeter Maydell } 148507b6a50SPeter Maydell 14941704cc2SPeter Maydell /* For loads, predicated lanes are zeroed instead of keeping their old values */ 150507b6a50SPeter Maydell #define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \ 151507b6a50SPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \ 152507b6a50SPeter Maydell { \ 153507b6a50SPeter Maydell TYPE *d = vd; \ 154507b6a50SPeter Maydell uint16_t mask = mve_element_mask(env); \ 15541704cc2SPeter Maydell uint16_t eci_mask = mve_eci_mask(env); \ 156507b6a50SPeter Maydell unsigned b, e; \ 157507b6a50SPeter Maydell /* \ 158507b6a50SPeter Maydell * R_SXTM allows the dest reg to become UNKNOWN for abandoned \ 159507b6a50SPeter Maydell * beats so we don't care if we update part of the dest and \ 160507b6a50SPeter Maydell * then take an exception. \ 161507b6a50SPeter Maydell */ \ 162507b6a50SPeter Maydell for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \ 16341704cc2SPeter Maydell if (eci_mask & (1 << b)) { \ 16441704cc2SPeter Maydell d[H##ESIZE(e)] = (mask & (1 << b)) ? 
\ 16541704cc2SPeter Maydell cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \ 166507b6a50SPeter Maydell } \ 167507b6a50SPeter Maydell addr += MSIZE; \ 168507b6a50SPeter Maydell } \ 169507b6a50SPeter Maydell mve_advance_vpt(env); \ 170507b6a50SPeter Maydell } 171507b6a50SPeter Maydell 172507b6a50SPeter Maydell #define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \ 173507b6a50SPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \ 174507b6a50SPeter Maydell { \ 175507b6a50SPeter Maydell TYPE *d = vd; \ 176507b6a50SPeter Maydell uint16_t mask = mve_element_mask(env); \ 177507b6a50SPeter Maydell unsigned b, e; \ 178507b6a50SPeter Maydell for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \ 179507b6a50SPeter Maydell if (mask & (1 << b)) { \ 180507b6a50SPeter Maydell cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \ 181507b6a50SPeter Maydell } \ 182507b6a50SPeter Maydell addr += MSIZE; \ 183507b6a50SPeter Maydell } \ 184507b6a50SPeter Maydell mve_advance_vpt(env); \ 185507b6a50SPeter Maydell } 186507b6a50SPeter Maydell 187507b6a50SPeter Maydell DO_VLDR(vldrb, 1, ldub, 1, uint8_t) 188507b6a50SPeter Maydell DO_VLDR(vldrh, 2, lduw, 2, uint16_t) 189507b6a50SPeter Maydell DO_VLDR(vldrw, 4, ldl, 4, uint32_t) 190507b6a50SPeter Maydell 191507b6a50SPeter Maydell DO_VSTR(vstrb, 1, stb, 1, uint8_t) 192507b6a50SPeter Maydell DO_VSTR(vstrh, 2, stw, 2, uint16_t) 193507b6a50SPeter Maydell DO_VSTR(vstrw, 4, stl, 4, uint32_t) 194507b6a50SPeter Maydell 1952fc6b751SPeter Maydell DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t) 1962fc6b751SPeter Maydell DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t) 1972fc6b751SPeter Maydell DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t) 1982fc6b751SPeter Maydell DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t) 1992fc6b751SPeter Maydell DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t) 2002fc6b751SPeter Maydell DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t) 2012fc6b751SPeter Maydell 2022fc6b751SPeter Maydell DO_VSTR(vstrb_h, 1, stb, 2, int16_t) 2032fc6b751SPeter Maydell 
DO_VSTR(vstrb_w, 1, stb, 4, int32_t) 2042fc6b751SPeter Maydell DO_VSTR(vstrh_w, 2, stw, 4, int32_t) 2052fc6b751SPeter Maydell 206507b6a50SPeter Maydell #undef DO_VLDR 207507b6a50SPeter Maydell #undef DO_VSTR 2080f0f2bd5SPeter Maydell 2090f0f2bd5SPeter Maydell /* 210dc18628bSPeter Maydell * Gather loads/scatter stores. Here each element of Qm specifies 211dc18628bSPeter Maydell * an offset to use from the base register Rm. In the _os_ versions 212dc18628bSPeter Maydell * that offset is scaled by the element size. 213dc18628bSPeter Maydell * For loads, predicated lanes are zeroed instead of retaining 214dc18628bSPeter Maydell * their previous values. 215dc18628bSPeter Maydell */ 216*fac80f08SPeter Maydell #define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB) \ 217dc18628bSPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 218dc18628bSPeter Maydell uint32_t base) \ 219dc18628bSPeter Maydell { \ 220dc18628bSPeter Maydell TYPE *d = vd; \ 221dc18628bSPeter Maydell OFFTYPE *m = vm; \ 222dc18628bSPeter Maydell uint16_t mask = mve_element_mask(env); \ 223dc18628bSPeter Maydell uint16_t eci_mask = mve_eci_mask(env); \ 224dc18628bSPeter Maydell unsigned e; \ 225dc18628bSPeter Maydell uint32_t addr; \ 226dc18628bSPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \ 227dc18628bSPeter Maydell if (!(eci_mask & 1)) { \ 228dc18628bSPeter Maydell continue; \ 229dc18628bSPeter Maydell } \ 230dc18628bSPeter Maydell addr = ADDRFN(base, m[H##ESIZE(e)]); \ 231dc18628bSPeter Maydell d[H##ESIZE(e)] = (mask & 1) ? 
\ 232dc18628bSPeter Maydell cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \ 233*fac80f08SPeter Maydell if (WB) { \ 234*fac80f08SPeter Maydell m[H##ESIZE(e)] = addr; \ 235*fac80f08SPeter Maydell } \ 236dc18628bSPeter Maydell } \ 237dc18628bSPeter Maydell mve_advance_vpt(env); \ 238dc18628bSPeter Maydell } 239dc18628bSPeter Maydell 240dc18628bSPeter Maydell /* We know here TYPE is unsigned so always the same as the offset type */ 241*fac80f08SPeter Maydell #define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB) \ 242dc18628bSPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 243dc18628bSPeter Maydell uint32_t base) \ 244dc18628bSPeter Maydell { \ 245dc18628bSPeter Maydell TYPE *d = vd; \ 246dc18628bSPeter Maydell TYPE *m = vm; \ 247dc18628bSPeter Maydell uint16_t mask = mve_element_mask(env); \ 248*fac80f08SPeter Maydell uint16_t eci_mask = mve_eci_mask(env); \ 249dc18628bSPeter Maydell unsigned e; \ 250dc18628bSPeter Maydell uint32_t addr; \ 251*fac80f08SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \ 252*fac80f08SPeter Maydell if (!(eci_mask & 1)) { \ 253*fac80f08SPeter Maydell continue; \ 254*fac80f08SPeter Maydell } \ 255dc18628bSPeter Maydell addr = ADDRFN(base, m[H##ESIZE(e)]); \ 256dc18628bSPeter Maydell if (mask & 1) { \ 257dc18628bSPeter Maydell cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \ 258dc18628bSPeter Maydell } \ 259*fac80f08SPeter Maydell if (WB) { \ 260*fac80f08SPeter Maydell m[H##ESIZE(e)] = addr; \ 261*fac80f08SPeter Maydell } \ 262dc18628bSPeter Maydell } \ 263dc18628bSPeter Maydell mve_advance_vpt(env); \ 264dc18628bSPeter Maydell } 265dc18628bSPeter Maydell 266dc18628bSPeter Maydell /* 267dc18628bSPeter Maydell * 64-bit accesses are slightly different: they are done as two 32-bit 268dc18628bSPeter Maydell * accesses, controlled by the predicate mask for the relevant beat, 269dc18628bSPeter Maydell * and with a single 32-bit offset in the first of the two Qm 
elements. 270dc18628bSPeter Maydell * Note that for QEMU our IMPDEF AIRCR.ENDIANNESS is always 0 (little). 271*fac80f08SPeter Maydell * Address writeback happens on the odd beats and updates the address 272*fac80f08SPeter Maydell * stored in the even-beat element. 273dc18628bSPeter Maydell */ 274*fac80f08SPeter Maydell #define DO_VLDR64_SG(OP, ADDRFN, WB) \ 275dc18628bSPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 276dc18628bSPeter Maydell uint32_t base) \ 277dc18628bSPeter Maydell { \ 278dc18628bSPeter Maydell uint32_t *d = vd; \ 279dc18628bSPeter Maydell uint32_t *m = vm; \ 280dc18628bSPeter Maydell uint16_t mask = mve_element_mask(env); \ 281dc18628bSPeter Maydell uint16_t eci_mask = mve_eci_mask(env); \ 282dc18628bSPeter Maydell unsigned e; \ 283dc18628bSPeter Maydell uint32_t addr; \ 284dc18628bSPeter Maydell for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \ 285dc18628bSPeter Maydell if (!(eci_mask & 1)) { \ 286dc18628bSPeter Maydell continue; \ 287dc18628bSPeter Maydell } \ 288dc18628bSPeter Maydell addr = ADDRFN(base, m[H4(e & ~1)]); \ 289dc18628bSPeter Maydell addr += 4 * (e & 1); \ 290dc18628bSPeter Maydell d[H4(e)] = (mask & 1) ? 
cpu_ldl_data_ra(env, addr, GETPC()) : 0; \ 291*fac80f08SPeter Maydell if (WB && (e & 1)) { \ 292*fac80f08SPeter Maydell m[H4(e & ~1)] = addr - 4; \ 293*fac80f08SPeter Maydell } \ 294dc18628bSPeter Maydell } \ 295dc18628bSPeter Maydell mve_advance_vpt(env); \ 296dc18628bSPeter Maydell } 297dc18628bSPeter Maydell 298*fac80f08SPeter Maydell #define DO_VSTR64_SG(OP, ADDRFN, WB) \ 299dc18628bSPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 300dc18628bSPeter Maydell uint32_t base) \ 301dc18628bSPeter Maydell { \ 302dc18628bSPeter Maydell uint32_t *d = vd; \ 303dc18628bSPeter Maydell uint32_t *m = vm; \ 304dc18628bSPeter Maydell uint16_t mask = mve_element_mask(env); \ 305*fac80f08SPeter Maydell uint16_t eci_mask = mve_eci_mask(env); \ 306dc18628bSPeter Maydell unsigned e; \ 307dc18628bSPeter Maydell uint32_t addr; \ 308*fac80f08SPeter Maydell for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \ 309*fac80f08SPeter Maydell if (!(eci_mask & 1)) { \ 310*fac80f08SPeter Maydell continue; \ 311*fac80f08SPeter Maydell } \ 312dc18628bSPeter Maydell addr = ADDRFN(base, m[H4(e & ~1)]); \ 313dc18628bSPeter Maydell addr += 4 * (e & 1); \ 314dc18628bSPeter Maydell if (mask & 1) { \ 315dc18628bSPeter Maydell cpu_stl_data_ra(env, addr, d[H4(e)], GETPC()); \ 316dc18628bSPeter Maydell } \ 317*fac80f08SPeter Maydell if (WB && (e & 1)) { \ 318*fac80f08SPeter Maydell m[H4(e & ~1)] = addr - 4; \ 319*fac80f08SPeter Maydell } \ 320dc18628bSPeter Maydell } \ 321dc18628bSPeter Maydell mve_advance_vpt(env); \ 322dc18628bSPeter Maydell } 323dc18628bSPeter Maydell 324dc18628bSPeter Maydell #define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET)) 325dc18628bSPeter Maydell #define ADDR_ADD_OSH(BASE, OFFSET) ((BASE) + ((OFFSET) << 1)) 326dc18628bSPeter Maydell #define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2)) 327dc18628bSPeter Maydell #define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3)) 328dc18628bSPeter Maydell 329*fac80f08SPeter Maydell 
DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false) 330*fac80f08SPeter Maydell DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false) 331*fac80f08SPeter Maydell DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false) 332dc18628bSPeter Maydell 333*fac80f08SPeter Maydell DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false) 334*fac80f08SPeter Maydell DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false) 335*fac80f08SPeter Maydell DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false) 336*fac80f08SPeter Maydell DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false) 337*fac80f08SPeter Maydell DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false) 338*fac80f08SPeter Maydell DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false) 339*fac80f08SPeter Maydell DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false) 340dc18628bSPeter Maydell 341*fac80f08SPeter Maydell DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false) 342*fac80f08SPeter Maydell DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false) 343*fac80f08SPeter Maydell DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false) 344*fac80f08SPeter Maydell DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false) 345*fac80f08SPeter Maydell DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false) 346dc18628bSPeter Maydell 347*fac80f08SPeter Maydell DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false) 348*fac80f08SPeter Maydell DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false) 349*fac80f08SPeter Maydell DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false) 350*fac80f08SPeter Maydell DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false) 351*fac80f08SPeter Maydell DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false) 352*fac80f08SPeter Maydell DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false) 
353*fac80f08SPeter Maydell DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false) 354dc18628bSPeter Maydell 355*fac80f08SPeter Maydell DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false) 356*fac80f08SPeter Maydell DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false) 357*fac80f08SPeter Maydell DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false) 358*fac80f08SPeter Maydell DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false) 359*fac80f08SPeter Maydell 360*fac80f08SPeter Maydell DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true) 361*fac80f08SPeter Maydell DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true) 362*fac80f08SPeter Maydell DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true) 363*fac80f08SPeter Maydell DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true) 364dc18628bSPeter Maydell 365dc18628bSPeter Maydell /* 3660f0f2bd5SPeter Maydell * The mergemask(D, R, M) macro performs the operation "*D = R" but 3670f0f2bd5SPeter Maydell * storing only the bytes which correspond to 1 bits in M, 3680f0f2bd5SPeter Maydell * leaving other bytes in *D unchanged. We use _Generic 3690f0f2bd5SPeter Maydell * to select the correct implementation based on the type of D. 
3700f0f2bd5SPeter Maydell */ 3710f0f2bd5SPeter Maydell 3720f0f2bd5SPeter Maydell static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask) 3730f0f2bd5SPeter Maydell { 3740f0f2bd5SPeter Maydell if (mask & 1) { 3750f0f2bd5SPeter Maydell *d = r; 3760f0f2bd5SPeter Maydell } 3770f0f2bd5SPeter Maydell } 3780f0f2bd5SPeter Maydell 3790f0f2bd5SPeter Maydell static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask) 3800f0f2bd5SPeter Maydell { 3810f0f2bd5SPeter Maydell mergemask_ub((uint8_t *)d, r, mask); 3820f0f2bd5SPeter Maydell } 3830f0f2bd5SPeter Maydell 3840f0f2bd5SPeter Maydell static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask) 3850f0f2bd5SPeter Maydell { 3860f0f2bd5SPeter Maydell uint16_t bmask = expand_pred_b_data[mask & 3]; 3870f0f2bd5SPeter Maydell *d = (*d & ~bmask) | (r & bmask); 3880f0f2bd5SPeter Maydell } 3890f0f2bd5SPeter Maydell 3900f0f2bd5SPeter Maydell static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask) 3910f0f2bd5SPeter Maydell { 3920f0f2bd5SPeter Maydell mergemask_uh((uint16_t *)d, r, mask); 3930f0f2bd5SPeter Maydell } 3940f0f2bd5SPeter Maydell 3950f0f2bd5SPeter Maydell static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask) 3960f0f2bd5SPeter Maydell { 3970f0f2bd5SPeter Maydell uint32_t bmask = expand_pred_b_data[mask & 0xf]; 3980f0f2bd5SPeter Maydell *d = (*d & ~bmask) | (r & bmask); 3990f0f2bd5SPeter Maydell } 4000f0f2bd5SPeter Maydell 4010f0f2bd5SPeter Maydell static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask) 4020f0f2bd5SPeter Maydell { 4030f0f2bd5SPeter Maydell mergemask_uw((uint32_t *)d, r, mask); 4040f0f2bd5SPeter Maydell } 4050f0f2bd5SPeter Maydell 4060f0f2bd5SPeter Maydell static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask) 4070f0f2bd5SPeter Maydell { 4080f0f2bd5SPeter Maydell uint64_t bmask = expand_pred_b_data[mask & 0xff]; 4090f0f2bd5SPeter Maydell *d = (*d & ~bmask) | (r & bmask); 4100f0f2bd5SPeter Maydell } 4110f0f2bd5SPeter Maydell 4120f0f2bd5SPeter Maydell static void 
mergemask_sq(int64_t *d, int64_t r, uint16_t mask) 4130f0f2bd5SPeter Maydell { 4140f0f2bd5SPeter Maydell mergemask_uq((uint64_t *)d, r, mask); 4150f0f2bd5SPeter Maydell } 4160f0f2bd5SPeter Maydell 4170f0f2bd5SPeter Maydell #define mergemask(D, R, M) \ 4180f0f2bd5SPeter Maydell _Generic(D, \ 4190f0f2bd5SPeter Maydell uint8_t *: mergemask_ub, \ 4200f0f2bd5SPeter Maydell int8_t *: mergemask_sb, \ 4210f0f2bd5SPeter Maydell uint16_t *: mergemask_uh, \ 4220f0f2bd5SPeter Maydell int16_t *: mergemask_sh, \ 4230f0f2bd5SPeter Maydell uint32_t *: mergemask_uw, \ 4240f0f2bd5SPeter Maydell int32_t *: mergemask_sw, \ 4250f0f2bd5SPeter Maydell uint64_t *: mergemask_uq, \ 4260f0f2bd5SPeter Maydell int64_t *: mergemask_sq)(D, R, M) 4270f0f2bd5SPeter Maydell 428ab59362fSPeter Maydell void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val) 429ab59362fSPeter Maydell { 430ab59362fSPeter Maydell /* 431ab59362fSPeter Maydell * The generated code already replicated an 8 or 16 bit constant 432ab59362fSPeter Maydell * into the 32-bit value, so we only need to write the 32-bit 433ab59362fSPeter Maydell * value to all elements of the Qreg, allowing for predication. 
434ab59362fSPeter Maydell */ 435ab59362fSPeter Maydell uint32_t *d = vd; 436ab59362fSPeter Maydell uint16_t mask = mve_element_mask(env); 437ab59362fSPeter Maydell unsigned e; 438ab59362fSPeter Maydell for (e = 0; e < 16 / 4; e++, mask >>= 4) { 439ab59362fSPeter Maydell mergemask(&d[H4(e)], val, mask); 440ab59362fSPeter Maydell } 441ab59362fSPeter Maydell mve_advance_vpt(env); 442ab59362fSPeter Maydell } 443ab59362fSPeter Maydell 4440f0f2bd5SPeter Maydell #define DO_1OP(OP, ESIZE, TYPE, FN) \ 4450f0f2bd5SPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 4460f0f2bd5SPeter Maydell { \ 4470f0f2bd5SPeter Maydell TYPE *d = vd, *m = vm; \ 4480f0f2bd5SPeter Maydell uint16_t mask = mve_element_mask(env); \ 4490f0f2bd5SPeter Maydell unsigned e; \ 4500f0f2bd5SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 4510f0f2bd5SPeter Maydell mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \ 4520f0f2bd5SPeter Maydell } \ 4530f0f2bd5SPeter Maydell mve_advance_vpt(env); \ 4540f0f2bd5SPeter Maydell } 4550f0f2bd5SPeter Maydell 4566437f1f7SPeter Maydell #define DO_CLS_B(N) (clrsb32(N) - 24) 4576437f1f7SPeter Maydell #define DO_CLS_H(N) (clrsb32(N) - 16) 4586437f1f7SPeter Maydell 4596437f1f7SPeter Maydell DO_1OP(vclsb, 1, int8_t, DO_CLS_B) 4606437f1f7SPeter Maydell DO_1OP(vclsh, 2, int16_t, DO_CLS_H) 4616437f1f7SPeter Maydell DO_1OP(vclsw, 4, int32_t, clrsb32) 4626437f1f7SPeter Maydell 4630f0f2bd5SPeter Maydell #define DO_CLZ_B(N) (clz32(N) - 24) 4640f0f2bd5SPeter Maydell #define DO_CLZ_H(N) (clz32(N) - 16) 4650f0f2bd5SPeter Maydell 4660f0f2bd5SPeter Maydell DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B) 4670f0f2bd5SPeter Maydell DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H) 4680f0f2bd5SPeter Maydell DO_1OP(vclzw, 4, uint32_t, clz32) 469249b5309SPeter Maydell 470249b5309SPeter Maydell DO_1OP(vrev16b, 2, uint16_t, bswap16) 471249b5309SPeter Maydell DO_1OP(vrev32b, 4, uint32_t, bswap32) 472249b5309SPeter Maydell DO_1OP(vrev32h, 4, uint32_t, hswap32) 
473249b5309SPeter Maydell DO_1OP(vrev64b, 8, uint64_t, bswap64) 474249b5309SPeter Maydell DO_1OP(vrev64h, 8, uint64_t, hswap64) 475249b5309SPeter Maydell DO_1OP(vrev64w, 8, uint64_t, wswap64) 4768abd3c80SPeter Maydell 4778abd3c80SPeter Maydell #define DO_NOT(N) (~(N)) 4788abd3c80SPeter Maydell 4798abd3c80SPeter Maydell DO_1OP(vmvn, 8, uint64_t, DO_NOT) 48059c91773SPeter Maydell 48159c91773SPeter Maydell #define DO_ABS(N) ((N) < 0 ? -(N) : (N)) 48259c91773SPeter Maydell #define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff)) 48359c91773SPeter Maydell #define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff)) 48459c91773SPeter Maydell 48559c91773SPeter Maydell DO_1OP(vabsb, 1, int8_t, DO_ABS) 48659c91773SPeter Maydell DO_1OP(vabsh, 2, int16_t, DO_ABS) 48759c91773SPeter Maydell DO_1OP(vabsw, 4, int32_t, DO_ABS) 48859c91773SPeter Maydell 48959c91773SPeter Maydell /* We can do these 64 bits at a time */ 49059c91773SPeter Maydell DO_1OP(vfabsh, 8, uint64_t, DO_FABSH) 49159c91773SPeter Maydell DO_1OP(vfabss, 8, uint64_t, DO_FABSS) 492399a8c76SPeter Maydell 493399a8c76SPeter Maydell #define DO_NEG(N) (-(N)) 494399a8c76SPeter Maydell #define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000)) 495399a8c76SPeter Maydell #define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000)) 496399a8c76SPeter Maydell 497399a8c76SPeter Maydell DO_1OP(vnegb, 1, int8_t, DO_NEG) 498399a8c76SPeter Maydell DO_1OP(vnegh, 2, int16_t, DO_NEG) 499399a8c76SPeter Maydell DO_1OP(vnegw, 4, int32_t, DO_NEG) 500399a8c76SPeter Maydell 501399a8c76SPeter Maydell /* We can do these 64 bits at a time */ 502399a8c76SPeter Maydell DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH) 503399a8c76SPeter Maydell DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS) 50468245e44SPeter Maydell 505eab84139SPeter Maydell /* 506eab84139SPeter Maydell * 1 operand immediates: Vda is destination and possibly also one source. 507eab84139SPeter Maydell * All these insns work at 64-bit widths. 
508eab84139SPeter Maydell */ 509eab84139SPeter Maydell #define DO_1OP_IMM(OP, FN) \ 510eab84139SPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \ 511eab84139SPeter Maydell { \ 512eab84139SPeter Maydell uint64_t *da = vda; \ 513eab84139SPeter Maydell uint16_t mask = mve_element_mask(env); \ 514eab84139SPeter Maydell unsigned e; \ 515eab84139SPeter Maydell for (e = 0; e < 16 / 8; e++, mask >>= 8) { \ 516eab84139SPeter Maydell mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \ 517eab84139SPeter Maydell } \ 518eab84139SPeter Maydell mve_advance_vpt(env); \ 519eab84139SPeter Maydell } 520eab84139SPeter Maydell 521eab84139SPeter Maydell #define DO_MOVI(N, I) (I) 522eab84139SPeter Maydell #define DO_ANDI(N, I) ((N) & (I)) 523eab84139SPeter Maydell #define DO_ORRI(N, I) ((N) | (I)) 524eab84139SPeter Maydell 525eab84139SPeter Maydell DO_1OP_IMM(vmovi, DO_MOVI) 526eab84139SPeter Maydell DO_1OP_IMM(vandi, DO_ANDI) 527eab84139SPeter Maydell DO_1OP_IMM(vorri, DO_ORRI) 528eab84139SPeter Maydell 52968245e44SPeter Maydell #define DO_2OP(OP, ESIZE, TYPE, FN) \ 53068245e44SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, \ 53168245e44SPeter Maydell void *vd, void *vn, void *vm) \ 53268245e44SPeter Maydell { \ 53368245e44SPeter Maydell TYPE *d = vd, *n = vn, *m = vm; \ 53468245e44SPeter Maydell uint16_t mask = mve_element_mask(env); \ 53568245e44SPeter Maydell unsigned e; \ 53668245e44SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 53768245e44SPeter Maydell mergemask(&d[H##ESIZE(e)], \ 53868245e44SPeter Maydell FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \ 53968245e44SPeter Maydell } \ 54068245e44SPeter Maydell mve_advance_vpt(env); \ 54168245e44SPeter Maydell } 54268245e44SPeter Maydell 5439333fe4dSPeter Maydell /* provide unsigned 2-op helpers for all sizes */ 5449333fe4dSPeter Maydell #define DO_2OP_U(OP, FN) \ 5459333fe4dSPeter Maydell DO_2OP(OP##b, 1, uint8_t, FN) \ 5469333fe4dSPeter Maydell DO_2OP(OP##h, 2, uint16_t, 
FN) \ 5479333fe4dSPeter Maydell DO_2OP(OP##w, 4, uint32_t, FN) 5489333fe4dSPeter Maydell 549cd367ff3SPeter Maydell /* provide signed 2-op helpers for all sizes */ 550cd367ff3SPeter Maydell #define DO_2OP_S(OP, FN) \ 551cd367ff3SPeter Maydell DO_2OP(OP##b, 1, int8_t, FN) \ 552cd367ff3SPeter Maydell DO_2OP(OP##h, 2, int16_t, FN) \ 553cd367ff3SPeter Maydell DO_2OP(OP##w, 4, int32_t, FN) 554cd367ff3SPeter Maydell 555ac6ad1dcSPeter Maydell /* 556ac6ad1dcSPeter Maydell * "Long" operations where two half-sized inputs (taken from either the 557ac6ad1dcSPeter Maydell * top or the bottom of the input vector) produce a double-width result. 558ac6ad1dcSPeter Maydell * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output. 559ac6ad1dcSPeter Maydell */ 560ac6ad1dcSPeter Maydell #define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 561ac6ad1dcSPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 562ac6ad1dcSPeter Maydell { \ 563ac6ad1dcSPeter Maydell LTYPE *d = vd; \ 564ac6ad1dcSPeter Maydell TYPE *n = vn, *m = vm; \ 565ac6ad1dcSPeter Maydell uint16_t mask = mve_element_mask(env); \ 566ac6ad1dcSPeter Maydell unsigned le; \ 567ac6ad1dcSPeter Maydell for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 568ac6ad1dcSPeter Maydell LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], \ 569ac6ad1dcSPeter Maydell m[H##ESIZE(le * 2 + TOP)]); \ 570ac6ad1dcSPeter Maydell mergemask(&d[H##LESIZE(le)], r, mask); \ 571ac6ad1dcSPeter Maydell } \ 572ac6ad1dcSPeter Maydell mve_advance_vpt(env); \ 573ac6ad1dcSPeter Maydell } 574ac6ad1dcSPeter Maydell 575380caf6cSPeter Maydell #define DO_2OP_SAT(OP, ESIZE, TYPE, FN) \ 576380caf6cSPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 577380caf6cSPeter Maydell { \ 578380caf6cSPeter Maydell TYPE *d = vd, *n = vn, *m = vm; \ 579380caf6cSPeter Maydell uint16_t mask = mve_element_mask(env); \ 580380caf6cSPeter Maydell unsigned e; \ 581380caf6cSPeter Maydell 
bool qc = false; \ 582380caf6cSPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 583380caf6cSPeter Maydell bool sat = false; \ 584380caf6cSPeter Maydell TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \ 585380caf6cSPeter Maydell mergemask(&d[H##ESIZE(e)], r, mask); \ 586380caf6cSPeter Maydell qc |= sat & mask & 1; \ 587380caf6cSPeter Maydell } \ 588380caf6cSPeter Maydell if (qc) { \ 589380caf6cSPeter Maydell env->vfp.qc[0] = qc; \ 590380caf6cSPeter Maydell } \ 591380caf6cSPeter Maydell mve_advance_vpt(env); \ 592380caf6cSPeter Maydell } 593380caf6cSPeter Maydell 594483da661SPeter Maydell /* provide unsigned 2-op helpers for all sizes */ 595483da661SPeter Maydell #define DO_2OP_SAT_U(OP, FN) \ 596483da661SPeter Maydell DO_2OP_SAT(OP##b, 1, uint8_t, FN) \ 597483da661SPeter Maydell DO_2OP_SAT(OP##h, 2, uint16_t, FN) \ 598483da661SPeter Maydell DO_2OP_SAT(OP##w, 4, uint32_t, FN) 599483da661SPeter Maydell 600483da661SPeter Maydell /* provide signed 2-op helpers for all sizes */ 601483da661SPeter Maydell #define DO_2OP_SAT_S(OP, FN) \ 602483da661SPeter Maydell DO_2OP_SAT(OP##b, 1, int8_t, FN) \ 603483da661SPeter Maydell DO_2OP_SAT(OP##h, 2, int16_t, FN) \ 604483da661SPeter Maydell DO_2OP_SAT(OP##w, 4, int32_t, FN) 605483da661SPeter Maydell 60668245e44SPeter Maydell #define DO_AND(N, M) ((N) & (M)) 60768245e44SPeter Maydell #define DO_BIC(N, M) ((N) & ~(M)) 60868245e44SPeter Maydell #define DO_ORR(N, M) ((N) | (M)) 60968245e44SPeter Maydell #define DO_ORN(N, M) ((N) | ~(M)) 61068245e44SPeter Maydell #define DO_EOR(N, M) ((N) ^ (M)) 61168245e44SPeter Maydell 61268245e44SPeter Maydell DO_2OP(vand, 8, uint64_t, DO_AND) 61368245e44SPeter Maydell DO_2OP(vbic, 8, uint64_t, DO_BIC) 61468245e44SPeter Maydell DO_2OP(vorr, 8, uint64_t, DO_ORR) 61568245e44SPeter Maydell DO_2OP(vorn, 8, uint64_t, DO_ORN) 61668245e44SPeter Maydell DO_2OP(veor, 8, uint64_t, DO_EOR) 6179333fe4dSPeter Maydell 6189333fe4dSPeter Maydell #define DO_ADD(N, M) ((N) + (M)) 6199333fe4dSPeter 
Maydell #define DO_SUB(N, M) ((N) - (M)) 6209333fe4dSPeter Maydell #define DO_MUL(N, M) ((N) * (M)) 6219333fe4dSPeter Maydell 6229333fe4dSPeter Maydell DO_2OP_U(vadd, DO_ADD) 6239333fe4dSPeter Maydell DO_2OP_U(vsub, DO_SUB) 6249333fe4dSPeter Maydell DO_2OP_U(vmul, DO_MUL) 625ba62cc56SPeter Maydell 626ac6ad1dcSPeter Maydell DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL) 627ac6ad1dcSPeter Maydell DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL) 628ac6ad1dcSPeter Maydell DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL) 629ac6ad1dcSPeter Maydell DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL) 630ac6ad1dcSPeter Maydell DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL) 631ac6ad1dcSPeter Maydell DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL) 632ac6ad1dcSPeter Maydell 633ac6ad1dcSPeter Maydell DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL) 634ac6ad1dcSPeter Maydell DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL) 635ac6ad1dcSPeter Maydell DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL) 636ac6ad1dcSPeter Maydell DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL) 637ac6ad1dcSPeter Maydell DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL) 638ac6ad1dcSPeter Maydell DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL) 639ac6ad1dcSPeter Maydell 640ba62cc56SPeter Maydell /* 641c1bd78cbSPeter Maydell * Polynomial multiply. We can always do this generating 64 bits 642c1bd78cbSPeter Maydell * of the result at a time, so we don't need to use DO_2OP_L. 
643c1bd78cbSPeter Maydell */ 644c1bd78cbSPeter Maydell #define VMULLPH_MASK 0x00ff00ff00ff00ffULL 645c1bd78cbSPeter Maydell #define VMULLPW_MASK 0x0000ffff0000ffffULL 646c1bd78cbSPeter Maydell #define DO_VMULLPBH(N, M) pmull_h((N) & VMULLPH_MASK, (M) & VMULLPH_MASK) 647c1bd78cbSPeter Maydell #define DO_VMULLPTH(N, M) DO_VMULLPBH((N) >> 8, (M) >> 8) 648c1bd78cbSPeter Maydell #define DO_VMULLPBW(N, M) pmull_w((N) & VMULLPW_MASK, (M) & VMULLPW_MASK) 649c1bd78cbSPeter Maydell #define DO_VMULLPTW(N, M) DO_VMULLPBW((N) >> 16, (M) >> 16) 650c1bd78cbSPeter Maydell 651c1bd78cbSPeter Maydell DO_2OP(vmullpbh, 8, uint64_t, DO_VMULLPBH) 652c1bd78cbSPeter Maydell DO_2OP(vmullpth, 8, uint64_t, DO_VMULLPTH) 653c1bd78cbSPeter Maydell DO_2OP(vmullpbw, 8, uint64_t, DO_VMULLPBW) 654c1bd78cbSPeter Maydell DO_2OP(vmullptw, 8, uint64_t, DO_VMULLPTW) 655c1bd78cbSPeter Maydell 656c1bd78cbSPeter Maydell /* 657ba62cc56SPeter Maydell * Because the computation type is at least twice as large as required, 658ba62cc56SPeter Maydell * these work for both signed and unsigned source types. 
659ba62cc56SPeter Maydell */ 660ba62cc56SPeter Maydell static inline uint8_t do_mulh_b(int32_t n, int32_t m) 661ba62cc56SPeter Maydell { 662ba62cc56SPeter Maydell return (n * m) >> 8; 663ba62cc56SPeter Maydell } 664ba62cc56SPeter Maydell 665ba62cc56SPeter Maydell static inline uint16_t do_mulh_h(int32_t n, int32_t m) 666ba62cc56SPeter Maydell { 667ba62cc56SPeter Maydell return (n * m) >> 16; 668ba62cc56SPeter Maydell } 669ba62cc56SPeter Maydell 670ba62cc56SPeter Maydell static inline uint32_t do_mulh_w(int64_t n, int64_t m) 671ba62cc56SPeter Maydell { 672ba62cc56SPeter Maydell return (n * m) >> 32; 673ba62cc56SPeter Maydell } 674ba62cc56SPeter Maydell 675fca87b78SPeter Maydell static inline uint8_t do_rmulh_b(int32_t n, int32_t m) 676fca87b78SPeter Maydell { 677fca87b78SPeter Maydell return (n * m + (1U << 7)) >> 8; 678fca87b78SPeter Maydell } 679fca87b78SPeter Maydell 680fca87b78SPeter Maydell static inline uint16_t do_rmulh_h(int32_t n, int32_t m) 681fca87b78SPeter Maydell { 682fca87b78SPeter Maydell return (n * m + (1U << 15)) >> 16; 683fca87b78SPeter Maydell } 684fca87b78SPeter Maydell 685fca87b78SPeter Maydell static inline uint32_t do_rmulh_w(int64_t n, int64_t m) 686fca87b78SPeter Maydell { 687fca87b78SPeter Maydell return (n * m + (1U << 31)) >> 32; 688fca87b78SPeter Maydell } 689fca87b78SPeter Maydell 690ba62cc56SPeter Maydell DO_2OP(vmulhsb, 1, int8_t, do_mulh_b) 691ba62cc56SPeter Maydell DO_2OP(vmulhsh, 2, int16_t, do_mulh_h) 692ba62cc56SPeter Maydell DO_2OP(vmulhsw, 4, int32_t, do_mulh_w) 693ba62cc56SPeter Maydell DO_2OP(vmulhub, 1, uint8_t, do_mulh_b) 694ba62cc56SPeter Maydell DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h) 695ba62cc56SPeter Maydell DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w) 696fca87b78SPeter Maydell 697fca87b78SPeter Maydell DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b) 698fca87b78SPeter Maydell DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h) 699fca87b78SPeter Maydell DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w) 700fca87b78SPeter Maydell DO_2OP(vrmulhub, 1, 
uint8_t, do_rmulh_b) 701fca87b78SPeter Maydell DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h) 702fca87b78SPeter Maydell DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w) 703cd367ff3SPeter Maydell 704cd367ff3SPeter Maydell #define DO_MAX(N, M) ((N) >= (M) ? (N) : (M)) 705cd367ff3SPeter Maydell #define DO_MIN(N, M) ((N) >= (M) ? (M) : (N)) 706cd367ff3SPeter Maydell 707cd367ff3SPeter Maydell DO_2OP_S(vmaxs, DO_MAX) 708cd367ff3SPeter Maydell DO_2OP_U(vmaxu, DO_MAX) 709cd367ff3SPeter Maydell DO_2OP_S(vmins, DO_MIN) 710cd367ff3SPeter Maydell DO_2OP_U(vminu, DO_MIN) 711bc67aa8dSPeter Maydell 712bc67aa8dSPeter Maydell #define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N)) 713bc67aa8dSPeter Maydell 714bc67aa8dSPeter Maydell DO_2OP_S(vabds, DO_ABD) 715bc67aa8dSPeter Maydell DO_2OP_U(vabdu, DO_ABD) 716abc48e31SPeter Maydell 717abc48e31SPeter Maydell static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m) 718abc48e31SPeter Maydell { 719abc48e31SPeter Maydell return ((uint64_t)n + m) >> 1; 720abc48e31SPeter Maydell } 721abc48e31SPeter Maydell 722abc48e31SPeter Maydell static inline int32_t do_vhadd_s(int32_t n, int32_t m) 723abc48e31SPeter Maydell { 724abc48e31SPeter Maydell return ((int64_t)n + m) >> 1; 725abc48e31SPeter Maydell } 726abc48e31SPeter Maydell 727abc48e31SPeter Maydell static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m) 728abc48e31SPeter Maydell { 729abc48e31SPeter Maydell return ((uint64_t)n - m) >> 1; 730abc48e31SPeter Maydell } 731abc48e31SPeter Maydell 732abc48e31SPeter Maydell static inline int32_t do_vhsub_s(int32_t n, int32_t m) 733abc48e31SPeter Maydell { 734abc48e31SPeter Maydell return ((int64_t)n - m) >> 1; 735abc48e31SPeter Maydell } 736abc48e31SPeter Maydell 737abc48e31SPeter Maydell DO_2OP_S(vhadds, do_vhadd_s) 738abc48e31SPeter Maydell DO_2OP_U(vhaddu, do_vhadd_u) 739abc48e31SPeter Maydell DO_2OP_S(vhsubs, do_vhsub_s) 740abc48e31SPeter Maydell DO_2OP_U(vhsubu, do_vhsub_u) 7411d2386f7SPeter Maydell 7420372cad8SPeter Maydell #define DO_VSHLS(N, M) 
do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL) 7430372cad8SPeter Maydell #define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL) 744bb002345SPeter Maydell #define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL) 745bb002345SPeter Maydell #define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL) 7460372cad8SPeter Maydell 7470372cad8SPeter Maydell DO_2OP_S(vshls, DO_VSHLS) 7480372cad8SPeter Maydell DO_2OP_U(vshlu, DO_VSHLU) 749bb002345SPeter Maydell DO_2OP_S(vrshls, DO_VRSHLS) 750bb002345SPeter Maydell DO_2OP_U(vrshlu, DO_VRSHLU) 7510372cad8SPeter Maydell 7521eb987a8SPeter Maydell #define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1) 7531eb987a8SPeter Maydell #define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1) 7541eb987a8SPeter Maydell 7551eb987a8SPeter Maydell DO_2OP_S(vrhadds, DO_RHADD_S) 7561eb987a8SPeter Maydell DO_2OP_U(vrhaddu, DO_RHADD_U) 7571eb987a8SPeter Maydell 75889bc4c4fSPeter Maydell static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m, 75989bc4c4fSPeter Maydell uint32_t inv, uint32_t carry_in, bool update_flags) 76089bc4c4fSPeter Maydell { 76189bc4c4fSPeter Maydell uint16_t mask = mve_element_mask(env); 76289bc4c4fSPeter Maydell unsigned e; 76389bc4c4fSPeter Maydell 76489bc4c4fSPeter Maydell /* If any additions trigger, we will update flags. 
*/ 76589bc4c4fSPeter Maydell if (mask & 0x1111) { 76689bc4c4fSPeter Maydell update_flags = true; 76789bc4c4fSPeter Maydell } 76889bc4c4fSPeter Maydell 76989bc4c4fSPeter Maydell for (e = 0; e < 16 / 4; e++, mask >>= 4) { 77089bc4c4fSPeter Maydell uint64_t r = carry_in; 77189bc4c4fSPeter Maydell r += n[H4(e)]; 77289bc4c4fSPeter Maydell r += m[H4(e)] ^ inv; 77389bc4c4fSPeter Maydell if (mask & 1) { 77489bc4c4fSPeter Maydell carry_in = r >> 32; 77589bc4c4fSPeter Maydell } 77689bc4c4fSPeter Maydell mergemask(&d[H4(e)], r, mask); 77789bc4c4fSPeter Maydell } 77889bc4c4fSPeter Maydell 77989bc4c4fSPeter Maydell if (update_flags) { 78089bc4c4fSPeter Maydell /* Store C, clear NZV. */ 78189bc4c4fSPeter Maydell env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK; 78289bc4c4fSPeter Maydell env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C; 78389bc4c4fSPeter Maydell } 78489bc4c4fSPeter Maydell mve_advance_vpt(env); 78589bc4c4fSPeter Maydell } 78689bc4c4fSPeter Maydell 78789bc4c4fSPeter Maydell void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm) 78889bc4c4fSPeter Maydell { 78989bc4c4fSPeter Maydell bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C; 79089bc4c4fSPeter Maydell do_vadc(env, vd, vn, vm, 0, carry_in, false); 79189bc4c4fSPeter Maydell } 79289bc4c4fSPeter Maydell 79389bc4c4fSPeter Maydell void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm) 79489bc4c4fSPeter Maydell { 79589bc4c4fSPeter Maydell bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C; 79689bc4c4fSPeter Maydell do_vadc(env, vd, vn, vm, -1, carry_in, false); 79789bc4c4fSPeter Maydell } 79889bc4c4fSPeter Maydell 79989bc4c4fSPeter Maydell 80089bc4c4fSPeter Maydell void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm) 80189bc4c4fSPeter Maydell { 80289bc4c4fSPeter Maydell do_vadc(env, vd, vn, vm, 0, 0, true); 80389bc4c4fSPeter Maydell } 80489bc4c4fSPeter Maydell 80589bc4c4fSPeter Maydell void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm) 
80689bc4c4fSPeter Maydell { 80789bc4c4fSPeter Maydell do_vadc(env, vd, vn, vm, -1, 1, true); 80889bc4c4fSPeter Maydell } 80989bc4c4fSPeter Maydell 81067ec113bSPeter Maydell #define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \ 81167ec113bSPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 81267ec113bSPeter Maydell { \ 81367ec113bSPeter Maydell TYPE *d = vd, *n = vn, *m = vm; \ 81467ec113bSPeter Maydell uint16_t mask = mve_element_mask(env); \ 81567ec113bSPeter Maydell unsigned e; \ 81667ec113bSPeter Maydell TYPE r[16 / ESIZE]; \ 81767ec113bSPeter Maydell /* Calculate all results first to avoid overwriting inputs */ \ 81867ec113bSPeter Maydell for (e = 0; e < 16 / ESIZE; e++) { \ 81967ec113bSPeter Maydell if (!(e & 1)) { \ 82067ec113bSPeter Maydell r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \ 82167ec113bSPeter Maydell } else { \ 82267ec113bSPeter Maydell r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \ 82367ec113bSPeter Maydell } \ 82467ec113bSPeter Maydell } \ 82567ec113bSPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 82667ec113bSPeter Maydell mergemask(&d[H##ESIZE(e)], r[e], mask); \ 82767ec113bSPeter Maydell } \ 82867ec113bSPeter Maydell mve_advance_vpt(env); \ 82967ec113bSPeter Maydell } 83067ec113bSPeter Maydell 83167ec113bSPeter Maydell #define DO_VCADD_ALL(OP, FN0, FN1) \ 83267ec113bSPeter Maydell DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \ 83367ec113bSPeter Maydell DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \ 83467ec113bSPeter Maydell DO_VCADD(OP##w, 4, int32_t, FN0, FN1) 83567ec113bSPeter Maydell 83667ec113bSPeter Maydell DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD) 83767ec113bSPeter Maydell DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB) 8388625693aSPeter Maydell DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s) 8398625693aSPeter Maydell DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s) 84067ec113bSPeter Maydell 84139f2ec85SPeter Maydell static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s) 
84239f2ec85SPeter Maydell { 84339f2ec85SPeter Maydell if (val > max) { 84439f2ec85SPeter Maydell *s = true; 84539f2ec85SPeter Maydell return max; 84639f2ec85SPeter Maydell } else if (val < min) { 84739f2ec85SPeter Maydell *s = true; 84839f2ec85SPeter Maydell return min; 84939f2ec85SPeter Maydell } 85039f2ec85SPeter Maydell return val; 85139f2ec85SPeter Maydell } 85239f2ec85SPeter Maydell 85339f2ec85SPeter Maydell #define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s) 85439f2ec85SPeter Maydell #define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s) 85539f2ec85SPeter Maydell #define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s) 85639f2ec85SPeter Maydell 85739f2ec85SPeter Maydell #define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s) 85839f2ec85SPeter Maydell #define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s) 85939f2ec85SPeter Maydell #define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s) 86039f2ec85SPeter Maydell 86139f2ec85SPeter Maydell #define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s) 86239f2ec85SPeter Maydell #define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s) 86339f2ec85SPeter Maydell #define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s) 86439f2ec85SPeter Maydell 86539f2ec85SPeter Maydell #define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s) 86639f2ec85SPeter Maydell #define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s) 86739f2ec85SPeter Maydell #define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s) 8681d2386f7SPeter Maydell 86966c05767SPeter Maydell /* 87066c05767SPeter Maydell * For QDMULH and QRDMULH we simplify "double and shift by esize" into 87166c05767SPeter Maydell * "shift by esize-1", adjusting the QRDMULH rounding constant to match. 
87266c05767SPeter Maydell */ 87366c05767SPeter Maydell #define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \ 87466c05767SPeter Maydell INT8_MIN, INT8_MAX, s) 87566c05767SPeter Maydell #define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \ 87666c05767SPeter Maydell INT16_MIN, INT16_MAX, s) 87766c05767SPeter Maydell #define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \ 87866c05767SPeter Maydell INT32_MIN, INT32_MAX, s) 87966c05767SPeter Maydell 88066c05767SPeter Maydell #define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \ 88166c05767SPeter Maydell INT8_MIN, INT8_MAX, s) 88266c05767SPeter Maydell #define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \ 88366c05767SPeter Maydell INT16_MIN, INT16_MAX, s) 88466c05767SPeter Maydell #define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \ 88566c05767SPeter Maydell INT32_MIN, INT32_MAX, s) 88666c05767SPeter Maydell 887380caf6cSPeter Maydell DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B) 888380caf6cSPeter Maydell DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H) 889380caf6cSPeter Maydell DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W) 890380caf6cSPeter Maydell 891380caf6cSPeter Maydell DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B) 892380caf6cSPeter Maydell DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H) 893380caf6cSPeter Maydell DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W) 894380caf6cSPeter Maydell 895f741707bSPeter Maydell DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B) 896f741707bSPeter Maydell DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H) 897f741707bSPeter Maydell DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W) 898f741707bSPeter Maydell DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B) 899f741707bSPeter Maydell DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H) 900f741707bSPeter Maydell DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W) 901f741707bSPeter Maydell 902f741707bSPeter Maydell DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B) 903f741707bSPeter 
Maydell DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H) 904f741707bSPeter Maydell DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W) 905f741707bSPeter Maydell DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B) 906f741707bSPeter Maydell DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H) 907f741707bSPeter Maydell DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W) 908f741707bSPeter Maydell 909483da661SPeter Maydell /* 910483da661SPeter Maydell * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs() 911483da661SPeter Maydell * and friends wanting a uint32_t* sat and our needing a bool*. 912483da661SPeter Maydell */ 913483da661SPeter Maydell #define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \ 914483da661SPeter Maydell ({ \ 915483da661SPeter Maydell uint32_t su32 = 0; \ 916483da661SPeter Maydell typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \ 917483da661SPeter Maydell if (su32) { \ 918483da661SPeter Maydell *satp = true; \ 919483da661SPeter Maydell } \ 920483da661SPeter Maydell r; \ 921483da661SPeter Maydell }) 922483da661SPeter Maydell 923483da661SPeter Maydell #define DO_SQSHL_OP(N, M, satp) \ 924483da661SPeter Maydell WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp) 925483da661SPeter Maydell #define DO_UQSHL_OP(N, M, satp) \ 926483da661SPeter Maydell WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp) 9279dc868c4SPeter Maydell #define DO_SQRSHL_OP(N, M, satp) \ 9289dc868c4SPeter Maydell WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp) 9299dc868c4SPeter Maydell #define DO_UQRSHL_OP(N, M, satp) \ 9309dc868c4SPeter Maydell WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp) 931f9ed6174SPeter Maydell #define DO_SUQSHL_OP(N, M, satp) \ 932f9ed6174SPeter Maydell WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp) 933483da661SPeter Maydell 934483da661SPeter Maydell DO_2OP_SAT_S(vqshls, DO_SQSHL_OP) 935483da661SPeter Maydell DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP) 9369dc868c4SPeter Maydell DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP) 9379dc868c4SPeter Maydell DO_2OP_SAT_U(vqrshlu, 
DO_UQRSHL_OP) 938483da661SPeter Maydell 939fd677f80SPeter Maydell /* 940fd677f80SPeter Maydell * Multiply add dual returning high half 941fd677f80SPeter Maydell * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of 942fd677f80SPeter Maydell * whether to add the rounding constant, and the pointer to the 943fd677f80SPeter Maydell * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant", 944fd677f80SPeter Maydell * saturate to twice the input size and return the high half; or 945fd677f80SPeter Maydell * (A * B - C * D) etc for VQDMLSDH. 946fd677f80SPeter Maydell */ 947fd677f80SPeter Maydell #define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN) \ 948fd677f80SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 949fd677f80SPeter Maydell void *vm) \ 950fd677f80SPeter Maydell { \ 951fd677f80SPeter Maydell TYPE *d = vd, *n = vn, *m = vm; \ 952fd677f80SPeter Maydell uint16_t mask = mve_element_mask(env); \ 953fd677f80SPeter Maydell unsigned e; \ 954fd677f80SPeter Maydell bool qc = false; \ 955fd677f80SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 956fd677f80SPeter Maydell bool sat = false; \ 957fd677f80SPeter Maydell if ((e & 1) == XCHG) { \ 958fd677f80SPeter Maydell TYPE r = FN(n[H##ESIZE(e)], \ 959fd677f80SPeter Maydell m[H##ESIZE(e - XCHG)], \ 960fd677f80SPeter Maydell n[H##ESIZE(e + (1 - 2 * XCHG))], \ 961fd677f80SPeter Maydell m[H##ESIZE(e + (1 - XCHG))], \ 962fd677f80SPeter Maydell ROUND, &sat); \ 963fd677f80SPeter Maydell mergemask(&d[H##ESIZE(e)], r, mask); \ 964fd677f80SPeter Maydell qc |= sat & mask & 1; \ 965fd677f80SPeter Maydell } \ 966fd677f80SPeter Maydell } \ 967fd677f80SPeter Maydell if (qc) { \ 968fd677f80SPeter Maydell env->vfp.qc[0] = qc; \ 969fd677f80SPeter Maydell } \ 970fd677f80SPeter Maydell mve_advance_vpt(env); \ 971fd677f80SPeter Maydell } 972fd677f80SPeter Maydell 973fd677f80SPeter Maydell static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d, 
974fd677f80SPeter Maydell int round, bool *sat) 975fd677f80SPeter Maydell { 976fd677f80SPeter Maydell int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7); 977fd677f80SPeter Maydell return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8; 978fd677f80SPeter Maydell } 979fd677f80SPeter Maydell 980fd677f80SPeter Maydell static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d, 981fd677f80SPeter Maydell int round, bool *sat) 982fd677f80SPeter Maydell { 983fd677f80SPeter Maydell int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15); 984fd677f80SPeter Maydell return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16; 985fd677f80SPeter Maydell } 986fd677f80SPeter Maydell 987fd677f80SPeter Maydell static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d, 988fd677f80SPeter Maydell int round, bool *sat) 989fd677f80SPeter Maydell { 990fd677f80SPeter Maydell int64_t m1 = (int64_t)a * b; 991fd677f80SPeter Maydell int64_t m2 = (int64_t)c * d; 992fd677f80SPeter Maydell int64_t r; 993fd677f80SPeter Maydell /* 994fd677f80SPeter Maydell * Architecturally we should do the entire add, double, round 995fd677f80SPeter Maydell * and then check for saturation. We do three saturating adds, 996fd677f80SPeter Maydell * but we need to be careful about the order. If the first 997fd677f80SPeter Maydell * m1 + m2 saturates then it's impossible for the *2+rc to 998fd677f80SPeter Maydell * bring it back into the non-saturated range. However, if 999fd677f80SPeter Maydell * m1 + m2 is negative then it's possible that doing the doubling 1000fd677f80SPeter Maydell * would take the intermediate result below INT64_MAX and the 1001fd677f80SPeter Maydell * addition of the rounding constant then brings it back in range. 1002fd677f80SPeter Maydell * So we add half the rounding constant before doubling rather 1003fd677f80SPeter Maydell * than adding the rounding constant after the doubling. 
1004fd677f80SPeter Maydell */ 1005fd677f80SPeter Maydell if (sadd64_overflow(m1, m2, &r) || 1006fd677f80SPeter Maydell sadd64_overflow(r, (round << 30), &r) || 1007fd677f80SPeter Maydell sadd64_overflow(r, r, &r)) { 1008fd677f80SPeter Maydell *sat = true; 1009fd677f80SPeter Maydell return r < 0 ? INT32_MAX : INT32_MIN; 1010fd677f80SPeter Maydell } 1011fd677f80SPeter Maydell return r >> 32; 1012fd677f80SPeter Maydell } 1013fd677f80SPeter Maydell 101492f11732SPeter Maydell static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d, 101592f11732SPeter Maydell int round, bool *sat) 101692f11732SPeter Maydell { 101792f11732SPeter Maydell int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7); 101892f11732SPeter Maydell return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8; 101992f11732SPeter Maydell } 102092f11732SPeter Maydell 102192f11732SPeter Maydell static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d, 102292f11732SPeter Maydell int round, bool *sat) 102392f11732SPeter Maydell { 102492f11732SPeter Maydell int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15); 102592f11732SPeter Maydell return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16; 102692f11732SPeter Maydell } 102792f11732SPeter Maydell 102892f11732SPeter Maydell static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d, 102992f11732SPeter Maydell int round, bool *sat) 103092f11732SPeter Maydell { 103192f11732SPeter Maydell int64_t m1 = (int64_t)a * b; 103292f11732SPeter Maydell int64_t m2 = (int64_t)c * d; 103392f11732SPeter Maydell int64_t r; 103492f11732SPeter Maydell /* The same ordering issue as in do_vqdmladh_w applies here too */ 103592f11732SPeter Maydell if (ssub64_overflow(m1, m2, &r) || 103692f11732SPeter Maydell sadd64_overflow(r, (round << 30), &r) || 103792f11732SPeter Maydell sadd64_overflow(r, r, &r)) { 103892f11732SPeter Maydell *sat = true; 103992f11732SPeter Maydell return r < 0 ? 
INT32_MAX : INT32_MIN; 104092f11732SPeter Maydell } 104192f11732SPeter Maydell return r >> 32; 104292f11732SPeter Maydell } 104392f11732SPeter Maydell 1044fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b) 1045fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h) 1046fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w) 1047fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b) 1048fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h) 1049fd677f80SPeter Maydell DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w) 1050fd677f80SPeter Maydell 1051fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b) 1052fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h) 1053fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w) 1054fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b) 1055fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h) 1056fd677f80SPeter Maydell DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w) 1057fd677f80SPeter Maydell 105892f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b) 105992f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h) 106092f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w) 106192f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b) 106292f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h) 106392f11732SPeter Maydell DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w) 106492f11732SPeter Maydell 106592f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b) 106692f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h) 106792f11732SPeter Maydell 
DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w) 106892f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b) 106992f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h) 107092f11732SPeter Maydell DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w) 107192f11732SPeter Maydell 1072e51896b3SPeter Maydell #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \ 1073e51896b3SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1074e51896b3SPeter Maydell uint32_t rm) \ 1075e51896b3SPeter Maydell { \ 1076e51896b3SPeter Maydell TYPE *d = vd, *n = vn; \ 1077e51896b3SPeter Maydell TYPE m = rm; \ 1078e51896b3SPeter Maydell uint16_t mask = mve_element_mask(env); \ 1079e51896b3SPeter Maydell unsigned e; \ 1080e51896b3SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1081e51896b3SPeter Maydell mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \ 1082e51896b3SPeter Maydell } \ 1083e51896b3SPeter Maydell mve_advance_vpt(env); \ 1084e51896b3SPeter Maydell } 1085e51896b3SPeter Maydell 108639f2ec85SPeter Maydell #define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \ 108739f2ec85SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 108839f2ec85SPeter Maydell uint32_t rm) \ 108939f2ec85SPeter Maydell { \ 109039f2ec85SPeter Maydell TYPE *d = vd, *n = vn; \ 109139f2ec85SPeter Maydell TYPE m = rm; \ 109239f2ec85SPeter Maydell uint16_t mask = mve_element_mask(env); \ 109339f2ec85SPeter Maydell unsigned e; \ 109439f2ec85SPeter Maydell bool qc = false; \ 109539f2ec85SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 109639f2ec85SPeter Maydell bool sat = false; \ 109739f2ec85SPeter Maydell mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \ 109839f2ec85SPeter Maydell mask); \ 109939f2ec85SPeter Maydell qc |= sat & mask & 1; \ 110039f2ec85SPeter Maydell } \ 110139f2ec85SPeter Maydell if (qc) { \ 110239f2ec85SPeter Maydell 
        env->vfp.qc[0] = qc;                                    \
    }                                                           \
    mve_advance_vpt(env);                                       \
}

/*
 * "accumulating" version where FN takes d as well as n and m:
 * the destination element is an input to FN (e.g. VMLA-style ops).
 */
#define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask);     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/*
 * Accumulating scalar op with saturation: FN additionally reports
 * per-element saturation through its bool* argument.  A saturated
 * element only sets FPSCR.QC if bit 0 of its predicate mask slice
 * is set (i.e. the element was actually active).
 */
#define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN)                      \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat),      \
                      mask);                                            \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN)        \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN)       \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
#define DO_2OP_SCALAR_S(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN)         \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN)        \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

#define DO_2OP_ACC_SCALAR_U(OP, FN)             \
    DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN)    \
    DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN)   \
    DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)

DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

/*
 * Doubling multiply-add for 8-bit elements: computes
 * 2*a*b + (c << 8) + optional rounding constant in 64 bits,
 * saturates to 16 bits (the element's doubled precision) and
 * returns the top byte of that.  'round' is 0 or 1.
 */
static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

/* As do_vqdmlah_b, but for 16-bit elements saturating at 32 bits. */
static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
                            int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

/*
 * Doubling multiply-add for 32-bit elements.  Unlike the b/h cases
 * the intermediate value can overflow an int64_t, so this is built
 * from explicit overflow-checked additions instead of do_sat_bhw().
 */
static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
                            int round, bool *sat)
{
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MAX and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant and half the "c << esize"
     * before doubling rather than adding the rounding constant after
     * the doubling.
     */
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c << 31;
    int64_t r;
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        /*
         * On signed-add overflow the wrapped result has the opposite
         * sign from the true result, so a negative r means we must
         * saturate towards INT32_MAX and vice versa.
         */
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

/*
 * The *MLAH insns are vector * scalar + vector;
 * the *MLASH insns are vector * vector + scalar
 * (the wrappers just permute the D/N/M operands and select rounding).
 */
#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S)
#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S)
#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S)
#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S)
#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S)
#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S)

#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S)
#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S)
#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S)
#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S)
#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S)
#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S)

DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W)

DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W)

/* Vector by scalar plus vector */
#define DO_VMLA(D, N, M) ((N) * (M) + (D))

DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA)

/* Vector by vector plus scalar */
#define DO_VMLAS(D, N, M) ((N) * (D) + (M))

DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS)

/*
 * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
 * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
 * SATMASK specifies which bits of the predicate mask matter for determining
 * whether to propagate a saturation indication into FPSCR.QC -- for
 * the 16x16->32 case we must check only the bit corresponding to the T or B
 * half that we used, but for the 32x32->64 case we propagate if the mask
 * bit is set for either half.
 */
#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn;                                                   \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        bool qc = false;                                                \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            bool sat = false;                                           \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat);    \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
            qc |= sat && (mask & SATMASK);                              \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Saturating doubling multiply: 16x16 -> 32, result fits, just saturate */
static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
{
    int64_t r = ((int64_t)n * m) * 2;
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
}

/* Saturating doubling multiply: 32x32 -> 64 */
static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
{
    /* The multiply can't overflow, but the doubling might */
    int64_t r = (int64_t)n * m;
    if (r > INT64_MAX / 2) {
        *sat = true;
        return INT64_MAX;
    } else if (r < INT64_MIN / 2) {
        *sat = true;
        return INT64_MIN;
    } else {
        return r * 2;
    }
}

/* Predicate-mask bits relevant for QC propagation; see comment above */
#define SATMASK16B 1
#define SATMASK16T (1 << 2)
#define SATMASK32 ((1 << 4) | 1)

DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16B)
DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16T)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)

/*
 * Long saturating ops
 */
#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK)  \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                void *vm)                               \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        bool qc = false;                                                \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            bool sat = false;                                           \
            LTYPE op1 = n[H##ESIZE(le * 2 + TOP)];                      \
            LTYPE op2 = m[H##ESIZE(le * 2 + TOP)];                      \
            mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask);     \
            qc |= sat && (mask & SATMASK);                              \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)

/*
 * VBRSR helpers: bit-reverse n within an 8/16/32 bit container and
 * keep only the top m reversed bits (i.e. reverse the low m bits of n).
 * m is taken modulo 256; m == 0 yields 0.
 */
static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit8(n);
    if (m < 8) {
        n >>= 8 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit16(n);
    if (m < 16) {
        n >>= 16 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit32(n);
    if (m < 32) {
        n >>= 32 - m;
    }
    return n;
}

DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)

/*
 * Multiply add long dual accumulate ops.
 * EVENACC/ODDACC are "+=" or "-=" applied to even/odd element pairs;
 * XCHG swaps which element of each pair n contributes.
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC)                 \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else {                                                \
                    a EVENACC                                           \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)

DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)

/*
 * Multiply add dual accumulate ops
 * (as DO_LDAV but with a 32-bit accumulator and no widening cast).
 */
#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC)                  \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint32_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)];     \
                } else {                                                \
                    a EVENACC                                           \
                        n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)];     \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC)           \
    DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC)   \
    DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC)  \
    DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)

#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC)           \
    DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC)  \
    DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)

DO_DAV_S(vmladavs, false, +=, +=)
DO_DAV_U(vmladavu, false, +=, +=)
DO_DAV_S(vmlsdav, false, +=, -=)
DO_DAV_S(vmladavsx, true, +=, +=)
DO_DAV_S(vmlsdavx, true, +=, -=)

/*
 * Rounding multiply add long dual accumulate high. In the pseudocode
 * this is implemented with a 72-bit internal accumulator value of which
 * the top 64 bits are returned. We optimize this to avoid having to
 * use 128-bit arithmetic -- we can do this because the 74-bit accumulator
 * is squashed back into 64-bits after each beat.
 */
#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB)                            \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {                      \
            if (mask & 1) {                                             \
                LTYPE mul;                                              \
                if (e & 1) {                                            \
                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)];        \
                    if (SUB) {                                          \
                        mul = -mul;                                     \
                    }                                                   \
                } else {                                                \
                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)];        \
                }                                                       \
                /* Round to nearest (bit 7) then keep the high part */  \
                mul = (mul >> 8) + ((mul >> 7) & 1);                    \
                a += mul;                                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)

DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)

DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)

/* Vector add across vector */
#define DO_VADDV(OP, ESIZE, TYPE)                                       \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm,         \
                                    uint32_t ra)                        \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *m = vm;                                                   \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                ra += m[H##ESIZE(e)];                                   \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return ra;                                                      \
    }                                                                   \

DO_VADDV(vaddvsb, 1, int8_t)
DO_VADDV(vaddvsh, 2, int16_t)
DO_VADDV(vaddvsw, 4, int32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)

/*
 * Vector max/min across vector. Unlike VADDV, we must
 * read ra as the element size, not its full width.
 * We work with int64_t internally for simplicity.
 */
#define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN)                        \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm,         \
                                    uint32_t ra_in)                     \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *m = vm;                                                   \
        int64_t ra = (RATYPE)ra_in;                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                ra = FN(ra, m[H##ESIZE(e)]);                            \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return ra;                                                      \
    }                                                                   \

#define DO_VMAXMINV_U(INSN, FN)                         \
    DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN)       \
    DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN)     \
    DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN)
#define DO_VMAXMINV_S(INSN, FN)                         \
    DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN)         \
    DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN)       \
    DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN)

/*
 * Helpers for max and min of absolute values across vector:
 * note that we only take the absolute value of 'm', not 'n'
 */
static int64_t do_maxa(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MAX(n, m);
}

/* As do_maxa, but returning the minimum of n and |m| */
static int64_t do_mina(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MIN(n, m);
}

DO_VMAXMINV_S(vmaxvs, DO_MAX)
DO_VMAXMINV_U(vmaxvu, DO_MAX)
DO_VMAXMINV_S(vminvs, DO_MIN)
DO_VMAXMINV_U(vminvu, DO_MIN)
/*
 * VMAXAV, VMINAV treat the general purpose input as unsigned
 * and the vector elements as signed.
 */
DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa)
DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa)
DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa)
DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)

/*
 * VABAV: accumulate the absolute differences of active elements of
 * n and m into ra.  The difference is computed in 64 bits to avoid
 * overflow, then truncated to 32 bits for the accumulation.
 */
#define DO_VABAV(OP, ESIZE, TYPE)                                       \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint32_t ra)              \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *m = vm, *n = vn;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                int64_t n0 = n[H##ESIZE(e)];                            \
                int64_t m0 = m[H##ESIZE(e)];                            \
                uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0);          \
                ra += r;                                                \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return ra;                                                      \
    }

DO_VABAV(vabavsb, 1, int8_t)
DO_VABAV(vabavsh, 2, int16_t)
DO_VABAV(vabavsw, 4, int32_t)
DO_VABAV(vabavub, 1, uint8_t)
DO_VABAV(vabavuh, 2, uint16_t)
DO_VABAV(vabavuw, 4, uint32_t)

/*
 * VADDLV: add 32-bit vector elements across the vector into a
 * 64-bit accumulator, widening each element via LTYPE.
 */
#define DO_VADDLV(OP, TYPE, LTYPE)                                      \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm,         \
                                    uint64_t ra)                        \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *m = vm;                                                   \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {                      \
            if (mask & 1) {                                             \
                ra += (LTYPE)m[H4(e)];                                  \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return ra;                                                      \
    }                                                                   \

DO_VADDLV(vaddlv_s, int32_t, int64_t)
DO_VADDLV(vaddlv_u, uint32_t, uint64_t)

/* Shifts by immediate */
#define DO_2SHIFT(OP, ESIZE, TYPE, FN)                                  \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(m[H##ESIZE(e)], shift), mask);                 \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* As DO_2SHIFT, but FN reports saturation which propagates to FPSCR.QC */
#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(m[H##ESIZE(e)], shift, &sat), mask);           \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op shift helpers for all sizes */
#define DO_2SHIFT_U(OP, FN)                     \
    DO_2SHIFT(OP##b, 1, uint8_t, FN)            \
    DO_2SHIFT(OP##h, 2, uint16_t, FN)           \
    DO_2SHIFT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_S(OP, FN)                     \
    DO_2SHIFT(OP##b, 1, int8_t, FN)             \
    DO_2SHIFT(OP##h, 2, int16_t, FN)            \
    DO_2SHIFT(OP##w, 4, int32_t, FN)

#define DO_2SHIFT_SAT_U(OP, FN)                 \
    DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN)        \
    DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN)       \
    DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_SAT_S(OP, FN)                 \
    DO_2SHIFT_SAT(OP##b, 1, int8_t, FN)         \
    DO_2SHIFT_SAT(OP##h, 2, int16_t, FN)        \
    DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)

DO_2SHIFT_U(vshli_u, DO_VSHLU)
DO_2SHIFT_S(vshli_s, DO_VSHLS)
DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP)
DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP)

/* Shift-and-insert; we always work with 64 bits at a time */
#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN)                    \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        uint64_t *d = vd, *m = vm;                                      \
        uint16_t mask;                                                  \
        uint64_t shiftmask;                                             \
        unsigned e;                                                     \
        if (shift == ESIZE * 8) {                                       \
            /*                                                          \
             * Only VSRI can shift by <dt>; it should mean "don't       \
             * update the destination".
The generic logic can't handle \ 1742c88ff884SPeter Maydell * this because it would try to shift by an out-of-range \ 1743c88ff884SPeter Maydell * amount, so special case it here. \ 1744a78b25faSPeter Maydell */ \ 1745a78b25faSPeter Maydell goto done; \ 1746a78b25faSPeter Maydell } \ 1747a78b25faSPeter Maydell assert(shift < ESIZE * 8); \ 1748a78b25faSPeter Maydell mask = mve_element_mask(env); \ 1749a78b25faSPeter Maydell /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \ 1750a78b25faSPeter Maydell shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \ 1751a78b25faSPeter Maydell for (e = 0; e < 16 / 8; e++, mask >>= 8) { \ 1752a78b25faSPeter Maydell uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \ 1753a78b25faSPeter Maydell (d[H8(e)] & ~shiftmask); \ 1754a78b25faSPeter Maydell mergemask(&d[H8(e)], r, mask); \ 1755a78b25faSPeter Maydell } \ 1756a78b25faSPeter Maydell done: \ 1757a78b25faSPeter Maydell mve_advance_vpt(env); \ 1758a78b25faSPeter Maydell } 1759a78b25faSPeter Maydell 1760a78b25faSPeter Maydell #define DO_SHL(N, SHIFT) ((N) << (SHIFT)) 1761a78b25faSPeter Maydell #define DO_SHR(N, SHIFT) ((N) >> (SHIFT)) 1762a78b25faSPeter Maydell #define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT)) 1763a78b25faSPeter Maydell #define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT)) 1764a78b25faSPeter Maydell 1765a78b25faSPeter Maydell DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK) 1766a78b25faSPeter Maydell DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK) 1767a78b25faSPeter Maydell DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK) 1768a78b25faSPeter Maydell DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK) 1769a78b25faSPeter Maydell DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK) 1770a78b25faSPeter Maydell DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK) 1771a78b25faSPeter Maydell 1772c2262707SPeter Maydell /* 1773c2262707SPeter Maydell * Long shifts taking half-sized inputs from top or bottom of the input 1774c2262707SPeter Maydell * 
vector and producing a double-width result. ESIZE, TYPE are for 1775c2262707SPeter Maydell * the input, and LESIZE, LTYPE for the output. 1776c2262707SPeter Maydell * Unlike the normal shift helpers, we do not handle negative shift counts, 1777c2262707SPeter Maydell * because the long shift is strictly left-only. 1778c2262707SPeter Maydell */ 1779c2262707SPeter Maydell #define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \ 1780c2262707SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 1781c2262707SPeter Maydell void *vm, uint32_t shift) \ 1782c2262707SPeter Maydell { \ 1783c2262707SPeter Maydell LTYPE *d = vd; \ 1784c2262707SPeter Maydell TYPE *m = vm; \ 1785c2262707SPeter Maydell uint16_t mask = mve_element_mask(env); \ 1786c2262707SPeter Maydell unsigned le; \ 1787c2262707SPeter Maydell assert(shift <= 16); \ 1788c2262707SPeter Maydell for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 1789c2262707SPeter Maydell LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \ 1790c2262707SPeter Maydell mergemask(&d[H##LESIZE(le)], r, mask); \ 1791c2262707SPeter Maydell } \ 1792c2262707SPeter Maydell mve_advance_vpt(env); \ 1793c2262707SPeter Maydell } 1794c2262707SPeter Maydell 1795c2262707SPeter Maydell #define DO_VSHLL_ALL(OP, TOP) \ 1796c2262707SPeter Maydell DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \ 1797c2262707SPeter Maydell DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \ 1798c2262707SPeter Maydell DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \ 1799c2262707SPeter Maydell DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \ 1800c2262707SPeter Maydell 1801c2262707SPeter Maydell DO_VSHLL_ALL(vshllb, false) 1802c2262707SPeter Maydell DO_VSHLL_ALL(vshllt, true) 1803162e2655SPeter Maydell 1804162e2655SPeter Maydell /* 1805162e2655SPeter Maydell * Narrowing right shifts, taking a double sized input, shifting it 1806162e2655SPeter Maydell * and putting the result in either the top or bottom half of the output. 
1807162e2655SPeter Maydell * ESIZE, TYPE are the output, and LESIZE, LTYPE the input. 1808162e2655SPeter Maydell */ 1809162e2655SPeter Maydell #define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 1810162e2655SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 1811162e2655SPeter Maydell void *vm, uint32_t shift) \ 1812162e2655SPeter Maydell { \ 1813162e2655SPeter Maydell LTYPE *m = vm; \ 1814162e2655SPeter Maydell TYPE *d = vd; \ 1815162e2655SPeter Maydell uint16_t mask = mve_element_mask(env); \ 1816162e2655SPeter Maydell unsigned le; \ 1817a5e59e8dSPeter Maydell mask >>= ESIZE * TOP; \ 1818162e2655SPeter Maydell for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 1819162e2655SPeter Maydell TYPE r = FN(m[H##LESIZE(le)], shift); \ 1820162e2655SPeter Maydell mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ 1821162e2655SPeter Maydell } \ 1822162e2655SPeter Maydell mve_advance_vpt(env); \ 1823162e2655SPeter Maydell } 1824162e2655SPeter Maydell 1825162e2655SPeter Maydell #define DO_VSHRN_ALL(OP, FN) \ 1826162e2655SPeter Maydell DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \ 1827162e2655SPeter Maydell DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \ 1828162e2655SPeter Maydell DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \ 1829162e2655SPeter Maydell DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN) 1830162e2655SPeter Maydell 1831162e2655SPeter Maydell static inline uint64_t do_urshr(uint64_t x, unsigned sh) 1832162e2655SPeter Maydell { 1833162e2655SPeter Maydell if (likely(sh < 64)) { 1834162e2655SPeter Maydell return (x >> sh) + ((x >> (sh - 1)) & 1); 1835162e2655SPeter Maydell } else if (sh == 64) { 1836162e2655SPeter Maydell return x >> 63; 1837162e2655SPeter Maydell } else { 1838162e2655SPeter Maydell return 0; 1839162e2655SPeter Maydell } 1840162e2655SPeter Maydell } 1841162e2655SPeter Maydell 1842d6f9e011SPeter Maydell static inline int64_t do_srshr(int64_t x, unsigned sh) 1843d6f9e011SPeter Maydell { 
1844d6f9e011SPeter Maydell if (likely(sh < 64)) { 1845d6f9e011SPeter Maydell return (x >> sh) + ((x >> (sh - 1)) & 1); 1846d6f9e011SPeter Maydell } else { 1847d6f9e011SPeter Maydell /* Rounding the sign bit always produces 0. */ 1848d6f9e011SPeter Maydell return 0; 1849d6f9e011SPeter Maydell } 1850d6f9e011SPeter Maydell } 1851d6f9e011SPeter Maydell 1852162e2655SPeter Maydell DO_VSHRN_ALL(vshrn, DO_SHR) 1853162e2655SPeter Maydell DO_VSHRN_ALL(vrshrn, do_urshr) 1854d6f9e011SPeter Maydell 1855d6f9e011SPeter Maydell static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max, 1856d6f9e011SPeter Maydell bool *satp) 1857d6f9e011SPeter Maydell { 1858d6f9e011SPeter Maydell if (val > max) { 1859d6f9e011SPeter Maydell *satp = true; 1860d6f9e011SPeter Maydell return max; 1861d6f9e011SPeter Maydell } else if (val < min) { 1862d6f9e011SPeter Maydell *satp = true; 1863d6f9e011SPeter Maydell return min; 1864d6f9e011SPeter Maydell } else { 1865d6f9e011SPeter Maydell return val; 1866d6f9e011SPeter Maydell } 1867d6f9e011SPeter Maydell } 1868d6f9e011SPeter Maydell 1869d6f9e011SPeter Maydell /* Saturating narrowing right shifts */ 1870d6f9e011SPeter Maydell #define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 1871d6f9e011SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 1872d6f9e011SPeter Maydell void *vm, uint32_t shift) \ 1873d6f9e011SPeter Maydell { \ 1874d6f9e011SPeter Maydell LTYPE *m = vm; \ 1875d6f9e011SPeter Maydell TYPE *d = vd; \ 1876d6f9e011SPeter Maydell uint16_t mask = mve_element_mask(env); \ 1877d6f9e011SPeter Maydell bool qc = false; \ 1878d6f9e011SPeter Maydell unsigned le; \ 1879a5e59e8dSPeter Maydell mask >>= ESIZE * TOP; \ 1880d6f9e011SPeter Maydell for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 1881d6f9e011SPeter Maydell bool sat = false; \ 1882d6f9e011SPeter Maydell TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \ 1883d6f9e011SPeter Maydell mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ 1884a5e59e8dSPeter 
Maydell qc |= sat & mask & 1; \ 1885d6f9e011SPeter Maydell } \ 1886d6f9e011SPeter Maydell if (qc) { \ 1887d6f9e011SPeter Maydell env->vfp.qc[0] = qc; \ 1888d6f9e011SPeter Maydell } \ 1889d6f9e011SPeter Maydell mve_advance_vpt(env); \ 1890d6f9e011SPeter Maydell } 1891d6f9e011SPeter Maydell 1892d6f9e011SPeter Maydell #define DO_VSHRN_SAT_UB(BOP, TOP, FN) \ 1893d6f9e011SPeter Maydell DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \ 1894d6f9e011SPeter Maydell DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN) 1895d6f9e011SPeter Maydell 1896d6f9e011SPeter Maydell #define DO_VSHRN_SAT_UH(BOP, TOP, FN) \ 1897d6f9e011SPeter Maydell DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \ 1898d6f9e011SPeter Maydell DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN) 1899d6f9e011SPeter Maydell 1900d6f9e011SPeter Maydell #define DO_VSHRN_SAT_SB(BOP, TOP, FN) \ 1901d6f9e011SPeter Maydell DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \ 1902d6f9e011SPeter Maydell DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN) 1903d6f9e011SPeter Maydell 1904d6f9e011SPeter Maydell #define DO_VSHRN_SAT_SH(BOP, TOP, FN) \ 1905d6f9e011SPeter Maydell DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \ 1906d6f9e011SPeter Maydell DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN) 1907d6f9e011SPeter Maydell 1908d6f9e011SPeter Maydell #define DO_SHRN_SB(N, M, SATP) \ 1909d6f9e011SPeter Maydell do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP) 1910d6f9e011SPeter Maydell #define DO_SHRN_UB(N, M, SATP) \ 1911d6f9e011SPeter Maydell do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP) 1912d6f9e011SPeter Maydell #define DO_SHRUN_B(N, M, SATP) \ 1913d6f9e011SPeter Maydell do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP) 1914d6f9e011SPeter Maydell 1915d6f9e011SPeter Maydell #define DO_SHRN_SH(N, M, SATP) \ 1916d6f9e011SPeter Maydell do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP) 1917d6f9e011SPeter Maydell #define DO_SHRN_UH(N, M, SATP) \ 1918d6f9e011SPeter Maydell 
do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP) 1919d6f9e011SPeter Maydell #define DO_SHRUN_H(N, M, SATP) \ 1920d6f9e011SPeter Maydell do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP) 1921d6f9e011SPeter Maydell 1922d6f9e011SPeter Maydell #define DO_RSHRN_SB(N, M, SATP) \ 1923d6f9e011SPeter Maydell do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP) 1924d6f9e011SPeter Maydell #define DO_RSHRN_UB(N, M, SATP) \ 1925d6f9e011SPeter Maydell do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP) 1926d6f9e011SPeter Maydell #define DO_RSHRUN_B(N, M, SATP) \ 1927d6f9e011SPeter Maydell do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP) 1928d6f9e011SPeter Maydell 1929d6f9e011SPeter Maydell #define DO_RSHRN_SH(N, M, SATP) \ 1930d6f9e011SPeter Maydell do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP) 1931d6f9e011SPeter Maydell #define DO_RSHRN_UH(N, M, SATP) \ 1932d6f9e011SPeter Maydell do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP) 1933d6f9e011SPeter Maydell #define DO_RSHRUN_H(N, M, SATP) \ 1934d6f9e011SPeter Maydell do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP) 1935d6f9e011SPeter Maydell 1936d6f9e011SPeter Maydell DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB) 1937d6f9e011SPeter Maydell DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH) 1938d6f9e011SPeter Maydell DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB) 1939d6f9e011SPeter Maydell DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH) 1940d6f9e011SPeter Maydell DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B) 1941d6f9e011SPeter Maydell DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H) 1942d6f9e011SPeter Maydell 1943d6f9e011SPeter Maydell DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB) 1944d6f9e011SPeter Maydell DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH) 1945d6f9e011SPeter Maydell DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB) 1946d6f9e011SPeter Maydell DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH) 1947d6f9e011SPeter Maydell DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, 
DO_RSHRUN_B) 1948d6f9e011SPeter Maydell DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H) 19492e6a4ce0SPeter Maydell 195054dc78a9SPeter Maydell #define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \ 195154dc78a9SPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 195254dc78a9SPeter Maydell { \ 195354dc78a9SPeter Maydell LTYPE *m = vm; \ 195454dc78a9SPeter Maydell TYPE *d = vd; \ 195554dc78a9SPeter Maydell uint16_t mask = mve_element_mask(env); \ 195654dc78a9SPeter Maydell unsigned le; \ 195754dc78a9SPeter Maydell mask >>= ESIZE * TOP; \ 195854dc78a9SPeter Maydell for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 195954dc78a9SPeter Maydell mergemask(&d[H##ESIZE(le * 2 + TOP)], \ 196054dc78a9SPeter Maydell m[H##LESIZE(le)], mask); \ 196154dc78a9SPeter Maydell } \ 196254dc78a9SPeter Maydell mve_advance_vpt(env); \ 196354dc78a9SPeter Maydell } 196454dc78a9SPeter Maydell 196554dc78a9SPeter Maydell DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t) 196654dc78a9SPeter Maydell DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t) 196754dc78a9SPeter Maydell DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t) 196854dc78a9SPeter Maydell DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t) 196954dc78a9SPeter Maydell 197054dc78a9SPeter Maydell #define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 197154dc78a9SPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 197254dc78a9SPeter Maydell { \ 197354dc78a9SPeter Maydell LTYPE *m = vm; \ 197454dc78a9SPeter Maydell TYPE *d = vd; \ 197554dc78a9SPeter Maydell uint16_t mask = mve_element_mask(env); \ 197654dc78a9SPeter Maydell bool qc = false; \ 197754dc78a9SPeter Maydell unsigned le; \ 197854dc78a9SPeter Maydell mask >>= ESIZE * TOP; \ 197954dc78a9SPeter Maydell for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 198054dc78a9SPeter Maydell bool sat = false; \ 198154dc78a9SPeter Maydell TYPE r = FN(m[H##LESIZE(le)], &sat); \ 198254dc78a9SPeter Maydell 
mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ 198354dc78a9SPeter Maydell qc |= sat & mask & 1; \ 198454dc78a9SPeter Maydell } \ 198554dc78a9SPeter Maydell if (qc) { \ 198654dc78a9SPeter Maydell env->vfp.qc[0] = qc; \ 198754dc78a9SPeter Maydell } \ 198854dc78a9SPeter Maydell mve_advance_vpt(env); \ 198954dc78a9SPeter Maydell } 199054dc78a9SPeter Maydell 199154dc78a9SPeter Maydell #define DO_VMOVN_SAT_UB(BOP, TOP, FN) \ 199254dc78a9SPeter Maydell DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \ 199354dc78a9SPeter Maydell DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN) 199454dc78a9SPeter Maydell 199554dc78a9SPeter Maydell #define DO_VMOVN_SAT_UH(BOP, TOP, FN) \ 199654dc78a9SPeter Maydell DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \ 199754dc78a9SPeter Maydell DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN) 199854dc78a9SPeter Maydell 199954dc78a9SPeter Maydell #define DO_VMOVN_SAT_SB(BOP, TOP, FN) \ 200054dc78a9SPeter Maydell DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \ 200154dc78a9SPeter Maydell DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN) 200254dc78a9SPeter Maydell 200354dc78a9SPeter Maydell #define DO_VMOVN_SAT_SH(BOP, TOP, FN) \ 200454dc78a9SPeter Maydell DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \ 200554dc78a9SPeter Maydell DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN) 200654dc78a9SPeter Maydell 200754dc78a9SPeter Maydell #define DO_VQMOVN_SB(N, SATP) \ 200854dc78a9SPeter Maydell do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP) 200954dc78a9SPeter Maydell #define DO_VQMOVN_UB(N, SATP) \ 201054dc78a9SPeter Maydell do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP) 201154dc78a9SPeter Maydell #define DO_VQMOVUN_B(N, SATP) \ 201254dc78a9SPeter Maydell do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP) 201354dc78a9SPeter Maydell 201454dc78a9SPeter Maydell #define DO_VQMOVN_SH(N, SATP) \ 201554dc78a9SPeter Maydell do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP) 201654dc78a9SPeter Maydell #define DO_VQMOVN_UH(N, 
SATP) \ 201754dc78a9SPeter Maydell do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP) 201854dc78a9SPeter Maydell #define DO_VQMOVUN_H(N, SATP) \ 201954dc78a9SPeter Maydell do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP) 202054dc78a9SPeter Maydell 202154dc78a9SPeter Maydell DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB) 202254dc78a9SPeter Maydell DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH) 202354dc78a9SPeter Maydell DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB) 202454dc78a9SPeter Maydell DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH) 202554dc78a9SPeter Maydell DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B) 202654dc78a9SPeter Maydell DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H) 202754dc78a9SPeter Maydell 20282e6a4ce0SPeter Maydell uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm, 20292e6a4ce0SPeter Maydell uint32_t shift) 20302e6a4ce0SPeter Maydell { 20312e6a4ce0SPeter Maydell uint32_t *d = vd; 20322e6a4ce0SPeter Maydell uint16_t mask = mve_element_mask(env); 20332e6a4ce0SPeter Maydell unsigned e; 20342e6a4ce0SPeter Maydell uint32_t r; 20352e6a4ce0SPeter Maydell 20362e6a4ce0SPeter Maydell /* 20372e6a4ce0SPeter Maydell * For each 32-bit element, we shift it left, bringing in the 20382e6a4ce0SPeter Maydell * low 'shift' bits of rdm at the bottom. Bits shifted out at 20392e6a4ce0SPeter Maydell * the top become the new rdm, if the predicate mask permits. 20402e6a4ce0SPeter Maydell * The final rdm value is returned to update the register. 20412e6a4ce0SPeter Maydell * shift == 0 here means "shift by 32 bits". 
20422e6a4ce0SPeter Maydell */ 20432e6a4ce0SPeter Maydell if (shift == 0) { 20442e6a4ce0SPeter Maydell for (e = 0; e < 16 / 4; e++, mask >>= 4) { 20452e6a4ce0SPeter Maydell r = rdm; 20462e6a4ce0SPeter Maydell if (mask & 1) { 20472e6a4ce0SPeter Maydell rdm = d[H4(e)]; 20482e6a4ce0SPeter Maydell } 20492e6a4ce0SPeter Maydell mergemask(&d[H4(e)], r, mask); 20502e6a4ce0SPeter Maydell } 20512e6a4ce0SPeter Maydell } else { 20522e6a4ce0SPeter Maydell uint32_t shiftmask = MAKE_64BIT_MASK(0, shift); 20532e6a4ce0SPeter Maydell 20542e6a4ce0SPeter Maydell for (e = 0; e < 16 / 4; e++, mask >>= 4) { 20552e6a4ce0SPeter Maydell r = (d[H4(e)] << shift) | (rdm & shiftmask); 20562e6a4ce0SPeter Maydell if (mask & 1) { 20572e6a4ce0SPeter Maydell rdm = d[H4(e)] >> (32 - shift); 20582e6a4ce0SPeter Maydell } 20592e6a4ce0SPeter Maydell mergemask(&d[H4(e)], r, mask); 20602e6a4ce0SPeter Maydell } 20612e6a4ce0SPeter Maydell } 20622e6a4ce0SPeter Maydell mve_advance_vpt(env); 20632e6a4ce0SPeter Maydell return rdm; 20642e6a4ce0SPeter Maydell } 2065f4ae6c8cSPeter Maydell 20660aa4b4c3SPeter Maydell uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift) 20670aa4b4c3SPeter Maydell { 20680aa4b4c3SPeter Maydell return do_sqrshl_d(n, -(int8_t)shift, false, NULL); 20690aa4b4c3SPeter Maydell } 20700aa4b4c3SPeter Maydell 20710aa4b4c3SPeter Maydell uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift) 20720aa4b4c3SPeter Maydell { 20730aa4b4c3SPeter Maydell return do_uqrshl_d(n, (int8_t)shift, false, NULL); 20740aa4b4c3SPeter Maydell } 20750aa4b4c3SPeter Maydell 2076f4ae6c8cSPeter Maydell uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift) 2077f4ae6c8cSPeter Maydell { 2078f4ae6c8cSPeter Maydell return do_sqrshl_d(n, (int8_t)shift, false, &env->QF); 2079f4ae6c8cSPeter Maydell } 2080f4ae6c8cSPeter Maydell 2081f4ae6c8cSPeter Maydell uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift) 2082f4ae6c8cSPeter Maydell { 2083f4ae6c8cSPeter 
Maydell return do_uqrshl_d(n, (int8_t)shift, false, &env->QF); 2084f4ae6c8cSPeter Maydell } 20850aa4b4c3SPeter Maydell 20860aa4b4c3SPeter Maydell uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift) 20870aa4b4c3SPeter Maydell { 20880aa4b4c3SPeter Maydell return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF); 20890aa4b4c3SPeter Maydell } 20900aa4b4c3SPeter Maydell 20910aa4b4c3SPeter Maydell uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift) 20920aa4b4c3SPeter Maydell { 20930aa4b4c3SPeter Maydell return do_uqrshl_d(n, (int8_t)shift, true, &env->QF); 20940aa4b4c3SPeter Maydell } 20950aa4b4c3SPeter Maydell 20960aa4b4c3SPeter Maydell /* Operate on 64-bit values, but saturate at 48 bits */ 20970aa4b4c3SPeter Maydell static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift, 20980aa4b4c3SPeter Maydell bool round, uint32_t *sat) 20990aa4b4c3SPeter Maydell { 2100fdcf2269SPeter Maydell int64_t val, extval; 2101fdcf2269SPeter Maydell 21020aa4b4c3SPeter Maydell if (shift <= -48) { 21030aa4b4c3SPeter Maydell /* Rounding the sign bit always produces 0. 
*/ 21040aa4b4c3SPeter Maydell if (round) { 21050aa4b4c3SPeter Maydell return 0; 21060aa4b4c3SPeter Maydell } 21070aa4b4c3SPeter Maydell return src >> 63; 21080aa4b4c3SPeter Maydell } else if (shift < 0) { 21090aa4b4c3SPeter Maydell if (round) { 21100aa4b4c3SPeter Maydell src >>= -shift - 1; 2111fdcf2269SPeter Maydell val = (src >> 1) + (src & 1); 2112fdcf2269SPeter Maydell } else { 2113fdcf2269SPeter Maydell val = src >> -shift; 21140aa4b4c3SPeter Maydell } 2115fdcf2269SPeter Maydell extval = sextract64(val, 0, 48); 2116fdcf2269SPeter Maydell if (!sat || val == extval) { 2117fdcf2269SPeter Maydell return extval; 2118fdcf2269SPeter Maydell } 21190aa4b4c3SPeter Maydell } else if (shift < 48) { 212095351aa7SPeter Maydell int64_t extval = sextract64(src << shift, 0, 48); 212195351aa7SPeter Maydell if (!sat || src == (extval >> shift)) { 21220aa4b4c3SPeter Maydell return extval; 21230aa4b4c3SPeter Maydell } 21240aa4b4c3SPeter Maydell } else if (!sat || src == 0) { 21250aa4b4c3SPeter Maydell return 0; 21260aa4b4c3SPeter Maydell } 21270aa4b4c3SPeter Maydell 21280aa4b4c3SPeter Maydell *sat = 1; 212995351aa7SPeter Maydell return src >= 0 ? 
MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17); 21300aa4b4c3SPeter Maydell } 21310aa4b4c3SPeter Maydell 21320aa4b4c3SPeter Maydell /* Operate on 64-bit values, but saturate at 48 bits */ 21330aa4b4c3SPeter Maydell static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift, 21340aa4b4c3SPeter Maydell bool round, uint32_t *sat) 21350aa4b4c3SPeter Maydell { 21360aa4b4c3SPeter Maydell uint64_t val, extval; 21370aa4b4c3SPeter Maydell 21380aa4b4c3SPeter Maydell if (shift <= -(48 + round)) { 21390aa4b4c3SPeter Maydell return 0; 21400aa4b4c3SPeter Maydell } else if (shift < 0) { 21410aa4b4c3SPeter Maydell if (round) { 21420aa4b4c3SPeter Maydell val = src >> (-shift - 1); 21430aa4b4c3SPeter Maydell val = (val >> 1) + (val & 1); 21440aa4b4c3SPeter Maydell } else { 21450aa4b4c3SPeter Maydell val = src >> -shift; 21460aa4b4c3SPeter Maydell } 21470aa4b4c3SPeter Maydell extval = extract64(val, 0, 48); 21480aa4b4c3SPeter Maydell if (!sat || val == extval) { 21490aa4b4c3SPeter Maydell return extval; 21500aa4b4c3SPeter Maydell } 21510aa4b4c3SPeter Maydell } else if (shift < 48) { 215295351aa7SPeter Maydell uint64_t extval = extract64(src << shift, 0, 48); 215395351aa7SPeter Maydell if (!sat || src == (extval >> shift)) { 21540aa4b4c3SPeter Maydell return extval; 21550aa4b4c3SPeter Maydell } 21560aa4b4c3SPeter Maydell } else if (!sat || src == 0) { 21570aa4b4c3SPeter Maydell return 0; 21580aa4b4c3SPeter Maydell } 21590aa4b4c3SPeter Maydell 21600aa4b4c3SPeter Maydell *sat = 1; 21610aa4b4c3SPeter Maydell return MAKE_64BIT_MASK(0, 48); 21620aa4b4c3SPeter Maydell } 21630aa4b4c3SPeter Maydell 21640aa4b4c3SPeter Maydell uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift) 21650aa4b4c3SPeter Maydell { 21660aa4b4c3SPeter Maydell return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF); 21670aa4b4c3SPeter Maydell } 21680aa4b4c3SPeter Maydell 21690aa4b4c3SPeter Maydell uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift) 
21700aa4b4c3SPeter Maydell { 21710aa4b4c3SPeter Maydell return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF); 21720aa4b4c3SPeter Maydell } 217346321d47SPeter Maydell 217446321d47SPeter Maydell uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift) 217546321d47SPeter Maydell { 217646321d47SPeter Maydell return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF); 217746321d47SPeter Maydell } 217846321d47SPeter Maydell 217946321d47SPeter Maydell uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift) 218046321d47SPeter Maydell { 218146321d47SPeter Maydell return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF); 218246321d47SPeter Maydell } 218304ea4d3cSPeter Maydell 218404ea4d3cSPeter Maydell uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift) 218504ea4d3cSPeter Maydell { 218604ea4d3cSPeter Maydell return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF); 218704ea4d3cSPeter Maydell } 218804ea4d3cSPeter Maydell 218904ea4d3cSPeter Maydell uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift) 219004ea4d3cSPeter Maydell { 219104ea4d3cSPeter Maydell return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF); 219204ea4d3cSPeter Maydell } 2193395b92d5SPeter Maydell 2194395b92d5SPeter Maydell #define DO_VIDUP(OP, ESIZE, TYPE, FN) \ 2195395b92d5SPeter Maydell uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \ 2196395b92d5SPeter Maydell uint32_t offset, uint32_t imm) \ 2197395b92d5SPeter Maydell { \ 2198395b92d5SPeter Maydell TYPE *d = vd; \ 2199395b92d5SPeter Maydell uint16_t mask = mve_element_mask(env); \ 2200395b92d5SPeter Maydell unsigned e; \ 2201395b92d5SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2202395b92d5SPeter Maydell mergemask(&d[H##ESIZE(e)], offset, mask); \ 2203395b92d5SPeter Maydell offset = FN(offset, imm); \ 2204395b92d5SPeter Maydell } \ 2205395b92d5SPeter Maydell mve_advance_vpt(env); \ 2206395b92d5SPeter Maydell return offset; \ 
2207395b92d5SPeter Maydell } 2208395b92d5SPeter Maydell 2209395b92d5SPeter Maydell #define DO_VIWDUP(OP, ESIZE, TYPE, FN) \ 2210395b92d5SPeter Maydell uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \ 2211395b92d5SPeter Maydell uint32_t offset, uint32_t wrap, \ 2212395b92d5SPeter Maydell uint32_t imm) \ 2213395b92d5SPeter Maydell { \ 2214395b92d5SPeter Maydell TYPE *d = vd; \ 2215395b92d5SPeter Maydell uint16_t mask = mve_element_mask(env); \ 2216395b92d5SPeter Maydell unsigned e; \ 2217395b92d5SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2218395b92d5SPeter Maydell mergemask(&d[H##ESIZE(e)], offset, mask); \ 2219395b92d5SPeter Maydell offset = FN(offset, wrap, imm); \ 2220395b92d5SPeter Maydell } \ 2221395b92d5SPeter Maydell mve_advance_vpt(env); \ 2222395b92d5SPeter Maydell return offset; \ 2223395b92d5SPeter Maydell } 2224395b92d5SPeter Maydell 2225395b92d5SPeter Maydell #define DO_VIDUP_ALL(OP, FN) \ 2226395b92d5SPeter Maydell DO_VIDUP(OP##b, 1, int8_t, FN) \ 2227395b92d5SPeter Maydell DO_VIDUP(OP##h, 2, int16_t, FN) \ 2228395b92d5SPeter Maydell DO_VIDUP(OP##w, 4, int32_t, FN) 2229395b92d5SPeter Maydell 2230395b92d5SPeter Maydell #define DO_VIWDUP_ALL(OP, FN) \ 2231395b92d5SPeter Maydell DO_VIWDUP(OP##b, 1, int8_t, FN) \ 2232395b92d5SPeter Maydell DO_VIWDUP(OP##h, 2, int16_t, FN) \ 2233395b92d5SPeter Maydell DO_VIWDUP(OP##w, 4, int32_t, FN) 2234395b92d5SPeter Maydell 2235395b92d5SPeter Maydell static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm) 2236395b92d5SPeter Maydell { 2237395b92d5SPeter Maydell offset += imm; 2238395b92d5SPeter Maydell if (offset == wrap) { 2239395b92d5SPeter Maydell offset = 0; 2240395b92d5SPeter Maydell } 2241395b92d5SPeter Maydell return offset; 2242395b92d5SPeter Maydell } 2243395b92d5SPeter Maydell 2244395b92d5SPeter Maydell static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm) 2245395b92d5SPeter Maydell { 2246395b92d5SPeter Maydell if (offset == 0) { 
2247395b92d5SPeter Maydell offset = wrap; 2248395b92d5SPeter Maydell } 2249395b92d5SPeter Maydell offset -= imm; 2250395b92d5SPeter Maydell return offset; 2251395b92d5SPeter Maydell } 2252395b92d5SPeter Maydell 2253395b92d5SPeter Maydell DO_VIDUP_ALL(vidup, DO_ADD) 2254395b92d5SPeter Maydell DO_VIWDUP_ALL(viwdup, do_add_wrap) 2255395b92d5SPeter Maydell DO_VIWDUP_ALL(vdwdup, do_sub_wrap) 2256eff5d9a9SPeter Maydell 2257eff5d9a9SPeter Maydell /* 2258eff5d9a9SPeter Maydell * Vector comparison. 2259eff5d9a9SPeter Maydell * P0 bits for non-executed beats (where eci_mask is 0) are unchanged. 2260eff5d9a9SPeter Maydell * P0 bits for predicated lanes in executed beats (where mask is 0) are 0. 2261eff5d9a9SPeter Maydell * P0 bits otherwise are updated with the results of the comparisons. 2262eff5d9a9SPeter Maydell * We must also keep unchanged the MASK fields at the top of v7m.vpr. 2263eff5d9a9SPeter Maydell */ 2264eff5d9a9SPeter Maydell #define DO_VCMP(OP, ESIZE, TYPE, FN) \ 2265eff5d9a9SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \ 2266eff5d9a9SPeter Maydell { \ 2267eff5d9a9SPeter Maydell TYPE *n = vn, *m = vm; \ 2268eff5d9a9SPeter Maydell uint16_t mask = mve_element_mask(env); \ 2269eff5d9a9SPeter Maydell uint16_t eci_mask = mve_eci_mask(env); \ 2270eff5d9a9SPeter Maydell uint16_t beatpred = 0; \ 2271eff5d9a9SPeter Maydell uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 2272eff5d9a9SPeter Maydell unsigned e; \ 2273eff5d9a9SPeter Maydell for (e = 0; e < 16 / ESIZE; e++) { \ 2274eff5d9a9SPeter Maydell bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]); \ 2275eff5d9a9SPeter Maydell /* Comparison sets 0/1 bits for each byte in the element */ \ 2276eff5d9a9SPeter Maydell beatpred |= r * emask; \ 2277eff5d9a9SPeter Maydell emask <<= ESIZE; \ 2278eff5d9a9SPeter Maydell } \ 2279eff5d9a9SPeter Maydell beatpred &= mask; \ 2280eff5d9a9SPeter Maydell env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 2281eff5d9a9SPeter Maydell (beatpred & eci_mask); \ 
2282eff5d9a9SPeter Maydell mve_advance_vpt(env); \ 2283eff5d9a9SPeter Maydell } 2284eff5d9a9SPeter Maydell 2285cce81873SPeter Maydell #define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN) \ 2286cce81873SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 2287cce81873SPeter Maydell uint32_t rm) \ 2288cce81873SPeter Maydell { \ 2289cce81873SPeter Maydell TYPE *n = vn; \ 2290cce81873SPeter Maydell uint16_t mask = mve_element_mask(env); \ 2291cce81873SPeter Maydell uint16_t eci_mask = mve_eci_mask(env); \ 2292cce81873SPeter Maydell uint16_t beatpred = 0; \ 2293cce81873SPeter Maydell uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 2294cce81873SPeter Maydell unsigned e; \ 2295cce81873SPeter Maydell for (e = 0; e < 16 / ESIZE; e++) { \ 2296cce81873SPeter Maydell bool r = FN(n[H##ESIZE(e)], (TYPE)rm); \ 2297cce81873SPeter Maydell /* Comparison sets 0/1 bits for each byte in the element */ \ 2298cce81873SPeter Maydell beatpred |= r * emask; \ 2299cce81873SPeter Maydell emask <<= ESIZE; \ 2300cce81873SPeter Maydell } \ 2301cce81873SPeter Maydell beatpred &= mask; \ 2302cce81873SPeter Maydell env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 2303cce81873SPeter Maydell (beatpred & eci_mask); \ 2304cce81873SPeter Maydell mve_advance_vpt(env); \ 2305cce81873SPeter Maydell } 2306cce81873SPeter Maydell 2307eff5d9a9SPeter Maydell #define DO_VCMP_S(OP, FN) \ 2308eff5d9a9SPeter Maydell DO_VCMP(OP##b, 1, int8_t, FN) \ 2309eff5d9a9SPeter Maydell DO_VCMP(OP##h, 2, int16_t, FN) \ 2310cce81873SPeter Maydell DO_VCMP(OP##w, 4, int32_t, FN) \ 2311cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN) \ 2312cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN) \ 2313cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN) 2314eff5d9a9SPeter Maydell 2315eff5d9a9SPeter Maydell #define DO_VCMP_U(OP, FN) \ 2316eff5d9a9SPeter Maydell DO_VCMP(OP##b, 1, uint8_t, FN) \ 2317eff5d9a9SPeter Maydell DO_VCMP(OP##h, 2, uint16_t, FN) \ 2318cce81873SPeter 
Maydell DO_VCMP(OP##w, 4, uint32_t, FN) \ 2319cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \ 2320cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \ 2321cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN) 2322eff5d9a9SPeter Maydell 2323eff5d9a9SPeter Maydell #define DO_EQ(N, M) ((N) == (M)) 2324eff5d9a9SPeter Maydell #define DO_NE(N, M) ((N) != (M)) 2325eff5d9a9SPeter Maydell #define DO_EQ(N, M) ((N) == (M)) 2326eff5d9a9SPeter Maydell #define DO_EQ(N, M) ((N) == (M)) 2327eff5d9a9SPeter Maydell #define DO_GE(N, M) ((N) >= (M)) 2328eff5d9a9SPeter Maydell #define DO_LT(N, M) ((N) < (M)) 2329eff5d9a9SPeter Maydell #define DO_GT(N, M) ((N) > (M)) 2330eff5d9a9SPeter Maydell #define DO_LE(N, M) ((N) <= (M)) 2331eff5d9a9SPeter Maydell 2332eff5d9a9SPeter Maydell DO_VCMP_U(vcmpeq, DO_EQ) 2333eff5d9a9SPeter Maydell DO_VCMP_U(vcmpne, DO_NE) 2334eff5d9a9SPeter Maydell DO_VCMP_U(vcmpcs, DO_GE) 2335eff5d9a9SPeter Maydell DO_VCMP_U(vcmphi, DO_GT) 2336eff5d9a9SPeter Maydell DO_VCMP_S(vcmpge, DO_GE) 2337eff5d9a9SPeter Maydell DO_VCMP_S(vcmplt, DO_LT) 2338eff5d9a9SPeter Maydell DO_VCMP_S(vcmpgt, DO_GT) 2339eff5d9a9SPeter Maydell DO_VCMP_S(vcmple, DO_LE) 2340c386443bSPeter Maydell 2341c386443bSPeter Maydell void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm) 2342c386443bSPeter Maydell { 2343c386443bSPeter Maydell /* 2344c386443bSPeter Maydell * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n] 2345c386443bSPeter Maydell * but note that whether bytes are written to Qd is still subject 2346c386443bSPeter Maydell * to (all forms of) predication in the usual way. 
2347c386443bSPeter Maydell */ 2348c386443bSPeter Maydell uint64_t *d = vd, *n = vn, *m = vm; 2349c386443bSPeter Maydell uint16_t mask = mve_element_mask(env); 2350c386443bSPeter Maydell uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0); 2351c386443bSPeter Maydell unsigned e; 2352c386443bSPeter Maydell for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) { 2353c386443bSPeter Maydell uint64_t r = m[H8(e)]; 2354c386443bSPeter Maydell mergemask(&r, n[H8(e)], p0); 2355c386443bSPeter Maydell mergemask(&d[H8(e)], r, mask); 2356c386443bSPeter Maydell } 2357c386443bSPeter Maydell mve_advance_vpt(env); 2358c386443bSPeter Maydell } 2359398e7cd3SPeter Maydell 2360fea3958fSPeter Maydell void HELPER(mve_vpnot)(CPUARMState *env) 2361fea3958fSPeter Maydell { 2362fea3958fSPeter Maydell /* 2363fea3958fSPeter Maydell * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged. 2364fea3958fSPeter Maydell * P0 bits for predicated lanes in executed bits (where mask is 0) are 0. 2365fea3958fSPeter Maydell * P0 bits otherwise are inverted. 2366fea3958fSPeter Maydell * (This is the same logic as VCMP.) 2367fea3958fSPeter Maydell * This insn is itself subject to predication and to beat-wise execution, 2368fea3958fSPeter Maydell * and after it executes VPT state advances in the usual way. 2369fea3958fSPeter Maydell */ 2370fea3958fSPeter Maydell uint16_t mask = mve_element_mask(env); 2371fea3958fSPeter Maydell uint16_t eci_mask = mve_eci_mask(env); 2372fea3958fSPeter Maydell uint16_t beatpred = ~env->v7m.vpr & mask; 2373fea3958fSPeter Maydell env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask); 2374fea3958fSPeter Maydell mve_advance_vpt(env); 2375fea3958fSPeter Maydell } 2376fea3958fSPeter Maydell 23770f31e37cSPeter Maydell /* 23780f31e37cSPeter Maydell * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed, 23790f31e37cSPeter Maydell * otherwise set according to value of Rn. 
The calculation of 23800f31e37cSPeter Maydell * newmask here works in the same way as the calculation of the 23810f31e37cSPeter Maydell * ltpmask in mve_element_mask(), but we have pre-calculated 23820f31e37cSPeter Maydell * the masklen in the generated code. 23830f31e37cSPeter Maydell */ 23840f31e37cSPeter Maydell void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen) 23850f31e37cSPeter Maydell { 23860f31e37cSPeter Maydell uint16_t mask = mve_element_mask(env); 23870f31e37cSPeter Maydell uint16_t eci_mask = mve_eci_mask(env); 23880f31e37cSPeter Maydell uint16_t newmask; 23890f31e37cSPeter Maydell 23900f31e37cSPeter Maydell assert(masklen <= 16); 23910f31e37cSPeter Maydell newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0; 23920f31e37cSPeter Maydell newmask &= mask; 23930f31e37cSPeter Maydell env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask); 23940f31e37cSPeter Maydell mve_advance_vpt(env); 23950f31e37cSPeter Maydell } 23960f31e37cSPeter Maydell 2397398e7cd3SPeter Maydell #define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \ 2398398e7cd3SPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 2399398e7cd3SPeter Maydell { \ 2400398e7cd3SPeter Maydell TYPE *d = vd, *m = vm; \ 2401398e7cd3SPeter Maydell uint16_t mask = mve_element_mask(env); \ 2402398e7cd3SPeter Maydell unsigned e; \ 2403398e7cd3SPeter Maydell bool qc = false; \ 2404398e7cd3SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2405398e7cd3SPeter Maydell bool sat = false; \ 2406398e7cd3SPeter Maydell mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \ 2407398e7cd3SPeter Maydell qc |= sat & mask & 1; \ 2408398e7cd3SPeter Maydell } \ 2409398e7cd3SPeter Maydell if (qc) { \ 2410398e7cd3SPeter Maydell env->vfp.qc[0] = qc; \ 2411398e7cd3SPeter Maydell } \ 2412398e7cd3SPeter Maydell mve_advance_vpt(env); \ 2413398e7cd3SPeter Maydell } 2414398e7cd3SPeter Maydell 2415398e7cd3SPeter Maydell #define DO_VQABS_B(N, SATP) \ 2416398e7cd3SPeter Maydell 
do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP) 2417398e7cd3SPeter Maydell #define DO_VQABS_H(N, SATP) \ 2418398e7cd3SPeter Maydell do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP) 2419398e7cd3SPeter Maydell #define DO_VQABS_W(N, SATP) \ 2420398e7cd3SPeter Maydell do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP) 2421398e7cd3SPeter Maydell 2422398e7cd3SPeter Maydell #define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP) 2423398e7cd3SPeter Maydell #define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP) 2424398e7cd3SPeter Maydell #define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP) 2425398e7cd3SPeter Maydell 2426398e7cd3SPeter Maydell DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B) 2427398e7cd3SPeter Maydell DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H) 2428398e7cd3SPeter Maydell DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W) 2429398e7cd3SPeter Maydell 2430398e7cd3SPeter Maydell DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B) 2431398e7cd3SPeter Maydell DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H) 2432398e7cd3SPeter Maydell DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W) 2433d5c571eaSPeter Maydell 2434d5c571eaSPeter Maydell /* 2435d5c571eaSPeter Maydell * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its 2436d5c571eaSPeter Maydell * absolute value; we then do an unsigned comparison. 
2437d5c571eaSPeter Maydell */ 2438d5c571eaSPeter Maydell #define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN) \ 2439d5c571eaSPeter Maydell void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 2440d5c571eaSPeter Maydell { \ 2441d5c571eaSPeter Maydell UTYPE *d = vd; \ 2442d5c571eaSPeter Maydell STYPE *m = vm; \ 2443d5c571eaSPeter Maydell uint16_t mask = mve_element_mask(env); \ 2444d5c571eaSPeter Maydell unsigned e; \ 2445d5c571eaSPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2446d5c571eaSPeter Maydell UTYPE r = DO_ABS(m[H##ESIZE(e)]); \ 2447d5c571eaSPeter Maydell r = FN(d[H##ESIZE(e)], r); \ 2448d5c571eaSPeter Maydell mergemask(&d[H##ESIZE(e)], r, mask); \ 2449d5c571eaSPeter Maydell } \ 2450d5c571eaSPeter Maydell mve_advance_vpt(env); \ 2451d5c571eaSPeter Maydell } 2452d5c571eaSPeter Maydell 2453d5c571eaSPeter Maydell DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX) 2454d5c571eaSPeter Maydell DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX) 2455d5c571eaSPeter Maydell DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX) 2456d5c571eaSPeter Maydell DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN) 2457d5c571eaSPeter Maydell DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN) 2458d5c571eaSPeter Maydell DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN) 2459