/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "fpu/softfloat.h"

static uint16_t mve_eci_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector correspond
     * to beats being executed. The mask has 1 bits for executed lanes
     * and 0 bits where ECI says this beat was already executed.
     */
    int eci;

    if ((env->condexec_bits & 0xf) != 0) {
        /*
         * Low bits of condexec_bits nonzero: not a pending-ECI context,
         * so all beats of the insn will be executed.
         */
        return 0xffff;
    }

    /* ECI value is stored in the upper bits of condexec_bits */
    eci = env->condexec_bits >> 4;
    switch (eci) {
    case ECI_NONE:
        return 0xffff;
    case ECI_A0:
        return 0xfff0;
    case ECI_A0A1:
        return 0xff00;
    case ECI_A0A1A2:
    case ECI_A0A1A2B0:
        return 0xf000;
    default:
        g_assert_not_reached();
    }
}

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    /* If a VPR MASK field is zero, that half is not VPT-predicated */
    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
        mask &= ltpmask;
    }

    /*
     * ECI bits indicate which beats are already executed;
     * we handle this by effectively predicating them out.
     */
    mask &= mve_eci_mask(env);
    return mask;
}

/*
 * Advance the VPT and ECI state if necessary; every MVE helper calls
 * this once at the end of the insn's execution.
 */
static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;
    uint16_t inv_mask;
    uint16_t eci_mask = mve_eci_mask(env);

    if ((env->condexec_bits & 0xf) == 0) {
        /* Step the ECI state: A0A1A2B0 resumes at A0, anything else is done */
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    /* Invert P0 bits if needed, but only for beats we actually executed */
    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    /* Start by assuming we invert all bits corresponding to executed beats */
    inv_mask = eci_mask;
    if (mask01 <= 8) {
        /* MASK01 says don't invert low half of P0 */
        inv_mask &= ~0xff;
    }
    if (mask23 <= 8) {
        /* MASK23 says don't invert high half of P0 */
        inv_mask &= ~0xff00;
    }
    vpr ^= inv_mask;
    /* Only update MASK01 if beat 1 executed */
    if (eci_mask & 0xf0) {
        vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    }
    /* Beat 3 always executes, so update MASK23 */
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}

/*
 * For loads, predicated lanes are zeroed instead of keeping their old values.
 * Macro parameters:
 *   OP: helper name suffix; MSIZE: memory access size in bytes;
 *   LDTYPE: infix for the cpu_*_data_ra accessor to use;
 *   ESIZE: size in bytes of one element of the destination vector;
 *   TYPE: C type of one destination element.
 * (MSIZE < ESIZE gives the widening load forms.)
 */
#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned b, e;                                                  \
        /*                                                              \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned   \
         * beats so we don't care if we update part of the dest and     \
         * then take an exception.                                      \
         */                                                             \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (eci_mask & (1 << b)) {                                  \
                d[H##ESIZE(e)] = (mask & (1 << b)) ?                    \
                    cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0;     \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Stores: predicated lanes simply do not write to memory */
#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Same-size loads and stores */
DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

/* Widening loads (memory size < element size) */
DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

/* Narrowing stores (memory size < element size) */
DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR

/*
 * Gather loads/scatter stores. Here each element of Qm specifies
 * an offset to use from the base register Rm. In the _os_ versions
 * that offset is scaled by the element size.
 * For loads, predicated lanes are zeroed instead of retaining
 * their previous values.
 *
 * Macro parameters: OP helper suffix; LDTYPE/STTYPE cpu_*_data_ra
 * accessor infix; ESIZE element size in bytes; TYPE element C type;
 * OFFTYPE C type of the offset elements in Qm; ADDRFN macro combining
 * base and offset into an address; WB true for writeback forms, which
 * store the computed address back into the offset element.
 */
#define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB)        \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        TYPE *d = vd;                                                   \
        OFFTYPE *m = vm;                                                \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
            if (!(eci_mask & 1)) {                                      \
                /* ECI says this beat was already executed */           \
                continue;                                               \
            }                                                           \
            addr = ADDRFN(base, m[H##ESIZE(e)]);                        \
            d[H##ESIZE(e)] = (mask & 1) ?                               \
                cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0;         \
            if (WB) {                                                   \
                m[H##ESIZE(e)] = addr;                                  \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* We know here TYPE is unsigned so always the same as the offset type */
#define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB)                 \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        TYPE *d = vd;                                                   \
        TYPE *m = vm;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
            if (!(eci_mask & 1)) {                                      \
                /* ECI says this beat was already executed */           \
                continue;                                               \
            }                                                           \
            addr = ADDRFN(base, m[H##ESIZE(e)]);                        \
            if (mask & 1) {                                             \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            if (WB) {                                                   \
                m[H##ESIZE(e)] = addr;                                  \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/*
 * 64-bit accesses are slightly different: they are done as two 32-bit
 * accesses, controlled by the predicate mask for the relevant beat,
 * and with a single 32-bit offset in the first of the two Qm elements.
 * Note that for QEMU our IMPDEF AIRCR.ENDIANNESS is always 0 (little).
 * Address writeback happens on the odd beats and updates the address
 * stored in the even-beat element.
 */
#define DO_VLDR64_SG(OP, ADDRFN, WB)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        uint32_t *d = vd;                                               \
        uint32_t *m = vm;                                               \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) {      \
            if (!(eci_mask & 1)) {                                      \
                /* ECI says this beat was already executed */           \
                continue;                                               \
            }                                                           \
            /* Offset lives in the even-numbered element of the pair */ \
            addr = ADDRFN(base, m[H4(e & ~1)]);                         \
            addr += 4 * (e & 1);                                        \
            d[H4(e)] = (mask & 1) ? cpu_ldl_data_ra(env, addr, GETPC()) : 0; \
            if (WB && (e & 1)) {                                        \
                m[H4(e & ~1)] = addr - 4;                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSTR64_SG(OP, ADDRFN, WB)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        uint32_t *d = vd;                                               \
        uint32_t *m = vm;                                               \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) {      \
            if (!(eci_mask & 1)) {                                      \
                /* ECI says this beat was already executed */           \
                continue;                                               \
            }                                                           \
            /* Offset lives in the even-numbered element of the pair */ \
            addr = ADDRFN(base, m[H4(e & ~1)]);                         \
            addr += 4 * (e & 1);                                        \
            if (mask & 1) {                                             \
                cpu_stl_data_ra(env, addr, d[H4(e)], GETPC());          \
            }                                                           \
            if (WB && (e & 1)) {                                        \
                m[H4(e & ~1)] = addr - 4;                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Address functions: plain add, and add with offset scaled by element size */
#define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET))
#define ADDR_ADD_OSH(BASE, OFFSET) ((BASE) + ((OFFSET) << 1))
#define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2))
#define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3))

/* Unscaled gather loads, signed-widening then unsigned */
DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false)

DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false)

/* Offset-scaled (_os_) gather loads */
DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false)
DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false)

/* Unscaled scatter stores */
DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false)
DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false)
DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false)
DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false)
DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false)
DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false)
DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false)

/* Offset-scaled (_os_) scatter stores */
DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false)
DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false)
DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false)
DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false)

/* Writeback (_wb_) forms: the computed addresses are written back to Qm */
DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true)
DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true)
DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true)
DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)

/*
 * Deinterleaving loads/interleaving stores.
 *
 * For these helpers we are passed the index of the first Qreg
 * (VLD2/VST2 will also access Qn+1, VLD4/VST4 access Qn .. Qn+3)
 * and the value of the base address register Rn.
 * The helpers are specialized for pattern and element size, so
 * for instance vld42h is VLD4 with pattern 2, element size MO_16.
 *
 * These insns are beatwise but not predicated, so we must honour ECI,
 * but need not look at mve_element_mask().
 *
 * The pseudocode implements these insns with multiple memory accesses
 * of the element size, but rules R_VVVG and R_FXDM permit us to make
 * one 32-bit memory access per beat.
 *
 * The O1..O4 macro arguments are the per-beat offsets (the off[] table)
 * which encode the interleave pattern for that specialization.
 */
#define DO_VLD4B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            /* One byte of the 32-bit word goes to each of Qn .. Qn+3 */ \
            for (e = 0; e < 4; e++, data >>= 8) {                       \
                uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
                qd[H1(off[beat])] = data;                               \
            }                                                           \
        }                                                               \
    }

#define DO_VLD4H(OP, O1, O2)                                            \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                      \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O1, O2, O2 };               \
        uint32_t addr, data;                                            \
        int y; /* y counts 0 2 0 2 */                                   \
        uint16_t *qd;                                                   \
        for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) {   \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 8 + (beat & 1) * 4;               \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            /* Low halfword to Qn+y, high halfword to Qn+y+1 */         \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y);             \
            qd[H2(off[beat])] = data;                                   \
            data >>= 16;                                                \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1);         \
            qd[H2(off[beat])] = data;                                   \
        }                                                               \
    }

#define DO_VLD4W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                      \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        int y;                                                          \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            /* Whole 32-bit word goes to one destination Qreg */        \
            y = (beat + (O1 & 2)) & 3;                                  \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y);             \
            qd[H4(off[beat] >> 2)] = data;                              \
        }                                                               \
    }

DO_VLD4B(vld40b, 0, 1, 10, 11)
DO_VLD4B(vld41b, 2, 3, 12, 13)
DO_VLD4B(vld42b, 4, 5, 14, 15)
DO_VLD4B(vld43b, 6, 7, 8, 9)
DO_VLD4H(vld40h, 0, 5)
DO_VLD4H(vld41h, 1, 6)
DO_VLD4H(vld42h, 2, 7)
DO_VLD4H(vld43h, 3, 4)

DO_VLD4W(vld40w, 0, 1, 10, 11)
DO_VLD4W(vld41w, 2, 3, 12, 13)
DO_VLD4W(vld42w, 4, 5, 14, 15)
DO_VLD4W(vld43w, 6, 7, 8, 9)

/*
 * VLD2 deinterleaving loads: same beatwise, ECI-honouring scheme as
 * VLD4 above, but spreading each 32-bit word across Qn and Qn+1 only.
 */
#define DO_VLD2B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint8_t *qd;                                                    \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 2;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            /* Alternate bytes go to Qn and Qn+1 */                     \
            for (e = 0; e < 4; e++, data >>= 8) {                       \
                qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1));    \
                qd[H1(off[beat] + (e >> 1))] = data;                    \
            }                                                           \
        }                                                               \
    }

#define DO_VLD2H(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                      \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        int e;                                                          \
        uint16_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            /* Low halfword to Qn, high halfword to Qn+1 */             \
            for (e = 0; e < 2; e++, data >>= 16) {                      \
                qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e);         \
                qd[H2(off[beat])] = data;                               \
            }                                                           \
        }                                                               \
    }

#define DO_VLD2W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                      \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat];                                    \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            /* Whole word alternates between Qn and Qn+1 */             \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1));    \
            qd[H4(off[beat] >> 3)] = data;                              \
        }                                                               \
    }
DO_VLD2B(vld20b, 0, 2, 12, 14)
DO_VLD2B(vld21b, 4, 6, 8, 10)

DO_VLD2H(vld20h, 0, 1, 6, 7)
DO_VLD2H(vld21h, 2, 3, 4, 5)

DO_VLD2W(vld20w, 0, 4, 24, 28)
DO_VLD2W(vld21w, 8, 12, 16, 20)

/*
 * VST4 interleaving stores: the mirror images of the VLD4 loads above,
 * assembling one 32-bit word per beat from the four Qregs and storing it.
 */
#define DO_VST4B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = 0;                                                   \
            /* Gather one byte from each of Qn .. Qn+3 */               \
            for (e = 3; e >= 0; e--) {                                  \
                uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
                data = (data << 8) | qd[H1(off[beat])];                 \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST4H(OP, O1, O2)                                            \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                      \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O1, O2, O2 };               \
        uint32_t addr, data;                                            \
        int y; /* y counts 0 2 0 2 */                                   \
        uint16_t *qd;                                                   \
        for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) {   \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 8 + (beat & 1) * 4;               \
            /* Low halfword from Qn+y, high halfword from Qn+y+1 */     \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y);             \
            data = qd[H2(off[beat])];                                   \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1);         \
            data |= qd[H2(off[beat])] << 16;                            \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST4W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                      \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        int y;                                                          \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            y = (beat + (O1 & 2)) & 3;                                  \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y);             \
            data = qd[H4(off[beat] >> 2)];                              \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

DO_VST4B(vst40b, 0, 1, 10, 11)
DO_VST4B(vst41b, 2, 3, 12, 13)
DO_VST4B(vst42b, 4, 5, 14, 15)
DO_VST4B(vst43b, 6, 7, 8, 9)

DO_VST4H(vst40h, 0, 5)
DO_VST4H(vst41h, 1, 6)
DO_VST4H(vst42h, 2, 7)
DO_VST4H(vst43h, 3, 4)

DO_VST4W(vst40w, 0, 1, 10, 11)
DO_VST4W(vst41w, 2, 3, 12, 13)
DO_VST4W(vst42w, 4, 5, 14, 15)
DO_VST4W(vst43w, 6, 7, 8, 9)

/* VST2 interleaving stores: mirror images of the VLD2 loads above */
#define DO_VST2B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint8_t *qd;                                                    \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 2;                                \
            data = 0;                                                   \
            /* Gather alternate bytes from Qn and Qn+1 */               \
            for (e = 3; e >= 0; e--) {                                  \
                qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1));    \
                data = (data << 8) | qd[H1(off[beat] + (e >> 1))];      \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST2H(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                      \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        int e;                                                          \
        uint16_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = 0;                                                   \
            /* Low halfword from Qn, high halfword from Qn+1 */         \
            for (e = 1; e >= 0; e--) {                                  \
                qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e);         \
                data = (data << 16) | qd[H2(off[beat])];                \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST2W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                      \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI
says skip this beat */ \ 690075e7e97SPeter Maydell continue; \ 691075e7e97SPeter Maydell } \ 692075e7e97SPeter Maydell addr = base + off[beat]; \ 693075e7e97SPeter Maydell qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \ 694075e7e97SPeter Maydell data = qd[H4(off[beat] >> 3)]; \ 695075e7e97SPeter Maydell cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 696075e7e97SPeter Maydell } \ 697075e7e97SPeter Maydell } 698075e7e97SPeter Maydell 699075e7e97SPeter Maydell DO_VST2B(vst20b, 0, 2, 12, 14) 700075e7e97SPeter Maydell DO_VST2B(vst21b, 4, 6, 8, 10) 701075e7e97SPeter Maydell 702075e7e97SPeter Maydell DO_VST2H(vst20h, 0, 1, 6, 7) 703075e7e97SPeter Maydell DO_VST2H(vst21h, 2, 3, 4, 5) 704075e7e97SPeter Maydell 705075e7e97SPeter Maydell DO_VST2W(vst20w, 0, 4, 24, 28) 706075e7e97SPeter Maydell DO_VST2W(vst21w, 8, 12, 16, 20) 707075e7e97SPeter Maydell 708075e7e97SPeter Maydell /* 7090f0f2bd5SPeter Maydell * The mergemask(D, R, M) macro performs the operation "*D = R" but 7100f0f2bd5SPeter Maydell * storing only the bytes which correspond to 1 bits in M, 7110f0f2bd5SPeter Maydell * leaving other bytes in *D unchanged. We use _Generic 7120f0f2bd5SPeter Maydell * to select the correct implementation based on the type of D. 
 */

/* Byte case: update *d only when bit 0 of the mask is set. */
static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

/*
 * Wider cases: expand the relevant low mask bits into a per-byte
 * bitmask (via expand_pred_b_data) and merge r into *d under it.
 */
static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b_data[mask & 3];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b_data[mask & 0xf];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b_data[mask & 0xff];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

/* Dispatch on the pointer type of D to the matching-width helper. */
#define mergemask(D, R, M)                      \
    _Generic(D,                                 \
             uint8_t *: mergemask_ub,           \
             int8_t *: mergemask_sb,            \
             uint16_t *: mergemask_uh,          \
             int16_t *: mergemask_sh,           \
             uint32_t *: mergemask_uw,          \
             int32_t *: mergemask_sw,           \
             uint64_t *: mergemask_uq,          \
             int64_t *: mergemask_sq)(D, R, M)

void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
{
    /*
     * The generated code already replicated an 8 or 16 bit constant
     * into the 32-bit value, so we only need to write the 32-bit
     * value to all elements of the Qreg, allowing for predication.
     */
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        mergemask(&d[H4(e)], val, mask);
    }
    mve_advance_vpt(env);
}

/*
 * DO_1OP: define a predicated one-operand vector helper.
 * ESIZE is the element size in bytes, TYPE the element type and FN
 * the scalar operation applied to each element of vm; results are
 * merged into vd only for active lanes of the predicate mask.
 */
#define DO_1OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask);       \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Count leading sign bits; clrsb32() works on 32 bits, so adjust. */
#define DO_CLS_B(N) (clrsb32(N) - 24)
#define DO_CLS_H(N) (clrsb32(N) - 16)

DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)

/* Count leading zero bits; clz32() works on 32 bits, so adjust. */
#define DO_CLZ_B(N) (clz32(N) - 24)
#define DO_CLZ_H(N) (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

/* VREV: reverse elements within 16-bit or 32-bit containers. */
DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
/* VREV: reverse elements within 64-bit containers. */
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)

#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)

/* Absolute value; the float forms just clear the sign bit(s). */
#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N)  ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N)  ((N) & dup_const(MO_32, 0x7fffffff))

DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)

/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)

/* Negation; the float forms just flip the sign bit(s). */
#define DO_NEG(N)    (-(N))
#define DO_FNEGH(N)  ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N)  ((N) ^ dup_const(MO_32, 0x80000000))

DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)

/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)

/*
 * 1 operand immediates: Vda is destination and possibly also one source.
 * All these insns work at 64-bit widths.
 */
#define DO_1OP_IMM(OP, FN)                                              \
    void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm)    \
    {                                                                   \
        uint64_t *da = vda;                                             \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) {                      \
            mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask);            \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_MOVI(N, I) (I)
#define DO_ANDI(N, I) ((N) & (I))
#define DO_ORRI(N, I) ((N) | (I))

DO_1OP_IMM(vmovi, DO_MOVI)
DO_1OP_IMM(vandi, DO_ANDI)
DO_1OP_IMM(vorri, DO_ORRI)

/*
 * DO_2OP: define a predicated two-operand vector helper; FN is the
 * scalar operation applied elementwise to vn and vm, with results
 * merged into vd only for active lanes.
 */
#define DO_2OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask);        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_U(OP, FN)                        \
    DO_2OP(OP##b, 1, uint8_t, FN)               \
    DO_2OP(OP##h, 2, uint16_t, FN)              \
    DO_2OP(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_S(OP, FN)                        \
    DO_2OP(OP##b, 1, int8_t, FN)                \
    DO_2OP(OP##h, 2, int16_t, FN)               \
    DO_2OP(OP##w, 4, int32_t, FN)

/*
 * "Long" operations where two half-sized inputs (taken from either the
 * top or the bottom of the input vector) produce a double-width result.
 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
 */
#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)               \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)],              \
                         m[H##ESIZE(le * 2 + TOP)]);                    \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/*
 * DO_2OP_SAT: as DO_2OP, but FN takes an extra bool* which it sets
 * when the result saturated; saturation in any *active* lane
 * (sat & mask & 1) sets the cumulative saturation flag in
 * env->vfp.qc[0].
 */
#define DO_2OP_SAT(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat);          \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_SAT_U(OP, FN)                    \
    DO_2OP_SAT(OP##b, 1, uint8_t, FN)           \
    DO_2OP_SAT(OP##h, 2, uint16_t, FN)          \
    DO_2OP_SAT(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_SAT_S(OP, FN)                    \
    DO_2OP_SAT(OP##b, 1, int8_t, FN)            \
    DO_2OP_SAT(OP##h, 2, int16_t, FN)           \
    DO_2OP_SAT(OP##w, 4, int32_t, FN)

#define DO_AND(N, M)  ((N) & (M))
#define DO_BIC(N, M)  ((N) & ~(M))
#define DO_ORR(N, M)  ((N) | (M))
#define DO_ORN(N, M)  ((N) | ~(M))
#define DO_EOR(N, M)  ((N) ^ (M))

/* Bitwise logical ops work on the whole vector 64 bits at a time. */
DO_2OP(vand, 8, uint64_t, DO_AND)
DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)

#define DO_ADD(N, M) ((N) + (M))
#define DO_SUB(N, M) ((N) - (M))
#define DO_MUL(N, M) ((N) * (M))

DO_2OP_U(vadd, DO_ADD)
DO_2OP_U(vsub, DO_SUB)
DO_2OP_U(vmul, DO_MUL)

/* VMULL: long multiplies from the bottom (TOP=0) or top (TOP=1) halves. */
DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)

DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)

/*
 * Polynomial multiply. We can always do this generating 64 bits
 * of the result at a time, so we don't need to use DO_2OP_L.
 * The masks select the even-numbered (bottom) input elements;
 * the T forms shift first so the odd (top) elements are used.
 */
#define VMULLPH_MASK 0x00ff00ff00ff00ffULL
#define VMULLPW_MASK 0x0000ffff0000ffffULL
#define DO_VMULLPBH(N, M) pmull_h((N) & VMULLPH_MASK, (M) & VMULLPH_MASK)
#define DO_VMULLPTH(N, M) DO_VMULLPBH((N) >> 8, (M) >> 8)
#define DO_VMULLPBW(N, M) pmull_w((N) & VMULLPW_MASK, (M) & VMULLPW_MASK)
#define DO_VMULLPTW(N, M) DO_VMULLPBW((N) >> 16, (M) >> 16)

DO_2OP(vmullpbh, 8, uint64_t, DO_VMULLPBH)
DO_2OP(vmullpth, 8, uint64_t, DO_VMULLPTH)
DO_2OP(vmullpbw, 8, uint64_t, DO_VMULLPBW)
DO_2OP(vmullptw, 8, uint64_t, DO_VMULLPTW)

/*
 * Because the computation type is at least twice as large as required,
 * these work for both signed and unsigned source types.
 */
static inline uint8_t do_mulh_b(int32_t n, int32_t m)
{
    return (n * m) >> 8;
}

static inline uint16_t do_mulh_h(int32_t n, int32_t m)
{
    return (n * m) >> 16;
}

static inline uint32_t do_mulh_w(int64_t n, int64_t m)
{
    return (n * m) >> 32;
}

/* Rounding forms: add half an output LSB before shifting down. */
static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
{
    return (n * m + (1U << 7)) >> 8;
}

static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
{
    return (n * m + (1U << 15)) >> 16;
}

static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
{
    return (n * m + (1U << 31)) >> 32;
}

DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)

DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)

#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))

DO_2OP_S(vmaxs, DO_MAX)
DO_2OP_U(vmaxu, DO_MAX)
DO_2OP_S(vmins, DO_MIN)
DO_2OP_U(vminu, DO_MIN)

/* Absolute difference. */
#define DO_ABD(N, M)  ((N) >= (M) ? (N) - (M) : (M) - (N))

DO_2OP_S(vabds, DO_ABD)
DO_2OP_U(vabdu, DO_ABD)

/*
 * Halving add/sub: compute at double width so the intermediate sum
 * or difference cannot overflow, then shift right by one.
 */
static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n + m) >> 1;
}

static inline int32_t do_vhadd_s(int32_t n, int32_t m)
{
    return ((int64_t)n + m) >> 1;
}

static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n - m) >> 1;
}

static inline int32_t do_vhsub_s(int32_t n, int32_t m)
{
    return ((int64_t)n - m) >> 1;
}

DO_2OP_S(vhadds, do_vhadd_s)
DO_2OP_U(vhaddu, do_vhadd_u)
DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)

/*
 * Shift-by-register: the shift count is the low byte of M. Passing
 * NULL as the saturation pointer means "no saturation"; the 'true'
 * forms are the rounding variants.
 */
#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)

DO_2OP_S(vshls, DO_VSHLS)
DO_2OP_U(vshlu, DO_VSHLU)
DO_2OP_S(vrshls, DO_VRSHLS)
DO_2OP_U(vrshlu, DO_VRSHLU)

/* Rounding halving add: as halving add, but adds one before shifting. */
#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)

DO_2OP_S(vrhadds, DO_RHADD_S)
DO_2OP_U(vrhaddu, DO_RHADD_U)

/*
 * VADC/VSBC core: 32-bit add-with-carry, with the carry chained
 * across the active lanes. inv is XORed into each element of m (so
 * inv == -1 turns the add into a subtract via ~m), carry_in seeds
 * the chain, and when update_flags is set the final carry out is
 * written to FPSCR.C with N, Z and V cleared.
 */
static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
                    uint32_t inv, uint32_t carry_in, bool update_flags)
{
    uint16_t mask = mve_element_mask(env);
    unsigned e;

    /* If any additions trigger, we will update flags. */
    if (mask & 0x1111) {
        update_flags = true;
    }

    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        uint64_t r = carry_in;
        r += n[H4(e)];
        r += m[H4(e)] ^ inv;
        if (mask & 1) {
            /* Only active lanes propagate their carry out. */
            carry_in = r >> 32;
        }
        mergemask(&d[H4(e)], r, mask);
    }

    if (update_flags) {
        /* Store C, clear NZV. */
        env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
        env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
    }
    mve_advance_vpt(env);
}

/* VADC/VSBC take the initial carry from FPSCR.C. */
void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, 0, carry_in, false);
}

void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, -1, carry_in, false);
}

/* The I forms use a fixed initial carry (0 for add, 1 for subtract). */
void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, 0, 0, true);
}

void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, -1, 1, true);
}

/*
 * VCADD: complex add. Even elements combine with the following odd
 * element via FN0; odd elements combine with the preceding even
 * element via FN1.
 */
#define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1)                             \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE r[16 / ESIZE];                                             \
        /* Calculate all results first to avoid overwriting inputs */   \
        for (e = 0; e < 16 / ESIZE; e++) {                              \
            if (!(e & 1)) {                                             \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]);         \
            } else {                                                    \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]);         \
            }                                                           \
        }                                                               \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], r[e], mask);                     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VCADD_ALL(OP, FN0, FN1)              \
    DO_VCADD(OP##b, 1, int8_t, FN0, FN1)        \
    DO_VCADD(OP##h, 2, int16_t, FN0, FN1)       \
    DO_VCADD(OP##w, 4, int32_t, FN0, FN1)

DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s)
DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s)
/*
 * Clamp val to [min, max], setting *s when saturation occurred.
 * Works for byte/halfword/word results since val is computed at
 * 64 bits by the callers.
 */
static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
    if (val > max) {
        *s = true;
        return max;
    } else if (val < min) {
        *s = true;
        return min;
    }
    return val;
}

/* Saturating add/sub: the 64-bit intermediate cannot itself overflow. */
#define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s)
#define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s)
#define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s)

#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s)
#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s)
#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s)

#define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s)
#define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s)
#define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s)

#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s)
#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)

/*
 * For QDMULH and QRDMULH we simplify "double and shift by esize" into
 * "shift by esize-1", adjusting the QRDMULH rounding constant to match.
 */
#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \
                                        INT8_MIN, INT8_MAX, s)
#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \
                                        INT16_MIN, INT16_MAX, s)
#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \
                                        INT32_MIN, INT32_MAX, s)

#define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \
                                         INT8_MIN, INT8_MAX, s)
#define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \
                                         INT16_MIN, INT16_MAX, s)
#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
                                         INT32_MIN, INT32_MAX, s)

DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)

DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)

DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)
DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)

/*
 * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
 * and friends wanting a uint32_t* sat and our needing a bool*.
 */
#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \
    ({ \
        uint32_t su32 = 0; \
        typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
        if (su32) { \
            *satp = true; \
        } \
        r; \
    })

/*
 * Per-element saturating shifts: signed/unsigned, with and without
 * rounding, all built on the shared *qrshl_bhs helpers.
 */
#define DO_SQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
#define DO_UQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
#define DO_SQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
#define DO_UQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
#define DO_SUQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)

DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)

/*
 * Multiply add dual returning high half
 * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of
 * whether to add the rounding constant, and the pointer to the
 * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant",
 * saturate to twice the input size and return the high half; or
 * (A * B - C * D) etc for VQDMLSDH.
 * Only the even (XCHG == 0) or odd (XCHG == 1) destination elements are
 * written on a given beat; saturation in an active lane 0-bit position
 * propagates into FPSCR.QC via env->vfp.qc[0].
 */
#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            if ((e & 1) == XCHG) { \
                TYPE r = FN(n[H##ESIZE(e)], \
                            m[H##ESIZE(e - XCHG)], \
                            n[H##ESIZE(e + (1 - 2 * XCHG))], \
                            m[H##ESIZE(e + (1 - XCHG))], \
                            ROUND, &sat); \
                mergemask(&d[H##ESIZE(e)], r, mask); \
                qc |= sat & mask & 1; \
            } \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/*
 * 8-bit VQDMLADH kernel: products and doubling done at 64 bits,
 * saturate to 16 bits, return the high byte.
 */
static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

/* 16-bit VQDMLADH kernel: saturate to 32 bits, return high half. */
static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant before doubling rather
     * than adding the rounding constant after the doubling.
     */
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

/* VQDMLSDH kernels: as the VQDMLADH ones but with a*b - c*d. */
static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /* The same ordering issue as in do_vqdmladh_w applies here too */
    if (ssub64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)

DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)

DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)

DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)

/*
 * Two-operand vector-by-scalar: apply FN elementwise to vn and the
 * (truncated) scalar rm, merging results under the predicate mask.
 */
#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \
        } \
        mve_advance_vpt(env); \
    }

/*
 * As DO_2OP_SCALAR, but FN may saturate; if it does so in a lane whose
 * lowest predicate bit is active, FPSCR.QC is set via env->vfp.qc[0].
 */
#define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \
                      mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* "accumulating" version where FN takes d as well as n and m */
#define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask); \
        } \
        mve_advance_vpt(env); \
    }

/* Accumulating saturating version: FN takes d, n, m and a sat flag. */
#define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat), \
                      mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN) \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN) \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
#define DO_2OP_SCALAR_S(OP, FN) \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN) \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

#define DO_2OP_ACC_SCALAR_U(OP, FN) \
    DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN) \
    DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN) \
    DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)

DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

/*
 * VQ[R]DMLAH/VQ[R]DMLASH kernels: compute a*b*2 + (c << esize)
 * (+ rounding constant if round), saturate to twice the element size
 * and return the high half. For the byte and halfword cases the whole
 * computation fits in int64_t, so no special overflow care is needed.
 */
static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
                            int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
                            int round, bool *sat)
{
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant and half the "c << esize"
     * before doubling rather than adding the rounding constant after
     * the doubling.
     */
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c << 31;
    int64_t r;
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

/*
 * The *MLAH insns are vector * scalar + vector;
 * the *MLASH insns are vector * vector + scalar
 */
#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S)
#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S)
#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S)
#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S)
#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S)
#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S)

#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S)
#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S)
#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S)
#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S)
#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S)
#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S)

DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W)

DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W)

/* Vector by scalar plus vector */
#define DO_VMLA(D, N, M) ((N) * (M) + (D))

DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA)

/* Vector by vector plus scalar */
#define DO_VMLAS(D, N, M) ((N) * (D) + (M))

DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS)

/*
 * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
 * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
 * SATMASK specifies which bits of the predicate mask matter for determining
 * whether to propagate a saturation indication into FPSCR.QC -- for
 * the 16x16->32 case we must check only the bit corresponding to the T or B
 * half that we used, but for the 32x32->64 case we propagate if the mask
 * bit is set for either half.
 */
#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        bool qc = false; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \
            mergemask(&d[H##LESIZE(le)], r, mask); \
            qc |= sat && (mask & SATMASK); \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* 16x16->32 doubling multiply: the 64-bit product*2 cannot overflow. */
static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
{
    int64_t r = ((int64_t)n * m) * 2;
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
}

/* 32x32->64 doubling multiply. */
static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
{
    /* The multiply can't overflow, but the doubling might */
    int64_t r = (int64_t)n * m;
    if (r > INT64_MAX / 2) {
        *sat = true;
        return INT64_MAX;
    } else if (r < INT64_MIN / 2) {
        *sat = true;
        return INT64_MIN;
    } else {
        return r * 2;
    }
}

/* Predicate-mask bits relevant for QC propagation (see comment above). */
#define SATMASK16B 1
#define SATMASK16T (1 << 2)
#define SATMASK32 ((1 << 4) | 1)

DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16B)
DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16T)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)

/*
 * Long saturating ops
 */
#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                void *vm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        bool qc = false; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \
            LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \
            mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \
            qc |= sat && (mask & SATMASK); \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)

/*
 * VBRSR kernels: bit-reverse the bottom m bits of n (m taken modulo 256;
 * m == 0 yields 0; m >= the element width reverses the whole element).
 */
static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit8(n);
    if (m < 8) {
        n >>= 8 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit16(n);
    if (m < 16) {
        n >>= 16 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit32(n);
    if (m < 32) {
        n >>= 32 - m;
    }
    return n;
}

DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)

/*
 * Multiply add long dual accumulate ops.
 * EVENACC/ODDACC are += or -= applied to the 64-bit accumulator for
 * even and odd element pairs respectively; XCHG swaps the pairing.
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint64_t a) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *n = vn, *m = vm; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                if (e & 1) { \
                    a ODDACC \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else { \
                    a EVENACC \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                } \
            } \
        } \
        mve_advance_vpt(env); \
        return a; \
    }

DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)

DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)

/*
 * Multiply add dual accumulate ops: like DO_LDAV above but with a
 * 32-bit accumulator and no widening of the per-element products.
 */
#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC)              \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,     \
                                    void *vm, uint32_t a)           \
    {                                                               \
        uint16_t mask = mve_element_mask(env);                      \
        unsigned e;                                                 \
        TYPE *n = vn, *m = vm;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {          \
            if (mask & 1) {                                         \
                if (e & 1) {                                        \
                    a ODDACC                                        \
                        n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else {                                            \
                    a EVENACC                                       \
                        n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                }                                                   \
            }                                                       \
        }                                                           \
        mve_advance_vpt(env);                                       \
        return a;                                                   \
    }

/* Expand one insn name into byte/halfword/word signed variants */
#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC)           \
    DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC)   \
    DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC)  \
    DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)

/* ...and the unsigned variants */
#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC)           \
    DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC)  \
    DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)

DO_DAV_S(vmladavs, false, +=, +=)
DO_DAV_U(vmladavu, false, +=, +=)
DO_DAV_S(vmlsdav, false, +=, -=)
DO_DAV_S(vmladavsx, true, +=, +=)
DO_DAV_S(vmlsdavx, true, +=, -=)

/*
 * Rounding multiply add long dual accumulate high. In the pseudocode
 * this is implemented with a 72-bit internal accumulator value of which
 * the top 64 bits are returned. We optimize this to avoid having to
 * use 128-bit arithmetic -- we can do this because the 74-bit accumulator
 * is squashed back into 64-bits after each beat.
 */
#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB)                        \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,     \
                                    void *vm, uint64_t a)           \
    {                                                               \
        uint16_t mask = mve_element_mask(env);                      \
        unsigned e;                                                 \
        TYPE *n = vn, *m = vm;                                      \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {                  \
            if (mask & 1) {                                         \
                LTYPE mul;                                          \
                if (e & 1) {                                        \
                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)];    \
                    if (SUB) {                                      \
                        mul = -mul;                                 \
                    }                                               \
                } else {                                            \
                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)];    \
                }                                                   \
                /* drop the low 8 bits, rounding to nearest (ties up) */ \
                mul = (mul >> 8) + ((mul >> 7) & 1);                \
                a += mul;                                           \
            }                                                       \
        }                                                           \
        mve_advance_vpt(env);                                       \
        return a;                                                   \
    }

DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)

DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)

DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)

/* Vector add across vector */
#define DO_VADDV(OP, ESIZE, TYPE)                               \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra)                \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                ra += m[H##ESIZE(e)];                           \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

DO_VADDV(vaddvsb, 1, int8_t)
DO_VADDV(vaddvsh, 2, int16_t)
DO_VADDV(vaddvsw, 4, int32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)

/*
 * Vector max/min across vector. Unlike VADDV, we must
 * read ra as the element size, not its full width.
 * We work with int64_t internally for simplicity.
 */
#define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN)                \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra_in)             \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        int64_t ra = (RATYPE)ra_in;                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                ra = FN(ra, m[H##ESIZE(e)]);                    \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

#define DO_VMAXMINV_U(INSN, FN)                         \
    DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN)       \
    DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN)     \
    DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN)
#define DO_VMAXMINV_S(INSN, FN)                         \
    DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN)         \
    DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN)       \
    DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN)

/*
 * Helpers for max and min of absolute values across vector:
 * note that we only take the absolute value of 'm', not 'n'
 */
static int64_t do_maxa(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MAX(n, m);
}
static int64_t do_mina(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MIN(n, m);
}

DO_VMAXMINV_S(vmaxvs, DO_MAX)
DO_VMAXMINV_U(vmaxvu, DO_MAX)
DO_VMAXMINV_S(vminvs, DO_MIN)
DO_VMAXMINV_U(vminvu, DO_MIN)
/*
 * VMAXAV, VMINAV treat the general purpose input as unsigned
 * and the vector elements as signed.
 */
DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa)
DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa)
DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa)
DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)

/*
 * VABAV: accumulate the absolute differences |n[e] - m[e]| of active
 * elements into the 32-bit scalar 'ra'.  The elements are widened to
 * int64_t first so the difference cannot overflow.
 */
#define DO_VABAV(OP, ESIZE, TYPE)                               \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint32_t ra)      \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm, *n = vn;                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                int64_t n0 = n[H##ESIZE(e)];                    \
                int64_t m0 = m[H##ESIZE(e)];                    \
                uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0);  \
                ra += r;                                        \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }

DO_VABAV(vabavsb, 1, int8_t)
DO_VABAV(vabavsh, 2, int16_t)
DO_VABAV(vabavsw, 4, int32_t)
DO_VABAV(vabavub, 1, uint8_t)
DO_VABAV(vabavuh, 2, uint16_t)
DO_VABAV(vabavuw, 4, uint32_t)

/*
 * VADDLV: add 32-bit vector elements across the vector into a 64-bit
 * accumulator, sign- or zero-extending each element via LTYPE.
 */
#define DO_VADDLV(OP, TYPE, LTYPE)                              \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint64_t ra)                \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {              \
            if (mask & 1) {                                     \
                ra += (LTYPE)m[H4(e)];                          \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

DO_VADDLV(vaddlv_s, int32_t, int64_t)
DO_VADDLV(vaddlv_u, uint32_t, uint64_t)

/* Shifts by immediate */
#define DO_2SHIFT(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        TYPE *d = vd, *m = vm;                                  \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)],                          \
                      FN(m[H##ESIZE(e)], shift), mask);         \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

/*
 * As DO_2SHIFT, but FN reports per-element saturation via 'sat';
 * any saturation on an active lane sets FPSCR.QC (vfp.qc[0]).
 */
#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN)                      \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        TYPE *d = vd, *m = vm;                                  \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        bool qc = false;                                        \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            bool sat = false;                                   \
            mergemask(&d[H##ESIZE(e)],                          \
                      FN(m[H##ESIZE(e)], shift, &sat), mask);   \
            qc |= sat & mask & 1;                               \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

/* provide unsigned 2-op shift helpers for all sizes */
#define DO_2SHIFT_U(OP, FN)                     \
    DO_2SHIFT(OP##b, 1, uint8_t, FN)            \
    DO_2SHIFT(OP##h, 2, uint16_t, FN)           \
    DO_2SHIFT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_S(OP, FN)                     \
    DO_2SHIFT(OP##b, 1, int8_t, FN)             \
    DO_2SHIFT(OP##h, 2, int16_t, FN)            \
    DO_2SHIFT(OP##w, 4, int32_t, FN)

#define DO_2SHIFT_SAT_U(OP, FN)                 \
    DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN)        \
    DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN)       \
    DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_SAT_S(OP, FN)                 \
    DO_2SHIFT_SAT(OP##b, 1, int8_t, FN)         \
    DO_2SHIFT_SAT(OP##h, 2, int16_t, FN)        \
    DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)

DO_2SHIFT_U(vshli_u, DO_VSHLU)
DO_2SHIFT_S(vshli_s, DO_VSHLS)
DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP)
DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP)

/* Shift-and-insert; we always work with 64 bits at a time */
#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN)                    \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        uint64_t *d = vd, *m = vm;                                      \
        uint16_t mask;                                                  \
        uint64_t shiftmask;                                             \
        unsigned e;                                                     \
        if (shift == ESIZE * 8) {                                       \
            /*                                                          \
             * Only VSRI can shift by <dt>; it should mean "don't       \
             * update the destination". The generic logic can't handle  \
             * this because it would try to shift by an out-of-range    \
             * amount, so special case it here.                         \
             */                                                         \
            goto done;                                                  \
        }                                                               \
        assert(shift < ESIZE * 8);                                      \
        mask = mve_element_mask(env);                                   \
        /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */     \
        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift));     \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) {                      \
            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) |       \
                         (d[H8(e)] & ~shiftmask);                       \
            mergemask(&d[H8(e)], r, mask);                              \
        }                                                               \
    done:                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))

DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)

/*
 * Long shifts taking half-sized inputs from top or bottom of the input
 * vector and producing a double-width result. ESIZE, TYPE are for
 * the input, and LESIZE, LTYPE for the output.
 * Unlike the normal shift helpers, we do not handle negative shift counts,
 * because the long shift is strictly left-only.
 */
#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE)                   \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *m = vm;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        assert(shift <= 16);                                            \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift;        \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSHLL_ALL(OP, TOP)                                \
    DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t)             \
    DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t)           \
    DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t)            \
    DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t)          \

DO_VSHLL_ALL(vshllb, false)
DO_VSHLL_ALL(vshllt, true)

/*
 * Narrowing right shifts, taking a double sized input, shifting it
 * and putting the result in either the top or bottom half of the output.
 * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
 */
#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)       \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned le;                                            \
        /* the 'top' forms are predicated by the odd mask bits */ \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            TYPE r = FN(m[H##LESIZE(le)], shift);               \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);     \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_VSHRN_ALL(OP, FN)                                    \
    DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN)        \
    DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN)       \
    DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN)         \
    DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)

/*
 * Unsigned rounding right shift of x by sh, rounding to nearest with
 * ties going up.  sh == 64 keeps only the rounding contribution of
 * bit 63; sh > 64 gives 0.
 * NOTE(review): the (x >> (sh - 1)) term assumes sh >= 1; callers
 * pass in-range shift immediates -- confirm at call sites.
 */
static inline uint64_t do_urshr(uint64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else if (sh == 64) {
        return x >> 63;
    } else {
        return 0;
    }
}

/* Signed counterpart of do_urshr; same rounding, same sh >= 1 assumption */
static inline int64_t do_srshr(int64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else {
        /* Rounding the sign bit always produces 0. */
        return 0;
    }
}

DO_VSHRN_ALL(vshrn, DO_SHR)
DO_VSHRN_ALL(vrshrn, do_urshr)

/*
 * Clamp val into [min, max]; sets *satp (without ever clearing it)
 * if clamping occurred.  Used by the saturating narrow/shift helpers.
 */
static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
                                 bool *satp)
{
    if (val > max) {
        *satp = true;
        return max;
    } else if (val < min) {
        *satp = true;
        return min;
    } else {
        return val;
    }
}

/* Saturating narrowing right shifts */
#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)   \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        bool qc = false;                                        \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false;                                   \
            TYPE r = FN(m[H##LESIZE(le)], shift, &sat);         \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);     \
            qc |= sat & mask & 1;                               \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

/* Expand bottom+top pairs of saturating-narrow helpers per output type */
#define DO_VSHRN_SAT_UB(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN)       \
    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VSHRN_SAT_UH(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN)      \
    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VSHRN_SAT_SB(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN)         \
    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VSHRN_SAT_SH(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN)        \
    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)

/*
 * Per-element shift-then-saturate operations: plain truncating shifts
 * (DO_SHRN_*), and rounding shifts (DO_RSHRN_*) built on do_[us]rshr.
 * The *UN variants take a signed input and saturate to unsigned.
 */
#define DO_SHRN_SB(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
#define DO_SHRN_UB(N, M, SATP) \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
#define DO_SHRUN_B(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)

#define DO_SHRN_SH(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
#define DO_SHRN_UH(N, M, SATP) \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
#define DO_SHRUN_H(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)

#define DO_RSHRN_SB(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
#define DO_RSHRN_UB(N, M, SATP) \
    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
#define DO_RSHRUN_B(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)

#define DO_RSHRN_SH(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
#define DO_RSHRN_UH(N, M, SATP) \
    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
#define DO_RSHRUN_H(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)

DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)

DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)

/*
 * VMOVN: narrow each double-width input element by truncation into
 * the bottom (TOP = false) or top (TOP = true) half of the output
 * element pair.
 */
#define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE)           \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            mergemask(&d[H##ESIZE(le * 2 + TOP)],               \
                      m[H##LESIZE(le)], mask);                  \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t)
DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t)

/* As DO_VMOVN but narrowing via FN with saturation, updating QC */
#define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)   \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        bool qc = false;                                        \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false;                                   \
            TYPE r = FN(m[H##LESIZE(le)], &sat);                \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);     \
            qc |= sat & mask & 1;                               \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

/* Expand bottom+top pairs of saturating VMOVN helpers per output type */
#define DO_VMOVN_SAT_UB(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN)       \
    DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VMOVN_SAT_UH(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN)      \
    DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VMOVN_SAT_SB(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN)         \
    DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VMOVN_SAT_SH(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN)        \
    DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)

/* Saturate-to-narrower-type ops; the *UN forms take signed in, unsigned out */
#define DO_VQMOVN_SB(N, SATP) \
    do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQMOVN_UB(N, SATP) \
    do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP)
#define DO_VQMOVUN_B(N, SATP) \
    do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP)

#define DO_VQMOVN_SH(N, SATP) \
    do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQMOVN_UH(N, SATP) \
    do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP)
#define DO_VQMOVUN_H(N, SATP) \
    do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP)

DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB)
DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH)
DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB)
DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH)
DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B)
DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H)

uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
                           uint32_t shift)
{
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    uint32_t r;

    /*
     * For each 32-bit element, we shift it left, bringing in the
     * low 'shift' bits of rdm at the bottom. Bits shifted out at
     * the top become the new rdm, if the predicate mask permits.
     * The final rdm value is returned to update the register.
     * shift == 0 here means "shift by 32 bits".
     */
    if (shift == 0) {
        /* whole-element shift: each element is simply replaced by rdm */
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = rdm;
            if (mask & 1) {
                rdm = d[H4(e)];
            }
            mergemask(&d[H4(e)], r, mask);
        }
    } else {
        uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);

        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = (d[H4(e)] << shift) | (rdm & shiftmask);
            if (mask & 1) {
                rdm = d[H4(e)] >> (32 - shift);
            }
            mergemask(&d[H4(e)], r, mask);
        }
    }
    mve_advance_vpt(env);
    return rdm;
}

/*
 * Long scalar shifts.  These lean on the shared saturating-shift
 * helpers (semantics in vec_internal.h, outside this chunk): SSHRL is
 * a right shift expressed as a left shift by the negated count; the
 * SQ/UQ forms report saturation into env->QF.
 */
uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
}

uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
}

/* Rounding saturating long shifts; saturation is reported in env->QF */
uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
}

/*
 * Operate on 64-bit values, but saturate at 48 bits.
 * A negative 'shift' is a right shift; 'round' requests round-to-nearest
 * on right shifts. If '*sat' is non-NULL it is set to 1 when the result
 * does not fit in 48 bits, and the saturated extreme (sign-extended to
 * 64 bits) is returned instead.
 */
static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
                                    bool round, uint32_t *sat)
{
    int64_t val, extval;

    if (shift <= -48) {
        /* Rounding the sign bit always produces 0. */
        if (round) {
            return 0;
        }
        return src >> 63;
    } else if (shift < 0) {
        if (round) {
            /* Shift by one less, then fold in the dropped bit to round */
            src >>= -shift - 1;
            val = (src >> 1) + (src & 1);
        } else {
            val = src >> -shift;
        }
        extval = sextract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        int64_t extval = sextract64(src << shift, 0, 48);
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    /* 0x00007fffffffffff for positive inputs, sign-extended minimum else */
    return src >= 0 ? MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17);
}

/* Operate on 64-bit values, but saturate at 48 bits (unsigned variant) */
static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
                                     bool round, uint32_t *sat)
{
    uint64_t val, extval;

    if (shift <= -(48 + round)) {
        return 0;
    } else if (shift < 0) {
        if (round) {
            val = src >> (-shift - 1);
            val = (val >> 1) + (val & 1);
        } else {
            val = src >> -shift;
        }
        extval = extract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        uint64_t extval = extract64(src << shift, 0, 48);
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return MAKE_64BIT_MASK(0, 48);
}

/* 48-bit saturating rounding long shift rights */
uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
}

/* 32-bit scalar saturating (and rounding) shifts, setting QF on saturation */
uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
}

uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
}

/*
 * Incrementing-dup: write a sequence starting at 'offset' to the
 * destination, stepping via FN(offset, imm) per element; the final
 * offset is returned to the caller so the scalar register can be
 * written back.
 */
#define DO_VIDUP(OP, ESIZE, TYPE, FN)                           \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd,       \
                              uint32_t offset, uint32_t imm)    \
    {                                                           \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)], offset, mask);           \
            offset = FN(offset, imm);                           \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return offset;                                          \
    }

/* As DO_VIDUP, but the step function also takes a 'wrap' limit */
#define DO_VIWDUP(OP, ESIZE, TYPE, FN)                          \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd,       \
                              uint32_t offset, uint32_t wrap,   \
                              uint32_t imm)                     \
    {                                                           \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)], offset, mask);           \
            offset = FN(offset, wrap, imm);                     \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return offset;                                          \
    }

#define DO_VIDUP_ALL(OP, FN)                                    \
    DO_VIDUP(OP##b, 1, int8_t, FN)                              \
    DO_VIDUP(OP##h, 2, int16_t, FN)                             \
    DO_VIDUP(OP##w, 4, int32_t, FN)

#define DO_VIWDUP_ALL(OP, FN)                                   \
    DO_VIWDUP(OP##b, 1, int8_t, FN)                             \
    DO_VIWDUP(OP##h, 2, int16_t, FN)                            \
    DO_VIWDUP(OP##w, 4, int32_t, FN)

/* Wrapping add: hitting 'wrap' exactly resets the offset to 0 */
static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    offset += imm;
    if (offset == wrap) {
        offset = 0;
    }
    return offset;
}

/* Wrapping subtract: an offset of 0 first wraps up to 'wrap' */
static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    if (offset == 0) {
        offset = wrap;
    }
    offset -= imm;
    return offset;
}

DO_VIDUP_ALL(vidup, DO_ADD)
DO_VIWDUP_ALL(viwdup, do_add_wrap)
DO_VIWDUP_ALL(vdwdup, do_sub_wrap)

/*
 * Vector comparison.
 * P0 bits for non-executed beats (where eci_mask is 0) are unchanged.
 * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
 * P0 bits otherwise are updated with the results of the comparisons.
 * We must also keep unchanged the MASK fields at the top of v7m.vpr.
 */
#define DO_VCMP(OP, ESIZE, TYPE, FN)                                    \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm)   \
    {                                                                   \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++) {                              \
            bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]);                \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
            emask <<= ESIZE;                                            \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
2625eff5d9a9SPeter Maydell mve_advance_vpt(env); \ 2626eff5d9a9SPeter Maydell } 2627eff5d9a9SPeter Maydell 2628cce81873SPeter Maydell #define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN) \ 2629cce81873SPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 2630cce81873SPeter Maydell uint32_t rm) \ 2631cce81873SPeter Maydell { \ 2632cce81873SPeter Maydell TYPE *n = vn; \ 2633cce81873SPeter Maydell uint16_t mask = mve_element_mask(env); \ 2634cce81873SPeter Maydell uint16_t eci_mask = mve_eci_mask(env); \ 2635cce81873SPeter Maydell uint16_t beatpred = 0; \ 2636cce81873SPeter Maydell uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 2637cce81873SPeter Maydell unsigned e; \ 2638cce81873SPeter Maydell for (e = 0; e < 16 / ESIZE; e++) { \ 2639cce81873SPeter Maydell bool r = FN(n[H##ESIZE(e)], (TYPE)rm); \ 2640cce81873SPeter Maydell /* Comparison sets 0/1 bits for each byte in the element */ \ 2641cce81873SPeter Maydell beatpred |= r * emask; \ 2642cce81873SPeter Maydell emask <<= ESIZE; \ 2643cce81873SPeter Maydell } \ 2644cce81873SPeter Maydell beatpred &= mask; \ 2645cce81873SPeter Maydell env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 2646cce81873SPeter Maydell (beatpred & eci_mask); \ 2647cce81873SPeter Maydell mve_advance_vpt(env); \ 2648cce81873SPeter Maydell } 2649cce81873SPeter Maydell 2650eff5d9a9SPeter Maydell #define DO_VCMP_S(OP, FN) \ 2651eff5d9a9SPeter Maydell DO_VCMP(OP##b, 1, int8_t, FN) \ 2652eff5d9a9SPeter Maydell DO_VCMP(OP##h, 2, int16_t, FN) \ 2653cce81873SPeter Maydell DO_VCMP(OP##w, 4, int32_t, FN) \ 2654cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN) \ 2655cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN) \ 2656cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN) 2657eff5d9a9SPeter Maydell 2658eff5d9a9SPeter Maydell #define DO_VCMP_U(OP, FN) \ 2659eff5d9a9SPeter Maydell DO_VCMP(OP##b, 1, uint8_t, FN) \ 2660eff5d9a9SPeter Maydell DO_VCMP(OP##h, 2, uint16_t, FN) \ 2661cce81873SPeter 
Maydell DO_VCMP(OP##w, 4, uint32_t, FN) \ 2662cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \ 2663cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \ 2664cce81873SPeter Maydell DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN) 2665eff5d9a9SPeter Maydell 2666eff5d9a9SPeter Maydell #define DO_EQ(N, M) ((N) == (M)) 2667eff5d9a9SPeter Maydell #define DO_NE(N, M) ((N) != (M)) 2668eff5d9a9SPeter Maydell #define DO_EQ(N, M) ((N) == (M)) 2669eff5d9a9SPeter Maydell #define DO_EQ(N, M) ((N) == (M)) 2670eff5d9a9SPeter Maydell #define DO_GE(N, M) ((N) >= (M)) 2671eff5d9a9SPeter Maydell #define DO_LT(N, M) ((N) < (M)) 2672eff5d9a9SPeter Maydell #define DO_GT(N, M) ((N) > (M)) 2673eff5d9a9SPeter Maydell #define DO_LE(N, M) ((N) <= (M)) 2674eff5d9a9SPeter Maydell 2675eff5d9a9SPeter Maydell DO_VCMP_U(vcmpeq, DO_EQ) 2676eff5d9a9SPeter Maydell DO_VCMP_U(vcmpne, DO_NE) 2677eff5d9a9SPeter Maydell DO_VCMP_U(vcmpcs, DO_GE) 2678eff5d9a9SPeter Maydell DO_VCMP_U(vcmphi, DO_GT) 2679eff5d9a9SPeter Maydell DO_VCMP_S(vcmpge, DO_GE) 2680eff5d9a9SPeter Maydell DO_VCMP_S(vcmplt, DO_LT) 2681eff5d9a9SPeter Maydell DO_VCMP_S(vcmpgt, DO_GT) 2682eff5d9a9SPeter Maydell DO_VCMP_S(vcmple, DO_LE) 2683c386443bSPeter Maydell 2684c386443bSPeter Maydell void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm) 2685c386443bSPeter Maydell { 2686c386443bSPeter Maydell /* 2687c386443bSPeter Maydell * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n] 2688c386443bSPeter Maydell * but note that whether bytes are written to Qd is still subject 2689c386443bSPeter Maydell * to (all forms of) predication in the usual way. 
     */
    uint64_t *d = vd, *n = vn, *m = vm;
    uint16_t mask = mve_element_mask(env);
    uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
    unsigned e;
    for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) {
        /* Select Qn over Qm bytes under P0, then write back under mask */
        uint64_t r = m[H8(e)];
        mergemask(&r, n[H8(e)], p0);
        mergemask(&d[H8(e)], r, mask);
    }
    mve_advance_vpt(env);
}

void HELPER(mve_vpnot)(CPUARMState *env)
{
    /*
     * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged.
     * P0 bits for predicated lanes in executed bits (where mask is 0) are 0.
     * P0 bits otherwise are inverted.
     * (This is the same logic as VCMP.)
     * This insn is itself subject to predication and to beat-wise execution,
     * and after it executes VPT state advances in the usual way.
     */
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    uint16_t beatpred = ~env->v7m.vpr & mask;
    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask);
    mve_advance_vpt(env);
}

/*
 * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed,
 * otherwise set according to value of Rn. The calculation of
 * newmask here works in the same way as the calculation of the
 * ltpmask in mve_element_mask(), but we have pre-calculated
 * the masklen in the generated code.
 */
void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen)
{
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    uint16_t newmask;

    assert(masklen <= 16);
    newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
    newmask &= mask;
    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask);
    mve_advance_vpt(env);
}

/*
 * 1-operand saturating operation: FN reports saturation through its
 * 'sat' pointer; saturation in a lane whose byte 0 is unpredicated
 * sets the cumulative QC flag.
 */
#define DO_1OP_SAT(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VQABS_B(N, SATP)                                             \
    do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQABS_H(N, SATP)                                             \
    do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQABS_W(N, SATP)                                             \
    do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP)

/* Negate at 64 bits, then saturate back into the element range */
#define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP)
#define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP)
#define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP)

DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B)
DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H)
DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W)

DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B)
DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H)
DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W)

/*
 * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its
 * absolute value; we then do an unsigned comparison.
 */
#define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN)                        \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        UTYPE *d = vd;                                                  \
        STYPE *m = vm;                                                  \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            UTYPE r = DO_ABS(m[H##ESIZE(e)]);                           \
            r = FN(d[H##ESIZE(e)], r);                                  \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX)
DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX)
DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX)
DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN)
DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN)
DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN)

/*
 * 2-operand floating point. Note that if an element is partially
 * predicated we must do the FP operation to update the non-predicated
 * bytes, but we must be careful to avoid updating the FP exception
 * state unless byte 0 of the element was unpredicated.
 */
#define DO_2OP_FP(OP, ESIZE, TYPE, FN)                                  \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;          /* fully predicated: skip element */ \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst);               \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_2OP_FP_ALL(OP, FN)                  \
    DO_2OP_FP(OP##h, 2, float16, float16_##FN) \
    DO_2OP_FP(OP##s, 4, float32, float32_##FN)

DO_2OP_FP_ALL(vfadd, add)
DO_2OP_FP_ALL(vfsub, sub)
DO_2OP_FP_ALL(vfmul, mul)

/* Absolute difference: |a - b| */
static inline float16 float16_abd(float16 a, float16 b, float_status
                                   *s)
{
    return float16_abs(float16_sub(a, b, s));
}

static inline float32 float32_abd(float32 a, float32 b, float_status *s)
{
    return float32_abs(float32_sub(a, b, s));
}

DO_2OP_FP_ALL(vfabd, abd)
DO_2OP_FP_ALL(vmaxnm, maxnum)
DO_2OP_FP_ALL(vminnm, minnum)

/* maxnum/minnum applied to the absolute values of both operands */
static inline float16 float16_maxnuma(float16 a, float16 b, float_status *s)
{
    return float16_maxnum(float16_abs(a), float16_abs(b), s);
}

static inline float32 float32_maxnuma(float32 a, float32 b, float_status *s)
{
    return float32_maxnum(float32_abs(a), float32_abs(b), s);
}

static inline float16 float16_minnuma(float16 a, float16 b, float_status *s)
{
    return float16_minnum(float16_abs(a), float16_abs(b), s);
}

static inline float32 float32_minnuma(float32 a, float32 b, float_status *s)
{
    return float32_minnum(float32_abs(a), float32_abs(b), s);
}

DO_2OP_FP_ALL(vmaxnma, maxnuma)
DO_2OP_FP_ALL(vminnma, minnuma)

/*
 * FP complex add: elements are handled in pairs, applying FN0 to the
 * even element and FN1 to the odd one; each result is computed into a
 * temporary array first so inputs are not overwritten mid-operation.
 */
#define DO_VCADD_FP(OP, ESIZE, TYPE, FN0, FN1)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r[16 / ESIZE];                                             \
        uint16_t tm, mask = mve_element_mask(env);                      \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        /* Calculate all results first to avoid overwriting inputs */   \
        for (e = 0, tm = mask; e < 16 / ESIZE; e++, tm >>= ESIZE) {     \
            if ((tm & MAKE_64BIT_MASK(0, ESIZE)) == 0) {                \
                r[e] = 0;                                               \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(tm & 1)) {                                            \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            if (!(e & 1)) {                                             \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)], fpst);   \
            } else {                                                    \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)], fpst);   \
            }                                                           \
        }                                                               \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], r[e], mask);                     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* 90-degree rotate: subtract into even lanes, add into odd lanes */
DO_VCADD_FP(vfcadd90h, 2, float16, float16_sub, float16_add)
DO_VCADD_FP(vfcadd90s, 4, float32, float32_sub, float32_add)
DO_VCADD_FP(vfcadd270h, 2, float16, float16_add, float16_sub)
DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub)

/*
 * Fused multiply-add: d = n * m + d, with the multiplicand negated
 * first when CHS is true (the VFMS forms).
 */
#define DO_VFMA(OP, ESIZE, TYPE, CHS)                                   \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = n[H##ESIZE(e)];                                         \
            if (CHS) {                                                  \
                r = TYPE##_chs(r);                                      \
            }                                                           \
            r = TYPE##_muladd(r, m[H##ESIZE(e)], d[H##ESIZE(e)],        \
                              0, fpst);                                 \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VFMA(vfmah, 2, float16, false)
DO_VFMA(vfmas, 4, float32, false)
DO_VFMA(vfmsh, 2, float16, true) 29563173c0ddSPeter Maydell DO_VFMA(vfmss, 4, float32, true) 2957d3cd965cSPeter Maydell 2958d3cd965cSPeter Maydell #define DO_VCMLA(OP, ESIZE, TYPE, ROT, FN) \ 2959d3cd965cSPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, \ 2960d3cd965cSPeter Maydell void *vd, void *vn, void *vm) \ 2961d3cd965cSPeter Maydell { \ 2962d3cd965cSPeter Maydell TYPE *d = vd, *n = vn, *m = vm; \ 2963d3cd965cSPeter Maydell TYPE r0, r1, e1, e2, e3, e4; \ 2964d3cd965cSPeter Maydell uint16_t mask = mve_element_mask(env); \ 2965d3cd965cSPeter Maydell unsigned e; \ 2966d3cd965cSPeter Maydell float_status *fpst0, *fpst1; \ 2967d3cd965cSPeter Maydell float_status scratch_fpst; \ 2968d3cd965cSPeter Maydell /* We loop through pairs of elements at a time */ \ 2969d3cd965cSPeter Maydell for (e = 0; e < 16 / ESIZE; e += 2, mask >>= ESIZE * 2) { \ 2970d3cd965cSPeter Maydell if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) { \ 2971d3cd965cSPeter Maydell continue; \ 2972d3cd965cSPeter Maydell } \ 2973d3cd965cSPeter Maydell fpst0 = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ 2974d3cd965cSPeter Maydell &env->vfp.standard_fp_status; \ 2975d3cd965cSPeter Maydell fpst1 = fpst0; \ 2976d3cd965cSPeter Maydell if (!(mask & 1)) { \ 2977d3cd965cSPeter Maydell scratch_fpst = *fpst0; \ 2978d3cd965cSPeter Maydell fpst0 = &scratch_fpst; \ 2979d3cd965cSPeter Maydell } \ 2980d3cd965cSPeter Maydell if (!(mask & (1 << ESIZE))) { \ 2981d3cd965cSPeter Maydell scratch_fpst = *fpst1; \ 2982d3cd965cSPeter Maydell fpst1 = &scratch_fpst; \ 2983d3cd965cSPeter Maydell } \ 2984d3cd965cSPeter Maydell switch (ROT) { \ 2985d3cd965cSPeter Maydell case 0: \ 2986d3cd965cSPeter Maydell e1 = m[H##ESIZE(e)]; \ 2987d3cd965cSPeter Maydell e2 = n[H##ESIZE(e)]; \ 2988d3cd965cSPeter Maydell e3 = m[H##ESIZE(e + 1)]; \ 2989d3cd965cSPeter Maydell e4 = n[H##ESIZE(e)]; \ 2990d3cd965cSPeter Maydell break; \ 2991d3cd965cSPeter Maydell case 1: \ 2992d3cd965cSPeter Maydell e1 = TYPE##_chs(m[H##ESIZE(e + 1)]); \ 2993d3cd965cSPeter Maydell e2 = n[H##ESIZE(e + 1)]; \ 2994d3cd965cSPeter Maydell e3 = m[H##ESIZE(e)]; \ 2995d3cd965cSPeter Maydell e4 = n[H##ESIZE(e + 1)]; \ 2996d3cd965cSPeter Maydell break; \ 2997d3cd965cSPeter Maydell case 2: \ 2998d3cd965cSPeter Maydell e1 = TYPE##_chs(m[H##ESIZE(e)]); \ 2999d3cd965cSPeter Maydell e2 = n[H##ESIZE(e)]; \ 3000d3cd965cSPeter Maydell e3 = TYPE##_chs(m[H##ESIZE(e + 1)]); \ 3001d3cd965cSPeter Maydell e4 = n[H##ESIZE(e)]; \ 3002d3cd965cSPeter Maydell break; \ 3003d3cd965cSPeter Maydell case 3: \ 3004d3cd965cSPeter Maydell e1 = m[H##ESIZE(e + 1)]; \ 3005d3cd965cSPeter Maydell e2 = n[H##ESIZE(e + 1)]; \ 3006d3cd965cSPeter Maydell e3 = TYPE##_chs(m[H##ESIZE(e)]); \ 3007d3cd965cSPeter Maydell e4 = n[H##ESIZE(e + 1)]; \ 3008d3cd965cSPeter Maydell break; \ 3009d3cd965cSPeter Maydell default: \ 3010d3cd965cSPeter Maydell g_assert_not_reached(); \ 3011d3cd965cSPeter Maydell } \ 3012d3cd965cSPeter Maydell r0 = FN(e2, e1, d[H##ESIZE(e)], fpst0); \ 3013d3cd965cSPeter Maydell r1 = FN(e4, e3, d[H##ESIZE(e + 1)], fpst1); \ 
3014d3cd965cSPeter Maydell mergemask(&d[H##ESIZE(e)], r0, mask); \ 3015d3cd965cSPeter Maydell mergemask(&d[H##ESIZE(e + 1)], r1, mask >> ESIZE); \ 3016d3cd965cSPeter Maydell } \ 3017d3cd965cSPeter Maydell mve_advance_vpt(env); \ 3018d3cd965cSPeter Maydell } 3019d3cd965cSPeter Maydell 3020d3cd965cSPeter Maydell #define DO_VCMULH(N, M, D, S) float16_mul(N, M, S) 3021d3cd965cSPeter Maydell #define DO_VCMULS(N, M, D, S) float32_mul(N, M, S) 3022d3cd965cSPeter Maydell 3023d3cd965cSPeter Maydell #define DO_VCMLAH(N, M, D, S) float16_muladd(N, M, D, 0, S) 3024d3cd965cSPeter Maydell #define DO_VCMLAS(N, M, D, S) float32_muladd(N, M, D, 0, S) 3025d3cd965cSPeter Maydell 3026d3cd965cSPeter Maydell DO_VCMLA(vcmul0h, 2, float16, 0, DO_VCMULH) 3027d3cd965cSPeter Maydell DO_VCMLA(vcmul0s, 4, float32, 0, DO_VCMULS) 3028d3cd965cSPeter Maydell DO_VCMLA(vcmul90h, 2, float16, 1, DO_VCMULH) 3029d3cd965cSPeter Maydell DO_VCMLA(vcmul90s, 4, float32, 1, DO_VCMULS) 3030d3cd965cSPeter Maydell DO_VCMLA(vcmul180h, 2, float16, 2, DO_VCMULH) 3031d3cd965cSPeter Maydell DO_VCMLA(vcmul180s, 4, float32, 2, DO_VCMULS) 3032d3cd965cSPeter Maydell DO_VCMLA(vcmul270h, 2, float16, 3, DO_VCMULH) 3033d3cd965cSPeter Maydell DO_VCMLA(vcmul270s, 4, float32, 3, DO_VCMULS) 3034d3cd965cSPeter Maydell 3035d3cd965cSPeter Maydell DO_VCMLA(vcmla0h, 2, float16, 0, DO_VCMLAH) 3036d3cd965cSPeter Maydell DO_VCMLA(vcmla0s, 4, float32, 0, DO_VCMLAS) 3037d3cd965cSPeter Maydell DO_VCMLA(vcmla90h, 2, float16, 1, DO_VCMLAH) 3038d3cd965cSPeter Maydell DO_VCMLA(vcmla90s, 4, float32, 1, DO_VCMLAS) 3039d3cd965cSPeter Maydell DO_VCMLA(vcmla180h, 2, float16, 2, DO_VCMLAH) 3040d3cd965cSPeter Maydell DO_VCMLA(vcmla180s, 4, float32, 2, DO_VCMLAS) 3041d3cd965cSPeter Maydell DO_VCMLA(vcmla270h, 2, float16, 3, DO_VCMLAH) 3042d3cd965cSPeter Maydell DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS) 3043abfe39b2SPeter Maydell 3044abfe39b2SPeter Maydell #define DO_2OP_FP_SCALAR(OP, ESIZE, TYPE, FN) \ 3045abfe39b2SPeter Maydell void 
HELPER(glue(mve_, OP))(CPUARMState *env, \ 3046abfe39b2SPeter Maydell void *vd, void *vn, uint32_t rm) \ 3047abfe39b2SPeter Maydell { \ 3048abfe39b2SPeter Maydell TYPE *d = vd, *n = vn; \ 3049abfe39b2SPeter Maydell TYPE r, m = rm; \ 3050abfe39b2SPeter Maydell uint16_t mask = mve_element_mask(env); \ 3051abfe39b2SPeter Maydell unsigned e; \ 3052abfe39b2SPeter Maydell float_status *fpst; \ 3053abfe39b2SPeter Maydell float_status scratch_fpst; \ 3054abfe39b2SPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3055abfe39b2SPeter Maydell if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3056abfe39b2SPeter Maydell continue; \ 3057abfe39b2SPeter Maydell } \ 3058abfe39b2SPeter Maydell fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 3059abfe39b2SPeter Maydell &env->vfp.standard_fp_status; \ 3060abfe39b2SPeter Maydell if (!(mask & 1)) { \ 3061abfe39b2SPeter Maydell /* We need the result but without updating flags */ \ 3062abfe39b2SPeter Maydell scratch_fpst = *fpst; \ 3063abfe39b2SPeter Maydell fpst = &scratch_fpst; \ 3064abfe39b2SPeter Maydell } \ 3065abfe39b2SPeter Maydell r = FN(n[H##ESIZE(e)], m, fpst); \ 3066abfe39b2SPeter Maydell mergemask(&d[H##ESIZE(e)], r, mask); \ 3067abfe39b2SPeter Maydell } \ 3068abfe39b2SPeter Maydell mve_advance_vpt(env); \ 3069abfe39b2SPeter Maydell } 3070abfe39b2SPeter Maydell 3071abfe39b2SPeter Maydell #define DO_2OP_FP_SCALAR_ALL(OP, FN) \ 3072abfe39b2SPeter Maydell DO_2OP_FP_SCALAR(OP##h, 2, float16, float16_##FN) \ 3073abfe39b2SPeter Maydell DO_2OP_FP_SCALAR(OP##s, 4, float32, float32_##FN) 3074abfe39b2SPeter Maydell 3075abfe39b2SPeter Maydell DO_2OP_FP_SCALAR_ALL(vfadd_scalar, add) 3076abfe39b2SPeter Maydell DO_2OP_FP_SCALAR_ALL(vfsub_scalar, sub) 3077abfe39b2SPeter Maydell DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul) 30784773e74eSPeter Maydell 30794773e74eSPeter Maydell #define DO_2OP_FP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ 30804773e74eSPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, \ 30814773e74eSPeter 
Maydell void *vd, void *vn, uint32_t rm) \ 30824773e74eSPeter Maydell { \ 30834773e74eSPeter Maydell TYPE *d = vd, *n = vn; \ 30844773e74eSPeter Maydell TYPE r, m = rm; \ 30854773e74eSPeter Maydell uint16_t mask = mve_element_mask(env); \ 30864773e74eSPeter Maydell unsigned e; \ 30874773e74eSPeter Maydell float_status *fpst; \ 30884773e74eSPeter Maydell float_status scratch_fpst; \ 30894773e74eSPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 30904773e74eSPeter Maydell if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 30914773e74eSPeter Maydell continue; \ 30924773e74eSPeter Maydell } \ 30934773e74eSPeter Maydell fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 30944773e74eSPeter Maydell &env->vfp.standard_fp_status; \ 30954773e74eSPeter Maydell if (!(mask & 1)) { \ 30964773e74eSPeter Maydell /* We need the result but without updating flags */ \ 30974773e74eSPeter Maydell scratch_fpst = *fpst; \ 30984773e74eSPeter Maydell fpst = &scratch_fpst; \ 30994773e74eSPeter Maydell } \ 31004773e74eSPeter Maydell r = FN(n[H##ESIZE(e)], m, d[H##ESIZE(e)], 0, fpst); \ 31014773e74eSPeter Maydell mergemask(&d[H##ESIZE(e)], r, mask); \ 31024773e74eSPeter Maydell } \ 31034773e74eSPeter Maydell mve_advance_vpt(env); \ 31044773e74eSPeter Maydell } 31054773e74eSPeter Maydell 31064773e74eSPeter Maydell /* VFMAS is vector * vector + scalar, so swap op2 and op3 */ 31074773e74eSPeter Maydell #define DO_VFMAS_SCALARH(N, M, D, F, S) float16_muladd(N, D, M, F, S) 31084773e74eSPeter Maydell #define DO_VFMAS_SCALARS(N, M, D, F, S) float32_muladd(N, D, M, F, S) 31094773e74eSPeter Maydell 31104773e74eSPeter Maydell /* VFMA is vector * scalar + vector */ 31114773e74eSPeter Maydell DO_2OP_FP_ACC_SCALAR(vfma_scalarh, 2, float16, float16_muladd) 31124773e74eSPeter Maydell DO_2OP_FP_ACC_SCALAR(vfma_scalars, 4, float32, float32_muladd) 31134773e74eSPeter Maydell DO_2OP_FP_ACC_SCALAR(vfmas_scalarh, 2, float16, DO_VFMAS_SCALARH) 31144773e74eSPeter Maydell 
DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS)

/*
 * Floating point max/min across vector.  Folds the scalar accumulator
 * ra_in across all active elements via FN (a maxnum/minnum flavour).
 * ABS takes the absolute value of each element first (the VMAXNMAV /
 * VMINNMAV forms).  Signalling NaNs in either operand are silenced
 * and raise Invalid before FN is applied, so FN then follows the
 * quiet-NaN maxnum/minnum rules.
 */
#define DO_FP_VMAXMINV(OP, ESIZE, TYPE, ABS, FN)                        \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm,         \
                                    uint32_t ra_in)                     \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *m = vm;                                                   \
        TYPE ra = (TYPE)ra_in;                                          \
        float_status *fpst = (ESIZE == 2) ?                             \
            &env->vfp.standard_fp_status_f16 :                          \
            &env->vfp.standard_fp_status;                               \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                TYPE v = m[H##ESIZE(e)];                                \
                if (TYPE##_is_signaling_nan(ra, fpst)) {                \
                    ra = TYPE##_silence_nan(ra, fpst);                  \
                    float_raise(float_flag_invalid, fpst);              \
                }                                                       \
                if (TYPE##_is_signaling_nan(v, fpst)) {                 \
                    v = TYPE##_silence_nan(v, fpst);                    \
                    float_raise(float_flag_invalid, fpst);              \
                }                                                       \
                if (ABS) {                                              \
                    v = TYPE##_abs(v);                                  \
                }                                                       \
                ra = FN(ra, v, fpst);                                   \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return ra;                                                      \
    }                                                                   \

/* Identity wrapper; not used by the expansions below (TODO confirm users) */
#define NOP(X) (X)

DO_FP_VMAXMINV(vmaxnmvh, 2, float16, false, float16_maxnum)
DO_FP_VMAXMINV(vmaxnmvs, 4, float32, false, float32_maxnum)
DO_FP_VMAXMINV(vminnmvh, 2, float16, false, float16_minnum)
DO_FP_VMAXMINV(vminnmvs, 4, float32, false, float32_minnum)
DO_FP_VMAXMINV(vmaxnmavh, 2, float16, true, float16_maxnum)
DO_FP_VMAXMINV(vmaxnmavs, 4, float32, true, float32_maxnum)
DO_FP_VMAXMINV(vminnmavh, 2, float16, true, float16_minnum)
DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum)

/*
 * FP compares; note that all comparisons signal InvalidOp for QNaNs.
 * The per-element boolean results are written back into VPR.P0, but
 * only for the beats this insn is executing (eci_mask); bits for
 * already-executed beats are left unchanged.
 */
#define DO_VCMP_FP(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm)   \
    {                                                                   \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        bool r;                                                         \
        /* emask walks up the mask, one element's worth of bits each */ \
        for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) {             \
            if ((mask & emask) == 0) {                                  \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & (1 << (e * ESIZE)))) {                         \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst);               \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

/* As DO_VCMP_FP but comparing each element against the scalar rm */
#define DO_VCMP_FP_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,             \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *n = vn;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        bool r;                                                         \
        for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) {             \
            if ((mask & emask) == 0) {                                  \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & (1 << (e * ESIZE)))) {                         \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], (TYPE)rm, fpst);                     \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

/* Expand the vector and scalar forms of one comparison together */
#define DO_VCMP_FP_BOTH(VOP, SOP, ESIZE, TYPE, FN)                      \
    DO_VCMP_FP(VOP, ESIZE, TYPE, FN)                                    \
    DO_VCMP_FP_SCALAR(SOP, ESIZE, TYPE, FN)

/*
 * Some care is needed here to get the correct result for the unordered case.
 * Architecturally EQ, GE and GT are defined to be false for unordered, but
 * the NE, LT and LE comparisons are defined as simple logical inverses of
 * EQ, GE and GT and so they must return true for unordered. The softfloat
 * comparison functions float*_{eq,le,lt} all return false for unordered.
 */
#define DO_GE16(X, Y, S) float16_le(Y, X, S)
#define DO_GE32(X, Y, S) float32_le(Y, X, S)
#define DO_GT16(X, Y, S) float16_lt(Y, X, S)
#define DO_GT32(X, Y, S) float32_lt(Y, X, S)

DO_VCMP_FP_BOTH(vfcmpeqh, vfcmpeq_scalarh, 2, float16, float16_eq)
DO_VCMP_FP_BOTH(vfcmpeqs, vfcmpeq_scalars, 4, float32, float32_eq)

DO_VCMP_FP_BOTH(vfcmpneh, vfcmpne_scalarh, 2, float16, !float16_eq)
DO_VCMP_FP_BOTH(vfcmpnes, vfcmpne_scalars, 4, float32, !float32_eq)

DO_VCMP_FP_BOTH(vfcmpgeh, vfcmpge_scalarh, 2, float16, DO_GE16)
DO_VCMP_FP_BOTH(vfcmpges, vfcmpge_scalars, 4, float32, DO_GE32)

DO_VCMP_FP_BOTH(vfcmplth, vfcmplt_scalarh, 2, float16, !DO_GE16)
DO_VCMP_FP_BOTH(vfcmplts, vfcmplt_scalars, 4, float32, !DO_GE32)

DO_VCMP_FP_BOTH(vfcmpgth, vfcmpgt_scalarh, 2, float16, DO_GT16)
DO_VCMP_FP_BOTH(vfcmpgts, vfcmpgt_scalars, 4, float32, DO_GT32)

DO_VCMP_FP_BOTH(vfcmpleh, vfcmple_scalarh, 2, float16, !DO_GT16)
DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32)

/*
 * Conversions between floating point and fixed point, with the
 * fixed-point position given by 'shift'.  FN is one of the vfp
 * conversion helpers taking (value, shift, float_status).
 */
#define DO_VCVT_FIXED(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm,   \
                                uint32_t shift)                         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(m[H##ESIZE(e)], shift, fpst);                        \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VCVT_FIXED(vcvt_sh, 2, int16_t, helper_vfp_shtoh)
DO_VCVT_FIXED(vcvt_uh, 2, uint16_t, helper_vfp_uhtoh)
DO_VCVT_FIXED(vcvt_hs, 2, int16_t, helper_vfp_toshh_round_to_zero)
DO_VCVT_FIXED(vcvt_hu, 2, uint16_t, helper_vfp_touhh_round_to_zero)
DO_VCVT_FIXED(vcvt_sf, 4, int32_t, helper_vfp_sltos)
DO_VCVT_FIXED(vcvt_uf, 4, uint32_t, helper_vfp_ultos)
DO_VCVT_FIXED(vcvt_fs, 4, int32_t, helper_vfp_tosls_round_to_zero)
DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero)

/*
 * VCVT with specified rmode.  The requested rounding mode is set on
 * the shared base float_status for the duration of the loop and
 * restored afterwards; predicated-off lanes still use a scratch copy
 * (which inherits the new rounding mode) so they can't raise flags.
 */
#define DO_VCVT_RMODE(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vm, uint32_t rmode)     \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        float_status *base_fpst = (ESIZE == 2) ?                        \
            &env->vfp.standard_fp_status_f16 :                          \
            &env->vfp.standard_fp_status;                               \
        uint32_t prev_rmode = get_float_rounding_mode(base_fpst);       \
        set_float_rounding_mode(rmode, base_fpst);                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = base_fpst;                                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(m[H##ESIZE(e)], 0, fpst);                            \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        set_float_rounding_mode(prev_rmode, base_fpst);                 \
        mve_advance_vpt(env);                                           \
    }

DO_VCVT_RMODE(vcvt_rm_sh, 2, uint16_t, helper_vfp_toshh)
DO_VCVT_RMODE(vcvt_rm_uh, 2, uint16_t, helper_vfp_touhh)
DO_VCVT_RMODE(vcvt_rm_ss, 4, uint32_t, helper_vfp_tosls)
DO_VCVT_RMODE(vcvt_rm_us, 4, uint32_t, helper_vfp_touls)
/*
 * VRINT with specified rounding mode, implemented by reusing the
 * DO_VCVT_RMODE expander: the helper_rint* callbacks simply drop the
 * unused shift argument F.
 */
#define DO_VRINT_RM_H(M, F, S) helper_rinth(M, S)
#define DO_VRINT_RM_S(M, F, S) helper_rints(M, S)

DO_VCVT_RMODE(vrint_rm_h, 2, uint16_t, DO_VRINT_RM_H)
DO_VCVT_RMODE(vrint_rm_s, 4, uint32_t, DO_VRINT_RM_S)

/*
 * VCVT between halfprec and singleprec. As usual for halfprec
 * conversions, FZ16 is ignored and AHP is observed.
 */
static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top)
{
    /*
     * Narrow f32 -> f16; 'top' selects whether results land in the
     * top (odd) or bottom (even) f16 slot of each 32-bit container.
     * Output flush-to-zero is temporarily disabled for the conversion.
     */
    uint16_t *d = vd;
    uint32_t *m = vm;
    uint16_t r;
    uint16_t mask = mve_element_mask(env);
    bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP);
    unsigned e;
    float_status *fpst;
    float_status scratch_fpst;
    float_status *base_fpst = &env->vfp.standard_fp_status;
    bool old_fz = get_flush_to_zero(base_fpst);
    set_flush_to_zero(false, base_fpst);
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
            continue;
        }
        fpst = base_fpst;
        if (!(mask & 1)) {
            /* We need the result but without updating flags */
            scratch_fpst = *fpst;
            fpst = &scratch_fpst;
        }
        r = float32_to_float16(m[H4(e)], ieee, fpst);
        mergemask(&d[H2(e * 2 + top)], r, mask >> (top * 2));
    }
    set_flush_to_zero(old_fz, base_fpst);
    mve_advance_vpt(env);
}

static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top)
{
    /*
     * Widen f16 -> f32, reading from the top or bottom f16 slot of
     * each 32-bit container according to 'top'.  Here it is input
     * flush-to-zero that is temporarily disabled (FZ16 is ignored).
     */
    uint32_t *d = vd;
    uint16_t *m = vm;
    uint32_t r;
    uint16_t mask = mve_element_mask(env);
    bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP);
    unsigned e;
    float_status *fpst;
    float_status scratch_fpst;
    float_status *base_fpst = &env->vfp.standard_fp_status;
    bool old_fiz = get_flush_inputs_to_zero(base_fpst);
    set_flush_inputs_to_zero(false, base_fpst);
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
            continue;
        }
        fpst = base_fpst;
        if (!(mask & (1 << (top * 2)))) {
            /* We need the result but without updating flags */
            scratch_fpst = *fpst;
            fpst = &scratch_fpst;
        }
        r = float16_to_float32(m[H2(e * 2 + top)], ieee, fpst);
        mergemask(&d[H4(e)], r, mask);
    }
    set_flush_inputs_to_zero(old_fiz, base_fpst);
    mve_advance_vpt(env);
}

/* VCVTB/VCVTT entry points: bottom (top == 0) or top (top == 1) half */
void HELPER(mve_vcvtb_sh)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_sh(env, vd, vm, 0);
}
void HELPER(mve_vcvtt_sh)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_sh(env, vd, vm, 1);
}
void HELPER(mve_vcvtb_hs)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_hs(env, vd, vm, 0);
}
void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_hs(env, vd, vm, 1);
}

/*
 * Generic 1-operand FP op: Qd[e] = FN(Qm[e]) per active element,
 * with the usual scratch-float_status trick for lanes whose flag
 * updates must be suppressed.
 */
#define DO_1OP_FP(OP, ESIZE, TYPE, FN)                                  \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm)   \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(m[H##ESIZE(e)], fpst);                               \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_1OP_FP(vrintx_h, 2, float16, float16_round_to_int)
DO_1OP_FP(vrintx_s, 4, float32, float32_round_to_int)