1507b6a50SPeter Maydell /*
2507b6a50SPeter Maydell * M-profile MVE Operations
3507b6a50SPeter Maydell *
4507b6a50SPeter Maydell * Copyright (c) 2021 Linaro, Ltd.
5507b6a50SPeter Maydell *
6507b6a50SPeter Maydell * This library is free software; you can redistribute it and/or
7507b6a50SPeter Maydell * modify it under the terms of the GNU Lesser General Public
8507b6a50SPeter Maydell * License as published by the Free Software Foundation; either
9507b6a50SPeter Maydell * version 2.1 of the License, or (at your option) any later version.
10507b6a50SPeter Maydell *
11507b6a50SPeter Maydell * This library is distributed in the hope that it will be useful,
12507b6a50SPeter Maydell * but WITHOUT ANY WARRANTY; without even the implied warranty of
13507b6a50SPeter Maydell * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14507b6a50SPeter Maydell * Lesser General Public License for more details.
15507b6a50SPeter Maydell *
16507b6a50SPeter Maydell * You should have received a copy of the GNU Lesser General Public
17507b6a50SPeter Maydell * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18507b6a50SPeter Maydell */
19507b6a50SPeter Maydell
20507b6a50SPeter Maydell #include "qemu/osdep.h"
21507b6a50SPeter Maydell #include "cpu.h"
22507b6a50SPeter Maydell #include "internals.h"
23507b6a50SPeter Maydell #include "vec_internal.h"
24507b6a50SPeter Maydell #include "exec/helper-proto.h"
25*42fa9665SPhilippe Mathieu-Daudé #include "accel/tcg/cpu-ldst.h"
2659c91773SPeter Maydell #include "tcg/tcg.h"
271e35cd91SPeter Maydell #include "fpu/softfloat.h"
288e3da4c7SRichard Henderson #include "crypto/clmul.h"
29507b6a50SPeter Maydell
static uint16_t mve_eci_mask(CPUARMState *env)
{
    /*
     * Return a 16-bit mask describing which elements of the MVE vector
     * correspond to beats that will actually execute: 1 bits for live
     * lanes, 0 bits where ECI records that the beat already ran.
     */
    if ((env->condexec_bits & 0xf) != 0) {
        /* Low condexec bits set: no ECI skipping, every beat executes */
        return 0xffff;
    }

    switch (env->condexec_bits >> 4) {
    case ECI_NONE:
        return 0xffff;
    case ECI_A0:
        return 0xfff0;
    case ECI_A0A1:
        return 0xff00;
    case ECI_A0A1A2:
    case ECI_A0A1A2B0:
        return 0xf000;
    default:
        g_assert_not_reached();
    }
}
58e0d40070SPeter Maydell
static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. Several mechanisms combine here:
     * (1) by default, every lane in the vector is updated
     * (2) VPT predication stores its state in the VPR register
     * (3) low-overhead-branch tail predication masks out the tail of
     *     the vector on the final loop iteration
     * (4) if EPSR.ECI is set only some beats of the insn execute
     * The result is a 16-bit value with the same semantics as VPR.P0:
     * 0 masks the lane, 1 means it is active.
     * 8-bit vector ops look at all bits of the result;
     * 16-bit ops look at bits 0, 2, 4, ...;
     * 32-bit ops look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint32_t vpr = env->v7m.vpr;
    uint16_t pred = FIELD_EX32(vpr, V7M_VPR, P0);

    /* Halves of the vector with VPT masking disabled are fully active */
    if (!(vpr & R_V7M_VPR_MASK01_MASK)) {
        pred |= 0x00ff;
    }
    if (!(vpr & R_V7M_VPR_MASK23_MASK)) {
        pred |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize); only loopcount elements
         * should be processed, so keep the least significant
         * (loopcount * esize) predicate bits and clear everything above.
         */
        int live_bits = env->regs[14] << env->v7m.ltpsize;
        assert(live_bits <= 16);
        pred &= live_bits ? MAKE_64BIT_MASK(0, live_bits) : 0;
    }

    /*
     * ECI bits indicate which beats are already executed;
     * handle this by effectively predicating them out.
     */
    pred &= mve_eci_mask(env);
    return pred;
}
108507b6a50SPeter Maydell
static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    /* Capture the executed-beat mask before ECI state is advanced below */
    uint16_t eci_mask = mve_eci_mask(env);
    unsigned mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    unsigned mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    uint16_t flip_mask;

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    /* Invert P0 bits if needed, but only for beats we actually executed */
    flip_mask = 0;
    if (mask01 > 8) {
        /* MASK01 says invert the low half of P0 */
        flip_mask |= eci_mask & 0x00ff;
    }
    if (mask23 > 8) {
        /* MASK23 says invert the high half of P0 */
        flip_mask |= eci_mask & 0xff00;
    }
    vpr ^= flip_mask;

    if (eci_mask & 0xf0) {
        /* Only update MASK01 if beat 1 executed */
        vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    }
    /* Beat 3 always executes, so update MASK23 */
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}
149507b6a50SPeter Maydell
15041704cc2SPeter Maydell /* For loads, predicated lanes are zeroed instead of keeping their old values */
/*
 * DO_VLDR: define a predicated MVE contiguous-load helper.
 *  OP:     helper name suffix
 *  MSIZE:  size in bytes of each memory access
 *  LDTYPE: cpu_*_data_ra load-function infix (ldub, ldsw, ...)
 *  ESIZE:  size in bytes of each destination vector element
 *  TYPE:   C type of a destination vector element
 */
#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned b, e;                                                  \
        /*                                                              \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned   \
         * beats so we don't care if we update part of the dest and     \
         * then take an exception.                                      \
         */                                                             \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (eci_mask & (1 << b)) {                                  \
                /* Beat executes: load if predicated-in, else zero */   \
                d[H##ESIZE(e)] = (mask & (1 << b)) ?                    \
                    cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0;     \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
172507b6a50SPeter Maydell
/*
 * DO_VSTR: define a predicated MVE contiguous-store helper.
 *  OP:     helper name suffix
 *  MSIZE:  size in bytes of each memory access
 *  STTYPE: cpu_*_data_ra store-function infix (stb, stw, ...)
 *  ESIZE:  size in bytes of each source vector element
 *  TYPE:   C type of a source vector element
 * Predicated-out lanes store nothing and leave memory untouched.
 */
#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
187507b6a50SPeter Maydell
/* Same-size accesses: memory element size == vector element size */
DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

/* Widening loads: a narrow memory value fills a wider vector element */
DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

/* Narrowing stores: only the low part of each wider element is stored */
DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR
2090f0f2bd5SPeter Maydell
2100f0f2bd5SPeter Maydell /*
211dc18628bSPeter Maydell * Gather loads/scatter stores. Here each element of Qm specifies
212dc18628bSPeter Maydell * an offset to use from the base register Rm. In the _os_ versions
213dc18628bSPeter Maydell * that offset is scaled by the element size.
214dc18628bSPeter Maydell * For loads, predicated lanes are zeroed instead of retaining
215dc18628bSPeter Maydell * their previous values.
216dc18628bSPeter Maydell */
/*
 * DO_VLDR_SG: define a gather-load helper.
 *  OP:      helper name suffix
 *  LDTYPE:  cpu_*_data_ra load-function infix
 *  ESIZE:   size in bytes of each vector element
 *  TYPE:    C type of a destination vector element
 *  OFFTYPE: C type of an offset element in Qm
 *  ADDRFN:  macro combining the base address with an offset element
 *  WB:      true to write the computed address back into Qm
 */
#define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB)        \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        TYPE *d = vd;                                                   \
        OFFTYPE *m = vm;                                                \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
            if (!(eci_mask & 1)) {                                      \
                /* ECI says this beat was already executed */           \
                continue;                                               \
            }                                                           \
            addr = ADDRFN(base, m[H##ESIZE(e)]);                        \
            d[H##ESIZE(e)] = (mask & 1) ?                               \
                cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0;         \
            if (WB) {                                                   \
                m[H##ESIZE(e)] = addr;                                  \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
240dc18628bSPeter Maydell
241dc18628bSPeter Maydell /* We know here TYPE is unsigned so always the same as the offset type */
/*
 * DO_VSTR_SG: define a scatter-store helper.
 *  OP:     helper name suffix
 *  STTYPE: cpu_*_data_ra store-function infix
 *  ESIZE:  size in bytes of each vector element
 *  TYPE:   C type of both the data and the offset elements
 *  ADDRFN: macro combining the base address with an offset element
 *  WB:     true to write the computed address back into Qm
 */
#define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB)                 \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        TYPE *d = vd;                                                   \
        TYPE *m = vm;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
            if (!(eci_mask & 1)) {                                      \
                /* ECI says this beat was already executed */           \
                continue;                                               \
            }                                                           \
            addr = ADDRFN(base, m[H##ESIZE(e)]);                        \
            if (mask & 1) {                                             \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            if (WB) {                                                   \
                m[H##ESIZE(e)] = addr;                                  \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
266dc18628bSPeter Maydell
267dc18628bSPeter Maydell /*
268dc18628bSPeter Maydell * 64-bit accesses are slightly different: they are done as two 32-bit
269dc18628bSPeter Maydell * accesses, controlled by the predicate mask for the relevant beat,
270dc18628bSPeter Maydell * and with a single 32-bit offset in the first of the two Qm elements.
271dc18628bSPeter Maydell * Note that for QEMU our IMPDEF AIRCR.ENDIANNESS is always 0 (little).
272fac80f08SPeter Maydell * Address writeback happens on the odd beats and updates the address
273fac80f08SPeter Maydell * stored in the even-beat element.
274dc18628bSPeter Maydell */
/*
 * DO_VLDR64_SG: 64-bit gather load, implemented as two 32-bit accesses
 * per 64-bit element; the shared 32-bit offset lives in the even-beat
 * element of Qm (see the comment above for the full layout).
 */
#define DO_VLDR64_SG(OP, ADDRFN, WB)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        uint32_t *d = vd;                                               \
        uint32_t *m = vm;                                               \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) {      \
            if (!(eci_mask & 1)) {                                      \
                /* ECI says this beat was already executed */           \
                continue;                                               \
            }                                                           \
            /* Offset is in the even element; odd beat is 4 bytes on */ \
            addr = ADDRFN(base, m[H4(e & ~1)]);                         \
            addr += 4 * (e & 1);                                        \
            d[H4(e)] = (mask & 1) ? cpu_ldl_data_ra(env, addr, GETPC()) : 0; \
            if (WB && (e & 1)) {                                        \
                /* Writeback on the odd beat, into the even element */  \
                m[H4(e & ~1)] = addr - 4;                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
298dc18628bSPeter Maydell
/*
 * DO_VSTR64_SG: 64-bit scatter store, implemented as two 32-bit accesses
 * per 64-bit element; the shared 32-bit offset lives in the even-beat
 * element of Qm (see the comment above for the full layout).
 */
#define DO_VSTR64_SG(OP, ADDRFN, WB)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        uint32_t *d = vd;                                               \
        uint32_t *m = vm;                                               \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) {      \
            if (!(eci_mask & 1)) {                                      \
                /* ECI says this beat was already executed */           \
                continue;                                               \
            }                                                           \
            /* Offset is in the even element; odd beat is 4 bytes on */ \
            addr = ADDRFN(base, m[H4(e & ~1)]);                         \
            addr += 4 * (e & 1);                                        \
            if (mask & 1) {                                             \
                cpu_stl_data_ra(env, addr, d[H4(e)], GETPC());          \
            }                                                           \
            if (WB && (e & 1)) {                                        \
                /* Writeback on the odd beat, into the even element */  \
                m[H4(e & ~1)] = addr - 4;                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
324dc18628bSPeter Maydell
/* Address construction: unscaled, and offset scaled by element size */
#define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET))
#define ADDR_ADD_OSH(BASE, OFFSET) ((BASE) + ((OFFSET) << 1)) /* offset * 2 */
#define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2)) /* offset * 4 */
#define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3)) /* offset * 8 */
329dc18628bSPeter Maydell
/* Sign-extending gather loads, unscaled offsets */
DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false)

/* Zero-extending gather loads, unscaled offsets */
DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false)

/* Gather loads with offsets scaled by the element size (_os_ forms) */
DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false)
DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false)

/* Scatter stores, unscaled offsets */
DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false)
DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false)
DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false)
DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false)
DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false)
DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false)
DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false)

/* Scatter stores with offsets scaled by the element size (_os_ forms) */
DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false)
DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false)
DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false)
DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false)

/* Writeback (_wb_) forms: the computed address is written back to Qm */
DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true)
DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true)
DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true)
DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)
365dc18628bSPeter Maydell
366dc18628bSPeter Maydell /*
367075e7e97SPeter Maydell * Deinterleaving loads/interleaving stores.
368075e7e97SPeter Maydell *
369075e7e97SPeter Maydell * For these helpers we are passed the index of the first Qreg
370075e7e97SPeter Maydell * (VLD2/VST2 will also access Qn+1, VLD4/VST4 access Qn .. Qn+3)
371075e7e97SPeter Maydell * and the value of the base address register Rn.
372075e7e97SPeter Maydell * The helpers are specialized for pattern and element size, so
373075e7e97SPeter Maydell * for instance vld42h is VLD4 with pattern 2, element size MO_16.
374075e7e97SPeter Maydell *
375075e7e97SPeter Maydell * These insns are beatwise but not predicated, so we must honour ECI,
376075e7e97SPeter Maydell * but need not look at mve_element_mask().
377075e7e97SPeter Maydell *
378075e7e97SPeter Maydell * The pseudocode implements these insns with multiple memory accesses
379075e7e97SPeter Maydell * of the element size, but rules R_VVVG and R_FXDM permit us to make
380075e7e97SPeter Maydell * one 32-bit memory access per beat.
381075e7e97SPeter Maydell */
/*
 * DO_VLD4B: VLD4 for 8-bit elements. Each executed beat loads one
 * little-endian 32-bit word at base + off[beat] * 4 and scatters its
 * four bytes, one into each of the four Qregs Qn..Qn+3, at byte index
 * off[beat]. O1..O4 give the per-beat offsets for this pattern.
 */
#define DO_VLD4B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            for (e = 0; e < 4; e++, data >>= 8) {                       \
                uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
                qd[H1(off[beat])] = data;                               \
            }                                                           \
        }                                                               \
    }
403075e7e97SPeter Maydell
/*
 * DO_VLD4H: VLD4 for 16-bit elements. Each executed beat loads one
 * little-endian 32-bit word and splits it into two halfwords, written
 * to Qregs Qn+y and Qn+y+1 at halfword index off[beat], where y
 * alternates 0, 2 across the beats. O1/O2 select the pattern offsets.
 */
#define DO_VLD4H(OP, O1, O2)                                            \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O1, O2, O2 };               \
        uint32_t addr, data;                                            \
        int y; /* y counts 0 2 0 2 */                                   \
        uint16_t *qd;                                                   \
        for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) {   \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 8 + (beat & 1) * 4;               \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y);             \
            qd[H2(off[beat])] = data;                                   \
            data >>= 16;                                                \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1);         \
            qd[H2(off[beat])] = data;                                   \
        }                                                               \
    }
428075e7e97SPeter Maydell
/*
 * DO_VLD4W: VLD4 for 32-bit elements. Each executed beat loads one
 * little-endian 32-bit word into Qreg Qn+y, where y is derived from
 * the beat number and the pattern (O1..O4 are the per-beat offsets).
 */
#define DO_VLD4W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        int y;                                                          \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            y = (beat + (O1 & 2)) & 3;                                  \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y);             \
            qd[H4(off[beat] >> 2)] = data;                              \
        }                                                               \
    }
451075e7e97SPeter Maydell
/* VLD4 helpers: vld4<pattern><elementsize>, patterns 0..3 */
DO_VLD4B(vld40b, 0, 1, 10, 11)
DO_VLD4B(vld41b, 2, 3, 12, 13)
DO_VLD4B(vld42b, 4, 5, 14, 15)
DO_VLD4B(vld43b, 6, 7, 8, 9)

DO_VLD4H(vld40h, 0, 5)
DO_VLD4H(vld41h, 1, 6)
DO_VLD4H(vld42h, 2, 7)
DO_VLD4H(vld43h, 3, 4)

DO_VLD4W(vld40w, 0, 1, 10, 11)
DO_VLD4W(vld41w, 2, 3, 12, 13)
DO_VLD4W(vld42w, 4, 5, 14, 15)
DO_VLD4W(vld43w, 6, 7, 8, 9)
466075e7e97SPeter Maydell
/*
 * DO_VLD2B: VLD2 for 8-bit elements. Each executed beat loads one
 * little-endian 32-bit word and deinterleaves its four bytes into
 * Qn and Qn+1 (alternating), at byte index off[beat] + (e >> 1).
 */
#define DO_VLD2B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint8_t *qd;                                                    \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 2;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            for (e = 0; e < 4; e++, data >>= 8) {                       \
                qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1));    \
                qd[H1(off[beat] + (e >> 1))] = data;                    \
            }                                                           \
        }                                                               \
    }
489075e7e97SPeter Maydell
/*
 * DO_VLD2H: VLD2 for 16-bit elements. Each executed beat loads one
 * little-endian 32-bit word and deinterleaves its two halfwords into
 * Qn and Qn+1 at halfword index off[beat].
 */
#define DO_VLD2H(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        int e;                                                          \
        uint16_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            for (e = 0; e < 2; e++, data >>= 16) {                      \
                qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e);         \
                qd[H2(off[beat])] = data;                               \
            }                                                           \
        }                                                               \
    }
513075e7e97SPeter Maydell
/*
 * DO_VLD2W: VLD2 for 32-bit elements. Each executed beat loads one
 * little-endian 32-bit word at base + off[beat] into Qn or Qn+1
 * (alternating by beat) at word index off[beat] >> 3.
 */
#define DO_VLD2W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat];                                    \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1));    \
            qd[H4(off[beat] >> 3)] = data;                              \
        }                                                               \
    }
534075e7e97SPeter Maydell
/* VLD2 helpers: vld2<pattern><elementsize>, patterns 0..1 */
DO_VLD2B(vld20b, 0, 2, 12, 14)
DO_VLD2B(vld21b, 4, 6, 8, 10)

DO_VLD2H(vld20h, 0, 1, 6, 7)
DO_VLD2H(vld21h, 2, 3, 4, 5)

DO_VLD2W(vld20w, 0, 4, 24, 28)
DO_VLD2W(vld21w, 8, 12, 16, 20)
543075e7e97SPeter Maydell
/*
 * DO_VST4B: VST4 for 8-bit elements. Each executed beat gathers one
 * byte (at byte index off[beat]) from each of the four Qregs Qn..Qn+3
 * into a 32-bit word and stores it little-endian at
 * base + off[beat] * 4.
 */
#define DO_VST4B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = 0;                                                   \
            for (e = 3; e >= 0; e--) {                                  \
                uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
                data = (data << 8) | qd[H1(off[beat])];                 \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }
566075e7e97SPeter Maydell
/*
 * DO_VST4H: VST4 for 16-bit elements. Each executed beat combines the
 * halfwords at index off[beat] of Qregs Qn+y and Qn+y+1 (y alternates
 * 0, 2) into a 32-bit word and stores it little-endian.
 */
#define DO_VST4H(OP, O1, O2)                                            \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O1, O2, O2 };               \
        uint32_t addr, data;                                            \
        int y; /* y counts 0 2 0 2 */                                   \
        uint16_t *qd;                                                   \
        for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) {   \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 8 + (beat & 1) * 4;               \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y);             \
            data = qd[H2(off[beat])];                                   \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1);         \
            data |= qd[H2(off[beat])] << 16;                            \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }
590075e7e97SPeter Maydell
/*
 * VST4 word stores: each beat stores one word element; off[beat] * 4
 * is the byte offset and the source register index y is derived from
 * the beat number and the offset pattern.
 */
#define DO_VST4W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        int y;                                                          \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            y = (beat + (O1 & 2)) & 3;                                  \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y);             \
            data = qd[H4(off[beat] >> 2)];                              \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }
613075e7e97SPeter Maydell
/*
 * VST4 instantiations: the four vst4<n> offset patterns between them
 * cover all sixteen element positions 0..15.
 */
DO_VST4B(vst40b, 0, 1, 10, 11)
DO_VST4B(vst41b, 2, 3, 12, 13)
DO_VST4B(vst42b, 4, 5, 14, 15)
DO_VST4B(vst43b, 6, 7, 8, 9)

DO_VST4H(vst40h, 0, 5)
DO_VST4H(vst41h, 1, 6)
DO_VST4H(vst42h, 2, 7)
DO_VST4H(vst43h, 3, 4)

DO_VST4W(vst40w, 0, 1, 10, 11)
DO_VST4W(vst41w, 2, 3, 12, 13)
DO_VST4W(vst42w, 4, 5, 14, 15)
DO_VST4W(vst43w, 6, 7, 8, 9)
628075e7e97SPeter Maydell
/*
 * VST2 byte stores: each beat stores one 32-bit word interleaving two
 * adjacent byte elements from each of the two registers; e & 1 picks
 * the register, e >> 1 the byte within the pair.
 */
#define DO_VST2B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint8_t *qd;                                                    \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 2;                                \
            data = 0;                                                   \
            for (e = 3; e >= 0; e--) {                                  \
                qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1));    \
                data = (data << 8) | qd[H1(off[beat] + (e >> 1))];      \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }
652075e7e97SPeter Maydell
/*
 * VST2 halfword stores: each beat stores a 32-bit word whose low half
 * comes from Qd[qnidx] and whose high half from Qd[qnidx + 1], both
 * at element offset off[beat].
 */
#define DO_VST2H(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        int e;                                                          \
        uint16_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = 0;                                                   \
            /* e counts down so register 0's element lands in the LSBs */ \
            for (e = 1; e >= 0; e--) {                                  \
                qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e);         \
                data = (data << 16) | qd[H2(off[beat])];                \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }
677075e7e97SPeter Maydell
/*
 * VST2 word stores: off[beat] is the byte offset of the beat within
 * the whole transfer; since the two registers interleave, the element
 * within the selected register is off[beat] >> 3.
 */
#define DO_VST2W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat];                                    \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1));    \
            data = qd[H4(off[beat] >> 3)];                              \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }
698075e7e97SPeter Maydell
/*
 * VST2 instantiations: the two vst2<n> offset patterns between them
 * cover the whole transfer.
 */
DO_VST2B(vst20b, 0, 2, 12, 14)
DO_VST2B(vst21b, 4, 6, 8, 10)

DO_VST2H(vst20h, 0, 1, 6, 7)
DO_VST2H(vst21h, 2, 3, 4, 5)

DO_VST2W(vst20w, 0, 4, 24, 28)
DO_VST2W(vst21w, 8, 12, 16, 20)
707075e7e97SPeter Maydell
708075e7e97SPeter Maydell /*
7090f0f2bd5SPeter Maydell * The mergemask(D, R, M) macro performs the operation "*D = R" but
7100f0f2bd5SPeter Maydell * storing only the bytes which correspond to 1 bits in M,
7110f0f2bd5SPeter Maydell * leaving other bytes in *D unchanged. We use _Generic
7120f0f2bd5SPeter Maydell * to select the correct implementation based on the type of D.
7130f0f2bd5SPeter Maydell */
7140f0f2bd5SPeter Maydell
static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    /* Byte element: bit 0 of the predicate mask gates the whole write. */
    uint8_t old = *d;
    *d = (mask & 1) ? r : old;
}
7210f0f2bd5SPeter Maydell
static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    /* Signedness does not matter for a predicated byte copy. */
    uint8_t *ud = (uint8_t *)d;
    mergemask_ub(ud, (uint8_t)r, mask);
}
7260f0f2bd5SPeter Maydell
static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    /*
     * Expand the per-byte predicate bits to byte-wide lanes, then
     * XOR-blend r into *d under that byte mask.
     */
    uint16_t bsel = expand_pred_b(mask);
    *d ^= (*d ^ r) & bsel;
}
7320f0f2bd5SPeter Maydell
static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    /* Delegate to the unsigned variant; the bit pattern is identical. */
    mergemask_uh((uint16_t *)d, (uint16_t)r, mask);
}
7370f0f2bd5SPeter Maydell
static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    /* XOR-blend byte lanes of r into *d where the predicate bytes are set. */
    uint32_t bsel = expand_pred_b(mask);
    *d ^= (*d ^ r) & bsel;
}
7430f0f2bd5SPeter Maydell
static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    /* Delegate to the unsigned variant; the bit pattern is identical. */
    mergemask_uw((uint32_t *)d, (uint32_t)r, mask);
}
7480f0f2bd5SPeter Maydell
static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    /* XOR-blend byte lanes of r into *d where the predicate bytes are set. */
    uint64_t bsel = expand_pred_b(mask);
    *d ^= (*d ^ r) & bsel;
}
7540f0f2bd5SPeter Maydell
static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    /* Delegate to the unsigned variant; the bit pattern is identical. */
    mergemask_uq((uint64_t *)d, (uint64_t)r, mask);
}
7590f0f2bd5SPeter Maydell
/* Dispatch on the pointer type of D to the matching variant above. */
#define mergemask(D, R, M) \
    _Generic(D, \
             uint8_t *: mergemask_ub, \
             int8_t *:  mergemask_sb, \
             uint16_t *: mergemask_uh, \
             int16_t *:  mergemask_sh, \
             uint32_t *: mergemask_uw, \
             int32_t *:  mergemask_sw, \
             uint64_t *: mergemask_uq, \
             int64_t *:  mergemask_sq)(D, R, M)
7700f0f2bd5SPeter Maydell
/* VDUP: replicate a 32-bit value into every word lane of Qd, predicated. */
void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
{
    /*
     * The generated code already replicated an 8 or 16 bit constant
     * into the 32-bit value, so we only need to write the 32-bit
     * value to all elements of the Qreg, allowing for predication.
     */
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        mergemask(&d[H4(e)], val, mask);
    }
    mve_advance_vpt(env);
}
786ab59362fSPeter Maydell
/*
 * Generic 1-operand vector operation: for each ESIZE-byte element,
 * Qd = FN(Qm), with per-byte predicated writeback via mergemask().
 */
#define DO_1OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask);       \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
7980f0f2bd5SPeter Maydell
/* clrsb32/clz32 count over 32 bits; adjust for the narrower elements. */
#define DO_CLS_B(N) (clrsb32(N) - 24)
#define DO_CLS_H(N) (clrsb32(N) - 16)

DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)

#define DO_CLZ_B(N) (clz32(N) - 24)
#define DO_CLZ_H(N) (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

/* VREV: reverse elements within each container via byte/half/word swaps. */
DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)
8198abd3c80SPeter Maydell
/* VMVN is bitwise, so one 64-bit lane size covers every element size. */
#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)

/* Integer absolute value; float abs just clears the per-lane sign bits. */
#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))

DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)

/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)
835399a8c76SPeter Maydell
/* Integer negate; float negate just flips the per-lane sign bits. */
#define DO_NEG(N) (-(N))
#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))

DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)

/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
84768245e44SPeter Maydell
/*
 * 1 operand immediates: Vda is destination and possibly also one source.
 * All these insns work at 64-bit widths.
 */
#define DO_1OP_IMM(OP, FN)                                              \
    void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm)    \
    {                                                                   \
        uint64_t *da = vda;                                             \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) {                      \
            mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask);            \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* N is the existing register contents, I the (replicated) immediate. */
#define DO_MOVI(N, I) (I)
#define DO_ANDI(N, I) ((N) & (I))
#define DO_ORRI(N, I) ((N) | (I))

DO_1OP_IMM(vmovi, DO_MOVI)
DO_1OP_IMM(vandi, DO_ANDI)
DO_1OP_IMM(vorri, DO_ORRI)
871eab84139SPeter Maydell
/*
 * Generic 2-operand vector op: Qd = FN(Qn, Qm) elementwise, with
 * predicated writeback of each ESIZE-byte element.
 */
#define DO_2OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask);        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_U(OP, FN)                        \
    DO_2OP(OP##b, 1, uint8_t, FN)               \
    DO_2OP(OP##h, 2, uint16_t, FN)              \
    DO_2OP(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_S(OP, FN)                        \
    DO_2OP(OP##b, 1, int8_t, FN)                \
    DO_2OP(OP##h, 2, int16_t, FN)               \
    DO_2OP(OP##w, 4, int32_t, FN)
897cd367ff3SPeter Maydell
/*
 * "Long" operations where two half-sized inputs (taken from either the
 * top or the bottom of the input vector) produce a double-width result.
 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
 */
#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)               \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            /* TOP selects the odd (1) or even (0) input elements */    \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)],              \
                         m[H##ESIZE(le * 2 + TOP)]);                    \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
917ac6ad1dcSPeter Maydell
/*
 * 2-operand saturating op: FN reports saturation through its bool *
 * argument; saturation in any executed lane sets the cumulative QC flag.
 */
#define DO_2OP_SAT(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            TYPE r_ = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat);         \
            mergemask(&d[H##ESIZE(e)], r_, mask);                       \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_SAT_U(OP, FN)                    \
    DO_2OP_SAT(OP##b, 1, uint8_t, FN)           \
    DO_2OP_SAT(OP##h, 2, uint16_t, FN)          \
    DO_2OP_SAT(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_SAT_S(OP, FN)                    \
    DO_2OP_SAT(OP##b, 1, int8_t, FN)            \
    DO_2OP_SAT(OP##h, 2, int16_t, FN)           \
    DO_2OP_SAT(OP##w, 4, int32_t, FN)
948483da661SPeter Maydell
/* Bitwise ops are element-size agnostic, so do them 64 bits at a time. */
#define DO_AND(N, M) ((N) & (M))
#define DO_BIC(N, M) ((N) & ~(M))
#define DO_ORR(N, M) ((N) | (M))
#define DO_ORN(N, M) ((N) | ~(M))
#define DO_EOR(N, M) ((N) ^ (M))

DO_2OP(vand, 8, uint64_t, DO_AND)
DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)

#define DO_ADD(N, M) ((N) + (M))
#define DO_SUB(N, M) ((N) - (M))
#define DO_MUL(N, M) ((N) * (M))

DO_2OP_U(vadd, DO_ADD)
DO_2OP_U(vsub, DO_SUB)
DO_2OP_U(vmul, DO_MUL)

/*
 * VMULL: long multiplies of the bottom (b) or top (t) half-elements,
 * signed and unsigned, for every input element size.
 */
DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)

DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)
982ac6ad1dcSPeter Maydell
/*
 * Polynomial multiply. We can always do this generating 64 bits
 * of the result at a time, so we don't need to use DO_2OP_L.
 * The clmul_* carry-less multiply helpers come from crypto/clmul.h.
 */
DO_2OP(vmullpbh, 8, uint64_t, clmul_8x4_even)
DO_2OP(vmullpth, 8, uint64_t, clmul_8x4_odd)
DO_2OP(vmullpbw, 8, uint64_t, clmul_16x2_even)
DO_2OP(vmullptw, 8, uint64_t, clmul_16x2_odd)
991c1bd78cbSPeter Maydell
992c1bd78cbSPeter Maydell /*
993ba62cc56SPeter Maydell * Because the computation type is at least twice as large as required,
994ba62cc56SPeter Maydell * these work for both signed and unsigned source types.
995ba62cc56SPeter Maydell */
static inline uint8_t do_mulh_b(int32_t n, int32_t m)
{
    /* High byte of the 16-bit-wide product of two extended byte operands. */
    int32_t product = n * m;
    return product >> 8;
}
1000ba62cc56SPeter Maydell
static inline uint16_t do_mulh_h(int32_t n, int32_t m)
{
    /*
     * High 16 bits of the product.  Multiply in 64 bits: for the
     * unsigned flavour (vmulhuh) the mathematical product can exceed
     * INT32_MAX, and a 32-bit signed multiply would be undefined
     * behavior.  Truncation to uint16_t makes the wide computation
     * give the same result as the old 32-bit one for all inputs.
     */
    return ((int64_t)n * m) >> 16;
}
1005ba62cc56SPeter Maydell
static inline uint32_t do_mulh_w(int64_t n, int64_t m)
{
    /*
     * High 32 bits of the product.  Multiply in unsigned arithmetic:
     * for vmulhuw the mathematical product can exceed INT64_MAX, and
     * a signed 64-bit multiply would be undefined behavior.  The low
     * 32 bits of (product mod 2^64) >> 32 are the same whether the
     * shift is logical or arithmetic, so the result is unchanged.
     */
    return ((uint64_t)n * (uint64_t)m) >> 32;
}
1010ba62cc56SPeter Maydell
static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
{
    /* High byte of the product, rounded by adding half an LSB first. */
    uint32_t rounded = n * m + (1U << 7);
    return rounded >> 8;
}
1015fca87b78SPeter Maydell
static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
{
    /*
     * Rounding high-half multiply.  Multiply in unsigned 32-bit
     * arithmetic so that vrmulhuh's large products wrap instead of
     * triggering signed-overflow undefined behavior; the final
     * uint16_t truncation gives the same result as before.
     */
    return ((uint32_t)n * (uint32_t)m + (1U << 15)) >> 16;
}
1020fca87b78SPeter Maydell
static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
{
    /*
     * Rounding high-half multiply.  Unsigned 64-bit arithmetic avoids
     * the signed multiply overflow (undefined behavior) that vrmulhuw
     * could otherwise hit; truncation to uint32_t preserves the
     * previous results.
     */
    return ((uint64_t)n * (uint64_t)m + (1U << 31)) >> 32;
}
1025fca87b78SPeter Maydell
/* VMULH/VRMULH: high half of the (optionally rounded) product. */
DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)

DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)

/* Elementwise max, min and absolute difference. */
#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))

DO_2OP_S(vmaxs, DO_MAX)
DO_2OP_U(vmaxu, DO_MAX)
DO_2OP_S(vmins, DO_MIN)
DO_2OP_U(vminu, DO_MIN)

#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))

DO_2OP_S(vabds, DO_ABD)
DO_2OP_U(vabdu, DO_ABD)
1052abc48e31SPeter Maydell
static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
{
    /* Halving add: widen to 64 bits so the carry out is not lost. */
    uint64_t sum = (uint64_t)n + m;
    return sum >> 1;
}
1057abc48e31SPeter Maydell
static inline int32_t do_vhadd_s(int32_t n, int32_t m)
{
    /* Signed halving add, with a 64-bit intermediate to avoid overflow. */
    int64_t sum = (int64_t)n + m;
    return sum >> 1;
}
1062abc48e31SPeter Maydell
static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
{
    /* Halving subtract: the 64-bit borrow becomes the result's top bit. */
    uint64_t diff = (uint64_t)n - m;
    return diff >> 1;
}
1067abc48e31SPeter Maydell
static inline int32_t do_vhsub_s(int32_t n, int32_t m)
{
    /* Signed halving subtract over a 64-bit intermediate. */
    int64_t diff = (int64_t)n - m;
    return diff >> 1;
}
1072abc48e31SPeter Maydell
DO_2OP_S(vhadds, do_vhadd_s)
DO_2OP_U(vhaddu, do_vhadd_u)
DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)

/*
 * VSHL/VRSHL: the shift count is the low byte of M, passed as a signed
 * value to do_sqrshl_bhs()/do_uqrshl_bhs(); the boolean argument
 * selects the rounding (VRSHL) variant.
 */
#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)

DO_2OP_S(vshls, DO_VSHLS)
DO_2OP_U(vshlu, DO_VSHLU)
DO_2OP_S(vrshls, DO_VRSHLS)
DO_2OP_U(vrshlu, DO_VRSHLU)

/* Rounding halving add: add one before halving so halves round up. */
#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)

DO_2OP_S(vrhadds, DO_RHADD_S)
DO_2OP_U(vrhaddu, DO_RHADD_U)
10931eb987a8SPeter Maydell
/*
 * Add-with-carry core for VADC/VSBC/VADCI/VSBCI: per 32-bit element,
 * d = n + (m ^ inv) + carry, with the carry chained only through
 * elements whose predicate bit is set.  inv is 0 for the add forms
 * and -1 for the subtract forms (one's complement of m plus a
 * carry-in of 1 computes n - m).
 */
static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
                    uint32_t inv, uint32_t carry_in, bool update_flags)
{
    uint16_t mask = mve_element_mask(env);
    unsigned e;

    /* If any additions trigger, we will update flags. */
    if (mask & 0x1111) {
        update_flags = true;
    }

    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        /* Accumulate in 64 bits so the carry out is bit 32 of r. */
        uint64_t r = carry_in;
        r += n[H4(e)];
        r += m[H4(e)] ^ inv;
        if (mask & 1) {
            carry_in = r >> 32;
        }
        mergemask(&d[H4(e)], r, mask);
    }

    if (update_flags) {
        /* Store C, clear NZV. */
        env->vfp.fpsr &= ~FPSR_NZCV_MASK;
        env->vfp.fpsr |= carry_in * FPSR_C;
    }
    mve_advance_vpt(env);
}
112289bc4c4fSPeter Maydell
/*
 * VADC: add with carry, initial carry-in taken from FPSCR.C.  The
 * flags are only written back if some element actually executes
 * (see do_vadc() above).
 */
void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.fpsr & FPSR_C;
    do_vadc(env, vd, vn, vm, 0, carry_in, false);
}

/* VSBC: subtract with carry (m inverted), carry-in from FPSCR.C. */
void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.fpsr & FPSR_C;
    do_vadc(env, vd, vn, vm, -1, carry_in, false);
}


/* VADCI: as VADC but with a fixed initial carry-in of 0; always sets flags. */
void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, 0, 0, true);
}

/* VSBCI: as VSBC but with a fixed initial carry-in of 1; always sets flags. */
void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, -1, 1, true);
}
114589bc4c4fSPeter Maydell
/*
 * VCADD: complex add with rotate.  Even output elements are
 * FN0(n[e], m[e + 1]) and odd ones are FN1(n[e], m[e - 1]); the
 * results go through a temporary because Qd may overlap Qn/Qm.
 */
#define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1)                             \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE r[16 / ESIZE];                                             \
        /* Calculate all results first to avoid overwriting inputs */   \
        for (e = 0; e < 16 / ESIZE; e++) {                              \
            if (!(e & 1)) {                                             \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]);         \
            } else {                                                    \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]);         \
            }                                                           \
        }                                                               \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], r[e], mask);                     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VCADD_ALL(OP, FN0, FN1)              \
    DO_VCADD(OP##b, 1, int8_t, FN0, FN1)        \
    DO_VCADD(OP##h, 2, int16_t, FN0, FN1)       \
    DO_VCADD(OP##w, 4, int32_t, FN0, FN1)

DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
/* VHCADD is the signed halving flavour of VCADD */
DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s)
DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s)
117667ec113bSPeter Maydell
static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
    /*
     * Clamp val to [min, max]; set *s (the saturation flag) when
     * clamping occurred.  *s is only ever set, never cleared, so
     * saturation accumulates across repeated calls.
     */
    int64_t clamped = val;

    if (clamped > max) {
        clamped = max;
    } else if (clamped < min) {
        clamped = min;
    }
    if (clamped != val) {
        *s = true;
    }
    return clamped;
}
118839f2ec85SPeter Maydell
/*
 * Saturating add/subtract building blocks: widen to 64 bits, do the
 * arithmetic, then clamp to the element's range with do_sat_bhw(),
 * which sets *s on saturation.  Arguments are parenthesized so the
 * macros remain correct if a caller ever passes a compound expression.
 */
#define DO_SQADD_B(n, m, s) \
    do_sat_bhw((int64_t)(n) + (m), INT8_MIN, INT8_MAX, s)
#define DO_SQADD_H(n, m, s) \
    do_sat_bhw((int64_t)(n) + (m), INT16_MIN, INT16_MAX, s)
#define DO_SQADD_W(n, m, s) \
    do_sat_bhw((int64_t)(n) + (m), INT32_MIN, INT32_MAX, s)

#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)(n) + (m), 0, UINT8_MAX, s)
#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)(n) + (m), 0, UINT16_MAX, s)
#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)(n) + (m), 0, UINT32_MAX, s)

#define DO_SQSUB_B(n, m, s) \
    do_sat_bhw((int64_t)(n) - (m), INT8_MIN, INT8_MAX, s)
#define DO_SQSUB_H(n, m, s) \
    do_sat_bhw((int64_t)(n) - (m), INT16_MIN, INT16_MAX, s)
#define DO_SQSUB_W(n, m, s) \
    do_sat_bhw((int64_t)(n) - (m), INT32_MIN, INT32_MAX, s)

#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)(n) - (m), 0, UINT8_MAX, s)
#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)(n) - (m), 0, UINT16_MAX, s)
#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)(n) - (m), 0, UINT32_MAX, s)
12041d2386f7SPeter Maydell
/*
 * For QDMULH and QRDMULH we simplify "double and shift by esize" into
 * "shift by esize-1", adjusting the QRDMULH rounding constant to match.
 * Arguments are parenthesized so the macros remain correct if a caller
 * ever passes a compound expression.
 */
#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)(n) * (m)) >> 7, \
                                        INT8_MIN, INT8_MAX, s)
#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)(n) * (m)) >> 15, \
                                        INT16_MIN, INT16_MAX, s)
#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)(n) * (m)) >> 31, \
                                        INT32_MIN, INT32_MAX, s)

#define DO_QRDMULH_B(n, m, s) \
    do_sat_bhw(((int64_t)(n) * (m) + (1 << 6)) >> 7, INT8_MIN, INT8_MAX, s)
#define DO_QRDMULH_H(n, m, s) \
    do_sat_bhw(((int64_t)(n) * (m) + (1 << 14)) >> 15, INT16_MIN, INT16_MAX, s)
#define DO_QRDMULH_W(n, m, s) \
    do_sat_bhw(((int64_t)(n) * (m) + (1 << 30)) >> 31, INT32_MIN, INT32_MAX, s)
122266c05767SPeter Maydell
/* Saturating doubling multiply-high, plain and rounding forms */
DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)

DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)

/* Saturating vector add/subtract, unsigned and signed, all sizes */
DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
1244f741707bSPeter Maydell
/*
 * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
 * and friends wanting a uint32_t* sat and our needing a bool*.
 * It is a GCC statement-expression that evaluates to the shift result;
 * *satp is only ever set, never cleared, so saturation accumulates.
 */
#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp)                        \
    ({                                                                  \
        uint32_t su32 = 0;                                              \
        typeof(N) qrshl_ret = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
        if (su32) {                                                     \
            *satp = true;                                               \
        }                                                               \
        qrshl_ret;                                                      \
    })

/* Saturating shifts: signed/unsigned, non-rounding and rounding */
#define DO_SQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
#define DO_UQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
#define DO_SQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
#define DO_UQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
#define DO_SUQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)
1269483da661SPeter Maydell
/* Vector saturating (rounding) shift left, expanded for b/h/w */
DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)
1274483da661SPeter Maydell
/*
 * Multiply add dual returning high half
 * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of
 * whether to add the rounding constant, and the pointer to the
 * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant",
 * saturate to twice the input size and return the high half; or
 * (A * B - C * D) etc for VQDMLSDH.
 * XCHG selects which element of each even/odd pair is written: only
 * lanes with (e & 1) == XCHG produce a result (the "x" insn forms).
 */
#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN)                \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                void *vm)                               \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            if ((e & 1) == XCHG) {                                      \
                TYPE vqdmladh_ret = FN(n[H##ESIZE(e)],                  \
                                       m[H##ESIZE(e - XCHG)],           \
                                       n[H##ESIZE(e + (1 - 2 * XCHG))], \
                                       m[H##ESIZE(e + (1 - XCHG))],     \
                                       ROUND, &sat);                    \
                mergemask(&d[H##ESIZE(e)], vqdmladh_ret, mask);         \
                /* saturation only counts if the lane's predicate bit is set */ \
                qc |= sat & mask & 1;                                   \
            }                                                           \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
1308fd677f80SPeter Maydell
static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    /* (a*b + c*d) * 2 + rounding, saturated to 16 bits; return high byte */
    int64_t sum = (int64_t)a * b + (int64_t)c * d;

    sum = sum * 2 + (round << 7);
    return do_sat_bhw(sum, INT16_MIN, INT16_MAX, sat) >> 8;
}
1315fd677f80SPeter Maydell
static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    /* (a*b + c*d) * 2 + rounding, saturated to 32 bits; return high half */
    int64_t sum = (int64_t)a * b + (int64_t)c * d;

    sum = sum * 2 + (round << 15);
    return do_sat_bhw(sum, INT32_MIN, INT32_MAX, sat) >> 16;
}
1322fd677f80SPeter Maydell
/*
 * 32-bit dual multiply-add: architecturally (a*b + c*d) * 2 + round,
 * saturated to 64 bits, returning the high 32 bits.
 */
static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant before doubling rather
     * than adding the rounding constant after the doubling.
     */
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}
1349fd677f80SPeter Maydell
static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    /* (a*b - c*d) * 2 + rounding, saturated to 16 bits; return high byte */
    int64_t diff = (int64_t)a * b - (int64_t)c * d;

    diff = diff * 2 + (round << 7);
    return do_sat_bhw(diff, INT16_MIN, INT16_MAX, sat) >> 8;
}
135692f11732SPeter Maydell
static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    /* (a*b - c*d) * 2 + rounding, saturated to 32 bits; return high half */
    int64_t diff = (int64_t)a * b - (int64_t)c * d;

    diff = diff * 2 + (round << 15);
    return do_sat_bhw(diff, INT32_MIN, INT32_MAX, sat) >> 16;
}
136392f11732SPeter Maydell
/*
 * 32-bit dual multiply-subtract: (a*b - c*d) * 2 + round, saturated
 * to 64 bits, returning the high 32 bits.
 */
static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /* The same ordering issue as in do_vqdmladh_w applies here too */
    if (ssub64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}
137992f11732SPeter Maydell
/* VQDMLADH: plain, exchanging ("x"), rounding ("r") and both variants */
DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)

DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)

/* VQDMLSDH: the multiply-subtract counterparts */
DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)

DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
140792f11732SPeter Maydell
/*
 * 2-operand vector-by-scalar operation: the scalar arrives as a
 * uint32_t in rm and is truncated to the element type before use.
 */
#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask);    \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
1421e51896b3SPeter Maydell
/*
 * As DO_2OP_SCALAR, but FN also reports saturation; an active lane
 * that saturates sets FPSCR.QC (qc is only written when set, so the
 * sticky flag is never cleared here).
 */
#define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat),     \
                      mask);                                            \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
144239f2ec85SPeter Maydell
/*
 * "accumulating" version where FN takes d as well as n and m
 * (destructive ops such as VMLA/VMLAS, instantiated further below).
 */
#define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask);     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
14586b895bf8SPeter Maydell
/*
 * Accumulating version whose FN also reports saturation; an active
 * lane that saturates sets FPSCR.QC.
 */
#define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN)                      \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat),      \
                      mask);                                            \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
14808be9a250SPeter Maydell
/* provide unsigned 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN)        \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN)       \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
/* ...and the signed equivalents */
#define DO_2OP_SCALAR_S(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN)         \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN)        \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

/* unsigned accumulating (d = FN(d, n, m)) helpers for all sizes */
#define DO_2OP_ACC_SCALAR_U(OP, FN)             \
    DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN)    \
    DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN)   \
    DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)
14956b895bf8SPeter Maydell
/* Plain and halving add/sub/mul by scalar */
DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

/* Saturating add/sub by scalar */
DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

/* Saturating doubling multiply-high by scalar, plain and rounding */
DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
152466c05767SPeter Maydell
static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
{
    /* 2*a*b + (c << 8) + rounding, saturated to 16 bits; return high byte */
    int64_t acc = (int64_t)c << 8;

    acc += (int64_t)a * b * 2;
    acc += round << 7;
    return do_sat_bhw(acc, INT16_MIN, INT16_MAX, sat) >> 8;
}
15308be9a250SPeter Maydell
static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
                            int round, bool *sat)
{
    /* 2*a*b + (c << 16) + rounding, saturated to 32 bits; return high half */
    int64_t acc = (int64_t)c << 16;

    acc += (int64_t)a * b * 2;
    acc += round << 15;
    return do_sat_bhw(acc, INT32_MIN, INT32_MAX, sat) >> 16;
}
15378be9a250SPeter Maydell
static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
                            int round, bool *sat)
{
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant and half the "c << esize"
     * before doubling rather than adding the rounding constant after
     * the doubling.
     */
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c << 31;
    int64_t r;
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}
15658be9a250SPeter Maydell
/*
 * The *MLAH insns are vector * scalar + vector;
 * the *MLASH insns are vector * vector + scalar
 * Both map onto do_vqdmlah_*() with the operands reordered; the
 * trailing 0/1 argument selects non-rounding vs rounding.
 */
#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S)
#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S)
#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S)
#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S)
#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S)
#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S)

#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S)
#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S)
#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S)
#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S)
#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S)
#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S)
15838be9a250SPeter Maydell
/* VQ(R)DMLAH and VQ(R)DMLASH, expanded for all element sizes */
DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W)

DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W)
15978be9a250SPeter Maydell
/* Vector by scalar plus vector (plain wrapping multiply-accumulate) */
#define DO_VMLA(D, N, M) ((N) * (M) + (D))

DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA)

/* Vector by vector plus scalar */
#define DO_VMLAS(D, N, M) ((N) * (D) + (M))

DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS)
16076b895bf8SPeter Maydell
/*
 * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
 * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
 * SATMASK specifies which bits of the predicate mask matter for determining
 * whether to propagate a saturation indication into FPSCR.QC -- for
 * the 16x16->32 case we must check only the bit corresponding to the T or B
 * half that we used, but for the 32x32->64 case we propagate if the mask
 * bit is set for either half.
 * The (LTYPE) cast widens the input element before FN is applied.
 */
#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn;                                                   \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        bool qc = false;                                                \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            bool sat = false;                                           \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat);    \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
            qc |= sat && (mask & SATMASK);                              \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
1638a8890353SPeter Maydell
static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
{
    /* Saturating doubling 16x16->32 multiply; fits easily in 64 bits */
    int64_t doubled = 2 * (int64_t)n * m;

    return do_sat_bhw(doubled, INT32_MIN, INT32_MAX, sat);
}
1644a8890353SPeter Maydell
static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
{
    /*
     * Saturating doubling 32x32->64 multiply.
     * The multiply can't overflow, but the doubling might; if the
     * product is outside [INT64_MIN/2, INT64_MAX/2] we saturate and
     * flag it, otherwise return twice the product.
     */
    int64_t product = (int64_t)n * m;

    if (product > INT64_MAX / 2 || product < INT64_MIN / 2) {
        *sat = true;
        return product > 0 ? INT64_MAX : INT64_MIN;
    }
    return product * 2;
}
1659a8890353SPeter Maydell
/*
 * Predicate-mask bits that gate saturation reporting: bottom/top 16-bit
 * half, and either half of a 32-bit element -- see the comment above
 * DO_2OP_SAT_SCALAR_L.
 */
#define SATMASK16B 1
#define SATMASK16T (1 << 2)
#define SATMASK32 ((1 << 4) | 1)

DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16B)
DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16T)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
1672a8890353SPeter Maydell
/*
 * Long saturating ops
 * TOP selects the top (1) or bottom (0) half of each input element pair.
 */
#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK)  \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                void *vm)                               \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        bool qc = false;                                                \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            bool sat = false;                                           \
            LTYPE op1 = n[H##ESIZE(le * 2 + TOP)];                      \
            LTYPE op2 = m[H##ESIZE(le * 2 + TOP)];                      \
            mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask);     \
            qc |= sat && (mask & SATMASK);                              \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* VQDMULL: bottom/top halves, 16->32 and 32->64 */
DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
170243364321SPeter Maydell
/*
 * VBRSR helper for 8-bit elements: bit-reverse the low byte of n and
 * then keep only the low 'm' reversed bits.  Only the low byte of m
 * is used; a count of 0 yields 0, counts >= 8 return the full 8-bit
 * reversal.
 */
static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
    uint32_t count = m & 0xff;
    uint32_t rev = n & 0xff;

    if (count == 0) {
        return 0;
    }
    /* Bit-reverse the low byte (equivalent to revbit8()) */
    rev = ((rev & 0x55) << 1) | ((rev >> 1) & 0x55);
    rev = ((rev & 0x33) << 2) | ((rev >> 2) & 0x33);
    rev = ((rev << 4) | (rev >> 4)) & 0xff;
    if (count < 8) {
        rev >>= 8 - count;
    }
    return rev;
}
1715b050543bSPeter Maydell
/*
 * VBRSR helper for 16-bit elements: bit-reverse the low halfword of n
 * and keep only the low 'm' reversed bits.  Only the low byte of m is
 * used; 0 yields 0 and counts >= 16 return the full 16-bit reversal.
 */
static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
{
    uint32_t count = m & 0xff;
    uint32_t rev = n & 0xffff;

    if (count == 0) {
        return 0;
    }
    /* Bit-reverse the low halfword (equivalent to revbit16()) */
    rev = ((rev & 0x5555) << 1) | ((rev >> 1) & 0x5555);
    rev = ((rev & 0x3333) << 2) | ((rev >> 2) & 0x3333);
    rev = ((rev & 0x0f0f) << 4) | ((rev >> 4) & 0x0f0f);
    rev = ((rev << 8) | (rev >> 8)) & 0xffff;
    if (count < 16) {
        rev >>= 16 - count;
    }
    return rev;
}
1728b050543bSPeter Maydell
/*
 * VBRSR helper for 32-bit elements: bit-reverse n and keep only the
 * low 'm' reversed bits.  Only the low byte of m is used; 0 yields 0
 * and counts >= 32 return the full 32-bit reversal.
 */
static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
{
    uint32_t count = m & 0xff;
    uint32_t rev = n;

    if (count == 0) {
        return 0;
    }
    /* Bit-reverse the word (equivalent to revbit32()) */
    rev = ((rev & 0x55555555u) << 1) | ((rev >> 1) & 0x55555555u);
    rev = ((rev & 0x33333333u) << 2) | ((rev >> 2) & 0x33333333u);
    rev = ((rev & 0x0f0f0f0fu) << 4) | ((rev >> 4) & 0x0f0f0f0fu);
    rev = ((rev & 0x00ff00ffu) << 8) | ((rev >> 8) & 0x00ff00ffu);
    rev = (rev << 16) | (rev >> 16);
    if (count < 32) {
        rev >>= 32 - count;
    }
    return rev;
}
1741b050543bSPeter Maydell
/* VBRSR: bit-reverse the low 'm' bits of each element of the vector */
DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
1745b050543bSPeter Maydell
/*
 * Multiply add long dual accumulate ops: multiply each pair of active
 * elements of n and m and fold the products into the 64-bit
 * accumulator 'a'.  EVENACC and ODDACC (+= or -=) select the
 * accumulate operation for even- and odd-numbered elements; XCHG=true
 * gives the "exchange" forms, which pair each element of m with the
 * other element of its even/odd pair in n.
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC)                 \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else {                                                \
                    a EVENACC                                           \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)

DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
178338548747SPeter Maydell
/*
 * Multiply add dual accumulate ops: like DO_LDAV above, but the
 * products and the accumulator 'a' are only 32 bits wide.
 */
#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint32_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)];     \
                } else {                                                \
                    a EVENACC                                           \
                        n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)];     \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

/* Expand a dual-accumulate op for all three signed element sizes */
#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC)           \
    DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC)   \
    DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC)  \
    DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)

/* Expand a dual-accumulate op for all three unsigned element sizes */
#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC)           \
    DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC)  \
    DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)

DO_DAV_S(vmladavs, false, +=, +=)
DO_DAV_U(vmladavu, false, +=, +=)
DO_DAV_S(vmlsdav, false, +=, -=)
DO_DAV_S(vmladavsx, true, +=, +=)
DO_DAV_S(vmlsdavx, true, +=, -=)
/*
 * Rounding multiply add long dual accumulate high. In the pseudocode
 * this is implemented with a 72-bit internal accumulator value of which
 * the top 64 bits are returned. We optimize this to avoid having to
 * use 128-bit arithmetic -- we can do this because the 72-bit accumulator
 * is squashed back into 64-bits after each beat: each product is
 * rounded and shifted down by 8 bits before being added to 'a'.
 * SUB selects the multiply-subtract forms (the product is negated
 * before accumulation); XCHG the "exchange" element pairing.
 */
#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB)                            \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {                      \
            if (mask & 1) {                                             \
                LTYPE mul;                                              \
                if (e & 1) {                                            \
                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)];        \
                    if (SUB) {                                          \
                        mul = -mul;                                     \
                    }                                                   \
                } else {                                                \
                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)];        \
                }                                                       \
                mul = (mul >> 8) + ((mul >> 7) & 1);                    \
                a += mul;                                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)

DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)

DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)
18656f060a63SPeter Maydell
/*
 * Vector add across vector: sum all active elements of m into the
 * 32-bit general-purpose accumulator ra.
 */
#define DO_VADDV(OP, ESIZE, TYPE)                               \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra)                \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                ra += m[H##ESIZE(e)];                           \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

DO_VADDV(vaddvsb, 1, int8_t)
DO_VADDV(vaddvsh, 2, int16_t)
DO_VADDV(vaddvsw, 4, int32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)
1889f9ed6174SPeter Maydell
/*
 * Vector max/min across vector. Unlike VADDV, we must
 * read ra as the element size, not its full width.
 * We work with int64_t internally for simplicity.
 * RATYPE gives the type ra is narrowed to on input (it may differ in
 * signedness from the vector element TYPE for the VMAXAV/VMINAV forms).
 */
#define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN)                \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra_in)             \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        int64_t ra = (RATYPE)ra_in;                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                ra = FN(ra, m[H##ESIZE(e)]);                    \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

/* Expand for all three unsigned element sizes */
#define DO_VMAXMINV_U(INSN, FN)                         \
    DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN)       \
    DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN)     \
    DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN)
/* Expand for all three signed element sizes */
#define DO_VMAXMINV_S(INSN, FN)                         \
    DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN)         \
    DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN)       \
    DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN)

/*
 * Helpers for max and min of absolute values across vector:
 * note that we only take the absolute value of 'm', not 'n'
 */
/* Return the larger of n and the absolute value of m */
static int64_t do_maxa(int64_t n, int64_t m)
{
    int64_t am = (m < 0) ? -m : m;
    return (n > am) ? n : am;
}
1932688ba4cfSPeter Maydell
/* Return the smaller of n and the absolute value of m */
static int64_t do_mina(int64_t n, int64_t m)
{
    int64_t am = (m < 0) ? -m : m;
    return (n < am) ? n : am;
}
1940688ba4cfSPeter Maydell
/* VMAXV/VMINV: ra and the vector elements have the same signedness */
DO_VMAXMINV_S(vmaxvs, DO_MAX)
DO_VMAXMINV_U(vmaxvu, DO_MAX)
DO_VMAXMINV_S(vminvs, DO_MIN)
DO_VMAXMINV_U(vminvu, DO_MIN)
/*
 * VMAXAV, VMINAV treat the general purpose input as unsigned
 * and the vector elements as signed.
 */
DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa)
DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa)
DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa)
DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)
1955688ba4cfSPeter Maydell
/*
 * VABAV: accumulate into ra the absolute difference of each active
 * pair of elements of n and m.  The subtraction is done at 64 bits
 * so it cannot overflow for any 8/16/32-bit element values.
 */
#define DO_VABAV(OP, ESIZE, TYPE)                               \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint32_t ra)      \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm, *n = vn;                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                int64_t n0 = n[H##ESIZE(e)];                    \
                int64_t m0 = m[H##ESIZE(e)];                    \
                uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0);  \
                ra += r;                                        \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }

DO_VABAV(vabavsb, 1, int8_t)
DO_VABAV(vabavsh, 2, int16_t)
DO_VABAV(vabavsw, 4, int32_t)
DO_VABAV(vabavub, 1, uint8_t)
DO_VABAV(vabavuh, 2, uint16_t)
DO_VABAV(vabavuw, 4, uint32_t)
19817f061c0aSPeter Maydell
/*
 * VADDLV: add across vector, long form -- 32-bit elements are
 * widened to LTYPE and summed into the 64-bit accumulator ra.
 */
#define DO_VADDLV(OP, TYPE, LTYPE)                              \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint64_t ra)                \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {              \
            if (mask & 1) {                                     \
                ra += (LTYPE)m[H4(e)];                          \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

DO_VADDLV(vaddlv_s, int32_t, int64_t)
DO_VADDLV(vaddlv_u, uint32_t, uint64_t)
2000d43ebd9dSPeter Maydell
/*
 * Shifts by immediate: FN is applied to each active element with the
 * immediate shift count.  The _SAT variant additionally lets FN report
 * saturation, setting QC when a predicated element saturates.
 */
#define DO_2SHIFT(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        TYPE *d = vd, *m = vm;                                  \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)],                          \
                      FN(m[H##ESIZE(e)], shift), mask);         \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN)                      \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        TYPE *d = vd, *m = vm;                                  \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        bool qc = false;                                        \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            bool sat = false;                                   \
            mergemask(&d[H##ESIZE(e)],                          \
                      FN(m[H##ESIZE(e)], shift, &sat), mask);   \
            qc |= sat & mask & 1;                               \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

/* provide unsigned 2-op shift helpers for all sizes */
#define DO_2SHIFT_U(OP, FN)                     \
    DO_2SHIFT(OP##b, 1, uint8_t, FN)            \
    DO_2SHIFT(OP##h, 2, uint16_t, FN)           \
    DO_2SHIFT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_S(OP, FN)                     \
    DO_2SHIFT(OP##b, 1, int8_t, FN)             \
    DO_2SHIFT(OP##h, 2, int16_t, FN)            \
    DO_2SHIFT(OP##w, 4, int32_t, FN)

#define DO_2SHIFT_SAT_U(OP, FN)                 \
    DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN)        \
    DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN)       \
    DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_SAT_S(OP, FN)                 \
    DO_2SHIFT_SAT(OP##b, 1, int8_t, FN)         \
    DO_2SHIFT_SAT(OP##h, 2, int16_t, FN)        \
    DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)

DO_2SHIFT_U(vshli_u, DO_VSHLU)
DO_2SHIFT_S(vshli_s, DO_VSHLS)
DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP)
DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP)
2064c2262707SPeter Maydell
/*
 * Shift-and-insert (VSRI/VSLI); we always work with 64 bits at a time.
 * SHIFTFN shifts each element and MASKFN builds the per-element mask
 * of result bits that come from the shifted value (the rest are kept
 * from the destination); dup_const() replicates that mask across the
 * 64-bit lane.
 */
#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN)                    \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        uint64_t *d = vd, *m = vm;                                      \
        uint16_t mask;                                                  \
        uint64_t shiftmask;                                             \
        unsigned e;                                                     \
        if (shift == ESIZE * 8) {                                       \
            /*                                                          \
             * Only VSRI can shift by <dt>; it should mean "don't       \
             * update the destination". The generic logic can't handle  \
             * this because it would try to shift by an out-of-range    \
             * amount, so special case it here.                         \
             */                                                         \
            goto done;                                                  \
        }                                                               \
        assert(shift < ESIZE * 8);                                      \
        mask = mve_element_mask(env);                                   \
        /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */     \
        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift));     \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) {                      \
            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) |       \
                         (d[H8(e)] & ~shiftmask);                       \
            mergemask(&d[H8(e)], r, mask);                              \
        }                                                               \
    done:                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Plain shifts and the bit masks of the inserted (shifted-in) bits */
#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))

DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
2107a78b25faSPeter Maydell
/*
 * Long shifts taking half-sized inputs from top or bottom of the input
 * vector and producing a double-width result. ESIZE, TYPE are for
 * the input, and LESIZE, LTYPE for the output.
 * Unlike the normal shift helpers, we do not handle negative shift counts,
 * because the long shift is strictly left-only.
 */
#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE)                   \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *m = vm;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        assert(shift <= 16);                                            \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift;        \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Expand a VSHLL for both signednesses and both input element sizes */
#define DO_VSHLL_ALL(OP, TOP)                                \
    DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t)             \
    DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t)           \
    DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t)            \
    DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t)          \

DO_VSHLL_ALL(vshllb, false)
DO_VSHLL_ALL(vshllt, true)
2139162e2655SPeter Maydell
/*
 * Narrowing right shifts, taking a double sized input, shifting it
 * and putting the result in either the top or bottom half of the output.
 * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
 * The predicate mask is pre-shifted by ESIZE * TOP so that each
 * mask bit lines up with the destination half being written.
 */
#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)               \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        LTYPE *m = vm;                                                  \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        mask >>= ESIZE * TOP;                                           \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            TYPE r = FN(m[H##LESIZE(le)], shift);                       \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);             \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Expand bottom/top forms for both input element sizes */
#define DO_VSHRN_ALL(OP, FN)                                    \
    DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN)        \
    DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN)       \
    DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN)         \
    DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)
2166162e2655SPeter Maydell
/*
 * Unsigned rounding right shift: shift down by sh and round to
 * nearest by adding back the last bit shifted out.  sh == 64 rounds
 * on the top bit; larger shifts give 0.
 * NOTE(review): callers are assumed never to pass sh == 0 (that would
 * shift by sh - 1 == UINT_MAX, which is undefined).
 */
static inline uint64_t do_urshr(uint64_t x, unsigned sh)
{
    if (sh > 64) {
        return 0;
    }
    if (sh == 64) {
        return x >> 63;
    }
    return (x >> sh) + ((x >> (sh - 1)) & 1);
}
2177162e2655SPeter Maydell
/*
 * Signed rounding right shift: shift down by sh and round to nearest
 * by adding back the last bit shifted out.  For sh >= 64 the result
 * is 0, because rounding the sign bit always produces 0.
 * NOTE(review): callers are assumed never to pass sh == 0.
 */
static inline int64_t do_srshr(int64_t x, unsigned sh)
{
    if (sh >= 64) {
        return 0;
    }
    return (x >> sh) + ((x >> (sh - 1)) & 1);
}
2187d6f9e011SPeter Maydell
/* Narrowing shifts: plain truncating (VSHRN) and rounding (VRSHRN) */
DO_VSHRN_ALL(vshrn, DO_SHR)
DO_VSHRN_ALL(vrshrn, do_urshr)
2190d6f9e011SPeter Maydell
/*
 * Clamp val into [min, max].  On saturation *satp is set to true;
 * otherwise it is left unchanged (so a caller-supplied 'sticky'
 * flag accumulates across calls).
 */
static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
                                 bool *satp)
{
    int64_t r = val;

    if (r > max) {
        r = max;
        *satp = true;
    } else if (r < min) {
        r = min;
        *satp = true;
    }
    return r;
}
2204d6f9e011SPeter Maydell
/*
 * Saturating narrowing right shifts: like DO_VSHRN, but FN also
 * reports saturation; QC is set when a predicated lane saturates.
 * The mask pre-shift by ESIZE * TOP aligns the predicate bits with
 * the destination half being written.
 */
#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)           \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        LTYPE *m = vm;                                                  \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        bool qc = false;                                                \
        unsigned le;                                                    \
        mask >>= ESIZE * TOP;                                           \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            bool sat = false;                                           \
            TYPE r = FN(m[H##LESIZE(le)], shift, &sat);                 \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);             \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Expanders pairing the bottom (BOP) and top (TOP) helper names */
#define DO_VSHRN_SAT_UB(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN)       \
    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VSHRN_SAT_UH(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN)      \
    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VSHRN_SAT_SB(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN)         \
    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VSHRN_SAT_SH(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN)        \
    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)

/* Plain (SHRN), unsigned-result-from-signed (SHRUN) shift-and-saturate */
#define DO_SHRN_SB(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
#define DO_SHRN_UB(N, M, SATP)                                  \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
#define DO_SHRUN_B(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)

#define DO_SHRN_SH(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
#define DO_SHRN_UH(N, M, SATP)                                  \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
#define DO_SHRUN_H(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)

/* Rounding variants of the above */
#define DO_RSHRN_SB(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
#define DO_RSHRN_UB(N, M, SATP)                                 \
    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
#define DO_RSHRUN_B(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)

#define DO_RSHRN_SH(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
#define DO_RSHRN_UH(N, M, SATP)                                 \
    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
#define DO_RSHRUN_H(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)

DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)

DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
22852e6a4ce0SPeter Maydell
/*
 * VMOVN: narrow each wide source element and write it into either the
 * even-numbered (TOP == false, "bottom") or odd-numbered (TOP == true,
 * "top") narrow elements of the destination, leaving the other half of
 * the destination unchanged.  All writes go through mergemask() and so
 * honour the element predicate mask.
 */
#define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        mask >>= ESIZE * TOP; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], \
                      m[H##LESIZE(le)], mask); \
        } \
        mve_advance_vpt(env); \
    }

DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t)
DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t)
230554dc78a9SPeter Maydell
/*
 * Saturating narrows (VQMOVN family): like DO_VMOVN, but FN saturates
 * each element into the narrow type, reporting saturation via its
 * bool* argument.  Saturation in a lane whose byte 0 is unpredicated
 * (mask bit 0 set for that element) is accumulated into FPSCR.QC.
 */
#define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        bool qc = false; \
        unsigned le; \
        mask >>= ESIZE * TOP; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            TYPE r = FN(m[H##LESIZE(le)], &sat); \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* Instantiate bottom/top pairs for each element size and signedness */
#define DO_VMOVN_SAT_UB(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
    DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VMOVN_SAT_UH(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
    DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VMOVN_SAT_SB(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
    DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VMOVN_SAT_SH(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
    DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
234254dc78a9SPeter Maydell
/*
 * Per-element saturations for VQMOVN/VQMOVUN:
 * VQMOVN_S*: signed -> signed, VQMOVN_U*: unsigned -> unsigned,
 * VQMOVUN_*: signed -> unsigned.
 */
#define DO_VQMOVN_SB(N, SATP) \
    do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQMOVN_UB(N, SATP) \
    do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP)
#define DO_VQMOVUN_B(N, SATP) \
    do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP)

#define DO_VQMOVN_SH(N, SATP) \
    do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQMOVN_UH(N, SATP) \
    do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP)
#define DO_VQMOVUN_H(N, SATP) \
    do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP)

DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB)
DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH)
DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB)
DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH)
DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B)
DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H)
236354dc78a9SPeter Maydell
23642e6a4ce0SPeter Maydell uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
23652e6a4ce0SPeter Maydell uint32_t shift)
23662e6a4ce0SPeter Maydell {
23672e6a4ce0SPeter Maydell uint32_t *d = vd;
23682e6a4ce0SPeter Maydell uint16_t mask = mve_element_mask(env);
23692e6a4ce0SPeter Maydell unsigned e;
23702e6a4ce0SPeter Maydell uint32_t r;
23712e6a4ce0SPeter Maydell
23722e6a4ce0SPeter Maydell /*
23732e6a4ce0SPeter Maydell * For each 32-bit element, we shift it left, bringing in the
23742e6a4ce0SPeter Maydell * low 'shift' bits of rdm at the bottom. Bits shifted out at
23752e6a4ce0SPeter Maydell * the top become the new rdm, if the predicate mask permits.
23762e6a4ce0SPeter Maydell * The final rdm value is returned to update the register.
23772e6a4ce0SPeter Maydell * shift == 0 here means "shift by 32 bits".
23782e6a4ce0SPeter Maydell */
23792e6a4ce0SPeter Maydell if (shift == 0) {
23802e6a4ce0SPeter Maydell for (e = 0; e < 16 / 4; e++, mask >>= 4) {
23812e6a4ce0SPeter Maydell r = rdm;
23822e6a4ce0SPeter Maydell if (mask & 1) {
23832e6a4ce0SPeter Maydell rdm = d[H4(e)];
23842e6a4ce0SPeter Maydell }
23852e6a4ce0SPeter Maydell mergemask(&d[H4(e)], r, mask);
23862e6a4ce0SPeter Maydell }
23872e6a4ce0SPeter Maydell } else {
23882e6a4ce0SPeter Maydell uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);
23892e6a4ce0SPeter Maydell
23902e6a4ce0SPeter Maydell for (e = 0; e < 16 / 4; e++, mask >>= 4) {
23912e6a4ce0SPeter Maydell r = (d[H4(e)] << shift) | (rdm & shiftmask);
23922e6a4ce0SPeter Maydell if (mask & 1) {
23932e6a4ce0SPeter Maydell rdm = d[H4(e)] >> (32 - shift);
23942e6a4ce0SPeter Maydell }
23952e6a4ce0SPeter Maydell mergemask(&d[H4(e)], r, mask);
23962e6a4ce0SPeter Maydell }
23972e6a4ce0SPeter Maydell }
23982e6a4ce0SPeter Maydell mve_advance_vpt(env);
23992e6a4ce0SPeter Maydell return rdm;
24002e6a4ce0SPeter Maydell }
2401f4ae6c8cSPeter Maydell
/*
 * 64-bit scalar shift helpers.  Right shifts are implemented as left
 * shifts by the negated count; the bool argument selects rounding,
 * and passing &env->QF (instead of NULL) makes the shift saturating,
 * with cumulative saturation recorded in the Q flag.
 */

/* SSHRL: signed shift right (non-rounding, non-saturating) */
uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
}

/* USHLL: unsigned shift left (non-rounding, non-saturating) */
uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, NULL);
}

/* SQSHLL: signed saturating shift left */
uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
}

/* UQSHLL: unsigned saturating shift left */
uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
}

/* SQRSHRL: signed saturating rounding shift right */
uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
}

/* UQRSHLL: unsigned saturating rounding shift left */
uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
}
24310aa4b4c3SPeter Maydell
/*
 * Operate on 64-bit values, but saturate at 48 bits.
 * Signed (rounding) shift left/right: negative 'shift' means shift
 * right by -shift.  A non-NULL 'sat' makes the shift saturating, with
 * *sat set to 1 on saturation.
 */
static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
                                    bool round, uint32_t *sat)
{
    int64_t val, extval;

    /* Shift right by 48 or more: only the sign (or rounding) survives */
    if (shift <= -48) {
        /* Rounding the sign bit always produces 0. */
        if (round) {
            return 0;
        }
        return src >> 63;
    } else if (shift < 0) {
        if (round) {
            /* Rounding: add back the last bit shifted out */
            src >>= -shift - 1;
            val = (src >> 1) + (src & 1);
        } else {
            val = src >> -shift;
        }
        /* Saturated if the value doesn't survive truncation to 48 bits */
        extval = sextract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        /* Detect overflow by shifting back and comparing with the input */
        extval = sextract64(src << shift, 0, 48);
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    /* Saturate to the 48-bit maximum or minimum, sign-extended to 64 bits */
    return src >= 0 ? MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17);
}
24670aa4b4c3SPeter Maydell
/*
 * Operate on 64-bit values, but saturate at 48 bits.
 * Unsigned (rounding) shift: negative 'shift' means shift right by
 * -shift.  A non-NULL 'sat' makes the shift saturating, with *sat
 * set to 1 on saturation.
 */
static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
                                     bool round, uint32_t *sat)
{
    uint64_t result, trunc;

    if (shift <= -(48 + round)) {
        /* Everything (including any rounding bit) is shifted out */
        return 0;
    }

    if (shift < 0) {
        if (round) {
            /* Rounding: add back the last bit shifted out */
            result = src >> (-shift - 1);
            result = (result >> 1) + (result & 1);
        } else {
            result = src >> -shift;
        }
        /* Saturated if the value doesn't fit in 48 bits */
        trunc = extract64(result, 0, 48);
        if (!sat || result == trunc) {
            return trunc;
        }
    } else if (shift < 48) {
        /* Detect overflow by shifting back and comparing with the input */
        trunc = extract64(src << shift, 0, 48);
        if (!sat || src == (trunc >> shift)) {
            return trunc;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return MAKE_64BIT_MASK(0, 48);
}
24990aa4b4c3SPeter Maydell
/* SQRSHRL48: 48-bit saturating rounding shift right (signed) */
uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
}

/* UQRSHLL48: 48-bit saturating rounding shift left (unsigned) */
uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
}

/*
 * 32-bit scalar saturating shifts; cumulative saturation is
 * recorded in env->QF.
 */
uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

/* Rounding variants; SQRSHR shifts right via a negated left-shift count */
uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
}

uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
}
2529395b92d5SPeter Maydell
/*
 * VIDUP-style incrementing dup: write the running 'offset' value into
 * each element (subject to predication), stepping it with FN(offset,
 * imm) per element.  The final offset is returned so the scalar
 * source register can be written back.
 */
#define DO_VIDUP(OP, ESIZE, TYPE, FN) \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \
                              uint32_t offset, uint32_t imm) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], offset, mask); \
            offset = FN(offset, imm); \
        } \
        mve_advance_vpt(env); \
        return offset; \
    }

/* As DO_VIDUP, but the step function also takes a wrap value */
#define DO_VIWDUP(OP, ESIZE, TYPE, FN) \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \
                              uint32_t offset, uint32_t wrap, \
                              uint32_t imm) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], offset, mask); \
            offset = FN(offset, wrap, imm); \
        } \
        mve_advance_vpt(env); \
        return offset; \
    }

/* Expand for byte, halfword and word element sizes */
#define DO_VIDUP_ALL(OP, FN) \
    DO_VIDUP(OP##b, 1, int8_t, FN) \
    DO_VIDUP(OP##h, 2, int16_t, FN) \
    DO_VIDUP(OP##w, 4, int32_t, FN)

#define DO_VIWDUP_ALL(OP, FN) \
    DO_VIWDUP(OP##b, 1, int8_t, FN) \
    DO_VIWDUP(OP##h, 2, int16_t, FN) \
    DO_VIWDUP(OP##w, 4, int32_t, FN)
2570395b92d5SPeter Maydell
/*
 * Incrementing-with-wrap step for VIWDUP: add imm to offset, wrapping
 * back to 0 when the sum hits the wrap value exactly.
 */
static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    uint32_t next = offset + imm;
    return next == wrap ? 0 : next;
}
2579395b92d5SPeter Maydell
/*
 * Decrementing-with-wrap step for VDWDUP: when offset has reached 0,
 * reload it with the wrap value before subtracting imm.
 */
static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    uint32_t base = (offset == 0) ? wrap : offset;
    return base - imm;
}
2588395b92d5SPeter Maydell
/* VIDUP (increment), VIWDUP (increment+wrap), VDWDUP (decrement+wrap) */
DO_VIDUP_ALL(vidup, DO_ADD)
DO_VIWDUP_ALL(viwdup, do_add_wrap)
DO_VIWDUP_ALL(vdwdup, do_sub_wrap)
2592eff5d9a9SPeter Maydell
/*
 * Vector comparison.
 * P0 bits for non-executed beats (where eci_mask is 0) are unchanged.
 * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
 * P0 bits otherwise are updated with the results of the comparisons.
 * We must also keep unchanged the MASK fields at the top of v7m.vpr.
 */
#define DO_VCMP(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \
    { \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        uint16_t eci_mask = mve_eci_mask(env); \
        uint16_t beatpred = 0; \
        /* emask selects the P0 bits belonging to the current element */ \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++) { \
            bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]); \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask; \
            emask <<= ESIZE; \
        } \
        /* Zero out lanes that are predicated away */ \
        beatpred &= mask; \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
                       (beatpred & eci_mask); \
        mve_advance_vpt(env); \
    }
2620eff5d9a9SPeter Maydell
/*
 * Vector comparison against a scalar value in rm; the P0 update rules
 * are the same as for the vector-vector DO_VCMP form.
 */
#define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *n = vn; \
        uint16_t mask = mve_element_mask(env); \
        uint16_t eci_mask = mve_eci_mask(env); \
        uint16_t beatpred = 0; \
        /* emask selects the P0 bits belonging to the current element */ \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++) { \
            bool r = FN(n[H##ESIZE(e)], (TYPE)rm); \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask; \
            emask <<= ESIZE; \
        } \
        /* Zero out lanes that are predicated away */ \
        beatpred &= mask; \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
                       (beatpred & eci_mask); \
        mve_advance_vpt(env); \
    }
2642cce81873SPeter Maydell
/*
 * Expand one comparison for byte/halfword/word element sizes, in both
 * vector-vector and vector-scalar forms (signed and unsigned variants).
 */
#define DO_VCMP_S(OP, FN) \
    DO_VCMP(OP##b, 1, int8_t, FN) \
    DO_VCMP(OP##h, 2, int16_t, FN) \
    DO_VCMP(OP##w, 4, int32_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN)

#define DO_VCMP_U(OP, FN) \
    DO_VCMP(OP##b, 1, uint8_t, FN) \
    DO_VCMP(OP##h, 2, uint16_t, FN) \
    DO_VCMP(OP##w, 4, uint32_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN)
2658eff5d9a9SPeter Maydell
/*
 * Elementwise comparison operators used by the VCMP expansions.
 * (The duplicated DO_EQ definitions were redundant benign
 * redefinitions; define each operator exactly once.)
 */
#define DO_EQ(N, M) ((N) == (M))
#define DO_NE(N, M) ((N) != (M))
#define DO_GE(N, M) ((N) >= (M))
#define DO_LT(N, M) ((N) < (M))
#define DO_GT(N, M) ((N) > (M))
#define DO_LE(N, M) ((N) <= (M))
2667eff5d9a9SPeter Maydell
/*
 * VCMPEQ/VCMPNE are sign-agnostic, so they use the unsigned expansion;
 * VCMPCS/VCMPHI are the unsigned >= / > comparisons, and
 * VCMPGE/VCMPLT/VCMPGT/VCMPLE the signed ones.
 */
DO_VCMP_U(vcmpeq, DO_EQ)
DO_VCMP_U(vcmpne, DO_NE)
DO_VCMP_U(vcmpcs, DO_GE)
DO_VCMP_U(vcmphi, DO_GT)
DO_VCMP_S(vcmpge, DO_GE)
DO_VCMP_S(vcmplt, DO_LT)
DO_VCMP_S(vcmpgt, DO_GT)
DO_VCMP_S(vcmple, DO_LE)
2676c386443bSPeter Maydell
2677c386443bSPeter Maydell void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm)
2678c386443bSPeter Maydell {
2679c386443bSPeter Maydell /*
2680c386443bSPeter Maydell * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n]
2681c386443bSPeter Maydell * but note that whether bytes are written to Qd is still subject
2682c386443bSPeter Maydell * to (all forms of) predication in the usual way.
2683c386443bSPeter Maydell */
2684c386443bSPeter Maydell uint64_t *d = vd, *n = vn, *m = vm;
2685c386443bSPeter Maydell uint16_t mask = mve_element_mask(env);
2686c386443bSPeter Maydell uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
2687c386443bSPeter Maydell unsigned e;
2688c386443bSPeter Maydell for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) {
2689c386443bSPeter Maydell uint64_t r = m[H8(e)];
2690c386443bSPeter Maydell mergemask(&r, n[H8(e)], p0);
2691c386443bSPeter Maydell mergemask(&d[H8(e)], r, mask);
2692c386443bSPeter Maydell }
2693c386443bSPeter Maydell mve_advance_vpt(env);
2694c386443bSPeter Maydell }
2695398e7cd3SPeter Maydell
void HELPER(mve_vpnot)(CPUARMState *env)
{
    /*
     * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged.
     * P0 bits for predicated lanes in executed bits (where mask is 0) are 0.
     * P0 bits otherwise are inverted.
     * (This is the same logic as VCMP.)
     * This insn is itself subject to predication and to beat-wise execution,
     * and after it executes VPT state advances in the usual way.
     */
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    /* Invert P0, then zero lanes that are predicated away */
    uint16_t beatpred = ~env->v7m.vpr & mask;
    /* Only update the P0 bits for beats actually executed this time */
    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask);
    mve_advance_vpt(env);
}
2712fea3958fSPeter Maydell
/*
 * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed,
 * otherwise set according to value of Rn. The calculation of
 * newmask here works in the same way as the calculation of the
 * ltpmask in mve_element_mask(), but we have pre-calculated
 * the masklen in the generated code.
 */
void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen)
{
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    uint16_t newmask;

    assert(masklen <= 16);
    /* Set the low 'masklen' P0 bits (masklen == 0 means set none) */
    newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
    /* Zero out lanes that are predicated away */
    newmask &= mask;
    /* Only touch P0 bits for beats executed in this insn's ECI slice */
    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask);
    mve_advance_vpt(env);
}
27320f31e37cSPeter Maydell
/*
 * 1-operand saturating operation: FN produces the result element and
 * reports saturation through its bool* argument; saturation in a lane
 * whose byte 0 is unpredicated sets FPSCR.QC.
 */
#define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }
2750398e7cd3SPeter Maydell
/*
 * VQABS: saturating absolute value; VQNEG: saturating negation.
 * Working at 64 bits means negating/taking the absolute value of
 * INT_MIN cannot overflow before do_sat_bhs() clamps the result.
 */
#define DO_VQABS_B(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQABS_H(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQABS_W(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP)

#define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP)
#define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP)
#define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP)

DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B)
DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H)
DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W)

DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B)
DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H)
DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W)
2769d5c571eaSPeter Maydell
/*
 * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its
 * absolute value; we then do an unsigned comparison.
 */
#define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        UTYPE *d = vd; \
        STYPE *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            /* |m| compared against d as unsigned values */ \
            UTYPE r = DO_ABS(m[H##ESIZE(e)]); \
            r = FN(d[H##ESIZE(e)], r); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX)
DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX)
DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX)
DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN)
DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN)
DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN)
27951e35cd91SPeter Maydell
/*
 * 2-operand floating point. Note that if an element is partially
 * predicated we must do the FP operation to update the non-predicated
 * bytes, but we must be careful to avoid updating the FP exception
 * state unless byte 0 of the element was unpredicated.
 */
#define DO_2OP_FP(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vn, void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        TYPE r; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        float_status *fpst; \
        float_status scratch_fpst; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
                /* Fully predicated element: leave destination alone */ \
                continue; \
            } \
            /* 16-bit elements use the half-precision FP status */ \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & 1)) { \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
            } \
            r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

/* Expand an FP op for both half- and single-precision elements */
#define DO_2OP_FP_ALL(OP, FN)  \
    DO_2OP_FP(OP##h, 2, float16, float16_##FN) \
    DO_2OP_FP(OP##s, 4, float32, float32_##FN)
28311e35cd91SPeter Maydell
DO_2OP_FP_ALL(vfadd,add)28321e35cd91SPeter Maydell DO_2OP_FP_ALL(vfadd, add)
283382af0153SPeter Maydell DO_2OP_FP_ALL(vfsub, sub)
283482af0153SPeter Maydell DO_2OP_FP_ALL(vfmul, mul)
283582af0153SPeter Maydell
283682af0153SPeter Maydell static inline float16 float16_abd(float16 a, float16 b, float_status *s)
283782af0153SPeter Maydell {
283882af0153SPeter Maydell return float16_abs(float16_sub(a, b, s));
283982af0153SPeter Maydell }
284082af0153SPeter Maydell
float32_abd(float32 a,float32 b,float_status * s)284182af0153SPeter Maydell static inline float32 float32_abd(float32 a, float32 b, float_status *s)
284282af0153SPeter Maydell {
284382af0153SPeter Maydell return float32_abs(float32_sub(a, b, s));
284482af0153SPeter Maydell }
284582af0153SPeter Maydell
/* Absolute difference, and IEEE maxNum/minNum max/min */
DO_2OP_FP_ALL(vfabd, abd)
DO_2OP_FP_ALL(vmaxnm, maxnum)
DO_2OP_FP_ALL(vminnm, minnum)
2849104afc68SPeter Maydell
285090257a4fSPeter Maydell static inline float16 float16_maxnuma(float16 a, float16 b, float_status *s)
285190257a4fSPeter Maydell {
285290257a4fSPeter Maydell return float16_maxnum(float16_abs(a), float16_abs(b), s);
285390257a4fSPeter Maydell }
285490257a4fSPeter Maydell
float32_maxnuma(float32 a,float32 b,float_status * s)285590257a4fSPeter Maydell static inline float32 float32_maxnuma(float32 a, float32 b, float_status *s)
285690257a4fSPeter Maydell {
285790257a4fSPeter Maydell return float32_maxnum(float32_abs(a), float32_abs(b), s);
285890257a4fSPeter Maydell }
285990257a4fSPeter Maydell
float16_minnuma(float16 a,float16 b,float_status * s)286090257a4fSPeter Maydell static inline float16 float16_minnuma(float16 a, float16 b, float_status *s)
286190257a4fSPeter Maydell {
286290257a4fSPeter Maydell return float16_minnum(float16_abs(a), float16_abs(b), s);
286390257a4fSPeter Maydell }
286490257a4fSPeter Maydell
float32_minnuma(float32 a,float32 b,float_status * s)286590257a4fSPeter Maydell static inline float32 float32_minnuma(float32 a, float32 b, float_status *s)
286690257a4fSPeter Maydell {
286790257a4fSPeter Maydell return float32_minnum(float32_abs(a), float32_abs(b), s);
286890257a4fSPeter Maydell }
286990257a4fSPeter Maydell
/* VMAXNMA/VMINNMA: max/min of operand magnitudes */
DO_2OP_FP_ALL(vmaxnma, maxnuma)
DO_2OP_FP_ALL(vminnma, minnuma)
287290257a4fSPeter Maydell
/*
 * Vector complex add with rotation. Lanes are handled in pairs:
 * FN0 combines each even lane of n with the following odd lane of m,
 * and FN1 combines each odd lane of n with the preceding even lane of m.
 * The 90 and 270 degree rotations differ only in which of the pair is
 * an add and which a subtract (see the instantiations below).
 */
#define DO_VCADD_FP(OP, ESIZE, TYPE, FN0, FN1)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r[16 / ESIZE];                                             \
        uint16_t tm, mask = mve_element_mask(env);                      \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        /* Calculate all results first to avoid overwriting inputs */   \
        for (e = 0, tm = mask; e < 16 / ESIZE; e++, tm >>= ESIZE) {     \
            if ((tm & MAKE_64BIT_MASK(0, ESIZE)) == 0) {                \
                r[e] = 0;                                               \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(tm & 1)) {                                            \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            if (!(e & 1)) {                                             \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)], fpst);   \
            } else {                                                    \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)], fpst);   \
            }                                                           \
        }                                                               \
        /* Second pass: write back only the predicated bytes */         \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], r[e], mask);                     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VCADD_FP(vfcadd90h, 2, float16, float16_sub, float16_add)
DO_VCADD_FP(vfcadd90s, 4, float32, float32_sub, float32_add)
DO_VCADD_FP(vfcadd270h, 2, float16, float16_add, float16_sub)
DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub)
29113173c0ddSPeter Maydell
/*
 * Vector fused multiply-accumulate: d[e] = (+/- n[e]) * m[e] + d[e].
 * CHS ("change sign") negates the first multiplicand before the
 * fused multiply-add, which yields the VFMS forms.
 */
#define DO_VFMA(OP, ESIZE, TYPE, CHS)                                   \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = n[H##ESIZE(e)];                                         \
            if (CHS) {                                                  \
                r = TYPE##_chs(r);                                      \
            }                                                           \
            r = TYPE##_muladd(r, m[H##ESIZE(e)], d[H##ESIZE(e)],        \
                              0, fpst);                                 \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VFMA(vfmah, 2, float16, false)
DO_VFMA(vfmas, 4, float32, false)
DO_VFMA(vfmsh, 2, float16, true)
DO_VFMA(vfmss, 4, float32, true)
2947d3cd965cSPeter Maydell
/*
 * Complex multiply (VCMUL) and complex multiply-accumulate (VCMLA).
 * Elements are processed in (real, imaginary) pairs. ROT (0..3)
 * selects the 0/90/180/270 degree rotation by choosing which lanes
 * of m and n feed each of the two products and which operands are
 * negated. FN is either a plain multiply (the DO_VCMUL* macros,
 * which ignore their D argument) or a fused multiply-add into the
 * destination lane (the DO_VCMLA* macros).
 *
 * Two float_status pointers are tracked because the two lanes of a
 * pair may be predicated independently: a lane whose predicate bit
 * is clear still needs its result computed, but must not update the
 * cumulative FP exception flags, so it uses a scratch status copy.
 */
#define DO_VCMLA(OP, ESIZE, TYPE, ROT, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r0, r1, e1, e2, e3, e4;                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst0, *fpst1;                                    \
        float_status scratch_fpst;                                      \
        /* We loop through pairs of elements at a time */               \
        for (e = 0; e < 16 / ESIZE; e += 2, mask >>= ESIZE * 2) {       \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) {          \
                continue;                                               \
            }                                                           \
            fpst0 = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            fpst1 = fpst0;                                              \
            if (!(mask & 1)) {                                          \
                scratch_fpst = *fpst0;                                  \
                fpst0 = &scratch_fpst;                                  \
            }                                                           \
            if (!(mask & (1 << ESIZE))) {                               \
                scratch_fpst = *fpst1;                                  \
                fpst1 = &scratch_fpst;                                  \
            }                                                           \
            /* e1/e2 form the first product, e3/e4 the second */        \
            switch (ROT) {                                              \
            case 0:                                                     \
                e1 = m[H##ESIZE(e)];                                    \
                e2 = n[H##ESIZE(e)];                                    \
                e3 = m[H##ESIZE(e + 1)];                                \
                e4 = n[H##ESIZE(e)];                                    \
                break;                                                  \
            case 1:                                                     \
                e1 = TYPE##_chs(m[H##ESIZE(e + 1)]);                    \
                e2 = n[H##ESIZE(e + 1)];                                \
                e3 = m[H##ESIZE(e)];                                    \
                e4 = n[H##ESIZE(e + 1)];                                \
                break;                                                  \
            case 2:                                                     \
                e1 = TYPE##_chs(m[H##ESIZE(e)]);                        \
                e2 = n[H##ESIZE(e)];                                    \
                e3 = TYPE##_chs(m[H##ESIZE(e + 1)]);                    \
                e4 = n[H##ESIZE(e)];                                    \
                break;                                                  \
            case 3:                                                     \
                e1 = m[H##ESIZE(e + 1)];                                \
                e2 = n[H##ESIZE(e + 1)];                                \
                e3 = TYPE##_chs(m[H##ESIZE(e)]);                        \
                e4 = n[H##ESIZE(e + 1)];                                \
                break;                                                  \
            default:                                                    \
                g_assert_not_reached();                                 \
            }                                                           \
            r0 = FN(e2, e1, d[H##ESIZE(e)], fpst0);                     \
            r1 = FN(e4, e3, d[H##ESIZE(e + 1)], fpst1);                 \
            mergemask(&d[H##ESIZE(e)], r0, mask);                       \
            mergemask(&d[H##ESIZE(e + 1)], r1, mask >> ESIZE);          \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Plain multiply: the D (accumulator) argument is ignored */
#define DO_VCMULH(N, M, D, S) float16_mul(N, M, S)
#define DO_VCMULS(N, M, D, S) float32_mul(N, M, S)

/* Fused multiply-accumulate into D */
#define DO_VCMLAH(N, M, D, S) float16_muladd(N, M, D, 0, S)
#define DO_VCMLAS(N, M, D, S) float32_muladd(N, M, D, 0, S)

DO_VCMLA(vcmul0h, 2, float16, 0, DO_VCMULH)
DO_VCMLA(vcmul0s, 4, float32, 0, DO_VCMULS)
DO_VCMLA(vcmul90h, 2, float16, 1, DO_VCMULH)
DO_VCMLA(vcmul90s, 4, float32, 1, DO_VCMULS)
DO_VCMLA(vcmul180h, 2, float16, 2, DO_VCMULH)
DO_VCMLA(vcmul180s, 4, float32, 2, DO_VCMULS)
DO_VCMLA(vcmul270h, 2, float16, 3, DO_VCMULH)
DO_VCMLA(vcmul270s, 4, float32, 3, DO_VCMULS)

DO_VCMLA(vcmla0h, 2, float16, 0, DO_VCMLAH)
DO_VCMLA(vcmla0s, 4, float32, 0, DO_VCMLAS)
DO_VCMLA(vcmla90h, 2, float16, 1, DO_VCMLAH)
DO_VCMLA(vcmla90s, 4, float32, 1, DO_VCMLAS)
DO_VCMLA(vcmla180h, 2, float16, 2, DO_VCMLAH)
DO_VCMLA(vcmla180s, 4, float32, 2, DO_VCMLAS)
DO_VCMLA(vcmla270h, 2, float16, 3, DO_VCMLAH)
DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS)
3032abfe39b2SPeter Maydell
/*
 * 2-operand FP with a scalar second operand: every active lane of n
 * is combined with the same scalar value rm (truncated to the lane
 * type).
 */
#define DO_2OP_FP_SCALAR(OP, ESIZE, TYPE, FN)                           \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, uint32_t rm)        \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE r, m = rm;                                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m, fpst);                            \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* Instantiate for both element sizes, as for DO_2OP_FP_ALL */
#define DO_2OP_FP_SCALAR_ALL(OP, FN)                  \
    DO_2OP_FP_SCALAR(OP##h, 2, float16, float16_##FN) \
    DO_2OP_FP_SCALAR(OP##s, 4, float32, float32_##FN)

DO_2OP_FP_SCALAR_ALL(vfadd_scalar, add)
DO_2OP_FP_SCALAR_ALL(vfsub_scalar, sub)
DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul)
30664773e74eSPeter Maydell
/*
 * Fused multiply-accumulate with a scalar operand. FN is called as
 * FN(vector lane, scalar, destination lane, 0, status); the helper
 * macros below reorder the arguments to implement either
 * vector * scalar + vector (VFMA) or vector * vector + scalar (VFMAS).
 */
#define DO_2OP_FP_ACC_SCALAR(OP, ESIZE, TYPE, FN)                       \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, uint32_t rm)        \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE r, m = rm;                                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m, d[H##ESIZE(e)], 0, fpst);         \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* VFMAS is vector * vector + scalar, so swap op2 and op3 */
#define DO_VFMAS_SCALARH(N, M, D, F, S) float16_muladd(N, D, M, F, S)
#define DO_VFMAS_SCALARS(N, M, D, F, S) float32_muladd(N, D, M, F, S)

/* VFMA is vector * scalar + vector */
DO_2OP_FP_ACC_SCALAR(vfma_scalarh, 2, float16, float16_muladd)
DO_2OP_FP_ACC_SCALAR(vfma_scalars, 4, float32, float32_muladd)
DO_2OP_FP_ACC_SCALAR(vfmas_scalarh, 2, float16, DO_VFMAS_SCALARH)
DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS)
310229f80e7dSPeter Maydell
/*
 * Floating point max/min across vector, folded into the scalar
 * accumulator ra. Only lanes whose low predicate bit is set take
 * part. Signaling NaNs in either operand are silenced (raising
 * Invalid) before the maxnum/minnum comparison; if ABS is true the
 * absolute value of each element is used (the VMAXNMAV/VMINNMAV
 * forms).
 */
#define DO_FP_VMAXMINV(OP, ESIZE, TYPE, ABS, FN)                        \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm,         \
                                    uint32_t ra_in)                     \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *m = vm;                                                   \
        TYPE ra = (TYPE)ra_in;                                          \
        float_status *fpst =                                            \
            &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD];  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                TYPE v = m[H##ESIZE(e)];                                \
                if (TYPE##_is_signaling_nan(ra, fpst)) {                \
                    ra = TYPE##_silence_nan(ra, fpst);                  \
                    float_raise(float_flag_invalid, fpst);              \
                }                                                       \
                if (TYPE##_is_signaling_nan(v, fpst)) {                 \
                    v = TYPE##_silence_nan(v, fpst);                    \
                    float_raise(float_flag_invalid, fpst);              \
                }                                                       \
                if (ABS) {                                              \
                    v = TYPE##_abs(v);                                  \
                }                                                       \
                ra = FN(ra, v, fpst);                                   \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return ra;                                                      \
    }                                                                   \

#define NOP(X) (X)

DO_FP_VMAXMINV(vmaxnmvh, 2, float16, false, float16_maxnum)
DO_FP_VMAXMINV(vmaxnmvs, 4, float32, false, float32_maxnum)
DO_FP_VMAXMINV(vminnmvh, 2, float16, false, float16_minnum)
DO_FP_VMAXMINV(vminnmvs, 4, float32, false, float32_minnum)
DO_FP_VMAXMINV(vmaxnmavh, 2, float16, true, float16_maxnum)
DO_FP_VMAXMINV(vmaxnmavs, 4, float32, true, float32_maxnum)
DO_FP_VMAXMINV(vminnmavh, 2, float16, true, float16_minnum)
DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum)
3145c87fe6d2SPeter Maydell
/* FP compares; note that all comparisons signal InvalidOp for QNaNs */
/*
 * Vector-vs-vector compare: accumulate a per-byte predicate result
 * in beatpred, then merge it into VPR for only those beats actually
 * executed this insn (eci_mask), leaving other VPR bits untouched.
 */
#define DO_VCMP_FP(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm)   \
    {                                                                   \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        bool r;                                                         \
        for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) {             \
            if ((mask & emask) == 0) {                                  \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & (1 << (e * ESIZE)))) {                         \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst);               \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }
3178c87fe6d2SPeter Maydell
/*
 * As DO_VCMP_FP, but the second operand is the scalar rm (truncated
 * to the lane type) rather than a vector.
 */
#define DO_VCMP_FP_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,             \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *n = vn;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        bool r;                                                         \
        for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) {             \
            if ((mask & emask) == 0) {                                  \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & (1 << (e * ESIZE)))) {                         \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], (TYPE)rm, fpst);                     \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

/* Instantiate both the vector and the scalar form of a comparison */
#define DO_VCMP_FP_BOTH(VOP, SOP, ESIZE, TYPE, FN) \
    DO_VCMP_FP(VOP, ESIZE, TYPE, FN)               \
    DO_VCMP_FP_SCALAR(SOP, ESIZE, TYPE, FN)
3215c2d8f6bbSPeter Maydell
/*
 * Some care is needed here to get the correct result for the unordered case.
 * Architecturally EQ, GE and GT are defined to be false for unordered, but
 * the NE, LT and LE comparisons are defined as simple logical inverses of
 * EQ, GE and GT and so they must return true for unordered. The softfloat
 * comparison functions float*_{eq,le,lt} all return false for unordered.
 */
/* X >= Y is implemented as Y <= X, and X > Y as Y < X */
#define DO_GE16(X, Y, S) float16_le(Y, X, S)
#define DO_GE32(X, Y, S) float32_le(Y, X, S)
#define DO_GT16(X, Y, S) float16_lt(Y, X, S)
#define DO_GT32(X, Y, S) float32_lt(Y, X, S)

DO_VCMP_FP_BOTH(vfcmpeqh, vfcmpeq_scalarh, 2, float16, float16_eq)
DO_VCMP_FP_BOTH(vfcmpeqs, vfcmpeq_scalars, 4, float32, float32_eq)

/*
 * Passing "!float16_eq" etc as FN relies on textual macro expansion:
 * FN(a, b, s) becomes !float16_eq(a, b, s), i.e. the logical inverse.
 */
DO_VCMP_FP_BOTH(vfcmpneh, vfcmpne_scalarh, 2, float16, !float16_eq)
DO_VCMP_FP_BOTH(vfcmpnes, vfcmpne_scalars, 4, float32, !float32_eq)

DO_VCMP_FP_BOTH(vfcmpgeh, vfcmpge_scalarh, 2, float16, DO_GE16)
DO_VCMP_FP_BOTH(vfcmpges, vfcmpge_scalars, 4, float32, DO_GE32)

DO_VCMP_FP_BOTH(vfcmplth, vfcmplt_scalarh, 2, float16, !DO_GE16)
DO_VCMP_FP_BOTH(vfcmplts, vfcmplt_scalars, 4, float32, !DO_GE32)

DO_VCMP_FP_BOTH(vfcmpgth, vfcmpgt_scalarh, 2, float16, DO_GT16)
DO_VCMP_FP_BOTH(vfcmpgts, vfcmpgt_scalars, 4, float32, DO_GT32)

DO_VCMP_FP_BOTH(vfcmpleh, vfcmple_scalarh, 2, float16, !DO_GT16)
DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32)
32452a4b939cSPeter Maydell
/*
 * Conversions between floating point and fixed point, with the
 * fixed-point position given by the shift immediate. FN is one of
 * the vfp conversion helpers (int-to-float or float-to-int with
 * round-to-zero, as instantiated below).
 */
#define DO_VCVT_FIXED(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm,   \
                                uint32_t shift)                         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(m[H##ESIZE(e)], shift, fpst);                        \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VCVT_FIXED(vcvt_sh, 2, int16_t, helper_vfp_shtoh)
DO_VCVT_FIXED(vcvt_uh, 2, uint16_t, helper_vfp_uhtoh)
DO_VCVT_FIXED(vcvt_hs, 2, int16_t, helper_vfp_toshh_round_to_zero)
DO_VCVT_FIXED(vcvt_hu, 2, uint16_t, helper_vfp_touhh_round_to_zero)
DO_VCVT_FIXED(vcvt_sf, 4, int32_t, helper_vfp_sltos)
DO_VCVT_FIXED(vcvt_uf, 4, uint32_t, helper_vfp_ultos)
DO_VCVT_FIXED(vcvt_fs, 4, int32_t, helper_vfp_tosls_round_to_zero)
DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero)
328053fc5f61SPeter Maydell
/* VCVT with specified rmode */
/*
 * The requested rounding mode is installed on the shared base
 * float_status for the duration of the loop and restored afterwards.
 * Unpredicated lanes copy the base status (including the temporary
 * rounding mode) into a scratch status so the rounding behaviour is
 * the same but the cumulative flags are not updated. FN is called
 * with a shift of 0.
 */
#define DO_VCVT_RMODE(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vm, uint32_t rmode)     \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        float_status *base_fpst =                                       \
            &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD];  \
        uint32_t prev_rmode = get_float_rounding_mode(base_fpst);       \
        set_float_rounding_mode(rmode, base_fpst);                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = base_fpst;                                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(m[H##ESIZE(e)], 0, fpst);                            \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        set_float_rounding_mode(prev_rmode, base_fpst);                 \
        mve_advance_vpt(env);                                           \
    }

DO_VCVT_RMODE(vcvt_rm_sh, 2, uint16_t, helper_vfp_toshh)
DO_VCVT_RMODE(vcvt_rm_uh, 2, uint16_t, helper_vfp_touhh)
DO_VCVT_RMODE(vcvt_rm_ss, 4, uint32_t, helper_vfp_tosls)
DO_VCVT_RMODE(vcvt_rm_us, 4, uint32_t, helper_vfp_touls)

/*
 * VRINT reuses DO_VCVT_RMODE: these wrappers discard the unused
 * shift (F) argument so helper_rinth/rints fit the FN signature.
 */
#define DO_VRINT_RM_H(M, F, S) helper_rinth(M, S)
#define DO_VRINT_RM_S(M, F, S) helper_rints(M, S)

DO_VCVT_RMODE(vrint_rm_h, 2, uint16_t, DO_VRINT_RM_H)
DO_VCVT_RMODE(vrint_rm_s, 4, uint32_t, DO_VRINT_RM_S)
332398e40fbdSPeter Maydell
/*
 * VCVT between halfprec and singleprec. As usual for halfprec
 * conversions, FZ16 is ignored and AHP is observed.
 */
/*
 * Narrow each 32-bit source lane to a 16-bit result, writing either
 * the bottom (top == 0) or top (top == 1) half of each 32-bit
 * destination container. Flush-to-zero is disabled on the base
 * status for the duration of the loop and restored afterwards.
 */
static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top)
{
    uint16_t *d = vd;
    uint32_t *m = vm;
    uint16_t r;
    uint16_t mask = mve_element_mask(env);
    /* AHP clear selects IEEE half-precision format */
    bool ieee = !(env->vfp.fpcr & FPCR_AHP);
    unsigned e;
    float_status *fpst;
    float_status scratch_fpst;
    float_status *base_fpst = &env->vfp.fp_status[FPST_STD];
    bool old_fz = get_flush_to_zero(base_fpst);
    set_flush_to_zero(false, base_fpst);
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
            continue;
        }
        fpst = base_fpst;
        if (!(mask & 1)) {
            /* We need the result but without updating flags */
            scratch_fpst = *fpst;
            fpst = &scratch_fpst;
        }
        r = float32_to_float16(m[H4(e)], ieee, fpst);
        /* Shift the mask so the bits for the selected half line up */
        mergemask(&d[H2(e * 2 + top)], r, mask >> (top * 2));
    }
    set_flush_to_zero(old_fz, base_fpst);
    mve_advance_vpt(env);
}
335773d260dbSPeter Maydell
/*
 * Widen the bottom (top == 0) or top (top == 1) 16-bit half of each
 * 32-bit source container to a 32-bit destination lane. The inverse
 * of do_vcvt_sh; here flush-inputs-to-zero is temporarily disabled
 * on the base status instead of flush-to-zero.
 */
static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top)
{
    uint32_t *d = vd;
    uint16_t *m = vm;
    uint32_t r;
    uint16_t mask = mve_element_mask(env);
    /* AHP clear selects IEEE half-precision format */
    bool ieee = !(env->vfp.fpcr & FPCR_AHP);
    unsigned e;
    float_status *fpst;
    float_status scratch_fpst;
    float_status *base_fpst = &env->vfp.fp_status[FPST_STD];
    bool old_fiz = get_flush_inputs_to_zero(base_fpst);
    set_flush_inputs_to_zero(false, base_fpst);
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
            continue;
        }
        fpst = base_fpst;
        /* Predication is taken from the source half being read */
        if (!(mask & (1 << (top * 2)))) {
            /* We need the result but without updating flags */
            scratch_fpst = *fpst;
            fpst = &scratch_fpst;
        }
        r = float16_to_float32(m[H2(e * 2 + top)], ieee, fpst);
        mergemask(&d[H4(e)], r, mask);
    }
    set_flush_inputs_to_zero(old_fiz, base_fpst);
    mve_advance_vpt(env);
}
338773d260dbSPeter Maydell
HELPER(mve_vcvtb_sh)338873d260dbSPeter Maydell void HELPER(mve_vcvtb_sh)(CPUARMState *env, void *vd, void *vm)
338973d260dbSPeter Maydell {
339073d260dbSPeter Maydell do_vcvt_sh(env, vd, vm, 0);
339173d260dbSPeter Maydell }
/*
 * VCVTT f32 -> f16: top = 1 selects the top (odd-indexed) half of
 * each lane; conversion itself is in do_vcvt_sh (defined earlier).
 */
void HELPER(mve_vcvtt_sh)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_sh(env, vd, vm, 1);
}
/* VCVTB f16 -> f32: widen the bottom (even-indexed) f16 of each lane */
void HELPER(mve_vcvtb_hs)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_hs(env, vd, vm, 0);
}
/* VCVTT f16 -> f32: widen the top (odd-indexed) f16 of each lane */
void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_hs(env, vd, vm, 1);
}
340498e40fbdSPeter Maydell
340598e40fbdSPeter Maydell #define DO_1OP_FP(OP, ESIZE, TYPE, FN) \
340698e40fbdSPeter Maydell void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm) \
340798e40fbdSPeter Maydell { \
340898e40fbdSPeter Maydell TYPE *d = vd, *m = vm; \
340998e40fbdSPeter Maydell TYPE r; \
341098e40fbdSPeter Maydell uint16_t mask = mve_element_mask(env); \
341198e40fbdSPeter Maydell unsigned e; \
341298e40fbdSPeter Maydell float_status *fpst; \
341398e40fbdSPeter Maydell float_status scratch_fpst; \
341498e40fbdSPeter Maydell for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
341598e40fbdSPeter Maydell if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
341698e40fbdSPeter Maydell continue; \
341798e40fbdSPeter Maydell } \
3418f81c4698SRichard Henderson fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \
341998e40fbdSPeter Maydell if (!(mask & 1)) { \
342098e40fbdSPeter Maydell /* We need the result but without updating flags */ \
342198e40fbdSPeter Maydell scratch_fpst = *fpst; \
342298e40fbdSPeter Maydell fpst = &scratch_fpst; \
342398e40fbdSPeter Maydell } \
342498e40fbdSPeter Maydell r = FN(m[H##ESIZE(e)], fpst); \
342598e40fbdSPeter Maydell mergemask(&d[H##ESIZE(e)], r, mask); \
342698e40fbdSPeter Maydell } \
342798e40fbdSPeter Maydell mve_advance_vpt(env); \
342898e40fbdSPeter Maydell }
342998e40fbdSPeter Maydell
343098e40fbdSPeter Maydell DO_1OP_FP(vrintx_h, 2, float16, float16_round_to_int)
343198e40fbdSPeter Maydell DO_1OP_FP(vrintx_s, 4, float32, float32_round_to_int)
3432