/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      of the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        mask &= MAKE_64BIT_MASK(0, masklen);
    }

    if ((env->condexec_bits & 0xf) == 0) {
        /*
         * ECI bits indicate which beats are already executed;
         * we handle this by effectively predicating them out.
         */
        int eci = env->condexec_bits >> 4;
        switch (eci) {
        case ECI_NONE:
            break;
        case ECI_A0:
            mask &= 0xfff0;
            break;
        case ECI_A0A1:
            mask &= 0xff00;
            break;
        case ECI_A0A1A2:
        case ECI_A0A1A2B0:
            mask &= 0xf000;
            break;
        default:
            g_assert_not_reached();
        }
    }

    return mask;
}
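
/*
 * Worked example: with VPT inactive (MASK01/MASK23 zero, so the mask
 * starts as 0xffff), tail predication active, ltpsize == 2 (32-bit
 * elements) and LR == 2, masklen is 2 << 2 == 8 and only bits [7:0]
 * survive. A 32-bit op, which tests bits 0 and 4, then processes just
 * the two lowest lanes; if ECI additionally reports beat 0 as already
 * executed (ECI_A0), bits [3:0] are cleared too and only lane 1
 * remains active.
 */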

static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    if (mask01 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff;
    }
    if (mask23 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff00;
    }
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}
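
/*
 * For instance, if MASK01 is 0b1100 on entry then the high bit is set
 * but the value is not 0b1000, so this call flips the low byte of P0
 * (switching between the T and E lanes of the VPT block) and shifts
 * MASK01 up to 0b1000; the following call performs no inversion and
 * shifts the last bit out, leaving MASK01 == 0 with the VPT block
 * finished.
 */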


#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        /*                                                              \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned   \
         * beats so we don't care if we update part of the dest and     \
         * then take an exception.                                      \
         */                                                             \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR

/*
 * The mergemask(D, R, M) macro performs the operation "*D = R", but
 * stores only those bytes which correspond to 1 bits in M, leaving
 * other bytes in *D unchanged. We use _Generic to select the correct
 * implementation based on the type of D.
 */

static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b_data[mask & 3];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b_data[mask & 0xf];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b_data[mask & 0xff];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

#define mergemask(D, R, M)                      \
    _Generic(D,                                 \
             uint8_t *: mergemask_ub,           \
             int8_t *:  mergemask_sb,           \
             uint16_t *: mergemask_uh,          \
             int16_t *:  mergemask_sh,          \
             uint32_t *: mergemask_uw,          \
             int32_t *:  mergemask_sw,          \
             uint64_t *: mergemask_uq,          \
             int64_t *:  mergemask_sq)(D, R, M)
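
/*
 * Example: for a uint16_t destination lane whose two predicate bits
 * are 0b10, mergemask_uh looks up expand_pred_b_data[2], which
 * truncates to a byte mask of 0xff00, and so writes only the high
 * byte of the halfword. This matches the architected byte granularity
 * of MVE predication, under which a lane can be partially updated.
 */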

void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
{
    /*
     * The generated code already replicated an 8 or 16 bit constant
     * into the 32-bit value, so we only need to write the 32-bit
     * value to all elements of the Qreg, allowing for predication.
     */
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        mergemask(&d[H4(e)], val, mask);
    }
    mve_advance_vpt(env);
}

#define DO_1OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask);       \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_CLS_B(N)   (clrsb32(N) - 24)
#define DO_CLS_H(N)   (clrsb32(N) - 16)

DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)

#define DO_CLZ_B(N)   (clz32(N) - 24)
#define DO_CLZ_H(N)   (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)
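
/*
 * The VREV patterns fall out of treating the vector as wider elements
 * and byte/half/word-swapping within them: e.g. vrev32h applies
 * hswap32 to each uint32_t element, exchanging its two halfwords,
 * which is exactly VREV32.16.
 */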

#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)

#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N)  ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N)  ((N) & dup_const(MO_32, 0x7fffffff))

DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)

/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)

#define DO_NEG(N)    (-(N))
#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))

DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)

/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
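
/*
 * Operating on 64 bits at a time remains predication-safe because
 * mergemask works at byte granularity: DO_1OP with ESIZE == 8 hands
 * mergemask_uq eight mask bits, and expand_pred_b_data turns each of
 * them into a per-byte write enable, so the 16- or 32-bit lanes
 * inside each uint64_t are still masked correctly.
 */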

#define DO_2OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask);        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_U(OP, FN)                        \
    DO_2OP(OP##b, 1, uint8_t, FN)               \
    DO_2OP(OP##h, 2, uint16_t, FN)              \
    DO_2OP(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_S(OP, FN)                        \
    DO_2OP(OP##b, 1, int8_t, FN)                \
    DO_2OP(OP##h, 2, int16_t, FN)               \
    DO_2OP(OP##w, 4, int32_t, FN)

/*
 * "Long" operations where two half-sized inputs (taken from either the
 * top or the bottom of the input vector) produce a double-width result.
 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
 */
#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)               \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)],              \
                         m[H##ESIZE(le * 2 + TOP)]);                    \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
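
/*
 * For example, DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
 * yields four int32_t results, each computed from the "bottom"
 * (even-numbered) int16_t elements of a pair:
 * r = (int32_t)n[le * 2] * m[le * 2]. The TOP == 1 variants read the
 * odd-numbered (top) element of each pair instead.
 */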

#define DO_AND(N, M)  ((N) & (M))
#define DO_BIC(N, M)  ((N) & ~(M))
#define DO_ORR(N, M)  ((N) | (M))
#define DO_ORN(N, M)  ((N) | ~(M))
#define DO_EOR(N, M)  ((N) ^ (M))

DO_2OP(vand, 8, uint64_t, DO_AND)
DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)

#define DO_ADD(N, M) ((N) + (M))
#define DO_SUB(N, M) ((N) - (M))
#define DO_MUL(N, M) ((N) * (M))

DO_2OP_U(vadd, DO_ADD)
DO_2OP_U(vsub, DO_SUB)
DO_2OP_U(vmul, DO_MUL)

DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)

DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)

/*
 * Because the computation type is at least twice as large as required,
 * these work for both signed and unsigned source types.
 */
static inline uint8_t do_mulh_b(int32_t n, int32_t m)
{
    return (n * m) >> 8;
}

static inline uint16_t do_mulh_h(int32_t n, int32_t m)
{
    return (n * m) >> 16;
}

static inline uint32_t do_mulh_w(int64_t n, int64_t m)
{
    return (n * m) >> 32;
}

static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
{
    return (n * m + (1U << 7)) >> 8;
}

static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
{
    return (n * m + (1U << 15)) >> 16;
}

static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
{
    return (n * m + (1U << 31)) >> 32;
}
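
/*
 * Example of the rounding difference: do_mulh_b(100, 2) computes
 * 200 >> 8 == 0, while do_rmulh_b(100, 2) computes
 * (200 + 128) >> 8 == 1; the rounding forms add half of the discarded
 * fraction before shifting.
 */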

DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)

DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)

#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))

DO_2OP_S(vmaxs, DO_MAX)
DO_2OP_U(vmaxu, DO_MAX)
DO_2OP_S(vmins, DO_MIN)
DO_2OP_U(vminu, DO_MIN)

#define DO_ABD(N, M)  ((N) >= (M) ? (N) - (M) : (M) - (N))

DO_2OP_S(vabds, DO_ABD)
DO_2OP_U(vabdu, DO_ABD)

static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n + m) >> 1;
}

static inline int32_t do_vhadd_s(int32_t n, int32_t m)
{
    return ((int64_t)n + m) >> 1;
}

static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n - m) >> 1;
}

static inline int32_t do_vhsub_s(int32_t n, int32_t m)
{
    return ((int64_t)n - m) >> 1;
}

DO_2OP_S(vhadds, do_vhadd_s)
DO_2OP_U(vhaddu, do_vhadd_u)
DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)
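
/*
 * The widened intermediate is what makes these halving ops safe at
 * the extremes: do_vhadd_u(0xffffffff, 0xffffffff) evaluates
 * 0x1fffffffe >> 1 == 0xffffffff in 64-bit arithmetic, where a plain
 * 32-bit addition would have wrapped.
 */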


/*
 * Multiply add long dual accumulate ops.
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC)                 \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else {                                                \
                    a EVENACC                                           \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)
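
/*
 * XCHG selects the "exchange" forms (VMLALDAVX): when it is true, the
 * lanes of n are swapped in pairs before multiplying, so e.g.
 * vmlaldavxsh accumulates
 * n[1]*m[0] + n[0]*m[1] + n[3]*m[2] + n[2]*m[3] + ...
 * rather than the straight n[e]*m[e] products.
 */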
525