/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      of the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        mask &= MAKE_64BIT_MASK(0, masklen);
    }
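    /*
     * Worked example of the tail-predication case above: with
     * LTPSIZE == 2 (32-bit elements, so esize == 4) and LR
     * (env->regs[14]) == 2 elements still to process, masklen is
     * 2 << 2 == 8, so only predicate bits [7:0] survive and just the
     * two lowest 32-bit lanes of the vector remain active.
     */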

    if ((env->condexec_bits & 0xf) == 0) {
        /*
         * ECI bits indicate which beats are already executed;
         * we handle this by effectively predicating them out.
         */
        int eci = env->condexec_bits >> 4;
        switch (eci) {
        case ECI_NONE:
            break;
        case ECI_A0:
            mask &= 0xfff0;
            break;
        case ECI_A0A1:
            mask &= 0xff00;
            break;
        case ECI_A0A1A2:
        case ECI_A0A1A2B0:
            mask &= 0xf000;
            break;
        default:
            g_assert_not_reached();
        }
    }

    return mask;
}

static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    if (mask01 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff;
    }
    if (mask23 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff00;
    }
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}


#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        /*                                                              \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned  \
         * beats so we don't care if we update part of the dest and    \
         * then take an exception.                                     \
         */                                                             \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }
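/*
 * In the expansions below, MSIZE is the size in bytes of each element
 * as accessed in memory and ESIZE is the size in bytes of each element
 * in the vector register, so the widening loads and narrowing stores
 * (e.g. vldrb_sh, vstrb_h) have MSIZE != ESIZE. LDTYPE/STTYPE name the
 * cpu_*_data_ra() accessor used, and the H<n>() macros adjust element
 * indices for host endianness.
 */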

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR

/*
 * The mergemask(D, R, M) macro performs the operation "*D = R" but
 * stores only the bytes which correspond to 1 bits in M,
 * leaving other bytes in *D unchanged. We use _Generic
 * to select the correct implementation based on the type of D.
 */

static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b_data[mask & 3];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b_data[mask & 0xf];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b_data[mask & 0xff];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

#define mergemask(D, R, M)                      \
    _Generic(D,                                 \
             uint8_t *: mergemask_ub,           \
             int8_t *: mergemask_sb,            \
             uint16_t *: mergemask_uh,          \
             int16_t *: mergemask_sh,           \
             uint32_t *: mergemask_uw,          \
             int32_t *: mergemask_sw,           \
             uint64_t *: mergemask_uq,          \
             int64_t *: mergemask_sq)(D, R, M)

#define DO_1OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask);       \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_CLS_B(N) (clrsb32(N) - 24)
#define DO_CLS_H(N) (clrsb32(N) - 16)

DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)

#define DO_CLZ_B(N) (clz32(N) - 24)
#define DO_CLZ_H(N) (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)

#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)

#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))

DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)

/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)

#define DO_NEG(N) (-(N))
#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))

DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)

/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
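
/*
 * Note that the fp VABS/VNEG helpers above operate on 64 bits at a
 * time: DO_FABSH/DO_FABSS and DO_FNEGH/DO_FNEGS only clear or flip the
 * sign bit of each 16-bit or 32-bit element, which can be done with a
 * mask or XOR across a whole 64-bit lane. Per-byte predication is still
 * honoured because mergemask_uq expands the corresponding eight
 * predicate bits into a byte-wise mask, leaving the bytes of inactive
 * elements in the destination unchanged.
 */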