/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      of the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        mask &= MAKE_64BIT_MASK(0, masklen);
    }

    if ((env->condexec_bits & 0xf) == 0) {
        /*
         * ECI bits indicate which beats are already executed;
         * we handle this by effectively predicating them out.
         */
        int eci = env->condexec_bits >> 4;
        switch (eci) {
        case ECI_NONE:
            break;
        case ECI_A0:
            mask &= 0xfff0;
            break;
        case ECI_A0A1:
            mask &= 0xff00;
            break;
        case ECI_A0A1A2:
        case ECI_A0A1A2B0:
            mask &= 0xf000;
            break;
        default:
            g_assert_not_reached();
        }
    }

    return mask;
}

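/*
 * Worked example (illustrative, not part of the original file): with
 * MASK01 and MASK23 both zero the initial mask is 0xffff, i.e. every
 * lane is active. If tail predication is active with ltpsize == 2
 * (32-bit elements) and LR == 2, then masklen = 2 << 2 = 8 and the
 * mask becomes 0x00ff; of the bits a 32-bit op examines (0, 4, 8, 12)
 * only bits 0 and 4 survive, so just the two lowest lanes are
 * processed on this final loop iteration.
 */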

static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    if (mask01 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff;
    }
    if (mask23 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff00;
    }
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}


#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)   \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        /*                                                              \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned  \
         * beats so we don't care if we update part of the dest and    \
         * then take an exception.                                      \
         */                                                             \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)   \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

/*
 * Widening loads and narrowing stores: the memory access is MSIZE
 * bytes per element but the vector lane is ESIZE bytes, with sign
 * or zero extension on load and truncation on store.
 */
DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR

/*
 * The mergemask(D, R, M) macro performs the operation "*D = R" but
 * storing only the bytes which correspond to 1 bits in M,
 * leaving other bytes in *D unchanged. We use _Generic
 * to select the correct implementation based on the type of D.
 */
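/*
 * For example (illustrative, not part of the original file): MVE
 * predication is byte-granular, so a single lane can be partially
 * written. If the four mask bits covering a given 32-bit lane are
 * 0b0011, mergemask_uw() picks up a byte mask of 0x0000ffff from
 * expand_pred_b_data[], so only the two least significant bytes of
 * *D take the new value and the upper two bytes are left unchanged.
 */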

static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b_data[mask & 3];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b_data[mask & 0xf];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b_data[mask & 0xff];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

#define mergemask(D, R, M)                  \
    _Generic(D,                             \
        uint8_t *: mergemask_ub,            \
        int8_t *: mergemask_sb,             \
        uint16_t *: mergemask_uh,           \
        int16_t *: mergemask_sh,            \
        uint32_t *: mergemask_uw,           \
        int32_t *: mergemask_sw,            \
        uint64_t *: mergemask_uq,           \
        int64_t *: mergemask_sq)(D, R, M)

#define DO_1OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)        \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask);       \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_CLZ_B(N)   (clz32(N) - 24)
#define DO_CLZ_H(N)   (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

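/*
 * For reference (illustrative, not part of the original file),
 * DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B) above expands to roughly the
 * following helper, which applies the op to all 16 byte lanes under
 * the current predicate mask and then advances the VPT/ECI state:
 *
 *   void helper_mve_vclzb(CPUARMState *env, void *vd, void *vm)
 *   {
 *       uint8_t *d = vd, *m = vm;
 *       uint16_t mask = mve_element_mask(env);
 *       unsigned e;
 *       for (e = 0; e < 16; e++, mask >>= 1) {
 *           mergemask(&d[H1(e)], DO_CLZ_B(m[H1(e)]), mask);
 *       }
 *       mve_advance_vpt(env);
 *   }
 */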