/*
 * RISC-V Vector Extension Internals
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TARGET_RISCV_VECTOR_INTERNALS_H
#define TARGET_RISCV_VECTOR_INTERNALS_H

#include "qemu/bitops.h"
#include "hw/registerfields.h"
#include "cpu.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"

/*
 * If vstart is already at or past vl there are no body elements to
 * process: clear vstart and return from the helper early.
 */
#define VSTART_CHECK_EARLY_EXIT(env, vl) do { \
    if (env->vstart >= vl) {                  \
        env->vstart = 0;                      \
        return;                               \
    }                                         \
} while (0)

/* Number of fields (nf) encoded in the descriptor, for segment load/stores. */
static inline uint32_t vext_nf(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, NF);
}

/*
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing units smaller than that needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define H1(x)   ((x) ^ 7)
#define H1_2(x) ((x) ^ 6)
#define H1_4(x) ((x) ^ 4)
#define H2(x)   ((x) ^ 3)
#define H4(x)   ((x) ^ 1)
#define H8(x)   ((x))
#else
#define H1(x)   (x)
#define H1_2(x) (x)
#define H1_4(x) (x)
#define H2(x)   (x)
#define H4(x)   (x)
#define H8(x)   (x)
#endif
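
/*
 * Example (illustration only): element 0 of a byte vector occupies bits
 * [7:0] of its 64-bit chunk, which a big-endian host stores at byte
 * offset 7, so H1(0) == 7 and ((uint8_t *)vreg)[H1(0)] reads the right
 * byte; on a little-endian host H1(0) == 0 and no fixup is needed.
 */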

/*
 * Encode LMUL to lmul as follows:
 *  LMUL vlmul lmul
 *   1   000    0
 *   2   001    1
 *   4   010    2
 *   8   011    3
 *   -   100    -
 *  1/8  101   -3
 *  1/4  110   -2
 *  1/2  111   -1
 */
static inline int32_t vext_lmul(uint32_t desc)
{
    return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
}
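
/*
 * For example, a vlmul encoding of 111 (LMUL = 1/2) sign-extends to
 * lmul = -1, while 001 (LMUL = 2) yields lmul = 1.
 */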

static inline uint32_t vext_vm(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VM);
}

static inline uint32_t vext_vma(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VMA);
}

static inline uint32_t vext_vta(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VTA);
}

static inline uint32_t vext_vta_all_1s(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VTA_ALL_1S);
}

/*
 * Earlier designs (pre-0.9) had a varying number of bits
 * per mask value (MLEN). In the 0.9 design, MLEN=1.
 * (Section 4.5)
 */
static inline int vext_elem_mask(void *v0, int index)
{
    int idx = index / 64;
    int pos = index % 64;
    return (((uint64_t *)v0)[idx] >> pos) & 1;
}
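
/*
 * For instance, the mask bit for element 70 is bit 6 of the second
 * 64-bit word of v0 (idx = 1, pos = 6).
 */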

/*
 * Get the total number of elements, including prestart, body and tail
 * elements. Note that when LMUL < 1, the tail includes the elements past
 * VLMAX that are held in the same vector register.
 */
static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
                                            uint32_t esz)
{
    uint32_t vlenb = simd_maxsz(desc);
    uint32_t sew = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW);
    int8_t emul = ctzl(esz) - ctzl(sew) + vext_lmul(desc) < 0 ? 0 :
                  ctzl(esz) - ctzl(sew) + vext_lmul(desc);
    return (vlenb << emul) / esz;
}
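
/*
 * Worked example (illustration only): with VLEN = 128 bits (vlenb = 16),
 * SEW = 32 (esz = 4) and LMUL = 1/2, emul is clamped to 0, so this
 * returns 16 / 4 = 4 total elements even though VLMAX is only 2; the
 * extra elements are tail elements within the same register.
 */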

/* set agnostic elements to 1s */
void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
                       uint32_t tot);

/* expand macro args before macro */
#define RVVCALL(macro, ...)  macro(__VA_ARGS__)

/* (TD, T2, TX2) */
#define OP_UU_B uint8_t, uint8_t, uint8_t
#define OP_UU_H uint16_t, uint16_t, uint16_t
#define OP_UU_W uint32_t, uint32_t, uint32_t
#define OP_UU_D uint64_t, uint64_t, uint64_t

/* (TD, T1, T2, TX1, TX2) */
#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t

#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP)      \
static void do_##NAME(void *vd, void *vs2, int i)   \
{                                                   \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                 \
    *((TD *)vd + HD(i)) = OP(s2);                   \
}

#define GEN_VEXT_V(NAME, ESZ)                          \
void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
                  CPURISCVState *env, uint32_t desc)   \
{                                                      \
    uint32_t vm = vext_vm(desc);                       \
    uint32_t vl = env->vl;                             \
    uint32_t total_elems =                             \
        vext_get_total_elems(env, desc, ESZ);          \
    uint32_t vta = vext_vta(desc);                     \
    uint32_t vma = vext_vma(desc);                     \
    uint32_t i;                                        \
                                                       \
    VSTART_CHECK_EARLY_EXIT(env, vl);                  \
                                                       \
    for (i = env->vstart; i < vl; i++) {               \
        if (!vm && !vext_elem_mask(v0, i)) {           \
            /* set masked-off elements to 1s */        \
            vext_set_elems_1s(vd, vma, i * ESZ,        \
                              (i + 1) * ESZ);          \
            continue;                                  \
        }                                              \
        do_##NAME(vd, vs2, i);                         \
    }                                                  \
    env->vstart = 0;                                   \
    /* set tail elements to 1s */                      \
    vext_set_elems_1s(vd, vta, vl * ESZ,               \
                      total_elems * ESZ);              \
}
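
/*
 * Sketch of how OPIVV1/GEN_VEXT_V combine (hypothetical helper name,
 * not generated by this header):
 *
 *   #define DO_NOT(N) (~(N))
 *   RVVCALL(OPIVV1, vnot_v_b, OP_UU_B, H1, H1, DO_NOT)
 *   GEN_VEXT_V(vnot_v_b, 1)
 *
 * This expands to a per-element do_vnot_v_b() plus the HELPER(vnot_v_b)
 * loop above, which handles masking, vstart and tail elements.
 */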

/* operation of two vector elements */
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);

#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)    \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i)    \
{                                                               \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                             \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                             \
    *((TD *)vd + HD(i)) = OP(s2, s1);                           \
}

void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
                CPURISCVState *env, uint32_t desc,
                opivv2_fn *fn, uint32_t esz);

/* generate the helpers for OPIVV */
#define GEN_VEXT_VV(NAME, ESZ)                    \
void HELPER(NAME)(void *vd, void *v0, void *vs1,  \
                  void *vs2, CPURISCVState *env,  \
                  uint32_t desc)                  \
{                                                 \
    do_vext_vv(vd, v0, vs1, vs2, env, desc,       \
               do_##NAME, ESZ);                   \
}
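
/*
 * For illustration (hypothetical helper name): an unsigned byte add
 * would be wired up as
 *
 *   #define DO_ADD(N, M) ((N) + (M))
 *   RVVCALL(OPIVV2, vdemo_vv_b, OP_UUU_B, H1, H1, H1, DO_ADD)
 *   GEN_VEXT_VV(vdemo_vv_b, 1)
 *
 * with do_vext_vv() providing the shared mask/tail handling loop.
 */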

typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);

/*
 * (T1)s1 gives the real operand type.
 * (TX1)(T1)s1 then widens or narrows it for widening/narrowing operations.
 */
#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)             \
static void do_##NAME(void *vd, target_long s1, void *vs2, int i)   \
{                                                                   \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                 \
    *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1);                      \
}

void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
                CPURISCVState *env, uint32_t desc,
                opivx2_fn fn, uint32_t esz);

/* generate the helpers for OPIVX */
#define GEN_VEXT_VX(NAME, ESZ)                            \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,    \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    do_vext_vx(vd, v0, s1, vs2, env, desc,                \
               do_##NAME, ESZ);                           \
}
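
/*
 * The vector-scalar form follows the same pattern (hypothetical name):
 *
 *   RVVCALL(OPIVX2, vdemo_vx_b, OP_UUU_B, H1, H1, DO_ADD)
 *   GEN_VEXT_VX(vdemo_vx_b, 1)
 *
 * Here s1 arrives as a target_long and is cast through (T1) to the real
 * operand width before the operation.
 */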

/* Three of the widening operand-type macros (TD is twice the source width): */
/* (TD, T1, T2, TX1, TX2) */
#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
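
/*
 * For example, a widening unsigned add over byte sources (hypothetical
 * name, reusing the DO_ADD sketch above) would pair these with the
 * OPIVV2/GEN_VEXT_VV machinery:
 *
 *   RVVCALL(OPIVV2, vwdemo_vv_b, WOP_UUU_B, H2, H1, H1, DO_ADD)
 *   GEN_VEXT_VV(vwdemo_vv_b, 2)
 *
 * so the 8-bit sources are indexed with H1 and the 16-bit destination
 * elements with H2.
 */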

#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */