1 /* 2 * ARM AdvSIMD / SVE Vector Operations 3 * 4 * Copyright (c) 2018 Linaro 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "exec/helper-proto.h" 23 #include "tcg/tcg-gvec-desc.h" 24 #include "fpu/softfloat.h" 25 #include "vec_internal.h" 26 27 /* Note that vector data is stored in host-endian 64-bit chunks, 28 so addressing units smaller than that needs a host-endian fixup. */ 29 #ifdef HOST_WORDS_BIGENDIAN 30 #define H1(x) ((x) ^ 7) 31 #define H2(x) ((x) ^ 3) 32 #define H4(x) ((x) ^ 1) 33 #else 34 #define H1(x) (x) 35 #define H2(x) (x) 36 #define H4(x) (x) 37 #endif 38 39 /* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */ 40 static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3, 41 bool neg, bool round, uint32_t *sat) 42 { 43 /* 44 * Simplify: 45 * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16 46 * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15 47 */ 48 int32_t ret = (int32_t)src1 * src2; 49 if (neg) { 50 ret = -ret; 51 } 52 ret += ((int32_t)src3 << 15) + (round << 14); 53 ret >>= 15; 54 55 if (ret != (int16_t)ret) { 56 *sat = 1; 57 ret = (ret < 0 ? INT16_MIN : INT16_MAX); 58 } 59 return ret; 60 } 61 62 uint32_t HELPER(neon_qrdmlah_s16)(CPUARMState *env, uint32_t src1, 63 uint32_t src2, uint32_t src3) 64 { 65 uint32_t *sat = &env->vfp.qc[0]; 66 uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, false, true, sat); 67 uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16, 68 false, true, sat); 69 return deposit32(e1, 16, 16, e2); 70 } 71 72 void HELPER(gvec_qrdmlah_s16)(void *vd, void *vn, void *vm, 73 void *vq, uint32_t desc) 74 { 75 uintptr_t opr_sz = simd_oprsz(desc); 76 int16_t *d = vd; 77 int16_t *n = vn; 78 int16_t *m = vm; 79 uintptr_t i; 80 81 for (i = 0; i < opr_sz / 2; ++i) { 82 d[i] = do_sqrdmlah_h(n[i], m[i], d[i], false, true, vq); 83 } 84 clear_tail(d, opr_sz, simd_maxsz(desc)); 85 } 86 87 uint32_t HELPER(neon_qrdmlsh_s16)(CPUARMState *env, uint32_t src1, 88 uint32_t src2, uint32_t src3) 89 { 90 uint32_t *sat = &env->vfp.qc[0]; 91 uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, true, true, sat); 92 uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16, 93 true, true, sat); 94 return deposit32(e1, 16, 16, e2); 95 } 96 97 void HELPER(gvec_qrdmlsh_s16)(void *vd, void *vn, void *vm, 98 void *vq, uint32_t desc) 99 { 100 uintptr_t opr_sz = simd_oprsz(desc); 101 int16_t *d = vd; 102 int16_t *n = vn; 103 int16_t *m = vm; 104 uintptr_t i; 105 106 for (i = 0; i < opr_sz / 2; ++i) { 107 d[i] = do_sqrdmlah_h(n[i], m[i], d[i], true, true, vq); 108 } 109 clear_tail(d, opr_sz, simd_maxsz(desc)); 110 } 111 112 void HELPER(neon_sqdmulh_h)(void *vd, void *vn, void *vm, 113 void *vq, uint32_t desc) 114 { 115 intptr_t i, opr_sz = simd_oprsz(desc); 116 int16_t *d = vd, *n = vn, *m = vm; 117 118 for (i = 0; i < opr_sz / 2; ++i) { 119 d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, false, vq); 120 } 121 clear_tail(d, opr_sz, simd_maxsz(desc)); 122 } 123 124 void HELPER(neon_sqrdmulh_h)(void *vd, void *vn, void *vm, 125 void *vq, uint32_t desc) 126 { 127 intptr_t i, opr_sz = simd_oprsz(desc); 128 int16_t *d = vd, *n = vn, *m = vm; 129 130 for (i = 0; i < opr_sz / 2; ++i) { 131 d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, true, vq); 132 } 133 clear_tail(d, opr_sz, simd_maxsz(desc)); 134 } 135 136 /* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */ 137 static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3, 138 bool neg, bool round, uint32_t *sat) 139 { 140 /* Simplify similarly to int_qrdmlah_s16 above. */ 141 int64_t ret = (int64_t)src1 * src2; 142 if (neg) { 143 ret = -ret; 144 } 145 ret += ((int64_t)src3 << 31) + (round << 30); 146 ret >>= 31; 147 148 if (ret != (int32_t)ret) { 149 *sat = 1; 150 ret = (ret < 0 ? INT32_MIN : INT32_MAX); 151 } 152 return ret; 153 } 154 155 uint32_t HELPER(neon_qrdmlah_s32)(CPUARMState *env, int32_t src1, 156 int32_t src2, int32_t src3) 157 { 158 uint32_t *sat = &env->vfp.qc[0]; 159 return do_sqrdmlah_s(src1, src2, src3, false, true, sat); 160 } 161 162 void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm, 163 void *vq, uint32_t desc) 164 { 165 uintptr_t opr_sz = simd_oprsz(desc); 166 int32_t *d = vd; 167 int32_t *n = vn; 168 int32_t *m = vm; 169 uintptr_t i; 170 171 for (i = 0; i < opr_sz / 4; ++i) { 172 d[i] = do_sqrdmlah_s(n[i], m[i], d[i], false, true, vq); 173 } 174 clear_tail(d, opr_sz, simd_maxsz(desc)); 175 } 176 177 uint32_t HELPER(neon_qrdmlsh_s32)(CPUARMState *env, int32_t src1, 178 int32_t src2, int32_t src3) 179 { 180 uint32_t *sat = &env->vfp.qc[0]; 181 return do_sqrdmlah_s(src1, src2, src3, true, true, sat); 182 } 183 184 void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm, 185 void *vq, uint32_t desc) 186 { 187 uintptr_t opr_sz = simd_oprsz(desc); 188 int32_t *d = vd; 189 int32_t *n = vn; 190 int32_t *m = vm; 191 uintptr_t i; 192 193 for (i = 0; i < opr_sz / 4; ++i) { 194 d[i] = do_sqrdmlah_s(n[i], m[i], d[i], true, true, vq); 195 } 196 clear_tail(d, opr_sz, simd_maxsz(desc)); 197 } 198 199 void HELPER(neon_sqdmulh_s)(void *vd, void *vn, void *vm, 200 void *vq, uint32_t desc) 201 { 202 intptr_t i, opr_sz = simd_oprsz(desc); 203 int32_t *d = vd, *n = vn, *m = vm; 204 205 for (i = 0; i < opr_sz / 4; ++i) { 206 d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, false, vq); 207 } 208 clear_tail(d, opr_sz, simd_maxsz(desc)); 209 } 210 211 void HELPER(neon_sqrdmulh_s)(void *vd, void *vn, void *vm, 212 void *vq, uint32_t desc) 213 { 214 intptr_t i, opr_sz = simd_oprsz(desc); 215 int32_t *d = vd, *n = vn, *m = vm; 216 217 for (i = 0; i < opr_sz / 4; ++i) { 218 d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, true, vq); 219 } 220 clear_tail(d, opr_sz, simd_maxsz(desc)); 221 } 222 223 /* Integer 8 and 16-bit dot-product. 224 * 225 * Note that for the loops herein, host endianness does not matter 226 * with respect to the ordering of data within the 64-bit lanes. 227 * All elements are treated equally, no matter where they are. 228 */ 229 230 void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc) 231 { 232 intptr_t i, opr_sz = simd_oprsz(desc); 233 uint32_t *d = vd; 234 int8_t *n = vn, *m = vm; 235 236 for (i = 0; i < opr_sz / 4; ++i) { 237 d[i] += n[i * 4 + 0] * m[i * 4 + 0] 238 + n[i * 4 + 1] * m[i * 4 + 1] 239 + n[i * 4 + 2] * m[i * 4 + 2] 240 + n[i * 4 + 3] * m[i * 4 + 3]; 241 } 242 clear_tail(d, opr_sz, simd_maxsz(desc)); 243 } 244 245 void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc) 246 { 247 intptr_t i, opr_sz = simd_oprsz(desc); 248 uint32_t *d = vd; 249 uint8_t *n = vn, *m = vm; 250 251 for (i = 0; i < opr_sz / 4; ++i) { 252 d[i] += n[i * 4 + 0] * m[i * 4 + 0] 253 + n[i * 4 + 1] * m[i * 4 + 1] 254 + n[i * 4 + 2] * m[i * 4 + 2] 255 + n[i * 4 + 3] * m[i * 4 + 3]; 256 } 257 clear_tail(d, opr_sz, simd_maxsz(desc)); 258 } 259 260 void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc) 261 { 262 intptr_t i, opr_sz = simd_oprsz(desc); 263 uint64_t *d = vd; 264 int16_t *n = vn, *m = vm; 265 266 for (i = 0; i < opr_sz / 8; ++i) { 267 d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0] 268 + (int64_t)n[i * 4 + 1] * m[i * 4 + 1] 269 + (int64_t)n[i * 4 + 2] * m[i * 4 + 2] 270 + (int64_t)n[i * 4 + 3] * m[i * 4 + 3]; 271 } 272 clear_tail(d, opr_sz, simd_maxsz(desc)); 273 } 274 275 void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc) 276 { 277 intptr_t i, opr_sz = simd_oprsz(desc); 278 uint64_t *d = vd; 279 uint16_t *n = vn, *m = vm; 280 281 for (i = 0; i < opr_sz / 8; ++i) { 282 d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0] 283 + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1] 284 + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2] 285 + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3]; 286 } 287 clear_tail(d, opr_sz, simd_maxsz(desc)); 288 } 289 290 void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) 291 { 292 intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; 293 intptr_t index = simd_data(desc); 294 uint32_t *d = vd; 295 int8_t *n = vn; 296 int8_t *m_indexed = (int8_t *)vm + index * 4; 297 298 /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd. 299 * Otherwise opr_sz is a multiple of 16. 300 */ 301 segend = MIN(4, opr_sz_4); 302 i = 0; 303 do { 304 int8_t m0 = m_indexed[i * 4 + 0]; 305 int8_t m1 = m_indexed[i * 4 + 1]; 306 int8_t m2 = m_indexed[i * 4 + 2]; 307 int8_t m3 = m_indexed[i * 4 + 3]; 308 309 do { 310 d[i] += n[i * 4 + 0] * m0 311 + n[i * 4 + 1] * m1 312 + n[i * 4 + 2] * m2 313 + n[i * 4 + 3] * m3; 314 } while (++i < segend); 315 segend = i + 4; 316 } while (i < opr_sz_4); 317 318 clear_tail(d, opr_sz, simd_maxsz(desc)); 319 } 320 321 void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) 322 { 323 intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; 324 intptr_t index = simd_data(desc); 325 uint32_t *d = vd; 326 uint8_t *n = vn; 327 uint8_t *m_indexed = (uint8_t *)vm + index * 4; 328 329 /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd. 330 * Otherwise opr_sz is a multiple of 16. 331 */ 332 segend = MIN(4, opr_sz_4); 333 i = 0; 334 do { 335 uint8_t m0 = m_indexed[i * 4 + 0]; 336 uint8_t m1 = m_indexed[i * 4 + 1]; 337 uint8_t m2 = m_indexed[i * 4 + 2]; 338 uint8_t m3 = m_indexed[i * 4 + 3]; 339 340 do { 341 d[i] += n[i * 4 + 0] * m0 342 + n[i * 4 + 1] * m1 343 + n[i * 4 + 2] * m2 344 + n[i * 4 + 3] * m3; 345 } while (++i < segend); 346 segend = i + 4; 347 } while (i < opr_sz_4); 348 349 clear_tail(d, opr_sz, simd_maxsz(desc)); 350 } 351 352 void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) 353 { 354 intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; 355 intptr_t index = simd_data(desc); 356 uint64_t *d = vd; 357 int16_t *n = vn; 358 int16_t *m_indexed = (int16_t *)vm + index * 4; 359 360 /* This is supported by SVE only, so opr_sz is always a multiple of 16. 361 * Process the entire segment all at once, writing back the results 362 * only after we've consumed all of the inputs. 363 */ 364 for (i = 0; i < opr_sz_8 ; i += 2) { 365 uint64_t d0, d1; 366 367 d0 = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0]; 368 d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1]; 369 d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2]; 370 d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3]; 371 d1 = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0]; 372 d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1]; 373 d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2]; 374 d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3]; 375 376 d[i + 0] += d0; 377 d[i + 1] += d1; 378 } 379 380 clear_tail(d, opr_sz, simd_maxsz(desc)); 381 } 382 383 void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) 384 { 385 intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; 386 intptr_t index = simd_data(desc); 387 uint64_t *d = vd; 388 uint16_t *n = vn; 389 uint16_t *m_indexed = (uint16_t *)vm + index * 4; 390 391 /* This is supported by SVE only, so opr_sz is always a multiple of 16. 392 * Process the entire segment all at once, writing back the results 393 * only after we've consumed all of the inputs. 394 */ 395 for (i = 0; i < opr_sz_8 ; i += 2) { 396 uint64_t d0, d1; 397 398 d0 = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0]; 399 d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1]; 400 d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2]; 401 d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3]; 402 d1 = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0]; 403 d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1]; 404 d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2]; 405 d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3]; 406 407 d[i + 0] += d0; 408 d[i + 1] += d1; 409 } 410 411 clear_tail(d, opr_sz, simd_maxsz(desc)); 412 } 413 414 void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm, 415 void *vfpst, uint32_t desc) 416 { 417 uintptr_t opr_sz = simd_oprsz(desc); 418 float16 *d = vd; 419 float16 *n = vn; 420 float16 *m = vm; 421 float_status *fpst = vfpst; 422 uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1); 423 uint32_t neg_imag = neg_real ^ 1; 424 uintptr_t i; 425 426 /* Shift boolean to the sign bit so we can xor to negate. */ 427 neg_real <<= 15; 428 neg_imag <<= 15; 429 430 for (i = 0; i < opr_sz / 2; i += 2) { 431 float16 e0 = n[H2(i)]; 432 float16 e1 = m[H2(i + 1)] ^ neg_imag; 433 float16 e2 = n[H2(i + 1)]; 434 float16 e3 = m[H2(i)] ^ neg_real; 435 436 d[H2(i)] = float16_add(e0, e1, fpst); 437 d[H2(i + 1)] = float16_add(e2, e3, fpst); 438 } 439 clear_tail(d, opr_sz, simd_maxsz(desc)); 440 } 441 442 void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm, 443 void *vfpst, uint32_t desc) 444 { 445 uintptr_t opr_sz = simd_oprsz(desc); 446 float32 *d = vd; 447 float32 *n = vn; 448 float32 *m = vm; 449 float_status *fpst = vfpst; 450 uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1); 451 uint32_t neg_imag = neg_real ^ 1; 452 uintptr_t i; 453 454 /* Shift boolean to the sign bit so we can xor to negate. */ 455 neg_real <<= 31; 456 neg_imag <<= 31; 457 458 for (i = 0; i < opr_sz / 4; i += 2) { 459 float32 e0 = n[H4(i)]; 460 float32 e1 = m[H4(i + 1)] ^ neg_imag; 461 float32 e2 = n[H4(i + 1)]; 462 float32 e3 = m[H4(i)] ^ neg_real; 463 464 d[H4(i)] = float32_add(e0, e1, fpst); 465 d[H4(i + 1)] = float32_add(e2, e3, fpst); 466 } 467 clear_tail(d, opr_sz, simd_maxsz(desc)); 468 } 469 470 void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm, 471 void *vfpst, uint32_t desc) 472 { 473 uintptr_t opr_sz = simd_oprsz(desc); 474 float64 *d = vd; 475 float64 *n = vn; 476 float64 *m = vm; 477 float_status *fpst = vfpst; 478 uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1); 479 uint64_t neg_imag = neg_real ^ 1; 480 uintptr_t i; 481 482 /* Shift boolean to the sign bit so we can xor to negate. */ 483 neg_real <<= 63; 484 neg_imag <<= 63; 485 486 for (i = 0; i < opr_sz / 8; i += 2) { 487 float64 e0 = n[i]; 488 float64 e1 = m[i + 1] ^ neg_imag; 489 float64 e2 = n[i + 1]; 490 float64 e3 = m[i] ^ neg_real; 491 492 d[i] = float64_add(e0, e1, fpst); 493 d[i + 1] = float64_add(e2, e3, fpst); 494 } 495 clear_tail(d, opr_sz, simd_maxsz(desc)); 496 } 497 498 void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, 499 void *vfpst, uint32_t desc) 500 { 501 uintptr_t opr_sz = simd_oprsz(desc); 502 float16 *d = vd; 503 float16 *n = vn; 504 float16 *m = vm; 505 float_status *fpst = vfpst; 506 intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); 507 uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); 508 uint32_t neg_real = flip ^ neg_imag; 509 uintptr_t i; 510 511 /* Shift boolean to the sign bit so we can xor to negate. */ 512 neg_real <<= 15; 513 neg_imag <<= 15; 514 515 for (i = 0; i < opr_sz / 2; i += 2) { 516 float16 e2 = n[H2(i + flip)]; 517 float16 e1 = m[H2(i + flip)] ^ neg_real; 518 float16 e4 = e2; 519 float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag; 520 521 d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst); 522 d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst); 523 } 524 clear_tail(d, opr_sz, simd_maxsz(desc)); 525 } 526 527 void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, 528 void *vfpst, uint32_t desc) 529 { 530 uintptr_t opr_sz = simd_oprsz(desc); 531 float16 *d = vd; 532 float16 *n = vn; 533 float16 *m = vm; 534 float_status *fpst = vfpst; 535 intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); 536 uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); 537 intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2); 538 uint32_t neg_real = flip ^ neg_imag; 539 intptr_t elements = opr_sz / sizeof(float16); 540 intptr_t eltspersegment = 16 / sizeof(float16); 541 intptr_t i, j; 542 543 /* Shift boolean to the sign bit so we can xor to negate. */ 544 neg_real <<= 15; 545 neg_imag <<= 15; 546 547 for (i = 0; i < elements; i += eltspersegment) { 548 float16 mr = m[H2(i + 2 * index + 0)]; 549 float16 mi = m[H2(i + 2 * index + 1)]; 550 float16 e1 = neg_real ^ (flip ? mi : mr); 551 float16 e3 = neg_imag ^ (flip ? mr : mi); 552 553 for (j = i; j < i + eltspersegment; j += 2) { 554 float16 e2 = n[H2(j + flip)]; 555 float16 e4 = e2; 556 557 d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst); 558 d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst); 559 } 560 } 561 clear_tail(d, opr_sz, simd_maxsz(desc)); 562 } 563 564 void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, 565 void *vfpst, uint32_t desc) 566 { 567 uintptr_t opr_sz = simd_oprsz(desc); 568 float32 *d = vd; 569 float32 *n = vn; 570 float32 *m = vm; 571 float_status *fpst = vfpst; 572 intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); 573 uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); 574 uint32_t neg_real = flip ^ neg_imag; 575 uintptr_t i; 576 577 /* Shift boolean to the sign bit so we can xor to negate. */ 578 neg_real <<= 31; 579 neg_imag <<= 31; 580 581 for (i = 0; i < opr_sz / 4; i += 2) { 582 float32 e2 = n[H4(i + flip)]; 583 float32 e1 = m[H4(i + flip)] ^ neg_real; 584 float32 e4 = e2; 585 float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag; 586 587 d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst); 588 d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst); 589 } 590 clear_tail(d, opr_sz, simd_maxsz(desc)); 591 } 592 593 void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, 594 void *vfpst, uint32_t desc) 595 { 596 uintptr_t opr_sz = simd_oprsz(desc); 597 float32 *d = vd; 598 float32 *n = vn; 599 float32 *m = vm; 600 float_status *fpst = vfpst; 601 intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); 602 uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); 603 intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2); 604 uint32_t neg_real = flip ^ neg_imag; 605 intptr_t elements = opr_sz / sizeof(float32); 606 intptr_t eltspersegment = 16 / sizeof(float32); 607 intptr_t i, j; 608 609 /* Shift boolean to the sign bit so we can xor to negate. */ 610 neg_real <<= 31; 611 neg_imag <<= 31; 612 613 for (i = 0; i < elements; i += eltspersegment) { 614 float32 mr = m[H4(i + 2 * index + 0)]; 615 float32 mi = m[H4(i + 2 * index + 1)]; 616 float32 e1 = neg_real ^ (flip ? mi : mr); 617 float32 e3 = neg_imag ^ (flip ? mr : mi); 618 619 for (j = i; j < i + eltspersegment; j += 2) { 620 float32 e2 = n[H4(j + flip)]; 621 float32 e4 = e2; 622 623 d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst); 624 d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst); 625 } 626 } 627 clear_tail(d, opr_sz, simd_maxsz(desc)); 628 } 629 630 void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, 631 void *vfpst, uint32_t desc) 632 { 633 uintptr_t opr_sz = simd_oprsz(desc); 634 float64 *d = vd; 635 float64 *n = vn; 636 float64 *m = vm; 637 float_status *fpst = vfpst; 638 intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); 639 uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); 640 uint64_t neg_real = flip ^ neg_imag; 641 uintptr_t i; 642 643 /* Shift boolean to the sign bit so we can xor to negate. */ 644 neg_real <<= 63; 645 neg_imag <<= 63; 646 647 for (i = 0; i < opr_sz / 8; i += 2) { 648 float64 e2 = n[i + flip]; 649 float64 e1 = m[i + flip] ^ neg_real; 650 float64 e4 = e2; 651 float64 e3 = m[i + 1 - flip] ^ neg_imag; 652 653 d[i] = float64_muladd(e2, e1, d[i], 0, fpst); 654 d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst); 655 } 656 clear_tail(d, opr_sz, simd_maxsz(desc)); 657 } 658 659 #define DO_2OP(NAME, FUNC, TYPE) \ 660 void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \ 661 { \ 662 intptr_t i, oprsz = simd_oprsz(desc); \ 663 TYPE *d = vd, *n = vn; \ 664 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ 665 d[i] = FUNC(n[i], stat); \ 666 } \ 667 clear_tail(d, oprsz, simd_maxsz(desc)); \ 668 } 669 670 DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16) 671 DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32) 672 DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64) 673 674 DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16) 675 DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32) 676 DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64) 677 678 #undef DO_2OP 679 680 /* Floating-point trigonometric starting value. 681 * See the ARM ARM pseudocode function FPTrigSMul. 682 */ 683 static float16 float16_ftsmul(float16 op1, uint16_t op2, float_status *stat) 684 { 685 float16 result = float16_mul(op1, op1, stat); 686 if (!float16_is_any_nan(result)) { 687 result = float16_set_sign(result, op2 & 1); 688 } 689 return result; 690 } 691 692 static float32 float32_ftsmul(float32 op1, uint32_t op2, float_status *stat) 693 { 694 float32 result = float32_mul(op1, op1, stat); 695 if (!float32_is_any_nan(result)) { 696 result = float32_set_sign(result, op2 & 1); 697 } 698 return result; 699 } 700 701 static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat) 702 { 703 float64 result = float64_mul(op1, op1, stat); 704 if (!float64_is_any_nan(result)) { 705 result = float64_set_sign(result, op2 & 1); 706 } 707 return result; 708 } 709 710 static float32 float32_abd(float32 op1, float32 op2, float_status *stat) 711 { 712 return float32_abs(float32_sub(op1, op2, stat)); 713 } 714 715 #define DO_3OP(NAME, FUNC, TYPE) \ 716 void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ 717 { \ 718 intptr_t i, oprsz = simd_oprsz(desc); \ 719 TYPE *d = vd, *n = vn, *m = vm; \ 720 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ 721 d[i] = FUNC(n[i], m[i], stat); \ 722 } \ 723 clear_tail(d, oprsz, simd_maxsz(desc)); \ 724 } 725 726 DO_3OP(gvec_fadd_h, float16_add, float16) 727 DO_3OP(gvec_fadd_s, float32_add, float32) 728 DO_3OP(gvec_fadd_d, float64_add, float64) 729 730 DO_3OP(gvec_fsub_h, float16_sub, float16) 731 DO_3OP(gvec_fsub_s, float32_sub, float32) 732 DO_3OP(gvec_fsub_d, float64_sub, float64) 733 734 DO_3OP(gvec_fmul_h, float16_mul, float16) 735 DO_3OP(gvec_fmul_s, float32_mul, float32) 736 DO_3OP(gvec_fmul_d, float64_mul, float64) 737 738 DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16) 739 DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32) 740 DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64) 741 742 DO_3OP(gvec_fabd_s, float32_abd, float32) 743 744 #ifdef TARGET_AARCH64 745 746 DO_3OP(gvec_recps_h, helper_recpsf_f16, float16) 747 DO_3OP(gvec_recps_s, helper_recpsf_f32, float32) 748 DO_3OP(gvec_recps_d, helper_recpsf_f64, float64) 749 750 DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16) 751 DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32) 752 DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64) 753 754 #endif 755 #undef DO_3OP 756 757 /* For the indexed ops, SVE applies the index per 128-bit vector segment. 758 * For AdvSIMD, there is of course only one such vector segment. 759 */ 760 761 #define DO_MUL_IDX(NAME, TYPE, H) \ 762 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ 763 { \ 764 intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \ 765 intptr_t idx = simd_data(desc); \ 766 TYPE *d = vd, *n = vn, *m = vm; \ 767 for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ 768 TYPE mm = m[H(i + idx)]; \ 769 for (j = 0; j < segment; j++) { \ 770 d[i + j] = n[i + j] * mm; \ 771 } \ 772 } \ 773 clear_tail(d, oprsz, simd_maxsz(desc)); \ 774 } 775 776 DO_MUL_IDX(gvec_mul_idx_h, uint16_t, H2) 777 DO_MUL_IDX(gvec_mul_idx_s, uint32_t, H4) 778 DO_MUL_IDX(gvec_mul_idx_d, uint64_t, ) 779 780 #undef DO_MUL_IDX 781 782 #define DO_MLA_IDX(NAME, TYPE, OP, H) \ 783 void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ 784 { \ 785 intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \ 786 intptr_t idx = simd_data(desc); \ 787 TYPE *d = vd, *n = vn, *m = vm, *a = va; \ 788 for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ 789 TYPE mm = m[H(i + idx)]; \ 790 for (j = 0; j < segment; j++) { \ 791 d[i + j] = a[i + j] OP n[i + j] * mm; \ 792 } \ 793 } \ 794 clear_tail(d, oprsz, simd_maxsz(desc)); \ 795 } 796 797 DO_MLA_IDX(gvec_mla_idx_h, uint16_t, +, H2) 798 DO_MLA_IDX(gvec_mla_idx_s, uint32_t, +, H4) 799 DO_MLA_IDX(gvec_mla_idx_d, uint64_t, +, ) 800 801 DO_MLA_IDX(gvec_mls_idx_h, uint16_t, -, H2) 802 DO_MLA_IDX(gvec_mls_idx_s, uint32_t, -, H4) 803 DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, ) 804 805 #undef DO_MLA_IDX 806 807 #define DO_FMUL_IDX(NAME, TYPE, H) \ 808 void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ 809 { \ 810 intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \ 811 intptr_t idx = simd_data(desc); \ 812 TYPE *d = vd, *n = vn, *m = vm; \ 813 for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ 814 TYPE mm = m[H(i + idx)]; \ 815 for (j = 0; j < segment; j++) { \ 816 d[i + j] = TYPE##_mul(n[i + j], mm, stat); \ 817 } \ 818 } \ 819 clear_tail(d, oprsz, simd_maxsz(desc)); \ 820 } 821 822 DO_FMUL_IDX(gvec_fmul_idx_h, float16, H2) 823 DO_FMUL_IDX(gvec_fmul_idx_s, float32, H4) 824 DO_FMUL_IDX(gvec_fmul_idx_d, float64, ) 825 826 #undef DO_FMUL_IDX 827 828 #define DO_FMLA_IDX(NAME, TYPE, H) \ 829 void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \ 830 void *stat, uint32_t desc) \ 831 { \ 832 intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \ 833 TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1); \ 834 intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1); \ 835 TYPE *d = vd, *n = vn, *m = vm, *a = va; \ 836 op1_neg <<= (8 * sizeof(TYPE) - 1); \ 837 for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ 838 TYPE mm = m[H(i + idx)]; \ 839 for (j = 0; j < segment; j++) { \ 840 d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg, \ 841 mm, a[i + j], 0, stat); \ 842 } \ 843 } \ 844 clear_tail(d, oprsz, simd_maxsz(desc)); \ 845 } 846 847 DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2) 848 DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4) 849 DO_FMLA_IDX(gvec_fmla_idx_d, float64, ) 850 851 #undef DO_FMLA_IDX 852 853 #define DO_SAT(NAME, WTYPE, TYPEN, TYPEM, OP, MIN, MAX) \ 854 void HELPER(NAME)(void *vd, void *vq, void *vn, void *vm, uint32_t desc) \ 855 { \ 856 intptr_t i, oprsz = simd_oprsz(desc); \ 857 TYPEN *d = vd, *n = vn; TYPEM *m = vm; \ 858 bool q = false; \ 859 for (i = 0; i < oprsz / sizeof(TYPEN); i++) { \ 860 WTYPE dd = (WTYPE)n[i] OP m[i]; \ 861 if (dd < MIN) { \ 862 dd = MIN; \ 863 q = true; \ 864 } else if (dd > MAX) { \ 865 dd = MAX; \ 866 q = true; \ 867 } \ 868 d[i] = dd; \ 869 } \ 870 if (q) { \ 871 uint32_t *qc = vq; \ 872 qc[0] = 1; \ 873 } \ 874 clear_tail(d, oprsz, simd_maxsz(desc)); \ 875 } 876 877 DO_SAT(gvec_uqadd_b, int, uint8_t, uint8_t, +, 0, UINT8_MAX) 878 DO_SAT(gvec_uqadd_h, int, uint16_t, uint16_t, +, 0, UINT16_MAX) 879 DO_SAT(gvec_uqadd_s, int64_t, uint32_t, uint32_t, +, 0, UINT32_MAX) 880 881 DO_SAT(gvec_sqadd_b, int, int8_t, int8_t, +, INT8_MIN, INT8_MAX) 882 DO_SAT(gvec_sqadd_h, int, int16_t, int16_t, +, INT16_MIN, INT16_MAX) 883 DO_SAT(gvec_sqadd_s, int64_t, int32_t, int32_t, +, INT32_MIN, INT32_MAX) 884 885 DO_SAT(gvec_uqsub_b, int, uint8_t, uint8_t, -, 0, UINT8_MAX) 886 DO_SAT(gvec_uqsub_h, int, uint16_t, uint16_t, -, 0, UINT16_MAX) 887 DO_SAT(gvec_uqsub_s, int64_t, uint32_t, uint32_t, -, 0, UINT32_MAX) 888 889 DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX) 890 DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX) 891 DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX) 892 893 #undef DO_SAT 894 895 void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn, 896 void *vm, uint32_t desc) 897 { 898 intptr_t i, oprsz = simd_oprsz(desc); 899 uint64_t *d = vd, *n = vn, *m = vm; 900 bool q = false; 901 902 for (i = 0; i < oprsz / 8; i++) { 903 uint64_t nn = n[i], mm = m[i], dd = nn + mm; 904 if (dd < nn) { 905 dd = UINT64_MAX; 906 q = true; 907 } 908 d[i] = dd; 909 } 910 if (q) { 911 uint32_t *qc = vq; 912 qc[0] = 1; 913 } 914 clear_tail(d, oprsz, simd_maxsz(desc)); 915 } 916 917 void HELPER(gvec_uqsub_d)(void *vd, void *vq, void *vn, 918 void *vm, uint32_t desc) 919 { 920 intptr_t i, oprsz = simd_oprsz(desc); 921 uint64_t *d = vd, *n = vn, *m = vm; 922 bool q = false; 923 924 for (i = 0; i < oprsz / 8; i++) { 925 uint64_t nn = n[i], mm = m[i], dd = nn - mm; 926 if (nn < mm) { 927 dd = 0; 928 q = true; 929 } 930 d[i] = dd; 931 } 932 if (q) { 933 uint32_t *qc = vq; 934 qc[0] = 1; 935 } 936 clear_tail(d, oprsz, simd_maxsz(desc)); 937 } 938 939 void HELPER(gvec_sqadd_d)(void *vd, void *vq, void *vn, 940 void *vm, uint32_t desc) 941 { 942 intptr_t i, oprsz = simd_oprsz(desc); 943 int64_t *d = vd, *n = vn, *m = vm; 944 bool q = false; 945 946 for (i = 0; i < oprsz / 8; i++) { 947 int64_t nn = n[i], mm = m[i], dd = nn + mm; 948 if (((dd ^ nn) & ~(nn ^ mm)) & INT64_MIN) { 949 dd = (nn >> 63) ^ ~INT64_MIN; 950 q = true; 951 } 952 d[i] = dd; 953 } 954 if (q) { 955 uint32_t *qc = vq; 956 qc[0] = 1; 957 } 958 clear_tail(d, oprsz, simd_maxsz(desc)); 959 } 960 961 void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn, 962 void *vm, uint32_t desc) 963 { 964 intptr_t i, oprsz = simd_oprsz(desc); 965 int64_t *d = vd, *n = vn, *m = vm; 966 bool q = false; 967 968 for (i = 0; i < oprsz / 8; i++) { 969 int64_t nn = n[i], mm = m[i], dd = nn - mm; 970 if (((dd ^ nn) & (nn ^ mm)) & INT64_MIN) { 971 dd = (nn >> 63) ^ ~INT64_MIN; 972 q = true; 973 } 974 d[i] = dd; 975 } 976 if (q) { 977 uint32_t *qc = vq; 978 qc[0] = 1; 979 } 980 clear_tail(d, oprsz, simd_maxsz(desc)); 981 } 982 983 984 #define DO_SRA(NAME, TYPE) \ 985 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ 986 { \ 987 intptr_t i, oprsz = simd_oprsz(desc); \ 988 int shift = simd_data(desc); \ 989 TYPE *d = vd, *n = vn; \ 990 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ 991 d[i] += n[i] >> shift; \ 992 } \ 993 clear_tail(d, oprsz, simd_maxsz(desc)); \ 994 } 995 996 DO_SRA(gvec_ssra_b, int8_t) 997 DO_SRA(gvec_ssra_h, int16_t) 998 DO_SRA(gvec_ssra_s, int32_t) 999 DO_SRA(gvec_ssra_d, int64_t) 1000 1001 DO_SRA(gvec_usra_b, uint8_t) 1002 DO_SRA(gvec_usra_h, uint16_t) 1003 DO_SRA(gvec_usra_s, uint32_t) 1004 DO_SRA(gvec_usra_d, uint64_t) 1005 1006 #undef DO_SRA 1007 1008 #define DO_RSHR(NAME, TYPE) \ 1009 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ 1010 { \ 1011 intptr_t i, oprsz = simd_oprsz(desc); \ 1012 int shift = simd_data(desc); \ 1013 TYPE *d = vd, *n = vn; \ 1014 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ 1015 TYPE tmp = n[i] >> (shift - 1); \ 1016 d[i] = (tmp >> 1) + (tmp & 1); \ 1017 } \ 1018 clear_tail(d, oprsz, simd_maxsz(desc)); \ 1019 } 1020 1021 DO_RSHR(gvec_srshr_b, int8_t) 1022 DO_RSHR(gvec_srshr_h, int16_t) 1023 DO_RSHR(gvec_srshr_s, int32_t) 1024 DO_RSHR(gvec_srshr_d, int64_t) 1025 1026 DO_RSHR(gvec_urshr_b, uint8_t) 1027 DO_RSHR(gvec_urshr_h, uint16_t) 1028 DO_RSHR(gvec_urshr_s, uint32_t) 1029 DO_RSHR(gvec_urshr_d, uint64_t) 1030 1031 #undef DO_RSHR 1032 1033 #define DO_RSRA(NAME, TYPE) \ 1034 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ 1035 { \ 1036 intptr_t i, oprsz = simd_oprsz(desc); \ 1037 int shift = simd_data(desc); \ 1038 TYPE *d = vd, *n = vn; \ 1039 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ 1040 TYPE tmp = n[i] >> (shift - 1); \ 1041 d[i] += (tmp >> 1) + (tmp & 1); \ 1042 } \ 1043 clear_tail(d, oprsz, simd_maxsz(desc)); \ 1044 } 1045 1046 DO_RSRA(gvec_srsra_b, int8_t) 1047 DO_RSRA(gvec_srsra_h, int16_t) 1048 DO_RSRA(gvec_srsra_s, int32_t) 1049 DO_RSRA(gvec_srsra_d, int64_t) 1050 1051 DO_RSRA(gvec_ursra_b, uint8_t) 1052 DO_RSRA(gvec_ursra_h, uint16_t) 1053 DO_RSRA(gvec_ursra_s, uint32_t) 1054 DO_RSRA(gvec_ursra_d, uint64_t) 1055 1056 #undef DO_RSRA 1057 1058 #define DO_SRI(NAME, TYPE) \ 1059 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ 1060 { \ 1061 intptr_t i, oprsz = simd_oprsz(desc); \ 1062 int shift = simd_data(desc); \ 1063 TYPE *d = vd, *n = vn; \ 1064 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ 1065 d[i] = deposit64(d[i], 0, sizeof(TYPE) * 8 - shift, n[i] >> shift); \ 1066 } \ 1067 clear_tail(d, oprsz, simd_maxsz(desc)); \ 1068 } 1069 1070 DO_SRI(gvec_sri_b, uint8_t) 1071 DO_SRI(gvec_sri_h, uint16_t) 1072 DO_SRI(gvec_sri_s, uint32_t) 1073 DO_SRI(gvec_sri_d, uint64_t) 1074 1075 #undef DO_SRI 1076 1077 #define DO_SLI(NAME, TYPE) \ 1078 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ 1079 { \ 1080 intptr_t i, oprsz = simd_oprsz(desc); \ 1081 int shift = simd_data(desc); \ 1082 TYPE *d = vd, *n = vn; \ 1083 for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ 1084 d[i] = deposit64(d[i], shift, sizeof(TYPE) * 8 - shift, n[i]); \ 1085 } \ 1086 clear_tail(d, oprsz, simd_maxsz(desc)); \ 1087 } 1088 1089 DO_SLI(gvec_sli_b, uint8_t) 1090 DO_SLI(gvec_sli_h, uint16_t) 1091 DO_SLI(gvec_sli_s, uint32_t) 1092 DO_SLI(gvec_sli_d, uint64_t) 1093 1094 #undef DO_SLI 1095 1096 /* 1097 * Convert float16 to float32, raising no exceptions and 1098 * preserving exceptional values, including SNaN. 1099 * This is effectively an unpack+repack operation. 1100 */ 1101 static float32 float16_to_float32_by_bits(uint32_t f16, bool fz16) 1102 { 1103 const int f16_bias = 15; 1104 const int f32_bias = 127; 1105 uint32_t sign = extract32(f16, 15, 1); 1106 uint32_t exp = extract32(f16, 10, 5); 1107 uint32_t frac = extract32(f16, 0, 10); 1108 1109 if (exp == 0x1f) { 1110 /* Inf or NaN */ 1111 exp = 0xff; 1112 } else if (exp == 0) { 1113 /* Zero or denormal. */ 1114 if (frac != 0) { 1115 if (fz16) { 1116 frac = 0; 1117 } else { 1118 /* 1119 * Denormal; these are all normal float32. 1120 * Shift the fraction so that the msb is at bit 11, 1121 * then remove bit 11 as the implicit bit of the 1122 * normalized float32. Note that we still go through 1123 * the shift for normal numbers below, to put the 1124 * float32 fraction at the right place. 1125 */ 1126 int shift = clz32(frac) - 21; 1127 frac = (frac << shift) & 0x3ff; 1128 exp = f32_bias - f16_bias - shift + 1; 1129 } 1130 } 1131 } else { 1132 /* Normal number; adjust the bias. */ 1133 exp += f32_bias - f16_bias; 1134 } 1135 sign <<= 31; 1136 exp <<= 23; 1137 frac <<= 23 - 10; 1138 1139 return sign | exp | frac; 1140 } 1141 1142 static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2) 1143 { 1144 /* 1145 * Branchless load of u32[0], u64[0], u32[1], or u64[1]. 1146 * Load the 2nd qword iff is_q & is_2. 1147 * Shift to the 2nd dword iff !is_q & is_2. 1148 * For !is_q & !is_2, the upper bits of the result are garbage. 1149 */ 1150 return ptr[is_q & is_2] >> ((is_2 & ~is_q) << 5); 1151 } 1152 1153 /* 1154 * Note that FMLAL requires oprsz == 8 or oprsz == 16, 1155 * as there is not yet SVE versions that might use blocking. 1156 */ 1157 1158 static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst, 1159 uint32_t desc, bool fz16) 1160 { 1161 intptr_t i, oprsz = simd_oprsz(desc); 1162 int is_s = extract32(desc, SIMD_DATA_SHIFT, 1); 1163 int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); 1164 int is_q = oprsz == 16; 1165 uint64_t n_4, m_4; 1166 1167 /* Pre-load all of the f16 data, avoiding overlap issues. */ 1168 n_4 = load4_f16(vn, is_q, is_2); 1169 m_4 = load4_f16(vm, is_q, is_2); 1170 1171 /* Negate all inputs for FMLSL at once. */ 1172 if (is_s) { 1173 n_4 ^= 0x8000800080008000ull; 1174 } 1175 1176 for (i = 0; i < oprsz / 4; i++) { 1177 float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16); 1178 float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16); 1179 d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst); 1180 } 1181 clear_tail(d, oprsz, simd_maxsz(desc)); 1182 } 1183 1184 void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm, 1185 void *venv, uint32_t desc) 1186 { 1187 CPUARMState *env = venv; 1188 do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc, 1189 get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); 1190 } 1191 1192 void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, 1193 void *venv, uint32_t desc) 1194 { 1195 CPUARMState *env = venv; 1196 do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc, 1197 get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); 1198 } 1199 1200 static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst, 1201 uint32_t desc, bool fz16) 1202 { 1203 intptr_t i, oprsz = simd_oprsz(desc); 1204 int is_s = extract32(desc, SIMD_DATA_SHIFT, 1); 1205 int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); 1206 int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3); 1207 int is_q = oprsz == 16; 1208 uint64_t n_4; 1209 float32 m_1; 1210 1211 /* Pre-load all of the f16 data, avoiding overlap issues. */ 1212 n_4 = load4_f16(vn, is_q, is_2); 1213 1214 /* Negate all inputs for FMLSL at once. */ 1215 if (is_s) { 1216 n_4 ^= 0x8000800080008000ull; 1217 } 1218 1219 m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16); 1220 1221 for (i = 0; i < oprsz / 4; i++) { 1222 float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16); 1223 d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst); 1224 } 1225 clear_tail(d, oprsz, simd_maxsz(desc)); 1226 } 1227 1228 void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm, 1229 void *venv, uint32_t desc) 1230 { 1231 CPUARMState *env = venv; 1232 do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc, 1233 get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); 1234 } 1235 1236 void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, 1237 void *venv, uint32_t desc) 1238 { 1239 CPUARMState *env = venv; 1240 do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc, 1241 get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); 1242 } 1243 1244 void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc) 1245 { 1246 intptr_t i, opr_sz = simd_oprsz(desc); 1247 int8_t *d = vd, *n = vn, *m = vm; 1248 1249 for (i = 0; i < opr_sz; ++i) { 1250 int8_t mm = m[i]; 1251 int8_t nn = n[i]; 1252 int8_t res = 0; 1253 if (mm >= 0) { 1254 if (mm < 8) { 1255 res = nn << mm; 1256 } 1257 } else { 1258 res = nn >> (mm > -8 ? -mm : 7); 1259 } 1260 d[i] = res; 1261 } 1262 clear_tail(d, opr_sz, simd_maxsz(desc)); 1263 } 1264 1265 void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc) 1266 { 1267 intptr_t i, opr_sz = simd_oprsz(desc); 1268 int16_t *d = vd, *n = vn, *m = vm; 1269 1270 for (i = 0; i < opr_sz / 2; ++i) { 1271 int8_t mm = m[i]; /* only 8 bits of shift are significant */ 1272 int16_t nn = n[i]; 1273 int16_t res = 0; 1274 if (mm >= 0) { 1275 if (mm < 16) { 1276 res = nn << mm; 1277 } 1278 } else { 1279 res = nn >> (mm > -16 ? -mm : 15); 1280 } 1281 d[i] = res; 1282 } 1283 clear_tail(d, opr_sz, simd_maxsz(desc)); 1284 } 1285 1286 void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc) 1287 { 1288 intptr_t i, opr_sz = simd_oprsz(desc); 1289 uint8_t *d = vd, *n = vn, *m = vm; 1290 1291 for (i = 0; i < opr_sz; ++i) { 1292 int8_t mm = m[i]; 1293 uint8_t nn = n[i]; 1294 uint8_t res = 0; 1295 if (mm >= 0) { 1296 if (mm < 8) { 1297 res = nn << mm; 1298 } 1299 } else { 1300 if (mm > -8) { 1301 res = nn >> -mm; 1302 } 1303 } 1304 d[i] = res; 1305 } 1306 clear_tail(d, opr_sz, simd_maxsz(desc)); 1307 } 1308 1309 void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc) 1310 { 1311 intptr_t i, opr_sz = simd_oprsz(desc); 1312 uint16_t *d = vd, *n = vn, *m = vm; 1313 1314 for (i = 0; i < opr_sz / 2; ++i) { 1315 int8_t mm = m[i]; /* only 8 bits of shift are significant */ 1316 uint16_t nn = n[i]; 1317 uint16_t res = 0; 1318 if (mm >= 0) { 1319 if (mm < 16) { 1320 res = nn << mm; 1321 } 1322 } else { 1323 if (mm > -16) { 1324 res = nn >> -mm; 1325 } 1326 } 1327 d[i] = res; 1328 } 1329 clear_tail(d, opr_sz, simd_maxsz(desc)); 1330 } 1331 1332 /* 1333 * 8x8->8 polynomial multiply. 1334 * 1335 * Polynomial multiplication is like integer multiplication except the 1336 * partial products are XORed, not added. 1337 * 1338 * TODO: expose this as a generic vector operation, as it is a common 1339 * crypto building block. 1340 */ 1341 void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc) 1342 { 1343 intptr_t i, j, opr_sz = simd_oprsz(desc); 1344 uint64_t *d = vd, *n = vn, *m = vm; 1345 1346 for (i = 0; i < opr_sz / 8; ++i) { 1347 uint64_t nn = n[i]; 1348 uint64_t mm = m[i]; 1349 uint64_t rr = 0; 1350 1351 for (j = 0; j < 8; ++j) { 1352 uint64_t mask = (nn & 0x0101010101010101ull) * 0xff; 1353 rr ^= mm & mask; 1354 mm = (mm << 1) & 0xfefefefefefefefeull; 1355 nn >>= 1; 1356 } 1357 d[i] = rr; 1358 } 1359 clear_tail(d, opr_sz, simd_maxsz(desc)); 1360 } 1361 1362 /* 1363 * 64x64->128 polynomial multiply. 1364 * Because of the lanes are not accessed in strict columns, 1365 * this probably cannot be turned into a generic helper. 1366 */ 1367 void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc) 1368 { 1369 intptr_t i, j, opr_sz = simd_oprsz(desc); 1370 intptr_t hi = simd_data(desc); 1371 uint64_t *d = vd, *n = vn, *m = vm; 1372 1373 for (i = 0; i < opr_sz / 8; i += 2) { 1374 uint64_t nn = n[i + hi]; 1375 uint64_t mm = m[i + hi]; 1376 uint64_t rhi = 0; 1377 uint64_t rlo = 0; 1378 1379 /* Bit 0 can only influence the low 64-bit result. */ 1380 if (nn & 1) { 1381 rlo = mm; 1382 } 1383 1384 for (j = 1; j < 64; ++j) { 1385 uint64_t mask = -((nn >> j) & 1); 1386 rlo ^= (mm << j) & mask; 1387 rhi ^= (mm >> (64 - j)) & mask; 1388 } 1389 d[i] = rlo; 1390 d[i + 1] = rhi; 1391 } 1392 clear_tail(d, opr_sz, simd_maxsz(desc)); 1393 } 1394 1395 /* 1396 * 8x8->16 polynomial multiply. 1397 * 1398 * The byte inputs are expanded to (or extracted from) half-words. 1399 * Note that neon and sve2 get the inputs from different positions. 1400 * This allows 4 bytes to be processed in parallel with uint64_t. 1401 */ 1402 1403 static uint64_t expand_byte_to_half(uint64_t x) 1404 { 1405 return (x & 0x000000ff) 1406 | ((x & 0x0000ff00) << 8) 1407 | ((x & 0x00ff0000) << 16) 1408 | ((x & 0xff000000) << 24); 1409 } 1410 1411 static uint64_t pmull_h(uint64_t op1, uint64_t op2) 1412 { 1413 uint64_t result = 0; 1414 int i; 1415 1416 for (i = 0; i < 8; ++i) { 1417 uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff; 1418 result ^= op2 & mask; 1419 op1 >>= 1; 1420 op2 <<= 1; 1421 } 1422 return result; 1423 } 1424 1425 void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc) 1426 { 1427 int hi = simd_data(desc); 1428 uint64_t *d = vd, *n = vn, *m = vm; 1429 uint64_t nn = n[hi], mm = m[hi]; 1430 1431 d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm)); 1432 nn >>= 32; 1433 mm >>= 32; 1434 d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm)); 1435 1436 clear_tail(d, 16, simd_maxsz(desc)); 1437 } 1438 1439 #ifdef TARGET_AARCH64 1440 void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc) 1441 { 1442 int shift = simd_data(desc) * 8; 1443 intptr_t i, opr_sz = simd_oprsz(desc); 1444 uint64_t *d = vd, *n = vn, *m = vm; 1445 1446 for (i = 0; i < opr_sz / 8; ++i) { 1447 uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull; 1448 uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull; 1449 1450 d[i] = pmull_h(nn, mm); 1451 } 1452 } 1453 #endif 1454 1455 #define DO_CMP0(NAME, TYPE, OP) \ 1456 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ 1457 { \ 1458 intptr_t i, opr_sz = simd_oprsz(desc); \ 1459 for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \ 1460 TYPE nn = *(TYPE *)(vn + i); \ 1461 *(TYPE *)(vd + i) = -(nn OP 0); \ 1462 } \ 1463 clear_tail(vd, opr_sz, simd_maxsz(desc)); \ 1464 } 1465 1466 DO_CMP0(gvec_ceq0_b, int8_t, ==) 1467 DO_CMP0(gvec_clt0_b, int8_t, <) 1468 DO_CMP0(gvec_cle0_b, int8_t, <=) 1469 DO_CMP0(gvec_cgt0_b, int8_t, >) 1470 DO_CMP0(gvec_cge0_b, int8_t, >=) 1471 1472 DO_CMP0(gvec_ceq0_h, int16_t, ==) 1473 DO_CMP0(gvec_clt0_h, int16_t, <) 1474 DO_CMP0(gvec_cle0_h, int16_t, <=) 1475 DO_CMP0(gvec_cgt0_h, int16_t, >) 1476 DO_CMP0(gvec_cge0_h, int16_t, >=) 1477 1478 #undef DO_CMP0 1479 1480 #define DO_ABD(NAME, TYPE) \ 1481 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ 1482 { \ 1483 intptr_t i, opr_sz = simd_oprsz(desc); \ 1484 TYPE *d = vd, *n = vn, *m = vm; \ 1485 \ 1486 for (i = 0; i < opr_sz / sizeof(TYPE); ++i) { \ 1487 d[i] = n[i] < m[i] ? m[i] - n[i] : n[i] - m[i]; \ 1488 } \ 1489 clear_tail(d, opr_sz, simd_maxsz(desc)); \ 1490 } 1491 1492 DO_ABD(gvec_sabd_b, int8_t) 1493 DO_ABD(gvec_sabd_h, int16_t) 1494 DO_ABD(gvec_sabd_s, int32_t) 1495 DO_ABD(gvec_sabd_d, int64_t) 1496 1497 DO_ABD(gvec_uabd_b, uint8_t) 1498 DO_ABD(gvec_uabd_h, uint16_t) 1499 DO_ABD(gvec_uabd_s, uint32_t) 1500 DO_ABD(gvec_uabd_d, uint64_t) 1501 1502 #undef DO_ABD 1503 1504 #define DO_ABA(NAME, TYPE) \ 1505 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ 1506 { \ 1507 intptr_t i, opr_sz = simd_oprsz(desc); \ 1508 TYPE *d = vd, *n = vn, *m = vm; \ 1509 \ 1510 for (i = 0; i < opr_sz / sizeof(TYPE); ++i) { \ 1511 d[i] += n[i] < m[i] ? m[i] - n[i] : n[i] - m[i]; \ 1512 } \ 1513 clear_tail(d, opr_sz, simd_maxsz(desc)); \ 1514 } 1515 1516 DO_ABA(gvec_saba_b, int8_t) 1517 DO_ABA(gvec_saba_h, int16_t) 1518 DO_ABA(gvec_saba_s, int32_t) 1519 DO_ABA(gvec_saba_d, int64_t) 1520 1521 DO_ABA(gvec_uaba_b, uint8_t) 1522 DO_ABA(gvec_uaba_h, uint16_t) 1523 DO_ABA(gvec_uaba_s, uint32_t) 1524 DO_ABA(gvec_uaba_d, uint64_t) 1525 1526 #undef DO_ABA 1527